ext (string, 9 classes) | sha (string, 40 chars) | content (string, 3 to 1.04M chars) |
---|---|---|
py | 1a4276bf2542d83b3e3058c323bcd5b3778a5a0c | from bakery.custom_filebrowser.sites import FileBrowserSite
from filebrowser.sites import site as default_site # noqa
firmware_site = FileBrowserSite(
name='firmware',
directory='firmware',
extensions={
'Tarball': ['.tar', '.tgz'],
'Package': ['.deb', '.rpm'],
},
select_formats={
'Firmware': ['Tarball', 'Package'],
})
|
py | 1a4277368354cd7a83101339ac594639267d19be | # https://github.com/alexalemi/segmentation/blob/master/code/representations.py
"""
Code for figuring out various vector representations of documents
"""
import numpy as np
from collections import defaultdict
import tools
def tf_sents(doc):
""" Create a sentence level tf representation of the document """
words = set( word for word in tools.word_iter(doc) )
word_pk = { word:pk for pk,word in enumerate(words) }
vecs = []
for part in doc:
for sent in part:
wordcounter = defaultdict(int)
for word in sent:
wordcounter[word] += 1
vec = np.zeros(len(words))
            for word, count in wordcounter.items():
if word in words:
vec[word_pk[word]] += count
vecs.append(vec)
return np.array(vecs)
def tf_words(doc):
""" Create a sentence level tf representation of the document """
words = set( word for word in tools.word_iter(doc) )
word_pk = { word:pk for pk,word in enumerate(words) }
vecs = []
for part in doc:
for sent in part:
for word in sent:
vec = np.zeros(len(words))
if word in words:
vec[word_pk[word]] += 1
vecs.append(vec)
return np.array(vecs)
def vec_sents(doc, word_lookup, wordreps):
""" Create a vector representation of the document """
vecs = []
for part in doc:
for sent in part:
wordvecs = [np.zeros(wordreps.shape[1])]
for word in sent:
pk = word_lookup.get(word,-1)
if pk >= 0:
wordvecs.append( wordreps[pk] )
vecs.append( np.mean(wordvecs,0) )
return np.array(vecs)
def vec_words(doc, word_lookup, wordreps):
""" Create a vector representation of the document """
vecs = []
for part in doc:
for sent in part:
for word in sent:
pk = word_lookup.get(word,-1)
if pk >= 0:
vecs.append( wordreps[pk] )
else:
vecs.append( np.zeros(wordreps.shape[1]) )
return np.array(vecs)
def vectop_sents(doc, word_lookup, wordreps):
""" Create a vector representation of the document """
vecs = []
N = wordreps.max()+1
for part in doc:
for sent in part:
sentvec = np.zeros(N)
for word in sent:
pk = word_lookup.get(word,-1)
if pk >= 0:
                    sentvec[wordreps[pk]] += 1
vecs.append( sentvec )
return np.array(vecs)
def vecdf_sents(doc, word_lookup, wordreps, dfcounter):
""" Create a vector representation of the document """
vecs = []
for part in doc:
for sent in part:
wordvecs = [np.zeros(wordreps.shape[1])]
for word in sent:
pk = word_lookup.get(word,-1)
if pk >= 0:
wordvecs.append( np.log(500./(dfcounter.get(word,1.0)+0.0))*wordreps[pk] )
vecs.append( np.mean(wordvecs,0) )
return np.array(vecs)
def vecdf_words(doc, word_lookup, wordreps, dfcounter):
""" Create a vector representation of the document """
vecs = []
for part in doc:
for sent in part:
for word in sent:
pk = word_lookup.get(word,-1)
if pk >= 0:
vecs.append( np.log(500./(dfcounter.get(word,1.0)+0.0))*wordreps[pk] )
else:
vecs.append( np.zeros(wordreps.shape[1]) )
return np.array(vecs)
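# Hedged usage sketch (not part of the original module). All of the functions
# above expect `doc` to be a nested iterable of parts -> sentences -> words,
# and they assume tools.word_iter(doc) yields every word token in the document:
#
#     doc = [[['the', 'cat', 'sat'], ['the', 'dog']],   # part 1, two sentences
#            [['a', 'cat', 'ran']]]                     # part 2, one sentence
#     X = tf_sents(doc)   # array of shape (number of sentences, vocabulary size)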
|
py | 1a4278d6ccf7ffcf9e70fd885de129a163a4d9c4 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
import mindspore.nn as nn
import mindspore.context as context
from mindspore.common import dtype as mstype
context.set_context(device_target='GPU')
class UnsortedSegmentSumNet(nn.Cell):
def __init__(self, num_segments):
super(UnsortedSegmentSumNet, self).__init__()
self.unsorted_segment_sum = P.UnsortedSegmentSum()
self.num_segments = num_segments
def construct(self, data, ids):
return self.unsorted_segment_sum(data, ids, self.num_segments)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_1D():
input_x = Tensor([1, 2, 3, 4], mstype.float32)
segment_ids = Tensor([0, 0, 1, 2], mstype.int32)
num_segments = 4
net = UnsortedSegmentSumNet(num_segments)
output = net(input_x, segment_ids)
expect = [3, 3, 4, 0]
assert (output.asnumpy() == expect).all()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_2D():
input_x = Tensor([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]], mstype.float32)
segment_ids = Tensor([2, 1, 1], mstype.int32)
num_segments = 4
net = UnsortedSegmentSumNet(num_segments)
output = net(input_x, segment_ids)
expect = [[ 0, 0, 0, 0],
[14, 16, 18, 20],
[ 1, 2, 3, 4],
[ 0, 0, 0, 0]]
assert (output.asnumpy() == expect).all()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_3D():
input_x = Tensor(np.arange(4 * 5 * 3, dtype=np.float32).reshape(4, 5, 3))
segment_ids = Tensor([2, 1, 1, -1], mstype.int32)
num_segments = 5
net = UnsortedSegmentSumNet(num_segments)
output = net(input_x, segment_ids)
expect = [[[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]],
[[45., 47., 49.],
[51., 53., 55.],
[57., 59., 61.],
[63., 65., 67.],
[69., 71., 73.]],
[[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.],
[12., 13., 14.]],
[[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]],
[[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]]]
assert (output.asnumpy() == expect).all()
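# Plain-NumPy reference (not part of MindSpore; added only to show how the
# expected arrays above are derived): each input row is accumulated into the
# output slot given by its segment id, and ids outside [0, num_segments) --
# such as the -1 in test_3D -- contribute nothing.
def np_unsorted_segment_sum(data, ids, num_segments):
    out = np.zeros((num_segments,) + data.shape[1:], dtype=data.dtype)
    for row, seg in zip(data, ids):
        if 0 <= seg < num_segments:
            out[seg] += row
    return out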
|
py | 1a427aa7d82e80507877d4838863128c4b543ab8 | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An optimization pass that pushes Z gates later and later in the circuit."""
from typing import Iterator, Tuple, cast
from cirq import ops, extension
from cirq.circuits import Circuit, InsertStrategy, OptimizationPass
from cirq.google.decompositions import is_negligible_turn
from cirq.google.xmon_gates import ExpZGate
from cirq.value import Symbol
KNOWN_Z_TYPES = (ExpZGate, ops.RotZGate)
class EjectZ(OptimizationPass):
"""Removes Z gates by pushing them later and later until they merge.
As Z gates are removed from the circuit, 'lost phase' builds up. As lost
phase is pushed rightward, it modifies phaseable operations along the way.
Eventually the lost phase is discharged into a 'drain'. Only Z gates
without a parameter dependence are removed.
There are three kinds of drains:
- Measurement gates, which absorb phase by discarding it.
- Parameterized Z gates, which absorb phase into their turns attribute.
- The end of the circuit, which absorbs phase into a new Z gate.
"""
def __init__(self,
tolerance: float = 0.0,
ext: extension.Extensions=None) -> None:
"""
Args:
tolerance: Maximum absolute error tolerance. The optimization is
permitted to simply drop negligible combinations of Z gates,
with a threshold determined by this tolerance.
ext: Extensions object used for determining if gates are phaseable
(i.e. if Z gates can pass through them).
"""
self.tolerance = tolerance
self.ext = ext or extension.Extensions()
def optimize_circuit(self, circuit: Circuit):
qubits = {
q
for m in circuit.moments for op in m.operations for q in op.qubits
}
for qubit in qubits:
for start, drain in self._find_optimization_range_drains(circuit,
qubit):
self._optimize_range(circuit, qubit, start, drain)
def _find_optimization_range_drains(
self,
circuit: Circuit,
qubit: ops.QubitId) -> Iterator[Tuple[int, int]]:
"""Finds ranges where Z gates can be pushed rightward.
Args:
circuit: The circuit being optimized.
qubit: The qubit along which Z operations are being merged.
Yields:
(start, drain) tuples. Z gates on the given qubit from moments with
indices in the range [start, drain) should all be merged into
whatever is at the drain index.
"""
start_z = None
prev_z = None
for i in range(len(circuit.moments)):
op = circuit.operation_at(qubit, i)
if op is None:
continue
if start_z is None:
# Unparameterized Zs start optimization ranges.
if (isinstance(op.gate, KNOWN_Z_TYPES) and
not isinstance(op.gate.half_turns,
Symbol)):
start_z = i
prev_z = None
elif self.ext.can_cast(op.gate, ops.MeasurementGate):
# Measurement acts like a drain. It destroys phase information.
yield start_z, i
start_z = None
elif (isinstance(op.gate, KNOWN_Z_TYPES) and
not isinstance(op.gate.half_turns, Symbol)):
# Could be a drain. Depends if an unphaseable gate follows.
prev_z = i
elif not self.ext.can_cast(op.gate, ops.PhaseableGate):
# Unphaseable gates force earlier draining.
if prev_z is not None:
yield start_z, prev_z
start_z = None
# End of the circuit forces draining.
if start_z is not None:
yield start_z, len(circuit.moments)
def _optimize_range(self, circuit: Circuit, qubit: ops.QubitId,
start: int, drain: int):
"""Pushes Z gates from [start, drain) into the drain.
Assumes no unphaseable gates will be crossed, and that the drain is
valid.
Args:
circuit: The circuit being optimized.
qubit: The qubit along which Z operations are being merged.
start: The inclusive start of the range containing Z gates to
eject.
drain: The exclusive end of the range containing Z gates to eject.
Also the index of where the effects of the Z gates should end
up.
"""
lost_phase_turns = 0.0
for i in range(start, drain):
op = circuit.operation_at(qubit, i)
if op is None:
# Empty.
pass
elif isinstance(op.gate, KNOWN_Z_TYPES):
# Move Z effects out of the circuit and into lost_phase_turns.
circuit.clear_operations_touching([qubit], [i])
lost_phase_turns += cast(float, op.gate.half_turns) / 2
elif self.ext.can_cast(op.gate, ops.PhaseableGate):
# Adjust phaseable gates to account for the lost phase.
phaseable = self.ext.cast(op.gate, ops.PhaseableGate)
k = op.qubits.index(qubit)
circuit.clear_operations_touching(op.qubits, [i])
circuit.insert(i + 1,
phaseable.phase_by(-lost_phase_turns, k).on(
*op.qubits),
InsertStrategy.INLINE)
self._drain_into(circuit, qubit, drain, lost_phase_turns)
def _drain_into(self, circuit: Circuit, qubit: ops.QubitId,
drain: int, accumulated_phase: float):
if is_negligible_turn(accumulated_phase, self.tolerance):
return
# Drain type: end of circuit.
if drain == len(circuit.moments):
circuit.append(
ExpZGate(half_turns=2*accumulated_phase).on(qubit),
InsertStrategy.INLINE)
return
# Drain type: another Z gate.
op = cast(ops.Operation, circuit.operation_at(qubit, drain))
if isinstance(op.gate, ExpZGate):
half_turns = cast(float, op.gate.half_turns) + accumulated_phase * 2
circuit.clear_operations_touching([qubit], [drain])
circuit.insert(
drain + 1,
ExpZGate(half_turns=half_turns).on(qubit),
InsertStrategy.INLINE)
return
# Drain type: measurement gate.
# (Don't have to do anything.)
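# Hedged usage sketch (not part of the original module): the pass mutates a
# circuit in place, so applying it looks roughly like
#
#     circuit = ...                                    # a cirq.Circuit built elsewhere
#     EjectZ(tolerance=1e-8).optimize_circuit(circuit)
#
# Non-parameterized Z rotations are then merged into measurement gates, later
# Z gates, or a trailing ExpZGate, as described in the class docstring.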
|
py | 1a427b0aa627d909473b9be0ad04e4e1604d2271 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline)) |
py | 1a427c1fa96a10cbea3a629e018db1d4ef064a71 | import platform
import os
import sys
# Resources shared by both the Network and Server files
BUFSIZE = 4096 * 2
PORT = 5555
INTERNAL_PORT = 4321
# The ipv4 address of the host machine. Run ipconfig from cmd to get this
HOST = "127.0.0.1"
if platform.system() == 'Darwin':
LOCAL = "127.0.0.1" #"192.168.1.154"
else:
# This allows you to try out different ports when running through DO cli
if len(sys.argv) >= 2:
LOCAL = os.getenv('HOSTNAME')
PORT = sys.argv[1]
else:
LOCAL = os.getenv('HOSTNAME')
SINGLE_PLAYER = True
# Time client waits between sending requests for changed state
CLIENT_WAIT = 0.1
# Messages
GET_STATE = 'Get'
DO_ACTION = 'Do'
INIT_MSG = 'Init'
MULLIGAN_MSG = 'Mull'
# Responses
NO_UPDATE = 'No update'
UPDATE = 'Update'
VALID_CHOICE = 'Valid choice'
INVALID_CHOICE = 'Invalid choice'
# Log into router, port forwarding, port 5555 to my local machine
# Tell my router goes to the ip I had been using |
py | 1a427c7c3a5ddc6e8620f1c8a5e6f5392913efd4 | """
TODO: ADD FEATURE TO ENABLE USE OF UNBOUNDED VARIABLES
Note: This does not work with the current version of PLEpy, to be fixed
in future versions
Uses a calculated "cross-talk" matrix (converts 3D counts to 2D
activity for each 3D and 2D shell) to fit first-order rate coefficients
and initial activity in 3D shells using simulated 2D planar imaging
data. Each 3D shell only moves inward.
Model:
dA5/dt = -k5*A5
dA4/dt = k5*A5 - k4*A4
dA3/dt = k4*A4 - k3*A3
dA2/dt = k3*A3 - k2*A2
dA1/dt = k2*A2 - k1*A1
where k1-k5 are the rate coefficients and k1 > k2 > k3 > k4 > k5
"""
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.io import loadmat
import pyomo.environ as penv
from pyomo.dae import ContinuousSet, DerivativeVar
sys.path.append(os.path.abspath("../../"))
from plepy import PLEpy
pwd = os.getcwd()
fpath = os.path.dirname(__file__)
os.chdir(fpath)
# Import 2D data
ydata = np.load('toy2D_data_exp3.npz')['arr_0']
ytotal = ydata.sum(axis=1)
tdata = list(range(0, 81, 2))
# Import cross-talk matrix
crssfile = loadmat('shelltoy_crsstlk_dist.mat')
ctalk = crssfile['crsstlk']
ictalk = np.linalg.inv(ctalk)
iydata = np.dot(ictalk, ydata.T).T
iydata[1:, :] = (iydata[1:, :] + iydata[:-1, :])/2
# Actual data (for comparison)
datafile = loadmat('shelltoydata_exp3.mat')
data3d = datafile['a']
# Initial guesses
k0 = [5., 5., 1., 0.75, 0.5] # [p1, p2, p3, p4, k5]
a0 = np.dot(ictalk, ydata[0, :].T) # [A1, A2, a3, A4, A5]'
da0dt = [k0[i+1]*a0[i+1] - k0[i]*a0[i] for i in range(4)]
da0dt.append(-k0[4]*a0[4])
da0dt = [1e-2*a for a in da0dt]
# Create dynamic model
model = penv.ConcreteModel()
# Define parameters
model.t = ContinuousSet(bounds=(0, 81), initialize=range(81))
# Rate coefficients are fit as sum of previous rate coefficient and
# corresponding "p" parameter.
# k4 = k5 + p4, k3 = k4 + p3, etc.
model.p1 = penv.Var(initialize=k0[0], bounds=(1e-3, 100.))
model.p2 = penv.Var(initialize=k0[1], bounds=(1e-3, 100.))
model.p3 = penv.Var(initialize=k0[2], bounds=(1e-3, 100.))
model.p4 = penv.Var(initialize=k0[3], bounds=(1e-3, 100.))
model.k5 = penv.Var(initialize=k0[4], bounds=(1e-3, 100.))
# Define 3D shell states
model.A1 = penv.Var(model.t, initialize=a0[0], within=penv.NonNegativeReals)
model.A2 = penv.Var(model.t, initialize=a0[1], within=penv.NonNegativeReals)
model.A3 = penv.Var(model.t, initialize=a0[2], within=penv.NonNegativeReals)
model.A4 = penv.Var(model.t, initialize=a0[3], within=penv.NonNegativeReals)
model.A5 = penv.Var(model.t, initialize=a0[4], within=penv.NonNegativeReals)
# Initialize derivatives
model.dA1dt = DerivativeVar(model.A1, wrt=model.t, initialize=da0dt[0])
model.dA2dt = DerivativeVar(model.A2, wrt=model.t, initialize=da0dt[1])
model.dA3dt = DerivativeVar(model.A3, wrt=model.t, initialize=da0dt[2])
model.dA4dt = DerivativeVar(model.A4, wrt=model.t, initialize=da0dt[3])
model.dA5dt = DerivativeVar(model.A5, wrt=model.t, initialize=da0dt[4])
# System dynamics
def _dA1dt(m, t):
k4 = m.k5 + m.p4
k3 = k4 + m.p3
k2 = k3 + m.p2
k1 = k2 + m.p1
return m.dA1dt[t] == 1e-2*(k2*m.A2[t] - k1*m.A1[t])
model.dA1dt_ode = penv.Constraint(model.t, rule=_dA1dt)
def _dA2dt(m, t):
k4 = m.k5 + m.p4
k3 = k4 + m.p3
k2 = k3 + m.p2
    return m.dA2dt[t] == 1e-2*(k3*m.A3[t] - k2*m.A2[t])
model.dA2dt_ode = penv.Constraint(model.t, rule=_dA2dt)
def _dA3dt(m, t):
k4 = m.k5 + m.p4
k3 = k4 + m.p3
return m.dA3dt[t] == 1e-2*(k4*m.A4[t] - k3*m.A3[t])
model.dA3dt_ode = penv.Constraint(model.t, rule=_dA3dt)
def _dA4dt(m, t):
k4 = m.k5 + m.p4
return m.dA4dt[t] == 1e-2*(m.k5*m.A5[t] - k4*m.A4[t])
model.dA4dt_ode = penv.Constraint(model.t, rule=_dA4dt)
def _dA5dt(m, t):
return m.dA5dt[t] == 1e-2*(- m.k5*m.A5[t])
model.dA5dt_ode = penv.Constraint(model.t, rule=_dA5dt)
# Objective function (SSE)
def _obj(m):
a3D = np.array([[m.A1[t], m.A2[t], m.A3[t], m.A4[t], m.A5[t]]
for t in tdata]).T
a2D = np.dot(ctalk, a3D).T
# err = (ydata - a2D)**2
err = (iydata - a3D.T)**2
return sum(sum(err))
model.obj = penv.Objective(rule=_obj)
# Set-up solver
TFD=penv.TransformationFactory("dae.finite_difference")
TFD.apply_to(model, nfe=2*len(model.t), wrt=model.t, scheme="BACKWARD")
solver = penv.SolverFactory('ipopt')
solver.options['linear_solver'] = 'ma97' # academic solver
solver.options['tol'] = 1e-6
solver.options['max_iter'] = 6000
results = solver.solve(model, keepfiles=False, tee=True)
model.solutions.load_from(results)
# Plot results
sns.set(context='talk')
plt.figure()
ccycle = plt.rcParams['axes.prop_cycle'].by_key()['color']
plt.plot(tdata, data3d[:, 0], ls='None', marker='o', color=ccycle[0])
plt.plot(tdata, data3d[:, 1], ls='None', marker='o', color=ccycle[1])
plt.plot(tdata, data3d[:, 2], ls='None', marker='o', color=ccycle[2])
plt.plot(tdata, data3d[:, 3], ls='None', marker='o', color=ccycle[3])
plt.plot(tdata, data3d[:, 4], ls='None', marker='o', color=ccycle[4])
# plt.plot(tdata, iydata[:, 0], label='Shell 1', color=ccycle[0])
# plt.plot(tdata, iydata[:, 1], label='Shell 2', color=ccycle[1])
# plt.plot(tdata, iydata[:, 2], label='Shell 3', color=ccycle[2])
# plt.plot(tdata, iydata[:, 3], label='Shell 4', color=ccycle[3])
# plt.plot(tdata, iydata[:, 4], label='Shell 5', color=ccycle[4])
plt.plot(model.t, model.A1[:](), label='Shell 1', color=ccycle[0])
plt.plot(model.t, model.A2[:](), label='Shell 2', color=ccycle[1])
plt.plot(model.t, model.A3[:](), label='Shell 3', color=ccycle[2])
plt.plot(model.t, model.A4[:](), label='Shell 4', color=ccycle[3])
plt.plot(model.t, model.A5[:](), label='Shell 5', color=ccycle[4])
plt.xlabel('Time (min)')
plt.ylabel('Activity (counts)')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2)
plt.tight_layout()
plt.show()
# Initialize PLEpy object
ps = [model.p1(), model.p2(), model.p3(), model.p4(), model.k5()]
ps.reverse()
ks = np.cumsum(ps)
A0s = [model.A1[0](), model.A2[0](), model.A3[0](), model.A4[0](),
model.A5[0]()]
PLobj = PLEpy(model,
['p1', 'p2', 'p3', 'p4', 'k5', 'A1', 'A2', 'A3', 'A4', 'A5'],
indices={'t0': [0]})
PLobj.set_index('A1', 't0')
PLobj.set_index('A2', 't0')
PLobj.set_index('A3', 't0')
PLobj.set_index('A4', 't0')
PLobj.set_index('A5', 't0')
# Get confidence limits using binary search (currently won't work
# because initial activity is unbounded)
PLobj.get_clims(['A1', 'A2', 'A3', 'A4', 'A5'])
# Generate profile likelihood curves
PLobj.get_PL(['A1', 'A2', 'A3', 'A4', 'A5'])
PLobj.plot_PL(pnames=['A1', 'A2', 'A3', 'A4', 'A5'], join=True, jmax=5)
os.chdir(pwd)
|
py | 1a427c8ac3d169c181d0904d196f7f6c4144ad60 | import logging
import platform
import sys
import warnings
if platform.system() == "Windows":
# Configure comtypes to not generate DLL bindings into
# current environment, instead keeping them in memory.
# Slower, but prevents dirtying environments.
import comtypes.client
comtypes.client.gen_dir = None
# Ignore pywinauto warning about threading mode,
# which comtypes initializes to STA instead of MTA on import.
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
import pywinauto
# pylint: disable=wrong-import-position
from robotlibcore import DynamicCore
from RPA.Desktop.utils import Buffer, is_windows
from RPA.Desktop.keywords import (
ElementNotFound,
MultipleElementsFound,
TimeoutException,
ApplicationKeywords,
ClipboardKeywords,
FinderKeywords,
KeyboardKeywords,
MouseKeywords,
ScreenKeywords,
TextKeywords,
)
class Desktop(DynamicCore):
"""`Desktop` is a cross-platform library for navigating and interacting with
desktop environments. It can be used to automate applications through
the same interfaces that are available to human users.
The library includes the following features:
- Mouse and keyboard input emulation
- Starting and stopping applications
- Finding elements through image template matching
- Scraping text from given regions
- Taking screenshots
- Clipboard management
.. warning:: Windows element selectors are not currently supported, and require the use of ``RPA.Desktop.Windows``
**Installation**
The basic features such as mouse and keyboard input and application
control work with a default ``rpaframework`` install.
Advanced computer-vision features such as image template matching and
OCR require an additional library called ``rpaframework-recognition``.
The dependency can be either added separately or through additional
extras with ``rpaframework[cv]``. If installing recognition through
``pip`` instead of ``conda``, the OCR feature also requires ``tesseract``.
**Locating elements**
To automate actions on the desktop, a robot needs to interact with various
graphical elements such as buttons or input fields. The locations of these
elements can be found using a feature called `locators`.
A locator describes the properties or features of an element. This information
can be later used to locate similar elements even when window positions or
states change.
The currently supported locator types are:
=========== ================================================ ===========
Name Arguments Description
=========== ================================================ ===========
alias name (str) A custom named locator from the locator database, the default.
image path (str) Image of an element that is matched to current screen content.
point x (int), y (int) Pixel coordinates as absolute position.
offset x (int), y (int) Pixel coordinates relative to current mouse position.
size width (int), height (int) Region of fixed size, around point or screen top-left
region left (int), top (int), right (int), bottom (int) Bounding coordinates for a rectangular region.
ocr text (str), confidence (float, optional) Text to find from the current screen.
=========== ================================================ ===========
A locator is defined by its type and arguments, divided by a colon.
Some example usages are shown below. Note that the prefix for ``alias`` can
    be omitted as it's the default type.
.. code-block:: robotframework
Click point:50,100
Click region:20,20,100,30
Move mouse image:%{ROBOT_ROOT}/logo.png
Move mouse offset:200,0
Click
Click alias:SpareBin.Login
Click SpareBin.Login
Click ocr:"Create New Account"
You can also pass internal ``region`` objects as locators:
.. code-block:: robotframework
${region}= Find Element ocr:"Customer name"
Click ${region}
**Locator chaining**
Often it is not enough to have one locator, but instead an element
is defined through a relationship of various locators. For this use
case the library supports a special syntax, which we will call
locator chaining.
An example of chaining:
.. code-block:: robotframework
# Read text from area on the right side of logo
Read text image:logo.png + offset:600,0 + size:400,200
The supported operators are:
========== =========================================
Operator Description
========== =========================================
then, + Base locator relative to the previous one
and, &&, & Both locators should be found
or, ||, | Either of the locators should be found
not, ! The locator should not be found
========== =========================================
Further examples:
.. code-block:: robotframework
# Click below either label
Click (image:name.png or image:email.png) then offset:0,300
# Wait until dialog disappears
Wait for element not image:cookie.png
**Named locators**
The library supports storing locators in a database, which contains
all of the required fields and various bits of metadata. This enables
    having one source of truth, which can be updated if a website's or application's
UI changes. Robot Framework scripts can then only contain a reference
to a stored locator by name.
The main way to create named locators is with `Robocorp Lab`_.
.. _Robocorp Lab: https://robocorp.com/docs/product-manuals/robocorp-lab/robocorp-lab-overview
**Keyboard and mouse**
Keyboard keywords can emulate typing text, but also pressing various function keys.
The name of a key is case-insensitive and spaces will be converted to underscores,
i.e. the key ``Page Down`` and ``page_down`` are equivalent.
The following function keys are supported:
=============== ===========
Key Description
=============== ===========
shift A generic Shift key. This is a modifier.
shift_l The left Shift key. This is a modifier.
shift_r The right Shift key. This is a modifier.
ctrl A generic Ctrl key. This is a modifier.
    ctrl_l The left Ctrl key. This is a modifier.
ctrl_r The right Ctrl key. This is a modifier.
alt A generic Alt key. This is a modifier.
alt_l The left Alt key. This is a modifier.
alt_r The right Alt key. This is a modifier.
alt_gr The AltGr key. This is a modifier.
cmd A generic command button (Windows / Command / Super key). This may be a modifier.
cmd_l The left command button (Windows / Command / Super key). This may be a modifier.
cmd_r The right command button (Windows / Command / Super key). This may be a modifier.
up An up arrow key.
down A down arrow key.
left A left arrow key.
right A right arrow key.
enter The Enter or Return key.
space The Space key.
tab The Tab key.
backspace The Backspace key.
delete The Delete key.
esc The Esc key.
home The Home key.
end The End key.
page_down The Page Down key.
page_up The Page Up key.
caps_lock The Caps Lock key.
f1 to f20 The function keys.
insert The Insert key. This may be undefined for some platforms.
menu The Menu key. This may be undefined for some platforms.
num_lock The Num Lock key. This may be undefined for some platforms.
pause The Pause / Break key. This may be undefined for some platforms.
print_screen The Print Screen key. This may be undefined for some platforms.
scroll_lock The Scroll Lock key. This may be undefined for some platforms.
=============== ===========
When controlling the mouse, there are different types of actions that can be
done. Same formatting rules as function keys apply. They are as follows:
============ ===========
Action Description
============ ===========
click Click with left mouse button
left_click Click with left mouse button
double_click Double click with left mouse button
triple_click Triple click with left mouse button
right_click Click with right mouse button
============ ===========
The supported mouse button types are ``left``, ``right``, and ``middle``.
**Examples**
Both Robot Framework and Python examples follow.
The library must be imported first.
.. code-block:: robotframework
*** Settings ***
Library RPA.Desktop
.. code-block:: python
from RPA.Desktop import Desktop
desktop = Desktop()
The library can open applications and interact with them through
keyboard and mouse events.
.. code-block:: robotframework
*** Keywords ***
Write entry in accounting
[Arguments] ${entry}
Open application erp_client.exe
Click image:%{ROBOT_ROOT}/images/create.png
Type text ${entry}
Press keys ctrl s
Press keys enter
.. code-block:: python
def write_entry_in_accounting(entry):
desktop.open_application("erp_client.exe")
desktop.click(f"image:{ROBOT_ROOT}/images/create.png")
desktop.type_text(entry)
desktop.press_keys("ctrl", "s")
desktop.press_keys("enter")
Targeting can be currently done using coordinates (absolute or relative),
but using template matching is preferred.
.. code-block:: robotframework
*** Keywords ***
Write to field
[Arguments] ${text}
Move mouse image:input_label.png
Move mouse offset:200,0
Click
Type text ${text}
Press keys enter
.. code-block:: python
def write_to_field(text):
desktop.move_mouse("image:input_label.png")
desktop.move_mouse("offset:200,0")
desktop.click()
desktop.type_text(text)
desktop.press_keys("enter")
Elements can be found by text too.
.. code-block:: robotframework
*** Keywords ***
Click New
Click ocr:New
.. code-block:: python
def click_new():
desktop.click('ocr:"New"')
It is recommended to wait for the elements to be visible before
trying any interaction. You can also pass ``region`` objects as locators.
.. code-block:: robotframework
*** Keywords ***
Click New
${region}= Wait For element ocr:New
Click ${region}
.. code-block:: python
def click_new():
region = desktop.wait_for_element("ocr:New")
desktop.click(region)
Another way to find elements by offsetting from an anchor:
.. code-block:: robotframework
*** Keywords ***
Type Notes
[Arguments] ${text}
Click With Offset ocr:Notes 500 0
Type Text ${text}
.. code-block:: python
def type_notes(text):
desktop.click_with_offset("ocr:Notes", 500, 0)
desktop.type_text(text)
""" # noqa: E501
ROBOT_LIBRARY_SCOPE = "GLOBAL"
ROBOT_LIBRARY_DOC_FORMAT = "REST"
def __init__(self):
self.logger = logging.getLogger(__name__)
self.buffer = Buffer(self.logger)
# Register keyword libraries to LibCore
libraries = [
ApplicationKeywords(self),
ClipboardKeywords(self),
FinderKeywords(self),
KeyboardKeywords(self),
MouseKeywords(self),
ScreenKeywords(self),
TextKeywords(self),
]
super().__init__(libraries)
|
py | 1a427c99e0636f615185906fe35749ee3b4d36b7 | """
WSGI config for todoapp_backend project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'todoapp_backend.settings')
application = get_wsgi_application()
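# Hedged deployment note (not part of the generated file): assuming this module
# lives at todoapp_backend/wsgi.py on the import path, a typical WSGI server
# invocation would look like
#
#     gunicorn todoapp_backend.wsgi:application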
|
py | 1a427cd393c54cbfaaffa15214fa9444794b802c | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.precise import Precise
class lykke(Exchange):
def describe(self):
return self.deep_extend(super(lykke, self).describe(), {
'id': 'lykke',
'name': 'Lykke',
'countries': ['CH'],
'version': 'v1',
'rateLimit': 200,
'has': {
'CORS': False,
'fetchOHLCV': False,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchOrder': True,
'fetchOrders': True,
'fetchTrades': True,
'fetchMyTrades': True,
'createOrder': True,
'cancelOrder': True,
'cancelAllOrders': True,
'fetchBalance': True,
'fetchMarkets': True,
'fetchOrderBook': True,
'fetchTicker': True,
},
'timeframes': {
'1m': 'Minute',
'5m': 'Min5',
'15m': 'Min15',
'30m': 'Min30',
'1h': 'Hour',
'4h': 'Hour4',
'6h': 'Hour6',
'12h': 'Hour12',
'1d': 'Day',
'1w': 'Week',
'1M': 'Month',
},
'requiredCredentials': {
'apiKey': True,
'secret': False,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/34487620-3139a7b0-efe6-11e7-90f5-e520cef74451.jpg',
'api': {
'mobile': 'https://public-api.lykke.com/api',
'public': 'https://hft-api.lykke.com/api',
'private': 'https://hft-api.lykke.com/api',
},
'test': {
'mobile': 'https://public-api.lykke.com/api',
'public': 'https://hft-service-dev.lykkex.net/api',
'private': 'https://hft-service-dev.lykkex.net/api',
},
'www': 'https://www.lykke.com',
'doc': [
'https://hft-api.lykke.com/swagger/ui/',
'https://www.lykke.com/lykke_api',
],
'fees': 'https://www.lykke.com/trading-conditions',
},
'api': {
'mobile': {
'get': [
'AssetPairs/rate',
'AssetPairs/rate/{assetPairId}',
'AssetPairs/dictionary/{market}',
'Assets/dictionary',
'Candles/history/{market}/available',
'Candles/history/{market}/{assetPair}/{period}/{type}/{from}/{to}',
'Company/ownershipStructure',
'Company/registrationsCount',
'IsAlive',
'Market',
'Market/{market}',
'Market/capitalization/{market}',
'OrderBook',
'OrderBook/{assetPairId}',
'Trades/{AssetPairId}',
'Trades/Last/{assetPair}/{n}',
],
'post': [
'AssetPairs/rate/history',
'AssetPairs/rate/history/{assetPairId}',
],
},
'public': {
'get': [
'AssetPairs',
'AssetPairs/{id}',
'IsAlive',
'OrderBooks',
'OrderBooks/{AssetPairId}',
],
},
'private': {
'get': [
'Orders',
'Orders/{id}',
'Wallets',
'History/trades',
],
'post': [
'Orders/limit',
'Orders/market',
'Orders/{id}/Cancel',
'Orders/v2/market',
'Orders/v2/limit',
'Orders/stoplimit',
'Orders/bulk',
],
'delete': [
'Orders',
'Orders/{id}',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': 0.0, # as of 7 Feb 2018, see https://github.com/ccxt/ccxt/issues/1863
'taker': 0.0, # https://www.lykke.com/cp/wallet-fees-and-limits
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {
'BTC': 0.001,
},
'deposit': {
'BTC': 0,
},
},
},
'commonCurrencies': {
'CAN': 'CanYaCoin',
'XPD': 'Lykke XPD',
},
})
def parse_trade(self, trade, market):
#
# public fetchTrades
#
# {
# "id": "d5983ab8-e9ec-48c9-bdd0-1b18f8e80a71",
# "assetPairId": "BTCUSD",
# "dateTime": "2019-05-15T06:52:02.147Z",
# "volume": 0.00019681,
# "index": 0,
# "price": 8023.333,
# "action": "Buy"
# }
#
# private fetchMyTrades
# {
# Id: '3500b83c-9963-4349-b3ee-b3e503073cea',
# OrderId: '83b50feb-8615-4dc6-b606-8a4168ecd708',
# DateTime: '2020-05-19T11:17:39.31+00:00',
# Timestamp: '2020-05-19T11:17:39.31+00:00',
# State: null,
# Amount: -0.004,
# BaseVolume: -0.004,
# QuotingVolume: 39.3898,
# Asset: 'BTC',
# BaseAssetId: 'BTC',
# QuotingAssetId: 'USD',
# AssetPair: 'BTCUSD',
# AssetPairId: 'BTCUSD',
# Price: 9847.427,
# Fee: {Amount: null, Type: 'Unknown', FeeAssetId: null}
# },
marketId = self.safe_string(trade, 'AssetPairId')
symbol = self.safe_symbol(marketId, market)
id = self.safe_string_2(trade, 'id', 'Id')
orderId = self.safe_string(trade, 'OrderId')
timestamp = self.parse8601(self.safe_string_2(trade, 'dateTime', 'DateTime'))
priceString = self.safe_string_2(trade, 'price', 'Price')
amountString = self.safe_string_2(trade, 'volume', 'Amount')
side = self.safe_string_lower(trade, 'action')
if side is None:
side = 'sell' if (amountString[0] == '-') else 'buy'
amountString = Precise.string_abs(amountString)
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
cost = self.parse_number(Precise.string_mul(priceString, amountString))
fee = {
'cost': 0, # There are no fees for trading. https://www.lykke.com/wallet-fees-and-limits/
'currency': market['quote'],
}
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': None,
'order': orderId,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
if limit is None:
limit = 100
request = {
'AssetPairId': market['id'],
'skip': 0,
'take': limit,
}
response = self.mobileGetTradesAssetPairId(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
market = None
if limit is not None:
request['take'] = limit # How many maximum items have to be returned, max 1000 default 100.
if symbol is not None:
market = self.market(symbol)
request['assetPairId'] = market['id']
response = self.privateGetHistoryTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privateGetWallets(params)
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'AssetId')
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_string(balance, 'Balance')
account['used'] = self.safe_string(balance, 'Reserved')
result[code] = account
return self.parse_balance(result, False)
def cancel_order(self, id, symbol=None, params={}):
request = {'id': id}
return self.privateDeleteOrdersId(self.extend(request, params))
def cancel_all_orders(self, symbol=None, params={}):
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['assetPairId'] = market['id']
return self.privateDeleteOrders(self.extend(request, params))
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
query = {
'AssetPairId': market['id'],
'OrderAction': self.capitalize(side),
'Volume': amount,
'Asset': market['baseId'],
}
if type == 'limit':
query['Price'] = price
method = 'privatePostOrdersV2' + self.capitalize(type)
result = getattr(self, method)(self.extend(query, params))
#
# market
#
# {
# "Price": 0
# }
#
# limit
#
# {
# "Id":"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
# }
#
id = self.safe_string(result, 'Id')
price = self.safe_number(result, 'Price')
return {
'id': id,
'info': result,
'clientOrderId': None,
'timestamp': None,
'datetime': None,
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': None,
'average': None,
'filled': None,
'remaining': None,
'status': None,
'fee': None,
'trades': None,
}
def fetch_markets(self, params={}):
markets = self.publicGetAssetPairs()
#
# [{ Id: "AEBTC",
# Name: "AE/BTC",
# Accuracy: 6,
# InvertedAccuracy: 8,
# BaseAssetId: "6f75280b-a005-4016-a3d8-03dc644e8912",
# QuotingAssetId: "BTC",
# MinVolume: 0.4,
# MinInvertedVolume: 0.0001 },
# { Id: "AEETH",
# Name: "AE/ETH",
# Accuracy: 6,
# InvertedAccuracy: 8,
# BaseAssetId: "6f75280b-a005-4016-a3d8-03dc644e8912",
# QuotingAssetId: "ETH",
# MinVolume: 0.4,
# MinInvertedVolume: 0.001 }]
#
result = []
for i in range(0, len(markets)):
market = markets[i]
id = self.safe_string(market, 'Id')
name = self.safe_string(market, 'Name')
baseId, quoteId = name.split('/')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
pricePrecision = self.safe_string(market, 'Accuracy')
priceLimit = self.parse_precision(pricePrecision)
precision = {
'price': int(pricePrecision),
'amount': self.safe_integer(market, 'InvertedAccuracy'),
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'active': True,
'info': market,
'precision': precision,
'limits': {
'amount': {
'min': self.safe_number(market, 'MinVolume'),
'max': None,
},
'price': {
'min': self.parse_number(priceLimit),
'max': None,
},
'cost': {
'min': self.safe_number(market, 'MinInvertedVolume'),
'max': None,
},
},
'baseId': None,
'quoteId': None,
})
return result
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market:
symbol = market['symbol']
close = self.safe_number(ticker, 'lastPrice')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': None,
'low': None,
'bid': self.safe_number(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_number(ticker, 'ask'),
'askVolume': None,
'vwap': None,
'open': None,
'close': close,
'last': close,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': None,
'quoteVolume': self.safe_number(ticker, 'volume24H'),
'info': ticker,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
ticker = self.mobileGetMarketMarket(self.extend(request, params))
return self.parse_ticker(ticker, market)
def parse_order_status(self, status):
statuses = {
'Open': 'open',
'Pending': 'open',
'InOrderBook': 'open',
'Processing': 'open',
'Matched': 'closed',
'Cancelled': 'canceled',
'Rejected': 'rejected',
'Replaced': 'canceled',
'Placed': 'open',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# {
# "Id": "string",
# "Status": "Unknown",
# "AssetPairId": "string",
# "Volume": 0,
# "Price": 0,
# "RemainingVolume": 0,
# "LastMatchTime": "2020-03-26T20:58:50.710Z",
# "CreatedAt": "2020-03-26T20:58:50.710Z",
# "Type": "Unknown",
# "LowerLimitPrice": 0,
# "LowerPrice": 0,
# "UpperLimitPrice": 0,
# "UpperPrice": 0
# }
#
status = self.parse_order_status(self.safe_string(order, 'Status'))
marketId = self.safe_string(order, 'AssetPairId')
symbol = self.safe_symbol(marketId, market)
lastTradeTimestamp = self.parse8601(self.safe_string(order, 'LastMatchTime'))
timestamp = None
if ('Registered' in order) and (order['Registered']):
timestamp = self.parse8601(order['Registered'])
elif ('CreatedAt' in order) and (order['CreatedAt']):
timestamp = self.parse8601(order['CreatedAt'])
price = self.safe_number(order, 'Price')
side = None
amount = self.safe_number(order, 'Volume')
if amount < 0:
side = 'sell'
amount = abs(amount)
else:
side = 'buy'
remaining = abs(self.safe_number(order, 'RemainingVolume'))
id = self.safe_string(order, 'Id')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': None,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'cost': None,
'average': None,
'amount': amount,
'filled': None,
'remaining': remaining,
'status': status,
'fee': None,
'trades': None,
})
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'id': id,
}
response = self.privateGetOrdersId(self.extend(request, params))
return self.parse_order(response)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
response = self.privateGetOrders(params)
market = None
if symbol is not None:
market = self.market(symbol)
return self.parse_orders(response, market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'status': 'InOrderBook',
}
return self.fetch_orders(symbol, since, limit, self.extend(request, params))
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'status': 'Matched',
}
return self.fetch_orders(symbol, since, limit, self.extend(request, params))
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
response = self.publicGetOrderBooksAssetPairId(self.extend({
'AssetPairId': self.market_id(symbol),
}, params))
orderbook = {
'timestamp': None,
'bids': [],
'asks': [],
}
timestamp = None
for i in range(0, len(response)):
side = response[i]
if side['IsBuy']:
orderbook['bids'] = self.array_concat(orderbook['bids'], side['Prices'])
else:
orderbook['asks'] = self.array_concat(orderbook['asks'], side['Prices'])
sideTimestamp = self.parse8601(side['Timestamp'])
timestamp = sideTimestamp if (timestamp is None) else max(timestamp, sideTimestamp)
return self.parse_order_book(orderbook, symbol, timestamp, 'bids', 'asks', 'Price', 'Volume')
def parse_bid_ask(self, bidask, priceKey=0, amountKey=1):
price = self.safe_number(bidask, priceKey)
amount = self.safe_number(bidask, amountKey)
if amount < 0:
amount = -amount
return [price, amount]
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api] + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'mobile':
if query:
url += '?' + self.urlencode(query)
elif api == 'public':
if query:
url += '?' + self.urlencode(query)
elif api == 'private':
if (method == 'GET') or (method == 'DELETE'):
if query:
url += '?' + self.urlencode(query)
self.check_required_credentials()
headers = {
'api-key': self.apiKey,
'Accept': 'application/json',
'Content-Type': 'application/json',
}
if method == 'POST':
if params:
body = self.json(params)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
|
py | 1a427dd3f2aa43bbe7aeb34d2c877c5c75c9f6df | # Pelenet modules
from .anisotropic import AnisotropicExperiment
from ..network import ReservoirNetwork
from ._abstract import Experiment
"""
@desc: Class for running an experiment; usually involves running
several networks (e.g. for training and testing)
"""
class AnisotropicReadoutExperiment(AnisotropicExperiment):
"""
# @desc: Define parameters for this experiment
# """
def defineParameters(self):
# Parent parameters
aniP = super().defineParameters()
expP = {
# Experiment
'seed': 3, # Random seed
'trials': 25, # Number of trials
'stepsPerTrial': 110, # Number of simulation steps for every trial
'isReset': True, # Activate reset after every trial
# Network
'refractoryDelay': 2, # Refactory period
'voltageTau': 10.24, # Voltage time constant
'currentTau': 10.78, # Current time constant
'thresholdMant': 1000, # Spiking threshold for membrane potential
'reservoirConnProb': 0.05,
# Anisotropic
'anisoStdE': 12, # Space constant, std of gaussian for excitatory neurons
'anisoStdI': 9, # Space constant, std of gaussian for inhibitory neurons (range 9 - 11)
'anisoShift': 1, # Intensity of the shift of the connectivity distribution for a neuron
#'percShift': 1, # Percentage of shift (default 1)
'anisoPerlinScale': 4, # Perlin noise scale, high value => dense valleys, low value => broad valleys
'weightExCoefficient': 12, # Coefficient for excitatory anisotropic weight
'weightInCoefficient': 48, # Coefficient for inhibitory anisotropic weight
# Input
'inputIsTopology': True, # Activate a 2D input area
'inputIsLeaveOut': True, # Leaves one target neuron out per trial
'patchNeuronsShiftX': 44, # x-position of the input area
'patchNeuronsShiftY': 24, # y-position of the input area
'inputNumTargetNeurons': 25, # Number of target neurons for the input
'inputSteps': 5, # Number of steps the network is activated by the input
'inputWeightExponent': 0, # The weight exponent of the weights from the generator to the target neurons
'inputGenSpikeProb': 1.0, # Spiking probability of the spike generators
# Output
'partitioningClusterSize': 10, # Size of clusters connected to an output neuron (6|10)
# Probes
'isExSpikeProbe': True, # Probe excitatory spikes
'isInSpikeProbe': True, # Probe inhibitory spikes
'isOutSpikeProbe': True # Probe output spikes
}
# Experiment parameters overwrite parameters from parent experiment
return { **aniP, **expP }
"""
@desc: Build all networks
"""
def build(self):
# Instanciate innate network
self.net = ReservoirNetwork(self.p)
self.net.landscape = None
# Draw anisotropic mask and weights
self.drawMaskAndWeights()
# Draw output weights
self.net.drawOutputMaskAndWeights()
# Connect ex-in reservoir
self.net.connectReservoir()
# Connect reservoir to output
self.net.connectOutput()
# Add patch input
self.net.addInput()
# Add Probes
self.net.addProbes()
|
py | 1a427eab085c36d330a820eea2b331671825739b | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import re
import unicodedata
def stringify(value):
"""Convert unicode to ascii"""
try:
value = unicodedata.normalize('NFKD', value)
except TypeError:
pass
return value.encode('ascii', 'ignore').decode('ascii')
def slugify(value):
"""Return value suitable for filename (taken from django.utils.text.slugify)"""
    value = re.sub(r'[^\w\s-]', '', stringify(value)).strip().lower()
    return re.sub(r'[-\s]+', '-', value)
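# Illustrative behaviour (hedged examples, not from the original module):
#   stringify(u'Héllo Wörld')   ->  'Hello World'
#   slugify(u'Héllo, Wörld!')   ->  'hello-world'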
|
py | 1a427ec7b4d7c89dd2dc27daec3ce98d299a9e74 | import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
import numpy as np
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.preprocessing import PolynomialFeatures
datos = pd.read_csv('../data/201213336.csv', sep=';')
X = datos['dia'].values.reshape(-1, 1)
y = datos['confirmados'].values.reshape(-1, 1)
z = datos['muertos'].values.reshape(-1,1)
model = LinearRegression()
model.fit(X=X, y=y)
predict = [365]
predict = np.array(predict).reshape(-1,1)
prediccionConfirmado = model.predict(predict)
model.fit(X=X, y=z)
prediccionesMuerto = model.predict(predict)
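# The PolynomialFeatures / metrics imports above are otherwise unused; a hedged
# sketch of the quadratic fit and error metrics they suggest, reusing the same
# day-365 prediction point:
poly = PolynomialFeatures(degree=2)
X_poly = poly.fit_transform(X)
model_poly = LinearRegression().fit(X_poly, y)
y_poly = model_poly.predict(X_poly)
print('R2 (poly, confirmados):', r2_score(y, y_poly))
print('MSE (poly, confirmados):', mean_squared_error(y, y_poly))
print('Prediccion poly dia 365:', model_poly.predict(poly.transform(predict)))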
print(prediccionConfirmado) #255,556
print(prediccionesMuerto) #8844 |
py | 1a427f8b69208556e93451dab6d2a617e485a676 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from flask import (
Flask, abort, request, redirect, url_for, render_template, g,
send_from_directory, jsonify)
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql.expression import func
from PIL import Image, ImageDraw, ImageFont
from configuration import (
get_args, get_db_uri, get_templates_list,
BASE_DIR, MEME_DIR, FONT_PATH)
app = Flask(__name__)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = get_db_uri()
db = SQLAlchemy(app)
# Model for representing created Memes
class Meme(db.Model):
id = db.Column(db.Integer, primary_key=True)
template = db.Column(db.String(80), nullable=False)
top_text = db.Column(db.String(80), nullable=False)
bot_text = db.Column(db.String(80), nullable=False)
def __repr__(self):
return '<Meme %r>' % self.id
Portfolio_Stocks = db.Table(
'Portfolio_Stocks',
db.Column('stock_id', db.Integer, db.ForeignKey('stock.id'),
primary_key=True),
db.Column('portfolio_id', db.Integer, db.ForeignKey('portfolio.id'),
primary_key=True)
)
# class Stock(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# name = db.Column(db.String(80), nullable=False)
# symbol = db.Column(db.String(10), nullable=False)
# price = db.Column(db.Float, nullable=False)
# def __repr__(self):
# return '<Stock %r>' % self.id
# class Portfolio(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# portfolio_owner = db.Column(db.String(80), nullable=False)
# stock_id = db.Column(db.Integer, db.ForeignKey('stock.id'), nullable=Fals
# e)
# def __repr__(self):
# return '<Portfolio %r>' % self.id
class Stock(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), nullable=False)
symbol = db.Column(db.String(10), nullable=False)
price = db.Column(db.Float, nullable=False)
portfolios_linked = db.relationship('Portfolio',
secondary=Portfolio_Stocks,
lazy='subquery',
backref=db.backref('stocks_linked',
lazy=True)
)
def __repr__(self):
return '<Stock %r>' % self.id
def serialize(self):
return {
"id": self.id,
"name": self.name,
"symbol": self.symbol,
"price": self.price
}
class Portfolio(db.Model):
id = db.Column(db.Integer, primary_key=True)
owner = db.Column(db.String(80), nullable=False)
def __repr__(self):
return '<Portfolio %r>' % self.id
# Portfolio_Stocks = db.Table(
# db.Column('stock_id', db.Integer, db.ForeignKey('stock.id'),
# primary_key=True),
# db.Column('portfolio_id', db.Integer, db.ForeignKey('portfolio.id'),
# primary_key=True)
# )
#
#
@app.before_first_request
def setup_db():
# Create folder for memes if it doesn't exist
if not os.path.exists(MEME_DIR):
os.makedirs(MEME_DIR)
# Create tables for models if they don't exist
db.create_all()
@app.before_request
def setup_request_time():
start_time = time.time()
g.request_time = lambda: "%d ms" % ((time.time() - start_time) * 1000)
@app.route('/')
def index():
return redirect(url_for("get_create_menu"))
@app.route('/recent', methods=['GET'])
def view_recent():
memes = Meme.query.order_by(Meme.id.desc()).limit(20).all()
return render_template('recent.html', memes=memes)
@app.route('/random', methods=['GET'])
def view_random():
meme = Meme.query.order_by(func.random()).first()
return redirect(url_for('view_meme', meme_id=meme.id))
@app.route('/template', methods=['GET'])
def get_create_menu():
templates = get_templates_list()
return render_template('view.html', templates=templates)
@app.route('/template/<string:template>', methods=['GET'])
def get_create(template):
if template not in get_templates_list():
abort(400, "Template does not exist.")
return render_template('create_meme.html', template=template)
@app.route('/meme/<int:meme_id>', methods=['GET'])
def view_meme(meme_id):
meme_file = os.path.join(MEME_DIR, '%d.png' % meme_id)
if not os.path.exists(meme_file):
generate_meme(meme_file, meme_id)
print(meme_file)
return send_from_directory(MEME_DIR, '%d.png' % meme_id)
@app.route('/meme', methods=['POST'])
def create_meme():
try:
meme = Meme(
template=request.form['template'],
top_text=request.form['top'],
bot_text=request.form['bottom']
)
db.session.add(meme)
db.session.commit()
return redirect(url_for('view_meme', meme_id=meme.id))
except KeyError:
abort(400, "Incorrect parameters.")
# Creates a stock
@app.route('/stock', methods=["POST"])
def create_stock():
try:
stock = Stock(
name=request.form['name'],
symbol=request.form['symbol'],
price=request.form['price']
)
db.session.add(stock)
db.session.commit()
print("stock created!")
# return redirect(url_for('view_stock', stock_id=stock.id))
return redirect('/template')
except KeyError:
abort(400, "Incorrect Parameters!")
# Gets all stocks
@app.route('/api/v1/stocks', methods=["GET"])
def api_stocks():
stocks = Stock.query.order_by(Stock.id.desc()).all()
return jsonify([s.serialize() for s in stocks])
# Gets all stocks
@app.route('/stock', methods=["GET"])
def view_stocks():
stocks = Stock.query.order_by(Stock.id.desc()).all()
return render_template('stocks.html', stocks=stocks)
# Get stock by stock id
@app.route('/stock/<int:stock_id>', methods=["GET"])
def view_stock(stock_id):
stock = Stock.query.filter_by(id=stock_id).first()
return render_template('stock_id.html', stock=stock)
# Renders create_stock.html
@app.route('/stock/cstock', methods=["GET"])
def get_create_stock():
return render_template("create_stock.html")
# Renders create_stock.html
@app.route('/portfolio/cportfolio', methods=["GET"])
def get_create_portfolio():
return render_template("create_portfolio.html")
# Creates a portfolio
@app.route('/portfolio', methods=["POST"])
def create_portfolio():
try:
portfolio = Portfolio(
owner=request.form['owner']
)
db.session.add(portfolio)
db.session.commit()
print("portfolio created!")
return redirect('/template')
except KeyError:
abort(400, "Incorrect Parameters!")
# Gets all portfolios
@app.route('/portfolio', methods=["GET"])
def view_portfolios():
portfolios = Portfolio.query.order_by(Portfolio.id.desc()).all()
return render_template('portfolios.html', portfolios=portfolios)
# Gets portfolio by stock id
@app.route('/portfolio/<int:portfolio_id>', methods=["GET"])
def view_portfolio(portfolio_id):
portfolio = Portfolio.query.filter_by(id=portfolio_id).first()
return render_template('portfolio_id.html', portfolio=portfolio)
# Allows a stock to be assigned to a portfolio
@app.route('/portfolio/psip/<int:stock_id>/<int:portfolio_id>',
methods=["POST"])
def put_stock_in_portfolio(stock_id, portfolio_id):
portfolio_rel = Portfolio.query.filter_by(id=portfolio_id).first()
stock_rel = Stock.query.filter_by(id=stock_id).first()
portfolio_rel.stocks_linked.append(stock_rel)
print("stock assigned to portfolio")
db.session.commit()
# return redirect('/template')
return render_template('portfolio_id.html', portfolio=portfolio_rel)
def generate_meme(file, meme_id):
# Query for meme
meme = Meme.query.filter(Meme.id == meme_id).first()
if meme is None:
abort(400, 'Meme does not exist.')
# Load template
template_file = os.path.join(
BASE_DIR, 'static', 'templates', meme.template)
if not os.path.exists(template_file):
abort(400, 'Template does not exist')
template = Image.open(template_file)
# Get Font Details
font, top_loc, bot_loc = calc_font_details(
meme.top_text, meme.bot_text, template.size)
draw = ImageDraw.Draw(template)
draw_text(draw, top_loc[0], top_loc[1], meme.top_text, font)
draw_text(draw, bot_loc[0], bot_loc[1], meme.bot_text, font)
template.save(file)
# Calculate font size and location
def calc_font_details(top, bot, img_size):
font_size = 50
font = ImageFont.truetype(FONT_PATH, font_size)
max_width = img_size[0] - 20
# Get ideal font size
while font.getsize(top)[0] > max_width or font.getsize(bot)[0] > max_width:
font_size = font_size - 1
font = ImageFont.truetype(FONT_PATH, font_size)
# Get font locations
top_loc = ((img_size[0] - font.getsize(top)[0])/2, -5)
bot_size = font.getsize(bot)
bot_loc = ((img_size[0] - bot_size[0])/2, img_size[1] - bot_size[1] - 5)
return font, top_loc, bot_loc
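# Illustrative walk-through (hypothetical numbers): for a 520px-wide template,
# max_width is 500. Starting from 50pt, the font size is decremented until both
# the top and bottom strings fit within 500px; the top line is then centered
# horizontally near the top edge and the bottom line near the bottom edge.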
# Draws the given text with a border
def draw_text(draw, x, y, text, font):
# Draw border
draw.text((x-1, y-1), text, font=font, fill="black")
draw.text((x+1, y-1), text, font=font, fill="black")
draw.text((x-1, y+1), text, font=font, fill="black")
draw.text((x+1, y+1), text, font=font, fill="black")
# Draw text
draw.text((x, y), text, font=font, fill="white")
if __name__ == '__main__':
# Run dev server (for debugging only)
args = get_args()
app.run(host=args.host, port=args.port, debug=True)
|
py | 1a427ff7bafde7673f2a65184a01dc8aed6d332f | # Copyright 2017 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import abc
from distutils import version
import sys
import decorator
import eventlet
from oslo_log import log as logging
from oslo_utils import uuidutils
from vmware_nsxlib._i18n import _
from vmware_nsxlib.v3 import exceptions
from vmware_nsxlib.v3 import nsx_constants
from vmware_nsxlib.v3 import utils
from vmware_nsxlib.v3.policy import constants
from vmware_nsxlib.v3.policy import core_defs
from vmware_nsxlib.v3.policy import transaction as trans
from vmware_nsxlib.v3.policy import utils as p_utils
LOG = logging.getLogger(__name__)
# Sentinel object to indicate an unspecified attribute value.
# A None value in an attribute indicates "unset" functionality,
# while "ignore" means that the value should not be present in the
# request body
IGNORE = object()
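# Illustrative example of the distinction (the method names below appear later
# in this module; the tier1_id value is hypothetical):
#   tier1.update(tier1_id, description=None)   -> "description": null is sent,
#                                                  unsetting the attribute
#   tier1.update(tier1_id, description=IGNORE) -> "description" is omitted from
#                                                  the request body entirely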
DEFAULT_MAP_ID = 'DEFAULT'
@decorator.decorator
def check_allowed_passthrough(f, *args, **kwargs):
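    """Skip the decorated method when the passthrough (MP) api is disabled.

    Resource APIs receive an optional nsx_api (manager/passthrough) client.
    Methods that depend on it are wrapped with this decorator so that, when
    the passthrough api was not configured, they log an error and return
    None instead of failing.
    """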
resource_api = args[0]
if not resource_api.nsx_api:
caller = sys._getframe(1).f_code.co_name
LOG.error("%s failed: Passthrough api is disabled", caller)
return
return f(*args, **kwargs)
class NsxPolicyResourceBase(object, metaclass=abc.ABCMeta):
"""Abstract class for NSX policy resources
    declaring the basic APIs each policy resource should support,
    and implementing some common APIs and utilities
"""
SINGLE_ENTRY_ID = 'entry'
def __init__(self, policy_api, nsx_api, version, nsxlib_config):
self.policy_api = policy_api
self.nsx_api = nsx_api
self.version = version
self.nsxlib_config = nsxlib_config
@property
def entry_def(self):
pass
@abc.abstractmethod
def list(self, *args, **kwargs):
pass
@abc.abstractmethod
def get(self, uuid, *args, **kwargs):
pass
@abc.abstractmethod
def delete(self, uuid, *args, **kwargs):
pass
@abc.abstractmethod
def create_or_overwrite(self, *args, **kwargs):
"""Create new or overwrite existing resource
Create would list keys and attributes, set defaults and
perform necessary validations.
        If an object with the same ID exists on the backend, it will
        be overridden.
"""
pass
@abc.abstractmethod
def update(self, *args, **kwargs):
"""Update existing resource
Update is different from create since it specifies only
attributes that need changing. Non-updateble attributes
should not be listed as update arguments.
Create_or_overwrite is not
good enough since it sets defaults, and thus would return
non-default values to default if not specified in kwargs.
"""
pass
def _any_arg_set(self, *args):
"""Helper to identify if user specified any of args"""
for arg in args:
if arg != IGNORE:
return True
return False
def _get_user_args(self, **kwargs):
return {key: value for key, value in kwargs.items()
if value != IGNORE}
def _init_def(self, **kwargs):
"""Helper for update function - ignore attrs without explicit value"""
args = self._get_user_args(**kwargs)
return self.entry_def(nsx_version=self.version, **args)
def _init_parent_def(self, **kwargs):
"""Helper for update function - ignore attrs without explicit value"""
args = self._get_user_args(**kwargs)
return self.parent_entry_def(**args)
def _get_and_update_def(self, **kwargs):
"""Helper for update function - ignore attrs without explicit value"""
args = self._get_user_args(**kwargs)
resource_def = self.entry_def(nsx_version=self.version, **args)
body = self.policy_api.get(resource_def)
if body:
resource_def.set_obj_dict(body)
return resource_def
def _update(self, allow_partial_updates=True,
force=False, put=False, revision=None, **kwargs):
"""Helper for update function - ignore attrs without explicit value"""
# DO NOT retry if caller specifies revision
max_attempts = (self.policy_api.client.max_attempts
if revision is None else 0)
@utils.retry_upon_exception(
exceptions.StaleRevision,
max_attempts=max_attempts)
def _do_update_with_retry():
if (allow_partial_updates and
self.policy_api.partial_updates_supported() and not put):
policy_def = self._init_def(**kwargs)
partial_updates = True
else:
policy_def = self._get_and_update_def(**kwargs)
partial_updates = False
if policy_def.bodyless():
# Nothing to update - only keys provided in kwargs
return
if put:
return self.policy_api.update_with_put(
policy_def, revision=revision)
else:
self.policy_api.create_or_update(
policy_def, partial_updates=partial_updates, force=force)
return _do_update_with_retry()
@staticmethod
def _init_obj_uuid(obj_uuid):
if not obj_uuid:
# generate a random id
obj_uuid = str(uuidutils.generate_uuid())
return obj_uuid
def _canonize_name(self, name):
# remove spaces and slashes from objects names
return name.replace(' ', '_').replace('/', '_')
def get_by_name(self, name, *args, **kwargs):
# Return first match by name
resources_list = self.list(*args, **kwargs)
for obj in resources_list:
if obj.get('display_name') == name:
return obj
def _get_realization_info(self, resource_def, entity_type=None,
silent=False, all_results=False):
entities = []
results = []
try:
path = resource_def.get_resource_full_path()
entities = self.policy_api.get_realized_entities(
path, silent=silent)
if entities:
if entity_type:
# look for the entry with the right entity_type
for entity in entities:
if entity.get('entity_type') == entity_type:
if all_results:
results.append(entity)
else:
return entity
return results
else:
# return the first realization entry
# (Useful for resources with single realization entity)
if not all_results:
return entities[0]
else:
return entities
except exceptions.ResourceNotFound:
pass
# If we got here the resource was not deployed yet
if silent:
LOG.debug("No realization info found for %(path)s type %(type)s: "
"%(entities)s",
{"path": path, "type": entity_type,
"entities": entities})
else:
LOG.warning("No realization info found for %(path)s type %(type)s",
{"path": path, "type": entity_type})
def _get_realized_state(self, resource_def, entity_type=None,
realization_info=None):
if not realization_info:
realization_info = self._get_realization_info(
resource_def, entity_type=entity_type)
if realization_info and realization_info.get('state'):
return realization_info['state']
def _get_realized_id(self, resource_def, entity_type=None,
realization_info=None):
if not realization_info:
realization_info = self._get_realization_info(
resource_def, entity_type=entity_type)
if (realization_info and
realization_info.get('realization_specific_identifier')):
return realization_info['realization_specific_identifier']
def _get_realization_error_message_and_code(self, info):
error_msg = 'unknown'
error_code = None
related_error_codes = []
if info.get('alarms'):
alarm = info['alarms'][0]
error_msg = alarm.get('message')
if alarm.get('error_details'):
error_code = alarm['error_details'].get('error_code')
if alarm['error_details'].get('related_errors'):
related = alarm['error_details']['related_errors']
for err_obj in related:
error_msg = '%s: %s' % (error_msg,
err_obj.get('error_message'))
if err_obj.get('error_code'):
related_error_codes.append(err_obj['error_code'])
return error_msg, error_code, related_error_codes
def _wait_until_realized(self, resource_def, entity_type=None,
sleep=None, max_attempts=None):
"""Wait until the resource has been realized
Return the realization info, or raise an error
"""
if sleep is None:
sleep = self.nsxlib_config.realization_wait_sec
if max_attempts is None:
max_attempts = self.nsxlib_config.realization_max_attempts
info = {}
@utils.retry_upon_none_result(max_attempts, delay=sleep, random=True)
def get_info():
info = self._get_realization_info(
resource_def, entity_type=entity_type, silent=True)
if info:
if info['state'] == constants.STATE_REALIZED:
return info
if info['state'] == constants.STATE_ERROR:
error_msg, error_code, related_error_codes = \
self._get_realization_error_message_and_code(info)
# There could be a delay between setting NSX-T
# Error realization state and updating the realization
                    # entity with alarms. Retry should be performed upon None
# error code to avoid 'Unknown' RealizationErrorStateError
# exception
if error_code is None:
return
raise exceptions.RealizationErrorStateError(
resource_type=resource_def.resource_type(),
resource_id=resource_def.get_id(),
error=error_msg, error_code=error_code,
related_error_codes=related_error_codes)
try:
return get_info()
except exceptions.RealizationError as e:
raise e
except Exception:
# max retries reached
LOG.error("_wait_until_realized maxed-out for "
"resource: %s. Last realization info was %s",
resource_def.get_resource_full_path(), info)
raise exceptions.RealizationTimeoutError(
resource_type=resource_def.resource_type(),
resource_id=resource_def.get_id(),
attempts=max_attempts,
sleep=sleep)
def _wait_until_state_successful(self, res_def,
sleep=None, max_attempts=None,
with_refresh=False):
res_path = res_def.get_resource_full_path()
state = {}
if sleep is None:
sleep = self.nsxlib_config.realization_wait_sec
if max_attempts is None:
max_attempts = self.nsxlib_config.realization_max_attempts
@utils.retry_upon_none_result(max_attempts, delay=sleep, random=True)
def get_state():
state = self.policy_api.get_intent_consolidated_status(
res_path, silent=True)
if state and state.get('consolidated_status'):
con_state = state['consolidated_status'].get(
'consolidated_status')
if con_state == 'SUCCESS':
return True
if con_state == 'ERROR':
LOG.error("_wait_until_state_successful errored for "
"resource: %s. Last consolidated_status result "
"was %s", res_path, state)
raise exceptions.RealizationErrorStateError(
resource_type=res_def.resource_type(),
resource_id=res_def.get_id(),
error="Unknown")
if with_refresh:
# Refresh the consolidated state for the next time
# (if not, it will be refreshed at the policy level after a
# refresh cycle)
self.policy_api.refresh_realized_state(res_path)
try:
return get_state()
except exceptions.RealizationError as e:
raise e
except Exception:
# max retries reached
LOG.error("_wait_until_state_successful maxed-out for "
"resource: %s. Last consolidated_status result was %s",
res_path, state)
raise exceptions.RealizationTimeoutError(
resource_type=res_def.resource_type(),
resource_id=res_def.get_id(),
attempts=max_attempts,
sleep=sleep)
@check_allowed_passthrough
def _get_realized_id_using_search(self, policy_resource_path,
mp_resource_type, resource_def=None,
entity_type=None, silent=False,
sleep=None, max_attempts=None):
"""Wait until the policy path will be found using search api
And return the NSX ID of the MP resource that was found
"""
if sleep is None:
sleep = self.nsxlib_config.realization_wait_sec
if max_attempts is None:
max_attempts = self.nsxlib_config.realization_max_attempts
check_status = 3
tag = [{'scope': 'policyPath',
'tag': utils.escape_tag_data(policy_resource_path)}]
resources = []
test_num = 0
while test_num < max_attempts:
# Use the search api to find the realization id of this entity.
resources = self.nsx_api.search_by_tags(
tags=tag, resource_type=mp_resource_type,
silent=silent)['results']
if resources:
# If status exists, make sure the state is successful
if (not resources[0].get('status') or
resources[0]['status'].get('state') == 'success'):
return resources[0]['id']
# From time to time also check the Policy realization state,
            # since waiting should be avoided if it is in ERROR.
if resource_def and test_num % check_status == (check_status - 1):
info = self._get_realization_info(resource_def,
entity_type=entity_type)
if info and info['state'] == constants.STATE_ERROR:
error_msg, error_code, related_error_codes = \
self._get_realization_error_message_and_code(info)
LOG.error("_get_realized_id_using_search Failed for "
"resource: %s. Got error in realization info %s",
policy_resource_path, info)
raise exceptions.RealizationErrorStateError(
resource_type=resource_def.resource_type(),
resource_id=resource_def.get_id(),
error=error_msg, error_code=error_code,
related_error_codes=related_error_codes)
if (info and info['state'] == constants.STATE_REALIZED and
info.get('realization_specific_identifier')):
LOG.warning("Realization ID for %s was not found via "
"search api although it was realized",
policy_resource_path)
return info['realization_specific_identifier']
eventlet.sleep(sleep)
test_num += 1
# max retries reached
LOG.error("_get_realized_id_using_search maxed-out for "
"resource: %s. Last search result was %s",
policy_resource_path, resources)
raise exceptions.RealizationTimeoutError(
resource_type=mp_resource_type,
resource_id=policy_resource_path,
attempts=max_attempts,
sleep=sleep)
def _get_extended_attr_from_realized_info(self, realization_info,
requested_attr):
# Returns a list. In case a single value is expected,
# caller must extract the first index to retrieve the value
if realization_info:
try:
for attr in realization_info.get('extended_attributes', []):
if attr.get('key') == requested_attr:
return attr.get('values')
except IndexError:
return
def _list(self, obj_def, silent=False):
return self.policy_api.list(obj_def, silent=silent).get('results', [])
def _create_or_store(self, policy_def, child_def=None):
transaction = trans.NsxPolicyTransaction.get_current()
if transaction:
# Store this def for batch apply for this transaction
transaction.store_def(policy_def, self.policy_api.client)
if child_def and not policy_def.mandatory_child_def:
transaction.store_def(child_def, self.policy_api.client)
else:
# No transaction - apply now
# In case the same object was just deleted, or depends on another
# resource, create may need to be retried.
@utils.retry_upon_exception(
(exceptions.NsxPendingDelete, exceptions.StaleRevision),
max_attempts=self.policy_api.client.max_attempts)
def _do_create_with_retry():
if child_def:
self.policy_api.create_with_parent(policy_def, child_def)
else:
self.policy_api.create_or_update(policy_def)
_do_create_with_retry()
def _delete_or_store(self, policy_def):
transaction = trans.NsxPolicyTransaction.get_current()
if transaction:
            # Mark that this resource is about to be deleted
policy_def.set_delete()
# Set some mandatory default values to avoid failure
# TODO(asarfaty): This can be removed once platform bug is fixed
policy_def.set_default_mandatory_vals()
# Store this def for batch apply for this transaction
transaction.store_def(policy_def, self.policy_api.client)
else:
# No transaction - apply now
self._delete_with_retry(policy_def)
def _delete_with_retry(self, policy_def):
@utils.retry_upon_exception(
exceptions.StaleRevision,
max_attempts=self.policy_api.client.max_attempts)
def do_delete():
self.policy_api.delete(policy_def)
do_delete()
class NsxPolicyDomainApi(NsxPolicyResourceBase):
"""NSX Policy Domain."""
@property
def entry_def(self):
return core_defs.DomainDef
def create_or_overwrite(self, name, domain_id=None,
description=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
domain_id = self._init_obj_uuid(domain_id)
domain_def = self._init_def(domain_id=domain_id,
name=name,
description=description,
tags=tags,
tenant=tenant)
self._create_or_store(domain_def)
return domain_id
def delete(self, domain_id, tenant=constants.POLICY_INFRA_TENANT):
domain_def = core_defs.DomainDef(domain_id=domain_id, tenant=tenant)
self._delete_with_retry(domain_def)
def get(self, domain_id, tenant=constants.POLICY_INFRA_TENANT,
silent=False):
domain_def = core_defs.DomainDef(domain_id=domain_id, tenant=tenant)
return self.policy_api.get(domain_def, silent=silent)
def list(self, tenant=constants.POLICY_INFRA_TENANT):
domain_def = core_defs.DomainDef(tenant=tenant)
return self._list(domain_def)
def update(self, domain_id, name=IGNORE,
description=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
self._update(domain_id=domain_id,
name=name,
description=description,
tags=tags,
tenant=tenant)
class NsxPolicyGroupApi(NsxPolicyResourceBase):
"""NSX Policy Group (under a Domain) with condition/s"""
@property
def entry_def(self):
return core_defs.GroupDef
def create_or_overwrite(
self, name, domain_id, group_id=None,
description=IGNORE,
cond_val=None,
cond_key=constants.CONDITION_KEY_TAG,
cond_op=constants.CONDITION_OP_EQUALS,
cond_member_type=constants.CONDITION_MEMBER_PORT,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
"""Create a group with/without a condition.
        Empty condition value will result in a group with no condition.
"""
group_id = self._init_obj_uuid(group_id)
# Prepare the condition
if cond_val is not None:
condition = core_defs.Condition(value=cond_val,
key=cond_key,
operator=cond_op,
member_type=cond_member_type)
conditions = [condition]
else:
conditions = []
group_def = self._init_def(domain_id=domain_id,
group_id=group_id,
name=name,
description=description,
conditions=conditions,
tags=tags,
tenant=tenant)
self._create_or_store(group_def)
return group_id
def build_condition(
self, cond_val=None,
cond_key=constants.CONDITION_KEY_TAG,
cond_op=constants.CONDITION_OP_EQUALS,
cond_scope_op=None,
cond_member_type=constants.CONDITION_MEMBER_PORT):
return core_defs.Condition(value=cond_val,
key=cond_key,
operator=cond_op,
scope_operator=cond_scope_op,
member_type=cond_member_type,
nsx_version=self.version)
def build_ip_address_expression(self, ip_addresses):
return core_defs.IPAddressExpression(ip_addresses)
def build_path_expression(self, paths):
return core_defs.PathExpression(paths)
def build_union_condition(self, operator=constants.CONDITION_OP_OR,
conditions=None):
        # NSX doesn't allow duplicate expressions in the expression list
        # of a group -> (ERROR: Duplicate expressions specified)
        # Each member of the input conditions is an instance of either the
        # Condition or the NestedExpression class.
expressions = []
if conditions:
conditions = list(set(conditions))
for cond in conditions:
if len(expressions):
expressions.append(core_defs.ConjunctionOperator(
operator=operator))
expressions.append(cond)
return expressions
def build_nested_condition(
self, operator=constants.CONDITION_OP_AND,
conditions=None):
expressions = self.build_union_condition(
operator=operator, conditions=conditions)
return core_defs.NestedExpression(expressions=expressions)
def create_or_overwrite_with_conditions(
self, name, domain_id, group_id=None,
description=IGNORE,
conditions=IGNORE, tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
"""Create a group with a list of conditions.
To build the conditions in the list, build_condition
or build_nested_condition can be used
"""
group_id = self._init_obj_uuid(group_id)
if not conditions:
conditions = []
group_def = self._init_def(domain_id=domain_id,
group_id=group_id,
name=name,
description=description,
conditions=conditions,
tags=tags,
tenant=tenant)
self._create_or_store(group_def)
return group_id
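    # Illustrative usage sketch (group name and tag values are hypothetical,
    # and 'group_api' stands for an instance of this class):
    #   cond1 = group_api.build_condition(cond_val='color|green')
    #   cond2 = group_api.build_condition(
    #       cond_val='os|linux',
    #       cond_member_type=constants.CONDITION_MEMBER_PORT)
    #   nested = group_api.build_nested_condition(conditions=[cond1, cond2])
    #   group_api.create_or_overwrite_with_conditions(
    #       'linux-green', domain_id, conditions=[nested])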
def delete(self, domain_id, group_id,
tenant=constants.POLICY_INFRA_TENANT):
group_def = core_defs.GroupDef(domain_id=domain_id,
group_id=group_id,
tenant=tenant)
self._delete_with_retry(group_def)
def get(self, domain_id, group_id,
tenant=constants.POLICY_INFRA_TENANT, silent=False):
group_def = core_defs.GroupDef(domain_id=domain_id,
group_id=group_id,
tenant=tenant)
return self.policy_api.get(group_def, silent=silent)
def list(self, domain_id,
tenant=constants.POLICY_INFRA_TENANT):
"""List all the groups of a specific domain."""
group_def = core_defs.GroupDef(domain_id=domain_id,
tenant=tenant)
return self._list(group_def)
def get_by_name(self, domain_id, name,
tenant=constants.POLICY_INFRA_TENANT):
"""Return first group matched by name of this domain"""
return super(NsxPolicyGroupApi, self).get_by_name(name, domain_id,
tenant=tenant)
def update(self, domain_id, group_id,
name=IGNORE, description=IGNORE,
tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT):
self._update(domain_id=domain_id,
group_id=group_id,
name=name,
description=description,
tags=tags,
tenant=tenant)
def update_with_conditions(
self, domain_id, group_id,
name=IGNORE, description=IGNORE, conditions=IGNORE,
tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT,
update_payload_cbk=None):
group_def = self._init_def(domain_id=domain_id,
group_id=group_id,
name=name,
description=description,
conditions=conditions,
tags=tags,
tenant=tenant)
group_path = group_def.get_resource_path()
@utils.retry_upon_exception(
exceptions.StaleRevision,
max_attempts=self.policy_api.client.max_attempts)
def _update():
# Get the current data of group
group = self.policy_api.get(group_def)
if update_payload_cbk:
# The update_payload_cbk function takes two arguments.
# The first one is the result from the internal GET request.
# The second one is a dict of user-provided attributes,
# which can be changed inside the callback function and
# used as the new payload for the following PUT request.
# For example, users want to combine the new conditions
# passed to update_with_conditions() with the original
# conditions retrieved from the internal GET request
# instead of overriding the original conditions.
update_payload_cbk(group, group_def.attrs)
group_def.set_obj_dict(group)
body = group_def.get_obj_dict()
# Update the entire group at the NSX
self.policy_api.client.update(group_path, body)
_update()
def get_realized_state(self, domain_id, group_id, entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
realization_info=None):
group_def = core_defs.GroupDef(domain_id=domain_id,
group_id=group_id,
tenant=tenant)
return self._get_realized_state(group_def, entity_type=entity_type,
realization_info=realization_info)
def get_realized_id(self, domain_id, group_id, entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
realization_info=None):
group_def = core_defs.GroupDef(domain_id=domain_id,
group_id=group_id,
tenant=tenant)
return self._get_realized_id(group_def, entity_type=entity_type,
realization_info=realization_info)
def get_realization_info(self, domain_id, group_id, entity_type=None,
silent=False,
tenant=constants.POLICY_INFRA_TENANT):
group_def = core_defs.GroupDef(domain_id=domain_id,
group_id=group_id,
tenant=tenant)
return self._get_realization_info(group_def, entity_type=entity_type,
silent=silent)
def get_path(self, domain_id, group_id,
tenant=constants.POLICY_INFRA_TENANT):
group_def = self.entry_def(domain_id=domain_id,
group_id=group_id,
tenant=tenant)
return group_def.get_resource_full_path()
def wait_until_realized(self, domain_id, group_id,
entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
sleep=None, max_attempts=None):
group_def = self.entry_def(domain_id=domain_id, group_id=group_id,
tenant=tenant)
return self._wait_until_realized(group_def, entity_type=entity_type,
sleep=sleep,
max_attempts=max_attempts)
class NsxPolicyServiceBase(NsxPolicyResourceBase):
"""Base class for NSX Policy Service with a single entry.
Note the nsx-policy backend supports multiple service entries per service.
At this point this is not supported here.
"""
@property
def parent_entry_def(self):
return core_defs.ServiceDef
def delete(self, service_id,
tenant=constants.POLICY_INFRA_TENANT):
"""Delete the service with all its entries"""
service_def = core_defs.ServiceDef(service_id=service_id,
tenant=tenant)
self._delete_with_retry(service_def)
def get(self, service_id,
tenant=constants.POLICY_INFRA_TENANT, silent=False):
service_def = core_defs.ServiceDef(service_id=service_id,
tenant=tenant)
return self.policy_api.get(service_def, silent=silent)
def list(self, tenant=constants.POLICY_INFRA_TENANT):
service_def = core_defs.ServiceDef(tenant=tenant)
return self._list(service_def)
def get_realized_state(self, service_id, entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
realization_info=None):
service_def = core_defs.ServiceDef(service_id=service_id,
tenant=tenant)
return self._get_realized_state(service_def, entity_type=entity_type,
realization_info=realization_info)
def get_realized_id(self, service_id, entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
realization_info=None):
service_def = core_defs.ServiceDef(service_id=service_id,
tenant=tenant)
return self._get_realized_id(service_def, entity_type=entity_type,
realization_info=realization_info)
def get_realization_info(self, service_id, entity_type=None,
silent=False,
tenant=constants.POLICY_INFRA_TENANT):
service_def = core_defs.ServiceDef(service_id=service_id,
tenant=tenant)
return self._get_realization_info(service_def,
entity_type=entity_type,
silent=silent)
class NsxPolicyL4ServiceApi(NsxPolicyServiceBase):
"""NSX Policy Service with a single L4 service entry.
Note the nsx-policy backend supports multiple service entries per service.
At this point this is not supported here.
"""
@property
def entry_def(self):
return core_defs.L4ServiceEntryDef
def create_or_overwrite(self, name, service_id=None,
description=IGNORE,
protocol=constants.TCP,
dest_ports=IGNORE,
source_ports=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
service_id = self._init_obj_uuid(service_id)
service_def = self._init_parent_def(service_id=service_id,
name=name,
description=description,
tags=tags,
tenant=tenant)
entry_def = self._init_def(service_id=service_id,
entry_id=self.SINGLE_ENTRY_ID,
name=self.SINGLE_ENTRY_ID,
protocol=protocol,
dest_ports=dest_ports,
source_ports=source_ports,
tenant=tenant)
service_def.mandatory_child_def = entry_def
self._create_or_store(service_def, entry_def)
return service_id
def update(self, service_id,
name=IGNORE, description=IGNORE,
protocol=IGNORE, dest_ports=IGNORE, source_ports=IGNORE,
tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT):
parent_def = self._init_parent_def(
service_id=service_id,
name=name,
description=description,
tags=tags,
tenant=tenant)
entry_def = self._get_and_update_def(
service_id=service_id,
entry_id=self.SINGLE_ENTRY_ID,
protocol=protocol,
dest_ports=dest_ports,
source_ports=source_ports,
tenant=tenant)
self.policy_api.create_with_parent(parent_def, entry_def)
def build_entry(self, name, service_id, entry_id,
description=None, protocol=None,
dest_ports=None, source_ports=None,
tags=None, tenant=constants.POLICY_INFRA_TENANT):
return self._init_def(service_id=service_id,
entry_id=entry_id,
name=name,
description=description,
protocol=protocol,
dest_ports=dest_ports,
source_ports=source_ports,
tags=tags,
tenant=tenant)
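    # Illustrative usage sketch ('l4_api' stands for an instance of this class;
    # the service name and ports are hypothetical):
    #   service_id = l4_api.create_or_overwrite(
    #       'http-service', protocol=constants.TCP, dest_ports=['80', '8080'])
    # A service is created with a single child entry whose id is 'entry'
    # (SINGLE_ENTRY_ID), and the generated service_id is returned.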
class NsxPolicyIcmpServiceApi(NsxPolicyServiceBase):
"""NSX Policy Service with a single ICMP service entry.
Note the nsx-policy backend supports multiple service entries per service.
At this point this is not supported here.
"""
@property
def entry_def(self):
return core_defs.IcmpServiceEntryDef
def create_or_overwrite(self, name, service_id=None,
description=IGNORE,
version=4, icmp_type=IGNORE, icmp_code=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
service_id = self._init_obj_uuid(service_id)
service_def = self._init_parent_def(service_id=service_id,
name=name,
description=description,
tags=tags,
tenant=tenant)
entry_def = self._init_def(
service_id=service_id,
entry_id=self.SINGLE_ENTRY_ID,
name=self.SINGLE_ENTRY_ID,
version=version,
icmp_type=icmp_type,
icmp_code=icmp_code,
tenant=tenant)
service_def.mandatory_child_def = entry_def
self._create_or_store(service_def, entry_def)
return service_id
def update(self, service_id,
name=IGNORE, description=IGNORE,
version=IGNORE, icmp_type=IGNORE,
icmp_code=IGNORE, tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
parent_def = self._init_parent_def(
service_id=service_id,
name=name,
description=description,
tags=tags,
tenant=tenant)
entry_def = self._get_and_update_def(
service_id=service_id,
entry_id=self.SINGLE_ENTRY_ID,
version=version,
icmp_type=icmp_type,
icmp_code=icmp_code,
tenant=tenant)
return self.policy_api.create_with_parent(parent_def, entry_def)
def build_entry(self, name, service_id, entry_id,
description=None, version=4,
icmp_type=None, icmp_code=None,
tags=None, tenant=constants.POLICY_INFRA_TENANT):
return self._init_def(service_id=service_id,
entry_id=entry_id,
name=name,
description=description,
version=version,
icmp_type=icmp_type,
icmp_code=icmp_code,
tags=tags,
tenant=tenant)
class NsxPolicyIPProtocolServiceApi(NsxPolicyServiceBase):
"""NSX Policy Service with a single IPProtocol service entry.
Note the nsx-policy backend supports multiple service entries per service.
At this point this is not supported here.
"""
@property
def entry_def(self):
return core_defs.IPProtocolServiceEntryDef
def create_or_overwrite(self, name, service_id=None,
description=IGNORE,
protocol_number=IGNORE, tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
service_id = self._init_obj_uuid(service_id)
service_def = self._init_parent_def(service_id=service_id,
name=name,
description=description,
tags=tags,
tenant=tenant)
entry_def = self._init_def(
service_id=service_id,
entry_id=self.SINGLE_ENTRY_ID,
name=self.SINGLE_ENTRY_ID,
protocol_number=protocol_number,
tenant=tenant)
service_def.mandatory_child_def = entry_def
self._create_or_store(service_def, entry_def)
return service_id
def update(self, service_id,
name=IGNORE, description=IGNORE,
protocol_number=IGNORE, tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
parent_def = self._init_parent_def(
service_id=service_id,
name=name,
description=description,
tags=tags,
tenant=tenant)
entry_def = self._get_and_update_def(
service_id=service_id,
entry_id=self.SINGLE_ENTRY_ID,
protocol_number=protocol_number,
tenant=tenant)
return self.policy_api.create_with_parent(parent_def, entry_def)
def build_entry(self, name, service_id, entry_id,
description=None, protocol_number=None,
tags=None, tenant=constants.POLICY_INFRA_TENANT):
return self._init_def(service_id=service_id,
entry_id=entry_id,
name=name,
protocol_number=protocol_number,
tags=tags,
tenant=tenant)
class NsxPolicyMixedServiceApi(NsxPolicyServiceBase):
"""NSX Policy Service with mixed service entries."""
def create_or_overwrite(self, name, service_id,
description=IGNORE,
entries=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
service_def = self._init_parent_def(service_id=service_id,
name=name,
description=description,
entries=entries,
tags=tags,
tenant=tenant)
if entries != IGNORE:
self._create_or_store(service_def, entries)
else:
self._create_or_store(service_def)
return service_id
def update(self, service_id,
name=IGNORE, description=IGNORE,
entries=IGNORE, tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
parent_def = self._init_parent_def(
service_id=service_id,
name=name,
description=description,
tags=tags,
tenant=tenant)
if entries != IGNORE:
self.policy_api.create_with_parent(parent_def, entries)
else:
self.policy_api.create_or_update(parent_def)
class NsxPolicyTier1Api(NsxPolicyResourceBase):
"""NSX Tier1 API """
LOCALE_SERVICE_SUFF = '-0'
@property
def entry_def(self):
return core_defs.Tier1Def
def build_route_advertisement(self, static_routes=False, subnets=False,
nat=False, lb_vip=False, lb_snat=False,
ipsec_endpoints=False):
return core_defs.RouteAdvertisement(static_routes=static_routes,
subnets=subnets,
nat=nat,
lb_vip=lb_vip,
lb_snat=lb_snat,
ipsec_endpoints=ipsec_endpoints)
def create_or_overwrite(self, name, tier1_id=None,
description=IGNORE,
tier0=IGNORE,
force_whitelisting=IGNORE,
failover_mode=constants.NON_PREEMPTIVE,
route_advertisement=IGNORE,
dhcp_config=IGNORE,
disable_firewall=IGNORE,
ipv6_ndra_profile_id=IGNORE,
pool_allocation=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
tier1_id = self._init_obj_uuid(tier1_id)
tier1_def = self._init_def(tier1_id=tier1_id,
name=name,
description=description,
tier0=tier0,
force_whitelisting=force_whitelisting,
tags=tags,
failover_mode=failover_mode,
route_advertisement=route_advertisement,
dhcp_config=dhcp_config,
disable_firewall=disable_firewall,
ipv6_ndra_profile_id=ipv6_ndra_profile_id,
pool_allocation=pool_allocation,
tenant=tenant)
self._create_or_store(tier1_def)
return tier1_id
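    # Illustrative usage sketch ('tier1_api' stands for an instance of this
    # class; the router name and tier0 path are hypothetical):
    #   adv = tier1_api.build_route_advertisement(nat=True, lb_vip=True)
    #   tier1_id = tier1_api.create_or_overwrite(
    #       'my-tier1', tier0=tier0_path, route_advertisement=adv)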
def delete(self, tier1_id, tenant=constants.POLICY_INFRA_TENANT):
tier1_def = self.entry_def(tier1_id=tier1_id, tenant=tenant)
self._delete_with_retry(tier1_def)
def get(self, tier1_id, tenant=constants.POLICY_INFRA_TENANT,
silent=False):
tier1_def = self.entry_def(tier1_id=tier1_id, tenant=tenant)
return self.policy_api.get(tier1_def, silent=silent)
def get_path(self, tier1_id, tenant=constants.POLICY_INFRA_TENANT):
tier1_def = self.entry_def(tier1_id=tier1_id, tenant=tenant)
return tier1_def.get_resource_full_path()
def list(self, tenant=constants.POLICY_INFRA_TENANT):
tier1_def = self.entry_def(tenant=tenant)
return self._list(tier1_def)
def update(self, tier1_id, name=IGNORE, description=IGNORE,
force_whitelisting=IGNORE,
failover_mode=IGNORE, tier0=IGNORE,
dhcp_config=IGNORE, tags=IGNORE,
enable_standby_relocation=IGNORE,
disable_firewall=IGNORE,
ipv6_ndra_profile_id=IGNORE,
route_advertisement=IGNORE,
route_advertisement_rules=IGNORE,
pool_allocation=IGNORE,
tenant=constants.POLICY_INFRA_TENANT,
current_body=None):
self._update(tier1_id=tier1_id,
name=name,
description=description,
force_whitelisting=force_whitelisting,
failover_mode=failover_mode,
dhcp_config=dhcp_config,
tier0=tier0,
enable_standby_relocation=enable_standby_relocation,
disable_firewall=disable_firewall,
ipv6_ndra_profile_id=ipv6_ndra_profile_id,
route_advertisement=route_advertisement,
route_advertisement_rules=route_advertisement_rules,
pool_allocation=pool_allocation,
tags=tags,
tenant=tenant)
def update_route_advertisement(
self, tier1_id,
static_routes=None,
subnets=None,
nat=None,
lb_vip=None,
lb_snat=None,
ipsec_endpoints=None,
tier0=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
tier1_dict = self.get(tier1_id, tenant)
route_adv = self.entry_def.get_route_adv(tier1_dict)
route_adv.update(static_routes=static_routes,
subnets=subnets,
nat=nat,
lb_vip=lb_vip,
lb_snat=lb_snat,
ipsec_endpoints=ipsec_endpoints)
self.update(tier1_id,
route_advertisement=route_adv,
tier0=tier0,
tenant=tenant)
def add_advertisement_rule(
self, tier1_id, name, action=None, prefix_operator=None,
route_advertisement_types=None, subnets=None,
tenant=constants.POLICY_INFRA_TENANT):
tier1_dict = self.get(tier1_id, tenant)
adv_rules = tier1_dict.get('route_advertisement_rules', [])
adv_rules = [r for r in adv_rules if r.get('name') != name]
adv_rule = core_defs.RouteAdvertisementRule(
name=name, action=action, prefix_operator=prefix_operator,
route_advertisement_types=route_advertisement_types,
subnets=subnets)
adv_rules.append(adv_rule)
self.update(tier1_id,
route_advertisement_rules=adv_rules,
tenant=tenant,
current_body=tier1_dict)
def remove_advertisement_rule(self, tier1_id, name,
tenant=constants.POLICY_INFRA_TENANT):
tier1_dict = self.get(tier1_id, tenant)
adv_rules = tier1_dict.get('route_advertisement_rules', [])
updated_adv_rules = [r for r in adv_rules if r.get('name') != name]
if updated_adv_rules != adv_rules:
self.update(tier1_id,
route_advertisement_rules=updated_adv_rules,
tenant=tenant,
current_body=tier1_dict)
def build_advertisement_rule(self, name, action=None, prefix_operator=None,
route_advertisement_types=None, subnets=None):
return core_defs.RouteAdvertisementRule(
name=name, action=action, prefix_operator=prefix_operator,
route_advertisement_types=route_advertisement_types,
subnets=subnets)
def update_advertisement_rules(self, tier1_id, rules=None,
name_prefix=None,
tenant=constants.POLICY_INFRA_TENANT):
"""Update the router advertisement rules
If name_prefix is None, replace the entire list of NSX rules with the
new given 'rules'.
Else - delete the NSX rules with this name prefix, and add 'rules' to
the rest.
"""
tier1_dict = self.get(tier1_id, tenant)
current_rules = tier1_dict.get('route_advertisement_rules', [])
if name_prefix:
# delete rules with this prefix:
new_rules = []
for rule in current_rules:
if (not rule.get('name') or
not rule['name'].startswith(name_prefix)):
new_rules.append(rule)
# add new rules if provided
if rules:
new_rules.extend(rules)
else:
new_rules = rules
self.update(tier1_id,
route_advertisement_rules=new_rules,
tenant=tenant,
current_body=tier1_dict)
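    # Illustrative usage sketch (rule name, action and subnet are hypothetical):
    #   rule = tier1_api.build_advertisement_rule(
    #       'deny-10-0-0-0', action='DENY', subnets=['10.0.0.0/24'])
    #   tier1_api.update_advertisement_rules(tier1_id, [rule],
    #                                        name_prefix='deny-')
    # With a name_prefix, only the existing rules whose names start with the
    # prefix are replaced; without it, the entire rule list is overwritten.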
@staticmethod
def _locale_service_id(tier1_id):
# Supporting only a single locale-service per router for now
# with the same id as the router id with a constant suffix
return tier1_id + NsxPolicyTier1Api.LOCALE_SERVICE_SUFF
def create_locale_service(self, tier1_id,
tenant=constants.POLICY_INFRA_TENANT):
t1service_def = core_defs.Tier1LocaleServiceDef(
tier1_id=tier1_id,
service_id=self._locale_service_id(tier1_id),
tenant=tenant)
self._create_or_store(t1service_def)
def delete_locale_service(self, tier1_id,
tenant=constants.POLICY_INFRA_TENANT):
t1service_def = core_defs.Tier1LocaleServiceDef(
tier1_id=tier1_id,
service_id=self._locale_service_id(tier1_id),
tenant=tenant)
self._delete_with_retry(t1service_def)
def get_preferred_edge_paths(self, tier1_id,
tenant=constants.POLICY_INFRA_TENANT):
services = self.get_locale_tier1_services(tier1_id, tenant=tenant)
for srv in services:
if 'preferred_edge_paths' in srv:
return srv['preferred_edge_paths']
def set_edge_cluster_path(self, tier1_id, edge_cluster_path,
preferred_edge_paths=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
kwargs = self._get_user_args(
tier1_id=tier1_id, service_id=self._locale_service_id(tier1_id),
edge_cluster_path=edge_cluster_path,
preferred_edge_paths=preferred_edge_paths,
tenant=tenant
)
t1service_def = core_defs.Tier1LocaleServiceDef(**kwargs)
self._create_or_store(t1service_def)
def remove_edge_cluster(self, tier1_id,
tenant=constants.POLICY_INFRA_TENANT):
"""Reset the path in the locale-service (deleting it is not allowed)"""
t1service_def = core_defs.Tier1LocaleServiceDef(
tier1_id=tier1_id,
service_id=self._locale_service_id(tier1_id),
edge_cluster_path="",
tenant=tenant)
self.policy_api.create_or_update(t1service_def)
def get_edge_cluster_path(self, tier1_id,
tenant=constants.POLICY_INFRA_TENANT):
t1service_def = core_defs.Tier1LocaleServiceDef(
tier1_id=tier1_id,
service_id=self._locale_service_id(tier1_id),
tenant=tenant)
try:
t1service = self.policy_api.get(t1service_def)
return t1service.get('edge_cluster_path')
except exceptions.ResourceNotFound:
return
def get_edge_cluster_path_by_searching(
self, tier1_id, tenant=constants.POLICY_INFRA_TENANT):
"""Get the edge_cluster path of a Tier1 router"""
services = self.get_locale_tier1_services(tier1_id, tenant=tenant)
for srv in services:
if 'edge_cluster_path' in srv:
return srv['edge_cluster_path']
def get_locale_tier1_services(self, tier1_id,
tenant=constants.POLICY_INFRA_TENANT):
t1service_def = core_defs.Tier1LocaleServiceDef(
tier1_id=tier1_id,
tenant=constants.POLICY_INFRA_TENANT)
return self.policy_api.list(t1service_def)['results']
def add_segment_interface(self, tier1_id, interface_id, segment_id,
subnets, ipv6_ndra_profile_id=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
args = {'tier1_id': tier1_id,
'service_id': self._locale_service_id(tier1_id),
'interface_id': interface_id,
'segment_id': segment_id,
'subnets': subnets,
'tenant': tenant}
if ipv6_ndra_profile_id != IGNORE:
args['ipv6_ndra_profile_id'] = ipv6_ndra_profile_id
t1interface_def = core_defs.Tier1InterfaceDef(**args)
self.policy_api.create_or_update(t1interface_def)
def remove_segment_interface(self, tier1_id, interface_id,
tenant=constants.POLICY_INFRA_TENANT):
t1interface_def = core_defs.Tier1InterfaceDef(
tier1_id=tier1_id,
service_id=self._locale_service_id(tier1_id),
interface_id=interface_id,
tenant=tenant)
self._delete_with_retry(t1interface_def)
def list_segment_interface(self, tier1_id,
tenant=constants.POLICY_INFRA_TENANT):
t1interface_def = core_defs.Tier1InterfaceDef(
tier1_id=tier1_id,
service_id=self._locale_service_id(tier1_id),
tenant=tenant)
return self._list(t1interface_def)
def get_multicast(self, tier1_id, service_id=None,
tenant=constants.POLICY_INFRA_TENANT):
mcast_def = core_defs.Tier1MulticastDef(
tier1_id=tier1_id,
service_id=(service_id or
self._locale_service_id(tier1_id)),
tenant=tenant)
mcast_data = self.policy_api.get(mcast_def)
return mcast_data.get('enabled')
def _set_multicast(self, tier1_id, enabled, service_id, tenant):
args = {'tier1_id': tier1_id,
'service_id': (service_id or
self._locale_service_id(tier1_id)),
'enabled': enabled,
'tenant': tenant}
mcast_def = core_defs.Tier1MulticastDef(**args)
self._create_or_store(mcast_def)
def enable_multicast(self, tier1_id, service_id=None,
tenant=constants.POLICY_INFRA_TENANT):
self._set_multicast(tier1_id, True, service_id, tenant)
def disable_multicast(self, tier1_id, service_id=None,
tenant=constants.POLICY_INFRA_TENANT):
self._set_multicast(tier1_id, False, service_id, tenant)
def get_realized_state(self, tier1_id, entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
realization_info=None):
tier1_def = self.entry_def(tier1_id=tier1_id, tenant=tenant)
return self._get_realized_state(tier1_def, entity_type=entity_type,
realization_info=realization_info)
def get_realized_id(self, tier1_id, entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
realization_info=None):
tier1_def = self.entry_def(tier1_id=tier1_id, tenant=tenant)
if self.nsx_api:
# Use MP search api to find the LR ID as it is faster
return self._get_realized_id_using_search(
self.get_path(tier1_id, tenant=tenant),
self.nsx_api.logical_router.resource_type,
resource_def=tier1_def, entity_type=entity_type)
return self._get_realized_id(tier1_def, entity_type=entity_type,
realization_info=realization_info)
def get_realization_info(self, tier1_id, entity_type=None,
silent=False,
tenant=constants.POLICY_INFRA_TENANT):
tier1_def = self.entry_def(tier1_id=tier1_id, tenant=tenant)
return self._get_realization_info(tier1_def, silent=silent,
entity_type=entity_type)
def get_realized_router_port(self, tier1_id, silent=False,
tenant=constants.POLICY_INFRA_TENANT):
tier1_def = self.entry_def(tier1_id=tier1_id, tenant=tenant)
ports = self._get_realization_info(
tier1_def, entity_type='RealizedLogicalRouterPort',
            all_results=True, silent=silent)
return ports
def wait_until_realized(self, tier1_id, entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
sleep=None, max_attempts=None):
tier1_def = self.entry_def(tier1_id=tier1_id, tenant=tenant)
return self._wait_until_realized(tier1_def, entity_type=entity_type,
sleep=sleep,
max_attempts=max_attempts)
@check_allowed_passthrough
def update_transport_zone(self, tier1_id, transport_zone_id,
tenant=constants.POLICY_INFRA_TENANT):
"""Use the pass-through api to update the TZ zone on the NSX router"""
realization_info = self.wait_until_realized(
tier1_id, entity_type='RealizedLogicalRouter', tenant=tenant)
nsx_router_uuid = self.get_realized_id(
tier1_id, tenant=tenant, realization_info=realization_info)
self.nsx_api.logical_router.update(
nsx_router_uuid,
transport_zone_id=transport_zone_id)
@check_allowed_passthrough
def _get_realized_downlink_port(
self, tier1_id, segment_id,
tenant=constants.POLICY_INFRA_TENANT,
sleep=None, max_attempts=None):
"""Return the realized ID of a tier1 downlink port of a segment
If not found, wait until it has been realized
"""
if sleep is None:
sleep = self.nsxlib_config.realization_wait_sec
if max_attempts is None:
max_attempts = self.nsxlib_config.realization_max_attempts
tier1_def = self.entry_def(tier1_id=tier1_id, tenant=tenant)
path = tier1_def.get_resource_full_path()
test_num = 0
while test_num < max_attempts:
# get all the realized resources of the tier1
entities = self.policy_api.get_realized_entities(path)
for e in entities:
# Look for router ports
if (e['entity_type'] == 'RealizedLogicalRouterPort' and
e['state'] == constants.STATE_REALIZED):
# Get the NSX port to check if its the downlink port
port = self.nsx_api.logical_router_port.get(
e['realization_specific_identifier'])
# compare the segment ID to the port display name as this
# is the way policy sets it
port_type = port.get('resource_type')
if (port_type == nsx_constants.LROUTERPORT_DOWNLINK and
segment_id in port.get('display_name', '')):
return port['id']
eventlet.sleep(sleep)
test_num += 1
raise exceptions.DetailedRealizationTimeoutError(
resource_type='Tier1',
resource_id=tier1_id,
realized_type="downlink port",
related_type="segment",
related_id=segment_id,
attempts=max_attempts,
sleep=sleep)
@check_allowed_passthrough
def set_dhcp_relay(self, tier1_id, segment_id, relay_service_uuid,
tenant=constants.POLICY_INFRA_TENANT):
"""Set relay service on the nsx logical router port
Using passthrough api, as the policy api does not support this yet
"""
downlink_port_id = self._get_realized_downlink_port(
tier1_id, segment_id, tenant=tenant)
self.nsx_api.logical_router_port.update(
downlink_port_id, relay_service_uuid=relay_service_uuid)
def set_standby_relocation(self, tier1_id,
enable_standby_relocation=True,
tenant=constants.POLICY_INFRA_TENANT):
"""Set the flag for standby relocation on the Tier1 router
"""
return self.update(tier1_id,
enable_standby_relocation=enable_standby_relocation,
tenant=tenant)
class NsxPolicyTier0Api(NsxPolicyResourceBase):
"""NSX Tier0 API """
@property
def entry_def(self):
return core_defs.Tier0Def
def create_or_overwrite(self, name, tier0_id=None,
description=IGNORE,
ha_mode=constants.ACTIVE_ACTIVE,
failover_mode=constants.NON_PREEMPTIVE,
dhcp_config=IGNORE,
force_whitelisting=IGNORE,
default_rule_logging=IGNORE,
transit_subnets=IGNORE,
disable_firewall=IGNORE,
ipv6_ndra_profile_id=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
tier0_id = self._init_obj_uuid(tier0_id)
tier0_def = self._init_def(tier0_id=tier0_id,
name=name,
description=description,
ha_mode=ha_mode,
failover_mode=failover_mode,
dhcp_config=dhcp_config,
force_whitelisting=force_whitelisting,
default_rule_logging=default_rule_logging,
transit_subnets=transit_subnets,
disable_firewall=disable_firewall,
ipv6_ndra_profile_id=ipv6_ndra_profile_id,
tags=tags,
tenant=tenant)
self.policy_api.create_or_update(tier0_def)
return tier0_id
def delete(self, tier0_id, tenant=constants.POLICY_INFRA_TENANT):
tier0_def = self.entry_def(tier0_id=tier0_id, tenant=tenant)
self._delete_with_retry(tier0_def)
def get(self, tier0_id, tenant=constants.POLICY_INFRA_TENANT,
silent=False):
tier0_def = self.entry_def(tier0_id=tier0_id, tenant=tenant)
return self.policy_api.get(tier0_def, silent=silent)
def get_path(self, tier0_id, tenant=constants.POLICY_INFRA_TENANT):
tier0_def = self.entry_def(tier0_id=tier0_id, tenant=tenant)
return tier0_def.get_resource_full_path()
def list(self, tenant=constants.POLICY_INFRA_TENANT):
tier0_def = self.entry_def(tenant=tenant)
return self._list(tier0_def)
def update(self, tier0_id, name=IGNORE, description=IGNORE,
failover_mode=IGNORE,
dhcp_config=IGNORE,
force_whitelisting=IGNORE,
default_rule_logging=IGNORE,
transit_subnets=IGNORE,
disable_firewall=IGNORE,
ipv6_ndra_profile_id=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
self._update(tier0_id=tier0_id,
name=name,
description=description,
failover_mode=failover_mode,
dhcp_config=dhcp_config,
force_whitelisting=force_whitelisting,
default_rule_logging=default_rule_logging,
transit_subnets=transit_subnets,
disable_firewall=disable_firewall,
ipv6_ndra_profile_id=ipv6_ndra_profile_id,
tags=tags,
tenant=tenant)
def get_locale_services(self, tier0_id,
tenant=constants.POLICY_INFRA_TENANT):
t0service_def = core_defs.Tier0LocaleServiceDef(
tier0_id=tier0_id,
tenant=constants.POLICY_INFRA_TENANT)
return self.policy_api.list(t0service_def)['results']
def get_edge_cluster_path(self, tier0_id,
tenant=constants.POLICY_INFRA_TENANT):
"""Get the edge_cluster path of a Tier0 router"""
services = self.get_locale_services(tier0_id, tenant=tenant)
for srv in services:
if 'edge_cluster_path' in srv:
return srv['edge_cluster_path']
@check_allowed_passthrough
def get_overlay_transport_zone(
self, tier0_id,
tenant=constants.POLICY_INFRA_TENANT):
"""Use the pass-through api to get the TZ zone of the NSX tier0"""
realization_info = self.wait_until_realized(
tier0_id, entity_type='RealizedLogicalRouter', tenant=tenant)
nsx_router_uuid = self.get_realized_id(
tier0_id, tenant=tenant,
realization_info=realization_info)
return self.nsx_api.router.get_tier0_router_overlay_tz(
nsx_router_uuid)
def get_realized_state(self, tier0_id, entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
realization_info=None):
tier0_def = self.entry_def(tier0_id=tier0_id, tenant=tenant)
return self._get_realized_state(tier0_def, entity_type=entity_type,
realization_info=realization_info)
def get_realized_id(self, tier0_id, entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
realization_info=None):
tier0_def = self.entry_def(tier0_id=tier0_id, tenant=tenant)
return self._get_realized_id(tier0_def, entity_type=entity_type,
realization_info=realization_info)
def get_realization_info(self, tier0_id, entity_type=None,
silent=False,
tenant=constants.POLICY_INFRA_TENANT):
tier0_def = self.entry_def(tier0_id=tier0_id, tenant=tenant)
return self._get_realization_info(tier0_def, entity_type=entity_type,
silent=silent)
def wait_until_realized(self, tier0_id, entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
sleep=None, max_attempts=None):
tier0_def = self.entry_def(tier0_id=tier0_id, tenant=tenant)
return self._wait_until_realized(tier0_def, entity_type=entity_type,
sleep=sleep,
max_attempts=max_attempts)
@check_allowed_passthrough
def get_transport_zones(self, tier0_id,
tenant=constants.POLICY_INFRA_TENANT):
"""Return a list of the transport zones IDs connected to the tier0
Currently this is supported only with the passthrough api
"""
realization_info = self.wait_until_realized(
tier0_id, entity_type='RealizedLogicalRouter', tenant=tenant)
nsx_router_uuid = self.get_realized_id(
tier0_id, tenant=tenant,
realization_info=realization_info)
return self.nsx_api.router.get_tier0_router_tz(
nsx_router_uuid)
def _get_uplink_subnets(self, tier0_id,
tenant=constants.POLICY_INFRA_TENANT):
subnets = []
services = self.get_locale_services(tier0_id, tenant=tenant)
for srv in services:
# get the interfaces of this service
t0interface_def = core_defs.Tier0InterfaceDef(
tier0_id=tier0_id,
service_id=srv['id'],
tenant=constants.POLICY_INFRA_TENANT)
interfaces = self.policy_api.list(
t0interface_def).get('results', [])
for interface in interfaces:
if interface.get('type') == 'EXTERNAL':
subnets.extend(interface.get('subnets', []))
return subnets
def get_uplink_ips(self, tier0_id, tenant=constants.POLICY_INFRA_TENANT):
"""Return a link of all uplink ips of this tier0 router"""
subnets = self._get_uplink_subnets(tier0_id, tenant=tenant)
uplink_ips = []
for subnet in subnets:
uplink_ips.extend(subnet.get('ip_addresses', []))
return uplink_ips
def get_uplink_cidrs(self, tier0_id, tenant=constants.POLICY_INFRA_TENANT):
"""Return a link of all uplink cidrs of this tier0 router"""
subnets = self._get_uplink_subnets(tier0_id, tenant=tenant)
cidrs = []
for subnet in subnets:
for ip_address in subnet.get('ip_addresses'):
cidrs.append('%s/%s' % (ip_address,
subnet.get('prefix_len')))
return cidrs
def get_bgp_config(self, tier0_id, tenant=constants.POLICY_INFRA_TENANT):
services = self.get_locale_services(tier0_id, tenant=tenant)
for srv in services:
bgpconfig_def = core_defs.BgpRoutingConfigDef(
tier0_id=tier0_id,
service_id=srv['id'],
tenant=constants.POLICY_INFRA_TENANT)
try:
return self.policy_api.get(bgpconfig_def)
except exceptions.ResourceNotFound:
continue
def build_route_redistribution_rule(self, name=None, types=None,
route_map_path=None):
return core_defs.Tier0RouteRedistributionRule(
name, types, route_map_path)
def build_route_redistribution_config(self, enabled=None, rules=None):
return core_defs.Tier0RouteRedistributionConfig(enabled, rules)
def get_route_redistribution_config(self, tier0_id,
tenant=constants.POLICY_INFRA_TENANT):
services = self.get_locale_services(tier0_id, tenant=tenant)
for srv in services:
if srv.get('route_redistribution_config'):
return srv['route_redistribution_config']
def update_route_redistribution_config(
self, tier0_id, redistribution_config, service_id=None,
tenant=constants.POLICY_INFRA_TENANT):
if not service_id:
# Update on the first locale service
services = self.get_locale_services(tier0_id, tenant=tenant)
if len(services) > 0:
service_id = services[0]['id']
if not service_id:
err_msg = (_("Cannot update route redistribution config without "
"locale service on Tier0 router"))
raise exceptions.ManagerError(details=err_msg)
service_def = core_defs.Tier0LocaleServiceDef(
nsx_version=self.version,
tier0_id=tier0_id,
service_id=service_id,
route_redistribution_config=redistribution_config,
tenant=tenant)
self.policy_api.create_or_update(service_def)
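    # Illustrative usage sketch ('tier0_api' stands for an instance of this
    # class; the rule name and redistribution type are hypothetical):
    #   rule = tier0_api.build_route_redistribution_rule(
    #       name='redistribute-connected', types=['TIER0_CONNECTED'])
    #   config = tier0_api.build_route_redistribution_config(
    #       enabled=True, rules=[rule])
    #   tier0_api.update_route_redistribution_config(tier0_id, config)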
class NsxPolicyTier0BgpApi(NsxPolicyResourceBase):
@property
def entry_def(self):
return core_defs.BgpRoutingConfigDef
def delete(self, tier0_id, service_id,
tenant=constants.POLICY_INFRA_TENANT):
err_msg = _("This action is currently not supported")
raise exceptions.ManagerError(details=err_msg)
def create_or_overwrite(self, tier0_id, service_id,
name=IGNORE,
description=IGNORE,
ecmp=IGNORE,
enabled=IGNORE,
graceful_restart_config=IGNORE,
inter_sr_ibgp=IGNORE,
local_as_num=IGNORE,
multipath_relax=IGNORE,
route_aggregations=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
bgp_config_def = self._init_def(
name=name,
description=description,
tier0_id=tier0_id,
service_id=service_id,
ecmp=ecmp,
enabled=enabled,
graceful_restart_config=graceful_restart_config,
inter_sr_ibgp=inter_sr_ibgp,
local_as_num=local_as_num,
multipath_relax=multipath_relax,
route_aggregations=route_aggregations,
tags=tags,
tenant=tenant)
self._create_or_store(bgp_config_def)
def get(self, tier0_id, service_id,
tenant=constants.POLICY_INFRA_TENANT, silent=False):
bgp_config_def = self.entry_def(
tier0_id=tier0_id, service_id=service_id, tenant=tenant)
return self.policy_api.get(bgp_config_def, silent=silent)
def list(self, tier0_id, service_id,
tenant=constants.POLICY_INFRA_TENANT):
err_msg = _("This action is currently not supported")
raise exceptions.ManagerError(details=err_msg)
def update(self, tier0_id, service_id,
name=IGNORE,
description=IGNORE,
ecmp=IGNORE,
enabled=IGNORE,
graceful_restart_config=IGNORE,
inter_sr_ibgp=IGNORE,
local_as_num=IGNORE,
multipath_relax=IGNORE,
route_aggregations=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT,
put=False,
revision=None):
return self._update(name=name,
description=description,
tier0_id=tier0_id,
service_id=service_id,
ecmp=ecmp,
enabled=enabled,
graceful_restart_config=graceful_restart_config,
inter_sr_ibgp=inter_sr_ibgp,
local_as_num=local_as_num,
multipath_relax=multipath_relax,
route_aggregations=route_aggregations,
tags=tags,
tenant=tenant,
put=put,
revision=revision)
class NsxPolicyTier0NatRuleApi(NsxPolicyResourceBase):
DEFAULT_NAT_ID = 'USER'
@property
def entry_def(self):
return core_defs.Tier0NatRule
def create_or_overwrite(self, name, tier0_id,
nat_id=DEFAULT_NAT_ID,
nat_rule_id=None,
description=IGNORE,
source_network=IGNORE,
destination_network=IGNORE,
translated_network=IGNORE,
firewall_match=constants.NAT_FIREWALL_MATCH_BYPASS,
action=IGNORE,
sequence_number=IGNORE,
logging=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT,
enabled=IGNORE):
nat_rule_id = self._init_obj_uuid(nat_rule_id)
nat_rule_def = self._init_def(tier0_id=tier0_id,
nat_id=nat_id,
nat_rule_id=nat_rule_id,
name=name,
description=description,
source_network=source_network,
destination_network=destination_network,
translated_network=translated_network,
firewall_match=firewall_match,
action=action,
sequence_number=sequence_number,
logging=logging,
tags=tags,
tenant=tenant,
enabled=enabled)
self._create_or_store(nat_rule_def)
return nat_rule_id
def delete(self, tier0_id, nat_rule_id, nat_id=DEFAULT_NAT_ID,
tenant=constants.POLICY_INFRA_TENANT):
nat_rule_def = self.entry_def(tier0_id=tier0_id, nat_id=nat_id,
nat_rule_id=nat_rule_id, tenant=tenant)
self._delete_with_retry(nat_rule_def)
def get(self, tier0_id, nat_rule_id, nat_id=DEFAULT_NAT_ID,
tenant=constants.POLICY_INFRA_TENANT, silent=False):
nat_rule_def = self.entry_def(tier0_id=tier0_id, nat_id=nat_id,
nat_rule_id=nat_rule_id, tenant=tenant)
return self.policy_api.get(nat_rule_def, silent=silent)
def list(self, tier0_id, nat_id=DEFAULT_NAT_ID,
tenant=constants.POLICY_INFRA_TENANT):
nat_rule_def = self.entry_def(tier0_id=tier0_id, nat_id=nat_id,
tenant=tenant)
return self._list(nat_rule_def)
def update(self, tier0_id, nat_rule_id,
nat_id=DEFAULT_NAT_ID,
name=IGNORE,
description=IGNORE,
source_network=IGNORE,
destination_network=IGNORE,
translated_network=IGNORE,
firewall_match=IGNORE,
action=IGNORE,
sequence_number=IGNORE,
logging=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT,
enabled=IGNORE):
self._update(tier0_id=tier0_id,
nat_id=nat_id,
nat_rule_id=nat_rule_id,
name=name,
description=description,
source_network=source_network,
destination_network=destination_network,
translated_network=translated_network,
firewall_match=firewall_match,
action=action,
sequence_number=sequence_number,
logging=logging,
tags=tags,
tenant=tenant,
enabled=enabled)
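    # Illustrative usage sketch (comments only, not executed): a typical
    # round trip with this NAT rule API, where ``api`` is an instance of
    # NsxPolicyTier0NatRuleApi. The id and network values are placeholders
    # and NAT_ACTION_SNAT is assumed to be defined in the policy constants
    # module.
    #
    #   rule_id = api.create_or_overwrite(
    #       'snat-rule', tier0_id,
    #       action=constants.NAT_ACTION_SNAT,
    #       source_network='10.0.0.0/24',
    #       translated_network='192.0.2.1')
    #   api.update(tier0_id, rule_id, logging=True)
    #   api.delete(tier0_id, rule_id)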
class NsxPolicyTier1NatRuleApi(NsxPolicyResourceBase):
DEFAULT_NAT_ID = 'USER'
@property
def entry_def(self):
return core_defs.Tier1NatRule
def create_or_overwrite(self, name, tier1_id,
nat_id=DEFAULT_NAT_ID,
nat_rule_id=None,
description=IGNORE,
source_network=IGNORE,
destination_network=IGNORE,
translated_network=IGNORE,
firewall_match=constants.NAT_FIREWALL_MATCH_BYPASS,
action=IGNORE,
sequence_number=IGNORE,
logging=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT,
enabled=IGNORE):
nat_rule_id = self._init_obj_uuid(nat_rule_id)
nat_rule_def = self._init_def(tier1_id=tier1_id,
nat_id=nat_id,
nat_rule_id=nat_rule_id,
name=name,
description=description,
source_network=source_network,
destination_network=destination_network,
translated_network=translated_network,
firewall_match=firewall_match,
action=action,
sequence_number=sequence_number,
logging=logging,
tags=tags,
tenant=tenant,
enabled=enabled)
self._create_or_store(nat_rule_def)
return nat_rule_id
def delete(self, tier1_id, nat_rule_id, nat_id=DEFAULT_NAT_ID,
tenant=constants.POLICY_INFRA_TENANT):
nat_rule_def = self.entry_def(tier1_id=tier1_id, nat_id=nat_id,
nat_rule_id=nat_rule_id, tenant=tenant)
self._delete_or_store(nat_rule_def)
def get(self, tier1_id, nat_rule_id, nat_id=DEFAULT_NAT_ID,
tenant=constants.POLICY_INFRA_TENANT, silent=False):
nat_rule_def = self.entry_def(tier1_id=tier1_id, nat_id=nat_id,
nat_rule_id=nat_rule_id, tenant=tenant)
return self.policy_api.get(nat_rule_def, silent=silent)
def list(self, tier1_id, nat_id=DEFAULT_NAT_ID,
tenant=constants.POLICY_INFRA_TENANT):
nat_rule_def = self.entry_def(tier1_id=tier1_id, nat_id=nat_id,
tenant=tenant)
return self._list(nat_rule_def)
def update(self, tier1_id, nat_rule_id,
nat_id=DEFAULT_NAT_ID,
name=IGNORE,
description=IGNORE,
source_network=IGNORE,
destination_network=IGNORE,
translated_network=IGNORE,
firewall_match=IGNORE,
action=IGNORE,
sequence_number=IGNORE,
logging=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT,
enabled=IGNORE):
self._update(tier1_id=tier1_id,
nat_id=nat_id,
nat_rule_id=nat_rule_id,
name=name,
description=description,
source_network=source_network,
destination_network=destination_network,
translated_network=translated_network,
firewall_match=firewall_match,
action=action,
sequence_number=sequence_number,
logging=logging,
tags=tags,
tenant=tenant,
enabled=enabled)
class NSXPolicyTier0StaticRouteApi(NsxPolicyResourceBase):
@property
def entry_def(self):
return core_defs.Tier0StaticRoute
def create_or_overwrite(self, name, tier0_id,
static_route_id=None,
description=IGNORE,
network=IGNORE,
next_hop=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT,
scope=IGNORE):
static_route_id = self._init_obj_uuid(static_route_id)
static_route_def = self._init_def(tier0_id=tier0_id,
static_route_id=static_route_id,
name=name,
description=description,
network=network,
next_hop=next_hop,
tags=tags,
tenant=tenant,
scope=scope)
self._create_or_store(static_route_def)
return static_route_id
def delete(self, tier0_id, static_route_id,
tenant=constants.POLICY_INFRA_TENANT):
static_route_def = self.entry_def(tier0_id=tier0_id,
static_route_id=static_route_id,
tenant=tenant)
self._delete_with_retry(static_route_def)
def get(self, tier0_id, static_route_id,
tenant=constants.POLICY_INFRA_TENANT, silent=False):
static_route_def = self.entry_def(tier0_id=tier0_id,
static_route_id=static_route_id,
tenant=tenant)
return self.policy_api.get(static_route_def, silent=silent)
def list(self, tier0_id,
tenant=constants.POLICY_INFRA_TENANT):
static_route_def = self.entry_def(tier0_id=tier0_id,
tenant=tenant)
return self._list(static_route_def)
def update(self, tier0_id, static_route_id,
name=IGNORE,
description=IGNORE,
network=IGNORE,
next_hop=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
self._update(tier0_id=tier0_id,
static_route_id=static_route_id,
name=name,
description=description,
network=network,
next_hop=next_hop,
tags=tags,
tenant=tenant)
class NsxPolicyTier1StaticRouteApi(NsxPolicyResourceBase):
@property
def entry_def(self):
return core_defs.Tier1StaticRoute
def create_or_overwrite(self, name, tier1_id,
static_route_id=None,
description=IGNORE,
network=IGNORE,
next_hop=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
static_route_id = self._init_obj_uuid(static_route_id)
static_route_def = self._init_def(tier1_id=tier1_id,
static_route_id=static_route_id,
name=name,
description=description,
network=network,
next_hop=next_hop,
tags=tags,
tenant=tenant)
self._create_or_store(static_route_def)
return static_route_id
def delete(self, tier1_id, static_route_id,
tenant=constants.POLICY_INFRA_TENANT):
static_route_def = self.entry_def(tier1_id=tier1_id,
static_route_id=static_route_id,
tenant=tenant)
self._delete_with_retry(static_route_def)
def get(self, tier1_id, static_route_id,
tenant=constants.POLICY_INFRA_TENANT, silent=False):
static_route_def = self.entry_def(tier1_id=tier1_id,
static_route_id=static_route_id,
tenant=tenant)
return self.policy_api.get(static_route_def, silent=silent)
def list(self, tier1_id,
tenant=constants.POLICY_INFRA_TENANT):
static_route_def = self.entry_def(tier1_id=tier1_id,
tenant=tenant)
return self._list(static_route_def)
def update(self, tier1_id, static_route_id,
name=IGNORE,
description=IGNORE,
network=IGNORE,
next_hop=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
self._update(tier1_id=tier1_id,
static_route_id=static_route_id,
name=name,
description=description,
network=network,
next_hop=next_hop,
tags=tags,
tenant=tenant)
def wait_until_realized(self, tier1_id, static_route_id,
entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
sleep=None, max_attempts=None):
static_route_def = self.entry_def(tier1_id=tier1_id,
static_route_id=static_route_id,
tenant=tenant)
return self._wait_until_realized(static_route_def,
entity_type=entity_type,
sleep=sleep,
max_attempts=max_attempts)
class NsxPolicyTier1SegmentApi(NsxPolicyResourceBase):
"""NSX Tier1 Segment API """
@property
def entry_def(self):
return core_defs.Tier1SegmentDef
def build_subnet(self, gateway_address, dhcp_ranges=None,
dhcp_config=None):
return core_defs.Subnet(gateway_address, dhcp_ranges, dhcp_config)
def build_dhcp_config_v4(self, server_address, dns_servers=None,
lease_time=None, options=None):
return core_defs.SegmentDhcpConfigV4(server_address, dns_servers,
lease_time, options)
def build_dhcp_config_v6(self, server_address, dns_servers=None,
lease_time=None, domain_names=None):
return core_defs.SegmentDhcpConfigV6(server_address, dns_servers,
lease_time, domain_names)
def create_or_overwrite(self, name, tier1_id,
segment_id=None,
description=IGNORE,
subnets=IGNORE,
dhcp_config=IGNORE,
dns_domain_name=IGNORE,
vlan_ids=IGNORE,
default_rule_logging=IGNORE,
ip_pool_id=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
segment_id = self._init_obj_uuid(segment_id)
segment_def = self._init_def(tier1_id=tier1_id,
segment_id=segment_id,
name=name,
description=description,
subnets=subnets,
dhcp_config=dhcp_config,
dns_domain_name=dns_domain_name,
vlan_ids=vlan_ids,
default_rule_logging=default_rule_logging,
ip_pool_id=ip_pool_id,
tags=tags,
tenant=tenant)
self._create_or_store(segment_def)
return segment_id
def delete(self, tier1_id, segment_id,
tenant=constants.POLICY_INFRA_TENANT):
segment_def = self.entry_def(tier1_id=tier1_id,
segment_id=segment_id,
tenant=tenant)
self._delete_with_retry(segment_def)
def get(self, tier1_id, segment_id,
tenant=constants.POLICY_INFRA_TENANT, silent=False):
segment_def = self.entry_def(tier1_id=tier1_id,
segment_id=segment_id,
tenant=tenant)
return self.policy_api.get(segment_def, silent=silent)
def list(self, tier1_id, tenant=constants.POLICY_INFRA_TENANT):
segment_def = self.entry_def(tier1_id=tier1_id, tenant=tenant)
return self._list(segment_def)
def update(self, tier1_id, segment_id,
name=IGNORE,
description=IGNORE,
subnets=IGNORE,
dhcp_config=IGNORE,
dns_domain_name=IGNORE,
vlan_ids=IGNORE,
default_rule_logging=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
self._update(tier1_id=tier1_id,
segment_id=segment_id,
name=name,
description=description,
subnets=subnets,
dhcp_config=dhcp_config,
dns_domain_name=dns_domain_name,
vlan_ids=vlan_ids,
default_rule_logging=default_rule_logging,
tags=tags,
tenant=tenant)
class NsxPolicySegmentApi(NsxPolicyResourceBase):
"""NSX Infra Segment API """
@property
def entry_def(self):
return core_defs.SegmentDef
def build_subnet(self, gateway_address, dhcp_ranges=None,
dhcp_config=None):
return core_defs.Subnet(gateway_address, dhcp_ranges, dhcp_config)
def build_dhcp_config_v4(self, server_address, dns_servers=None,
lease_time=None, options=None):
return core_defs.SegmentDhcpConfigV4(server_address, dns_servers,
lease_time, options)
def build_dhcp_config_v6(self, server_address, dns_servers=None,
lease_time=None, domain_names=None):
return core_defs.SegmentDhcpConfigV6(server_address, dns_servers,
lease_time, domain_names)
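    # Illustrative usage sketch (comments only, not executed): the builders
    # above produce the subnet/DHCP structures that create_or_overwrite below
    # expects. ``api`` is an instance of this class; the addresses and
    # ``tz_id`` are placeholders.
    #
    #   dhcp = api.build_dhcp_config_v4('10.0.0.2/24',
    #                                   dns_servers=['10.0.0.53'])
    #   subnet = api.build_subnet('10.0.0.1/24', dhcp_config=dhcp)
    #   seg_id = api.create_or_overwrite('my-segment', subnets=[subnet],
    #                                    transport_zone_id=tz_id)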
def create_or_overwrite(self, name,
segment_id=None,
tier1_id=IGNORE,
tier0_id=IGNORE,
description=IGNORE,
subnets=IGNORE,
dns_domain_name=IGNORE,
vlan_ids=IGNORE,
transport_zone_id=IGNORE,
ip_pool_id=IGNORE,
multicast=IGNORE,
metadata_proxy_id=IGNORE,
dhcp_server_config_id=IGNORE,
admin_state=IGNORE,
ls_id=IGNORE,
unique_id=IGNORE,
ep_id=IGNORE,
overlay_id=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
if tier0_id != IGNORE and tier1_id != IGNORE:
err_msg = (_("Cannot connect Segment to a Tier-0 and Tier-1 "
"Gateway simultaneously"))
raise exceptions.InvalidInput(details=err_msg)
segment_id = self._init_obj_uuid(segment_id)
segment_def = self._init_def(
segment_id=segment_id,
name=name,
description=description,
tier1_id=tier1_id,
tier0_id=tier0_id,
subnets=subnets,
dns_domain_name=dns_domain_name,
vlan_ids=vlan_ids,
transport_zone_id=transport_zone_id,
ip_pool_id=ip_pool_id,
multicast=multicast,
metadata_proxy_id=metadata_proxy_id,
dhcp_server_config_id=dhcp_server_config_id,
admin_state=admin_state,
ls_id=ls_id,
unique_id=unique_id,
ep_id=ep_id,
overlay_id=overlay_id,
tags=tags,
tenant=tenant)
self._create_or_store(segment_def)
return segment_id
def delete(self, segment_id,
tenant=constants.POLICY_INFRA_TENANT):
segment_def = self.entry_def(segment_id=segment_id, tenant=tenant)
@utils.retry_upon_exception(
exceptions.NsxSegemntWithVM,
delay=self.nsxlib_config.realization_wait_sec,
max_attempts=self.nsxlib_config.realization_max_attempts)
def do_delete():
self._delete_with_retry(segment_def)
do_delete()
def get(self, segment_id,
tenant=constants.POLICY_INFRA_TENANT, silent=False):
segment_def = self.entry_def(segment_id=segment_id, tenant=tenant)
return self.policy_api.get(segment_def, silent=silent)
def list(self, tenant=constants.POLICY_INFRA_TENANT):
segment_def = self.entry_def(tenant=tenant)
return self._list(segment_def)
def update(self, segment_id, name=IGNORE, description=IGNORE,
tier1_id=IGNORE, tier0_id=IGNORE, subnets=IGNORE,
dns_domain_name=IGNORE,
vlan_ids=IGNORE, multicast=IGNORE, metadata_proxy_id=IGNORE,
dhcp_server_config_id=IGNORE, admin_state=IGNORE,
tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT):
# NOTE: Setting multicast upon update will reset any other advanced
# config attribute that might have been set previously
# TODO(sorlando): Regardless of patch strategy always fetch advanced
# config and merge before updating
self._update(segment_id=segment_id,
name=name,
description=description,
tier1_id=tier1_id,
tier0_id=tier0_id,
subnets=subnets,
dns_domain_name=dns_domain_name,
vlan_ids=vlan_ids,
multicast=multicast,
metadata_proxy_id=metadata_proxy_id,
dhcp_server_config_id=dhcp_server_config_id,
admin_state=admin_state,
tags=tags,
tenant=tenant)
def remove_connectivity_and_subnets(
self, segment_id,
tenant=constants.POLICY_INFRA_TENANT):
"""Disconnect a segment from a router and remove its subnets.
PATCH does not support this action so PUT is used for this
"""
# Get the current segment and update it
segment = self.get(segment_id)
segment['subnets'] = None
segment['connectivity_path'] = None
segment_def = self.entry_def(segment_id=segment_id, tenant=tenant)
path = segment_def.get_resource_path()
self.policy_api.client.update(path, segment)
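    # Illustrative usage sketch (comments only, not executed): a common
    # cleanup flow is to detach the segment with the PUT-based helper above
    # before deleting it. ``api`` is an instance of this class and ``seg_id``
    # is a placeholder.
    #
    #   api.remove_connectivity_and_subnets(seg_id)
    #   api.delete(seg_id)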
def remove_connectivity_path(self, segment_id,
tenant=constants.POLICY_INFRA_TENANT):
"""Disconnect a segment from a router.
PATCH does not support this action so PUT is used for this
"""
# Get the current segment and update it
segment = self.get(segment_id)
segment['connectivity_path'] = None
segment_def = self.entry_def(segment_id=segment_id, tenant=tenant)
path = segment_def.get_resource_path()
self.policy_api.client.update(path, segment)
def get_realized_state(self, segment_id, entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
realization_info=None):
segment_def = self.entry_def(segment_id=segment_id, tenant=tenant)
return self._get_realized_state(segment_def, entity_type=entity_type,
realization_info=realization_info)
def get_realized_id(self, segment_id, entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
realization_info=None):
segment_def = self.entry_def(segment_id=segment_id, tenant=tenant)
return self._get_realized_id(segment_def, entity_type=entity_type,
realization_info=realization_info)
def get_path(self, segment_id, tenant=constants.POLICY_INFRA_TENANT):
segment_def = self.entry_def(segment_id=segment_id, tenant=tenant)
return segment_def.get_resource_full_path()
def get_realized_logical_switch_id(self, segment_id,
tenant=constants.POLICY_INFRA_TENANT):
segment_def = self.entry_def(segment_id=segment_id, tenant=tenant)
entity_type = 'RealizedLogicalSwitch'
if self.nsx_api:
# Use MP search api to find the LS ID as it is faster
return self._get_realized_id_using_search(
self.get_path(segment_id, tenant=tenant),
self.nsx_api.logical_switch.resource_type,
resource_def=segment_def, entity_type=entity_type)
realization_info = self._wait_until_realized(
segment_def, entity_type=entity_type)
return self._get_realized_id(segment_def,
realization_info=realization_info)
def get_realization_info(self, segment_id, entity_type=None,
silent=False,
tenant=constants.POLICY_INFRA_TENANT):
segment_def = self.entry_def(segment_id=segment_id, tenant=tenant)
return self._get_realization_info(segment_def,
entity_type=entity_type,
silent=silent)
def wait_until_realized(self, segment_id, entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
sleep=None, max_attempts=None):
segment_def = self.entry_def(segment_id=segment_id, tenant=tenant)
return self._wait_until_realized(segment_def, entity_type=entity_type,
sleep=sleep,
max_attempts=max_attempts)
def wait_until_state_successful(self, segment_id,
tenant=constants.POLICY_INFRA_TENANT,
sleep=None, max_attempts=None,
with_refresh=False):
segment_def = self.entry_def(segment_id=segment_id, tenant=tenant)
self._wait_until_state_successful(segment_def, sleep=sleep,
max_attempts=max_attempts,
with_refresh=with_refresh)
@check_allowed_passthrough
def set_admin_state(self, segment_id, admin_state,
tenant=constants.POLICY_INFRA_TENANT):
"""Set the segment admin state using the passthrough/policy api"""
if (version.LooseVersion(self.version) >=
version.LooseVersion(nsx_constants.NSX_VERSION_3_0_0)):
return self.update(segment_id, admin_state=admin_state,
tenant=tenant)
realization_info = self.wait_until_realized(
segment_id, entity_type='RealizedLogicalSwitch', tenant=tenant)
nsx_ls_uuid = self.get_realized_id(
segment_id, tenant=tenant, realization_info=realization_info)
self.nsx_api.logical_switch.update(
nsx_ls_uuid,
admin_state=admin_state)
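    # Illustrative usage sketch (comments only, not executed): on NSX 3.0.0
    # and later the method above is a plain policy update; on older versions
    # it waits for the realized logical switch and updates it through the
    # passthrough API. The caller's side is the same either way:
    #
    #   api.set_admin_state(seg_id, True)   # seg_id is a placeholder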
def get_transport_zone_id(self, segment_id,
tenant=constants.POLICY_INFRA_TENANT):
segment = self.get(segment_id, tenant=tenant)
tz_path = segment.get('transport_zone_path')
if tz_path:
return p_utils.path_to_id(tz_path)
class NsxPolicySegmentPortApi(NsxPolicyResourceBase):
"""NSX Segment Port API """
@property
def entry_def(self):
return core_defs.SegmentPortDef
def build_address_binding(self, ip_address, mac_address,
vlan_id=None):
return core_defs.PortAddressBinding(ip_address,
mac_address,
vlan_id)
def create_or_overwrite(self, name,
segment_id,
port_id=None,
description=IGNORE,
address_bindings=IGNORE,
attachment_type=IGNORE,
vif_id=IGNORE,
app_id=IGNORE,
context_id=IGNORE,
traffic_tag=IGNORE,
allocate_addresses=IGNORE,
hyperbus_mode=IGNORE,
admin_state=IGNORE,
init_state=IGNORE,
extra_configs=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
port_id = self._init_obj_uuid(port_id)
port_def = self._init_def(segment_id=segment_id,
port_id=port_id,
name=name,
description=description,
address_bindings=address_bindings,
attachment_type=attachment_type,
vif_id=vif_id,
app_id=app_id,
context_id=context_id,
traffic_tag=traffic_tag,
allocate_addresses=allocate_addresses,
hyperbus_mode=hyperbus_mode,
admin_state=admin_state,
init_state=init_state,
extra_configs=extra_configs,
tags=tags,
tenant=tenant)
self._create_or_store(port_def)
return port_id
def delete(self, segment_id, port_id,
tenant=constants.POLICY_INFRA_TENANT):
port_def = self.entry_def(segment_id=segment_id,
port_id=port_id,
tenant=tenant)
self._delete_with_retry(port_def)
def get(self, segment_id, port_id,
tenant=constants.POLICY_INFRA_TENANT,
silent=False):
port_def = self.entry_def(segment_id=segment_id,
port_id=port_id,
tenant=tenant)
return self.policy_api.get(port_def, silent=silent)
def list(self, segment_id, tenant=constants.POLICY_INFRA_TENANT):
port_def = self.entry_def(segment_id=segment_id, tenant=tenant)
return self._list(port_def)
def update(self, segment_id, port_id,
name=IGNORE,
description=IGNORE,
address_bindings=IGNORE,
hyperbus_mode=IGNORE,
admin_state=IGNORE,
extra_configs=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
self._update(segment_id=segment_id,
port_id=port_id,
name=name,
description=description,
address_bindings=address_bindings,
hyperbus_mode=hyperbus_mode,
admin_state=admin_state,
extra_configs=extra_configs,
tags=tags,
tenant=tenant)
def detach(self, segment_id, port_id, vif_id=None, tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
"""Reset the attachment with or without a vif_id"""
# Due to platform limitation, PUT should be used here and not PATCH
port_def = self.entry_def(
segment_id=segment_id,
port_id=port_id,
tenant=tenant)
path = port_def.get_resource_path()
@utils.retry_upon_exception(
exceptions.StaleRevision,
max_attempts=self.policy_api.client.max_attempts)
def _detach():
port = self.policy_api.get(port_def)
if vif_id:
port['attachment'] = {'id': vif_id}
else:
port['attachment'] = None
if tags != IGNORE:
port['tags'] = tags
self.policy_api.client.update(path, port)
_detach()
def attach(self, segment_id, port_id,
attachment_type,
vif_id,
allocate_addresses=None,
app_id=None,
context_id=None,
traffic_tag=None,
hyperbus_mode=IGNORE,
extra_configs=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
self._update(segment_id=segment_id,
port_id=port_id,
attachment_type=attachment_type,
allocate_addresses=allocate_addresses,
vif_id=vif_id,
app_id=app_id,
context_id=context_id,
traffic_tag=traffic_tag,
hyperbus_mode=hyperbus_mode,
extra_configs=extra_configs,
tags=tags,
tenant=tenant)
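    # Illustrative usage sketch (comments only, not executed): attaching a
    # VIF to a port and later resetting the attachment with the PUT-based
    # detach above. ``api`` is an instance of this class; the ids are
    # placeholders and the attachment_type/allocate_addresses literals are
    # examples only.
    #
    #   api.attach(seg_id, port_id,
    #              attachment_type='PARENT',
    #              vif_id=vif_uuid,
    #              allocate_addresses='BOTH')
    #   api.detach(seg_id, port_id)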
def get_realized_state(self, segment_id, port_id, entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
realization_info=None):
port_def = self.entry_def(segment_id=segment_id,
port_id=port_id,
tenant=tenant)
return self._get_realized_state(port_def, entity_type=entity_type,
realization_info=realization_info)
def get_realized_id(self, segment_id, port_id, entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
realization_info=None):
port_def = self.entry_def(segment_id=segment_id,
port_id=port_id,
tenant=tenant)
return self._get_realized_id(port_def, entity_type=entity_type,
realization_info=realization_info)
def get_realization_info(self, segment_id, port_id, entity_type=None,
silent=False,
tenant=constants.POLICY_INFRA_TENANT):
port_def = self.entry_def(segment_id=segment_id,
port_id=port_id,
tenant=tenant)
return self._get_realization_info(port_def, entity_type=entity_type,
silent=silent)
def wait_until_realized(self, segment_id, port_id, entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
sleep=None, max_attempts=None):
port_def = self.entry_def(segment_id=segment_id, port_id=port_id,
tenant=tenant)
return self._wait_until_realized(port_def, entity_type=entity_type,
sleep=sleep,
max_attempts=max_attempts)
@check_allowed_passthrough
def set_admin_state(self, segment_id, port_id, admin_state,
tenant=constants.POLICY_INFRA_TENANT):
"""Set the segment port admin state using the passthrough/policy api"""
if (version.LooseVersion(self.version) >=
version.LooseVersion(nsx_constants.NSX_VERSION_3_0_0)):
return self.update(segment_id, port_id, admin_state=admin_state,
tenant=tenant)
realization_info = self.wait_until_realized(
segment_id, port_id, entity_type='RealizedLogicalPort',
tenant=tenant)
nsx_lp_uuid = self.get_realized_id(
segment_id, port_id, tenant=tenant,
realization_info=realization_info)
self.nsx_api.logical_port.update(
nsx_lp_uuid, False,
admin_state=admin_state)
class SegmentProfilesBindingMapBaseApi(NsxPolicyResourceBase):
def delete(self, segment_id, map_id=DEFAULT_MAP_ID,
tenant=constants.POLICY_INFRA_TENANT):
map_def = self.entry_def(segment_id=segment_id,
map_id=map_id,
tenant=tenant)
self._delete_with_retry(map_def)
def get(self, segment_id, map_id=DEFAULT_MAP_ID,
tenant=constants.POLICY_INFRA_TENANT, silent=False):
map_def = self.entry_def(segment_id=segment_id,
map_id=map_id,
tenant=tenant)
return self.policy_api.get(map_def, silent=silent)
def list(self, segment_id,
tenant=constants.POLICY_INFRA_TENANT):
map_def = self.entry_def(segment_id=segment_id,
tenant=tenant)
return self._list(map_def)
class SegmentSecurityProfilesBindingMapApi(SegmentProfilesBindingMapBaseApi):
@property
def entry_def(self):
return core_defs.SegmentSecProfilesBindingMapDef
def create_or_overwrite(self, name, segment_id,
map_id=DEFAULT_MAP_ID,
description=IGNORE,
segment_security_profile_id=IGNORE,
spoofguard_profile_id=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
map_id = self._init_obj_uuid(map_id)
map_def = self._init_def(
segment_id=segment_id,
map_id=map_id,
name=name,
description=description,
segment_security_profile_id=segment_security_profile_id,
spoofguard_profile_id=spoofguard_profile_id,
tags=tags,
tenant=tenant)
self._create_or_store(map_def)
return map_id
def update(self, segment_id,
map_id=DEFAULT_MAP_ID,
name=IGNORE,
description=IGNORE,
segment_security_profile_id=IGNORE,
spoofguard_profile_id=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
self._update(
segment_id=segment_id,
map_id=map_id,
name=name,
description=description,
segment_security_profile_id=segment_security_profile_id,
spoofguard_profile_id=spoofguard_profile_id,
tags=tags,
tenant=tenant)
class SegmentDiscoveryProfilesBindingMapApi(SegmentProfilesBindingMapBaseApi):
@property
def entry_def(self):
return core_defs.SegmentDiscoveryProfilesBindingMapDef
def create_or_overwrite(self, name, segment_id,
map_id=DEFAULT_MAP_ID,
description=IGNORE,
ip_discovery_profile_id=IGNORE,
mac_discovery_profile_id=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
map_id = self._init_obj_uuid(map_id)
map_def = self._init_def(
segment_id=segment_id,
map_id=map_id,
name=name,
description=description,
ip_discovery_profile_id=ip_discovery_profile_id,
mac_discovery_profile_id=mac_discovery_profile_id,
tags=tags,
tenant=tenant)
self._create_or_store(map_def)
return map_id
def update(self, segment_id,
map_id=DEFAULT_MAP_ID,
name=IGNORE,
description=IGNORE,
ip_discovery_profile_id=IGNORE,
mac_discovery_profile_id=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
self._update(
segment_id=segment_id,
map_id=map_id,
name=name,
description=description,
ip_discovery_profile_id=ip_discovery_profile_id,
mac_discovery_profile_id=mac_discovery_profile_id,
tags=tags,
tenant=tenant)
class SegmentQosProfilesBindingMapApi(SegmentProfilesBindingMapBaseApi):
@property
def entry_def(self):
return core_defs.SegmentQosProfilesBindingMapDef
def create_or_overwrite(self, name, segment_id,
map_id=DEFAULT_MAP_ID,
description=IGNORE,
qos_profile_id=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
map_id = self._init_obj_uuid(map_id)
map_def = self._init_def(
segment_id=segment_id,
map_id=map_id,
name=name,
description=description,
qos_profile_id=qos_profile_id,
tags=tags,
tenant=tenant)
self._create_or_store(map_def)
return map_id
def update(self, segment_id,
map_id=DEFAULT_MAP_ID,
name=IGNORE,
description=IGNORE,
qos_profile_id=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
self._update(
segment_id=segment_id,
map_id=map_id,
name=name,
description=description,
qos_profile_id=qos_profile_id,
tags=tags,
tenant=tenant)
class SegmentPortProfilesBindingMapBaseApi(NsxPolicyResourceBase):
def delete(self, segment_id, port_id, map_id=DEFAULT_MAP_ID,
tenant=constants.POLICY_INFRA_TENANT):
map_def = self.entry_def(segment_id=segment_id,
port_id=port_id,
map_id=map_id,
tenant=tenant)
self._delete_with_retry(map_def)
def get(self, segment_id, port_id, map_id=DEFAULT_MAP_ID,
tenant=constants.POLICY_INFRA_TENANT, silent=False):
map_def = self.entry_def(segment_id=segment_id,
port_id=port_id,
map_id=map_id,
tenant=tenant)
return self.policy_api.get(map_def, silent=silent)
def list(self, segment_id, port_id,
tenant=constants.POLICY_INFRA_TENANT):
map_def = self.entry_def(segment_id=segment_id,
port_id=port_id,
tenant=tenant)
return self._list(map_def)
class SegmentPortSecurityProfilesBindingMapApi(
SegmentPortProfilesBindingMapBaseApi):
@property
def entry_def(self):
return core_defs.SegmentPortSecProfilesBindingMapDef
def create_or_overwrite(self, name, segment_id, port_id,
map_id=DEFAULT_MAP_ID,
description=IGNORE,
segment_security_profile_id=IGNORE,
spoofguard_profile_id=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
map_id = self._init_obj_uuid(map_id)
map_def = self._init_def(
segment_id=segment_id,
port_id=port_id,
map_id=map_id,
name=name,
description=description,
segment_security_profile_id=segment_security_profile_id,
spoofguard_profile_id=spoofguard_profile_id,
tags=tags,
tenant=tenant)
self._create_or_store(map_def)
return map_id
def update(self, segment_id, port_id,
map_id=DEFAULT_MAP_ID,
name=IGNORE,
description=IGNORE,
segment_security_profile_id=IGNORE,
spoofguard_profile_id=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
self._update(
segment_id=segment_id,
port_id=port_id,
map_id=map_id,
name=name,
description=description,
segment_security_profile_id=segment_security_profile_id,
spoofguard_profile_id=spoofguard_profile_id,
tags=tags,
tenant=tenant)
class SegmentPortDiscoveryProfilesBindingMapApi(
SegmentPortProfilesBindingMapBaseApi):
@property
def entry_def(self):
return core_defs.SegmentPortDiscoveryProfilesBindingMapDef
def create_or_overwrite(self, name, segment_id, port_id,
map_id=DEFAULT_MAP_ID,
description=IGNORE,
mac_discovery_profile_id=IGNORE,
ip_discovery_profile_id=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
map_id = self._init_obj_uuid(map_id)
map_def = self._init_def(
segment_id=segment_id,
port_id=port_id,
map_id=map_id,
name=name,
description=description,
mac_discovery_profile_id=mac_discovery_profile_id,
ip_discovery_profile_id=ip_discovery_profile_id,
tags=tags,
tenant=tenant)
self._create_or_store(map_def)
return map_id
def update(self, segment_id, port_id,
map_id=DEFAULT_MAP_ID,
name=IGNORE,
description=IGNORE,
mac_discovery_profile_id=IGNORE,
ip_discovery_profile_id=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
self._update(
segment_id=segment_id,
port_id=port_id,
map_id=map_id,
name=name,
description=description,
mac_discovery_profile_id=mac_discovery_profile_id,
ip_discovery_profile_id=ip_discovery_profile_id,
tags=tags,
tenant=tenant)
class SegmentPortQosProfilesBindingMapApi(
SegmentPortProfilesBindingMapBaseApi):
@property
def entry_def(self):
return core_defs.SegmentPortQoSProfilesBindingMapDef
def create_or_overwrite(self, name, segment_id, port_id,
map_id=DEFAULT_MAP_ID,
description=IGNORE,
qos_profile_id=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
map_id = self._init_obj_uuid(map_id)
map_def = self._init_def(
segment_id=segment_id,
port_id=port_id,
map_id=map_id,
name=name,
description=description,
qos_profile_id=qos_profile_id,
tags=tags,
tenant=tenant)
self._create_or_store(map_def)
return map_id
def update(self, segment_id, port_id,
map_id=DEFAULT_MAP_ID,
name=IGNORE,
description=IGNORE,
qos_profile_id=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
self._update(
segment_id=segment_id,
port_id=port_id,
map_id=map_id,
name=name,
description=description,
qos_profile_id=qos_profile_id,
tags=tags,
tenant=tenant)
class NsxPolicyTier1SegmentPortApi(NsxPolicyResourceBase):
"""NSX Tier1 Segment Port API """
@property
def entry_def(self):
return core_defs.Tier1SegmentPortDef
def build_address_binding(self, ip_address, mac_address,
vlan_id=None):
return core_defs.PortAddressBinding(ip_address,
mac_address,
vlan_id)
def create_or_overwrite(self, name,
tier1_id,
segment_id,
port_id=None,
description=IGNORE,
address_bindings=IGNORE,
attachment_type=IGNORE,
vif_id=IGNORE,
app_id=IGNORE,
context_id=IGNORE,
traffic_tag=IGNORE,
allocate_addresses=IGNORE,
hyperbus_mode=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
port_id = self._init_obj_uuid(port_id)
port_def = self._init_def(segment_id=segment_id,
tier1_id=tier1_id,
port_id=port_id,
name=name,
description=description,
address_bindings=address_bindings,
attachment_type=attachment_type,
vif_id=vif_id,
app_id=app_id,
context_id=context_id,
traffic_tag=traffic_tag,
allocate_addresses=allocate_addresses,
hyperbus_mode=hyperbus_mode,
tags=tags,
tenant=tenant)
self._create_or_store(port_def)
return port_id
def delete(self, tier1_id, segment_id, port_id,
tenant=constants.POLICY_INFRA_TENANT):
port_def = self.entry_def(segment_id=segment_id,
tier1_id=tier1_id,
port_id=port_id,
tenant=tenant)
self._delete_with_retry(port_def)
def get(self, tier1_id, segment_id, port_id,
tenant=constants.POLICY_INFRA_TENANT,
silent=False):
port_def = self.entry_def(segment_id=segment_id,
tier1_id=tier1_id,
port_id=port_id,
tenant=tenant)
return self.policy_api.get(port_def, silent=silent)
def list(self, tier1_id, segment_id,
tenant=constants.POLICY_INFRA_TENANT):
port_def = self.entry_def(segment_id=segment_id, tier1_id=tier1_id,
tenant=tenant)
return self._list(port_def)
def update(self, tier1_id, segment_id, port_id,
name=IGNORE,
description=IGNORE,
address_bindings=IGNORE,
hyperbus_mode=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
self._update(segment_id=segment_id,
tier1_id=tier1_id,
port_id=port_id,
name=name,
description=description,
address_bindings=address_bindings,
hyperbus_mode=hyperbus_mode,
tags=tags,
tenant=tenant)
def detach(self, tier1_id, segment_id, port_id,
tenant=constants.POLICY_INFRA_TENANT):
port_def = self.entry_def(segment_id=segment_id,
tier1_id=tier1_id,
port_id=port_id,
attachment_type=None,
tenant=tenant)
self.policy_api.create_or_update(port_def)
def attach(self, tier1_id, segment_id, port_id,
attachment_type,
vif_id,
allocate_addresses,
app_id=None,
context_id=None,
hyperbus_mode=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
port_def = self.entry_def(segment_id=segment_id,
tier1_id=tier1_id,
port_id=port_id,
attachment_type=attachment_type,
allocate_addresses=allocate_addresses,
vif_id=vif_id,
app_id=app_id,
context_id=context_id,
hyperbus_mode=hyperbus_mode,
tenant=tenant)
self.policy_api.create_or_update(port_def)
def get_realized_state(self, tier1_id, segment_id, port_id,
entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
realization_info=None):
port_def = self.entry_def(segment_id=segment_id,
tier1_id=tier1_id,
port_id=port_id,
tenant=tenant)
return self._get_realized_state(port_def, entity_type=entity_type,
realization_info=realization_info)
def get_realized_id(self, tier1_id, segment_id, port_id, entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
realization_info=None):
port_def = self.entry_def(segment_id=segment_id,
tier1_id=tier1_id,
port_id=port_id,
tenant=tenant)
return self._get_realized_id(port_def, entity_type=entity_type,
realization_info=realization_info)
def get_realization_info(self, tier1_id, segment_id, port_id,
entity_type=None, silent=False,
tenant=constants.POLICY_INFRA_TENANT):
port_def = self.entry_def(segment_id=segment_id,
tier1_id=tier1_id,
port_id=port_id,
tenant=tenant)
return self._get_realization_info(port_def, entity_type=entity_type,
silent=silent)
def wait_until_realized(self, tier1_id, segment_id, port_id,
entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
sleep=None, max_attempts=None):
port_def = self.entry_def(segment_id=segment_id, port_id=port_id,
tier1_id=tier1_id, tenant=tenant)
return self._wait_until_realized(port_def, entity_type=entity_type,
sleep=sleep,
max_attempts=max_attempts)
# This resource is both for DhcpV4StaticBindingConfig and
# DhcpV6StaticBindingConfig
class SegmentDhcpStaticBindingConfigApi(NsxPolicyResourceBase):
@property
def entry_def(self):
return core_defs.DhcpV4StaticBindingConfig
def create_or_overwrite(self, name,
segment_id,
binding_id=None,
**kwargs):
err_msg = (_("This action is not supported. Please call "
"create_or_overwrite_v4 or create_or_overwrite_v6"))
raise exceptions.ManagerError(details=err_msg)
def create_or_overwrite_v4(self, name,
segment_id,
binding_id=None,
description=IGNORE,
gateway_address=IGNORE,
host_name=IGNORE,
ip_address=IGNORE,
lease_time=IGNORE,
mac_address=IGNORE,
options=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
binding_id = self._init_obj_uuid(binding_id)
binding_def = self._init_def(segment_id=segment_id,
binding_id=binding_id,
name=name,
description=description,
gateway_address=gateway_address,
host_name=host_name,
ip_address=ip_address,
lease_time=lease_time,
mac_address=mac_address,
options=options,
tags=tags,
tenant=tenant)
self._create_or_store(binding_def)
return binding_id
def create_or_overwrite_v6(self, name,
segment_id,
binding_id=None,
description=IGNORE,
domain_names=IGNORE,
dns_nameservers=IGNORE,
ip_addresses=IGNORE,
sntp_servers=IGNORE,
preferred_time=IGNORE,
lease_time=IGNORE,
mac_address=IGNORE,
options=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
binding_id = self._init_obj_uuid(binding_id)
args = self._get_user_args(segment_id=segment_id,
binding_id=binding_id,
name=name,
description=description,
domain_names=domain_names,
dns_nameservers=dns_nameservers,
ip_addresses=ip_addresses,
sntp_servers=sntp_servers,
preferred_time=preferred_time,
lease_time=lease_time,
mac_address=mac_address,
options=options,
tags=tags,
tenant=tenant)
binding_def = core_defs.DhcpV6StaticBindingConfig(**args)
self._create_or_store(binding_def)
return binding_id
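    # Illustrative usage sketch (comments only, not executed): the generic
    # create_or_overwrite above is intentionally unsupported, so callers pick
    # the address-family specific variant. ``api`` is an instance of this
    # class; the ids and addresses are placeholders.
    #
    #   api.create_or_overwrite_v4('binding-v4', seg_id,
    #                              ip_address='10.0.0.5',
    #                              mac_address='fa:16:3e:00:00:01')
    #   api.create_or_overwrite_v6('binding-v6', seg_id,
    #                              ip_addresses=['2001:db8::5'],
    #                              mac_address='fa:16:3e:00:00:01')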
def delete(self, segment_id, binding_id,
tenant=constants.POLICY_INFRA_TENANT):
binding_def = self.entry_def(segment_id=segment_id,
binding_id=binding_id,
tenant=tenant)
self._delete_with_retry(binding_def)
def get(self, segment_id, binding_id,
tenant=constants.POLICY_INFRA_TENANT,
silent=False):
binding_def = self.entry_def(segment_id=segment_id,
binding_id=binding_id,
tenant=tenant)
return self.policy_api.get(binding_def, silent=silent)
def list(self, segment_id, tenant=constants.POLICY_INFRA_TENANT):
binding_def = self.entry_def(segment_id=segment_id, tenant=tenant)
return self._list(binding_def)
def update(self, segment_id, binding_id, **kwargs):
err_msg = (_("This action is currently not supported"))
raise exceptions.ManagerError(details=err_msg)
class NsxPolicyIpBlockApi(NsxPolicyResourceBase):
"""NSX Policy IP Block API"""
@property
def entry_def(self):
return core_defs.IpBlockDef
def create_or_overwrite(self, name,
ip_block_id=None,
description=IGNORE,
cidr=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
ip_block_id = self._init_obj_uuid(ip_block_id)
ip_block_def = self._init_def(ip_block_id=ip_block_id,
name=name,
description=description,
cidr=cidr,
tags=tags,
tenant=tenant)
self._create_or_store(ip_block_def)
return ip_block_id
def delete(self, ip_block_id, tenant=constants.POLICY_INFRA_TENANT):
ip_block_def = self.entry_def(ip_block_id=ip_block_id,
tenant=tenant)
self._delete_with_retry(ip_block_def)
def get(self, ip_block_id, tenant=constants.POLICY_INFRA_TENANT,
silent=False):
ip_block_def = self.entry_def(ip_block_id=ip_block_id,
tenant=tenant)
return self.policy_api.get(ip_block_def, silent=silent)
def list(self, tenant=constants.POLICY_INFRA_TENANT):
ip_block_def = self.entry_def(tenant=tenant)
return self._list(ip_block_def)
def update(self, ip_block_id, name=IGNORE, description=IGNORE,
cidr=IGNORE, tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
self._update(ip_block_id=ip_block_id,
name=name,
description=description,
cidr=cidr,
tags=tags,
tenant=tenant)
class NsxPolicyIpPoolApi(NsxPolicyResourceBase):
"""NSX Policy IP Pool API"""
@property
def entry_def(self):
return core_defs.IpPoolDef
def create_or_overwrite(self, name,
ip_pool_id=None,
description=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
ip_pool_id = self._init_obj_uuid(ip_pool_id)
ip_pool_def = self._init_def(ip_pool_id=ip_pool_id,
name=name,
description=description,
tags=tags,
tenant=tenant)
self._create_or_store(ip_pool_def)
return ip_pool_id
def delete(self, ip_pool_id, tenant=constants.POLICY_INFRA_TENANT):
ip_pool_def = self.entry_def(ip_pool_id=ip_pool_id,
tenant=tenant)
self._delete_or_store(ip_pool_def)
def get(self, ip_pool_id, tenant=constants.POLICY_INFRA_TENANT,
silent=False):
ip_pool_def = self.entry_def(ip_pool_id=ip_pool_id,
tenant=tenant)
return self.policy_api.get(ip_pool_def, silent=silent)
def list(self, tenant=constants.POLICY_INFRA_TENANT):
ip_pool_def = self.entry_def(tenant=tenant)
return self._list(ip_pool_def)
def update(self, ip_pool_id, name=IGNORE, description=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
self._update(ip_pool_id=ip_pool_id,
name=name,
description=description,
tags=tags,
tenant=tenant)
def allocate_ip(self, ip_pool_id, ip_allocation_id=None, ip_address=IGNORE,
name=IGNORE, description=IGNORE, tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
# If ip_address is not set, a random IP will be allocated
# from the pool.
ip_allocation_id = self._init_obj_uuid(ip_allocation_id)
args = self._get_user_args(
ip_pool_id=ip_pool_id,
ip_allocation_id=ip_allocation_id,
allocation_ip=ip_address,
name=name,
description=description,
tags=tags,
tenant=tenant)
ip_allocation_def = core_defs.IpPoolAllocationDef(**args)
self._create_or_store(ip_allocation_def)
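    # Illustrative usage sketch (comments only, not executed): allocating an
    # arbitrary IP from the pool and reading back the realized address.
    # ``api`` is an instance of this class; ``pool_id``/``alloc_id`` are
    # placeholders.
    #
    #   api.allocate_ip(pool_id, ip_allocation_id=alloc_id)
    #   ip = api.get_realized_allocated_ip(pool_id, alloc_id, wait=True)
    #   ...
    #   api.release_ip(pool_id, alloc_id)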
def release_ip(self, ip_pool_id, ip_allocation_id,
tenant=constants.POLICY_INFRA_TENANT):
ip_allocation_def = core_defs.IpPoolAllocationDef(
ip_allocation_id=ip_allocation_id,
ip_pool_id=ip_pool_id,
tenant=tenant)
self._delete_with_retry(ip_allocation_def)
def list_allocations(self, ip_pool_id,
tenant=constants.POLICY_INFRA_TENANT):
ip_allocation_def = core_defs.IpPoolAllocationDef(
ip_pool_id=ip_pool_id,
tenant=tenant)
return self._list(ip_allocation_def)
def get_allocation(self, ip_pool_id, ip_allocation_id,
tenant=constants.POLICY_INFRA_TENANT):
ip_allocation_def = core_defs.IpPoolAllocationDef(
ip_pool_id=ip_pool_id,
ip_allocation_id=ip_allocation_id,
tenant=tenant)
return self.policy_api.get(ip_allocation_def)
def allocate_block_subnet(self, ip_pool_id, ip_block_id, size,
ip_subnet_id=None, auto_assign_gateway=IGNORE,
name=IGNORE, description=IGNORE, tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT,
start_ip=IGNORE):
ip_subnet_id = self._init_obj_uuid(ip_subnet_id)
args = self._get_user_args(
ip_pool_id=ip_pool_id,
ip_block_id=ip_block_id,
ip_subnet_id=ip_subnet_id,
size=size,
auto_assign_gateway=auto_assign_gateway,
name=name,
description=description,
tags=tags,
tenant=tenant,
start_ip=start_ip)
ip_subnet_def = core_defs.IpPoolBlockSubnetDef(
nsx_version=self.version, **args)
self._create_or_store(ip_subnet_def)
def release_block_subnet(self, ip_pool_id, ip_subnet_id,
tenant=constants.POLICY_INFRA_TENANT):
ip_subnet_def = core_defs.IpPoolBlockSubnetDef(
ip_subnet_id=ip_subnet_id,
ip_pool_id=ip_pool_id,
tenant=tenant)
self._delete_with_retry(ip_subnet_def)
def list_block_subnets(self, ip_pool_id,
tenant=constants.POLICY_INFRA_TENANT):
ip_subnet_def = core_defs.IpPoolBlockSubnetDef(
ip_pool_id=ip_pool_id,
tenant=tenant)
subnets = self._list(ip_subnet_def)
block_subnets = []
for subnet in subnets:
if subnet['resource_type'] == ip_subnet_def.resource_type():
block_subnets.append(subnet)
return block_subnets
def get_ip_block_subnet(self, ip_pool_id, ip_subnet_id,
tenant=constants.POLICY_INFRA_TENANT):
ip_subnet_def = core_defs.IpPoolBlockSubnetDef(
ip_pool_id=ip_pool_id,
ip_subnet_id=ip_subnet_id,
tenant=tenant)
return self.policy_api.get(ip_subnet_def)
def get_ip_block_subnet_cidr(self, ip_pool_id, ip_subnet_id,
entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
wait=False, sleep=None,
max_attempts=None):
# Retrieve the allocated Subnet CIDR for Subnet ID
# Return None in case the CIDR is not yet allocated
realized_info = self.get_ip_subnet_realization_info(
ip_pool_id, ip_subnet_id, entity_type, tenant, wait,
sleep, max_attempts)
# Returns a list of CIDRs. In case a single value is expected,
# caller must extract the first index to retrieve the CIDR value
return self._get_extended_attr_from_realized_info(
realized_info, requested_attr='cidr')
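    # Illustrative usage sketch (comments only, not executed): carving a
    # subnet out of an IP block and reading the realized CIDR. Note that the
    # helper above returns a list of CIDRs. ``api`` is an instance of this
    # class; the ids are placeholders.
    #
    #   api.allocate_block_subnet(pool_id, block_id, 16,
    #                             ip_subnet_id=sub_id)
    #   cidrs = api.get_ip_block_subnet_cidr(pool_id, sub_id, wait=True)
    #   cidr = cidrs[0] if cidrs else None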
def create_or_update_static_subnet(self, ip_pool_id, cidr,
allocation_ranges, ip_subnet_id=None,
name=IGNORE, description=IGNORE,
gateway_ip=IGNORE, tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
ip_subnet_id = self._init_obj_uuid(ip_subnet_id)
args = self._get_user_args(
ip_pool_id=ip_pool_id,
ip_subnet_id=ip_subnet_id,
cidr=cidr,
allocation_ranges=allocation_ranges,
name=name,
description=description,
tags=tags,
tenant=tenant)
ip_subnet_def = core_defs.IpPoolStaticSubnetDef(**args)
self._create_or_store(ip_subnet_def)
def release_static_subnet(self, ip_pool_id, ip_subnet_id,
tenant=constants.POLICY_INFRA_TENANT):
ip_subnet_def = core_defs.IpPoolStaticSubnetDef(
ip_subnet_id=ip_subnet_id,
ip_pool_id=ip_pool_id,
tenant=tenant)
self._delete_with_retry(ip_subnet_def)
def list_static_subnets(self, ip_pool_id,
tenant=constants.POLICY_INFRA_TENANT):
ip_subnet_def = core_defs.IpPoolStaticSubnetDef(
ip_pool_id=ip_pool_id,
tenant=tenant)
subnets = self._list(ip_subnet_def)
static_subnets = []
for subnet in subnets:
if subnet['resource_type'] == ip_subnet_def.resource_type():
static_subnets.append(subnet)
return static_subnets
def get_static_subnet(self, ip_pool_id, ip_subnet_id,
tenant=constants.POLICY_INFRA_TENANT):
ip_subnet_def = core_defs.IpPoolStaticSubnetDef(
ip_pool_id=ip_pool_id,
ip_subnet_id=ip_subnet_id,
tenant=tenant)
return self.policy_api.get(ip_subnet_def)
def get_realization_info(self, ip_pool_id, entity_type=None,
silent=False,
tenant=constants.POLICY_INFRA_TENANT):
ip_pool_def = self.entry_def(ip_pool_id=ip_pool_id, tenant=tenant)
return self._get_realization_info(ip_pool_def, entity_type=entity_type,
silent=silent)
def get_ip_subnet_realization_info(
self, ip_pool_id, ip_subnet_id,
entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
wait=False, sleep=None,
max_attempts=None,
subnet_type=constants.IPPOOL_BLOCK_SUBNET):
if subnet_type == constants.IPPOOL_BLOCK_SUBNET:
ip_subnet_def = core_defs.IpPoolBlockSubnetDef(
ip_pool_id=ip_pool_id,
ip_subnet_id=ip_subnet_id,
tenant=tenant)
else:
ip_subnet_def = core_defs.IpPoolStaticSubnetDef(
ip_pool_id=ip_pool_id,
ip_subnet_id=ip_subnet_id,
tenant=tenant)
if wait:
return self._wait_until_realized(
ip_subnet_def, entity_type=entity_type,
sleep=sleep, max_attempts=max_attempts)
return self._get_realization_info(ip_subnet_def,
entity_type=entity_type)
def get_ip_alloc_realization_info(self, ip_pool_id, ip_allocation_id,
entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
wait=False, sleep=None,
max_attempts=None):
ip_allocation_def = core_defs.IpPoolAllocationDef(
ip_pool_id=ip_pool_id,
ip_allocation_id=ip_allocation_id,
tenant=tenant)
if wait:
return self._wait_until_realized(
ip_allocation_def, entity_type=entity_type,
sleep=sleep, max_attempts=max_attempts)
return self._get_realization_info(ip_allocation_def,
entity_type=entity_type)
def get_realized_allocated_ip(self, ip_pool_id, ip_allocation_id,
entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
wait=False, sleep=None,
max_attempts=None):
# Retrieve the allocated IpAddress for allocation ID
# Return None in case the IP is not yet allocated
realized_info = self.get_ip_alloc_realization_info(
ip_pool_id, ip_allocation_id, entity_type, tenant, wait,
sleep, max_attempts)
if realized_info:
try:
return realized_info['extended_attributes'][0].get(
'values')[0]
except IndexError:
return
def wait_until_realized(self, ip_pool_id, entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
sleep=None, max_attempts=None):
ip_pool_def = self.entry_def(ip_pool_id=ip_pool_id, tenant=tenant)
return self._wait_until_realized(ip_pool_def, entity_type=entity_type,
sleep=sleep,
max_attempts=max_attempts)
class NsxPolicySecurityPolicyBaseApi(NsxPolicyResourceBase):
def _get_last_seq_num(self, domain_id, map_id,
tenant=constants.POLICY_INFRA_TENANT):
# get the current entries, and choose the next unused sequence number
        # among the entries under the same communication map
try:
com_map = self.get(domain_id, map_id, tenant=tenant)
com_entries = com_map.get('rules')
except exceptions.ResourceNotFound:
return -1
if not com_entries:
return 0
seq_nums = [int(cm['sequence_number']) for cm in com_entries]
seq_nums.sort()
return seq_nums[-1]
def _get_seq_num(self, last_sequence):
if last_sequence < 0:
return 1
return last_sequence + 1
def create_or_overwrite(self, name, domain_id, map_id=None,
description=IGNORE,
category=constants.CATEGORY_APPLICATION,
sequence_number=None, service_ids=IGNORE,
action=constants.ACTION_ALLOW,
scope=IGNORE,
source_groups=IGNORE, dest_groups=IGNORE,
direction=nsx_constants.IN_OUT,
logged=IGNORE, tags=IGNORE,
map_sequence_number=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
"""Create CommunicationMap & Entry.
source_groups/dest_groups should be a list of group ids belonging
to the domain.
        NOTE: In a multi-connection environment, it is recommended to execute
        this call under a lock to prevent a race condition where two entries
        end up with the same sequence number.
"""
last_sequence = -1
if map_id:
if not sequence_number:
# get the next available sequence number
last_sequence = self._get_last_seq_num(domain_id, map_id,
tenant=tenant)
else:
map_id = self._init_obj_uuid(map_id)
if not sequence_number:
sequence_number = self._get_seq_num(last_sequence)
# Build the communication entry. Since we currently support only one
# it will have the same id as its parent
entry_def = self._init_def(
domain_id=domain_id,
map_id=map_id,
entry_id=self.SINGLE_ENTRY_ID,
name=name,
description=description,
sequence_number=sequence_number,
source_groups=source_groups,
dest_groups=dest_groups,
service_ids=service_ids,
action=action,
scope=scope,
direction=direction,
logged=logged,
tenant=tenant)
map_def = self._init_parent_def(
domain_id=domain_id, map_id=map_id,
tenant=tenant, name=name, description=description,
category=category, tags=tags,
map_sequence_number=map_sequence_number)
self._create_or_store(map_def, entry_def)
return map_id
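    # Illustrative usage sketch (comments only, not executed): creating a
    # single-entry security policy as described in the docstring above.
    # ``api`` is an instance of this class; the ids are placeholders, and
    # the call should be protected by a lock when multiple connections may
    # race on sequence numbers.
    #
    #   map_id = api.create_or_overwrite(
    #       'allow-web', domain_id,
    #       service_ids=[http_svc_id],
    #       source_groups=[src_grp_id],
    #       dest_groups=[web_grp_id])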
def create_or_overwrite_map_only(
self, name, domain_id, map_id=None, description=IGNORE,
category=constants.CATEGORY_APPLICATION,
tags=IGNORE, map_sequence_number=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
"""Create or update a CommunicationMap
Create a communication map without any entries, or update the
communication map itself, leaving the entries unchanged.
"""
map_id = self._init_obj_uuid(map_id)
map_def = self._init_parent_def(
domain_id=domain_id, map_id=map_id,
tenant=tenant, name=name, description=description,
category=category, tags=tags,
map_sequence_number=map_sequence_number)
self._create_or_store(map_def)
return map_id
def build_entry(self, name, domain_id, map_id, entry_id=None,
description=None,
sequence_number=None, service_ids=None,
action=constants.ACTION_ALLOW,
scope=None,
source_groups=None, dest_groups=None,
direction=nsx_constants.IN_OUT, logged=False, tag=None,
ip_protocol=nsx_constants.IPV4_IPV6,
service_entries=IGNORE,
tenant=constants.POLICY_INFRA_TENANT,
plain_groups=False):
"""Get the definition of a single map entry.
        plain_groups should be True if source_groups/dest_groups is a list
        of group paths and IP addresses. IP addresses are supported starting
        from NSX 3.0.0.
"""
if (version.LooseVersion(self.version) <
version.LooseVersion(nsx_constants.NSX_VERSION_3_0_0) and
plain_groups):
err_msg = _("plain_groups support is from NSX 3.0.0")
raise exceptions.NsxLibInvalidInput(error_message=err_msg)
entry_id = self._init_obj_uuid(entry_id)
return self._init_def(domain_id=domain_id,
map_id=map_id,
entry_id=entry_id,
name=name,
description=description,
sequence_number=sequence_number,
source_groups=source_groups,
dest_groups=dest_groups,
service_ids=service_ids,
action=action,
scope=scope,
direction=direction,
ip_protocol=ip_protocol,
logged=logged,
tag=tag,
service_entries=service_entries,
tenant=tenant,
plain_groups=plain_groups)
def create_with_entries(
self, name, domain_id, map_id=None,
description=IGNORE,
category=constants.CATEGORY_APPLICATION,
entries=None, tags=IGNORE, map_sequence_number=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
"""Create CommunicationMap with entries"""
map_id = self._init_obj_uuid(map_id)
map_def = self._init_parent_def(
domain_id=domain_id, map_id=map_id,
tenant=tenant, name=name, description=description,
category=category, tags=tags,
map_sequence_number=map_sequence_number)
# in case the same object was just deleted, create may need to
# be retried
@utils.retry_upon_exception(
exceptions.NsxPendingDelete,
delay=self.nsxlib_config.realization_wait_sec,
max_attempts=self.nsxlib_config.realization_max_attempts)
def _do_create_with_retry():
self._create_or_store(map_def, entries)
_do_create_with_retry()
return map_id
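    # Illustrative usage sketch (comments only, not executed): building entry
    # definitions first and creating the map with all of them in one call.
    # ``api`` is an instance of this class; ``map_id`` is a caller-chosen
    # uuid and the other ids are placeholders.
    #
    #   entry = api.build_entry('allow-http', domain_id, map_id,
    #                           sequence_number=1,
    #                           service_ids=[http_svc_id],
    #                           source_groups=[src_grp_id],
    #                           dest_groups=[web_grp_id])
    #   api.create_with_entries('http-map', domain_id, map_id=map_id,
    #                           entries=[entry])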
def create_entry(self, name, domain_id, map_id, entry_id=None,
description=None, sequence_number=None, service_ids=None,
action=constants.ACTION_ALLOW,
source_groups=None, dest_groups=None,
scope=None, tags=IGNORE,
ip_protocol=nsx_constants.IPV4_IPV6,
direction=nsx_constants.IN_OUT,
logged=False, tag=None,
service_entries=IGNORE,
tenant=constants.POLICY_INFRA_TENANT,
plain_groups=False):
"""Create CommunicationMap Entry.
source_groups/dest_groups should be a list of group ids belonging
to the domain.
        plain_groups should be True if source_groups/dest_groups is a list
        of group paths and IP addresses. IP addresses are supported starting
        from NSX 3.0.0.
"""
if (version.LooseVersion(self.version) <
version.LooseVersion(nsx_constants.NSX_VERSION_3_0_0) and
plain_groups):
err_msg = _("plain_groups support is from NSX 3.0.0")
raise exceptions.NsxLibInvalidInput(error_message=err_msg)
# get the next available sequence number
if not sequence_number:
last_sequence = self._get_last_seq_num(domain_id, map_id,
tenant=tenant)
sequence_number = self._get_seq_num(last_sequence)
entry_id = self._init_obj_uuid(entry_id)
# Build the communication entry
entry_def = self._init_def(domain_id=domain_id,
map_id=map_id,
entry_id=entry_id,
name=name,
description=description,
sequence_number=sequence_number,
source_groups=source_groups,
dest_groups=dest_groups,
service_ids=service_ids,
action=action,
scope=scope,
ip_protocol=ip_protocol,
direction=direction,
logged=logged,
tag=tag,
tags=tags,
service_entries=service_entries,
tenant=tenant,
plain_groups=plain_groups)
self._create_or_store(entry_def)
return entry_id
def create_entry_from_def(self, entry_def):
"""Create CommunicationMap Entry from a predefined entry def"""
self._create_or_store(entry_def)
def delete(self, domain_id, map_id,
tenant=constants.POLICY_INFRA_TENANT):
map_def = self._init_parent_def(
domain_id=domain_id,
map_id=map_id,
tenant=tenant)
self._delete_with_retry(map_def)
def delete_entry(self, domain_id, map_id, entry_id,
tenant=constants.POLICY_INFRA_TENANT):
entry_def = self.entry_def(
domain_id=domain_id,
map_id=map_id,
entry_id=entry_id,
tenant=tenant)
self._delete_with_retry(entry_def)
def get(self, domain_id, map_id,
tenant=constants.POLICY_INFRA_TENANT, silent=False):
map_def = self.parent_entry_def(
domain_id=domain_id,
map_id=map_id,
tenant=tenant)
return self.policy_api.get(map_def, silent=silent)
def get_entry(self, domain_id, map_id, entry_id,
tenant=constants.POLICY_INFRA_TENANT, silent=False):
entry_def = self.entry_def(
domain_id=domain_id,
map_id=map_id,
entry_id=entry_id,
tenant=tenant)
return self.policy_api.get(entry_def, silent=silent)
def get_by_name(self, domain_id, name,
tenant=constants.POLICY_INFRA_TENANT):
"""Return first communication map entry matched by name"""
return super(NsxPolicySecurityPolicyBaseApi, self).get_by_name(
name, domain_id, tenant=tenant)
def list(self, domain_id,
tenant=constants.POLICY_INFRA_TENANT):
"""List all the map entries of a specific domain."""
map_def = self.parent_entry_def(
domain_id=domain_id,
tenant=tenant)
return self._list(map_def)
def update(self, domain_id, map_id,
name=IGNORE, description=IGNORE,
sequence_number=IGNORE, service_ids=IGNORE,
action=IGNORE,
source_groups=IGNORE, dest_groups=IGNORE,
direction=IGNORE, logged=IGNORE, tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
# Note(asarfaty): Category is mandatory in update calls for now
# although it cannot change. Getting it from the NSX
orig_entry = self.get(domain_id, map_id, tenant=tenant)
category = orig_entry.get('category')
parent_def = self._init_parent_def(
domain_id=domain_id,
map_id=map_id,
name=name,
description=description,
category=category,
tags=tags,
tenant=tenant)
if self._any_arg_set(sequence_number, service_ids,
action, source_groups, dest_groups,
direction, logged):
# Update the entry only if relevant attributes were changed
entry_def = self._get_and_update_def(
domain_id=domain_id,
map_id=map_id,
entry_id=self.SINGLE_ENTRY_ID,
service_ids=service_ids,
source_groups=source_groups,
dest_groups=dest_groups,
sequence_number=sequence_number,
action=action,
direction=direction,
logged=logged,
tenant=tenant)
self.policy_api.create_with_parent(parent_def, entry_def)
else:
self.policy_api.create_or_update(parent_def)
def update_entry(self, domain_id, map_id, entry_id,
name=IGNORE, description=IGNORE,
sequence_number=IGNORE, service_ids=IGNORE,
action=IGNORE, source_groups=IGNORE, dest_groups=IGNORE,
scope=IGNORE, ip_protocol=IGNORE,
direction=IGNORE, logged=IGNORE, tags=IGNORE, tag=IGNORE,
service_entries=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
if self._any_arg_set(name, description, sequence_number, service_ids,
action, source_groups, dest_groups, scope,
ip_protocol, direction, logged, tags):
entry_def = self._get_and_update_def(
domain_id=domain_id,
map_id=map_id,
entry_id=entry_id,
name=name,
description=description,
sequence_number=sequence_number,
service_ids=service_ids,
action=action,
source_groups=source_groups,
dest_groups=dest_groups,
scope=scope,
ip_protocol=ip_protocol,
direction=direction,
logged=logged,
tags=tags,
tag=tag,
service_entries=service_entries,
tenant=tenant)
self.policy_api.create_or_update(entry_def)
def update_entries(self, domain_id, map_id, entries,
category=constants.CATEGORY_APPLICATION,
use_child_rules=True,
tenant=constants.POLICY_INFRA_TENANT):
self.update_with_entries(domain_id, map_id, entries, category=category,
use_child_rules=use_child_rules,
tenant=tenant)
def update_with_entries(self, domain_id, map_id, entries=IGNORE,
name=IGNORE, description=IGNORE,
category=constants.CATEGORY_APPLICATION,
tags=IGNORE, map_sequence_number=IGNORE,
use_child_rules=True,
tenant=constants.POLICY_INFRA_TENANT):
map_def = self._init_parent_def(
domain_id=domain_id, map_id=map_id,
tenant=tenant, name=name, description=description,
category=category, tags=tags,
map_sequence_number=map_sequence_number)
map_path = map_def.get_resource_path()
def _overwrite_entries(old_entries, new_entries, transaction):
# Replace old entries with new entries, but copy additional
# attributes from old entries for those kept in new entries
            # and mark the unwanted ones among the old entries as deleted,
            # when running inside a transaction.
old_rules = {entry["id"]: entry for entry in old_entries}
replaced_entries = []
for entry in new_entries:
rule_id = entry.get_id()
new_rule = entry.get_obj_dict()
old_rule = old_rules.get(rule_id)
if old_rule:
old_rules.pop(rule_id)
for key, value in old_rule.items():
if key not in new_rule:
new_rule[key] = value
replaced_entries.append(
self.entry_def.adapt_from_rule_dict(
new_rule, domain_id, map_id))
if transaction:
replaced_entries.extend(
_mark_delete_entries(old_rules.values()))
return replaced_entries
def _mark_delete_entries(delete_rule_dicts):
delete_entries = []
for delete_rule_dict in delete_rule_dicts:
delete_entry = self.entry_def.adapt_from_rule_dict(
delete_rule_dict, domain_id, map_id)
delete_entry.set_delete()
delete_entries.append(delete_entry)
return delete_entries
@utils.retry_upon_exception(
exceptions.StaleRevision,
max_attempts=self.policy_api.client.max_attempts)
def _update():
transaction = trans.NsxPolicyTransaction.get_current()
# Get the current data of communication map & its entries
comm_map = self.policy_api.get(map_def)
replaced_entries = None
ignore_entries = (entries == IGNORE)
if not ignore_entries:
replaced_entries = _overwrite_entries(comm_map['rules'],
entries, transaction)
comm_map.pop('rules')
map_def.set_obj_dict(comm_map)
# Update the entire map at the NSX
if transaction:
if use_child_rules:
self._create_or_store(map_def, replaced_entries)
else:
if not ignore_entries:
# Add the rules under the map and not as ChildRules for
# improved performance on the NSX side
comm_map['rules'] = [rule.get_obj_dict() for rule in
replaced_entries]
map_def.set_obj_dict(comm_map)
self._create_or_store(map_def)
else:
body = map_def.get_obj_dict()
if not ignore_entries:
body['rules'] = [rule.get_obj_dict() for rule in
replaced_entries]
self.policy_api.client.update(map_path, body)
_update()
def patch_entries(self, domain_id, map_id, entries,
tenant=constants.POLICY_INFRA_TENANT):
        # Specify that we want to use a ChildResourceReference
map_def = self._init_parent_def(
domain_id=domain_id, map_id=map_id, tenant=tenant,
child_resource_ref=True)
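        # Note: patch_entries only works inside an H-API transaction (enforced
        # below). A typical call presumably looks like the following sketch;
        # the exact transaction entry point is an assumption:
        #   with trans.NsxPolicyTransaction():
        #       api.patch_entries(domain_id, map_id, entries)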
@utils.retry_upon_exception(
exceptions.StaleRevision,
max_attempts=self.policy_api.client.max_attempts)
def _update():
transaction = trans.NsxPolicyTransaction.get_current()
if not transaction:
err_msg = ("patch_entries can only be used within "
"H-API transactions")
raise exceptions.ManagerException(
details=err_msg)
patch_entries = []
for entry in entries:
rule = entry.get_obj_dict()
LOG.debug("#### ADDING ENTRY: %s", entry)
patch_entries.append(
self.entry_def.adapt_from_rule_dict(
rule, domain_id, map_id))
self._create_or_store(map_def, patch_entries)
_update()
def update_entries_logged(self, domain_id, map_id, logged,
tenant=constants.POLICY_INFRA_TENANT):
"""Update all communication map entries logged flags"""
map_def = self.parent_entry_def(
domain_id=domain_id,
map_id=map_id,
tenant=tenant)
map_path = map_def.get_resource_path()
@utils.retry_upon_exception(
exceptions.StaleRevision,
max_attempts=self.policy_api.client.max_attempts)
def _update():
            # Get the current data of the communication map & its entries
comm_map = self.policy_api.get(map_def)
# Update the field in all the entries
if comm_map.get('rules'):
for comm_entry in comm_map['rules']:
comm_entry['logged'] = logged
# Update the entire map at the NSX
self.policy_api.client.update(map_path, comm_map)
_update()
def get_realized_state(self, domain_id, map_id, entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
realization_info=None):
map_def = self.parent_entry_def(map_id=map_id,
domain_id=domain_id,
tenant=tenant)
return self._get_realized_state(map_def, entity_type=entity_type,
realization_info=realization_info)
def get_realized_id(self, domain_id, map_id, entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
realization_info=None):
map_def = self.parent_entry_def(map_id=map_id,
domain_id=domain_id,
tenant=tenant)
return self._get_realized_id(map_def, entity_type=entity_type,
realization_info=realization_info)
def get_realization_info(self, domain_id, map_id, entity_type=None,
silent=False,
tenant=constants.POLICY_INFRA_TENANT):
map_def = self.parent_entry_def(map_id=map_id,
domain_id=domain_id,
tenant=tenant)
return self._get_realization_info(map_def, entity_type=entity_type,
silent=silent)
def wait_until_realized(self, domain_id, map_id, entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
sleep=None, max_attempts=None):
map_def = self.parent_entry_def(map_id=map_id,
domain_id=domain_id,
tenant=tenant)
return self._wait_until_realized(map_def, entity_type=entity_type,
sleep=sleep,
max_attempts=max_attempts)
def wait_until_state_sucessful(self, domain_id, map_id,
tenant=constants.POLICY_INFRA_TENANT,
sleep=None, max_attempts=None,
with_refresh=False):
map_def = self.parent_entry_def(map_id=map_id,
domain_id=domain_id,
tenant=tenant)
self._wait_until_state_successful(map_def, sleep=sleep,
max_attempts=max_attempts,
with_refresh=with_refresh)
class NsxPolicyCommunicationMapApi(NsxPolicySecurityPolicyBaseApi):
"""NSX Policy CommunicationMap (Under a Domain). AKA Security"""
@property
def entry_def(self):
return core_defs.CommunicationMapEntryDef
@property
def parent_entry_def(self):
return core_defs.CommunicationMapDef
class NsxPolicyGatewayPolicyApi(NsxPolicySecurityPolicyBaseApi):
"""NSX Policy Gateway policy (Edge firewall)"""
@property
def entry_def(self):
return core_defs.GatewayPolicyRuleDef
@property
def parent_entry_def(self):
return core_defs.GatewayPolicyDef
class NsxPolicyEnforcementPointApi(NsxPolicyResourceBase):
"""NSX Policy Enforcement Point."""
@property
def entry_def(self):
return core_defs.EnforcementPointDef
def create_or_overwrite(self, name, ep_id=None, description=IGNORE,
ip_address=IGNORE, username=IGNORE,
password=IGNORE, thumbprint=IGNORE,
edge_cluster_id=IGNORE,
transport_zone_id=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
if not ip_address or not username or password is None:
err_msg = (_("Cannot create an enforcement point without "
"ip_address, username and password"))
raise exceptions.ManagerError(details=err_msg)
ep_id = self._init_obj_uuid(ep_id)
ep_def = self._init_def(ep_id=ep_id,
name=name,
description=description,
ip_address=ip_address,
username=username,
password=password,
thumbprint=thumbprint,
edge_cluster_id=edge_cluster_id,
transport_zone_id=transport_zone_id,
tenant=tenant)
self._create_or_store(ep_def)
return ep_id
def delete(self, ep_id,
tenant=constants.POLICY_INFRA_TENANT):
ep_def = core_defs.EnforcementPointDef(
ep_id=ep_id, tenant=tenant)
self._delete_with_retry(ep_def)
def get(self, ep_id,
tenant=constants.POLICY_INFRA_TENANT, silent=False):
ep_def = core_defs.EnforcementPointDef(
ep_id=ep_id, tenant=tenant)
return self.policy_api.get(ep_def, silent=silent)
def list(self, tenant=constants.POLICY_INFRA_TENANT):
ep_def = core_defs.EnforcementPointDef(tenant=tenant)
return self._list(ep_def)
def update(self, ep_id, name=IGNORE, description=IGNORE,
ip_address=IGNORE, username=IGNORE,
password=IGNORE, thumbprint=IGNORE,
edge_cluster_id=IGNORE, transport_zone_id=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
"""Update the enforcement point.
username & password must be defined
"""
if not username or password is None:
# username/password must be provided
err_msg = (_("Cannot update an enforcement point without "
"username and password"))
raise exceptions.ManagerError(details=err_msg)
# Get the original body because ip & thumbprint are mandatory
ep_def = self._get_and_update_def(ep_id=ep_id,
name=name,
description=description,
ip_address=ip_address,
username=username,
password=password,
edge_cluster_id=edge_cluster_id,
transport_zone_id=transport_zone_id,
thumbprint=thumbprint,
tenant=tenant)
self.policy_api.create_or_update(ep_def)
def get_realized_state(self, ep_id, entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
realization_info=None):
ep_def = core_defs.EnforcementPointDef(ep_id=ep_id, tenant=tenant)
return self._get_realized_state(ep_def, entity_type=entity_type,
realization_info=realization_info)
def get_realization_info(self, ep_id, entity_type=None,
silent=False,
tenant=constants.POLICY_INFRA_TENANT,
realization_info=None):
ep_def = core_defs.EnforcementPointDef(ep_id=ep_id, tenant=tenant)
return self._get_realization_info(ep_def, entity_type=entity_type,
silent=silent,
realization_info=realization_info)
def reload(self, ep_id, tenant=constants.POLICY_INFRA_TENANT):
# Use post command to reload the enforcement point
ep_def = core_defs.EnforcementPointDef(ep_id=ep_id, tenant=tenant)
path = "%s?action=reload" % ep_def.get_resource_path()
self.policy_api.client.create(path)
class NsxPolicyTransportZoneApi(NsxPolicyResourceBase):
TZ_TYPE_OVERLAY = 'OVERLAY_STANDARD'
TZ_TYPE_ENS = 'OVERLAY_ENS'
TZ_TYPE_VLAN = 'VLAN_BACKED'
@property
def entry_def(self):
return core_defs.TransportZoneDef
def get(self, tz_id, ep_id=constants.DEFAULT_ENFORCEMENT_POINT,
tenant=constants.POLICY_INFRA_TENANT, silent=False):
tz_def = core_defs.TransportZoneDef(
ep_id=ep_id, tz_id=tz_id, tenant=tenant)
return self.policy_api.get(tz_def, silent=silent)
def get_tz_type(self, tz_id,
ep_id=constants.DEFAULT_ENFORCEMENT_POINT,
tenant=constants.POLICY_INFRA_TENANT):
tz = self.get(tz_id, ep_id=ep_id, tenant=tenant)
return tz.get('tz_type')
def get_transport_type(self, tz_id,
ep_id=constants.DEFAULT_ENFORCEMENT_POINT,
tenant=constants.POLICY_INFRA_TENANT):
"""This api is consistent with the nsx manager resource api"""
tz_type = self.get_tz_type(tz_id, ep_id=ep_id, tenant=tenant)
if tz_type == self.TZ_TYPE_VLAN:
return nsx_constants.TRANSPORT_TYPE_VLAN
else:
return nsx_constants.TRANSPORT_TYPE_OVERLAY
def get_host_switch_mode(self, tz_id,
ep_id=constants.DEFAULT_ENFORCEMENT_POINT,
tenant=constants.POLICY_INFRA_TENANT):
"""This api is consistent with the nsx manager resource api"""
tz_type = self.get_tz_type(tz_id, ep_id=ep_id, tenant=tenant)
if tz_type == self.TZ_TYPE_ENS:
return nsx_constants.HOST_SWITCH_MODE_ENS
else:
return nsx_constants.HOST_SWITCH_MODE_STANDARD
def list(self, ep_id=constants.DEFAULT_ENFORCEMENT_POINT,
tenant=constants.POLICY_INFRA_TENANT):
tz_def = core_defs.TransportZoneDef(ep_id=ep_id, tenant=tenant)
return self._list(tz_def)
def get_by_name(self, name,
ep_id=constants.DEFAULT_ENFORCEMENT_POINT,
tenant=constants.POLICY_INFRA_TENANT):
"""Return first group matched by name"""
return super(NsxPolicyTransportZoneApi, self).get_by_name(
name, ep_id, tenant=tenant)
def create_or_overwrite(self, name, tz_id=None,
ep_id=constants.DEFAULT_ENFORCEMENT_POINT,
tenant=constants.POLICY_INFRA_TENANT):
err_msg = (_("This action is not supported"))
raise exceptions.ManagerError(details=err_msg)
def update(self, tz_id,
ep_id=constants.DEFAULT_ENFORCEMENT_POINT,
tenant=constants.POLICY_INFRA_TENANT):
err_msg = (_("This action is not supported"))
raise exceptions.ManagerError(details=err_msg)
def delete(self, tz_id,
ep_id=constants.DEFAULT_ENFORCEMENT_POINT,
tenant=constants.POLICY_INFRA_TENANT):
err_msg = (_("This action is not supported"))
raise exceptions.ManagerError(details=err_msg)
class NsxPolicyEdgeClusterApi(NsxPolicyResourceBase):
@property
def entry_def(self):
return core_defs.EdgeClusterDef
def get(self, ec_id, ep_id=constants.DEFAULT_ENFORCEMENT_POINT,
tenant=constants.POLICY_INFRA_TENANT, silent=False):
ec_def = core_defs.EdgeClusterDef(
ep_id=ep_id, ec_id=ec_id, tenant=tenant)
return self.policy_api.get(ec_def, silent=silent)
def list(self, ep_id=constants.DEFAULT_ENFORCEMENT_POINT,
tenant=constants.POLICY_INFRA_TENANT):
ec_def = core_defs.EdgeClusterDef(ep_id=ep_id, tenant=tenant)
return self._list(ec_def)
def get_by_name(self, name,
ep_id=constants.DEFAULT_ENFORCEMENT_POINT,
tenant=constants.POLICY_INFRA_TENANT):
"""Return first group matched by name"""
return super(NsxPolicyEdgeClusterApi, self).get_by_name(
name, ep_id, tenant=tenant)
def create_or_overwrite(self, name, ec_id=None,
ep_id=constants.DEFAULT_ENFORCEMENT_POINT,
tenant=constants.POLICY_INFRA_TENANT):
err_msg = (_("This action is not supported"))
raise exceptions.ManagerError(details=err_msg)
def update(self, ec_id,
ep_id=constants.DEFAULT_ENFORCEMENT_POINT,
tenant=constants.POLICY_INFRA_TENANT):
err_msg = (_("This action is not supported"))
raise exceptions.ManagerError(details=err_msg)
def delete(self, ec_id,
ep_id=constants.DEFAULT_ENFORCEMENT_POINT,
tenant=constants.POLICY_INFRA_TENANT):
err_msg = (_("This action is not supported"))
raise exceptions.ManagerError(details=err_msg)
def get_path(self, ec_id,
ep_id=constants.DEFAULT_ENFORCEMENT_POINT,
tenant=constants.POLICY_INFRA_TENANT):
ec_def = core_defs.EdgeClusterDef(
ep_id=ep_id, ec_id=ec_id, tenant=tenant)
return ec_def.get_resource_full_path()
def get_edge_node_ids(self, ec_id,
ep_id=constants.DEFAULT_ENFORCEMENT_POINT,
tenant=constants.POLICY_INFRA_TENANT):
nodes_def = core_defs.EdgeClusterNodeDef(
ep_id=ep_id, ec_id=ec_id, tenant=tenant)
nodes = self._list(nodes_def)
return [node['id'] for node in nodes]
def get_edge_node_nsx_ids(self, ec_id,
ep_id=constants.DEFAULT_ENFORCEMENT_POINT,
tenant=constants.POLICY_INFRA_TENANT):
nodes_def = core_defs.EdgeClusterNodeDef(
ep_id=ep_id, ec_id=ec_id, tenant=tenant)
nodes = self._list(nodes_def)
return [node.get('nsx_id', node['id']) for node in nodes]
class NsxPolicyMetadataProxyApi(NsxPolicyResourceBase):
    # Currently this is used as a read-only API
@property
def entry_def(self):
return core_defs.MetadataProxyDef
def get(self, mdproxy_id,
tenant=constants.POLICY_INFRA_TENANT, silent=False):
md_def = core_defs.MetadataProxyDef(
mdproxy_id=mdproxy_id, tenant=tenant)
return self.policy_api.get(md_def, silent=silent)
def list(self, tenant=constants.POLICY_INFRA_TENANT):
md_def = core_defs.MetadataProxyDef(tenant=tenant)
return self._list(md_def)
def get_by_name(self, name,
tenant=constants.POLICY_INFRA_TENANT):
return super(NsxPolicyMetadataProxyApi, self).get_by_name(
name, tenant=tenant)
def create_or_overwrite(self, name, mdproxy_id=None,
tenant=constants.POLICY_INFRA_TENANT):
err_msg = (_("This action is not supported"))
raise exceptions.ManagerError(details=err_msg)
def update(self, mdproxy_id,
tenant=constants.POLICY_INFRA_TENANT):
err_msg = (_("This action is not supported"))
raise exceptions.ManagerError(details=err_msg)
def delete(self, mdproxy_id,
tenant=constants.POLICY_INFRA_TENANT):
err_msg = (_("This action is not supported"))
raise exceptions.ManagerError(details=err_msg)
def get_path(self, mdproxy_id,
tenant=constants.POLICY_INFRA_TENANT):
md_def = core_defs.MetadataProxyDef(
mdproxy_id=mdproxy_id, tenant=tenant)
return md_def.get_resource_full_path()
class NsxPolicyDeploymentMapApi(NsxPolicyResourceBase):
"""NSX Policy Deployment Map."""
@property
def entry_def(self):
return core_defs.DeploymentMapDef
def create_or_overwrite(self, name, map_id=None,
description=IGNORE,
ep_id=IGNORE, domain_id=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
map_id = self._init_obj_uuid(map_id)
map_def = core_defs.DeploymentMapDef(
map_id=map_id,
name=name,
description=description,
ep_id=ep_id,
domain_id=domain_id,
tenant=tenant)
self._create_or_store(map_def)
return map_id
def delete(self, map_id, domain_id=None,
tenant=constants.POLICY_INFRA_TENANT):
if not domain_id:
# domain_id must be provided
err_msg = (_("Cannot delete deployment maps without a domain"))
raise exceptions.ManagerError(details=err_msg)
map_def = core_defs.DeploymentMapDef(
map_id=map_id, domain_id=domain_id, tenant=tenant)
self._delete_with_retry(map_def)
def get(self, map_id, domain_id=None,
tenant=constants.POLICY_INFRA_TENANT, silent=False):
if not domain_id:
# domain_id must be provided
err_msg = (_("Cannot get deployment maps without a domain"))
raise exceptions.ManagerError(details=err_msg)
map_def = core_defs.DeploymentMapDef(
map_id=map_id, domain_id=domain_id, tenant=tenant)
return self.policy_api.get(map_def, silent=silent)
def list(self, domain_id=None,
tenant=constants.POLICY_INFRA_TENANT):
if not domain_id:
# domain_id must be provided
err_msg = (_("Cannot list deployment maps without a domain"))
raise exceptions.ManagerError(details=err_msg)
map_def = core_defs.DeploymentMapDef(domain_id=domain_id,
tenant=tenant)
return self._list(map_def)
def update(self, map_id, name=IGNORE, description=IGNORE,
ep_id=IGNORE, domain_id=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
self._update(map_id=map_id,
name=name,
description=description,
ep_id=ep_id,
domain_id=domain_id,
tenant=tenant)
class NsxSegmentProfileBaseApi(NsxPolicyResourceBase):
"""NSX Segment Profile base API"""
def create_or_overwrite(self, name,
profile_id=None,
description=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
profile_id = self._init_obj_uuid(profile_id)
profile_def = self._init_def(profile_id=profile_id,
name=name,
description=description,
tags=tags,
tenant=tenant)
self._create_or_store(profile_def)
return profile_id
def delete(self, profile_id, tenant=constants.POLICY_INFRA_TENANT):
profile_def = self.entry_def(profile_id=profile_id,
tenant=tenant)
self._delete_with_retry(profile_def)
def get(self, profile_id, tenant=constants.POLICY_INFRA_TENANT,
silent=False):
profile_def = self.entry_def(profile_id=profile_id,
tenant=tenant)
return self.policy_api.get(profile_def, silent=silent)
def list(self, tenant=constants.POLICY_INFRA_TENANT):
profile_def = self.entry_def(tenant=tenant)
return self._list(profile_def)
def get_by_name(self, name, tenant=constants.POLICY_INFRA_TENANT):
return super(NsxSegmentProfileBaseApi, self).get_by_name(
name, tenant=tenant)
def update(self, profile_id, name=IGNORE, description=IGNORE,
tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT):
self._update(profile_id=profile_id,
name=name,
description=description,
tags=tags,
tenant=tenant)
def get_path(self, profile_id, tenant=constants.POLICY_INFRA_TENANT):
profile_def = self.entry_def(profile_id=profile_id, tenant=tenant)
return profile_def.get_resource_full_path()
class NsxSegmentSecurityProfileApi(NsxSegmentProfileBaseApi):
@property
def entry_def(self):
return core_defs.SegmentSecurityProfileDef
def create_or_overwrite(self, name,
profile_id=None,
description=IGNORE,
bpdu_filter_enable=IGNORE,
dhcp_client_block_enabled=IGNORE,
dhcp_client_block_v6_enabled=IGNORE,
dhcp_server_block_enabled=IGNORE,
dhcp_server_block_v6_enabled=IGNORE,
non_ip_traffic_block_enabled=IGNORE,
ra_guard_enabled=IGNORE,
rate_limits_enabled=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
profile_id = self._init_obj_uuid(profile_id)
profile_def = self._init_def(
profile_id=profile_id,
name=name,
description=description,
bpdu_filter_enable=bpdu_filter_enable,
dhcp_client_block_enabled=dhcp_client_block_enabled,
dhcp_client_block_v6_enabled=dhcp_client_block_v6_enabled,
dhcp_server_block_enabled=dhcp_server_block_enabled,
dhcp_server_block_v6_enabled=dhcp_server_block_v6_enabled,
non_ip_traffic_block_enabled=non_ip_traffic_block_enabled,
ra_guard_enabled=ra_guard_enabled,
rate_limits_enabled=rate_limits_enabled,
tags=tags,
tenant=tenant)
self._create_or_store(profile_def)
return profile_id
class NsxQosProfileApi(NsxSegmentProfileBaseApi):
@property
def entry_def(self):
return core_defs.QosProfileDef
def _build_rate_limiter(self, resource_type, average_bandwidth,
peak_bandwidth, burst_size, enabled):
return core_defs.QoSRateLimiter(
resource_type=resource_type,
average_bandwidth=average_bandwidth,
peak_bandwidth=peak_bandwidth,
burst_size=burst_size,
enabled=enabled)
def build_ingress_rate_limiter(
self,
average_bandwidth=None,
peak_bandwidth=None,
burst_size=None,
enabled=True):
return self._build_rate_limiter(
resource_type=core_defs.QoSRateLimiter.INGRESS_RATE_LIMITER_TYPE,
average_bandwidth=average_bandwidth,
peak_bandwidth=peak_bandwidth,
burst_size=burst_size,
enabled=enabled)
def build_egress_rate_limiter(
self,
average_bandwidth=None,
peak_bandwidth=None,
burst_size=None,
enabled=True):
return self._build_rate_limiter(
resource_type=core_defs.QoSRateLimiter.EGRESS_RATE_LIMITER_TYPE,
average_bandwidth=average_bandwidth,
peak_bandwidth=peak_bandwidth,
burst_size=burst_size,
enabled=enabled)
def build_dscp(self, trusted=False, priority=None):
mode = (core_defs.QoSDscp.QOS_DSCP_TRUSTED if trusted
else core_defs.QoSDscp.QOS_DSCP_UNTRUSTED)
return core_defs.QoSDscp(mode=mode, priority=priority)
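    # Illustrative sketch of how the builders above feed create_or_overwrite
    # ('qos_api' and the literal values are assumptions):
    #   ingress = qos_api.build_ingress_rate_limiter(average_bandwidth=1000,
    #                                                peak_bandwidth=2000,
    #                                                burst_size=51200)
    #   dscp = qos_api.build_dscp(trusted=False, priority=5)
    #   qos_api.create_or_overwrite('qos-profile-1',
    #                               shaper_configurations=[ingress],
    #                               dscp=dscp)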
def create_or_overwrite(self, name,
profile_id=None,
description=IGNORE,
class_of_service=IGNORE,
dscp=IGNORE,
shaper_configurations=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
profile_id = self._init_obj_uuid(profile_id)
profile_def = self._init_def(
profile_id=profile_id,
name=name,
description=description,
class_of_service=class_of_service,
dscp=dscp,
shaper_configurations=shaper_configurations,
tags=tags,
tenant=tenant)
self._create_or_store(profile_def)
return profile_id
class NsxSpoofguardProfileApi(NsxSegmentProfileBaseApi):
@property
def entry_def(self):
return core_defs.SpoofguardProfileDef
def create_or_overwrite(self, name,
profile_id=None,
description=IGNORE,
address_binding_whitelist=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
profile_id = self._init_obj_uuid(profile_id)
profile_def = self._init_def(
profile_id=profile_id,
name=name,
description=description,
address_binding_whitelist=address_binding_whitelist,
tags=tags,
tenant=tenant)
self._create_or_store(profile_def)
return profile_id
class NsxIpDiscoveryProfileApi(NsxSegmentProfileBaseApi):
@property
def entry_def(self):
return core_defs.IpDiscoveryProfileDef
class NsxWAFProfileApi(NsxSegmentProfileBaseApi):
@property
def entry_def(self):
return core_defs.WAFProfileDef
class NsxMacDiscoveryProfileApi(NsxSegmentProfileBaseApi):
@property
def entry_def(self):
return core_defs.MacDiscoveryProfileDef
def create_or_overwrite(self, name,
profile_id=None,
description=IGNORE,
mac_change_enabled=IGNORE,
mac_learning_enabled=IGNORE,
unknown_unicast_flooding_enabled=IGNORE,
mac_limit_policy=IGNORE,
mac_limit=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
profile_id = self._init_obj_uuid(profile_id)
profile_def = self._init_def(
profile_id=profile_id,
name=name,
description=description,
mac_change_enabled=mac_change_enabled,
mac_learning_enabled=mac_learning_enabled,
unknown_unicast_flooding_enabled=unknown_unicast_flooding_enabled,
mac_limit_policy=mac_limit_policy,
mac_limit=mac_limit,
tags=tags,
tenant=tenant)
self._create_or_store(profile_def)
return profile_id
class NsxIpv6NdraProfileApi(NsxPolicyResourceBase):
@property
def entry_def(self):
return core_defs.Ipv6NdraProfileDef
def create_or_overwrite(self, name,
profile_id=None,
description=IGNORE,
ra_mode=IGNORE,
reachable_timer=IGNORE,
retransmit_interval=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
profile_id = self._init_obj_uuid(profile_id)
profile_def = self._init_def(
profile_id=profile_id,
name=name,
description=description,
ra_mode=ra_mode,
reachable_timer=reachable_timer,
retransmit_interval=retransmit_interval,
tags=tags,
tenant=tenant)
self._create_or_store(profile_def)
return profile_id
def delete(self, profile_id, tenant=constants.POLICY_INFRA_TENANT):
profile_def = self.entry_def(profile_id=profile_id,
tenant=tenant)
self._delete_with_retry(profile_def)
def get(self, profile_id, tenant=constants.POLICY_INFRA_TENANT,
silent=False):
profile_def = self.entry_def(profile_id=profile_id,
tenant=tenant)
return self.policy_api.get(profile_def, silent=silent)
def list(self, tenant=constants.POLICY_INFRA_TENANT):
profile_def = self.entry_def(tenant=tenant)
return self._list(profile_def)
def get_by_name(self, name, tenant=constants.POLICY_INFRA_TENANT):
        return super(NsxIpv6NdraProfileApi, self).get_by_name(
name, tenant=tenant)
def update(self, profile_id, name=IGNORE, description=IGNORE,
ra_mode=IGNORE, reachable_timer=IGNORE,
retransmit_interval=IGNORE,
tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT):
self._update(profile_id=profile_id,
name=name,
description=description,
ra_mode=ra_mode,
reachable_timer=reachable_timer,
retransmit_interval=retransmit_interval,
tags=tags,
tenant=tenant)
class NsxDhcpRelayConfigApi(NsxPolicyResourceBase):
@property
def entry_def(self):
return core_defs.DhcpRelayConfigDef
def create_or_overwrite(self, name,
config_id=None,
description=None,
server_addresses=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
config_id = self._init_obj_uuid(config_id)
config_def = self._init_def(
config_id=config_id,
name=name,
description=description,
server_addresses=server_addresses,
tags=tags,
tenant=tenant)
self._create_or_store(config_def)
return config_id
def delete(self, config_id, tenant=constants.POLICY_INFRA_TENANT):
config_def = self.entry_def(config_id=config_id, tenant=tenant)
self._delete_with_retry(config_def)
def get(self, config_id, tenant=constants.POLICY_INFRA_TENANT,
silent=False):
config_def = self.entry_def(config_id=config_id, tenant=tenant)
return self.policy_api.get(config_def, silent=silent)
def list(self, tenant=constants.POLICY_INFRA_TENANT):
config_def = self.entry_def(tenant=tenant)
return self._list(config_def)
def update(self, config_id, name=IGNORE,
description=IGNORE,
server_addresses=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
self._update(config_id=config_id,
name=name,
description=description,
server_addresses=server_addresses,
tags=tags,
tenant=tenant)
class NsxDhcpServerConfigApi(NsxPolicyResourceBase):
@property
def entry_def(self):
return core_defs.DhcpServerConfigDef
def create_or_overwrite(self, name,
config_id=None,
description=None,
server_addresses=IGNORE,
edge_cluster_path=IGNORE,
lease_time=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
config_id = self._init_obj_uuid(config_id)
config_def = self._init_def(
config_id=config_id,
name=name,
description=description,
server_addresses=server_addresses,
edge_cluster_path=edge_cluster_path,
lease_time=lease_time,
tags=tags,
tenant=tenant)
self._create_or_store(config_def)
return config_id
def delete(self, config_id, tenant=constants.POLICY_INFRA_TENANT):
config_def = self.entry_def(config_id=config_id, tenant=tenant)
self._delete_with_retry(config_def)
def get(self, config_id, tenant=constants.POLICY_INFRA_TENANT,
silent=False):
config_def = self.entry_def(config_id=config_id, tenant=tenant)
return self.policy_api.get(config_def, silent=silent)
def list(self, tenant=constants.POLICY_INFRA_TENANT):
config_def = self.entry_def(tenant=tenant)
return self._list(config_def)
def update(self, config_id, name=IGNORE,
description=IGNORE,
server_addresses=IGNORE,
edge_cluster_path=IGNORE,
lease_time=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
self._update(config_id=config_id,
name=name,
description=description,
server_addresses=server_addresses,
edge_cluster_path=edge_cluster_path,
lease_time=lease_time,
tags=tags,
tenant=tenant)
class NsxPolicyCertApi(NsxPolicyResourceBase):
"""NSX Policy Certificate API."""
@property
def entry_def(self):
return core_defs.CertificateDef
def create_or_overwrite(self, name, certificate_id=None,
pem_encoded=IGNORE, private_key=IGNORE,
passphrase=IGNORE,
key_algo=IGNORE,
description=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
certificate_id = self._init_obj_uuid(certificate_id)
certificate_def = self._init_def(certificate_id=certificate_id,
name=name,
private_key=private_key,
pem_encoded=pem_encoded,
passphrase=passphrase,
key_algo=key_algo,
description=description,
tags=tags,
tenant=tenant)
self._create_or_store(certificate_def)
return certificate_id
def delete(self, certificate_id,
tenant=constants.POLICY_INFRA_TENANT):
certificate_def = self.entry_def(certificate_id=certificate_id,
tenant=tenant)
self._delete_with_retry(certificate_def)
def get(self, certificate_id, tenant=constants.POLICY_INFRA_TENANT,
silent=False):
certificate_def = self.entry_def(certificate_id=certificate_id,
tenant=tenant)
return self.policy_api.get(certificate_def, silent=silent)
def list(self, tenant=constants.POLICY_INFRA_TENANT):
certificate_def = self.entry_def(tenant=tenant)
return self._list(certificate_def)
def find_cert_with_pem(self, cert_pem,
tenant=constants.POLICY_INFRA_TENANT):
"""Find NSX certificates with specific pem and return their IDs"""
        # First normalize possible DOS-to-Unix line ending issues, as the NSX
        # backend also does
nsx_style_pem = cert_pem.replace('\r\n', '\n')
certs = self.list(tenant=tenant)
cert_ids = [cert['id'] for cert in certs
if cert['pem_encoded'] == nsx_style_pem]
return cert_ids
def update(self, certificate_id, name=IGNORE,
pem_encoded=IGNORE, private_key=IGNORE,
passphrase=IGNORE, key_algo=IGNORE, description=IGNORE,
tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT):
self._update(certificate_id=certificate_id,
name=name,
description=description,
tags=tags,
private_key=private_key,
pem_encoded=pem_encoded,
passphrase=passphrase,
key_algo=key_algo,
tenant=tenant)
def get_path(self, certificate_id, tenant=constants.POLICY_INFRA_TENANT):
c_def = self.entry_def(certificate_id=certificate_id, tenant=tenant)
return c_def.get_resource_full_path()
def wait_until_realized(self, certificate_id, entity_type=None,
tenant=constants.POLICY_INFRA_TENANT,
sleep=None, max_attempts=None):
cert_def = self.entry_def(
certificate_id=certificate_id, tenant=tenant)
return self._wait_until_realized(
cert_def, entity_type=entity_type,
sleep=sleep, max_attempts=max_attempts)
class NsxPolicyExcludeListApi(NsxPolicyResourceBase):
"""NSX Policy Exclude list."""
@property
def entry_def(self):
return core_defs.ExcludeListDef
def create_or_overwrite(self, members=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
exclude_list_def = self._init_def(members=members,
tenant=tenant)
self._create_or_store(exclude_list_def)
def delete(self, tenant=constants.POLICY_INFRA_TENANT):
err_msg = (_("This action is not supported"))
raise exceptions.ManagerError(details=err_msg)
def get(self, tenant=constants.POLICY_INFRA_TENANT, silent=False):
exclude_list_def = self.entry_def(tenant=tenant)
return self.policy_api.get(exclude_list_def, silent=silent)
def list(self, tenant=constants.POLICY_INFRA_TENANT):
err_msg = (_("This action is not supported"))
raise exceptions.ManagerError(details=err_msg)
def update(self, members=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
err_msg = (_("This action is not supported"))
raise exceptions.ManagerError(details=err_msg)
# TODO(asarfaty): Add support for add/remove member
class NsxPolicyTier0RouteMapApi(NsxPolicyResourceBase):
@property
def entry_def(self):
return core_defs.Tier0RouteMapDef
def create_or_overwrite(self, name, tier0_id,
route_map_id=None,
entries=IGNORE,
description=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
route_map_id = self._init_obj_uuid(route_map_id)
route_map_def = self._init_def(tier0_id=tier0_id,
route_map_id=route_map_id,
name=name,
entries=entries,
description=description,
tags=tags,
tenant=tenant)
self._create_or_store(route_map_def)
return route_map_id
def delete(self, tier0_id, route_map_id,
tenant=constants.POLICY_INFRA_TENANT):
route_map_def = self.entry_def(tier0_id=tier0_id,
route_map_id=route_map_id,
tenant=tenant)
self._delete_with_retry(route_map_def)
def get(self, tier0_id, route_map_id,
tenant=constants.POLICY_INFRA_TENANT, silent=False):
route_map_def = self.entry_def(tier0_id=tier0_id,
route_map_id=route_map_id,
tenant=tenant)
return self.policy_api.get(route_map_def, silent=silent)
def list(self, tier0_id, tenant=constants.POLICY_INFRA_TENANT):
route_map_def = self.entry_def(tier0_id=tier0_id, tenant=tenant)
return self._list(route_map_def)
def update(self, name, tier0_id,
route_map_id,
entries,
description=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT,
force=False):
self._update(tier0_id=tier0_id,
route_map_id=route_map_id,
name=name,
entries=entries,
description=description,
tags=tags,
tenant=tenant,
force=force)
def build_route_map_entry(self, action, community_list_matches=None,
prefix_list_matches=None, entry_set=None):
return core_defs.RouteMapEntry(action, community_list_matches,
prefix_list_matches, entry_set)
def build_route_map_entry_set(self, local_preference=100,
as_path_prepend=None, community=None,
med=None, weight=None):
return core_defs.RouteMapEntrySet(local_preference, as_path_prepend,
community, med, weight)
def build_community_match_criteria(self, criteria, match_operator=None):
return core_defs.CommunityMatchCriteria(criteria, match_operator)
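    # Illustrative sketch of composing the builders above ('route_map_api' and
    # the values are assumptions):
    #   entry_set = route_map_api.build_route_map_entry_set(local_preference=200)
    #   entry = route_map_api.build_route_map_entry(constants.ADV_RULE_PERMIT,
    #                                               entry_set=entry_set)
    #   route_map_api.create_or_overwrite('rm-1', tier0_id, entries=[entry])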
class NsxPolicyTier0PrefixListApi(NsxPolicyResourceBase):
@property
def entry_def(self):
return core_defs.Tier0PrefixListDef
def create_or_overwrite(self, name, tier0_id,
prefix_list_id=None,
prefixes=IGNORE,
description=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
prefix_list_id = self._init_obj_uuid(prefix_list_id)
prefix_list_def = self._init_def(tier0_id=tier0_id,
prefix_list_id=prefix_list_id,
name=name,
prefixes=prefixes,
description=description,
tags=tags,
tenant=tenant)
self._create_or_store(prefix_list_def)
return prefix_list_id
def delete(self, tier0_id, prefix_list_id,
tenant=constants.POLICY_INFRA_TENANT):
prefix_list_def = self.entry_def(tier0_id=tier0_id,
prefix_list_id=prefix_list_id,
tenant=tenant)
self._delete_with_retry(prefix_list_def)
def get(self, tier0_id, prefix_list_id,
tenant=constants.POLICY_INFRA_TENANT, silent=False):
prefix_list_def = self.entry_def(tier0_id=tier0_id,
prefix_list_id=prefix_list_id,
tenant=tenant)
return self.policy_api.get(prefix_list_def, silent=silent)
def list(self, tier0_id, tenant=constants.POLICY_INFRA_TENANT):
prefix_list_def = self.entry_def(tier0_id=tier0_id, tenant=tenant)
return self._list(prefix_list_def)
def update(self, name, tier0_id,
prefix_list_id,
prefixes,
description=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
self._update(tier0_id=tier0_id,
prefix_list_id=prefix_list_id,
name=name,
prefixes=prefixes,
description=description,
tags=tags,
tenant=tenant)
def build_prefix_entry(self, network, le=None, ge=None,
action=constants.ADV_RULE_PERMIT):
return core_defs.PrefixEntry(network, le, ge, action)
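    # Illustrative sketch ('prefix_list_api' and the values are assumptions):
    #   entry = prefix_list_api.build_prefix_entry('10.0.0.0/8', ge=16, le=24)
    #   prefix_list_api.create_or_overwrite('pl-1', tier0_id, prefixes=[entry])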
class NsxPolicyGlobalConfig(NsxPolicyResourceBase):
@property
def entry_def(self):
return core_defs.GlobalConfigDef
def create_or_overwrite(self, tenant=constants.POLICY_INFRA_TENANT):
err_msg = (_("This action is not supported"))
raise exceptions.ManagerError(details=err_msg)
def delete(self, tenant=constants.POLICY_INFRA_TENANT):
err_msg = (_("This action is not supported"))
raise exceptions.ManagerError(details=err_msg)
def get(self, tenant=constants.POLICY_INFRA_TENANT, silent=False):
global_config_def = self.entry_def(tenant=tenant)
return self.policy_api.get(global_config_def, silent=silent)
def list(self, tenant=constants.POLICY_INFRA_TENANT):
err_msg = (_("This action is not supported"))
raise exceptions.ManagerError(details=err_msg)
def update(self, members=IGNORE,
tenant=constants.POLICY_INFRA_TENANT):
err_msg = (_("This action is not supported"))
raise exceptions.ManagerError(details=err_msg)
def _set_l3_forwarding_mode(self, mode, tenant):
# Using PUT as PATCH is not supported for this API.
config = self.get()
if config['l3_forwarding_mode'] != mode:
config['l3_forwarding_mode'] = mode
config_def = self.entry_def(tenant=tenant)
path = config_def.get_resource_path()
self.policy_api.client.update(path, config)
def enable_ipv6(self, tenant=constants.POLICY_INFRA_TENANT):
return self._set_l3_forwarding_mode('IPV4_AND_IPV6', tenant)
def disable_ipv6(self, tenant=constants.POLICY_INFRA_TENANT):
return self._set_l3_forwarding_mode('IPV4_ONLY', tenant)
class NsxPolicyObjectRolePermissionGroupApi(NsxPolicyResourceBase):
@property
def entry_def(self):
return core_defs.ObjectRolePermissionGroupDef
# This will send a PATCH call: /policy/api/v1/aaa/object-permissions.
def create_or_overwrite(self, name, operation, path_prefix, role_name,
orbac_id=IGNORE,
description=IGNORE,
inheritance_disabled=IGNORE,
rule_disabled=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_AAA_TENANT):
orbac_def = self._init_def(name=name,
operation=operation,
path_prefix=path_prefix,
role_name=role_name,
orbac_id=orbac_id,
description=description,
inheritance_disabled=inheritance_disabled,
rule_disabled=rule_disabled,
tags=tags,
tenant=tenant,
patch=True)
self.policy_api.create_or_update(orbac_def)
# This will send a PATCH call: /policy/api/v1/aaa/object-permissions.
def update(self, name, operation, path_prefix, role_name,
orbac_id=IGNORE,
description=IGNORE,
inheritance_disabled=IGNORE,
rule_disabled=IGNORE,
tags=IGNORE,
tenant=constants.POLICY_AAA_TENANT):
self._update(name=name,
operation=operation,
path_prefix=path_prefix,
role_name=role_name,
orbac_id=orbac_id,
description=description,
inheritance_disabled=inheritance_disabled,
rule_disabled=rule_disabled,
tags=tags,
tenant=tenant,
patch=True)
def get(self, path_prefix, role_name, tenant=constants.POLICY_AAA_TENANT):
err_msg = (_("This action is not supported"))
raise exceptions.ManagerError(details=err_msg)
# This will send a GET call:
# /policy/api/v1/aaa/object-permissions?path_prefix=...&role_name=...
def list(self, path_prefix=None, role_name=None,
tenant=constants.POLICY_AAA_TENANT):
orbac_def = self.entry_def(path_prefix=path_prefix,
role_name=role_name,
tenant=tenant)
return self._list(orbac_def)
# This will send a DELETE call:
# /policy/api/v1/aaa/object-permissions?path_prefix=...&role_name=...
# path_prefix and role_name must be specified in the url as they are
# the identifier for an ORBAC object on NSX. Otherwise, NSX will
# still return success but actually delete nothing.
def delete(self, path_prefix, role_name,
tenant=constants.POLICY_AAA_TENANT):
orbac_def = self.entry_def(path_prefix=path_prefix,
role_name=role_name,
tenant=tenant)
self._delete_with_retry(orbac_def)
|
py | 1a4280d737a94e764e293497adc3e81abdb7e3fe | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import logging
import datetime as pydt
import pytz
import time
import emission.core.wrapper.wrapperbase as ecwb
class Metadata(ecwb.WrapperBase):
props = {"key": ecwb.WrapperBase.Access.WORM,
"platform": ecwb.WrapperBase.Access.WORM,
"type": ecwb.WrapperBase.Access.WORM,
"write_ts": ecwb.WrapperBase.Access.WORM,
"write_local_dt": ecwb.WrapperBase.Access.WORM,
"time_zone": ecwb.WrapperBase.Access.WORM,
"write_fmt_time": ecwb.WrapperBase.Access.WORM,
"read_ts": ecwb.WrapperBase.Access.WORM}
enums = {}
geojson = []
nullable = []
local_dates = ['write_local_dt']
def _populateDependencies(self):
pass
@staticmethod
def create_metadata_for_result(key):
import emission.storage.decorations.local_date_queries as esdl
import arrow
m = Metadata()
m.key = key
m.platform = "server"
m.write_ts = time.time()
m.time_zone = "America/Los_Angeles"
m.write_local_dt = esdl.get_local_date(m.write_ts, m.time_zone)
m.write_fmt_time = arrow.get(m.write_ts).to(m.time_zone).isoformat()
return m
@staticmethod
def create_metadata_for_fake_result(key, write_ts):
import emission.storage.decorations.local_date_queries as esdl
import arrow
m = Metadata()
m.key = key
m.platform = "server"
m.write_ts = write_ts
m.time_zone = "America/Los_Angeles"
m.write_local_dt = esdl.get_local_date(m.write_ts, m.time_zone)
m.write_fmt_time = arrow.get(m.write_ts).to(m.time_zone).isoformat()
return m
def isAndroid(self):
return self.platform == "android"
def isIOS(self):
return self.platform == "ios"
|
py | 1a4280f53466390057b25a943028b00ecc3f1a6a | # Copyright (c) 2020, Vercer Ltd. Rights set out in LICENCE.txt
from collections import UserList
from enum import Enum
class Placeholder:
def __init__(self, name):
if "%" in name:
raise ValueError("Placeholders cannot contain the % symbol")
self.name = name
def __repr__(self):
return "dqp.placeholder.{}".format(self.name)
class ListPlaceholder(UserList):
def __init__(self, name):
self.name = name
self.data = [Placeholder(name)]
class FailureBehaviour(Enum):
ERROR = "error"
WARN = "warn"
|
py | 1a428317f105d6960533790f02fa83e8e6be3f1e | import torch
import torch.nn as nn
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
# from .utils import load_state_dict_from_url
from ib_layers import *
# Explicit imports for names used below (np, Parameter, Variable); ib_layers
# may already re-export some of these via the wildcard import above.
import numpy as np
from torch.autograd import Variable
from torch.nn.parameter import Parameter
# __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
# 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
# 'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
resnet_alpha = 1.0
def conv3x3(in_planes, out_planes, wib, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
    # resnet_wib = False
    resnet_wib = True  # NOTE: unused here; the wib argument selects the branch below
    resnet_alpha = 1E-3
if not wib:
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
else:
return WibConv2d(alpha=resnet_alpha,
in_channels=in_planes, out_channels=out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, wib, stride=1):
"""1x1 convolution"""
    # resnet_wib = False
    resnet_wib = True  # NOTE: unused here; the wib argument selects the branch below
    resnet_alpha = 1E-3
if not wib:
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
else:
return WibConv2d(alpha=resnet_alpha,
in_channels=in_planes, out_channels=out_planes, kernel_size=1, stride=stride, bias=False)
cfg = {
    ##### In each tuple the first number is the number of conv kernels; the second is the
    ##### block type: 1 = a plain conv layer, 2 = a ResNet 2-layer BasicBlock, 3 = a ResNet
    ##### 3-layer Bottleneck; the third is the stride of the block's first conv layer.
    ##### The fourth value appears to be the per-layer KL multiplier.
#resnet18 (2,2,2,2)
'G5': [(64, 1, 1, 1.0/32), ## InformationBottleneck
'M',
(64, 2, 1, 1.0/32), ## InformationBottleneck
(64, 2, 1, 1.0/32), ## InformationBottleneck
(128, 2, 2, 1.0/16), ## InformationBottleneck
(128, 2, 1, 1.0/16), ## InformationBottleneck
(256, 2, 2, 1.0/8), ## InformationBottleneck
(256, 2, 1, 1.0/8), ## InformationBottleneck
(512, 2, 2, 1.0/4), ## InformationBottleneck
(512, 2, 1, 1.0/4), ## InformationBottleneck
'A'],
#resnet34 (3,4,6,3)
'G1': [(64, 1, 1, 1.0/32), ## InformationBottleneck
'M',
(64, 2, 1, 1.0/32), ## InformationBottleneck
(64, 2, 1, 1.0/32), ## InformationBottleneck
(64, 2, 1, 1.0/32), ## InformationBottleneck
(128, 2, 2, 1.0/16), ## InformationBottleneck
(128, 2, 1, 1.0/16), ## InformationBottleneck
(128, 2, 1, 1.0/16), ## InformationBottleneck
(128, 2, 1, 1.0/16), ## InformationBottleneck
(256, 2, 2, 1.0/8), ## InformationBottleneck
(256, 2, 1, 1.0/8), ## InformationBottleneck
(256, 2, 1, 1.0/8), ## InformationBottleneck
(256, 2, 1, 1.0/8), ## InformationBottleneck
(256, 2, 1, 1.0/8), ## InformationBottleneck
(256, 2, 1, 1.0/8), ## InformationBottleneck
(512, 2, 2, 1.0/4), ## InformationBottleneck
(512, 2, 1, 1.0/4), ## InformationBottleneck
(512, 2, 1, 1.0/4), ## InformationBottleneck
'A'],
# resnet50 (3,4,6,3)
'G2': [(64, 1, 1, 1.0 / 32), ## InformationBottleneck
'M',
(64, 3, 1, 1.0 / 32), ## InformationBottleneck
(64, 3, 1, 1.0 / 32), ## InformationBottleneck
(64, 3, 1, 1.0 / 32), ## InformationBottleneck
(128, 3, 2, 1.0 / 16), ## InformationBottleneck
(128, 3, 1, 1.0 / 16), ## InformationBottleneck
(128, 3, 1, 1.0 / 16), ## InformationBottleneck
(128, 3, 1, 1.0 / 16), ## InformationBottleneck
(256, 3, 2, 1.0 / 8), ## InformationBottleneck
(256, 3, 1, 1.0 / 8), ## InformationBottleneck
(256, 3, 1, 1.0 / 8), ## InformationBottleneck
(256, 3, 1, 1.0 / 8), ## InformationBottleneck
(256, 3, 1, 1.0 / 8), ## InformationBottleneck
(256, 3, 1, 1.0 / 8), ## InformationBottleneck
(512, 3, 2, 1.0 / 4), ## InformationBottleneck
(512, 3, 1, 1.0 / 4), ## InformationBottleneck
(512, 3, 1, 1.0 / 4), ## InformationBottleneck
'A']
}
def reparameterize(mu, logalpha):
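    # Draw a multiplicative log-normal noise sample: with std = exp(0.5 * logalpha)
    # and eps ~ N(0, 1), phi = exp(std * eps - std^2 / 2) has mean 1, so the
    # returned phi * mu is an unbiased stochastic scaling of mu.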
std = logalpha.mul(0.5).exp_()
eps = torch.FloatTensor(std.size(0)).cuda(mu.get_device()).normal_()
eps = Variable(eps)
# phi = std * eps - std * std / 2
# return phi
phi = (std * eps - std * std / 2).exp_()
return phi * mu
# std = logalpha.mul(0.5).exp_()
# eps = torch.FloatTensor(std.size(0)).cuda(mu.get_device()).normal_()
# eps = Variable(eps)
# return mu + eps * std
class WeightIB(nn.Module):
def __init__(self, out_channels, init_mag=9, init_var=0.01):
super(WeightIB, self).__init__()
self.dim = out_channels
print(self.dim)
# self.phi = Parameter(torch.Tensor(self.dim))
self.logalpha = Parameter(torch.Tensor(self.dim))
self.mu = Parameter(torch.Tensor(self.dim))
self.epsilon = 1e-8
self.offset = 0.00
self.mu.data.normal_(1, init_var)
self.logalpha.data.normal_(-init_mag, init_var)
def forward(self, x, training=False):
if self.training:
# z_scale = reparameterize(self.mu, self.logalpha)
# z_scale_exp = z_scale.exp_()
# hard_mask, _ = self.get_mask_hard(self.epsilon)
# z_scale = z_scale_exp * Variable(hard_mask)
z_scale = reparameterize(self.mu, self.logalpha)
hard_mask, _ = self.get_mask_hard(self.epsilon)
z_scale *= Variable(hard_mask)
# print('self.mu: ', self.mu)
# print('z_scale1: ', z_scale)
# print('z_scale1: ', z_scale)
else:
# z_scale = reparameterize(self.mu, self.logalpha)
# z_scale_exp = z_scale.exp_()
z_scale = reparameterize(self.mu, self.logalpha)
hard_mask, _ = self.get_mask_hard(self.epsilon)
z_scale *= Variable(hard_mask)
# z_scale = Variable(self.get_mask_weighted(self.epsilon))
# print('z_scale2: ', z_scale)
# new_shape = self.adapt_shape(z_scale_exp.size(), x.size())
# return x * z_scale_exp.view(new_shape)
new_shape = self.adapt_shape(z_scale.size(), x.size())
return x * z_scale.view(new_shape)
def adapt_shape(self, src_shape, x_shape):
if len(src_shape) == 2:
new_shape = src_shape
# print('new_shape1: ',new_shape)
else:
new_shape = (src_shape[0], 1)
# print('new_shape2: ', new_shape)
if len(x_shape)>2:
new_shape = list(new_shape)
new_shape += [1 for i in range(len(x_shape)-2)]
# print('new_shape3: ', new_shape)
return new_shape
def get_mask_hard(self, threshold=0):
hard_mask = (self.mu.abs() > threshold).float()
prune = self.mu.abs().cpu() > threshold # e.g. [True, False, True, True, False]
mask = np.where(prune)[0] # e.g. [0, 2, 3]
return hard_mask, len(mask)
def get_mask_weighted(self, threshold=0):
mask = (self.mu.abs() > threshold).float() * self.mu.data.float()
return mask
def compute_Wib_upbound(self, logalpha):
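        # Closed-form upper bound on the weight-IB compression term,
        # -0.5 * sum(logalpha); presumably added to the training loss as the
        # regularizer for WibConv2d layers.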
return - 0.5 * logalpha.sum()
class WibConv2d(nn.Conv2d):
def __init__(self, alpha, **kwargs):
super(WibConv2d, self).__init__(**kwargs)
self.alpha = alpha
self.weight_ib = WeightIB(self.out_channels)
self.W = torch.empty(self.weight.data.size())
        torch.nn.init.xavier_normal_(self.W, gain=1)
def forward(self, x):
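        # Both branches apply the weight-space IB mask to the kernel via
        # weight_ib before running the convolution; they differ only in the
        # commented-out caching of self.W.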
if self.training:
# kernel_in = self.weight.data
# self.W.data = self.weight_ib(self.weight, training=self.training)
# y = nn.functional.conv2d(x, self.W, self.bias, self.stride, self.padding, self.dilation, self.groups)
new_weight = self.weight_ib(self.weight, training=self.training)
y = nn.functional.conv2d(x, new_weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
# y = nn.functional.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
# self.W.data= self.W
else:
# y = nn.functional.conv2d(x, self.W, self.bias, self.stride, self.padding, self.dilation, self.groups)
new_weight = self.weight_ib(self.weight, training=self.training)
y = nn.functional.conv2d(x, new_weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
# y = nn.functional.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
# self.weight.data = self.W.data
# print('self.weight2: ', self.weight)
# new_weight = self.weight_ib(self.weight, training=self.training)
# y = nn.functional.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
# y = nn.functional.conv2d(x, new_weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
return y
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None, wib=0, kl_mult=1):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
# self.wib=wib
self.conv1 = conv3x3(inplanes, planes, wib, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.ib1 = InformationBottleneck(planes, kl_mult=kl_mult)
self.conv2 = conv3x3(planes, planes, wib)
self.bn2 = norm_layer(planes)
self.ib2 = InformationBottleneck(planes, kl_mult=kl_mult)
self.downsample = downsample
self.stride = stride
def compute_Wib_upbound(self, ):
wib_upbound =0
wib_upbound += self.conv1.weight_ib.compute_Wib_upbound(self.conv1.weight_ib.logalpha)
        # The previous version was wrong:
        # wib_upbound += self.conv2.weight_ib.compute_Wib_upbound(self.conv1.weight_ib.logalpha)
        # Correct version:
wib_upbound += self.conv2.weight_ib.compute_Wib_upbound(self.conv2.weight_ib.logalpha)
return wib_upbound
def compute_compression_ratio(self, threshold, pre_mask, n=0):
# applicable for structures with global pooling before fc
total_params, pruned_params, remain_params = 0, 0, 0
fmap_size=32
out_channels1 = self.conv1.out_channels
out_channels2 = self.conv2.out_channels
in_channels1=self.conv1.in_channels
in_channels2 = self.conv2.in_channels
total_params = in_channels1 * out_channels1 * 9
total_params += in_channels2 * out_channels2 * 9
hard_mask1 = self.conv1.get_mask_hard(threshold)
hard_mask2 = self.conv2.get_mask_hard(threshold)
remain_params = pre_mask * hard_mask1 * 9
remain_params += hard_mask1 *hard_mask2 * 9
pruned_params = total_params - remain_params
flops = (fmap_size ** 2) * remain_params
# print('in_channels1: {}, in_channels2: {}, out_channels1:{}, out_channels2: {},'
# .format(in_channels1, in_channels2, out_channels1, out_channels2))
# print('pre_mask: {}, hard_mask1: {}, hard_mask2:{},'
# .format(pre_mask, hard_mask1, hard_mask2))
# print('total parameters: {}, pruned parameters: {}, remaining params:{}, remaining flops: {},'
# .format(total_params, pruned_params, remain_params, flops))
return total_params, pruned_params, remain_params, flops
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.ib1(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.ib2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None, wib=0, kl_mult=1):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width, wib)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, wib, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.ib2 = InformationBottleneck(width, kl_mult=kl_mult)
self.conv3 = conv1x1(width, planes * self.expansion, wib)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def compute_Wib_upbound(self, ):
wib_upbound =0
wib_upbound += self.conv1.weight_ib.compute_Wib_upbound(self.conv1.weight_ib.logalpha)
wib_upbound += self.conv2.weight_ib.compute_Wib_upbound(self.conv2.weight_ib.logalpha)
wib_upbound += self.conv3.weight_ib.compute_Wib_upbound(self.conv3.weight_ib.logalpha)
return wib_upbound
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.ib2(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class RESNET_IB(nn.Module):
def __init__(self, block, config=None, mag=9, batch_norm=False, threshold=0,
init_var=0.01, sample_in_training=True, sample_in_testing=False, n_cls=10, no_ib=False, a=0.5, b=0.5,
                 ### ResNet-specific parameters
zero_init_residual=False, wib=1,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None
):
super(RESNET_IB, self).__init__()
self.expansion = block.expansion
        ### ResNet initialization
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
# self.layers = layers
self.wib = wib
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.init_mag = mag
self.threshold = threshold
self.config = config
self.init_var = init_var
self.sample_in_training = sample_in_training
self.sample_in_testing = sample_in_testing
self.no_ib = no_ib
self.a = a
self.b = b
print('Using structure1 {}'.format(cfg[config]))
self.conv_layers, conv_kl_list = self.make_conv_layers(cfg[config], batch_norm, block)
print('Using structure {}'.format(cfg[config]))
# print('conv_layers {}'.format(self.conv_layers))
print('conv_layers {}'.format(self.conv_layers))
print('conv_kl_list {}'.format(conv_kl_list))
# self.compute_Wib_upbound()
fc_ib1 = InformationBottleneck(512*block.expansion, mask_thresh=threshold, init_mag=self.init_mag, init_var=self.init_var,
sample_in_training=sample_in_training, sample_in_testing=sample_in_testing,a=self.a,b=self.b)
fc_ib2 = InformationBottleneck(512*block.expansion, mask_thresh=threshold, init_mag=self.init_mag, init_var=self.init_var,
sample_in_training=sample_in_training, sample_in_testing=sample_in_testing,a=self.a,b=self.b)
self.n_cls = n_cls
# self.n = 2048
# self.n = 4096
self.n = 1024
if self.config in ['G1', 'D6']:
# t3p3 t4p2
self.fc_layers = nn.Sequential(nn.Linear(512*block.expansion, self.n_cls))
self.kl_list = conv_kl_list
#resnet32
init_kl_list = [64, 64, 64, 64, 64, 64, 64,
128, 128, 128, 128, 128, 128, 128, 128,
256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256,
512, 512, 512, 512, 512, 512]
self.init_kl_list = [x / self.n for x in init_kl_list]
# resnet32
kl_mult_temp = [64, 64, 64, 64, 64, 64, 64,
128, 128, 128, 128, 128, 128, 128, 128,
256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256,
512, 512, 512, 512, 512, 512]
self.kl_mult_temp = [x / self.n for x in kl_mult_temp]
self.ratio = [1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1]
_,self.last_prune_stat = self.get_masks(hard_mask=True, threshold=threshold)
_,self.pruned_structure = self.get_masks(hard_mask=True, threshold=threshold)
elif self.config == 'G2':
# t3p3 t4p2
self.fc_layers = nn.Sequential(nn.Linear(512 * block.expansion, self.n_cls))
self.kl_list = conv_kl_list
# resnet50
init_kl_list = [64, 64, 64, 64,
128, 128, 128, 128,
256, 256, 256, 256, 256, 256,
512, 512, 512]
# init_kl_list = [256, 256, 256, 256,
# 256, 256, 256, 256,
# 256, 256, 256, 256, 256, 256,
# 256, 256, 256]
self.init_kl_list = [x / self.n for x in init_kl_list]
# resnet50
kl_mult_temp = [64, 64, 64, 64,
128, 128, 128, 128,
256, 256, 256, 256, 256, 256,
512, 512, 512]
# kl_mult_temp = [256, 256, 256, 256,
# 256, 256, 256, 256,
# 256, 256, 256, 256, 256, 256,
# 256, 256, 256]
self.kl_mult_temp = [x / self.n for x in kl_mult_temp]
self.ratio = [1, 1, 1, 1,
1, 1, 1, 1,
1, 1, 1, 1, 1, 1,
1, 1, 1]
_, self.last_prune_stat = self.get_masks(hard_mask=True, threshold=threshold)
_, self.pruned_structure = self.get_masks(hard_mask=True, threshold=threshold)
elif self.config == 'G5':
# t3p3 t4p2
self.fc_layers = nn.Sequential(nn.Linear(512*block.expansion, self.n_cls))
self.kl_list = conv_kl_list
init_kl_list = [64,
64, 64, 64, 64,
128, 128, 128, 128,
256, 256, 256, 256,
512, 512, 512, 512]
self.init_kl_list = [x / self.n for x in init_kl_list]
kl_mult_temp = [64,
64, 64, 64, 64,
128, 128, 128, 128,
256, 256, 256, 256,
512, 512, 512, 512]
self.kl_mult_temp = [x / self.n for x in kl_mult_temp]
self.ratio = [1,
1, 1, 1, 1,
1, 1, 1, 1,
1, 1, 1, 1,
1, 1, 1, 1]
_,self.last_prune_stat = self.get_masks(hard_mask=True, threshold=threshold)
_,self.pruned_structure = self.get_masks(hard_mask=True, threshold=threshold)
else:
# D4 t3p1
fc_layer_list = [nn.Linear(512, 512), nn.ReLU(), nn.Linear(512, 512), nn.ReLU(), nn.Linear(512, self.n_cls)] if no_ib else \
[nn.Linear(512, 512), nn.ReLU(), fc_ib1, nn.Linear(512, 512), nn.ReLU(), fc_ib2, nn.Linear(512, self.n_cls)]
self.fc_layers = nn.Sequential(*fc_layer_list)
self.kl_list = conv_kl_list + [fc_ib1, fc_ib2]
self.init_kl_list = [1/32, 1/32, 1/16, 1/16, 1/8, 1/8, 1/8, 1/4, 1/4, 1/4, 1/2, 1/2, 1/2, 1, 1]
self.kl_mult_temp = [1/32, 1/32, 1/16, 1/16, 1/8, 1/8, 1/8, 1/4, 1/4, 1/4, 1/2, 1/2, 1/2, 1, 1]
_,self.last_prune_stat = self.get_masks(hard_mask=True, threshold=threshold)
_,self.pruned_structure = self.get_masks(hard_mask=True, threshold=threshold)
print(self.kl_mult_temp)
print(self.init_kl_list)
### ResNet weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
print('ok1')
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
# elif isinstance(m, WibConv2d):
# print('ok2')
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
### ResNet's make_layer function
def _make_layer(self, block, planes, blocks=1, stride=1, dilate=False, kl_mult=1 ):
norm_layer = self._norm_layer
wib = self.wib
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, wib, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer, wib, kl_mult))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer, wib = wib, kl_mult = kl_mult))
return nn.Sequential(*layers)
def make_conv_layers(self, config, batch_norm, block, blocks=1, dilate=False):
layers, kl_list = [], []
in_channels = 3
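# Each config entry is either a pooling marker ('M' max-pool, 'A' adaptive average-pool) or a tuple
# (out_channels, layer_type, stride, kl_mult), where layer_type 1 = plain conv, 2 = BasicBlock, 3 = Bottleneck.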
for v in config:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=3, stride=2, padding=1)]
elif v == 'A':
# layers += [nn.AvgPool2d(kernel_size=2, stride=2)]
layers += [nn.AdaptiveAvgPool2d((1, 1))]
else:
## Decide whether this entry is the first conv layer or a residual block module
if v[1]==1:# first conv layer: build it (with an information bottleneck) in the same way as the VGG-style models
# conv2d = nn.Conv2d(in_channels, v[0], kernel_size=7, stride=2, padding=3, bias=False)
# conv2d = nn.Conv2d(in_channels, v[0], kernel_size=3, stride=1, padding=1, bias=False)
conv2d = conv3x3(3, v[0], stride=1, wib=self.wib)
ib = InformationBottleneck(v[0], mask_thresh=self.threshold, init_mag=self.init_mag, init_var=self.init_var,
kl_mult=v[3], sample_in_training=self.sample_in_training, sample_in_testing=self.sample_in_testing,a=self.a,b=self.b)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v[0]), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
if not self.no_ib:
layers.append(ib)
kl_list.append(ib)
if v[1]==2:# ResNet BasicBlock module: build it via ResNet's _make_layer
resblock = self._make_layer(block, v[0], stride=v[2], kl_mult=v[3])
layers += [resblock]
kl_list.append(resblock[0].ib1)
kl_list.append(resblock[0].ib2)
ib = InformationBottleneck(v[0]*block.expansion, mask_thresh=self.threshold, init_mag=self.init_mag, init_var=self.init_var,
kl_mult=v[3], sample_in_training=self.sample_in_training, sample_in_testing=self.sample_in_testing,a=self.a,b=self.b)
# if not self.no_ib:
# layers.append(ib)
# kl_list.append(ib)
if v[1]==3:# ResNet Bottleneck module: build it via ResNet's _make_layer
resblock = self._make_layer(block, v[0], stride=v[2], kl_mult=v[3])
layers += [resblock]
kl_list.append(resblock[0].ib2)
# ib = InformationBottleneck(v[0]*block.expansion, mask_thresh=self.threshold, init_mag=self.init_mag, init_var=self.init_var,
# kl_mult=v[3], sample_in_training=self.sample_in_training, sample_in_testing=self.sample_in_testing,a=self.a,b=self.b)
in_channels = v[0]
# if not self.no_ib:
# layers.append(ib)
# kl_list.append(ib)
return nn.Sequential(*layers), kl_list
def auto_kl_mult(self):
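# Re-weights every layer's KL multiplier from the current pruning state: layers with a larger fraction of
# surviving channels (and layers nearer the middle of the network) get a larger multiplier; fully pruned layers get zero.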
# _, prune_stat = self.get_masks(hard_mask=True, threshold=threshold)
# conv_shapes = [v[0] for v in cfg[self.config] if type(v) is not str]
if self.config in ['G', 'D6']:
conv_shapes = [v[0] for v in cfg[self.config] if type(v) is not str]
fc_shapes = [512]
elif self.config in ['G5', 'G1']:
conv_shapes = [v[0] for v in cfg[self.config] if type(v) is not str]
conv_shapes_temp=[]
conv_shapes_temp += [conv_shapes[0]]
for i in range(len(conv_shapes)-1):
conv_shapes_temp += [conv_shapes[i + 1]]
conv_shapes_temp += [conv_shapes[i + 1]]
conv_shapes = conv_shapes_temp
fc_shapes = []
elif self.config in ['G2']:
conv_shapes = [v[0] for v in cfg[self.config] if type(v) is not str]
# conv_shapes[0]=conv_shapes[0]/self.expansion
fc_shapes = []
else:
fc_shapes = [512, 512]
# print('prune_stat: {}, last_prune_stat:{}'.format(prune_stat, self.last_prune_stat))
remain_stat = [out_channels - self.last_prune_stat[idx] for idx, out_channels in enumerate(conv_shapes + fc_shapes)]
init_stat = [out_channels for idx, out_channels in enumerate(conv_shapes + fc_shapes)]
sum = 0
# a=32
for i in range(len(init_stat)):
a = init_stat[i]/2
self.ratio[i] = remain_stat[i] / init_stat[i]
# sum = sum + self.ratio[i]
sum = sum + math.tan(math.pi*(a-1)/a/2*self.ratio[i])
# offset = 1 / len(self.init_kl_list)
b = 1.2
c= 0.01
# conv_kl_mult = 4
for i in range(len(self.init_kl_list)):
a=init_stat[i]/2
temp1 = len(self.init_kl_list)/2 - abs(i-len(self.init_kl_list)/2)
max1 = len(self.init_kl_list)/2
temp2 = remain_stat[i]
max2 = max(remain_stat)
# print('i:')
# print('(a-1)/a/2:',(a-1)/a/2)
# print('self.ratio[i]:', self.ratio[i])
# print('math.pi*(a-1)/a/2*self.ratio[i]:', math.pi*(a-1)/a/2*self.ratio[i])
# self.kl_list[i].kl_mult = self.init_kl_list[i] * (
# 1 + b* math.log(temp2,2)/math.log(max2,2)*
# (math.log(1 + temp1, 2) / math.log(1 + max1, 2)) *
# (math.tan(math.pi*(a-1)/a/2*self.ratio[i]) / sum) * len(self.init_kl_list))
if temp2==0:
self.kl_list[i].kl_mult=0
self.kl_mult_temp[i]=0
else:
self.kl_list[i].kl_mult = self.init_kl_list[i] * (
2* b* math.log(2+temp2,2)/math.log(max2,2)*
(math.log(1 + temp1, 2) / math.log(2 + max1, 2)) *
(math.tan(math.pi*(a-1)/a/2*self.ratio[i]) / sum) * len(self.init_kl_list))
self.kl_mult_temp[i] = self.kl_list[i].kl_mult / self.init_kl_list[i]
# print('conv_kl_mult:',conv_kl_mult)
print(b)
print(self.ratio)
print(self.init_kl_list)
print(self.kl_mult_temp)
def adapt_dropout(self, p):
# conv_shapes = [v[0] for v in cfg[self.config] if type(v) is not str]
if self.config in ['G', 'D6']:
conv_shapes = [v[0] for v in cfg[self.config] if type(v) is not str]
fc_shapes = [512]
elif self.config in ['G5', 'G1']:
conv_shapes = [v[0] for v in cfg[self.config] if type(v) is not str]
conv_shapes_temp = []
conv_shapes_temp += [conv_shapes[0]]
for i in range(len(conv_shapes) - 1):
conv_shapes_temp += [conv_shapes[i + 1]]
conv_shapes_temp += [conv_shapes[i + 1]]
conv_shapes = conv_shapes_temp
fc_shapes = []
elif self.config in ['G2']:
conv_shapes = [v[0] for v in cfg[self.config] if type(v) is not str]
# conv_shapes[0] = conv_shapes[0] / self.expansion
fc_shapes = []
else:
fc_shapes = [512, 512]
remain_stat = [out_channels - self.last_prune_stat[idx] for idx, out_channels in enumerate(conv_shapes + fc_shapes)]
for i in range(len(self.init_kl_list)):
if remain_stat[i] < 150:
# if remain_stat[i] < 200:
# if remain_stat[i] < 120:
# self.kl_list[i].p=1
self.kl_list[i].p = 1
else:
# self.kl_list[i].p = 1.0-1.0/remain_stat[i]
# original setting
# self.kl_list[i].p = 0.99
self.kl_list[i].p = 1
print(i,self.kl_list[i].p)
def forward(self, x):
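# During training the network output is returned together with extra loss terms: the summed
# information-bottleneck KL (unless no_ib) and, when wib is enabled, the weight-IB upper bound.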
batch_size = x.size(0)
x = self.conv_layers(x).view(batch_size, -1)
x = self.fc_layers(x)
if self.training:
if self.no_ib:
# return x
if not self.wib:
return x
else:
Wib_upbound = self.compute_Wib_upbound()
return x, Wib_upbound
else:
if not self.wib:
ib_kld = self.kl_list[0].kld
for ib in self.kl_list[1:]:
ib_kld += ib.kld
return x, ib_kld.float()
else:
ib_kld = self.kl_list[0].kld
for ib in self.kl_list[1:]:
ib_kld += ib.kld
Wib_upbound = self.compute_Wib_upbound()
return x, ib_kld.float(), Wib_upbound
else:
return x
def get_masks(self, hard_mask=True, threshold=0):
masks = []
if hard_mask:
masks = [ib_layer.get_mask_hard(threshold) for ib_layer in self.kl_list]
return masks, [np.sum(mask.cpu().numpy()==0) for mask in masks]
else:
masks = [ib_layer.get_mask_weighted(threshold) for ib_layer in self.kl_list]
return masks
def compute_Wib_upbound(self,):
Wib_upbound = 0
offset=0
interval=0
Wib_upbound += self.conv_layers[0].weight_ib.compute_Wib_upbound(self.conv_layers[0].weight_ib.logalpha)
# print('conv_layers: {}'.format(self.conv_layers))
if not self.no_ib:
offset=5
interval=0
else:
offset=4
interval=1
for i in range(8):
# print('self.conv_layers[5+i*2]: {}'.format(self.conv_layers[5+i*2]))
block=self.conv_layers[offset+i*(2-interval)]
# print('block: {}'.format(block[0]))
Wib_upbound += block[0].compute_Wib_upbound()
return Wib_upbound
def print_params(self,):
mu = []
logalpha = []
weight = []
weight += [self.conv_layers[0].weight]
offset = 0
interval = 0
if not self.no_ib:
offset=5
interval=0
else:
offset=4
interval=1
# print('weight: {}'.format(weight))
if self.wib:
mu += [self.conv_layers[0].weight_ib.mu]
logalpha += [self.conv_layers[0].weight_ib.logalpha]
mask_w,_= self.conv_layers[0].weight_ib.get_mask_hard(self.conv_layers[0].weight_ib.epsilon)
if not self.no_ib:
mask_a = self.kl_list[0].get_mask_hard()
mask_dert = mask_w - mask_a
print('mask_w: {}'.format(mask_w))
if not self.no_ib:
print('mask_a: {}'.format(mask_a))
print('mask_dert: {}'.format(mask_dert))
print('mu: {}, logalpha: {}'.format(mu, logalpha))
for i in range(8):
# print('self.conv_layers[5+i*2]: {}'.format(self.conv_layers[5+i*2]))
block = self.conv_layers[offset + i * (2 - interval)]
# block=self.conv_layers[5+i*2]
# print('block: {}'.format(block[0]))
mu += [block[0].conv1.weight_ib.mu]
mu += [block[0].conv2.weight_ib.mu]
logalpha += [block[0].conv1.weight_ib.logalpha]
logalpha += [block[0].conv2.weight_ib.logalpha]
# mu = [ib_layer.post_z_mu for ib_layer in self.kl_list]
# logalpha = [ib_layer.post_z_logD for ib_layer in self.kl_list]
# print('mu: {}, logalpha: {}'.format(mu, logalpha))
def print_compression_ratio(self, threshold, writer=None, epoch=-1):
# applicable for structures with global pooling before fc
_, prune_stat = self.get_masks(hard_mask=True, threshold=threshold)
# conv_shapes = [v[0] for v in cfg[self.config] if type(v) is not str]
if self.config in ['G', 'D6']:
conv_shapes = [v[0] for v in cfg[self.config] if type(v) is not str]
fc_shapes = [512]
elif self.config in ['G5', 'G1']:
conv_shapes = [v[0] for v in cfg[self.config] if type(v) is not str]
conv_shapes_temp = []
conv_shapes_temp += [conv_shapes[0]]
for i in range(len(conv_shapes) - 1):
conv_shapes_temp += [conv_shapes[i + 1]]
conv_shapes_temp += [conv_shapes[i + 1]]
conv_shapes = conv_shapes_temp
fc_shapes = []
elif self.config in ['G2']:
conv_shapes = [v[0] for v in cfg[self.config] if type(v) is not str]
# conv_shapes[0] = conv_shapes[0] / self.expansion
fc_shapes = []
else:
fc_shapes = [512, 512]
# print('prune_stat: {}, last_prune_stat:{}'.format(prune_stat, self.last_prune_stat))
self.pruned_structure = [prune_stat[idx] - self.last_prune_stat[idx] for idx, out_channels in enumerate(conv_shapes + fc_shapes)]
self.last_prune_stat = [prune_stat[idx] for idx, out_channels in enumerate(conv_shapes + fc_shapes)]
net_shape = [ out_channels-prune_stat[idx] for idx, out_channels in enumerate(conv_shapes+fc_shapes)]
#conv_shape_with_pool = [v[0] if v != 'M' else 'M' for v in cfg[self.config]]
current_n, hdim, last_channels, flops, fmap_size = 0, 64, 3, 0, 32
for n, pruned_channels in enumerate(prune_stat):
if n < len(conv_shapes):
# current_channels = cfg[self.config][current_n][0] - pruned_channels
current_channels = conv_shapes[current_n] - pruned_channels
flops += (fmap_size**2) * 9 * last_channels * current_channels
last_channels = current_channels
current_n += 1
if self.config in ['G1']:
if current_n==1 or current_n==8 or current_n==16 or current_n==28 or current_n==33:
fmap_size /= 2
hdim *= 2
if self.config in ['G5']:
if current_n==1 or current_n==6 or current_n==10 or current_n==14 or current_n==17:
fmap_size /= 2
hdim *= 2
# if type(cfg[self.config][current_n]) is str:
# current_n += 1
# fmap_size /= 2
# hdim *= 2
else:
current_channels = 512 - pruned_channels
flops += last_channels * current_channels
last_channels = current_channels
flops += last_channels * self.n_cls
total_params, pruned_params, remain_params = 0, 0, 0
# total number of conv params
in_channels, in_pruned = 3, 0
for n, n_out in enumerate(conv_shapes):
n_params = in_channels * n_out * 9
total_params += n_params
n_remain = (in_channels - in_pruned) * (n_out - prune_stat[n]) * 9
remain_params += n_remain
pruned_params += n_params - n_remain
in_channels = n_out
in_pruned = prune_stat[n]
# print('n_params: {}, n_remain: {}, in_channels:{}, in_pruned:{}, n_out: {}, prune_stat: {},'.format(n_params, n_remain, in_channels, in_pruned, n_out, prune_stat))
# fc layers
offset = len(prune_stat) - len(fc_shapes)
for n, n_out in enumerate(fc_shapes):
n_params = in_channels * n_out
total_params += n_params
n_remain = (in_channels - in_pruned) * (n_out - prune_stat[n+offset])
remain_params += n_remain
pruned_params += n_params - n_remain
in_channels = n_out
in_pruned = prune_stat[n+offset]
# print('n_params: {}, n_remain: {}, in_channels:{}, in_pruned:{}, n_out: {}, prune_stat: {},'.format(n_params, n_remain, in_channels, in_pruned, n_out, prune_stat))
total_params += in_channels * self.n_cls
remain_params += (in_channels - in_pruned) * self.n_cls
pruned_params += in_pruned * self.n_cls
self.print_params()
print('total parameters: {}, pruned parameters: {}, remaining params:{}, remain/total params:{}, remaining flops: {}, remaining flops/params: {},'
'each layer pruned: {}, this epoch each layer pruned: {}, remaining structure:{}'.format(total_params, pruned_params, remain_params,
float(total_params-pruned_params)/total_params, flops, 0.0000000001 * flops/(float(total_params-pruned_params)/total_params), prune_stat, self.pruned_structure, net_shape))
if writer is not None:
writer.add_scalar('flops', flops, epoch)
writer.add_scalar('remain/total params', float(total_params-pruned_params)/total_params, epoch)
writer.add_scalar('flops/remaining params', 0.0000000001 * flops/(float(total_params-pruned_params)/total_params), epoch)
|
py | 1a428340cd4b1fd86db98593dc0b8997f8c17979 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf
import math
from dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo
"""
This is your result for task 1:
mAP: 0.7066194189913816
ap of each class:
plane:0.8905480010393588,
baseball-diamond:0.7845764249543027,
bridge:0.4415489914209597,
ground-track-field:0.6515721505439082,
small-vehicle:0.7509226622459368,
large-vehicle:0.7288453788151275,
ship:0.8604046905135039,
tennis-court:0.9082569687774237,
basketball-court:0.8141347275878138,
storage-tank:0.8253027715641935,
soccer-ball-field:0.5623560181901192,
roundabout:0.6100656068973895,
harbor:0.5648618127447264,
swimming-pool:0.6767393616949172,
helicopter:0.5291557178810407
The submitted information is :
Description: RetinaNet_DOTA_R3Det_2x_20191108_70.2w
Username: SJTU-Det
Institute: SJTU
Emailadress: [email protected]
TeamMembers: yangxue
"""
# ------------------------------------------------
VERSION = 'RetinaNet_DOTA_R3Det_2x_20191108'
NET_NAME = 'resnet50_v1d' # 'MobilenetV2'
# ---------------------------------------- System
ROOT_PATH = os.path.abspath('../../')
print(20*"++--")
print(ROOT_PATH)
GPU_GROUP = "0,1,2"
NUM_GPU = len(GPU_GROUP.strip().split(','))
SHOW_TRAIN_INFO_INTE = 20
SMRY_ITER = 200
SAVE_WEIGHTS_INTE = 27000 * 2
SUMMARY_PATH = os.path.join(ROOT_PATH, 'output/summary')
TEST_SAVE_PATH = os.path.join(ROOT_PATH, 'tools/test_result')
pretrain_zoo = PretrainModelZoo()
PRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
EVALUATE_R_DIR = os.path.join(ROOT_PATH, 'output/evaluate_result_pickle/')
# ------------------------------------------ Train and test
RESTORE_FROM_RPN = False
FIXED_BLOCKS = 1 # allow 0~3
FREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone
USE_07_METRIC = True
ADD_BOX_IN_TENSORBOARD = True
MUTILPY_BIAS_GRADIENT = 2.0 # if None, will not multipy
GRADIENT_CLIPPING_BY_NORM = 10.0 # if None, will not clip
CLS_WEIGHT = 1.0
REG_WEIGHT = 1.0
USE_IOU_FACTOR = False
BATCH_SIZE = 1
EPSILON = 1e-5
MOMENTUM = 0.9
LR = 5e-4
DECAY_STEP = [SAVE_WEIGHTS_INTE*12, SAVE_WEIGHTS_INTE*16, SAVE_WEIGHTS_INTE*20]
MAX_ITERATION = SAVE_WEIGHTS_INTE*20
WARM_SETP = int(1.0 / 4.0 * SAVE_WEIGHTS_INTE)
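# Presumably consumed by the training loop: the LR warms up for a quarter of one save interval,
# then step-decays at 12x, 16x and 20x SAVE_WEIGHTS_INTE.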
# -------------------------------------------- Dataset
DATASET_NAME = 'DOTA' # 'pascal', 'coco'
PIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
PIXEL_MEAN_ = [0.485, 0.456, 0.406]
PIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
IMG_SHORT_SIDE_LEN = 800
IMG_MAX_LENGTH = 800
CLASS_NUM = 15
IMG_ROTATE = False
RGB2GRAY = False
VERTICAL_FLIP = False
HORIZONTAL_FLIP = True
IMAGE_PYRAMID = False
# --------------------------------------------- Network
SUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)
SUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)
PROBABILITY = 0.01
FINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))
WEIGHT_DECAY = 1e-4
USE_GN = False
NUM_SUBNET_CONV = 4
NUM_REFINE_STAGE = 1
USE_RELU = False
FPN_CHANNEL = 256
FPN_MODE = 'fpn'
# --------------------------------------------- Anchor
LEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE = [8, 16, 32, 64, 128]
ANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]
ANCHOR_RATIOS = [1, 1 / 2, 2., 1 / 3., 3., 5., 1 / 5.]
ANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]
ANCHOR_SCALE_FACTORS = None
USE_CENTER_OFFSET = True
METHOD = 'H'
USE_ANGLE_COND = False
ANGLE_RANGE = 90
# -------------------------------------------- Head
SHARE_NET = True
USE_P5 = True
IOU_POSITIVE_THRESHOLD = 0.5
IOU_NEGATIVE_THRESHOLD = 0.4
REFINE_IOU_POSITIVE_THRESHOLD = [0.6, 0.7]
REFINE_IOU_NEGATIVE_THRESHOLD = [0.5, 0.6]
NMS = True
NMS_IOU_THRESHOLD = 0.1
MAXIMUM_DETECTIONS = 100
FILTERED_SCORE = 0.05
VIS_SCORE = 0.4
|
py | 1a428374d84ba1cee644b671a6f22d686ce15232 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import time
from typing import Any, Dict, Optional
import requests
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.plexus.hooks.plexus import PlexusHook
logger = logging.getLogger(__name__)
class PlexusJobOperator(BaseOperator):
"""
Submits a Plexus job.
:param job_params: parameters required to launch a job.
:type job_params: dict
Required job parameters are the following
- "name": job name created by user.
- "app": name of the application to run. found in Plexus UI.
- "queue": public cluster name. found in Plexus UI.
- "num_nodes": number of nodes.
- "num_cores": number of cores per node.
"""
def __init__(self, job_params: Dict, **kwargs) -> None:
super().__init__(**kwargs)
self.job_params = job_params
self.required_params = {"name", "app", "queue", "num_cores", "num_nodes"}
self.lookups = {
"app": ("apps/", "id", "name"),
"billing_account_id": ("users/{}/billingaccounts/", "id", None),
"queue": ("queues/", "id", "public_name"),
}
self.job_params.update({"billing_account_id": None})
self.is_service = None
def execute(self, context: Any) -> Any:
hook = PlexusHook()
params = self.construct_job_params(hook)
if self.is_service is True:
if self.job_params.get("expected_runtime") is None:
end_state = "Running"
else:
end_state = "Finished"
elif self.is_service is False:
end_state = "Completed"
else:
raise AirflowException(
"Unable to determine if application "
"is running as a batch job or service. "
"Contact Core Scientific AI Team."
)
logger.info("creating job w/ following params: %s", params)
jobs_endpoint = hook.host + "jobs/"
headers = {"Authorization": f"Bearer {hook.token}"}
create_job = requests.post(jobs_endpoint, headers=headers, data=params, timeout=5)
if create_job.ok:
job = create_job.json()
jid = job["id"]
state = job["last_state"]
while state != end_state:
time.sleep(3)
jid_endpoint = jobs_endpoint + f"{jid}/"
get_job = requests.get(jid_endpoint, headers=headers, timeout=5)
if not get_job.ok:
raise AirflowException(
"Could not retrieve job status. "
f"Status Code: [{get_job.status_code}]. Reason: {get_job.reason} - {get_job.text}"
)
new_state = get_job.json()["last_state"]
if new_state in ("Cancelled", "Failed"):
raise AirflowException(f"Job {new_state}")
elif new_state != state:
logger.info("job is %s", new_state)
state = new_state
else:
raise AirflowException(
"Could not start job. "
f"Status Code: [{create_job.status_code}]. Reason: {create_job.reason} - {create_job.text}"
)
def _api_lookup(self, param: str, hook):
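# Resolves a human-readable value (e.g. an app or queue name) to the id the Plexus API expects,
# using the endpoint/key mapping defined in self.lookups.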
lookup = self.lookups[param]
key = lookup[1]
mapping = None if lookup[2] is None else (lookup[2], self.job_params[param])
if param == "billing_account_id":
endpoint = hook.host + lookup[0].format(hook.user_id)
else:
endpoint = hook.host + lookup[0]
headers = {"Authorization": f"Bearer {hook.token}"}
response = requests.get(endpoint, headers=headers, timeout=5)
results = response.json()["results"]
v = None
if mapping is None:
v = results[0][key]
else:
for dct in results:
if dct[mapping[0]] == mapping[1]:
v = dct[key]
if param == 'app':
self.is_service = dct['is_service']
if v is None:
raise AirflowException(f"Could not locate value for param:{key} at endpoint: {endpoint}")
return v
def construct_job_params(self, hook: Any) -> Dict[Any, Optional[Any]]:
"""
Creates job_params dict for api call to
launch a Plexus job.
Some parameters required to launch a job
are not available to the user in the Plexus
UI. For example, an app id is required, but
only the app name is provided in the UI.
This function acts as a backend lookup
of the required param value using the
user-provided value.
:param hook: plexus hook object
:type hook: airflow hook
"""
missing_params = self.required_params - set(self.job_params)
if len(missing_params) > 0:
raise AirflowException(f"Missing the following required job_params: {', '.join(missing_params)}")
params = {}
for prm in self.job_params:
if prm in self.lookups:
v = self._api_lookup(param=prm, hook=hook)
params[prm] = v
else:
params[prm] = self.job_params[prm]
return params
|
py | 1a4283f4d6b64201e58de43ded5210ef7bd9d1b8 | from openshift import Openshift
from command import Command
import re
import requests
import time
class NodeJSApp(object):
nodesj_app_image = "quay.io/pmacik/nodejs-rest-http-crud"
api_end_point = 'http://{route_url}/api/status/dbNameCM'
openshift = Openshift()
pod_name_pattern = "{name}.*$(?<!-build)"
name = ""
namespace = ""
def __init__(self, name, namespace):
self.cmd = Command()
self.name = name
self.namespace = namespace
def is_running(self, wait=False):
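# The app counts as running only when both a Running pod and a matching deployment exist;
# with wait=True the pod lookup blocks until the pod appears (up to 180s).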
deployment_flag = False
if wait:
pod_name = self.openshift.wait_for_pod(self.get_pod_name_pattern(), self.namespace, timeout=180)
else:
pod_name = self.openshift.search_pod_in_namespace(self.get_pod_name_pattern(), self.namespace)
if pod_name is not None:
application_pod_status = self.openshift.check_pod_status(pod_name, self.namespace, wait_for_status="Running")
print("The pod {} is running: {}".format(pod_name, application_pod_status))
deployment = self.openshift.search_resource_in_namespace("deployments", f"{self.name}.*", self.namespace)
if deployment is not None:
print("deployment is {}".format(deployment))
deployment_flag = True
if application_pod_status and deployment_flag:
return True
else:
return False
else:
return False
def install(self):
create_new_app_output, exit_code = self.cmd.run(f"oc new-app --docker-image={self.nodesj_app_image} --name={self.name} -n {self.namespace}")
assert exit_code == 0, f"Non-zero exit code ({exit_code}) returned when attempting to create a new app: {create_new_app_output}"
assert re.search(f'imagestream.image.openshift.io.*{self.name}.*created',
create_new_app_output) is not None, f"Unable to create imagestream: {create_new_app_output}"
assert re.search(f'deployment.apps.*{self.name}.*created',
create_new_app_output) is not None, f"Unable to create deployment: {create_new_app_output}"
assert re.search(f'service.*{self.name}.*created',
create_new_app_output) is not None, f"Unable to create service: {create_new_app_output}"
assert self.openshift.expose_service_route(self.name, self.namespace) is not None, "Unable to expose service route"
return self.is_running(wait=True)
def get_db_name_from_api(self, interval=5, timeout=60):
route_url = self.openshift.get_route_host(self.name, self.namespace)
if route_url is None:
return None
start = 0
while ((start + interval) <= timeout):
db_name = requests.get(url=self.api_end_point.format(route_url=route_url))
if db_name.status_code == 200:
return db_name.text
time.sleep(interval)
start += interval
return None
def get_observed_generation(self):
return self.openshift.get_resource_info_by_jsonpath("deployment", self.name, self.namespace, "{.status.observedGeneration}")
def get_running_pod_name(self, interval=5, timeout=60):
start = 0
while ((start + interval) <= timeout):
pod_list = self.openshift.get_pod_lst(self.namespace)
for pod in pod_list.split(" "):
if re.fullmatch(self.get_pod_name_pattern(), pod):
if self.openshift.get_pod_status(pod, self.namespace) == "Running":
return pod
time.sleep(interval)
start += interval
return None
def get_redeployed_pod_name(self, old_pod_name, interval=5, timeout=60):
start = 0
while ((start + interval) <= timeout):
pod_list = self.openshift.get_pod_lst(self.namespace)
for pod in pod_list.split(" "):
if pod != old_pod_name and re.fullmatch(self.get_pod_name_pattern(), pod):
if self.openshift.get_pod_status(pod, self.namespace) == "Running":
return pod
time.sleep(interval)
start += interval
return None
def get_pod_name_pattern(self):
return self.pod_name_pattern.format(name=self.name)
|
py | 1a42847e97e0a9a07c9df3eae11c1c51e6fa56cc | import logging
import torch
import torch.nn.functional as F
from torch import nn
from predict_pv_yield.models.base_model import BaseModel
logging.basicConfig()
_LOG = logging.getLogger("predict_pv_yield")
class Model(BaseModel):
name = "conv3d_sat_nwp"
def __init__(
self,
include_pv_yield: bool = True,
include_nwp: bool = True,
include_time: bool = True,
forecast_minutes: int = 30,
history_minutes: int = 60,
number_of_conv3d_layers: int = 4,
conv3d_channels: int = 32,
image_size_pixels: int = 64,
number_sat_channels: int = 12,
number_nwp_channels: int = 10,
fc1_output_features: int = 128,
fc2_output_features: int = 128,
fc3_output_features: int = 64,
output_variable: str = "pv_yield",
):
"""
3d conv model, that takes in different data streams
architecture is roughly
1. satellite image time series goes into many 3d convolution layers.
2. nwp time series goes into many 3d convolution layers.
3. The final convolutional layer feeds a fully connected layer. This is joined by other data inputs like
- pv yield
- time variables
Then there are ~4 fully connected layers which end up forecasting the pv yield / gsp into the future
include_pv_yield: include pv yield data
include_nwp: include nwp data
include_time: include hour of data, and day of year as sin and cos components
forecast_minutes: the number of minutes that should be forecast
history_minutes: the number of historical minutes that are used
number_of_conv3d_layers: number of 3d convolution layers that are used
conv3d_channels: the number of 3d convolution channels
image_size_pixels: the input satellite image size
number_sat_channels: number of satellite channels
fc1_output_features: number of output nodes of the first fully connected layer
fc2_output_features: number of output nodes of the second fully connected layer
fc3_output_features: number of output nodes of the third fully connected layer
output_variable: the output variable to be predicted
number_nwp_channels: The number of nwp channels there are
"""
self.include_pv_yield = include_pv_yield
self.include_nwp = include_nwp
self.include_time = include_time
self.number_of_conv3d_layers = number_of_conv3d_layers
self.number_of_nwp_features = 128
self.fc1_output_features = fc1_output_features
self.fc2_output_features = fc2_output_features
self.fc3_output_features = fc3_output_features
self.forecast_minutes = forecast_minutes
self.history_minutes = history_minutes
self.output_variable = output_variable
self.number_nwp_channels = number_nwp_channels
super().__init__()
conv3d_channels = conv3d_channels
self.cnn_output_size = (
conv3d_channels
* ((image_size_pixels - 2 * self.number_of_conv3d_layers) ** 2)
* (self.forecast_len_5 + self.history_len_5 + 1 - 2 * self.number_of_conv3d_layers)
)
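# Each unpadded 3x3x3 convolution shrinks the spatial and temporal dimensions by 2, hence the
# "- 2 * number_of_conv3d_layers" terms in the flattened CNN output size above.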
# conv0
self.sat_conv0 = nn.Conv3d(
in_channels=number_sat_channels,
out_channels=conv3d_channels,
kernel_size=(3, 3, 3),
padding=0,
)
for i in range(0, self.number_of_conv3d_layers - 1):
layer = nn.Conv3d(
in_channels=conv3d_channels, out_channels=conv3d_channels, kernel_size=(3, 3, 3), padding=0
)
setattr(self, f"sat_conv{i + 1}", layer)
self.fc1 = nn.Linear(in_features=self.cnn_output_size, out_features=self.fc1_output_features)
self.fc2 = nn.Linear(in_features=self.fc1_output_features, out_features=self.fc2_output_features)
# nwp
if include_nwp:
self.nwp_conv0 = nn.Conv3d(
in_channels=number_nwp_channels,
out_channels=conv3d_channels,
kernel_size=(3, 3, 3),
padding=0,
)
for i in range(0, self.number_of_conv3d_layers - 1):
layer = nn.Conv3d(
in_channels=conv3d_channels, out_channels=conv3d_channels, kernel_size=(3, 3, 3), padding=0
)
setattr(self, f"nwp_conv{i + 1}", layer)
self.nwp_fc1 = nn.Linear(in_features=self.cnn_output_size, out_features=self.fc1_output_features)
self.nwp_fc2 = nn.Linear(in_features=self.fc1_output_features, out_features=self.number_of_nwp_features)
fc3_in_features = self.fc2_output_features
if include_pv_yield:
fc3_in_features += self.number_of_samples_per_batch * (self.history_len_30 + 1)
if include_nwp:
fc3_in_features += 128
if include_time:
fc3_in_features += 4
self.fc3 = nn.Linear(in_features=fc3_in_features, out_features=self.fc3_output_features)
self.fc4 = nn.Linear(in_features=self.fc3_output_features, out_features=self.forecast_len)
# self.fc5 = nn.Linear(in_features=32, out_features=8)
# self.fc6 = nn.Linear(in_features=8, out_features=1)
def forward(self, x):
# ******************* Satellite imagery *************************
# Shape: batch_size, seq_length, width, height, channel
sat_data = x["sat_data"]
batch_size, seq_len, width, height, n_chans = sat_data.shape
# Conv3d expects channels to be the 2nd dim, https://pytorch.org/docs/stable/generated/torch.nn.Conv3d.html
sat_data = sat_data.permute(0, 4, 1, 3, 2)
# Now shape: batch_size, n_chans, seq_len, height, width
# :) Pass data through the network :)
out = F.relu(self.sat_conv0(sat_data))
for i in range(0, self.number_of_conv3d_layers - 1):
layer = getattr(self, f"sat_conv{i + 1}")
out = F.relu(layer(out))
out = out.reshape(batch_size, self.cnn_output_size)
# Fully connected layers
out = F.relu(self.fc1(out))
out = F.relu(self.fc2(out))
# which has shape (batch_size, 128)
# add pv yield
if self.include_pv_yield:
pv_yield_history = x[self.output_variable][:, : self.history_len_30 + 1].nan_to_num(nan=0.0)
pv_yield_history = pv_yield_history.reshape(
pv_yield_history.shape[0], pv_yield_history.shape[1] * pv_yield_history.shape[2]
)
# join up
out = torch.cat((out, pv_yield_history), dim=1)
# *********************** NWP Data ************************************
if self.include_nwp:
# Shape: batch_size, channel, seq_length, width, height
nwp_data = x["nwp"]
out_nwp = F.relu(self.nwp_conv0(nwp_data))
for i in range(0, self.number_of_conv3d_layers - 1):
layer = getattr(self, f"nwp_conv{i + 1}")
out_nwp = F.relu(layer(out_nwp))
# fully connected layers
out_nwp = out_nwp.reshape(batch_size, self.cnn_output_size)
out_nwp = F.relu(self.nwp_fc1(out_nwp))
out_nwp = F.relu(self.nwp_fc2(out_nwp))
# join with other FC layer
out = torch.cat((out, out_nwp), dim=1)
# ########## include time variables #########
if self.include_time:
# just take the value now
x_sin_hour = x["hour_of_day_sin"][:, self.history_len_5 + 1].unsqueeze(dim=1)
x_cos_hour = x["hour_of_day_cos"][:, self.history_len_5 + 1].unsqueeze(dim=1)
x_sin_day = x["day_of_year_sin"][:, self.history_len_5 + 1].unsqueeze(dim=1)
x_cos_day = x["day_of_year_cos"][:, self.history_len_5 + 1].unsqueeze(dim=1)
# join up
out = torch.cat((out, x_sin_hour, x_cos_hour, x_sin_day, x_cos_day), dim=1)
# Fully connected layers.
out = F.relu(self.fc3(out))
out = self.fc4(out)
out = out.reshape(batch_size, self.forecast_len)
return out
|
py | 1a4284a667c99e624942b757e09ab1c40984f2d7 | from ffai.web.api import *
import numpy as np
import time
class MyRandomBot(Agent):
def __init__(self, name):
super().__init__(name)
self.my_team = None
self.actions_taken = 0
def new_game(self, game, team):
self.my_team = team
self.actions_taken = 0
def act(self, game):
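# Sample random available actions until one is drawn that is not PLACE_PLAYER.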
while True:
action_choice = np.random.choice(game.state.available_actions)
if action_choice.action_type != ActionType.PLACE_PLAYER:
break
pos = np.random.choice(action_choice.positions) if len(action_choice.positions) > 0 else None
player = np.random.choice(action_choice.players) if len(action_choice.players) > 0 else None
action = Action(action_choice.action_type, pos=pos, player=player)
self.actions_taken += 1
return action
def end_game(self, game):
winner = game.get_winning_team()
print("Casualties: ", game.num_casualties())
if winner is None:
print("It's a draw")
elif winner == self.my_team:
print("I ({}) won".format(self.name))
else:
print("I ({}) lost".format(self.name))
print("I took", self.actions_taken, "actions")
if __name__ == "__main__":
# Load configurations, rules, arena and teams
config = get_config("ff-11.json")
#config.competition_mode = False
# config = get_config("ff-7.json")
# config = get_config("ff-5.json")
# config = get_config("ff-3.json")
ruleset = get_rule_set(config.ruleset, all_rules=False) # We don't need all the rules
arena = get_arena(config.arena)
home = get_team_by_id("human-1", ruleset)
away = get_team_by_id("human-2", ruleset)
# Play 100 games
for i in range(10):
away_agent = MyRandomBot("Random Bot 1")
home_agent = MyRandomBot("Random Bot 2")
config.debug_mode = False
game = Game(i, home, away, home_agent, away_agent, config, arena=arena, ruleset=ruleset)
game.config.fast_mode = True
print("Starting game", (i+1))
start = time.time()
game.init()
game.step()
end = time.time()
print(end - start)
|
py | 1a4285b4de50802553741969fb5aa6cbee04d66b | from django.apps import AppConfig
class CorecodeConfig(AppConfig):
name = 'corecode'
|
py | 1a428656408dbe596d5bab6fc7c1411c03800d06 | # -*- encoding: utf-8 -*-
import builtins
import unittest
import unittest.mock
import pytest
from common.utils.backend import Backend
class BackendStub(Backend):
def __init__(self):
self.__class__ = Backend
def setup_load_model_mocks(openMock, pickleLoadMock, seed, idx, budget):
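# Patches open() and pickle.load so that only the expected "<seed>.<idx>.<budget>.model" path
# yields the sentinel model object.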
model_path = "/runs/%s_%s_%s/%s.%s.%s.model" % (seed, idx, budget, seed, idx, budget)
file_handler = "file_handler"
expected_model = "model"
fileMock = unittest.mock.MagicMock()
fileMock.__enter__.return_value = file_handler
openMock.side_effect = (
lambda path, flag: fileMock if path == model_path and flag == "rb" else None
)
pickleLoadMock.side_effect = lambda fh: expected_model if fh == file_handler else None
return expected_model
@pytest.fixture
def backend_stub():
backend = BackendStub()
backend.internals_directory = "/"
return backend
@unittest.mock.patch("pickle.load")
@unittest.mock.patch("os.path.exists")
def test_load_model_by_seed_and_id(exists_mock, pickleLoadMock, backend_stub):
exists_mock.return_value = False
open_mock = unittest.mock.mock_open(read_data="Data")
with unittest.mock.patch(
"common.utils.backend.open",
open_mock,
create=True,
):
seed = 13
idx = 17
budget = 50.0
expected_model = setup_load_model_mocks(
open_mock,
pickleLoadMock,
seed,
idx,
budget,
)
actual_model = backend_stub.load_model_by_seed_and_id_and_budget(seed, idx, budget)
assert expected_model == actual_model
@unittest.mock.patch("pickle.load")
@unittest.mock.patch.object(builtins, "open")
@unittest.mock.patch("os.path.exists")
def test_loads_models_by_identifiers(exists_mock, openMock, pickleLoadMock, backend_stub):
exists_mock.return_value = True
seed = 13
idx = 17
budget = 50.0
expected_model = setup_load_model_mocks(openMock, pickleLoadMock, seed, idx, budget)
expected_dict = {(seed, idx, budget): expected_model}
actual_dict = backend_stub.load_models_by_identifiers([(seed, idx, budget)])
assert isinstance(actual_dict, dict)
assert expected_dict == actual_dict
|
py | 1a42873b1d4619fa816ba681de4a614e6426ea94 | import os
import pypandoc
import json
import yaml
import re
import datetime
from .config import Config
from pypandoc.pandoc_download import download_pandoc
from pylatex import Document, Description
from pylatex.section import Chapter
from pylatex.utils import *
class Cover:
artist = None
title = None
year = None
medium = None
musium = None
location = None
license = None
# Goya, Francisco. The Family of Charles IV. 1800, oil on canvas, Museo del Prado, Madrid.
def __init__(self, cover):
for dic in cover:
key = list(dic.keys())[0]
try:
setattr(self, key, dic[key])
except:
print('error', key)
return
def export_citation(self):
firstname = self.artist.split(' ')[0]
lastname = ' '.join(self.artist.split(' ')[1:])
name = ', '.join([firstname, lastname])
return '{name}. {title}, {year}, {medium}, {musium}, {location}'.format(
name = name,
title = '\\textit{%s}'%self.title,
year = self.year,
medium = self.medium,
musium = self.musium,
location= self.location
)
class Content:
title = None
layout = None
latex = None
type = 'mainmatter'
filename = None
endnote = None
sample = False
def __init__(self, content):
for key in content:
try:
setattr(self, key, content[key])
except:
print(key)
self.latex = self.convert_latex()
return
def convert_latex(self):
filepath = os.path.join(Config().manuscript_dir, self.filename+'.md')
return pypandoc.convert_file( filepath, 'latex', extra_args=[
'--data-dir='+os.path.join(os.getcwd(), 'BartlebyMachine', '.pandoc'),
'--wrap=none',
'--variable',
'documentclass=book',
])
def write_latex(self):
output_path = os.path.join(Config().manuscript_dir, 'tex', self.filename) + '.tex';
f = open(output_path, 'w', encoding='utf-8')
f.write(self.latex)
f.close()
class TableOfContent:
title = None
author = None
dateOfPublished = None
cover = None
license = None
content = []
sample = False
def __init__(self, toc):
for key in toc:
try:
if(key == 'content'):
content = list(map(lambda x: Content(x), toc[key]))
toc[key] = content
if(key == 'cover'):
toc[key] = Cover(toc[key])
setattr(self, key, toc[key])
except:
print(key)
def export_content(self):
concat = []
for content in self.content:
if self.sample == True and content.sample == False:
continue
if content.type == 'mainmatter':
str = '\\\\begin{{{layout}}}\n{latex}\n\end{{{layout}}}'.format(latex=content.latex.replace('\\\r\n', '\\\\\n'), layout=content.layout);
concat.append(str)
return '\n'.join(concat)
def export_preface(self):
if self.sample == True:
prefaces = list(filter(lambda x: x.type == 'preface' and x.sample == True, self.content))
else:
prefaces = list(filter(lambda x: x.type == 'preface', self.content))
return '\n'.join(list(map(lambda x: x.latex, prefaces)))
def export_endpaper(self):
options = ['itemsep=1pt', 'parsep=1pt']
book = Description(options=options)
book.add_item('제목', self.title)
book.add_item('저자', self.author)
book.add_item('편집', '미루')
book.add_item('디자인', '써드엔지니어링카르텔')
book.add_item('출간일', '2018-06-01')
publisher = Description(options=options)
publisher.add_item('출판', '금치산자레시피')
publisher.add_item('웹사이트', 'http://gtszrcp.com')
cover = Description(options=options)
cover.add_item('표지', NoEscape(self.cover.export_citation()))
cover.add_item('표지 그림 저작권', self.cover.license)
license = Description(options=options)
license.add_item('저작권', NoEscape('이 책에 수록된 저작물 중 별도로 표기되지 않은 모든 저작물의 저작권은 저자에게 있습니다. %s에 의해 이용할 수 있습니다.'%italic(self.license)))
license.add_item('', '이 책은 BartlebyMachine으로 제작되었습니다.')
endpaper = map(lambda x: x.dumps().replace('\\', '\\\\'), [
book, publisher, cover, license
])
return '\n'.join(list(endpaper))
def export_appendix(self):
appendix = []
appendix.append(Chapter('참조'))
content = Description()
endnotes = list(filter(lambda x: x.endnote != None, self.content))
for note in endnotes:
content.add_item(note.title, note.endnote)
appendix.append(content)
appendix = list(map(lambda x: x.dumps().replace('\\', '\\\\'), appendix))
return '\n'.join(appendix)
class Bartleby:
toc = None
manuscripts = None
overcite = None
orphan = None
config = None
sample = False
def __init__(self):
self.manuscripts = list(filter(
lambda x: os.path.isdir(os.path.join(Config().manuscript_dir, x)) == False,
os.listdir(Config().manuscript_dir)
))
self.toc = [];
def write_latex(self):
latex = self.replace_template()
filename = 'ggded.tex'
if self.sample == True:
filename = 'ggded.sample.tex'
f = open(filename, 'w', encoding='utf-8')
f.write(latex)
f.close()
return
def replace_template(self):
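# Fills the LaTeX template by regex-substituting each <<placeholder>> with the corresponding generated content.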
template = Config().template
book = []
if self.sample == True:
self.toc.sample = True
self.toc.title = self.toc.title + ' 샘플북'
replaces = [
(re.compile('<<content>>'), self.toc.export_content()),
(re.compile('<<author>>'), self.toc.author),
(re.compile('<<title>>'), self.toc.title),
(re.compile('<<date>>'), datetime.datetime.strptime(self.toc.dateOfPublished, '%Y-%m-%d').strftime('%Y')),
(re.compile('<<preface>>'), self.toc.export_preface()),
(re.compile('<<endpaper>>'), self.toc.export_endpaper()),
(re.compile('<<endnotes>>'), self.toc.export_appendix()),
]
for replace in replaces:
if replace[0].findall(template):
template = replace[0].sub(replace[1], template)
return template
def md_to_latex(self):
result = False
for content in self.toc.content:
content.write_latex()
return result
def add_toc(self, filename):
result = False
file = os.path.join(os.getcwd(), filename)
if os.path.exists(file) == False:
return result
with open(file, encoding='utf-8') as toc_file:
toc = yaml.load(toc_file)
result = True
self.toc = TableOfContent(toc)
return result
def manuscriptCount(self):
result = False
cite = {}
entries = []
if self.toc == None:
return result
for toc in self.toc:
for entry in toc.content:
entries.append(entry.filename)
for script in self.manuscripts:
needle = script.split('.')[0]
cite[needle] = entries.count(needle)
return cite
def manuscriptStatus(self):
self.orphan = []
self.overcite = []
for script in self.manuscripts:
cnt = list(
map(lambda x: '%s.md'%x.filename, self.toc.content)
).count(script)
if(cnt < 1):
self.orphan.append(script)
if(cnt > 1):
self.overcite.append(script)
return True
|
py | 1a42874e3e33c2af102215fbae943a96303460f1 | # -*- coding: utf-8 -*-
"""A clock based on LSL local_clock
"""
from pylsl import local_clock
class Clock():
"""allows to keep track of time
reset the clock with :meth:`~.reset`, and get the time since this reset with :meth:`~.now`. Measure intermediate times with :meth:`~.tick`, which return time since last call of :meth:`tick`, or
each call of :meth:`~.tick` updates the cumulative time since the last call of :meth:`reset`. If you don't want to add the time measured, use :meth:`~.pause`
"""
def __init__(self):
"create a new instance requiring no arguments"
self._error = 0.
self.reset()
def time(self):
"time in seconds (usually since computer boot) according to LSL"
return local_clock()
def tick(self) -> float:
"""time since last call of :meth:`~.tick`
returns
-------
delta_t: float
time passed in seconds since the last call of :meth:`~.tick` or
:meth:`~.pause`. This is time counting into the cumulative time
"""
ts = self.time()
delta_t = ts - self.last_tick
self.last_tick = ts
return delta_t
def sleep(self, duration: float):
"""Sleep for duration in seconds
blocking is slightly more accurate than non-blocking sleep, e.g. as available with :meth:`time.sleep` from the stdlib. There is one disadvantage of this kind of busy sleep: it can cause slight oversleeping, as there is an overhead of the function being called and returning, which is not accounted for. See :meth:`~.sleep_debiased` for an alternative sleep with asymptotic minimisation of the error.
args
----
duration: float
how many seconds to sleep blocking
returns
-------
duration: float
the time in seconds spent sleeping
"""
t1 = t0 = self.time()
dt = 0
while dt <= duration:
t1 = self.time()
dt = t1-t0
return dt
def sleep_debiased(self, duration: float):
"""Sleep for duration in seconds with attempts for debiasing`
sometimes, you execute some other commands, and these commands have a variable runtime. If we would naively sleep everytime for n seconds afterwards, we would inherit this jitter. By using :meth:`~.tick` before these commands, and :meth:`~.sleep_debiased` after these commands, we can normalize the runtime to a fixed period (as long as the sleep duration is longer than the runtime of the commands).
Additionally, this function keeps track of any oversleeping or undersleeping, and will minimize the temporal error asymptotically
returns
-------
duration: float
the time in seconds spent sleeping since the last call
of :meth:`~.tick` or :meth:`~.sleep_debiased`.
Example
-------
This example shows how we can regularize the time spent in each cycle to 200ms in spite of there being an element of random runtime
.. code-block:: python
import time
import random
from reiz.time import Clock
clock = Clock()
t = 0.
msg = "{0:3.5f}, {1:3.5f}, {2:3.5f}, slept for {3:3.5f}s"
for i in range(1, 11):
time.sleep(random.random()/10)
dt = clock.sleep_debiased(0.2)
t += dt
print(msg.format(i*0.2, clock.now(), t, dt))
"""
bias = self.tick()
dt = self.sleep(duration - bias - self._error)
self._error += dt + bias - duration
tick_bias = self.tick()-dt
self._error += tick_bias
return dt + bias + tick_bias
def reset(self):
"""reset the clock
resets the counter keeping track of the cumulative time spend since instantiaion or the last call of :meth:`~.reset`
"""
self._t0 = self.last_tick = self.time()
def now(self):
"""return the cumulative time passed since the last call of :meth:`reset`
"""
return self.time()-self._t0
clock = Clock() #: a default :class:`.Clock` instance ready for your experiment
|
py | 1a428873da424f6747533522e4a970033b869cfd | # preprocessing.py
"""
parses MIMIC-CXR radiology reports from data/mimic-cxr-reports/ into data/train.csv and data/test.csv
train.csv contains two columns with each column wrapped in double quotes; the first column contains
the input text (radiology examination, technique, comparison, and findings) while the second
column contains the output text (impressions). All reports without the term "IMPRESSIONS:" are ommitted
test.csv has the same structure as train.csv.
The processing also lematizes all of the terms using nltk and strips whitespace.
REQUIREMENTS:
- data/mimic-cxr-reports/*
- data/cxr-study-list.csv.gz
- overwrite data/train.csv
- overwrite data/test.csv
"""
import os;
import pandas as pd;
import re
TEST_FRACTION = 0.1 # fraction for test set
VALIDATION_FRACTION = 0.1
ROOT = os.path.dirname( os.path.abspath(__file__) );
LIST_FILE = os.path.join(ROOT, 'data', 'cxr-study-list.csv.gz');
REPORTS_DIR = os.path.join(ROOT, 'data', 'mimic-cxr-reports');
TRAIN_FILE = os.path.join(ROOT, 'data', 'train.csv');
TEST_FILE = os.path.join(ROOT, 'data', 'test.csv');
VALIDATION_FILE = os.path.join(ROOT, 'data', 'validation.csv');
def remove_notification_section(text):
"""
We noticed that some reports have a notification section after
the impressions (summary) section, which was impeding our data, so
we decided to remove this section all together. We use various rule-based
mechanisms to parse and remove the notification section.
params: text
returns: text with notification section removed
"""
idx = text.rfind("NOTIFICATION");
if( idx > 0 ):
text = text[:idx];
idx = text.rfind("telephone notification");
if( idx > 0 ):
text = text[:idx];
idx = text.rfind("Telephone notification");
if( idx > 0 ):
text = text[:idx];
idx = text.rfind("These findings were");
if( idx > 0 ):
text = text[:idx];
idx = text.rfind("Findings discussed");
if( idx > 0 ):
text = text[:idx];
idx = text.rfind("Findings were");
if( idx > 0 ):
text = text[:idx];
idx = text.rfind("This preliminary report");
if( idx > 0 ):
text = text[:idx];
idx = text.rfind("Reviewed with");
if( idx > 0 ):
text = text[:idx];
idx = text.rfind("A preliminary read");
if( idx > 0 ):
text = text[:idx];
return(text);
def sanitize(text):
"""
Cleanses the text to be written in CSV, which will be fed directly to
the summarizer. Tokenization and lemmatization is not performed in this
step, as the summarizer performs those directly.
params: text
returns: cleaned text
"""
text = text.strip();
text = re.sub("\n", "", text);
text = re.sub(",", "", text);
# Remove all text before FINDINGS: section
regex = r'^(.*finding.?:)'
if( re.search(regex, text, flags=re.IGNORECASE)==None ): #if no summary
return None;
text = re.sub(regex,"", text, flags=re.IGNORECASE);
text = remove_notification_section(text);
return(text);
def split(slicable, fraction):
"""
splits data into test-train set or dev-validation set; does not shuffle.
params: slicable - an object that responds to len() and [], works on dataframes
fraction - a value between 0 and 1
returns: (x, y) - where x has (1-fraction) percent entries and y has the rest
"""
partition = int(len(slicable) * (1.0 - fraction));
return( (slicable[:partition], slicable[partition:]) );
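# For example (hypothetical input), split(list(range(10)), 0.2) returns the first 8 items and the last 2 items; no shuffling is performed.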
def parse_summary(text):
"""
parses and separates input text from summary in cxr reports, returns None if
not found
params: text
returns: None or [input_text, summary]
"""
regex = r'impression.?(?::|" ")'
if( re.search(regex, text, flags=re.IGNORECASE)==None ): #if no summary
return None;
data = re.split(regex, text, flags=re.IGNORECASE);
data[0] = data[0].strip();
data[1] = data[1].strip();
return(data);
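# For example (hypothetical report text):
# parse_summary("Heart size is normal. IMPRESSION: No acute process.")
# -> ["Heart size is normal.", "No acute process."]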
def write_csv(filename, reports):
"""
writes a csv file for summarization. The CSV file has four columns: "subject_id",
"study_id", "findings", and "impression" based on MIMIC-CXR reports. "findings"
contains the input text, and "impression" contains the true summary.
params: filename - name of csv file to write, will overwrite if it exists
reports - dataframe of cxr reports from cxr-study-list file
"""
print(f"Writing {filename}...");
f = open(filename, 'w');
f.write(f"\"subject_id\",\"study_id\",\"findings\",\"impression\"\n");
ommitted = 0;
progress = 1;
for report in reports:
x = open(os.path.join(REPORTS_DIR, report));
text = x.read();
x.close();
text = sanitize(text);
if( text==None ):
ommitted += 1;
continue; #toss out data and go to next textfile
if (progress % 10000 == 0):
print(f'Read {progress} files so far...');
progress += 1;
data = parse_summary(text);
if( (data==None) or (data[0]=='') or (data[1]=='') ):
ommitted += 1;
continue; #toss out data and go to next textfile
folders = report.split('/');
f.write(f"\"{folders[2]}\",\"{folders[3].split('.')[0]}\",\"{data[0]}\",\"{data[1]}\"\n");
f.close();
print(f"Ommited {ommitted} files out of {progress} total files in dataset.\n")
print("Done.\n");
print("================ Starting data preprocessing ==================");
print(f"Reading {os.path.basename(LIST_FILE)}...");
radiology_reports = pd.read_csv(LIST_FILE)['path']; # file paths as pandas series
train_reports, test_reports = split(radiology_reports, TEST_FRACTION);
print("Done.");
# if you want validation set:
train_reports, validation_reports = split(train_reports, VALIDATION_FRACTION / (1 - TEST_FRACTION));
write_csv(VALIDATION_FILE, validation_reports);
# sanity check
#print(train_reports);
#print(validation_reports);
#print(test_reports);
write_csv(TRAIN_FILE, train_reports);
write_csv(TEST_FILE, test_reports);
print("==================== End data preprocessing ======================");
|
py | 1a428919a36cc9dcc4c095150596812a73b75e49 | import threading
import time
from datetime import datetime
import schedule
import atexit
import SocketServer
import BioControle
import syslog
# ------------------------------------------------------------------------
def display_jobs():
print('------------------------------------------------------------------')
for job in schedule.jobs:
print(job)
print('------------------------------------------------------------------')
print()
# ------------------------------------------------------------------------
def run_threaded(job_func, run_time):
job_thread = threading.Thread(target=job_func, args=[run_time])
job_thread.start()
# ------------------------------------------------------------------------
@atexit.register
def cleanup():
date_time = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
syslog.syslog(syslog.LOG_INFO, '%s: Cleaning up before exit ' % (date_time))
print('%s: Cleaning up before exit ' % (date_time))
pump.off()
circ.off()
fan.off()
userled.off()
def process_request(data, server, client_sock, bio):
#print(data[0])
s = data[0].decode()
date_time = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
syslog.syslog(syslog.LOG_INFO, '%s: Received Message: << %s >> ' % (date_time, s))
print('%s: Received Message: << %s >> ' % (date_time, s))
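# Requests arrive as raw HTTP GET lines, e.g. "GET /?pump=300 HTTP/1.1" (illustrative);
# the value after the parameter name is the run time in seconds.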
if 'GET /?circ=' in s:
result = int(s[s.find('circ=')+5:s.find(' HTTP')],10)
server.send_response(client_sock, "Circulating for {} seconds".format(result));
run_threaded(bio.run_circulate, result)
#bio.run_circulate(result)
if 'GET /?pump=' in s:
result = int(s[s.find('pump=')+5:s.find(' HTTP')])
server.send_response(client_sock, "Pumping for {} seconds".format(result));
run_threaded(bio.run_pump, result)
#bio.run_pump(result)
if 'GET /?fan=' in s:
result = int(s[s.find('fan=')+4:s.find(' HTTP')])
server.send_response(client_sock, "Fanning for {} seconds".format(result));
run_threaded(bio.run_fan, result)
#bio.run_fan(result)
if 'GET /?status' in s:
status = "<html><head></head><body>"
status = status + "<h2>Current Schedule</h2><hr><ul>"
for job in schedule.jobs:
status = status + "<li>" + str(job) + "</li>\n"
status = status + "</ul></body>"
server.send_response(client_sock, status)
# MAIN
# ------------------------------------------------------------------------
def main():
server = SocketServer.SocketServer()
bio = BioControle.BioControle()
pump_duration = 300
fan_duration = 200
circ_duration = 1800
# Pump schedule
schedule.every().day.at("08:00").do(run_threaded, bio.run_pump, run_time=pump_duration)
schedule.every().day.at("13:00").do(run_threaded, bio.run_pump, run_time=pump_duration)
schedule.every().day.at("18:00").do(run_threaded, bio.run_pump, run_time=pump_duration)
#schedule.every().day.at("19:25").do(run_threaded, bio.run_pump, run_time=pump_duration)
# Fan schedule
schedule.every(2).hours.do(run_threaded, bio.run_fan, run_time=fan_duration)
# Circulation pump schedule
schedule.every().day.at("07:45").do(run_threaded, bio.run_circulate, run_time=circ_duration)
schedule.every().day.at("12:45").do(run_threaded, bio.run_circulate, run_time=circ_duration)
schedule.every().day.at("17:45").do(run_threaded, bio.run_circulate, run_time=circ_duration)
# Job display schedule
#schedule.every(15).minutes.do(display_jobs)
display_jobs()
date_time = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
syslog.syslog('%s: Server Ready ' % (date_time))
print('%s: Server Ready ' % (date_time))
sleepTimer = 10;
while True:
try:
schedule.run_pending()
try:
(data, client_sock) = server.check_select()
if data:
process_request(data, server, client_sock, bio)
server.close_client(client_sock)
except:
pass
time.sleep(sleepTimer)
BioControle.userled.toggle()
except (KeyboardInterrupt, SystemExit):
cleanup()
exit()
# ------------------------------------------------------------------------
main()
print("exit") |
py | 1a42893df10728ccc390a9b8c156409c1f37890b | from django.apps import AppConfig
class ReviewConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'review'
|
py | 1a428981c67a7c3c5150600b1a026b81a9041153 | # Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from collections import Counter
import os.path
import shutil
import tempfile
import queue
import nose.tools
import numpy as np
import PIL.Image
from . import create_db
from digits import test_utils
test_utils.skipIfNotFramework('none')
class BaseTest():
"""
Provides some helpful files and utilities
"""
@classmethod
def setUpClass(cls):
cls.empty_file = tempfile.mkstemp()
cls.empty_dir = tempfile.mkdtemp()
# Create one good textfile
cls.good_file = tempfile.mkstemp()
# Create a color image
cls.color_image_file = tempfile.mkstemp(suffix='.png')
cls.numpy_image_color = np.ones((8, 10, 3), dtype='uint8')
cls.pil_image_color = PIL.Image.fromarray(cls.numpy_image_color)
cls.pil_image_color.save(cls.color_image_file[1])
# Create a grayscale image
cls.gray_image_file = tempfile.mkstemp(suffix='.png')
cls.numpy_image_gray = np.ones((8, 10), dtype='uint8')
cls.pil_image_gray = PIL.Image.fromarray(cls.numpy_image_gray)
cls.pil_image_gray.save(cls.gray_image_file[1])
cls.image_count = 0
for i in range(3):
for j in range(3):
print((type(cls.good_file[0]), type(cls.color_image_file[1])))
tmp = '%s %s\n' % (cls.color_image_file[1], i)
os.write(cls.good_file[0], tmp.encode(encoding='UTF-8'))
os.write(cls.good_file[0], tmp.encode(encoding='UTF-8'))
cls.image_count += 2
@classmethod
def tearDownClass(cls):
for f in cls.empty_file, cls.good_file, cls.color_image_file, cls.gray_image_file:
try:
os.close(f[0])
os.remove(f[1])
except OSError:
pass
try:
shutil.rmtree(cls.empty_dir)
except OSError:
raise
class TestFillLoadQueue(BaseTest):
def test_valid_file(self):
for shuffle in True, False:
yield self.check_valid_file, shuffle
def check_valid_file(self, shuffle):
q = queue.Queue()
result = create_db._fill_load_queue(self.good_file[1], q, shuffle)
assert result == self.image_count, 'lines not added'
assert q.qsize() == self.image_count, 'queue not full'
def test_empty_file(self):
for shuffle in True, False:
yield self.check_empty_file, shuffle
def check_empty_file(self, shuffle):
q = queue.Queue()
nose.tools.assert_raises(
create_db.BadInputFileError,
create_db._fill_load_queue,
self.empty_file[1], q, shuffle)
class TestParseLine():
def test_good_lines(self):
for label, line in [
(0, '/path/image.jpg 0'),
(1, 'image.jpg 1'),
(2, 'image.jpg 2\n'),
(3, 'image.jpg 3'),
(4, 'spaces in filename.jpg 4'),
]:
yield self.check_good_line, line, label
def check_good_line(self, line, label):
c = Counter()
p, l = create_db._parse_line(line, c)
assert l == label, 'parsed label wrong'
assert c[l] == 1, 'distribution is wrong'
def test_bad_lines(self):
for line in [
'nolabel.jpg',
'non-number.jpg five',
'negative.jpg -1',
]:
yield self.check_bad_line, line
def check_bad_line(self, line):
nose.tools.assert_raises(
create_db.ParseLineError,
create_db._parse_line,
line, Counter()
)
class TestCalculateBatchSize():
def test(self):
for count, batch_size in [
(1, 1),
(50, 50),
(100, 100),
(200, 100),
]:
yield self.check, count, batch_size
def check(self, count, batch_size):
assert create_db._calculate_batch_size(count) == batch_size
class TestCalculateNumThreads():
def test(self):
for batch_size, shuffle, num in [
(1000, True, 10),
(1000, False, 1),
(100, True, 10),
(100, False, 1),
(50, True, 7),
(4, True, 2),
(1, True, 1),
]:
yield self.check, batch_size, shuffle, num
def check(self, batch_size, shuffle, num):
assert create_db._calculate_num_threads(
batch_size, shuffle) == num
class TestInitialImageSum():
def test_color(self):
s = create_db._initial_image_sum(10, 10, 3)
assert s.shape == (10, 10, 3)
assert s.dtype == 'float64'
def test_grayscale(self):
s = create_db._initial_image_sum(10, 10, 1)
assert s.shape == (10, 10)
assert s.dtype == 'float64'
class TestImageToDatum(BaseTest):
def test(self):
for compression in None, 'png', 'jpg':
yield self.check_color, compression
yield self.check_grayscale, compression
def check_color(self, compression):
d = create_db._array_to_datum(self.numpy_image_color, 1, compression)
assert d.height == self.numpy_image_color.shape[0]
assert d.width == self.numpy_image_color.shape[1]
assert d.channels == 3
assert d.encoded == bool(compression)
def check_grayscale(self, compression):
d = create_db._array_to_datum(self.numpy_image_gray, 1, compression)
assert d.height == self.numpy_image_gray.shape[0]
assert d.width == self.numpy_image_gray.shape[1]
assert d.channels == 1
assert d.encoded == bool(compression)
class TestSaveMeans():
def test(self):
for color in True, False:
d = tempfile.mkdtemp()
for filename in 'mean.jpg', 'mean.png', 'mean.npy', 'mean.binaryproto':
yield self.check, d, filename, color
shutil.rmtree(d)
def check(self, directory, filename, color):
filename = os.path.join(directory, filename)
if color:
s = np.ones((8, 10, 3), dtype='float64')
else:
s = np.ones((8, 10), dtype='float64')
create_db._save_means(s, 2, [filename])
assert os.path.exists(filename)
class BaseCreationTest(BaseTest):
def test_image_sizes(self):
for width in 8, 12:
for channels in 1, 3:
yield self.check_image_sizes, width, channels, False
def check_image_sizes(self, width, channels, shuffle):
create_db.create_db(self.good_file[1], os.path.join(self.empty_dir, 'db'),
width, 10, channels, self.BACKEND)
def test_no_shuffle(self):
create_db.create_db(self.good_file[1], os.path.join(self.empty_dir, 'db'),
10, 10, 1, self.BACKEND, shuffle=False)
def test_means(self):
mean_files = []
for suffix in 'jpg', 'npy', 'png', 'binaryproto':
mean_files.append(os.path.join(self.empty_dir, 'mean.%s' % suffix))
create_db.create_db(self.good_file[1], os.path.join(self.empty_dir, 'db'),
10, 10, 1, self.BACKEND, mean_files=mean_files)
class TestLmdbCreation(BaseCreationTest):
BACKEND = 'lmdb'
class TestHdf5Creation(BaseCreationTest):
BACKEND = 'hdf5'
def test_dset_limit(self):
db_dir = os.path.join(self.empty_dir, 'db')
create_db.create_db(self.good_file[1], db_dir,
10, 10, 1, 'hdf5', hdf5_dset_limit=10 * 10)
with open(os.path.join(db_dir, 'list.txt')) as infile:
lines = infile.readlines()
assert len(lines) == self.image_count, '%d != %d' % (len(lines), self.image_count)
|
py | 1a428afe2dd333f26ec19a547cf7c7dbc8c81209 | # Copyright 2015 Broad Institute, all rights reserved.
#
# Much of the implementation is modelled on other backends in Matplotlib
# and the code is derivative and subject to their copyright.
import javabridge
import matplotlib
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.backend_bases import \
FigureManagerBase, NavigationToolbar2, cursors
import numpy as np
import os
class FigureCanvasSwing(FigureCanvasAgg):
def __init__(self, figure):
FigureCanvasAgg.__init__(self, figure)
self.__ref_id, self.__ref = javabridge.create_jref(self)
self.__cpython = javabridge.make_instance(
'org/cellprofiler/javabridge/CPython', '()V')
paint_script = (
'import javabridge\n'
'self = javabridge.redeem_jref("%s")\n'
'self.draw(javabridge.JWrapper(graphics))\n') % self.__ref_id
component = javabridge.run_script("""
new javax.swing.JComponent() {
paintComponent: function(graphics) {
locals = new java.util.Hashtable();
locals.put("graphics", graphics);
cpython.exec(script, locals, locals);
}
}
""", dict(cpython = self.__cpython, script = paint_script))
self.__component = javabridge.JWrapper(component)
self.__event_queue_class = None
self.__component_listener = javabridge.JProxy(
'java.awt.event.ComponentListener',
dict(componentHidden = self._on_component_hidden,
componentMoved = self._on_component_moved,
componentResized = self._on_component_resized,
componentShown = self._on_component_shown))
self.__component.addComponentListener(self.__component_listener.o)
self.__key_event_cls = javabridge.JClassWrapper(
'java.awt.event.KeyEvent')
self.__key_listener = javabridge.JProxy(
'java.awt.event.KeyListener',
dict(keyPressed=self._on_key_pressed,
keyReleased=self._on_key_released,
keyTyped=self._on_key_typed))
self.__component.addKeyListener(self.__key_listener.o)
self.__component.setFocusable(True)
self.__mouse_listener = javabridge.JProxy(
'java.awt.event.MouseListener',
dict(mouseClicked=self._on_mouse_clicked,
mouseEntered=self._on_mouse_entered,
mouseExited=self._on_mouse_exited,
mousePressed=self._on_mouse_pressed,
mouseReleased=self._on_mouse_released))
self.__component.addMouseListener(self.__mouse_listener.o)
self.__mouse_motion_listener = javabridge.JProxy(
'java.awt.event.MouseMotionListener',
dict(mouseDragged=self._on_mouse_dragged,
mouseMoved=self._on_mouse_moved))
self.__component.addMouseMotionListener(self.__mouse_motion_listener.o)
@property
def component(self):
return self.__component
def draw(self, graphics=None):
'''Render the figure
:param graphics: the AWT Graphics gc that should be used to draw.
If None and not in the AWT thread, the call is
reflected via EventQueue.invokeAndWait, otherwise
a graphics context is created and drawn.
'''
FigureCanvasAgg.draw(self)
self.jimage = _convert_agg_to_awt_image(self.get_renderer(), None)
self._isDrawn = True
self.gui_repaint(graphics)
def gui_repaint(self, graphics = None):
'''Do the actual painting on the Java canvas'''
if not self.isDispatchThread():
self.__component.repaint()
else:
if self.__component.isShowing():
color = javabridge.JClassWrapper(
'java.awt.Color')(*self.figure.get_facecolor())
if graphics is None:
graphics = self.__component.getGraphics()
graphics.drawImage(self.jimage, 0, 0, color, None)
def blit(self, bbox=None):
if bbox is None:
self.jimage = _convert_agg_to_awt_image(self.get_renderer(), None)
self._isDrawn = True
self.__component.repaint()
return
l, b, w, h = bbox.bounds
r = l + w
t = b + h
destHeight = self.jimage.getHeight(None)
x0 = int(l)
y0 = int(destHeight - t)
x1 = int(l + w)
y1 = int(destHeight - t + h)
srcImage = _convert_agg_to_awt_image(self.get_renderer(), None)
destGraphics = self.jimage.getGraphics()
destGraphics.drawImage(srcImage,
x0, y0, x1, y1, # dest coordinates
x0, y0, x1, y1, # src coordinates
None)
self._isDrawn = True
self.__component.repaint()
def _on_component_hidden(self, event):
pass
def _on_component_moved(self, event):
pass
def _on_component_shown(self, event):
pass
def _get_key(self, event):
modifiers = javabridge.call(event, "getModifiers", "()I")
keycode = javabridge.call(event, "getKeyCode", "()I")
keytext = javabridge.to_string(
self.__key_event_cls.getKeyText(keycode))
if modifiers != 0:
modtext = javabridge.to_string(
self.__key_event_cls.getKeyModifiersText(modifiers)).lower()
return "%s+%s" % (modtext, keytext)
return keytext
def _on_key_pressed(self, event):
key = self._get_key(event)
javabridge.call(event, "consume", "()V")
self.key_press_event(key, guiEvent=event)
def _on_key_released(self, event):
key = self._get_key(event)
javabridge.call(event, "consume", "()V")
self.key_release_event(key, guiEvent=event)
def _on_key_typed(self, event):
# TODO: determine if we ever get this after consuming
pass
def _on_mouse_clicked(self, event):
pass
def _on_mouse_entered(self, event):
pass
def _on_mouse_exited(self, event):
pass
def _get_mouse_x_y_button(self, event):
x = javabridge.call(event, "getX", "()I")
y = self.figure.bbox.height - javabridge.call(event, "getY", "()I")
button = javabridge.call(event, "getButton", "()I")
return x, y, button
def _on_mouse_pressed(self, event):
x, y, button = self._get_mouse_x_y_button(event)
javabridge.call(event, "consume", "()V")
self.button_press_event(x, y, button, guiEvent=event)
def _on_mouse_released(self, event):
x, y, button = self._get_mouse_x_y_button(event)
javabridge.call(event, "consume", "()V")
self.button_release_event(x, y, button, guiEvent=event)
def _on_mouse_dragged(self, event):
x, y, button = self._get_mouse_x_y_button(event)
self.motion_notify_event(x, y, guiEvent=event)
def _on_mouse_moved(self, event):
x, y, button = self._get_mouse_x_y_button(event)
self.motion_notify_event(x, y, guiEvent=event)
def _on_component_resized(self, event):
w = float(self.__component.getWidth())
h = float(self.__component.getHeight())
if w <= 0 or h <= 0:
return
dpival = self.figure.dpi
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches(winch, hinch)
self._isDrawn = False
self.__component.repaint()
FigureCanvasAgg.resize_event(self)
def isDispatchThread(self):
return self._eqc.isDispatchThread()
@property
def _eqc(self):
'''java.awt.EventQueue.class'''
if self.__event_queue_class is None:
self.__event_queue_class = javabridge.JClassWrapper(
"java.awt.EventQueue")
return self.__event_queue_class
class NavigationToolbar2Swing(NavigationToolbar2):
def __init__(self, canvas, frame):
self._frame = frame
self._tools = {}
self._lastrect = None
NavigationToolbar2.__init__(self, canvas)
self._idle = True
clsCursor = javabridge.JClassWrapper('java.awt.Cursor')
self.cursor_map = {
cursors.MOVE: clsCursor(clsCursor.MOVE_CURSOR),
cursors.HAND: clsCursor(clsCursor.HAND_CURSOR),
cursors.POINTER: clsCursor(clsCursor.DEFAULT_CURSOR),
cursors.SELECT_REGION: clsCursor(clsCursor.CROSSHAIR_CURSOR)
}
self.white = javabridge.JClassWrapper('java.awt.Color').WHITE
def _init_toolbar(self):
self.toolbar = javabridge.JClassWrapper('javax.swing.JToolBar')()
self.toolbar.setFloatable(False)
self.radio_button_group = javabridge.JClassWrapper(
'javax.swing.ButtonGroup')()
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.toolbar.addSeparator()
continue
callback = getattr(self, callback, None)
if callback is None:
continue
if text in ("Pan", "Zoom"):
self.add_radio_button(callback, image_file)
else:
self.add_button(callback, image_file)
north = javabridge.get_static_field('java/awt/BorderLayout', 'NORTH',
'Ljava/lang/String;')
javabridge.call(self._frame.o, "add",
"(Ljava/awt/Component;Ljava/lang/Object;)V",
self.toolbar.o, north)
def make_action(self, action, icon_name):
basedir = os.path.join(matplotlib.rcParams['datapath'], 'images')
filename = os.path.normpath(os.path.join(basedir, icon_name+".png"))
if os.path.exists(filename):
jfile = javabridge.JClassWrapper('java.io.File')(filename)
image = javabridge.JClassWrapper('javax.imageio.ImageIO').read(
jfile)
icon = javabridge.JClassWrapper('javax.swing.ImageIcon')(image)
else:
icon = None
class ActionListener(javabridge.JProxy):
def __init__(self, action):
javabridge.JProxy.__init__(self, 'java.awt.event.ActionListener')
self.action = action
def actionPerformed(self, event):
self.action(event)
action_listener = ActionListener(action)
jaction = javabridge.run_script(
"""var result = new JavaAdapter(javax.swing.AbstractAction,
javax.swing.Action, {
actionPerformed: function(event) {
action_listener.actionPerformed(event);
}
});
result.putValue(javax.swing.Action.NAME, name);
result.putValue(javax.swing.Action.SMALL_ICON, icon);
result
""", dict(action_listener=action_listener.o,
name = icon_name,
icon = icon.o if icon is not None else icon))
self._tools[icon_name] = (action_listener, jaction)
return jaction
def add_radio_button(self, action, icon_name):
jaction = self.make_action(action, icon_name)
button = javabridge.JClassWrapper('javax.swing.JToggleButton')(jaction)
self.toolbar.add(button)
self.radio_button_group.add(button)
return button
def add_button(self, action, icon_name):
jaction = self.make_action(action, icon_name)
return self.toolbar.add(jaction)
def set_cursor(self, cursor):
'''Set the cursor on the canvas component'''
self.canvas.component.setCursor(self.cursor_map[cursor])
def draw_rubberband(self, event, x0, y0, x1, y1):
if not self.canvas.isDispatchThread():
return
height = self.canvas.figure.bbox.height
y0, y1 = [height - y for y in y0, y1]
(y0, y1), (x0, x1) = [[int(fn(v0, v1)) for fn in min, max]
for v0, v1 in ((y0, y1), (x0, x1))]
rect = (x0, y0, x1-x0, y1-y0)
graphics = self.canvas.component.getGraphics()
graphics.setXORMode(self.white)
try:
old_color = graphics.getColor()
try:
                if self._lastrect is not None:
graphics.drawRect(*self._lastrect)
graphics.drawRect(*rect)
self._lastrect = rect
finally:
graphics.setColor(old_color)
finally:
graphics.setPaintMode()
def release(self, event):
super(NavigationToolbar2Swing, self).release(event)
self._lastrect = None
def dynamic_update(self):
self.canvas.draw()
def save_figure(self, *args):
"""Save the current figure"""
default_filetype = self.canvas.get_default_filetype()
filetypes = self.canvas.get_supported_filetypes_grouped()
def compare(a, b):
aname, aextensions = a
bname, bextensions = b
if default_filetype in aextensions:
return 0 if default_filetype in bextensions else -1
elif default_filetype in bextensions:
return 1
else:
return cmp(aname, bname)
filetypes = list(filetypes.items())
filetypes.sort(cmp=compare)
chooser_cls = javabridge.JClassWrapper('javax.swing.JFileChooser')
chooser = chooser_cls()
chooser.setDialogTitle("Save figure")
filter_cls = javabridge.JClassWrapper(
'javax.swing.filechooser.FileNameExtensionFilter')
default_filter = None
for name, extensions in filetypes:
file_filter = filter_cls(name, *extensions)
chooser.addChoosableFileFilter(file_filter)
if default_filter is None:
default_filter = file_filter
chooser.setFileFilter(default_filter)
result = chooser.showSaveDialog(self.canvas.component)
if result == chooser_cls.APPROVE_OPTION:
path = chooser.getSelectedFile().getAbsolutePath()
exts = chooser.getFileFilter().getExtensions()
ext = javabridge.get_env().get_object_array_elements(exts.o)[0]
self.canvas.print_figure(path, format=javabridge.to_string(ext))
class FigureFrameSwing(object):
def __init__(self, num, figure):
self.frame = javabridge.JClassWrapper('javax.swing.JFrame')()
self.canvas = FigureCanvasSwing(figure)
dimension = javabridge.JClassWrapper('java.awt.Dimension')(
figure.bbox.width, figure.bbox.height)
border_layout_cls = javabridge.JClassWrapper(
'java.awt.BorderLayout')
self.canvas.component.setPreferredSize(dimension)
self.frame.add(self.canvas.component, border_layout_cls.CENTER)
self.toolbar = NavigationToolbar2Swing(self.canvas, self.frame)
self.frame.pack()
self.figmgr = FigureManagerSwing(self.canvas, num, self)
def get_figure_manager(self):
return self.figmgr
class FigureManagerSwing(FigureManagerBase):
def __init__(self, canvas, num, frame):
FigureManagerBase.__init__(self, canvas, num)
self.frame = frame
self.window = frame.frame
self.toolbar = frame.toolbar
def notify_axes_change(fig):
if self.toolbar is not None:
self.toolbar.update()
def show(self):
self.window.setVisible(True)
def destroy(self, *args):
self.window.dispose()
def get_window_title(self):
return javabridge.to_string(self.window.getTitle())
def set_window_title(self, title):
self.window.setTitle(title)
def resize(self, width, height):
dimension = javabridge.JClassWrapper('java.awt.Dimension')(
width, height)
self.frame.canvas.component.setPreferredSize(dimension)
self.window.pack()
def _convert_agg_to_awt_image(renderer, awt_image):
'''Use the renderer to draw the figure on a java.awt.BufferedImage
:param renderer: the RendererAgg that's in charge of rendering the figure
:param awt_image: a javabridge JB_Object holding the BufferedImage or
None to create one.
:returns: the BufferedImage
'''
env = javabridge.get_env()
w = int(renderer.width)
h = int(renderer.height)
buf = np.frombuffer(renderer.buffer_rgba(), np.uint8).reshape(w * h, 4)
cm = javabridge.JClassWrapper('java.awt.image.DirectColorModel')(
32, int(0x000000FF), int(0x0000FF00), int(0x00FF0000), -16777216)
raster = cm.createCompatibleWritableRaster(w, h)
for i in range(4):
samples = env.make_int_array(buf[:, i].astype(np.int32))
raster.setSamples(0, 0, w, h, i, samples)
awt_image = javabridge.JClassWrapper('java.awt.image.BufferedImage')(
cm, raster, False, None)
return awt_image
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', matplotlib.figure.Figure)
fig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, fig)
def new_figure_manager_given_figure(num, figure):
frame = FigureFrameSwing(num, figure)
figmgr = frame.get_figure_manager()
if matplotlib.is_interactive():
figmgr.frame.Show()
return figmgr
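
# --- Illustrative usage sketch (added; not part of the original backend) ---
# Assumes python-javabridge can start a JVM and that the
# org.cellprofiler.javabridge.CPython class used by FigureCanvasSwing is on
# the JVM class path. A real application would keep the JVM alive while the
# window is in use instead of killing it right away.
if __name__ == "__main__":
    from matplotlib.figure import Figure

    javabridge.start_vm(run_headless=False)
    try:
        figure = Figure()
        axes = figure.add_subplot(111)
        axes.plot([0, 1, 2, 3], [0, 1, 4, 9])
        manager = new_figure_manager_given_figure(1, figure)
        manager.show()
    finally:
        javabridge.kill_vm()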
|
py | 1a428b3cebba37ae0405317bd4d0e2bbdb0b64a7 | # -*- coding: utf-8 -*-
"""
Author:by 王林清 on 2021/11/2 21:32
FileName:insertMongodb.py in shiyizhonghua_resource
Tools:PyCharm python3.8.4
"""
from pymongo import MongoClient
from util import *
if __name__ == '__main__':
dir_name = r'./../database_json'
paths = get_file_path(dir_name)
host = '114.55.236.49'
client = MongoClient(host=host, port=27017,
username='rw', password='cczu193rw',
authSource='shiyizhonghua')
with client:
db = client['shiyizhonghua']
collection = db['data']
collection.drop()
for path in paths:
data = get_json(path)
collection.insert_many(data, ordered=False)
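        # Optional sanity check (added; count_documents needs pymongo >= 3.7):
        inserted = collection.count_documents({})
        print(f"Inserted {inserted} documents into collection 'data'")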
|
py | 1a428b51d180c2b3a9979fbf6f28ac9b3d05121a | # -*- coding: utf-8 -*-
"""
Created on Mon May 31 18:14:32 2021
@author: ilayda
"""
#1. libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#2. data preprocessing
#2.1. data loading
veriler = pd.read_csv('odev_tenis.txt.crdownload')
#pd.read_csv("veriler.csv")
#test
print(veriler)
'''
#encoder: convert Categorical -> Numeric.
play = veriler.iloc[:,-1:].values
print(play)
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
play[:,-1] = le.fit_transform(veriler.iloc[:,-1])
print(play)
#converted the second-to-last column to 1s and 0s
windy = veriler.iloc[:,-2:-1].values
print(play)
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
windy[:,-1] = le.fit_transform(veriler.iloc[:,-1])
print(windy)
#encoder: Categorical -> Numeric
c = veriler.iloc[:,-1:].values
print(c)
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
c[:,-1] = le.fit_transform(veriler.iloc[:,-1])
print(c)
'''
#1) ENCODING
#instead of encoding column by column as above, the single call below converts everything to 1s and 0s.
#sunny, rainy, overcast, windy and play are converted to 1s and 0s with LabelEncoder.
from sklearn.preprocessing import LabelEncoder
veriler2 = veriler.apply(LabelEncoder().fit_transform)
#temperature and humidity are one-hot encoded; OneHotEncoder is used because they are already numbers, not true/false values.
#temperature and humidity
c = veriler2.iloc[:,:1]
from sklearn import preprocessing
ohe = preprocessing.OneHotEncoder()
c=ohe.fit_transform(c).toarray()
print(c)
#2) MERGE into one table
#build a DataFrame (havadurumu) so rainy, sunny and overcast are added to a table as 1/0 columns.
havadurumu = pd.DataFrame(data = c, index = range(14), columns=['o','r','s'])
#sonveriler: append windy and play (as 0/1) from the veriler table to havadurumu.
sonveriler = pd.concat([havadurumu,veriler.iloc[:,1:3]],axis = 1)
#also append the encoded columns from veriler2 (created above) to the table.
sonveriler = pd.concat([veriler2.iloc[:,-2:],sonveriler], axis = 1)
#3) SPLIT THE DATA
#humidity is the dependent variable, so everything except it goes into a separate table.
#y_train and y_test will hold humidity: sonveriler.iloc[:,-1:]
from sklearn.model_selection import train_test_split
x_train, x_test,y_train,y_test = train_test_split(sonveriler.iloc[:,:-1],sonveriler.iloc[:,-1:],test_size=0.33, random_state=0)
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(x_train,y_train)
#predict humidity and compare against y_test.
y_pred = regressor.predict(x_test)
print(y_pred)
#4) BACKWARD ELIMINATION
#measure the fit to decide which variables to drop; first define all the variables.
#there are 14 rows.
import statsmodels.api as sm
X = np.append(arr = np.ones((14,1)).astype(int), values=sonveriler.iloc[:,:-1], axis=1 )
#there are 6 columns; define them and let sm.OLS fit all variables against sonveriler.iloc[:,-1:]
X_l=sonveriler.iloc[:,[0,1,2,3,4,5]].values
X_l=np.array(X_l,dtype=float)
model=sm.OLS(sonveriler.iloc[:,-1:],X_l).fit()
print(model.summary())
#4) DROPPING A VARIABLE
#in the summary report x1 has the highest P>|t| value (0.593), so it is dropped.
#keep everything after the first column, i.e. drop windy with [:,1:]
sonveriler = sonveriler.iloc[:,1:]
#rebuild the model without x1 (windy); the column count is now 5: iloc[:,[0,1,2,3,4]]
import statsmodels.api as sm
X = np.append(arr = np.ones((14,1)).astype(int), values=sonveriler.iloc[:,:-1], axis=1 )
X_l=sonveriler.iloc[:,[0,1,2,3,4]].values
X_l=np.array(X_l,dtype=float)
model=sm.OLS(sonveriler.iloc[:,-1:],X_l).fit()
print(model.summary())
#5) DROP THE ELIMINATED COLUMN FROM X_TRAIN AND X_TEST
x_train = x_train.iloc[:,1:]
x_test = x_test.iloc[:,1:]
#afterwards, comparing y_test and y_pred shows the improvement.
regressor.fit(x_train,y_train)
y_pred = regressor.predict(x_test)
#the first y_pred was about 84; after dropping x1 it became about 77, closer to the 70 in y_test.
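
#Added sketch: the same backward-elimination idea written as a reusable loop
#(drop the column with the highest p-value until every p-value is below a
#threshold). Uses np and sm imported above; pass the design matrix and target.
def backward_elimination(X, y, significance=0.05):
    cols = list(range(X.shape[1]))
    while True:
        model = sm.OLS(y, X[:, cols]).fit()
        pvals = np.asarray(model.pvalues)
        worst = int(np.argmax(pvals))
        if pvals[worst] <= significance:
            return cols, model
        del cols[worst]
#e.g. cols, model = backward_elimination(np.array(sonveriler.iloc[:,:-1], dtype=float), sonveriler.iloc[:,-1:])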
|
py | 1a428bf2547851228dbb0537526d0c4de5c0932d | # -----------------------------------------------------------------------------
#
# Copyright (C) The BioDynaMo Project.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# See the LICENSE file distributed with this work for details.
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# -----------------------------------------------------------------------------
import os
import shutil
from print_command import Print
def CopySupportFiles(sim_name):
SUPPORT_DIR = os.path.join(os.environ['BDMSYS'], 'share', 'util', 'support_files')
Print.new_step("Copy additional support files")
for filename in os.listdir(SUPPORT_DIR):
full_file_name = os.path.join(SUPPORT_DIR, filename)
if os.path.isfile(full_file_name):
shutil.copy(full_file_name, sim_name)
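
# --- Illustrative call (added; not part of the original module) ---
# Assumes the BDMSYS environment variable points at a BioDynaMo installation
# and that "my_simulation" is an existing simulation directory (placeholder).
if __name__ == "__main__":
    CopySupportFiles("my_simulation")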
|
py | 1a428c40f5fd18ab170abe93a92d51f0329c389d | import asyncio
import time
from typing import Callable, Coroutine
import httpx
addr = "https://langa.pl/crawl"
todo = set()
async def crawl2(prefix: str, url: str = "") -> None:
url = url or prefix
client = httpx.AsyncClient()
try:
response = await client.get(url)
finally:
await client.aclose()
for line in response.text.splitlines():
if line.startswith(prefix):
task = asyncio.create_task(crawl2(prefix, line), name=line)
todo.add(task)
async def progress(url: str, algo: Callable[..., Coroutine]) -> None:
# report in an elegant way
# tells asyncio to run task in the background
# it runs only when there is an await
task = asyncio.create_task(algo(url), name=url)
# A collection of tasks
todo.add(task)
start = time.time()
while len(todo):
# await on todo tasks, will not raise exception instead returns:
# - done: completed tasks
# - _pending: tasks yet to complete
done, _pending = await asyncio.wait(todo, timeout=0.5)
# Update the todo by removing what is already done
todo.difference_update(done)
urls = (t.get_name() for t in todo)
print(f"{len(todo)}: " + ", ".join(sorted(urls))[-75:])
end = time.time()
print(f"Took {int(end-start)} seconds")
async def async_main() -> None:
try:
await progress(addr, crawl2)
except asyncio.CancelledError:
for task in todo:
task.cancel()
done, pending = await asyncio.wait(todo, timeout=1.0)
# remove done tasks from the todo collection
todo.difference_update(done)
# remove pending tasks from the todo collection
todo.difference_update(pending)
if todo:
print("warning: new tasks added while we were cancelling")
if __name__ == "__main__":
loop = asyncio.get_event_loop()
task = loop.create_task(async_main())
loop.call_later(5, task.cancel)
loop.run_until_complete(task)
|
py | 1a428d2f0da2594ad7f641b8de9fc48963f1bc0e | """
Copyright (C) 2019 Authors of gHHC
This file is part of "hyperbolic_hierarchical_clustering"
http://github.com/nmonath/hyperbolic_hierarchical_clustering
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import os
import datetime
import numpy as np
from absl import logging
import tensorflow as tf
from ghhc.util.Config import Config
from ghhc.util.load import load
from ghhc.util.initializers import random_pts,init_from_afkmc2_and_hac,init_from_rand_and_hac
from ghhc.model.ghhc import gHHCTree, gHHCInference
from ghhc.util.io import mkdir_p
tf.enable_eager_execution()
logging.set_verbosity(logging.INFO)
if __name__ == "__main__":
config = Config(sys.argv[1])
now = datetime.datetime.now()
ts = "{:04d}-{:02d}-{:02d}-{:02d}-{:02d}-{:02d}".format(
now.year, now.month, now.day, now.hour, now.minute, now.second)
config.exp_out_dir = os.path.join(config.exp_out_base, config.dataset_name, config.alg_name, "%s-%s" %(ts,config.to_filename()))
config.checkpoint_dir = os.path.join(config.exp_out_dir, 'models', 'ckpt')
mkdir_p(config.exp_out_dir)
mkdir_p(os.path.join(config.exp_out_dir, 'models'))
config.save_config(config.exp_out_dir,config.to_filename() + ".json")
config.save_config(config.exp_out_dir)
pids, lbls, dataset = load(config.inference_file, config)
dev_pids, dev_lbls, dev_dataset = load(config.dev_file, config)
if config.random_projection is not None:
logging.info('Using random projection: %s', config.random_projection)
_proj = np.random.randn(dataset.shape[1], config.random_projection).astype(np.float32)
def p(x):
projd = tf.matmul(x, _proj)
projd /= tf.linalg.norm(projd,axis=1,keepdims=True)
projd = tf.clip_by_norm(projd, 0.9, axes=[1])
return projd
proj = lambda x: p(x)
init_tree = random_pts(proj(dataset).numpy(), config.num_internals, config.random_pts_scale)
else:
if config.init_method == 'randompts':
init_tree = random_pts(dataset, config.num_internals, config.random_pts_scale)
elif config.init_method == 'afkmc2hac':
init_tree = init_from_afkmc2_and_hac(dataset, config.num_internals)
elif config.init_method == 'randhac':
init_tree = init_from_rand_and_hac(dataset, config.num_internals, config.random_pts_scale)
proj = None
tree = gHHCTree(init_tree.copy(), config=config, projection=proj)
optimizer = tf.train.GradientDescentOptimizer(config.tree_learning_rate)
inf = gHHCInference(tree, optimizer, config, dev_dataset, dev_lbls)
samples = np.load(config.sample_file)
inf.inference(samples, dataset, config.batch_size) |
py | 1a428d872d8ab73d020338de0fc37696321808df | """Limited version of os module: only keep what is more or less relevant in a
browser context
"""
import sys
error = OSError
name = 'posix'
linesep = '\n'
from posix import *
import posixpath as path
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
environ = {'HOME': __BRYTHON__.curdir,
'PYTHONPATH': __BRYTHON__.brython_path
}
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are str."""
return environ.get(key, default)
supports_bytes_environ = True
def chdir(path):
__BRYTHON__.curdir = path
def fsencode(filename):
"""
Encode filename to the filesystem encoding with 'surrogateescape' error
handler, return bytes unchanged. On Windows, use 'strict' error handler if
the file system encoding is 'mbcs' (which is the default encoding).
"""
encoding = sys.getfilesystemencoding()
errors = 'surrogateescape'
if isinstance(filename, bytes):
return filename
elif isinstance(filename, str):
return filename.encode(encoding, errors)
else:
raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
def fsdecode(filename):
"""
Decode filename from the filesystem encoding with 'surrogateescape' error
handler, return str unchanged. On Windows, use 'strict' error handler if
the file system encoding is 'mbcs' (which is the default encoding).
"""
encoding = sys.getfilesystemencoding()
errors = 'surrogateescape'
if isinstance(filename, str):
return filename
elif isinstance(filename, bytes):
return filename.decode(encoding, errors)
else:
raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
def getcwd():
return __BRYTHON__.curdir
_set = set()
supports_dir_fd = _set
supports_effective_ids = _set
supports_fd = _set
supports_follow_symlinks = _set
|
py | 1a428e68240cca56674ffa259909bad0ca6077e3 | # Generated by Django 3.2.12 on 2022-02-04 07:03
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=254, unique=True)),
('name', models.CharField(max_length=254)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
|
py | 1a428eaa5a8a06c492df53dc33e574d81e1d1c6a | from datetime import date
pessoas = {}
listaDePessoas = []
hoje = date.today().year
somaIdade = mediaIdade = 0
while True:
pessoas.clear()
pessoas['nome'] = str(input('Nome: ')).strip()
while True:
pessoas['sexo'] = str(input('Sexo [M/F]: ')).upper()[0]
if pessoas['sexo'] in 'MF':
break
print('Erro! Digite apenas "M" ou "F".')
idade = int(input('Ano de Nascimento: '))
pessoas['idade'] = hoje - idade
somaIdade += pessoas['idade']
listaDePessoas.append(pessoas.copy())
while True:
continuar = str(input('Deseja continuar?[S/N]: ')).upper()[0]
if continuar in 'SN':
break
print('Erro! Digite apenas "SIM" ou "NÃO".')
if continuar in 'N':
break
pessoas.clear()
print('-=' * 25)
print(listaDePessoas)
print('-=' * 12, 'Total de Pessoas Cadastradas ', '-=' * 12)
print(f'Total de pessoas cadastradas: {len(listaDePessoas)}')
print('-=' * 10, 'A média das Idades ', '-=' * 10)
mediaIdade = somaIdade / len(listaDePessoas)
print(f'A média das Idades: {mediaIdade} anos.')
print('-=' * 10, 'Lista com Todas as Mulheres', '-=' * 10)
for elemento in listaDePessoas:
if elemento['sexo'] in 'F':
print(f'{elemento["nome"]}')
print()
print('-=' * 10, 'Lista de Pessoas Acima da Média de Idade ', '-=' * 10)
for elemento in listaDePessoas:
if elemento['idade'] >= mediaIdade:
print(f'{elemento["nome"]} está acima da Média da Idade. ')
print() |
py | 1a428ec03a617802684ba54c921da26ea04115e7 | import logging
from van.adam import transactions as t
from van.adam.inventory_methods.output import calculate_output
from van.adam.inventory_methods.sell_outs import take_all, take_less
from van.adam.transactions import is_taxable
def calc_profit(sell: tuple) -> tuple:
"""
Calculates the buy price, sell price, total profit, and taxable profit from a sell transaction tuple.
:param sell: the sell transaction as a tuple (date: datetime.date, quantity: float, price: float)
:return: a tuple containing (weighted_buy_price: float, sell_price: float, total_profit: float, taxable_profit:
float); all numbers are rounded to two decimal places
"""
sell_date, total_sell_quantity, sell_price = sell
sell_outs = []
quantity = 0.0
while quantity != total_sell_quantity:
log.debug("quantity: {}".format(quantity))
try:
buy = t.buys.pop()
except IndexError:
log.error("Not enough crypto assets for the sale {}: list index out of range!".format(sell))
latest_sell_out = sell_outs.pop()
t.buys.insert(len(t.buys), latest_sell_out)
log.warning("Re-added latest sell out {} to buy transactions".format(latest_sell_out))
return 0.0, 0.0, 0.0, 0.0
buy_date, buy_quantity, buy_price = buy
# round buy_quantity to 10 decimal places to avoid IndexError above
buy_quantity = round(buy_quantity, 10)
taxable = is_taxable(buy_date, sell_date)
if (quantity + buy_quantity) == total_sell_quantity:
quantity = take_all(quantity, buy_quantity, buy_price, sell_outs, taxable)
elif (quantity + buy_quantity) < total_sell_quantity:
quantity = take_all(quantity, buy_quantity, buy_price, sell_outs, taxable)
elif (quantity + buy_quantity) > total_sell_quantity:
quantity, updated_buy = take_less(total_sell_quantity, quantity, buy_date, buy_quantity, buy_price,
sell_outs, taxable)
t.buys.insert(len(t.buys), updated_buy)
return calculate_output(sell_outs, total_sell_quantity, sell_price)
log = logging.getLogger()
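
# --- Illustrative usage sketch (added; not part of the original module) ---
# Assumes the van.adam package is importable and that transactions.buys is a
# module-level list of (date, quantity, price) tuples that calc_profit()
# consumes from the end, as above. The numbers below are made up.
if __name__ == "__main__":
    from datetime import date

    t.buys = [(date(2020, 3, 1), 2.0, 5000.0), (date(2021, 2, 1), 1.0, 30000.0)]
    result = calc_profit((date(2021, 8, 1), 1.5, 40000.0))
    print("(weighted buy, sell, total profit, taxable profit):", result)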
|
py | 1a428f7030afe9e53dae35815d816e6d7ddf2388 | """Setup for TcEx Module."""
# standard library
import os
# third-party
from setuptools import find_packages, setup
metadata = {}
metadata_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'tcex', '__metadata__.py')
with open(
metadata_file,
encoding='utf-8',
) as f:
exec(f.read(), metadata) # nosec; pylint: disable=exec-used
if not metadata:
raise RuntimeError(f'Could not load metadata file ({metadata_file}).')
with open('README.md') as f:
readme = f.read()
dev_packages = [
'bandit',
'black',
'CommonMark',
'deepdiff',
'flake8',
# isort 5 currently causes issues with pylint
'isort>=4,<5',
'mako',
'pre-commit',
'pydocstyle',
'pylint',
'pytest',
'pytest-cov',
'pytest-html',
'pytest-xdist',
'pyupgrade',
'recommonmark',
'reno',
'sphinx',
'sphinx-rtd-theme',
]
setup(
author=metadata['__author__'],
author_email=metadata['__author_email__'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Security',
],
description=metadata['__description__'],
download_url=metadata['__download_url__'],
extras_require={'dev': dev_packages, 'develop': dev_packages, 'development': dev_packages},
include_package_data=True,
install_requires=[
'colorama',
'future',
'hvac',
'inflect',
'jmespath',
'jsonschema',
'lark',
'paho-mqtt',
'parsedatetime',
'pyaes',
'python-dateutil',
'pytz',
'redis',
'requests',
'six',
'stdlib-list',
'stix2',
'tzlocal',
'wrapt',
],
license=metadata['__license__'],
long_description=readme,
long_description_content_type='text/markdown',
name=metadata['__package_name__'],
packages=find_packages(exclude=['tests', 'tests.*']),
package_data={'': ['*.json', '*.lark']},
package_dir={'tcex': 'tcex'},
project_urls={
'Documentation': 'https://threatconnect-inc.github.io/tcex/',
'Source': 'https://github.com/ThreatConnect-Inc/tcex',
},
python_requires='>=3.6',
scripts=[
'bin/tcinit',
'bin/tcinit.cmd',
'bin/tclib',
'bin/tclib.cmd',
'bin/tcpackage',
'bin/tcpackage.cmd',
'bin/tctest',
'bin/tctest.cmd',
'bin/tcvalidate',
'bin/tcvalidate.cmd',
],
url=metadata['__url__'],
version=metadata['__version__'],
zip_safe=True,
)
|
py | 1a428fde0c3b4784648d31ca13c24f690850ea1d | import numpy as np
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import arg_scope
import data_iter
import nn_extra_gauss
import nn_extra_nvp
from config_rnn import defaults
batch_size = 32
sample_batch_size = 1
n_samples = 4
rng = np.random.RandomState(42)
rng_test = np.random.RandomState(317070)
seq_len = defaults.seq_len
eps_corr = defaults.eps_corr
mask_dims = defaults.mask_dims
nonlinearity = tf.nn.elu
weight_norm = True
train_data_iter = data_iter.BaseExchSeqDataIterator(seq_len=seq_len, batch_size=batch_size,
set='train', rng=rng, digits=[0, 2, 4, 6, 8])
test_data_iter = data_iter.BaseExchSeqDataIterator(seq_len=seq_len, batch_size=batch_size, set='test',
digits=[1, 3, 5, 7, 9], rng=rng_test)
valid_data_iter = data_iter.BaseExchSeqDataIterator(seq_len=seq_len, batch_size=batch_size,
set='test', rng=rng_test, digits=[0, 2, 4, 6, 8])
test_data_iter2 = data_iter.BaseTestBatchSeqDataIterator(seq_len=seq_len,
set='test',
rng=rng_test,
digits=[1, 3, 5, 7, 9])
obs_shape = train_data_iter.get_observation_size() # (seq_len, 28,28,1)
print('obs shape', obs_shape)
ndim = np.prod(obs_shape[1:])
corr_init = np.ones((ndim,), dtype='float32') * 0.1
optimizer = 'rmsprop'
learning_rate = 0.001
lr_decay = 0.999995
max_iter = 50000
save_every = 1000
validate_every = 1000
n_valid_batches = 20
scale_student_grad = 0.
student_grad_schedule = {0: 0., 100: 0.1}
nvp_layers = []
nvp_dense_layers = []
student_layer = None
def build_model(x, init=False, sampling_mode=False):
global nvp_layers
global nvp_dense_layers
with arg_scope([nn_extra_nvp.conv2d_wn, nn_extra_nvp.dense_wn], init=init):
if len(nvp_layers) == 0:
build_nvp_model()
if len(nvp_dense_layers) == 0:
build_nvp_dense_model()
global student_layer
if student_layer is None:
student_layer = nn_extra_gauss.GaussianRecurrentLayer(shape=(ndim,), corr_init=corr_init)
x_shape = nn_extra_nvp.int_shape(x)
x_bs = tf.reshape(x, (x_shape[0] * x_shape[1], x_shape[2], x_shape[3], x_shape[4]))
x_bs_shape = nn_extra_nvp.int_shape(x_bs)
log_det_jac = tf.zeros(x_bs_shape[0])
logit_layer = nn_extra_nvp.LogitLayer()
scale_layer = nn_extra_nvp.ScaleLayer()
y, log_det_jac = scale_layer.forward_and_jacobian(x_bs, None, log_det_jac)
y, log_det_jac = logit_layer.forward_and_jacobian(y, None, log_det_jac)
# construct forward pass
z = None
for layer in nvp_layers:
y, z, log_det_jac = layer.forward_and_jacobian(y, z, log_det_jac)
z = tf.concat([z, y], 3)
for layer in nvp_dense_layers:
z, _, log_det_jac = layer.forward_and_jacobian(z, None, log_det_jac)
z_shape = nn_extra_nvp.int_shape(z)
z_vec = tf.reshape(z, (x_shape[0], x_shape[1], -1))
log_det_jac = tf.reshape(log_det_jac, (x_shape[0], x_shape[1]))
log_probs = []
z_samples = []
latent_log_probs = []
latent_log_probs_prior = []
if mask_dims:
mask_dim = tf.greater(student_layer.corr, tf.ones_like(student_layer.corr) * eps_corr)
mask_dim = tf.cast(mask_dim, tf.float32)
else:
mask_dim = None
with tf.variable_scope("one_step") as scope:
student_layer.reset()
for i in range(seq_len):
if sampling_mode:
z_sample = student_layer.sample(nr_samples=n_samples)
z_samples.append(z_sample)
latent_log_prob = student_layer.get_log_likelihood(z_sample[:, 0, :])
latent_log_probs.append(latent_log_prob)
else:
latent_log_prob = student_layer.get_log_likelihood(z_vec[:, i, :], mask_dim=mask_dim)
latent_log_probs.append(latent_log_prob)
log_prob = latent_log_prob + log_det_jac[:, i]
log_probs.append(log_prob)
latent_log_prob_prior = student_layer.get_log_likelihood_under_prior(z_vec[:, i, :],
mask_dim=mask_dim)
latent_log_probs_prior.append(latent_log_prob_prior)
student_layer.update_distribution(z_vec[:, i, :])
scope.reuse_variables()
if sampling_mode:
# one more sample after seeing the last element in the sequence
z_sample = student_layer.sample(nr_samples=n_samples)
z_samples.append(z_sample)
z_samples = tf.concat(z_samples, 1)
latent_log_prob = student_layer.get_log_likelihood(z_sample[:, 0, :])
latent_log_probs.append(latent_log_prob)
z_samples_shape = nn_extra_nvp.int_shape(z_samples)
z_samples = tf.reshape(z_samples,
(z_samples_shape[0] * z_samples_shape[1],
z_shape[1], z_shape[2], z_shape[3])) # (n_samples*seq_len, z_img_shape)
log_det_jac = tf.zeros(z_samples_shape[0] * z_samples_shape[1])
for layer in reversed(nvp_dense_layers):
z_samples, _, log_det_jac = layer.backward(z_samples, None, log_det_jac)
x_samples = None
for layer in reversed(nvp_layers):
x_samples, z_samples, log_det_jac = layer.backward(x_samples, z_samples, log_det_jac)
x_samples, log_det_jac = logit_layer.backward(x_samples, None, log_det_jac)
x_samples, log_det_jac = scale_layer.backward(x_samples, None, log_det_jac)
x_samples = tf.reshape(x_samples,
(z_samples_shape[0], z_samples_shape[1], x_shape[2], x_shape[3], x_shape[4]))
log_det_jac = tf.reshape(log_det_jac, (z_samples_shape[0], z_samples_shape[1]))
latent_log_probs = tf.stack(latent_log_probs, axis=1)
for i in range(seq_len + 1):
log_prob = latent_log_probs[:, i] - log_det_jac[:, i]
log_probs.append(log_prob)
log_probs = tf.stack(log_probs, axis=1)
return x_samples, log_probs
log_probs = tf.stack(log_probs, axis=1)
latent_log_probs = tf.stack(latent_log_probs, axis=1)
latent_log_probs_prior = tf.stack(latent_log_probs_prior, axis=1)
return log_probs, latent_log_probs, latent_log_probs_prior, z_vec
def build_nvp_model():
global nvp_layers
num_scales = 2
num_filters = 32
for scale in range(num_scales - 1):
nvp_layers.append(
nn_extra_nvp.CouplingLayerConv('checkerboard0', name='Checkerboard%d_1' % scale,
nonlinearity=nonlinearity, weight_norm=weight_norm,
num_filters=num_filters))
nvp_layers.append(
nn_extra_nvp.CouplingLayerConv('checkerboard1', name='Checkerboard%d_2' % scale,
nonlinearity=nonlinearity, weight_norm=weight_norm,
num_filters=num_filters))
nvp_layers.append(
nn_extra_nvp.CouplingLayerConv('checkerboard0', name='Checkerboard%d_3' % scale,
nonlinearity=nonlinearity, weight_norm=weight_norm,
num_filters=num_filters))
nvp_layers.append(nn_extra_nvp.SqueezingLayer(name='Squeeze%d' % scale))
nvp_layers.append(
nn_extra_nvp.CouplingLayerConv('channel0', name='Channel%d_1' % scale, nonlinearity=nonlinearity,
weight_norm=weight_norm,
num_filters=num_filters))
nvp_layers.append(
nn_extra_nvp.CouplingLayerConv('channel1', name='Channel%d_2' % scale, nonlinearity=nonlinearity,
weight_norm=weight_norm, num_filters=num_filters))
nvp_layers.append(
nn_extra_nvp.CouplingLayerConv('channel0', name='Channel%d_3' % scale, nonlinearity=nonlinearity,
weight_norm=weight_norm, num_filters=num_filters))
nvp_layers.append(nn_extra_nvp.FactorOutLayer(scale, name='FactorOut%d' % scale))
# final layer
scale = num_scales - 1
nvp_layers.append(
nn_extra_nvp.CouplingLayerConv('checkerboard0', name='Checkerboard%d_1' % scale,
nonlinearity=nonlinearity, weight_norm=weight_norm, num_filters=num_filters))
nvp_layers.append(
nn_extra_nvp.CouplingLayerConv('checkerboard1', name='Checkerboard%d_2' % scale,
nonlinearity=nonlinearity, weight_norm=weight_norm, num_filters=num_filters))
nvp_layers.append(
nn_extra_nvp.CouplingLayerConv('checkerboard0', name='Checkerboard%d_3' % scale,
nonlinearity=nonlinearity, weight_norm=weight_norm, num_filters=num_filters))
nvp_layers.append(
nn_extra_nvp.CouplingLayerConv('checkerboard1', name='Checkerboard%d_4' % scale,
nonlinearity=nonlinearity, weight_norm=weight_norm, num_filters=num_filters))
nvp_layers.append(nn_extra_nvp.FactorOutLayer(scale, name='FactorOut%d' % scale))
def build_nvp_dense_model():
global nvp_dense_layers
for i in range(6):
mask = 'even' if i % 2 == 0 else 'odd'
name = '%s_%s' % (mask, i)
nvp_dense_layers.append(
nn_extra_nvp.CouplingLayerDense(mask, name=name, nonlinearity=nonlinearity, n_units=256,
weight_norm=weight_norm))
def loss(log_probs):
return -tf.reduce_mean(log_probs)
|
py | 1a429029c3fec8a895ec980d03565ccf36319f36 | # -*- coding = utf-8 -*-
# /usr/bin/env python
# @Time : 20-11-18 下午8:25
# @File : test.py
# @Software: PyCharm
# try/except/else while/else break continue
# while True:
# reply = input('Enter txt:')
# if reply == 'stop':
# break
# try:
# num = int(reply)
# except:
# print('Bad!' * 8)
# else:
# print(int(reply)**2)
#
# print('Bye')
while True:
reply = input('Enter txt:')
if reply == 'stop':
break
elif not reply.isdigit():
print('Bad!'*8)
else:
num = int(reply)
if num < 20:
print('low'*4)
else:
print(num**2)
print('Bye'*3)
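
# Added note: the header comment also mentions while/else, which neither loop
# above uses. A minimal sketch of that form (the else clause runs only when
# the loop finishes without hitting a break):
n = 0
while n < 3:
    n += 1
else:
    print('while/else: loop ended without break, n =', n)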
|
py | 1a4290b3961bd973bf09bf164210db305da6d3e3 | # Standard imports
import logging
# Our imports
import emission.core.wrapper.trip_old as ect
import emission.core.wrapper.tripiterator as ecti
import emission.core.wrapper.filter_modules as ecfm
import emission.core.get_database as edb
import emission.analysis.modelling.user_model.query_scheduler_pipeline as eaqsp
#import Profiles
"""
High level overview of alternative_trips_pipeline
The main method for this pipeline is calc_perturbed_trips
We first construct a trip iterator, and look at each trip where the pipelineFlags attribute "alternativesStarted" is False
For each of these trips, we create an array of trip objects, which are modifications to the original trip object with perturbed times
We also have a database collection that associates each original trip, with all of its perturbed trips
For each array of perturbed trip objects, we schedule those queries via CRON jobs
We initialize each perturbed trip as None, and we update the collection as the queries are being made
Overview of helper files relevant to this pipeline:
-query.py -> makes a google maps query immediately to get the directions
-> will also store the query into the database collection
-perturb.py -> schedules calls to query.py on cron jobs
@TODO: this doesn't exist yet, but it would make more sense than to put it in here in order to keep the code clean
-database_util.py -> contains all the helper methods to abstract out interaction with the database
"""
# Invoked in the recommendation pipeline to get perturbed trips the user should consider
def calc_alternative_trips(user_trips, immediate):
stagger = 1
total_stagger = 0
for existing_trip in user_trips:
#if not existing_trip.pipelineFlags.alternativesStarted:
existing_trip.pipelineFlags.startAlternatives()
existing_trip.pipelineFlags.savePipelineFlags()
if immediate:
eaqsp.schedule_queries(existing_trip.trip_id, existing_trip.user_id, [existing_trip], immediate, total_stagger)
total_stagger += stagger
else:
eaqsp.schedule_queries(existing_trip.trip_id, existing_trip.user_id, [existing_trip], immediate)
def get_alternative_for_trips(trip_it):
    # User Utility Pipeline calls this to get alternative trips for one original trip (_id)
alternatives = []
tripCnt = 0
for _trip in trip_it:
logging.debug("Considering trip with id %s " % _trip.trip_id)
tripCnt = tripCnt + 1
try:
ti = ecti.TripIterator(_trip.trip_id, ["alternatives", "get_alternatives"], ect.Alternative_Trip)
alternatives.append(ti)
except ecfm.AlternativesNotFound:
alternatives.append([])
logging.debug("tripCnt = %d, alternatives cnt = %d" % (tripCnt, len(alternatives)))
return alternatives
def get_alternative_for_trip(trip):
    # User Utility Pipeline calls this to get alternative trips for one original trip (_id)
try:
ti = ecti.TripIterator(trip.trip_id, ["alternatives", "get_alternatives"], ect.Alternative_Trip)
return ti
except ecfm.AlternativesNotFound:
return []
def get_perturbed_trips(_id):
    # User Utility Pipeline calls this to get alternative trips for one original trip (_id)
# db = edb.get_perturbed_trips_db()
# _id = tripObj.get_id()
# return db.find(_id)
return [ect.E_Mission_Trip.trip_from_json(jsonStr) for jsonStr in edb.get_perturbed_trips_db().find({'_id' : _id})].__iter__()
|
py | 1a4291c07f0e8029506bdfe7266dadce08a77c77 | # -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2019 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.appliance.update.
#---------------------------------------------------------------------------
"""
The ``com.vmware.appliance.update_client`` module provides classes for updating
the software in the appliance. The module is available starting in vSphere 6.5.
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class ServiceInfo(VapiStruct):
"""
The ``ServiceInfo`` class describes a service to be stopped and started
during the update installation. This class was added in vSphere API 6.7.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
service=None,
description=None,
):
"""
:type service: :class:`str`
:param service: Service ID. This attribute was added in vSphere API 6.7.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``com.vmware.appliance.service``. When methods return a value of
this class as a return value, the attribute will be an identifier
for the resource type: ``com.vmware.appliance.service``.
:type description: :class:`com.vmware.vapi.std_client.LocalizableMessage`
:param description: Service description. This attribute was added in vSphere API 6.7.
"""
self.service = service
self.description = description
VapiStruct.__init__(self)
ServiceInfo._set_binding_type(type.StructType(
'com.vmware.appliance.update.service_info', {
'service': type.IdType(resource_types='com.vmware.appliance.service'),
'description': type.ReferenceType('com.vmware.vapi.std_client', 'LocalizableMessage'),
},
ServiceInfo,
False,
None))
class CommonInfo(VapiStruct):
"""
The ``CommonInfo`` class defines common update information. This class was
added in vSphere API 6.7.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
description=None,
priority=None,
severity=None,
update_type=None,
release_date=None,
reboot_required=None,
size=None,
):
"""
:type description: :class:`com.vmware.vapi.std_client.LocalizableMessage`
:param description: Description of the update. The short information what this update
is. E.g. "Update2 for vCenter Server Appliance 6.5". This attribute
was added in vSphere API 6.7.
:type priority: :class:`CommonInfo.Priority`
:param priority: Update priority. This attribute was added in vSphere API 6.7.
:type severity: :class:`CommonInfo.Severity`
:param severity: Update severity. This attribute was added in vSphere API 6.7.
:type update_type: :class:`CommonInfo.Category`
:param update_type: Update category. This attribute was added in vSphere API 6.7.
:type release_date: :class:`datetime.datetime`
:param release_date: Update release date. This attribute was added in vSphere API 6.7.
:type reboot_required: :class:`bool`
:param reboot_required: Flag indicating whether reboot is required after update. This
attribute was added in vSphere API 6.7.
:type size: :class:`long`
:param size: Download Size of update in Megabytes. This attribute was added in
vSphere API 6.7.
"""
self.description = description
self.priority = priority
self.severity = severity
self.update_type = update_type
self.release_date = release_date
self.reboot_required = reboot_required
self.size = size
VapiStruct.__init__(self)
class Priority(Enum):
"""
The ``CommonInfo.Priority`` class defines the update installation priority
recommendations. This enumeration was added in vSphere API 6.7.
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
HIGH = None
"""
Install ASAP. This class attribute was added in vSphere API 6.7.
"""
MEDIUM = None
"""
Install at the earliest convenience. This class attribute was added in
vSphere API 6.7.
"""
LOW = None
"""
Install at your discretion. This class attribute was added in vSphere API
6.7.
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`Priority` instance.
"""
Enum.__init__(string)
Priority._set_values([
Priority('HIGH'),
Priority('MEDIUM'),
Priority('LOW'),
])
Priority._set_binding_type(type.EnumType(
'com.vmware.appliance.update.common_info.priority',
Priority))
class Severity(Enum):
"""
The ``CommonInfo.Severity`` class defines the severity of the issues fixed
in the update. This enumeration was added in vSphere API 6.7.
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
CRITICAL = None
"""
Vulnerabilities that can be exploited by an unauthenticated attacker from
the Internet or those that break the guest/host Operating System isolation.
The exploitation results in the complete compromise of confidentiality,
integrity, and availability of user data and/or processing resources
without user interaction. Exploitation could be leveraged to propagate an
Internet worm or execute arbitrary code between Virtual Machines and/or the
Host Operating System. This class attribute was added in vSphere API 6.7.
"""
IMPORTANT = None
"""
Vulnerabilities that are not rated critical but whose exploitation results
in the complete compromise of confidentiality and/or integrity of user data
and/or processing resources through user assistance or by authenticated
attackers. This rating also applies to those vulnerabilities which could
lead to the complete compromise of availability when exploitation is by a
remote unauthenticated attacker from the Internet or through a breach of
virtual machine isolation. This class attribute was added in vSphere API
6.7.
"""
MODERATE = None
"""
Vulnerabilities where the ability to exploit is mitigated to a significant
degree by configuration or difficulty of exploitation, but in certain
deployment scenarios could still lead to the compromise of confidentiality,
integrity, or availability of user data and/or processing resources. This
class attribute was added in vSphere API 6.7.
"""
LOW = None
"""
All other issues that have a security impact. Vulnerabilities where
exploitation is believed to be extremely difficult, or where successful
exploitation would have minimal impact. This class attribute was added in
vSphere API 6.7.
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`Severity` instance.
"""
Enum.__init__(string)
Severity._set_values([
Severity('CRITICAL'),
Severity('IMPORTANT'),
Severity('MODERATE'),
Severity('LOW'),
])
Severity._set_binding_type(type.EnumType(
'com.vmware.appliance.update.common_info.severity',
Severity))
class Category(Enum):
"""
The ``CommonInfo.Category`` class defines update type. This enumeration was
added in vSphere API 6.7.
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
SECURITY = None
"""
Fixes vulnerabilities, doesn't change functionality. This class attribute
was added in vSphere API 6.7.
"""
FIX = None
"""
Fixes bugs/vulnerabilities, doesn't change functionality. This class
attribute was added in vSphere API 6.7.
"""
UPDATE = None
"""
Changes product functionality. This class attribute was added in vSphere
API 6.7.
"""
UPGRADE = None
"""
Introduces new features, significantly changes product functionality. This
class attribute was added in vSphere API 6.7.
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`Category` instance.
"""
Enum.__init__(string)
Category._set_values([
Category('SECURITY'),
Category('FIX'),
Category('UPDATE'),
Category('UPGRADE'),
])
Category._set_binding_type(type.EnumType(
'com.vmware.appliance.update.common_info.category',
Category))
CommonInfo._set_binding_type(type.StructType(
'com.vmware.appliance.update.common_info', {
'description': type.ReferenceType('com.vmware.vapi.std_client', 'LocalizableMessage'),
'priority': type.ReferenceType(__name__, 'CommonInfo.Priority'),
'severity': type.ReferenceType(__name__, 'CommonInfo.Severity'),
'update_type': type.ReferenceType(__name__, 'CommonInfo.Category'),
'release_date': type.DateTimeType(),
'reboot_required': type.BooleanType(),
'size': type.IntegerType(),
},
CommonInfo,
False,
None))
class Summary(VapiStruct):
"""
The ``Summary`` class contains the essential information about the update.
This class was added in vSphere API 6.7.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
version=None,
description=None,
priority=None,
severity=None,
update_type=None,
release_date=None,
reboot_required=None,
size=None,
):
"""
:type version: :class:`str`
:param version: Version in form of X.Y.Z.P. e.g. 6.5.1.5400. This attribute was
added in vSphere API 6.7.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``com.vmware.appliance.update.pending``. When methods return a
value of this class as a return value, the attribute will be an
identifier for the resource type:
``com.vmware.appliance.update.pending``.
:type description: :class:`com.vmware.vapi.std_client.LocalizableMessage`
:param description: Description of the update. A short summary of what this update
is, e.g. "Update2 for vCenter Server Appliance 6.5". This attribute
was added in vSphere API 6.7.
:type priority: :class:`CommonInfo.Priority`
:param priority: Update priority. This attribute was added in vSphere API 6.7.
:type severity: :class:`CommonInfo.Severity`
:param severity: Update severity. This attribute was added in vSphere API 6.7.
:type update_type: :class:`CommonInfo.Category`
:param update_type: Update category. This attribute was added in vSphere API 6.7.
:type release_date: :class:`datetime.datetime`
:param release_date: Update release date. This attribute was added in vSphere API 6.7.
:type reboot_required: :class:`bool`
:param reboot_required: Flag indicating whether reboot is required after update. This
attribute was added in vSphere API 6.7.
:type size: :class:`long`
:param size: Download Size of update in Megabytes. This attribute was added in
vSphere API 6.7.
"""
self.version = version
self.description = description
self.priority = priority
self.severity = severity
self.update_type = update_type
self.release_date = release_date
self.reboot_required = reboot_required
self.size = size
VapiStruct.__init__(self)
Summary._set_binding_type(type.StructType(
'com.vmware.appliance.update.summary', {
'version': type.IdType(resource_types='com.vmware.appliance.update.pending'),
'description': type.ReferenceType('com.vmware.vapi.std_client', 'LocalizableMessage'),
'priority': type.ReferenceType(__name__, 'CommonInfo.Priority'),
'severity': type.ReferenceType(__name__, 'CommonInfo.Severity'),
'update_type': type.ReferenceType(__name__, 'CommonInfo.Category'),
'release_date': type.DateTimeType(),
'reboot_required': type.BooleanType(),
'size': type.IntegerType(),
},
Summary,
False,
None))
class Policy(VapiInterface):
"""
The ``Policy`` class provides methods to set/get background check for the
new updates. This class was added in vSphere API 6.7.
"""
_VAPI_SERVICE_ID = 'com.vmware.appliance.update.policy'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _PolicyStub)
class DayOfWeek(Enum):
"""
The ``Policy.DayOfWeek`` class defines the set of days. This enumeration
was added in vSphere API 6.7.
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
MONDAY = None
"""
Monday. This class attribute was added in vSphere API 6.7.
"""
TUESDAY = None
"""
Tuesday. This class attribute was added in vSphere API 6.7.
"""
WEDNESDAY = None
"""
Wednesday. This class attribute was added in vSphere API 6.7.
"""
THURSDAY = None
"""
Thursday. This class attribute was added in vSphere API 6.7.
"""
FRIDAY = None
"""
Friday. This class attribute was added in vSphere API 6.7.
"""
SATURDAY = None
"""
Saturday. This class attribute was added in vSphere API 6.7.
"""
SUNDAY = None
"""
Sunday. This class attribute was added in vSphere API 6.7.
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`DayOfWeek` instance.
"""
Enum.__init__(string)
DayOfWeek._set_values([
DayOfWeek('MONDAY'),
DayOfWeek('TUESDAY'),
DayOfWeek('WEDNESDAY'),
DayOfWeek('THURSDAY'),
DayOfWeek('FRIDAY'),
DayOfWeek('SATURDAY'),
DayOfWeek('SUNDAY'),
])
DayOfWeek._set_binding_type(type.EnumType(
'com.vmware.appliance.update.policy.day_of_week',
DayOfWeek))
class Time(VapiStruct):
"""
The ``Policy.Time`` class defines weekday and time the automatic check for
new updates will be run. This class was added in vSphere API 6.7.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
day=None,
hour=None,
minute=None,
):
"""
:type day: :class:`Policy.DayOfWeek`
:param day: weekday to check for updates. This attribute was added in vSphere
API 6.7.
:type hour: :class:`long`
:param hour: Hour: 0-24. This attribute was added in vSphere API 6.7.
:type minute: :class:`long`
:param minute: Minute: 0-59. This attribute was added in vSphere API 6.7.
"""
self.day = day
self.hour = hour
self.minute = minute
VapiStruct.__init__(self)
Time._set_binding_type(type.StructType(
'com.vmware.appliance.update.policy.time', {
'day': type.ReferenceType(__name__, 'Policy.DayOfWeek'),
'hour': type.IntegerType(),
'minute': type.IntegerType(),
},
Time,
False,
None))
class Info(VapiStruct):
"""
The ``Policy.Info`` class defines automatic update checking and staging
policy. This class was added in vSphere API 6.7.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
_canonical_to_pep_names = {
'custom_URL': 'custom_url',
'default_URL': 'default_url',
}
def __init__(self,
custom_url=None,
default_url=None,
username=None,
check_schedule=None,
auto_stage=None,
auto_update=None,
manual_control=None,
):
"""
:type custom_url: :class:`str` or ``None``
:param custom_url: Current appliance update custom repository URL. This attribute was
added in vSphere API 6.7.
If None, updates are checked at the default URL.
:type default_url: :class:`str`
:param default_url: Current appliance update default repository URL. This attribute was
added in vSphere API 6.7.
:type username: :class:`str` or ``None``
:param username: Username for the update repository. This attribute was added in
vSphere API 6.7.
If None, username will not be used to log in
:type check_schedule: :class:`list` of :class:`Policy.Time`
:param check_schedule: Schedule when the automatic check will be run. This attribute was
added in vSphere API 6.7.
:type auto_stage: :class:`bool`
:param auto_stage: Automatically stage the latest update if available. This attribute
was added in vSphere API 6.7.
:type auto_update: :class:`bool`
:param auto_update: Whether the appliance is updated automatically. If set, the
appliance may ignore the check schedule or auto-stage settings.
This attribute was added in vSphere API 6.7.
:type manual_control: :class:`bool`
:param manual_control: Whether API client should allow the user to start update manually.
This attribute was added in vSphere API 6.7.
"""
self.custom_url = custom_url
self.default_url = default_url
self.username = username
self.check_schedule = check_schedule
self.auto_stage = auto_stage
self.auto_update = auto_update
self.manual_control = manual_control
VapiStruct.__init__(self)
Info._set_binding_type(type.StructType(
'com.vmware.appliance.update.policy.info', {
'custom_URL': type.OptionalType(type.StringType()),
'default_URL': type.StringType(),
'username': type.OptionalType(type.StringType()),
'check_schedule': type.ListType(type.ReferenceType(__name__, 'Policy.Time')),
'auto_stage': type.BooleanType(),
'auto_update': type.BooleanType(),
'manual_control': type.BooleanType(),
},
Info,
False,
None))
class Config(VapiStruct):
"""
The ``Policy.Config`` class defines automatic update checking and staging
policy. This class was added in vSphere API 6.7.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
_canonical_to_pep_names = {
'custom_URL': 'custom_url',
}
def __init__(self,
custom_url=None,
username=None,
password=None,
check_schedule=None,
auto_stage=None,
):
"""
:type custom_url: :class:`str` or ``None``
:param custom_url: Current appliance update repository URL. This attribute was added
in vSphere API 6.7.
If None then default URL is assumed
:type username: :class:`str` or ``None``
:param username: Username for the update repository. This attribute was added in
vSphere API 6.7.
If None, username will not be used to log in
:type password: :class:`str` or ``None``
:param password: Password for the update repository. This attribute was added in
vSphere API 6.7.
If None, password will not be used to log in
:type check_schedule: :class:`list` of :class:`Policy.Time`
:param check_schedule: Schedule when the automatic check will be run. This attribute was
added in vSphere API 6.7.
:type auto_stage: :class:`bool`
:param auto_stage: Automatically stage the latest update if available. This attribute
was added in vSphere API 6.7.
"""
self.custom_url = custom_url
self.username = username
self.password = password
self.check_schedule = check_schedule
self.auto_stage = auto_stage
VapiStruct.__init__(self)
Config._set_binding_type(type.StructType(
'com.vmware.appliance.update.policy.config', {
'custom_URL': type.OptionalType(type.StringType()),
'username': type.OptionalType(type.StringType()),
'password': type.OptionalType(type.SecretType()),
'check_schedule': type.ListType(type.ReferenceType(__name__, 'Policy.Time')),
'auto_stage': type.BooleanType(),
},
Config,
False,
None))
def get(self):
"""
Gets the automatic update checking and staging policy. This method was
added in vSphere API 6.7.
:rtype: :class:`Policy.Info`
:return: Structure containing the policy for the appliance update.
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
Generic error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
session is not authenticated
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
session is not authorized to perform this operation
"""
return self._invoke('get', None)
def set(self,
policy,
):
"""
Sets the automatic update checking and staging policy. This method was
added in vSphere API 6.7.
:type policy: :class:`Policy.Config`
:param policy: Info structure containing the policy for the appliance update.
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
Generic error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
session is not authenticated
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
session is not authorized to perform this operation
"""
return self._invoke('set',
{
'policy': policy,
})
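# Illustrative usage sketch for the Policy service above (not part of the
# generated bindings): read the current automatic update-check policy and
# switch it to a weekly Sunday 02:30 check with automatic staging. The
# ``stub_config`` argument is assumed to be an already authenticated
# :class:`vmware.vapi.bindings.stub.StubConfiguration`; obtaining one is
# environment-specific and not shown here.
def _example_update_check_policy(stub_config):
    policy_svc = Policy(stub_config)
    current = policy_svc.get()  # Policy.Info with the currently configured policy
    sunday_check = Policy.Time(day=Policy.DayOfWeek.SUNDAY, hour=2, minute=30)
    policy_svc.set(Policy.Config(check_schedule=[sunday_check], auto_stage=True))
    return current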
class Pending(VapiInterface):
"""
The ``Pending`` class provides methods to manipulate pending updates. This
class was added in vSphere API 6.7.
"""
_VAPI_SERVICE_ID = 'com.vmware.appliance.update.pending'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _PendingStub)
class SourceType(Enum):
"""
The ``Pending.SourceType`` class defines the supported types of sources of
updates. This enumeration was added in vSphere API 6.7.
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
LAST_CHECK = None
"""
Do not perform a new check, return the previous result. This class
attribute was added in vSphere API 6.7.
"""
LOCAL = None
"""
Check the local sources, ISO devices, staged area. This class attribute was
added in vSphere API 6.7.
"""
LOCAL_AND_ONLINE = None
"""
Check the local sources, ISO devices, staged area, then online repository
as stated in update policy. This class attribute was added in vSphere API
6.7.
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`SourceType` instance.
"""
Enum.__init__(string)
SourceType._set_values([
SourceType('LAST_CHECK'),
SourceType('LOCAL'),
SourceType('LOCAL_AND_ONLINE'),
])
SourceType._set_binding_type(type.EnumType(
'com.vmware.appliance.update.pending.source_type',
SourceType))
class Info(VapiStruct):
"""
The ``Pending.Info`` class contains the extended information about the
update. This class was added in vSphere API 6.7.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
contents=None,
services_will_be_stopped=None,
eulas=None,
staged=None,
description=None,
priority=None,
severity=None,
update_type=None,
release_date=None,
reboot_required=None,
size=None,
):
"""
:type contents: :class:`list` of :class:`com.vmware.vapi.std_client.LocalizableMessage`
:param contents: List of (1) the issues addressed since the previous/current version
and (2) new features/improvements. This attribute was added in vSphere API
6.7.
:type services_will_be_stopped: :class:`list` of :class:`ServiceInfo`
:param services_will_be_stopped: List of the services that will be stopped and restarted during the
update installation. This attribute was added in vSphere API 6.7.
:type eulas: :class:`list` of :class:`com.vmware.vapi.std_client.LocalizableMessage`
:param eulas: List of EULAs. This list has multiple entries and can be dynamic
based on what we are actually installing. This attribute was added
in vSphere API 6.7.
:type staged: :class:`bool`
:param staged: Is the update staged. This attribute was added in vSphere API 6.7.
:type description: :class:`com.vmware.vapi.std_client.LocalizableMessage`
:param description: Description of the update. A short summary of what this update
is, e.g. "Update2 for vCenter Server Appliance 6.5". This attribute
was added in vSphere API 6.7.
:type priority: :class:`CommonInfo.Priority`
:param priority: Update priority. This attribute was added in vSphere API 6.7.
:type severity: :class:`CommonInfo.Severity`
:param severity: Update severity. This attribute was added in vSphere API 6.7.
:type update_type: :class:`CommonInfo.Category`
:param update_type: Update category. This attribute was added in vSphere API 6.7.
:type release_date: :class:`datetime.datetime`
:param release_date: Update release date. This attribute was added in vSphere API 6.7.
:type reboot_required: :class:`bool`
:param reboot_required: Flag indicating whether reboot is required after update. This
attribute was added in vSphere API 6.7.
:type size: :class:`long`
:param size: Download Size of update in Megabytes. This attribute was added in
vSphere API 6.7.
"""
self.contents = contents
self.services_will_be_stopped = services_will_be_stopped
self.eulas = eulas
self.staged = staged
self.description = description
self.priority = priority
self.severity = severity
self.update_type = update_type
self.release_date = release_date
self.reboot_required = reboot_required
self.size = size
VapiStruct.__init__(self)
Info._set_binding_type(type.StructType(
'com.vmware.appliance.update.pending.info', {
'contents': type.ListType(type.ReferenceType('com.vmware.vapi.std_client', 'LocalizableMessage')),
'services_will_be_stopped': type.ListType(type.ReferenceType(__name__, 'ServiceInfo')),
'eulas': type.ListType(type.ReferenceType('com.vmware.vapi.std_client', 'LocalizableMessage')),
'staged': type.BooleanType(),
'description': type.ReferenceType('com.vmware.vapi.std_client', 'LocalizableMessage'),
'priority': type.ReferenceType(__name__, 'CommonInfo.Priority'),
'severity': type.ReferenceType(__name__, 'CommonInfo.Severity'),
'update_type': type.ReferenceType(__name__, 'CommonInfo.Category'),
'release_date': type.DateTimeType(),
'reboot_required': type.BooleanType(),
'size': type.IntegerType(),
},
Info,
False,
None))
class Question(VapiStruct):
"""
The ``Pending.Question`` class describes an item of information that must be
provided by the user in order to install the update. This class was added
in vSphere API 6.7.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
_validator_list = [
UnionValidator(
'type',
{
'PLAIN_TEXT' : [('allowed_values', False), ('regexp', False), ('default_answer', False)],
'BOOLEAN' : [],
'PASSWORD' : [],
}
),
]
def __init__(self,
data_item=None,
text=None,
description=None,
type=None,
allowed_values=None,
regexp=None,
default_answer=None,
):
"""
:type data_item: :class:`str`
:param data_item: ID of the data item. This attribute was added in vSphere API 6.7.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``com.vmware.applicance.update.pending.dataitem``. When methods
return a value of this class as a return value, the attribute will
be an identifier for the resource type:
``com.vmware.applicance.update.pending.dataitem``.
:type text: :class:`com.vmware.vapi.std_client.LocalizableMessage`
:param text: Label for the item to be used in GUI/CLI. This attribute was added
in vSphere API 6.7.
:type description: :class:`com.vmware.vapi.std_client.LocalizableMessage`
:param description: Description of the item. This attribute was added in vSphere API
6.7.
:type type: :class:`Pending.Question.InputType`
:param type: How this field should be represented in GUI or CLI. This attribute
was added in vSphere API 6.7.
:type allowed_values: :class:`list` of :class:`str` or ``None``
:param allowed_values: List of allowed values. This attribute was added in vSphere API
6.7.
allowedValues If None any value is valid.
:type regexp: :class:`str` or ``None``
:param regexp: Regexp to validate the input. This attribute was added in vSphere
API 6.7.
regexp If None no validation will be performed.
:type default_answer: :class:`str` or ``None``
:param default_answer: Default answer. This attribute was added in vSphere API 6.7.
defaultAnswer If None then there is no default answer, so an
explicit answer must be provided
"""
self.data_item = data_item
self.text = text
self.description = description
self.type = type
self.allowed_values = allowed_values
self.regexp = regexp
self.default_answer = default_answer
VapiStruct.__init__(self)
class InputType(Enum):
"""
The ``Pending.Question.InputType`` class defines how input fields are
represented in GUI or CLI. This enumeration was added in vSphere API 6.7.
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
PLAIN_TEXT = None
"""
plain text answer. This class attribute was added in vSphere API 6.7.
"""
BOOLEAN = None
"""
Yes/No,On/Off,Checkbox answer. This class attribute was added in vSphere
API 6.7.
"""
PASSWORD = None
"""
Password (masked) answer. This class attribute was added in vSphere API
6.7.
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`InputType` instance.
"""
Enum.__init__(string)
InputType._set_values([
InputType('PLAIN_TEXT'),
InputType('BOOLEAN'),
InputType('PASSWORD'),
])
InputType._set_binding_type(type.EnumType(
'com.vmware.appliance.update.pending.question.input_type',
InputType))
Question._set_binding_type(type.StructType(
'com.vmware.appliance.update.pending.question', {
'data_item': type.IdType(resource_types='com.vmware.applicance.update.pending.dataitem'),
'text': type.ReferenceType('com.vmware.vapi.std_client', 'LocalizableMessage'),
'description': type.ReferenceType('com.vmware.vapi.std_client', 'LocalizableMessage'),
'type': type.ReferenceType(__name__, 'Pending.Question.InputType'),
'allowed_values': type.OptionalType(type.ListType(type.StringType())),
'regexp': type.OptionalType(type.StringType()),
'default_answer': type.OptionalType(type.StringType()),
},
Question,
False,
None))
class PrecheckResult(VapiStruct):
"""
The ``Pending.PrecheckResult`` class contains estimates of how long it will
take to install and roll back an update, as well as a list of possible warnings
and problems with installing the update. This class was added in vSphere
API 6.7.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
check_time=None,
estimated_time_to_install=None,
estimated_time_to_rollback=None,
reboot_required=None,
issues=None,
questions=None,
):
"""
:type check_time: :class:`datetime.datetime`
:param check_time: Time when this precheck was run. This attribute was added in
vSphere API 6.7.
:type estimated_time_to_install: :class:`long` or ``None``
:param estimated_time_to_install: Rough estimate of time to install the update (minutes). This
attribute was added in vSphere API 6.7.
estimatedTimeToInstall If None N/A
:type estimated_time_to_rollback: :class:`long` or ``None``
:param estimated_time_to_rollback: Rough estimate of time to rollback the update (minutes). This
attribute was added in vSphere API 6.7.
estimatedTimeToRollback If None N/A
:type reboot_required: :class:`bool`
:param reboot_required: Is reboot required to install the update. This attribute was added
in vSphere API 6.7.
:type issues: :class:`com.vmware.appliance_client.Notifications` or ``None``
:param issues: Lists of the issues and warnings. This attribute was added in
vSphere API 6.7.
issues If None N/A
:type questions: :class:`list` of :class:`Pending.Question`
:param questions: List of questions that must be answered to install the update. This
attribute was added in vSphere API 6.7.
"""
self.check_time = check_time
self.estimated_time_to_install = estimated_time_to_install
self.estimated_time_to_rollback = estimated_time_to_rollback
self.reboot_required = reboot_required
self.issues = issues
self.questions = questions
VapiStruct.__init__(self)
PrecheckResult._set_binding_type(type.StructType(
'com.vmware.appliance.update.pending.precheck_result', {
'check_time': type.DateTimeType(),
'estimated_time_to_install': type.OptionalType(type.IntegerType()),
'estimated_time_to_rollback': type.OptionalType(type.IntegerType()),
'reboot_required': type.BooleanType(),
'issues': type.OptionalType(type.ReferenceType('com.vmware.appliance_client', 'Notifications')),
'questions': type.ListType(type.ReferenceType(__name__, 'Pending.Question')),
},
PrecheckResult,
False,
None))
def list(self,
source_type,
url=None,
):
"""
Checks if new updates are available. This method was added in vSphere
API 6.7.
:type source_type: :class:`Pending.SourceType`
:param source_type: type of the source
:type url: :class:`str` or ``None``
:param url: specific URL to check at
If None then URL is taken from the policy settings
:rtype: :class:`list` of :class:`Summary`
:return: List of the update summaries
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
Generic error
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
source is not found
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
session is not authenticated
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
session is not authorized to perform this operation
"""
return self._invoke('list',
{
'source_type': source_type,
'url': url,
})
def get(self,
version,
):
"""
Gets update information. This method was added in vSphere API 6.7.
:type version: :class:`str`
:param version: Update version
The parameter must be an identifier for the resource type:
``com.vmware.appliance.update.pending``.
:rtype: :class:`Pending.Info`
:return: Update
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
Generic error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
session is not authenticated
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
session is not authorized to perform this operation
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
the update is not found
:raise: :class:`com.vmware.vapi.std.errors_client.AlreadyInDesiredState`
if the update of this version is already installed
"""
return self._invoke('get',
{
'version': version,
})
def precheck(self,
version,
):
"""
Runs update precheck. This method was added in vSphere API 6.7.
:type version: :class:`str`
:param version: Update version
The parameter must be an identifier for the resource type:
``com.vmware.appliance.update.pending``.
:rtype: :class:`Pending.PrecheckResult`
:return: PrecheckResult
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
Generic error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
session is not authenticated
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
session is not authorized to perform this operation
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
the update is not found
:raise: :class:`com.vmware.vapi.std.errors_client.AlreadyInDesiredState`
if this version is already installed
:raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
if another operation is in progress
"""
return self._invoke('precheck',
{
'version': version,
})
def stage(self,
version,
):
"""
Starts staging the appliance update. The updates are searched for in
the following order: staged, CDROM, URL. This method was added in
vSphere API 6.7.
:type version: :class:`str`
:param version: Update version
The parameter must be an identifier for the resource type:
``com.vmware.appliance.update.pending``.
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
Generic error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
session is not authenticated
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
session is not authorized to perform this operation
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
the update is not found
:raise: :class:`com.vmware.vapi.std.errors_client.AlreadyInDesiredState`
if the update of this version is already installed
:raise: :class:`com.vmware.vapi.std.errors_client.AlreadyExists`
the update is already staged
:raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
if appliance update state prevents staging
"""
return self._invoke('stage',
{
'version': version,
})
def validate(self,
version,
user_data,
):
"""
Validates the user provided data before the update installation. This
method was added in vSphere API 6.7.
:type version: :class:`str`
:param version: Update version
The parameter must be an identifier for the resource type:
``com.vmware.appliance.update.pending``.
:type user_data: :class:`dict` of :class:`str` and :class:`str`
:param user_data: map of user provided data with IDs
The key in the parameter :class:`dict` must be an identifier for
the resource type:
``com.vmware.applicance.update.pending.dataitem``.
:rtype: :class:`com.vmware.appliance_client.Notifications`
:return: Issues struct with the issues found during the validation
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
Generic error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
session is not authenticated
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
session is not authorized to perform this operation
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
if the update is not found
:raise: :class:`com.vmware.vapi.std.errors_client.AlreadyInDesiredState`
if the update of this version is already installed
:raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
if appliance update state prevents running a check
"""
return self._invoke('validate',
{
'version': version,
'user_data': user_data,
})
def install(self,
version,
user_data,
):
"""
Starts operation of installing the appliance update. Will fail if the
update is not staged. This method was added in vSphere API 6.7.
:type version: :class:`str`
:param version: Update version
The parameter must be an identifier for the resource type:
``com.vmware.appliance.update.pending``.
:type user_data: :class:`dict` of :class:`str` and :class:`str`
:param user_data: map of user provided data with IDs
The key in the parameter :class:`dict` must be an identifier for
the resource type:
``com.vmware.applicance.update.pending.dataitem``.
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
Generic error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
session is not authenticated
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
session is not authorized to perform this operation
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
if the update is not found
:raise: :class:`com.vmware.vapi.std.errors_client.AlreadyInDesiredState`
if the update of this version is already installed
:raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
if appliance update state prevents running an update or the update is not staged
"""
return self._invoke('install',
{
'version': version,
'user_data': user_data,
})
def stage_and_install(self,
version,
user_data,
):
"""
Starts operation of installing the appliance update. Will stage the update
if it is not already staged. The updates are searched for in the following
order: staged, CDROM, URL. This method was added in vSphere API 6.7.
:type version: :class:`str`
:param version: Update version
The parameter must be an identifier for the resource type:
``com.vmware.appliance.update.pending``.
:type user_data: :class:`dict` of :class:`str` and :class:`str`
:param user_data: map of user provided data with IDs
The key in the parameter :class:`dict` must be an identifier for
the resource type:
``com.vmware.applicance.update.pending.dataitem``.
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
Generic error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
session is not authenticated
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
session is not authorized to perform this operation
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
if the update is not found
:raise: :class:`com.vmware.vapi.std.errors_client.AlreadyInDesiredState`
if the update of this version is already installed
:raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
if appliance update state prevents running an update
"""
return self._invoke('stage_and_install',
{
'version': version,
'user_data': user_data,
})
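# Illustrative usage sketch for the Pending service above (not part of the
# generated bindings): discover available updates, run the precheck for the
# first one, and stage-and-install it. ``stub_config`` is assumed to be an
# authenticated StubConfiguration, and the empty strings in ``user_data`` are
# placeholders -- real answers depend on the Pending.Question items returned
# by precheck().
def _example_install_first_available(stub_config):
    pending_svc = Pending(stub_config)
    summaries = pending_svc.list(Pending.SourceType.LOCAL_AND_ONLINE)
    if not summaries:
        return None  # nothing to install
    version = summaries[0].version
    precheck_result = pending_svc.precheck(version)  # Pending.PrecheckResult
    user_data = {q.data_item: '' for q in precheck_result.questions}
    pending_svc.stage_and_install(version, user_data)
    return version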
class Staged(VapiInterface):
"""
The ``Staged`` class provides methods to get the status of the staged
update. This class was added in vSphere API 6.7.
"""
_VAPI_SERVICE_ID = 'com.vmware.appliance.update.staged'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _StagedStub)
class Info(VapiStruct):
"""
The ``Staged.Info`` class contains information about the staged update.
This class was added in vSphere API 6.7.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
staging_complete=None,
version=None,
description=None,
priority=None,
severity=None,
update_type=None,
release_date=None,
reboot_required=None,
size=None,
):
"""
:type staging_complete: :class:`bool`
:param staging_complete: Is staging complete. This attribute was added in vSphere API 6.7.
:type version: :class:`str`
:param version: Version in form of X.Y.Z.P. e.g. 6.5.1.5400. This attribute was
added in vSphere API 6.7.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``com.vmware.appliance.update.pending``. When methods return a
value of this class as a return value, the attribute will be an
identifier for the resource type:
``com.vmware.appliance.update.pending``.
:type description: :class:`com.vmware.vapi.std_client.LocalizableMessage`
:param description: Description of the update. A short summary of what this update
is, e.g. "Update2 for vCenter Server Appliance 6.5". This attribute
was added in vSphere API 6.7.
:type priority: :class:`CommonInfo.Priority`
:param priority: Update priority. This attribute was added in vSphere API 6.7.
:type severity: :class:`CommonInfo.Severity`
:param severity: Update severity. This attribute was added in vSphere API 6.7.
:type update_type: :class:`CommonInfo.Category`
:param update_type: Update category. This attribute was added in vSphere API 6.7.
:type release_date: :class:`datetime.datetime`
:param release_date: Update release date. This attribute was added in vSphere API 6.7.
:type reboot_required: :class:`bool`
:param reboot_required: Flag indicating whether reboot is required after update. This
attribute was added in vSphere API 6.7.
:type size: :class:`long`
:param size: Download Size of update in Megabytes. This attribute was added in
vSphere API 6.7.
"""
self.staging_complete = staging_complete
self.version = version
self.description = description
self.priority = priority
self.severity = severity
self.update_type = update_type
self.release_date = release_date
self.reboot_required = reboot_required
self.size = size
VapiStruct.__init__(self)
Info._set_binding_type(type.StructType(
'com.vmware.appliance.update.staged.info', {
'staging_complete': type.BooleanType(),
'version': type.IdType(resource_types='com.vmware.appliance.update.pending'),
'description': type.ReferenceType('com.vmware.vapi.std_client', 'LocalizableMessage'),
'priority': type.ReferenceType(__name__, 'CommonInfo.Priority'),
'severity': type.ReferenceType(__name__, 'CommonInfo.Severity'),
'update_type': type.ReferenceType(__name__, 'CommonInfo.Category'),
'release_date': type.DateTimeType(),
'reboot_required': type.BooleanType(),
'size': type.IntegerType(),
},
Info,
False,
None))
def get(self):
"""
Gets the current status of the staged update. This method was added in
vSphere API 6.7.
:rtype: :class:`Staged.Info`
:return: Info structure with information about staged update
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
Generic error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
session is not authenticated
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
session is not authorized to perform this operation
:raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
if nothing is staged
"""
return self._invoke('get', None)
def delete(self):
"""
Deletes the staged update. This method was added in vSphere API 6.7.
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
Generic error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
session is not authenticated
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
session is not authorized to perform this operation
"""
return self._invoke('delete', None)
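# Illustrative usage sketch for the Staged service above (not part of the
# generated bindings): inspect the staged update and discard it once staging
# is complete. ``stub_config`` is assumed to be an authenticated
# StubConfiguration; note that get() raises NotAllowedInCurrentState when
# nothing is staged.
def _example_discard_staged_update(stub_config):
    staged_svc = Staged(stub_config)
    info = staged_svc.get()  # Staged.Info for the currently staged update
    if info.staging_complete:
        staged_svc.delete()
    return info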
class _PolicyStub(ApiInterfaceStub):
def __init__(self, config):
# properties for get operation
get_input_type = type.StructType('operation-input', {})
get_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/appliance/update/policy',
path_variables={
},
query_parameters={
}
)
# properties for set operation
set_input_type = type.StructType('operation-input', {
'policy': type.ReferenceType(__name__, 'Policy.Config'),
})
set_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
set_input_value_validator_list = [
]
set_output_validator_list = [
]
set_rest_metadata = OperationRestMetadata(
http_method='PUT',
url_template='/appliance/update/policy',
path_variables={
},
query_parameters={
}
)
operations = {
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType(__name__, 'Policy.Info'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
'set': {
'input_type': set_input_type,
'output_type': type.VoidType(),
'errors': set_error_dict,
'input_value_validator_list': set_input_value_validator_list,
'output_validator_list': set_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'get': get_rest_metadata,
'set': set_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.appliance.update.policy',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=True)
class _PendingStub(ApiInterfaceStub):
def __init__(self, config):
# properties for list operation
list_input_type = type.StructType('operation-input', {
'source_type': type.ReferenceType(__name__, 'Pending.SourceType'),
'url': type.OptionalType(type.StringType()),
})
list_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
list_input_value_validator_list = [
]
list_output_validator_list = [
]
list_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/appliance/update/pending',
path_variables={
},
query_parameters={
}
)
# properties for get operation
get_input_type = type.StructType('operation-input', {
'version': type.IdType(resource_types='com.vmware.appliance.update.pending'),
})
get_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.already_in_desired_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'AlreadyInDesiredState'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/appliance/update/pending/{version}',
path_variables={
'version': 'version',
},
query_parameters={
}
)
# properties for precheck operation
precheck_input_type = type.StructType('operation-input', {
'version': type.IdType(resource_types='com.vmware.appliance.update.pending'),
})
precheck_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.already_in_desired_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'AlreadyInDesiredState'),
'com.vmware.vapi.std.errors.not_allowed_in_current_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotAllowedInCurrentState'),
}
precheck_input_value_validator_list = [
]
precheck_output_validator_list = [
]
precheck_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/appliance/update/pending/{version}?action=precheck',
path_variables={
'version': 'version',
},
query_parameters={
}
)
# properties for stage operation
stage_input_type = type.StructType('operation-input', {
'version': type.IdType(resource_types='com.vmware.appliance.update.pending'),
})
stage_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.already_in_desired_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'AlreadyInDesiredState'),
'com.vmware.vapi.std.errors.already_exists':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'AlreadyExists'),
'com.vmware.vapi.std.errors.not_allowed_in_current_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotAllowedInCurrentState'),
}
stage_input_value_validator_list = [
]
stage_output_validator_list = [
]
stage_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/appliance/update/pending/{version}?action=stage',
path_variables={
'version': 'version',
},
query_parameters={
}
)
# properties for validate operation
validate_input_type = type.StructType('operation-input', {
'version': type.IdType(resource_types='com.vmware.appliance.update.pending'),
'user_data': type.MapType(type.IdType(), type.StringType()),
})
validate_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.already_in_desired_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'AlreadyInDesiredState'),
'com.vmware.vapi.std.errors.not_allowed_in_current_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotAllowedInCurrentState'),
}
validate_input_value_validator_list = [
]
validate_output_validator_list = [
]
validate_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/appliance/update/pending/{version}?action=validate',
path_variables={
'version': 'version',
},
query_parameters={
}
)
# properties for install operation
install_input_type = type.StructType('operation-input', {
'version': type.IdType(resource_types='com.vmware.appliance.update.pending'),
'user_data': type.MapType(type.IdType(), type.StringType()),
})
install_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.already_in_desired_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'AlreadyInDesiredState'),
'com.vmware.vapi.std.errors.not_allowed_in_current_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotAllowedInCurrentState'),
}
install_input_value_validator_list = [
]
install_output_validator_list = [
]
install_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/appliance/update/pending/{version}?action=install',
path_variables={
'version': 'version',
},
query_parameters={
}
)
# properties for stage_and_install operation
stage_and_install_input_type = type.StructType('operation-input', {
'version': type.IdType(resource_types='com.vmware.appliance.update.pending'),
'user_data': type.MapType(type.IdType(), type.StringType()),
})
stage_and_install_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.already_in_desired_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'AlreadyInDesiredState'),
'com.vmware.vapi.std.errors.not_allowed_in_current_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotAllowedInCurrentState'),
}
stage_and_install_input_value_validator_list = [
]
stage_and_install_output_validator_list = [
]
stage_and_install_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/appliance/update/pending/{version}?action=stage-and-install',
path_variables={
'version': 'version',
},
query_parameters={
}
)
operations = {
'list': {
'input_type': list_input_type,
'output_type': type.ListType(type.ReferenceType(__name__, 'Summary')),
'errors': list_error_dict,
'input_value_validator_list': list_input_value_validator_list,
'output_validator_list': list_output_validator_list,
'task_type': TaskType.NONE,
},
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType(__name__, 'Pending.Info'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
'precheck': {
'input_type': precheck_input_type,
'output_type': type.ReferenceType(__name__, 'Pending.PrecheckResult'),
'errors': precheck_error_dict,
'input_value_validator_list': precheck_input_value_validator_list,
'output_validator_list': precheck_output_validator_list,
'task_type': TaskType.NONE,
},
'stage': {
'input_type': stage_input_type,
'output_type': type.VoidType(),
'errors': stage_error_dict,
'input_value_validator_list': stage_input_value_validator_list,
'output_validator_list': stage_output_validator_list,
'task_type': TaskType.NONE,
},
'validate': {
'input_type': validate_input_type,
'output_type': type.ReferenceType('com.vmware.appliance_client', 'Notifications'),
'errors': validate_error_dict,
'input_value_validator_list': validate_input_value_validator_list,
'output_validator_list': validate_output_validator_list,
'task_type': TaskType.NONE,
},
'install': {
'input_type': install_input_type,
'output_type': type.VoidType(),
'errors': install_error_dict,
'input_value_validator_list': install_input_value_validator_list,
'output_validator_list': install_output_validator_list,
'task_type': TaskType.NONE,
},
'stage_and_install': {
'input_type': stage_and_install_input_type,
'output_type': type.VoidType(),
'errors': stage_and_install_error_dict,
'input_value_validator_list': stage_and_install_input_value_validator_list,
'output_validator_list': stage_and_install_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'list': list_rest_metadata,
'get': get_rest_metadata,
'precheck': precheck_rest_metadata,
'stage': stage_rest_metadata,
'validate': validate_rest_metadata,
'install': install_rest_metadata,
'stage_and_install': stage_and_install_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.appliance.update.pending',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=True)
class _StagedStub(ApiInterfaceStub):
def __init__(self, config):
# properties for get operation
get_input_type = type.StructType('operation-input', {})
get_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_allowed_in_current_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotAllowedInCurrentState'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/appliance/update/staged',
path_variables={
},
query_parameters={
}
)
# properties for delete operation
delete_input_type = type.StructType('operation-input', {})
delete_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
delete_input_value_validator_list = [
]
delete_output_validator_list = [
]
delete_rest_metadata = OperationRestMetadata(
http_method='DELETE',
url_template='/appliance/update/staged',
path_variables={
},
query_parameters={
}
)
operations = {
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType(__name__, 'Staged.Info'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
'delete': {
'input_type': delete_input_type,
'output_type': type.VoidType(),
'errors': delete_error_dict,
'input_value_validator_list': delete_input_value_validator_list,
'output_validator_list': delete_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'get': get_rest_metadata,
'delete': delete_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.appliance.update.staged',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=True)
class StubFactory(StubFactoryBase):
_attrs = {
'Policy': Policy,
'Pending': Pending,
'Staged': Staged,
}
|
py | 1a4291e671f69909492601960696f7f814580fbd | """empty message
Revision ID: d68e85682c2c
Revises: fed65154fba4
Create Date: 2018-09-27 11:27:26.206337
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd68e85682c2c'
down_revision = 'fed65154fba4'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('collections',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('posts_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['posts_id'], ['posts.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], )
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('collections')
# ### end Alembic commands ###
|
py | 1a4292040d144a0500071e7460a7695df5d014a4 | """
Copyright 2015 Paul T. Grogan, Massachusetts Institute of Technology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Test cases for L{ofspy.surface}.
"""
import unittest
from ..surface import Surface
class SurfaceTestCase(unittest.TestCase):
def setUp(self):
self.default = Surface(0)
def tearDown(self):
self.default = None
def test_isSurface(self):
self.assertTrue(self.default.isSurface())
def test_isOrbit(self):
self.assertFalse(self.default.isOrbit()) |
py | 1a4293d7888a295f0f1d1acb501667b8a713ed03 | #!/usr/bin/env python3
""" VAPI test """
import unittest
import os
import signal
from framework import VppTestCase, running_on_centos, VppTestRunner, Worker
class VAPITestCase(VppTestCase):
""" VAPI test """
@classmethod
def setUpClass(cls):
super(VAPITestCase, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(VAPITestCase, cls).tearDownClass()
def test_vapi_c(self):
""" run C VAPI tests """
var = "TEST_BR"
built_root = os.getenv(var, None)
self.assertIsNotNone(built_root,
"Environment variable `%s' not set" % var)
executable = "%s/vapi_test/vapi_c_test" % built_root
worker = Worker(
[executable, "vapi client", self.shm_prefix], self.logger)
worker.start()
timeout = 60
worker.join(timeout)
self.logger.info("Worker result is `%s'" % worker.result)
error = False
if worker.result is None:
try:
error = True
self.logger.error(
"Timeout! Worker did not finish in %ss" % timeout)
os.killpg(os.getpgid(worker.process.pid), signal.SIGTERM)
worker.join()
except:
self.logger.debug("Couldn't kill worker-spawned process")
raise
if error:
raise Exception(
"Timeout! Worker did not finish in %ss" % timeout)
self.assert_equal(worker.result, 0, "Binary test return code")
@unittest.skipIf(running_on_centos, "Centos's gcc can't compile our C++")
def test_vapi_cpp(self):
""" run C++ VAPI tests """
var = "TEST_BR"
built_root = os.getenv(var, None)
self.assertIsNotNone(built_root,
"Environment variable `%s' not set" % var)
executable = "%s/vapi_test/vapi_cpp_test" % built_root
worker = Worker(
[executable, "vapi client", self.shm_prefix], self.logger)
worker.start()
timeout = 120
worker.join(timeout)
self.logger.info("Worker result is `%s'" % worker.result)
error = False
if worker.result is None:
try:
error = True
self.logger.error(
"Timeout! Worker did not finish in %ss" % timeout)
os.killpg(os.getpgid(worker.process.pid), signal.SIGTERM)
worker.join()
except:
raise Exception("Couldn't kill worker-spawned process")
if error:
raise Exception(
"Timeout! Worker did not finish in %ss" % timeout)
self.assert_equal(worker.result, 0, "Binary test return code")
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
|
py | 1a42956839b8fba2b817f8acd6487f7ea64829d0 | import os
import torch
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from config.config import cfg
import numpy as np
import cv2
import random
from data.folder_new import ImageFolder_Withpath
from data.folder_mec import ImageFolder_MEC
class Sampler(object):
def __init__(self, data_source):
pass
def __iter__(self):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
class UniformBatchSampler(Sampler):
def __init__(self, per_category, category_index_list, imgs):
self.per_category = per_category
self.category_index_list = category_index_list
self.imgs = imgs
self.batch_size = per_category * len(category_index_list)
self.batch_num = len(self.imgs) // self.batch_size
def __iter__(self):
for bat in range(self.batch_num):
batch = []
for i in range(len(self.category_index_list)):
batch = batch + random.sample(self.category_index_list[i], self.per_category)
random.shuffle(batch)
yield batch
def __len__(self):
return self.batch_num
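# UniformBatchSampler yields `batch_num` class-balanced index batches per epoch:
# each batch draws `per_category` random indices from every category and then
# shuffles them. Minimal usage sketch, assuming a dataset that exposes a
# `category_index_list` attribute (one list of sample indices per class); the
# 'hard' branch of generate_dataloader_sc below uses it the same way:
#
#   sampler = UniformBatchSampler(per_category=2,
#                                 category_index_list=dataset.category_index_list,
#                                 imgs=dataset.imgs)
#   loader = torch.utils.data.DataLoader(dataset, batch_sampler=sampler)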
########################################## function for MEC
def _random_affine_augmentation(x):
M = np.float32([[1 + np.random.normal(0.0, 0.1), np.random.normal(0.0, 0.1), 0],
[np.random.normal(0.0, 0.1), 1 + np.random.normal(0.0, 0.1), 0]])
rows, cols = x.shape[1:3]
dst = cv2.warpAffine(np.transpose(x.numpy(), [1, 2, 0]), M, (cols,rows))
dst = np.transpose(dst, [2, 0, 1])
return torch.from_numpy(dst)
def _gaussian_blur(x, sigma=0.1):
ksize = int(sigma + 0.5) * 8 + 1
dst = cv2.GaussianBlur(x.numpy(), (ksize, ksize), sigma)
return torch.from_numpy(dst)
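# The two helpers above add extra stochastic perturbations (a small random
# affine warp and a Gaussian blur) on top of the usual augmentation; they are
# appended as Lambda transforms in _select_image_process_mec below to build the
# perturbed view consumed by the MEC-style target branch (ImageFolder_MEC).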
############# To control the categorical weight of each batch.
# Note: the optional weight_unk argument is an assumption added so that the
# three-argument call in generate_dataloader_open (cfg.OPEN.WEIGHT_UNK) works;
# it scales the weight of the last (unknown) category and defaults to 1.0 so
# the existing two-argument callers behave exactly as before.
def make_weights_for_balanced_classes(images, nclasses, weight_unk=1.0):
    count = [0] * nclasses
    for item in images:
        count[item[1]] += 1
    weight_per_class = [0.] * nclasses
    N = float(sum(count))
    for i in range(nclasses):
        weight_per_class[i] = N / float(count[i])
    # Adjust the categorical weight of the unknown (last) category.
    weight_per_class[-1] = weight_per_class[-1] * weight_unk
    weight = [0] * len(images)
    for idx, val in enumerate(images):
        weight[idx] = weight_per_class[val[1]]
    return weight
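# The returned per-sample weights are intended for
# torch.utils.data.sampler.WeightedRandomSampler, as done in the 'soft' branch
# of generate_dataloader_sc and in generate_dataloader_open below; `ds` here is
# any ImageFolder-style dataset:
#
#   weights = torch.DoubleTensor(make_weights_for_balanced_classes(ds.imgs, len(ds.classes)))
#   sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))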
def _select_image_process(DATA_TRANSFORM_TYPE):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
if DATA_TRANSFORM_TYPE == 'ours':
transforms_train = transforms.Compose([
transforms.Resize(256),
transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
transforms_test = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])
elif DATA_TRANSFORM_TYPE == 'longs':
transforms_train = transforms.Compose([
transforms.Resize((256, 256)),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
transforms_test = transforms.Compose([
transforms.Resize((256, 256)),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])
elif DATA_TRANSFORM_TYPE == 'simple':
transforms_train = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
normalize,
])
transforms_test = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
normalize,
])
else:
raise NotImplementedError
return transforms_train, transforms_test
def _select_image_process_mec(DATA_TRANSFORM_TYPE):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
if DATA_TRANSFORM_TYPE == 'ours':
transforms_train = transforms.Compose([
transforms.Resize(256),
transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Lambda(lambda x: _random_affine_augmentation(x)),
transforms.Lambda(lambda x: _gaussian_blur(x)),
normalize,
])
elif DATA_TRANSFORM_TYPE == 'longs':
transforms_train = transforms.Compose([
transforms.Resize((256, 256)),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Lambda(lambda x: _random_affine_augmentation(x)),
transforms.Lambda(lambda x: _gaussian_blur(x)),
normalize,
])
elif DATA_TRANSFORM_TYPE == 'simple':
transforms_train = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Lambda(lambda x: _random_affine_augmentation(x)),
transforms.Lambda(lambda x: _gaussian_blur(x)),
normalize,
])
else:
raise NotImplementedError
return transforms_train
def generate_dataloader():
dataloaders = {}
source = cfg.DATASET.SOURCE_NAME
target = cfg.DATASET.TARGET_NAME
val = cfg.DATASET.VAL_NAME
dataroot_S = os.path.join(cfg.DATASET.DATAROOT, source)
dataroot_T = os.path.join(cfg.DATASET.DATAROOT, target)
dataroot_V = os.path.join(cfg.DATASET.DATAROOT, val)
if not os.path.isdir(dataroot_S):
raise ValueError('Invalid path of source data!!!')
transforms_train, transforms_test = _select_image_process(cfg.DATA_TRANSFORM.TYPE)
############ dataloader #############################
source_train_dataset = datasets.ImageFolder(
dataroot_S,
transforms_train
)
source_train_loader = torch.utils.data.DataLoader(
source_train_dataset, batch_size=cfg.TRAIN.SOURCE_BATCH_SIZE, shuffle=True,
drop_last=True, num_workers=cfg.NUM_WORKERS, pin_memory=False
)
target_train_dataset = datasets.ImageFolder(
dataroot_T,
transforms_train
)
target_train_loader = torch.utils.data.DataLoader(
target_train_dataset, batch_size=cfg.TRAIN.TARGET_BATCH_SIZE, shuffle=True,
drop_last=True, num_workers=cfg.NUM_WORKERS, pin_memory=False
)
target_test_dataset = datasets.ImageFolder(
dataroot_V,
transforms_test
)
target_test_loader = torch.utils.data.DataLoader(
target_test_dataset,
batch_size=cfg.TEST.BATCH_SIZE, shuffle=False,
num_workers=cfg.NUM_WORKERS, pin_memory=False
)
dataloaders['source'] = source_train_loader
dataloaders['target'] = target_train_loader
dataloaders['test'] = target_test_loader
return dataloaders
def generate_dataloader_sc():
dataloaders = {}
source = cfg.DATASET.SOURCE_NAME
target = cfg.DATASET.TARGET_NAME
val = cfg.DATASET.VAL_NAME
dataroot_S = os.path.join(cfg.DATASET.DATAROOT, source)
dataroot_T = os.path.join(cfg.DATASET.DATAROOT, target)
dataroot_V = os.path.join(cfg.DATASET.DATAROOT, val)
if not os.path.isdir(dataroot_S):
raise ValueError('Invalid path of source data!!!')
transforms_train, transforms_test = _select_image_process(cfg.DATA_TRANSFORM.TYPE)
transforms_mec = _select_image_process_mec(cfg.DATA_TRANSFORM.TYPE)
############ dataloader #############################
source_train_dataset = datasets.ImageFolder(
dataroot_S,
transforms_train
)
if cfg.STRENGTHEN.DATALOAD == 'normal':
source_train_loader = torch.utils.data.DataLoader(
source_train_dataset, batch_size=cfg.STRENGTHEN.PERCATE * cfg.DATASET.NUM_CLASSES, shuffle=True,
drop_last=True, num_workers=cfg.NUM_WORKERS, pin_memory=False
)
elif cfg.STRENGTHEN.DATALOAD == 'hard':
uniformbatchsampler = UniformBatchSampler(cfg.STRENGTHEN.PERCATE, source_train_dataset.category_index_list, source_train_dataset.imgs)
source_train_loader = torch.utils.data.DataLoader(
source_train_dataset, num_workers=cfg.NUM_WORKERS, pin_memory=True, batch_sampler=uniformbatchsampler
)
elif cfg.STRENGTHEN.DATALOAD == 'soft':
weights = make_weights_for_balanced_classes(source_train_dataset.imgs, len(source_train_dataset.classes))
weights = torch.DoubleTensor(weights)
sampler_s = torch.utils.data.sampler.WeightedRandomSampler(weights, len(
weights)) #### sample instance uniformly for each category
source_train_loader = torch.utils.data.DataLoader(
source_train_dataset, batch_size=cfg.STRENGTHEN.PERCATE * cfg.DATASET.NUM_CLASSES, shuffle=False,
drop_last=True, num_workers=cfg.NUM_WORKERS, pin_memory=True, sampler=sampler_s
)
else:
raise NotImplementedError
target_train_dataset = ImageFolder_MEC(
dataroot_T,
transforms_train,
transforms_mec
)
# target_train_loader = torch.utils.data.DataLoader(
# target_train_dataset, batch_size=cfg.TRAIN.TARGET_BATCH_SIZE, shuffle=True,
# drop_last=True, num_workers=cfg.NUM_WORKERS, pin_memory=False
# )
target_test_dataset = datasets.ImageFolder(
dataroot_V,
transforms_test
)
target_test_loader = torch.utils.data.DataLoader(
target_test_dataset,
batch_size=cfg.TEST.BATCH_SIZE, shuffle=False,
num_workers=cfg.NUM_WORKERS, pin_memory=False
)
source_cluster_dataset = ImageFolder_Withpath(
dataroot_S,
transforms_test
)
target_cluster_dataset = ImageFolder_Withpath(
dataroot_T,
transforms_test
)
source_cluster_loader = torch.utils.data.DataLoader(
source_cluster_dataset,
batch_size=1000, shuffle=False,
num_workers=cfg.NUM_WORKERS, pin_memory=False
)
target_cluster_loader = torch.utils.data.DataLoader(
target_cluster_dataset,
batch_size=1000, shuffle=False,
num_workers=cfg.NUM_WORKERS, pin_memory=False
)
dataloaders['source'] = source_train_loader
dataloaders['target'] = 0
dataloaders['target_train_dataset'] = target_train_dataset
dataloaders['test'] = target_test_loader
dataloaders['source_cluster'] = source_cluster_loader
dataloaders['target_cluster'] = target_cluster_loader
return dataloaders
def generate_dataloader_open():
dataloaders = {}
source = cfg.DATASET.SOURCE_NAME
target = cfg.DATASET.TARGET_NAME
val = cfg.DATASET.VAL_NAME
dataroot_S = os.path.join(cfg.DATASET.DATAROOT, source)
dataroot_T = os.path.join(cfg.DATASET.DATAROOT, target)
dataroot_V = os.path.join(cfg.DATASET.DATAROOT, val)
if not os.path.isdir(dataroot_S):
raise ValueError('Invalid path of source data!!!')
transforms_train, transforms_test = _select_image_process(cfg.DATA_TRANSFORM.TYPE)
############ dataloader #############################
source_train_dataset = datasets.ImageFolder(
dataroot_S,
transforms_train
)
weights = make_weights_for_balanced_classes(source_train_dataset.imgs, len(source_train_dataset.classes),
cfg.OPEN.WEIGHT_UNK)
weights = torch.DoubleTensor(weights)
sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(
weights)) #### sample instance uniformly for each category
source_train_loader = torch.utils.data.DataLoader(
source_train_dataset, batch_size=cfg.TRAIN.SOURCE_BATCH_SIZE, shuffle=False,
drop_last=True, num_workers=cfg.NUM_WORKERS, pin_memory=False, sampler=sampler
)
target_train_dataset = datasets.ImageFolder(
dataroot_T,
transforms_train
)
target_train_loader = torch.utils.data.DataLoader(
target_train_dataset, batch_size=cfg.TRAIN.TARGET_BATCH_SIZE, shuffle=True,
drop_last=True, num_workers=cfg.NUM_WORKERS, pin_memory=False
)
target_test_dataset = datasets.ImageFolder(
dataroot_V,
transforms_test
)
target_test_loader = torch.utils.data.DataLoader(
target_test_dataset,
batch_size=cfg.TEST.BATCH_SIZE, shuffle=False,
num_workers=cfg.NUM_WORKERS, pin_memory=False
)
dataloaders['source'] = source_train_loader
dataloaders['target'] = target_train_loader
dataloaders['test'] = target_test_loader
return dataloaders
########## other dataloader options to be added ##########
# if args.uniform_type_s == 'hard':
# uniformbatchsampler = UniformBatchSampler(args.per_category, source_train_dataset.category_index_list, source_train_dataset.imgs)
# source_train_loader = torch.utils.data.DataLoader(
# source_train_dataset, num_workers=args.workers, pin_memory=True, batch_sampler=uniformbatchsampler
# )
# elif args.uniform_type_s == 'soft':
# weights = make_weights_for_balanced_classes(source_train_dataset.imgs, len(source_train_dataset.classes))
# weights = torch.DoubleTensor(weights)
# sampler_s = torch.utils.data.sampler.WeightedRandomSampler(weights, len(
# weights)) #### sample instance uniformly for each category
# source_train_loader = torch.utils.data.DataLoader(
# source_train_dataset, batch_size=args.per_category * args.num_classes, shuffle=False,
# drop_last=True, num_workers=args.workers, pin_memory=True, sampler=sampler_s
# )
# else:
# source_train_loader = torch.utils.data.DataLoader(
# source_train_dataset, batch_size=args.batch_size, shuffle=True,
# drop_last=True, num_workers=args.workers, pin_memory=True
# ) |
py | 1a429650858d09587a7049c2e5b2954043738915 | # Register Blueprints/Views.
from gatco.response import text, json
from application.extensions import jinja
def init_views(app):
import application.controllers.user
import application.controllers.hanghoa
import application.controllers.hoadon
import application.controllers.chitiethoadon
import application.controllers.gianhang
import application.controllers.loaigianhang
import application.controllers.giohang
@app.route('/')
def index(request):
#return text("Index")
return jinja.render('index.html', request)
@app.route('/client_app/')
def admin_index(request):
return jinja.render('index_client.html', request)
|
py | 1a42976e6dbdaa3a207b7fc4a2dea4cbcd70393b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import textwrap
import numpy as np
import pytest
from astropy.io import fits
from astropy.nddata.nduncertainty import (
StdDevUncertainty, MissingDataAssociationException, VarianceUncertainty,
InverseVariance)
from astropy import units as u
from astropy import log
from astropy.wcs import WCS, FITSFixedWarning
from astropy.utils import NumpyRNGContext
from astropy.utils.data import (get_pkg_data_filename, get_pkg_data_filenames,
get_pkg_data_contents)
from astropy.utils.exceptions import AstropyWarning
from astropy.nddata.ccddata import CCDData
from astropy.nddata import _testing as nd_testing
from astropy.table import Table
DEFAULT_DATA_SIZE = 100
with NumpyRNGContext(123):
_random_array = np.random.normal(size=[DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE])
def create_ccd_data():
"""
Return a CCDData object of size DEFAULT_DATA_SIZE x DEFAULT_DATA_SIZE
with units of ADU.
"""
data = _random_array.copy()
fake_meta = {'my_key': 42, 'your_key': 'not 42'}
ccd = CCDData(data, unit=u.adu)
ccd.header = fake_meta
return ccd
def test_ccddata_empty():
with pytest.raises(TypeError):
CCDData() # empty initializer should fail
def test_ccddata_must_have_unit():
with pytest.raises(ValueError):
CCDData(np.zeros([2, 2]))
def test_ccddata_unit_cannot_be_set_to_none():
ccd_data = create_ccd_data()
with pytest.raises(TypeError):
ccd_data.unit = None
def test_ccddata_meta_header_conflict():
with pytest.raises(ValueError) as exc:
CCDData([1, 2, 3], unit='', meta={1: 1}, header={2: 2})
assert "can't have both header and meta." in str(exc.value)
def test_ccddata_simple():
ccd_data = create_ccd_data()
assert ccd_data.shape == (DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE)
assert ccd_data.size == DEFAULT_DATA_SIZE * DEFAULT_DATA_SIZE
assert ccd_data.dtype == np.dtype(float)
def test_ccddata_init_with_string_electron_unit():
ccd = CCDData(np.zeros([2, 2]), unit="electron")
assert ccd.unit is u.electron
def test_initialize_from_FITS(tmpdir):
ccd_data = create_ccd_data()
hdu = fits.PrimaryHDU(ccd_data)
hdulist = fits.HDUList([hdu])
filename = tmpdir.join('afile.fits').strpath
hdulist.writeto(filename)
cd = CCDData.read(filename, unit=u.electron)
assert cd.shape == (DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE)
assert cd.size == DEFAULT_DATA_SIZE * DEFAULT_DATA_SIZE
assert np.issubdtype(cd.data.dtype, np.floating)
for k, v in hdu.header.items():
assert cd.meta[k] == v
def test_initialize_from_fits_with_unit_in_header(tmpdir):
fake_img = np.zeros([2, 2])
hdu = fits.PrimaryHDU(fake_img)
hdu.header['bunit'] = u.adu.to_string()
filename = tmpdir.join('afile.fits').strpath
hdu.writeto(filename)
ccd = CCDData.read(filename)
# ccd should pick up the unit adu from the fits header...did it?
assert ccd.unit is u.adu
# An explicit unit in the read overrides any unit in the FITS file
ccd2 = CCDData.read(filename, unit="photon")
assert ccd2.unit is u.photon
def test_initialize_from_fits_with_ADU_in_header(tmpdir):
fake_img = np.zeros([2, 2])
hdu = fits.PrimaryHDU(fake_img)
hdu.header['bunit'] = 'ADU'
filename = tmpdir.join('afile.fits').strpath
hdu.writeto(filename)
ccd = CCDData.read(filename)
# ccd should pick up the unit adu from the fits header...did it?
assert ccd.unit is u.adu
def test_initialize_from_fits_with_invalid_unit_in_header(tmpdir):
hdu = fits.PrimaryHDU(np.ones((2, 2)))
hdu.header['bunit'] = 'definetely-not-a-unit'
filename = tmpdir.join('afile.fits').strpath
hdu.writeto(filename)
with pytest.raises(ValueError):
CCDData.read(filename)
def test_initialize_from_fits_with_technically_invalid_but_not_really(tmpdir):
hdu = fits.PrimaryHDU(np.ones((2, 2)))
hdu.header['bunit'] = 'ELECTRONS/S'
filename = tmpdir.join('afile.fits').strpath
hdu.writeto(filename)
ccd = CCDData.read(filename)
assert ccd.unit == u.electron/u.s
def test_initialize_from_fits_with_data_in_different_extension(tmpdir):
fake_img = np.arange(4).reshape(2, 2)
hdu1 = fits.PrimaryHDU()
hdu2 = fits.ImageHDU(fake_img)
hdus = fits.HDUList([hdu1, hdu2])
filename = tmpdir.join('afile.fits').strpath
hdus.writeto(filename)
ccd = CCDData.read(filename, unit='adu')
# ccd should pick up the unit adu from the fits header...did it?
np.testing.assert_array_equal(ccd.data, fake_img)
# check that the header is the combined header
assert hdu2.header + hdu1.header == ccd.header
def test_initialize_from_fits_with_extension(tmpdir):
fake_img1 = np.zeros([2, 2])
fake_img2 = np.arange(4).reshape(2, 2)
hdu0 = fits.PrimaryHDU()
hdu1 = fits.ImageHDU(fake_img1, name='first', ver=1)
hdu2 = fits.ImageHDU(fake_img2, name='second', ver=1)
hdus = fits.HDUList([hdu0, hdu1, hdu2])
filename = tmpdir.join('afile.fits').strpath
hdus.writeto(filename)
ccd = CCDData.read(filename, hdu=2, unit='adu')
# ccd should pick up the unit adu from the fits header...did it?
np.testing.assert_array_equal(ccd.data, fake_img2)
# check hdu string parameter
ccd = CCDData.read(filename, hdu='second', unit='adu')
np.testing.assert_array_equal(ccd.data, fake_img2)
# check hdu tuple parameter
ccd = CCDData.read(filename, hdu=('second', 1), unit='adu')
np.testing.assert_array_equal(ccd.data, fake_img2)
def test_write_unit_to_hdu():
ccd_data = create_ccd_data()
ccd_unit = ccd_data.unit
hdulist = ccd_data.to_hdu()
assert 'bunit' in hdulist[0].header
assert hdulist[0].header['bunit'] == ccd_unit.to_string()
def test_initialize_from_FITS_bad_keyword_raises_error(tmpdir):
# There are two fits.open keywords that are not permitted in ccdproc:
# do_not_scale_image_data and scale_back
ccd_data = create_ccd_data()
filename = tmpdir.join('test.fits').strpath
ccd_data.write(filename)
with pytest.raises(TypeError):
CCDData.read(filename, unit=ccd_data.unit,
do_not_scale_image_data=True)
with pytest.raises(TypeError):
CCDData.read(filename, unit=ccd_data.unit, scale_back=True)
def test_ccddata_writer(tmpdir):
ccd_data = create_ccd_data()
filename = tmpdir.join('test.fits').strpath
ccd_data.write(filename)
ccd_disk = CCDData.read(filename, unit=ccd_data.unit)
np.testing.assert_array_equal(ccd_data.data, ccd_disk.data)
def test_ccddata_meta_is_case_sensitive():
ccd_data = create_ccd_data()
key = 'SoMeKEY'
ccd_data.meta[key] = 10
assert key.lower() not in ccd_data.meta
assert key.upper() not in ccd_data.meta
assert key in ccd_data.meta
def test_ccddata_meta_is_not_fits_header():
ccd_data = create_ccd_data()
ccd_data.meta = {'OBSERVER': 'Edwin Hubble'}
assert not isinstance(ccd_data.meta, fits.Header)
def test_fromMEF(tmpdir):
ccd_data = create_ccd_data()
hdu = fits.PrimaryHDU(ccd_data)
hdu2 = fits.PrimaryHDU(2 * ccd_data.data)
hdulist = fits.HDUList(hdu)
hdulist.append(hdu2)
filename = tmpdir.join('afile.fits').strpath
hdulist.writeto(filename)
# by default, we reading from the first extension
cd = CCDData.read(filename, unit=u.electron)
np.testing.assert_array_equal(cd.data, ccd_data.data)
# but reading from the second should work too
cd = CCDData.read(filename, hdu=1, unit=u.electron)
np.testing.assert_array_equal(cd.data, 2 * ccd_data.data)
def test_metafromheader():
hdr = fits.header.Header()
hdr['observer'] = 'Edwin Hubble'
hdr['exptime'] = '3600'
d1 = CCDData(np.ones((5, 5)), meta=hdr, unit=u.electron)
assert d1.meta['OBSERVER'] == 'Edwin Hubble'
assert d1.header['OBSERVER'] == 'Edwin Hubble'
def test_metafromdict():
dic = {'OBSERVER': 'Edwin Hubble', 'EXPTIME': 3600}
d1 = CCDData(np.ones((5, 5)), meta=dic, unit=u.electron)
assert d1.meta['OBSERVER'] == 'Edwin Hubble'
def test_header2meta():
hdr = fits.header.Header()
hdr['observer'] = 'Edwin Hubble'
hdr['exptime'] = '3600'
d1 = CCDData(np.ones((5, 5)), unit=u.electron)
d1.header = hdr
assert d1.meta['OBSERVER'] == 'Edwin Hubble'
assert d1.header['OBSERVER'] == 'Edwin Hubble'
def test_metafromstring_fail():
hdr = 'this is not a valid header'
with pytest.raises(TypeError):
CCDData(np.ones((5, 5)), meta=hdr, unit=u.adu)
def test_setting_bad_uncertainty_raises_error():
ccd_data = create_ccd_data()
with pytest.raises(TypeError):
# Uncertainty is supposed to be an instance of NDUncertainty
ccd_data.uncertainty = 10
def test_setting_uncertainty_with_array():
ccd_data = create_ccd_data()
ccd_data.uncertainty = None
fake_uncertainty = np.sqrt(np.abs(ccd_data.data))
ccd_data.uncertainty = fake_uncertainty.copy()
np.testing.assert_array_equal(ccd_data.uncertainty.array, fake_uncertainty)
def test_setting_uncertainty_wrong_shape_raises_error():
ccd_data = create_ccd_data()
with pytest.raises(ValueError):
ccd_data.uncertainty = np.zeros([3, 4])
def test_to_hdu():
ccd_data = create_ccd_data()
ccd_data.meta = {'observer': 'Edwin Hubble'}
fits_hdulist = ccd_data.to_hdu()
assert isinstance(fits_hdulist, fits.HDUList)
for k, v in ccd_data.meta.items():
assert fits_hdulist[0].header[k] == v
np.testing.assert_array_equal(fits_hdulist[0].data, ccd_data.data)
def test_copy():
ccd_data = create_ccd_data()
ccd_copy = ccd_data.copy()
np.testing.assert_array_equal(ccd_copy.data, ccd_data.data)
assert ccd_copy.unit == ccd_data.unit
assert ccd_copy.meta == ccd_data.meta
@pytest.mark.parametrize('operation,affects_uncertainty', [
("multiply", True),
("divide", True),
])
@pytest.mark.parametrize('operand', [
2.0,
2 * u.dimensionless_unscaled,
2 * u.photon / u.adu,
])
@pytest.mark.parametrize('with_uncertainty', [
True,
False])
def test_mult_div_overload(operand, with_uncertainty,
operation, affects_uncertainty):
ccd_data = create_ccd_data()
if with_uncertainty:
ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
method = getattr(ccd_data, operation)
np_method = getattr(np, operation)
result = method(operand)
assert result is not ccd_data
assert isinstance(result, CCDData)
assert (result.uncertainty is None or
isinstance(result.uncertainty, StdDevUncertainty))
try:
op_value = operand.value
except AttributeError:
op_value = operand
np.testing.assert_array_equal(result.data,
np_method(ccd_data.data, op_value))
if with_uncertainty:
if affects_uncertainty:
np.testing.assert_array_equal(result.uncertainty.array,
np_method(ccd_data.uncertainty.array,
op_value))
else:
np.testing.assert_array_equal(result.uncertainty.array,
ccd_data.uncertainty.array)
else:
assert result.uncertainty is None
if isinstance(operand, u.Quantity):
# Need the "1 *" below to force arguments to be Quantity to work around
# astropy/astropy#2377
expected_unit = np_method(1 * ccd_data.unit, 1 * operand.unit).unit
assert result.unit == expected_unit
else:
assert result.unit == ccd_data.unit
@pytest.mark.parametrize('operation,affects_uncertainty', [
("add", False),
("subtract", False),
])
@pytest.mark.parametrize('operand,expect_failure', [
(2.0, u.UnitsError), # fail--units don't match image
(2 * u.dimensionless_unscaled, u.UnitsError), # same
(2 * u.adu, False),
])
@pytest.mark.parametrize('with_uncertainty', [
True,
False])
def test_add_sub_overload(operand, expect_failure, with_uncertainty,
operation, affects_uncertainty):
ccd_data = create_ccd_data()
if with_uncertainty:
ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
method = getattr(ccd_data, operation)
np_method = getattr(np, operation)
if expect_failure:
with pytest.raises(expect_failure):
result = method(operand)
return
else:
result = method(operand)
assert result is not ccd_data
assert isinstance(result, CCDData)
assert (result.uncertainty is None or
isinstance(result.uncertainty, StdDevUncertainty))
try:
op_value = operand.value
except AttributeError:
op_value = operand
np.testing.assert_array_equal(result.data,
np_method(ccd_data.data, op_value))
if with_uncertainty:
if affects_uncertainty:
np.testing.assert_array_equal(result.uncertainty.array,
np_method(ccd_data.uncertainty.array,
op_value))
else:
np.testing.assert_array_equal(result.uncertainty.array,
ccd_data.uncertainty.array)
else:
assert result.uncertainty is None
if isinstance(operand, u.Quantity):
assert (result.unit == ccd_data.unit and result.unit == operand.unit)
else:
assert result.unit == ccd_data.unit
def test_arithmetic_overload_fails():
ccd_data = create_ccd_data()
with pytest.raises(TypeError):
ccd_data.multiply("five")
with pytest.raises(TypeError):
ccd_data.divide("five")
with pytest.raises(TypeError):
ccd_data.add("five")
with pytest.raises(TypeError):
ccd_data.subtract("five")
def test_arithmetic_no_wcs_compare():
ccd = CCDData(np.ones((10, 10)), unit='')
assert ccd.add(ccd, compare_wcs=None).wcs is None
assert ccd.subtract(ccd, compare_wcs=None).wcs is None
assert ccd.multiply(ccd, compare_wcs=None).wcs is None
assert ccd.divide(ccd, compare_wcs=None).wcs is None
def test_arithmetic_with_wcs_compare():
def return_true(_, __):
return True
wcs1, wcs2 = nd_testing.create_two_equal_wcs(naxis=2)
ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=wcs1)
ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=wcs2)
nd_testing.assert_wcs_seem_equal(
ccd1.add(ccd2, compare_wcs=return_true).wcs,
wcs1)
nd_testing.assert_wcs_seem_equal(
ccd1.subtract(ccd2, compare_wcs=return_true).wcs,
wcs1)
nd_testing.assert_wcs_seem_equal(
ccd1.multiply(ccd2, compare_wcs=return_true).wcs,
wcs1)
nd_testing.assert_wcs_seem_equal(
ccd1.divide(ccd2, compare_wcs=return_true).wcs,
wcs1)
def test_arithmetic_with_wcs_compare_fail():
def return_false(_, __):
return False
ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=WCS())
ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=WCS())
with pytest.raises(ValueError):
ccd1.add(ccd2, compare_wcs=return_false)
with pytest.raises(ValueError):
ccd1.subtract(ccd2, compare_wcs=return_false)
with pytest.raises(ValueError):
ccd1.multiply(ccd2, compare_wcs=return_false)
with pytest.raises(ValueError):
ccd1.divide(ccd2, compare_wcs=return_false)
def test_arithmetic_overload_ccddata_operand():
ccd_data = create_ccd_data()
ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
operand = ccd_data.copy()
result = ccd_data.add(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data,
2 * ccd_data.data)
np.testing.assert_array_almost_equal_nulp(
result.uncertainty.array,
np.sqrt(2) * ccd_data.uncertainty.array
)
result = ccd_data.subtract(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data,
0 * ccd_data.data)
np.testing.assert_array_almost_equal_nulp(
result.uncertainty.array,
np.sqrt(2) * ccd_data.uncertainty.array
)
result = ccd_data.multiply(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data,
ccd_data.data ** 2)
expected_uncertainty = (np.sqrt(2) * np.abs(ccd_data.data) *
ccd_data.uncertainty.array)
np.testing.assert_allclose(result.uncertainty.array,
expected_uncertainty)
result = ccd_data.divide(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data,
np.ones_like(ccd_data.data))
expected_uncertainty = (np.sqrt(2) / np.abs(ccd_data.data) *
ccd_data.uncertainty.array)
np.testing.assert_allclose(result.uncertainty.array,
expected_uncertainty)
def test_arithmetic_overload_differing_units():
a = np.array([1, 2, 3]) * u.m
b = np.array([1, 2, 3]) * u.cm
ccddata = CCDData(a)
# TODO: Could also be parametrized.
res = ccddata.add(b)
np.testing.assert_array_almost_equal(res.data, np.add(a, b).value)
assert res.unit == np.add(a, b).unit
res = ccddata.subtract(b)
np.testing.assert_array_almost_equal(res.data, np.subtract(a, b).value)
assert res.unit == np.subtract(a, b).unit
res = ccddata.multiply(b)
np.testing.assert_array_almost_equal(res.data, np.multiply(a, b).value)
assert res.unit == np.multiply(a, b).unit
res = ccddata.divide(b)
np.testing.assert_array_almost_equal(res.data, np.divide(a, b).value)
assert res.unit == np.divide(a, b).unit
def test_arithmetic_add_with_array():
ccd = CCDData(np.ones((3, 3)), unit='')
res = ccd.add(np.arange(3))
np.testing.assert_array_equal(res.data, [[1, 2, 3]] * 3)
ccd = CCDData(np.ones((3, 3)), unit='adu')
with pytest.raises(ValueError):
ccd.add(np.arange(3))
def test_arithmetic_subtract_with_array():
ccd = CCDData(np.ones((3, 3)), unit='')
res = ccd.subtract(np.arange(3))
np.testing.assert_array_equal(res.data, [[1, 0, -1]] * 3)
ccd = CCDData(np.ones((3, 3)), unit='adu')
with pytest.raises(ValueError):
ccd.subtract(np.arange(3))
def test_arithmetic_multiply_with_array():
ccd = CCDData(np.ones((3, 3)) * 3, unit=u.m)
res = ccd.multiply(np.ones((3, 3)) * 2)
np.testing.assert_array_equal(res.data, [[6, 6, 6]] * 3)
assert res.unit == ccd.unit
def test_arithmetic_divide_with_array():
ccd = CCDData(np.ones((3, 3)), unit=u.m)
res = ccd.divide(np.ones((3, 3)) * 2)
np.testing.assert_array_equal(res.data, [[0.5, 0.5, 0.5]] * 3)
assert res.unit == ccd.unit
def test_history_preserved_if_metadata_is_fits_header(tmpdir):
fake_img = np.zeros([2, 2])
hdu = fits.PrimaryHDU(fake_img)
hdu.header['history'] = 'one'
hdu.header['history'] = 'two'
hdu.header['history'] = 'three'
assert len(hdu.header['history']) == 3
tmp_file = tmpdir.join('temp.fits').strpath
hdu.writeto(tmp_file)
ccd_read = CCDData.read(tmp_file, unit="adu")
assert ccd_read.header['history'] == hdu.header['history']
def test_infol_logged_if_unit_in_fits_header(tmpdir):
ccd_data = create_ccd_data()
tmpfile = tmpdir.join('temp.fits')
ccd_data.write(tmpfile.strpath)
log.setLevel('INFO')
explicit_unit_name = "photon"
with log.log_to_list() as log_list:
_ = CCDData.read(tmpfile.strpath, unit=explicit_unit_name)
assert explicit_unit_name in log_list[0].message
def test_wcs_attribute(tmpdir):
"""
    Check that the WCS attribute gets added to the header, and that if a
    CCDData object is created from a FITS file with a header and the WCS
    attribute is then modified, the WCS object overwrites the old WCS
    information in the header when the CCDData object is turned back into
    an HDU.
"""
ccd_data = create_ccd_data()
tmpfile = tmpdir.join('temp.fits')
# This wcs example is taken from the astropy.wcs docs.
wcs = WCS(naxis=2)
wcs.wcs.crpix = np.array(ccd_data.shape) / 2
wcs.wcs.cdelt = np.array([-0.066667, 0.066667])
wcs.wcs.crval = [0, -90]
wcs.wcs.ctype = ["RA---AIR", "DEC--AIR"]
wcs.wcs.set_pv([(2, 1, 45.0)])
ccd_data.header = ccd_data.to_hdu()[0].header
ccd_data.header.extend(wcs.to_header(), useblanks=False)
ccd_data.write(tmpfile.strpath)
# Get the header length after it has been extended by the WCS keywords
original_header_length = len(ccd_data.header)
ccd_new = CCDData.read(tmpfile.strpath)
# WCS attribute should be set for ccd_new
assert ccd_new.wcs is not None
# WCS attribute should be equal to wcs above.
assert ccd_new.wcs.wcs == wcs.wcs
# Converting CCDData object with wcs to an hdu shouldn't
# create duplicate wcs-related entries in the header.
ccd_new_hdu = ccd_new.to_hdu()[0]
assert len(ccd_new_hdu.header) == original_header_length
# Making a CCDData with WCS (but not WCS in the header) should lead to
# WCS information in the header when it is converted to an HDU.
ccd_wcs_not_in_header = CCDData(ccd_data.data, wcs=wcs, unit="adu")
hdu = ccd_wcs_not_in_header.to_hdu()[0]
wcs_header = wcs.to_header()
for k in wcs_header.keys():
# Skip these keywords if they are in the WCS header because they are
# not WCS-specific.
if k in ['', 'COMMENT', 'HISTORY']:
continue
# No keyword from the WCS should be in the header.
assert k not in ccd_wcs_not_in_header.header
# Every keyword in the WCS should be in the header of the HDU
assert hdu.header[k] == wcs_header[k]
# Now check that if WCS of a CCDData is modified, then the CCDData is
# converted to an HDU, the WCS keywords in the header are overwritten
# with the appropriate keywords from the header.
#
# ccd_new has a WCS and WCS keywords in the header, so try modifying
# the WCS.
ccd_new.wcs.wcs.cdelt *= 2
ccd_new_hdu_mod_wcs = ccd_new.to_hdu()[0]
assert ccd_new_hdu_mod_wcs.header['CDELT1'] == ccd_new.wcs.wcs.cdelt[0]
assert ccd_new_hdu_mod_wcs.header['CDELT2'] == ccd_new.wcs.wcs.cdelt[1]
def test_wcs_keywords_removed_from_header():
"""
Test, for the file included with the nddata tests, that WCS keywords are
properly removed from header.
"""
from astropy.nddata.ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER
keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER)
data_file = get_pkg_data_filename('data/sip-wcs.fits')
ccd = CCDData.read(data_file)
with pytest.warns(AstropyWarning,
match=r'Some non-standard WCS keywords were excluded'):
wcs_header = ccd.wcs.to_header()
assert not (set(wcs_header) & set(ccd.meta) - keepers)
# Make sure that exceptions are not raised when trying to remove missing
# keywords. o4sp040b0_raw.fits of io.fits is missing keyword 'PC1_1'.
data_file1 = get_pkg_data_filename('../../io/fits/tests/data/o4sp040b0_raw.fits')
with pytest.warns(FITSFixedWarning, match=r"'unitfix' made the change"):
ccd = CCDData.read(data_file1, unit='count')
def test_wcs_SIP_coefficient_keywords_removed():
# If SIP polynomials are present, check that no more polynomial
# coefficients remain in the header. See #8598
# The SIP paper is ambiguous as to whether keywords like
# A_0_0 can appear in the header for a 2nd order or higher
# polynomial. The paper clearly says that the corrections
# are only for quadratic or higher order, so A_0_0 and the like
# should be zero if they are present, but they apparently can be
# there (or at least astrometry.net produces them).
# astropy WCS does not write those coefficients, so they were
# not being removed from the header even though they are WCS-related.
data_file = get_pkg_data_filename('data/sip-wcs.fits')
test_keys = ['A_0_0', 'B_0_1']
# Make sure the keywords added to this file for testing are there
with fits.open(data_file) as hdu:
for key in test_keys:
assert key in hdu[0].header
ccd = CCDData.read(data_file)
# Now the test...the two keywords above should have been removed.
for key in test_keys:
assert key not in ccd.header
@pytest.mark.filterwarnings('ignore')
def test_wcs_keyword_removal_for_wcs_test_files():
"""
Test, for the WCS test files, that keyword removal works as
expected. Those cover a much broader range of WCS types than
test_wcs_keywords_removed_from_header.
Includes regression test for #8597
"""
from astropy.nddata.ccddata import _generate_wcs_and_update_header
from astropy.nddata.ccddata import (_KEEP_THESE_KEYWORDS_IN_HEADER,
_CDs, _PCs)
keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER)
wcs_headers = get_pkg_data_filenames('../../wcs/tests/data',
pattern='*.hdr')
for hdr in wcs_headers:
# Skip the files that are expected to be bad...
if ('invalid' in hdr or 'nonstandard' in hdr or 'segfault' in hdr or
'chandra-pixlist-wcs' in hdr):
continue
header_string = get_pkg_data_contents(hdr)
header = fits.Header.fromstring(header_string)
wcs = WCS(header_string)
header_from_wcs = wcs.to_header(relax=True)
new_header, new_wcs = _generate_wcs_and_update_header(header)
new_wcs_header = new_wcs.to_header(relax=True)
# Make sure all of the WCS-related keywords generated by astropy
# have been removed.
assert not (set(new_header) &
set(new_wcs_header) -
keepers)
# Check that new_header contains no remaining WCS information.
# Specifically, check that
# 1. The combination of new_header and new_wcs does not contain
# both PCi_j and CDi_j keywords. See #8597.
# Check for 1
final_header = new_header + new_wcs_header
final_header_set = set(final_header)
if _PCs & final_header_set:
assert not (_CDs & final_header_set)
elif _CDs & final_header_set:
assert not (_PCs & final_header_set)
# Check that the new wcs is the same as the old.
for k, v in new_wcs_header.items():
if isinstance(v, str):
assert header_from_wcs[k] == v
else:
np.testing.assert_almost_equal(header_from_wcs[k], v)
def test_read_wcs_not_creatable(tmpdir):
# The following Header can't be converted to a WCS object. See also #6499.
hdr_txt_example_WCS = textwrap.dedent('''
SIMPLE = T / Fits standard
BITPIX = 16 / Bits per pixel
NAXIS = 2 / Number of axes
NAXIS1 = 1104 / Axis length
NAXIS2 = 4241 / Axis length
CRVAL1 = 164.98110962 / Physical value of the reference pixel X
CRVAL2 = 44.34089279 / Physical value of the reference pixel Y
CRPIX1 = -34.0 / Reference pixel in X (pixel)
CRPIX2 = 2041.0 / Reference pixel in Y (pixel)
CDELT1 = 0.10380000 / X Scale projected on detector (#/pix)
CDELT2 = 0.10380000 / Y Scale projected on detector (#/pix)
CTYPE1 = 'RA---TAN' / Pixel coordinate system
CTYPE2 = 'WAVELENGTH' / Pixel coordinate system
CUNIT1 = 'degree ' / Units used in both CRVAL1 and CDELT1
CUNIT2 = 'nm ' / Units used in both CRVAL2 and CDELT2
CD1_1 = 0.20760000 / Pixel Coordinate translation matrix
CD1_2 = 0.00000000 / Pixel Coordinate translation matrix
CD2_1 = 0.00000000 / Pixel Coordinate translation matrix
CD2_2 = 0.10380000 / Pixel Coordinate translation matrix
C2YPE1 = 'RA---TAN' / Pixel coordinate system
C2YPE2 = 'DEC--TAN' / Pixel coordinate system
C2NIT1 = 'degree ' / Units used in both C2VAL1 and C2ELT1
C2NIT2 = 'degree ' / Units used in both C2VAL2 and C2ELT2
RADECSYS= 'FK5 ' / The equatorial coordinate system
''')
hdr = fits.Header.fromstring(hdr_txt_example_WCS, sep='\n')
hdul = fits.HDUList([fits.PrimaryHDU(np.ones((4241, 1104)), header=hdr)])
filename = tmpdir.join('afile.fits').strpath
hdul.writeto(filename)
# The hdr cannot be converted to a WCS object because of an
# InconsistentAxisTypesError but it should still open the file
ccd = CCDData.read(filename, unit='adu')
assert ccd.wcs is None
def test_header():
ccd_data = create_ccd_data()
a = {'Observer': 'Hubble'}
ccd = CCDData(ccd_data, header=a)
assert ccd.meta == a
def test_wcs_arithmetic():
ccd_data = create_ccd_data()
wcs = WCS(naxis=2)
ccd_data.wcs = wcs
result = ccd_data.multiply(1.0)
nd_testing.assert_wcs_seem_equal(result.wcs, wcs)
@pytest.mark.parametrize('operation',
['multiply', 'divide', 'add', 'subtract'])
def test_wcs_arithmetic_ccd(operation):
ccd_data = create_ccd_data()
ccd_data2 = ccd_data.copy()
ccd_data.wcs = WCS(naxis=2)
method = getattr(ccd_data, operation)
result = method(ccd_data2)
nd_testing.assert_wcs_seem_equal(result.wcs, ccd_data.wcs)
assert ccd_data2.wcs is None
def test_wcs_sip_handling():
"""
Check whether the ctypes RA---TAN-SIP and DEC--TAN-SIP survive
a roundtrip unchanged.
"""
data_file = get_pkg_data_filename('data/sip-wcs.fits')
def check_wcs_ctypes(header):
expected_wcs_ctypes = {
'CTYPE1': 'RA---TAN-SIP',
'CTYPE2': 'DEC--TAN-SIP'
}
return [header[k] == v for k, v in expected_wcs_ctypes.items()]
ccd_original = CCDData.read(data_file)
# After initialization the keywords should be in the WCS, not in the
# meta.
with fits.open(data_file) as raw:
good_ctype = check_wcs_ctypes(raw[0].header)
assert all(good_ctype)
ccd_new = ccd_original.to_hdu()
good_ctype = check_wcs_ctypes(ccd_new[0].header)
assert all(good_ctype)
# Try converting to header with wcs_relax=False and
# the header should contain the CTYPE keywords without
# the -SIP
ccd_no_relax = ccd_original.to_hdu(wcs_relax=False)
good_ctype = check_wcs_ctypes(ccd_no_relax[0].header)
assert not any(good_ctype)
assert ccd_no_relax[0].header['CTYPE1'] == 'RA---TAN'
assert ccd_no_relax[0].header['CTYPE2'] == 'DEC--TAN'
@pytest.mark.parametrize('operation',
['multiply', 'divide', 'add', 'subtract'])
def test_mask_arithmetic_ccd(operation):
ccd_data = create_ccd_data()
ccd_data2 = ccd_data.copy()
ccd_data.mask = (ccd_data.data > 0)
method = getattr(ccd_data, operation)
result = method(ccd_data2)
np.testing.assert_equal(result.mask, ccd_data.mask)
def test_write_read_multiextensionfits_mask_default(tmpdir):
# Test that if a mask is present the mask is saved and loaded by default.
ccd_data = create_ccd_data()
ccd_data.mask = ccd_data.data > 10
filename = tmpdir.join('afile.fits').strpath
ccd_data.write(filename)
ccd_after = CCDData.read(filename)
assert ccd_after.mask is not None
np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask)
@pytest.mark.parametrize(
'uncertainty_type',
[StdDevUncertainty, VarianceUncertainty, InverseVariance])
def test_write_read_multiextensionfits_uncertainty_default(
tmpdir, uncertainty_type):
# Test that if a uncertainty is present it is saved and loaded by default.
ccd_data = create_ccd_data()
ccd_data.uncertainty = uncertainty_type(ccd_data.data * 10)
filename = tmpdir.join('afile.fits').strpath
ccd_data.write(filename)
ccd_after = CCDData.read(filename)
assert ccd_after.uncertainty is not None
assert type(ccd_after.uncertainty) is uncertainty_type
np.testing.assert_array_equal(ccd_data.uncertainty.array,
ccd_after.uncertainty.array)
@pytest.mark.parametrize(
'uncertainty_type',
[StdDevUncertainty, VarianceUncertainty, InverseVariance])
def test_write_read_multiextensionfits_uncertainty_different_uncertainty_key(
tmpdir, uncertainty_type):
# Test that if a uncertainty is present it is saved and loaded by default.
ccd_data = create_ccd_data()
ccd_data.uncertainty = uncertainty_type(ccd_data.data * 10)
filename = tmpdir.join('afile.fits').strpath
ccd_data.write(filename, key_uncertainty_type='Blah')
ccd_after = CCDData.read(filename, key_uncertainty_type='Blah')
assert ccd_after.uncertainty is not None
assert type(ccd_after.uncertainty) is uncertainty_type
np.testing.assert_array_equal(ccd_data.uncertainty.array,
ccd_after.uncertainty.array)
def test_write_read_multiextensionfits_not(tmpdir):
# Test that writing mask and uncertainty can be disabled
ccd_data = create_ccd_data()
ccd_data.mask = ccd_data.data > 10
ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10)
filename = tmpdir.join('afile.fits').strpath
ccd_data.write(filename, hdu_mask=None, hdu_uncertainty=None)
ccd_after = CCDData.read(filename)
assert ccd_after.uncertainty is None
assert ccd_after.mask is None
def test_write_read_multiextensionfits_custom_ext_names(tmpdir):
# Test writing mask, uncertainty in another extension than default
ccd_data = create_ccd_data()
ccd_data.mask = ccd_data.data > 10
ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10)
filename = tmpdir.join('afile.fits').strpath
ccd_data.write(filename, hdu_mask='Fun', hdu_uncertainty='NoFun')
# Try reading with defaults extension names
ccd_after = CCDData.read(filename)
assert ccd_after.uncertainty is None
assert ccd_after.mask is None
# Try reading with custom extension names
ccd_after = CCDData.read(filename, hdu_mask='Fun', hdu_uncertainty='NoFun')
assert ccd_after.uncertainty is not None
assert ccd_after.mask is not None
np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask)
np.testing.assert_array_equal(ccd_data.uncertainty.array,
ccd_after.uncertainty.array)
def test_read_old_style_multiextensionfits(tmpdir):
# Regression test for https://github.com/astropy/ccdproc/issues/664
#
# Prior to astropy 3.1 there was no uncertainty type saved
# in the multiextension fits files generated by CCDData
# because the uncertainty had to be StandardDevUncertainty.
#
# Current version should be able to read those in.
#
size = 4
# Value of the variables below are not important to the test.
data = np.zeros([size, size])
mask = data > 0.9
uncert = np.sqrt(data)
ccd = CCDData(data=data, mask=mask, uncertainty=uncert, unit='adu')
# We'll create the file manually to ensure we have the
# right extension names and no uncertainty type.
hdulist = ccd.to_hdu()
del hdulist[2].header['UTYPE']
file_name = tmpdir.join('old_ccddata_mef.fits').strpath
hdulist.writeto(file_name)
ccd = CCDData.read(file_name)
assert isinstance(ccd.uncertainty, StdDevUncertainty)
def test_wcs():
ccd_data = create_ccd_data()
wcs = WCS(naxis=2)
ccd_data.wcs = wcs
assert ccd_data.wcs is wcs
def test_recognized_fits_formats_for_read_write(tmpdir):
# These are the extensions that are supposed to be supported.
ccd_data = create_ccd_data()
supported_extensions = ['fit', 'fits', 'fts']
for ext in supported_extensions:
path = tmpdir.join(f"test.{ext}")
ccd_data.write(path.strpath)
from_disk = CCDData.read(path.strpath)
assert (ccd_data.data == from_disk.data).all()
def test_stddevuncertainty_compat_descriptor_no_parent():
with pytest.raises(MissingDataAssociationException):
StdDevUncertainty(np.ones((10, 10))).parent_nddata
def test_stddevuncertainty_compat_descriptor_no_weakref():
# TODO: Remove this test if astropy 1.0 isn't supported anymore
# This test might create a Memoryleak on purpose, so the last lines after
# the assert are IMPORTANT cleanup.
ccd = CCDData(np.ones((10, 10)), unit='')
uncert = StdDevUncertainty(np.ones((10, 10)))
uncert._parent_nddata = ccd
assert uncert.parent_nddata is ccd
uncert._parent_nddata = None
# https://github.com/astropy/astropy/issues/7595
def test_read_returns_image(tmpdir):
# Test if CCData.read returns a image when reading a fits file containing
# a table and image, in that order.
tbl = Table(np.ones(10).reshape(5, 2))
img = np.ones((5, 5))
hdul = fits.HDUList(hdus=[fits.PrimaryHDU(), fits.TableHDU(tbl.as_array()),
fits.ImageHDU(img)])
filename = tmpdir.join('table_image.fits').strpath
hdul.writeto(filename)
ccd = CCDData.read(filename, unit='adu')
# Expecting to get (5, 5), the size of the image
assert ccd.data.shape == (5, 5)
# https://github.com/astropy/astropy/issues/9664
def test_sliced_ccdata_to_hdu():
wcs = WCS(naxis=2)
wcs.wcs.crpix = 10, 10
ccd = CCDData(np.ones((10, 10)), wcs=wcs, unit='pixel')
trimmed = ccd[2:-2, 2:-2]
hdul = trimmed.to_hdu()
assert isinstance(hdul, fits.HDUList)
assert hdul[0].header['CRPIX1'] == 8
assert hdul[0].header['CRPIX2'] == 8
|
py | 1a4297ee27cb00f7dad9450dc4dc792ba74bf555 | # Generated by Django 3.2.5 on 2021-07-24 13:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('neighbourapp', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='post',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
|
py | 1a4298e44b4dbdbf4fa02985ca51737f87099cb2 | from application import app, db
from flask import redirect, render_template, request, url_for
from application.visits.models import Visit
from application.yhteenveto.forms import InYearForm, InMonthForm
from flask_login.utils import login_required, current_user
from application.sivu.models import Sivu
from sqlalchemy.sql import text
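# "yhteenveto" is Finnish for "summary". The routes below render yearly and
# monthly visit statistics: /alku/ shows the selection page, /vuodessa/ lists
# visits per page for a chosen year, /ryhma/ aggregates visits per page group
# for a chosen month, /selaimia/ summarises browser counts for a year, and
# /kavijoita/ aggregates visitor counts per page for a chosen year.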
@app.route("/yhteenveto/alku/", methods=["GET"])
@login_required
def yhteenveto_alku():
return render_template("yhteenveto/valinta.html", title="Yhteenvedot")
@app.route("/yhteenveto/vuodessa/", methods=["GET", "POST"])
@login_required
def yhteenveto_vuodessa():
if request.method == 'POST':
form = InYearForm(request.form)
stmt = text("SELECT sivu.osoite, SUM(visit.lukumaara) AS maara FROM sivu, visit WHERE visit.vuosi = :vuosi AND visit.sivu_id = sivu.id AND sivu.account_id = :id GROUP BY sivu.osoite").params(vuosi=form.year.data, id=current_user.id)
result = db.engine.execute(stmt)
return render_template("yhteenveto/vuodessa.html", title="Käyntejä sivuilla vuodessa", vuosi=result)
else:
return render_template("yhteenveto/kyselyvuodessa.html", title="Käyntejä sivuilla vuodessa", form = InYearForm())
@app.route("/yhteenveto/ryhma/", methods=["GET", "POST"])
@login_required
def yhteenveto_ryhmatulos():
if request.method == 'POST':
form = InMonthForm(request.form)
stmt = text("SELECT sivu.ryhma AS ryhma, SUM(visit.lukumaara) AS maara FROM sivu, visit WHERE visit.vuosi = :vuosi AND visit.kuukausi = :kuukausi AND visit.sivu_id = sivu.id AND sivu.account_id = :id GROUP BY sivu.ryhma").params(vuosi=form.year.data, kuukausi=form.month.data, id=current_user.id)
result = db.engine.execute(stmt)
return render_template("yhteenveto/ryhmassa.html", title="Käyntejä sivuryhmissä vuodessa", vuosi=result)
else:
return render_template("yhteenveto/kyselyryhmassa.html", title="Vuoden tilasto", form = InMonthForm())
@app.route("/yhteenveto/selaimia/", methods=["GET", "POST"])
@login_required
def yhteenveto_selaimia():
if request.method == 'POST':
form = InYearForm(request.form)
stmt = text("SELECT selain.selain AS nimi, SUM(selain.kaynnit) AS maara FROM sivu, selain, kavijat WHERE selain.kavijat_id = kavijat.id AND kavijat.vuosi = :vuosi AND kavijat.sivu_id = sivu.id AND sivu.account_id = :id GROUP BY selain.selain").params(vuosi=form.year.data, id=current_user.id)
result = db.engine.execute(stmt)
return render_template("yhteenveto/selaimia.html", title="Selaimien yhteenveto", selaimet=result)
else:
return render_template("yhteenveto/selainvuosi.html", title="Vuoden tilasto", form = InYearForm())
@app.route("/yhteenveto/kavijoita/", methods=["GET", "POST"])
@login_required
def yhteenveto_kavijoita():
if request.method == 'POST':
form = InYearForm(request.form)
stmt = text("SELECT sivu.osoite, SUM(kavijat.kaynnit) AS maara FROM sivu, kavijat WHERE kavijat.vuosi = :vuosi AND kavijat.sivu_id = sivu.id AND sivu.account_id = :id GROUP BY sivu.osoite").params(vuosi=form.year.data, id=current_user.id)
result = db.engine.execute(stmt)
return render_template("yhteenveto/kavijoita.html", title="Kavijoita sivuilla vuodessa", kavijat=result)
else:
return render_template("yhteenveto/kavijavuosi.html", title="Vuoden tilasto", form = InYearForm()) |
py | 1a4298e9aa2e8fa1d4b4d7475a4b8979d9edee0a |
def echo(params):
"""Echo back your input message."""
return params['message']
|
py | 1a429a627cd148f6d2bfb4ba2c78d3c72df1cce4 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetDiskResult',
'AwaitableGetDiskResult',
'get_disk',
]
@pulumi.output_type
class GetDiskResult:
"""
A Disk.
"""
def __init__(__self__, created_date=None, disk_blob_name=None, disk_size_gi_b=None, disk_type=None, disk_uri=None, host_caching=None, leased_by_lab_vm_id=None, location=None, managed_disk_id=None, name=None, provisioning_state=None, tags=None, type=None, unique_identifier=None):
if created_date and not isinstance(created_date, str):
raise TypeError("Expected argument 'created_date' to be a str")
pulumi.set(__self__, "created_date", created_date)
if disk_blob_name and not isinstance(disk_blob_name, str):
raise TypeError("Expected argument 'disk_blob_name' to be a str")
pulumi.set(__self__, "disk_blob_name", disk_blob_name)
if disk_size_gi_b and not isinstance(disk_size_gi_b, int):
raise TypeError("Expected argument 'disk_size_gi_b' to be a int")
pulumi.set(__self__, "disk_size_gi_b", disk_size_gi_b)
if disk_type and not isinstance(disk_type, str):
raise TypeError("Expected argument 'disk_type' to be a str")
pulumi.set(__self__, "disk_type", disk_type)
if disk_uri and not isinstance(disk_uri, str):
raise TypeError("Expected argument 'disk_uri' to be a str")
pulumi.set(__self__, "disk_uri", disk_uri)
if host_caching and not isinstance(host_caching, str):
raise TypeError("Expected argument 'host_caching' to be a str")
pulumi.set(__self__, "host_caching", host_caching)
if leased_by_lab_vm_id and not isinstance(leased_by_lab_vm_id, str):
raise TypeError("Expected argument 'leased_by_lab_vm_id' to be a str")
pulumi.set(__self__, "leased_by_lab_vm_id", leased_by_lab_vm_id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if managed_disk_id and not isinstance(managed_disk_id, str):
raise TypeError("Expected argument 'managed_disk_id' to be a str")
pulumi.set(__self__, "managed_disk_id", managed_disk_id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if unique_identifier and not isinstance(unique_identifier, str):
raise TypeError("Expected argument 'unique_identifier' to be a str")
pulumi.set(__self__, "unique_identifier", unique_identifier)
@property
@pulumi.getter(name="createdDate")
def created_date(self) -> str:
"""
The creation date of the disk.
"""
return pulumi.get(self, "created_date")
@property
@pulumi.getter(name="diskBlobName")
def disk_blob_name(self) -> Optional[str]:
"""
When backed by a blob, the name of the VHD blob without extension.
"""
return pulumi.get(self, "disk_blob_name")
@property
@pulumi.getter(name="diskSizeGiB")
def disk_size_gi_b(self) -> Optional[int]:
"""
The size of the disk in Gibibytes.
"""
return pulumi.get(self, "disk_size_gi_b")
@property
@pulumi.getter(name="diskType")
def disk_type(self) -> Optional[str]:
"""
The storage type for the disk (i.e. Standard, Premium).
"""
return pulumi.get(self, "disk_type")
@property
@pulumi.getter(name="diskUri")
def disk_uri(self) -> Optional[str]:
"""
When backed by a blob, the URI of underlying blob.
"""
return pulumi.get(self, "disk_uri")
@property
@pulumi.getter(name="hostCaching")
def host_caching(self) -> Optional[str]:
"""
The host caching policy of the disk (i.e. None, ReadOnly, ReadWrite).
"""
return pulumi.get(self, "host_caching")
@property
@pulumi.getter(name="leasedByLabVmId")
def leased_by_lab_vm_id(self) -> Optional[str]:
"""
The resource ID of the VM to which this disk is leased.
"""
return pulumi.get(self, "leased_by_lab_vm_id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="managedDiskId")
def managed_disk_id(self) -> Optional[str]:
"""
When backed by managed disk, this is the ID of the compute disk resource.
"""
return pulumi.get(self, "managed_disk_id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
The provisioning status of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
The tags of the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="uniqueIdentifier")
def unique_identifier(self) -> Optional[str]:
"""
The unique immutable identifier of a resource (Guid).
"""
return pulumi.get(self, "unique_identifier")
class AwaitableGetDiskResult(GetDiskResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDiskResult(
created_date=self.created_date,
disk_blob_name=self.disk_blob_name,
disk_size_gi_b=self.disk_size_gi_b,
disk_type=self.disk_type,
disk_uri=self.disk_uri,
host_caching=self.host_caching,
leased_by_lab_vm_id=self.leased_by_lab_vm_id,
location=self.location,
managed_disk_id=self.managed_disk_id,
name=self.name,
provisioning_state=self.provisioning_state,
tags=self.tags,
type=self.type,
unique_identifier=self.unique_identifier)
def get_disk(expand: Optional[str] = None,
lab_name: Optional[str] = None,
name: Optional[str] = None,
resource_group_name: Optional[str] = None,
user_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDiskResult:
"""
Use this data source to access information about an existing resource.
:param str expand: Specify the $expand query. Example: 'properties($select=diskType)'
:param str lab_name: The name of the lab.
:param str name: The name of the disk.
:param str resource_group_name: The name of the resource group.
:param str user_name: The name of the user profile.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['labName'] = lab_name
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__args__['userName'] = user_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:devtestlab/v20160515:getDisk', __args__, opts=opts, typ=GetDiskResult).value
return AwaitableGetDiskResult(
created_date=__ret__.created_date,
disk_blob_name=__ret__.disk_blob_name,
disk_size_gi_b=__ret__.disk_size_gi_b,
disk_type=__ret__.disk_type,
disk_uri=__ret__.disk_uri,
host_caching=__ret__.host_caching,
leased_by_lab_vm_id=__ret__.leased_by_lab_vm_id,
location=__ret__.location,
managed_disk_id=__ret__.managed_disk_id,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
tags=__ret__.tags,
type=__ret__.type,
unique_identifier=__ret__.unique_identifier)
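# Hedged usage sketch (added for illustration; the import path and resource names are
# assumptions, not taken from this repository):
#   import pulumi
#   from pulumi_azure_nextgen.devtestlab.v20160515 import get_disk
#   disk = get_disk(lab_name='my-lab', name='my-disk',
#                   resource_group_name='my-rg', user_name='my-user')
#   pulumi.export('diskUri', disk.disk_uri)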
|
py | 1a429a8a22ee42334556b082fb21ccc302c74fe8 | import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="keras-grid-search-cacheable",
version="1.0.0",
author="Daniel Espinosa",
author_email="[email protected]",
description="Reducción de tiempo de ejecución de los algoritmos de Machine Learning con búsqueda de parámetros en GridSearch.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/machine-learning-tools/keras-grid-search-cacheable",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6.9',
install_requires=[
'tensorflow>=1.15.0'
]
)
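# Typical local install flow (standard pip/setuptools usage, shown for illustration
# rather than taken from this repository):
#   pip install .                       # install from the repository root
#   python setup.py sdist bdist_wheel   # or build distributions for publishing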
|
py | 1a429adbaf8f63e462fec4d85e38badc232e6d08 | # $Id: __init__.py 7661 2013-05-07 10:52:59Z milde $
# Author: David Goodger
# Maintainer: [email protected]
# Copyright: This module has been placed in the public domain.
"""
Simple HyperText Markup Language document tree Writer.
The output conforms to the XHTML version 1.0 Transitional DTD
(*almost* strict). The output contains a minimum of formatting
information. The cascading style sheet "html4css1.css" is required
for proper viewing with a modern graphical browser.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import os.path
import time
import re
import urllib.request, urllib.parse, urllib.error
try: # check for the Python Imaging Library
import PIL.Image
except ImportError:
try: # sometimes PIL modules are put in PYTHONPATH's root
import Image
class PIL(object): pass # dummy wrapper
PIL.Image = Image
except ImportError:
PIL = None
import docutils
from docutils import frontend, nodes, utils, writers, languages, io
from docutils.utils.error_reporting import SafeString
from docutils.transforms import writer_aux
from docutils.utils.math import unichar2tex, pick_math_environment, math2html
from docutils.utils.math.latex2mathml import parse_latex_math
class Writer(writers.Writer):
supported = ('html', 'html4css1', 'xhtml')
"""Formats this writer supports."""
default_stylesheet = 'html4css1.css'
default_stylesheet_dirs = ['.', utils.relative_path(
os.path.join(os.getcwd(), 'dummy'), os.path.dirname(__file__))]
default_template = 'template.txt'
default_template_path = utils.relative_path(
os.path.join(os.getcwd(), 'dummy'),
os.path.join(os.path.dirname(__file__), default_template))
settings_spec = (
'HTML-Specific Options',
None,
(('Specify the template file (UTF-8 encoded). Default is "%s".'
% default_template_path,
['--template'],
{'default': default_template_path, 'metavar': '<file>'}),
('Comma separated list of stylesheet URLs. '
'Overrides previous --stylesheet and --stylesheet-path settings.',
['--stylesheet'],
{'metavar': '<URL[,URL,...]>', 'overrides': 'stylesheet_path',
'validator': frontend.validate_comma_separated_list}),
('Comma separated list of stylesheet paths. '
'Relative paths are expanded if a matching file is found in '
'the --stylesheet-dirs. With --link-stylesheet, '
'the path is rewritten relative to the output HTML file. '
'Default: "%s"' % default_stylesheet,
['--stylesheet-path'],
{'metavar': '<file[,file,...]>', 'overrides': 'stylesheet',
'validator': frontend.validate_comma_separated_list,
'default': [default_stylesheet]}),
('Embed the stylesheet(s) in the output HTML file. The stylesheet '
'files must be accessible during processing. This is the default.',
['--embed-stylesheet'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Link to the stylesheet(s) in the output HTML file. '
'Default: embed stylesheets.',
['--link-stylesheet'],
{'dest': 'embed_stylesheet', 'action': 'store_false'}),
('Comma-separated list of directories where stylesheets are found. '
'Used by --stylesheet-path when expanding relative path arguments. '
'Default: "%s"' % default_stylesheet_dirs,
['--stylesheet-dirs'],
{'metavar': '<dir[,dir,...]>',
'validator': frontend.validate_comma_separated_list,
'default': default_stylesheet_dirs}),
('Specify the initial header level. Default is 1 for "<h1>". '
'Does not affect document title & subtitle (see --no-doc-title).',
['--initial-header-level'],
{'choices': '1 2 3 4 5 6'.split(), 'default': '1',
'metavar': '<level>'}),
('Specify the maximum width (in characters) for one-column field '
'names. Longer field names will span an entire row of the table '
'used to render the field list. Default is 14 characters. '
'Use 0 for "no limit".',
['--field-name-limit'],
{'default': 14, 'metavar': '<level>',
'validator': frontend.validate_nonnegative_int}),
('Specify the maximum width (in characters) for options in option '
'lists. Longer options will span an entire row of the table used '
'to render the option list. Default is 14 characters. '
'Use 0 for "no limit".',
['--option-limit'],
{'default': 14, 'metavar': '<level>',
'validator': frontend.validate_nonnegative_int}),
('Format for footnote references: one of "superscript" or '
'"brackets". Default is "brackets".',
['--footnote-references'],
{'choices': ['superscript', 'brackets'], 'default': 'brackets',
'metavar': '<format>',
'overrides': 'trim_footnote_reference_space'}),
('Format for block quote attributions: one of "dash" (em-dash '
'prefix), "parentheses"/"parens", or "none". Default is "dash".',
['--attribution'],
{'choices': ['dash', 'parentheses', 'parens', 'none'],
'default': 'dash', 'metavar': '<format>'}),
('Remove extra vertical whitespace between items of "simple" bullet '
'lists and enumerated lists. Default: enabled.',
['--compact-lists'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable compact simple bullet and enumerated lists.',
['--no-compact-lists'],
{'dest': 'compact_lists', 'action': 'store_false'}),
('Remove extra vertical whitespace between items of simple field '
'lists. Default: enabled.',
['--compact-field-lists'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable compact simple field lists.',
['--no-compact-field-lists'],
{'dest': 'compact_field_lists', 'action': 'store_false'}),
('Added to standard table classes. '
'Defined styles: "borderless". Default: ""',
['--table-style'],
{'default': ''}),
('Math output format, one of "MathML", "HTML", "MathJax" '
'or "LaTeX". Default: "HTML math.css"',
['--math-output'],
{'default': 'HTML math.css'}),
('Omit the XML declaration. Use with caution.',
['--no-xml-declaration'],
{'dest': 'xml_declaration', 'default': 1, 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Obfuscate email addresses to confuse harvesters while still '
'keeping email links usable with standards-compliant browsers.',
['--cloak-email-addresses'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),))
settings_defaults = {'output_encoding_error_handler': 'xmlcharrefreplace'}
config_section = 'html4css1 writer'
config_section_dependencies = ('writers',)
visitor_attributes = (
'head_prefix', 'head', 'stylesheet', 'body_prefix',
'body_pre_docinfo', 'docinfo', 'body', 'body_suffix',
'title', 'subtitle', 'header', 'footer', 'meta', 'fragment',
'html_prolog', 'html_head', 'html_title', 'html_subtitle',
'html_body')
def get_transforms(self):
return writers.Writer.get_transforms(self) + [writer_aux.Admonitions]
def __init__(self):
writers.Writer.__init__(self)
self.translator_class = HTMLTranslator
def translate(self):
self.visitor = visitor = self.translator_class(self.document)
self.document.walkabout(visitor)
for attr in self.visitor_attributes:
setattr(self, attr, getattr(visitor, attr))
self.output = self.apply_template()
def apply_template(self):
template_file = open(self.document.settings.template, 'rb')
template = str(template_file.read(), 'utf-8')
template_file.close()
subs = self.interpolation_dict()
return template % subs
def interpolation_dict(self):
subs = {}
settings = self.document.settings
for attr in self.visitor_attributes:
subs[attr] = ''.join(getattr(self, attr)).rstrip('\n')
subs['encoding'] = settings.output_encoding
subs['version'] = docutils.__version__
return subs
def assemble_parts(self):
writers.Writer.assemble_parts(self)
for part in self.visitor_attributes:
self.parts[part] = ''.join(getattr(self, part))
class HTMLTranslator(nodes.NodeVisitor):
"""
This HTML writer has been optimized to produce visually compact
lists (less vertical whitespace). HTML's mixed content models
allow list items to contain "<li><p>body elements</p></li>" or
"<li>just text</li>" or even "<li>text<p>and body
elements</p>combined</li>", each with different effects. It would
be best to stick with strict body elements in list items, but they
affect vertical spacing in browsers (although they really
shouldn't).
Here is an outline of the optimization:
- Check for and omit <p> tags in "simple" lists: list items
contain either a single paragraph, a nested simple list, or a
paragraph followed by a nested simple list. This means that
this list can be compact:
- Item 1.
- Item 2.
But this list cannot be compact:
- Item 1.
This second paragraph forces space between list items.
- Item 2.
- In non-list contexts, omit <p> tags on a paragraph if that
paragraph is the only child of its parent (footnotes & citations
are allowed a label first).
- Regardless of the above, in definitions, table cells, field bodies,
option descriptions, and list items, mark the first child with
'class="first"' and the last child with 'class="last"'. The stylesheet
sets the margins (top & bottom respectively) to 0 for these elements.
The ``no_compact_lists`` setting (``--no-compact-lists`` command-line
option) disables list whitespace optimization.
"""
xml_declaration = '<?xml version="1.0" encoding="%s" ?>\n'
doctype = (
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"'
' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n')
doctype_mathml = doctype
head_prefix_template = ('<html xmlns="http://www.w3.org/1999/xhtml"'
' xml:lang="%(lang)s" lang="%(lang)s">\n<head>\n')
content_type = ('<meta http-equiv="Content-Type"'
' content="text/html; charset=%s" />\n')
content_type_mathml = ('<meta http-equiv="Content-Type"'
' content="application/xhtml+xml; charset=%s" />\n')
generator = ('<meta name="generator" content="Docutils %s: '
'http://docutils.sourceforge.net/" />\n')
# Template for the MathJax script in the header:
mathjax_script = '<script type="text/javascript" src="%s"></script>\n'
# The latest version of MathJax from the distributed server:
    # available to the public under the `MathJax CDN Terms of Service`__
# __http://www.mathjax.org/download/mathjax-cdn-terms-of-service/
mathjax_url = ('http://cdn.mathjax.org/mathjax/latest/MathJax.js?'
'config=TeX-AMS-MML_HTMLorMML')
# may be overwritten by custom URL appended to "mathjax"
stylesheet_link = '<link rel="stylesheet" href="%s" type="text/css" />\n'
embedded_stylesheet = '<style type="text/css">\n\n%s\n</style>\n'
words_and_spaces = re.compile(r'\S+| +|\n')
sollbruchstelle = re.compile(r'.+\W\W.+|[-?].+', re.U) # wrap point inside word
lang_attribute = 'lang' # name changes to 'xml:lang' in XHTML 1.1
def __init__(self, document):
nodes.NodeVisitor.__init__(self, document)
self.settings = settings = document.settings
lcode = settings.language_code
self.language = languages.get_language(lcode, document.reporter)
self.meta = [self.generator % docutils.__version__]
self.head_prefix = []
self.html_prolog = []
if settings.xml_declaration:
self.head_prefix.append(self.xml_declaration
% settings.output_encoding)
# encoding not interpolated:
self.html_prolog.append(self.xml_declaration)
self.head = self.meta[:]
self.stylesheet = [self.stylesheet_call(path)
for path in utils.get_stylesheet_list(settings)]
self.body_prefix = ['</head>\n<body>\n']
# document title, subtitle display
self.body_pre_docinfo = []
# author, date, etc.
self.docinfo = []
self.body = []
self.fragment = []
self.body_suffix = ['</body>\n</html>\n']
self.section_level = 0
self.initial_header_level = int(settings.initial_header_level)
self.math_output = settings.math_output.split()
self.math_output_options = self.math_output[1:]
self.math_output = self.math_output[0].lower()
        # A heterogeneous stack used in conjunction with the tree traversal.
# Make sure that the pops correspond to the pushes:
self.context = []
self.topic_classes = []
self.colspecs = []
self.compact_p = True
self.compact_simple = False
self.compact_field_list = False
self.in_docinfo = False
self.in_sidebar = False
self.title = []
self.subtitle = []
self.header = []
self.footer = []
self.html_head = [self.content_type] # charset not interpolated
self.html_title = []
self.html_subtitle = []
self.html_body = []
self.in_document_title = 0 # len(self.body) or 0
self.in_mailto = False
self.author_in_authors = False
self.math_header = []
def astext(self):
return ''.join(self.head_prefix + self.head
+ self.stylesheet + self.body_prefix
+ self.body_pre_docinfo + self.docinfo
+ self.body + self.body_suffix)
def encode(self, text):
"""Encode special characters in `text` & return."""
# @@@ A codec to do these and all other HTML entities would be nice.
text = str(text)
        return text.translate({
            ord('&'): '&amp;',
            ord('<'): '&lt;',
            ord('"'): '&quot;',
            ord('>'): '&gt;',
            ord('@'): '&#64;', # may thwart some address harvesters
            # TODO: convert non-breaking space only if needed?
            0xa0: '&nbsp;'}) # non-breaking space
def cloak_mailto(self, uri):
"""Try to hide a mailto: URL from harvesters."""
# Encode "@" using a URL octet reference (see RFC 1738).
# Further cloaking with HTML entities will be done in the
# `attval` function.
return uri.replace('@', '%40')
def cloak_email(self, addr):
"""Try to hide the link text of a email link from harversters."""
# Surround at-signs and periods with <span> tags. ("@" has
# already been encoded to "@" by the `encode` method.)
addr = addr.replace('@', '<span>@</span>')
addr = addr.replace('.', '<span>.</span>')
return addr
def attval(self, text,
whitespace=re.compile('[\n\r\t\v\f]')):
"""Cleanse, HTML encode, and return attribute value text."""
encoded = self.encode(whitespace.sub(' ', text))
if self.in_mailto and self.settings.cloak_email_addresses:
# Cloak at-signs ("%40") and periods with HTML entities.
            encoded = encoded.replace('%40', '&#37;&#52;&#48;')
            encoded = encoded.replace('.', '&#46;')
return encoded
def stylesheet_call(self, path):
"""Return code to reference or embed stylesheet file `path`"""
if self.settings.embed_stylesheet:
try:
content = io.FileInput(source_path=path,
encoding='utf-8').read()
self.settings.record_dependencies.add(path)
except IOError as err:
msg = "Cannot embed stylesheet '%s': %s." % (
path, SafeString(err.strerror))
self.document.reporter.error(msg)
return '<--- %s --->\n' % msg
return self.embedded_stylesheet % content
# else link to style file:
if self.settings.stylesheet_path:
# adapt path relative to output (cf. config.html#stylesheet-path)
path = utils.relative_path(self.settings._destination, path)
return self.stylesheet_link % self.encode(path)
def starttag(self, node, tagname, suffix='\n', empty=False, **attributes):
"""
Construct and return a start tag given a node (id & class attributes
are extracted), tag name, and optional attributes.
"""
tagname = tagname.lower()
prefix = []
atts = {}
ids = []
for (name, value) in list(attributes.items()):
atts[name.lower()] = value
classes = []
languages = []
# unify class arguments and move language specification
for cls in node.get('classes', []) + atts.pop('class', '').split() :
if cls.startswith('language-'):
languages.append(cls[9:])
elif cls.strip() and cls not in classes:
classes.append(cls)
if languages:
# attribute name is 'lang' in XHTML 1.0 but 'xml:lang' in 1.1
atts[self.lang_attribute] = languages[0]
if classes:
atts['class'] = ' '.join(classes)
assert 'id' not in atts
ids.extend(node.get('ids', []))
if 'ids' in atts:
ids.extend(atts['ids'])
del atts['ids']
if ids:
atts['id'] = ids[0]
for id in ids[1:]:
# Add empty "span" elements for additional IDs. Note
# that we cannot use empty "a" elements because there
# may be targets inside of references, but nested "a"
# elements aren't allowed in XHTML (even if they do
# not all have a "href" attribute).
if empty:
# Empty tag. Insert target right in front of element.
prefix.append('<span id="%s"></span>' % id)
else:
# Non-empty tag. Place the auxiliary <span> tag
# *inside* the element, as the first child.
suffix += '<span id="%s"></span>' % id
attlist = list(atts.items())
attlist.sort()
parts = [tagname]
for name, value in attlist:
# value=None was used for boolean attributes without
# value, but this isn't supported by XHTML.
assert value is not None
if isinstance(value, list):
values = [str(v) for v in value]
parts.append('%s="%s"' % (name.lower(),
self.attval(' '.join(values))))
else:
parts.append('%s="%s"' % (name.lower(),
self.attval(str(value))))
if empty:
infix = ' /'
else:
infix = ''
return ''.join(prefix) + '<%s%s>' % (' '.join(parts), infix) + suffix
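    # Illustrative note (comment added for clarity, not in the original writer): for a
    # node carrying classes=['lead'] and ids=['intro'], a call such as
    #   self.starttag(node, 'p', '', CLASS='first')
    # returns '<p class="lead first" id="intro">' -- node classes precede keyword
    # classes, attributes are emitted in sorted order, and the suffix is appended.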
def emptytag(self, node, tagname, suffix='\n', **attributes):
"""Construct and return an XML-compatible empty tag."""
return self.starttag(node, tagname, suffix, empty=True, **attributes)
def set_class_on_child(self, node, class_, index=0):
"""
Set class `class_` on the visible child no. index of `node`.
Do nothing if node has fewer children than `index`.
"""
children = [n for n in node if not isinstance(n, nodes.Invisible)]
try:
child = children[index]
except IndexError:
return
child['classes'].append(class_)
def set_first_last(self, node):
self.set_class_on_child(node, 'first', 0)
self.set_class_on_child(node, 'last', -1)
def visit_Text(self, node):
text = node.astext()
encoded = self.encode(text)
if self.in_mailto and self.settings.cloak_email_addresses:
encoded = self.cloak_email(encoded)
self.body.append(encoded)
def depart_Text(self, node):
pass
def visit_abbreviation(self, node):
# @@@ implementation incomplete ("title" attribute)
self.body.append(self.starttag(node, 'abbr', ''))
def depart_abbreviation(self, node):
self.body.append('</abbr>')
def visit_acronym(self, node):
# @@@ implementation incomplete ("title" attribute)
self.body.append(self.starttag(node, 'acronym', ''))
def depart_acronym(self, node):
self.body.append('</acronym>')
def visit_address(self, node):
self.visit_docinfo_item(node, 'address', meta=False)
self.body.append(self.starttag(node, 'pre', CLASS='address'))
def depart_address(self, node):
self.body.append('\n</pre>\n')
self.depart_docinfo_item()
def visit_admonition(self, node):
self.body.append(self.starttag(node, 'div'))
self.set_first_last(node)
def depart_admonition(self, node=None):
self.body.append('</div>\n')
    attribution_formats = {'dash': ('&mdash;', ''),
'parentheses': ('(', ')'),
'parens': ('(', ')'),
'none': ('', '')}
def visit_attribution(self, node):
prefix, suffix = self.attribution_formats[self.settings.attribution]
self.context.append(suffix)
self.body.append(
self.starttag(node, 'p', prefix, CLASS='attribution'))
def depart_attribution(self, node):
self.body.append(self.context.pop() + '</p>\n')
def visit_author(self, node):
if isinstance(node.parent, nodes.authors):
if self.author_in_authors:
self.body.append('\n<br />')
else:
self.visit_docinfo_item(node, 'author')
def depart_author(self, node):
if isinstance(node.parent, nodes.authors):
self.author_in_authors = True
else:
self.depart_docinfo_item()
def visit_authors(self, node):
self.visit_docinfo_item(node, 'authors')
self.author_in_authors = False # initialize
def depart_authors(self, node):
self.depart_docinfo_item()
def visit_block_quote(self, node):
self.body.append(self.starttag(node, 'blockquote'))
def depart_block_quote(self, node):
self.body.append('</blockquote>\n')
def check_simple_list(self, node):
"""Check for a simple list that can be rendered compactly."""
visitor = SimpleListChecker(self.document)
try:
node.walk(visitor)
except nodes.NodeFound:
return None
else:
return 1
def is_compactable(self, node):
return ('compact' in node['classes']
or (self.settings.compact_lists
and 'open' not in node['classes']
and (self.compact_simple
or self.topic_classes == ['contents']
or self.check_simple_list(node))))
def visit_bullet_list(self, node):
atts = {}
old_compact_simple = self.compact_simple
self.context.append((self.compact_simple, self.compact_p))
self.compact_p = None
self.compact_simple = self.is_compactable(node)
if self.compact_simple and not old_compact_simple:
atts['class'] = 'simple'
self.body.append(self.starttag(node, 'ul', **atts))
def depart_bullet_list(self, node):
self.compact_simple, self.compact_p = self.context.pop()
self.body.append('</ul>\n')
def visit_caption(self, node):
self.body.append(self.starttag(node, 'p', '', CLASS='caption'))
def depart_caption(self, node):
self.body.append('</p>\n')
def visit_citation(self, node):
self.body.append(self.starttag(node, 'table',
CLASS='docutils citation',
frame="void", rules="none"))
self.body.append('<colgroup><col class="label" /><col /></colgroup>\n'
'<tbody valign="top">\n'
'<tr>')
self.footnote_backrefs(node)
def depart_citation(self, node):
self.body.append('</td></tr>\n'
'</tbody>\n</table>\n')
def visit_citation_reference(self, node):
href = '#'
if 'refid' in node:
href += node['refid']
elif 'refname' in node:
href += self.document.nameids[node['refname']]
# else: # TODO system message (or already in the transform)?
# 'Citation reference missing.'
self.body.append(self.starttag(
node, 'a', '[', CLASS='citation-reference', href=href))
def depart_citation_reference(self, node):
self.body.append(']</a>')
def visit_classifier(self, node):
self.body.append(' <span class="classifier-delimiter">:</span> ')
self.body.append(self.starttag(node, 'span', '', CLASS='classifier'))
def depart_classifier(self, node):
self.body.append('</span>')
def visit_colspec(self, node):
self.colspecs.append(node)
# "stubs" list is an attribute of the tgroup element:
node.parent.stubs.append(node.attributes.get('stub'))
def depart_colspec(self, node):
pass
def write_colspecs(self):
width = 0
for node in self.colspecs:
width += node['colwidth']
for node in self.colspecs:
colwidth = int(node['colwidth'] * 100.0 / width + 0.5)
self.body.append(self.emptytag(node, 'col',
width='%i%%' % colwidth))
self.colspecs = []
def visit_comment(self, node,
sub=re.compile('-(?=-)').sub):
"""Escape double-dashes in comment text."""
self.body.append('<!-- %s -->\n' % sub('- ', node.astext()))
# Content already processed:
raise nodes.SkipNode
def visit_compound(self, node):
self.body.append(self.starttag(node, 'div', CLASS='compound'))
if len(node) > 1:
node[0]['classes'].append('compound-first')
node[-1]['classes'].append('compound-last')
for child in node[1:-1]:
child['classes'].append('compound-middle')
def depart_compound(self, node):
self.body.append('</div>\n')
def visit_container(self, node):
self.body.append(self.starttag(node, 'div', CLASS='container'))
def depart_container(self, node):
self.body.append('</div>\n')
def visit_contact(self, node):
self.visit_docinfo_item(node, 'contact', meta=False)
def depart_contact(self, node):
self.depart_docinfo_item()
def visit_copyright(self, node):
self.visit_docinfo_item(node, 'copyright')
def depart_copyright(self, node):
self.depart_docinfo_item()
def visit_date(self, node):
self.visit_docinfo_item(node, 'date')
def depart_date(self, node):
self.depart_docinfo_item()
def visit_decoration(self, node):
pass
def depart_decoration(self, node):
pass
def visit_definition(self, node):
self.body.append('</dt>\n')
self.body.append(self.starttag(node, 'dd', ''))
self.set_first_last(node)
def depart_definition(self, node):
self.body.append('</dd>\n')
def visit_definition_list(self, node):
self.body.append(self.starttag(node, 'dl', CLASS='docutils'))
def depart_definition_list(self, node):
self.body.append('</dl>\n')
def visit_definition_list_item(self, node):
pass
def depart_definition_list_item(self, node):
pass
def visit_description(self, node):
self.body.append(self.starttag(node, 'td', ''))
self.set_first_last(node)
def depart_description(self, node):
self.body.append('</td>')
def visit_docinfo(self, node):
self.context.append(len(self.body))
self.body.append(self.starttag(node, 'table',
CLASS='docinfo',
frame="void", rules="none"))
self.body.append('<col class="docinfo-name" />\n'
'<col class="docinfo-content" />\n'
'<tbody valign="top">\n')
self.in_docinfo = True
def depart_docinfo(self, node):
self.body.append('</tbody>\n</table>\n')
self.in_docinfo = False
start = self.context.pop()
self.docinfo = self.body[start:]
self.body = []
def visit_docinfo_item(self, node, name, meta=True):
if meta:
meta_tag = '<meta name="%s" content="%s" />\n' \
% (name, self.attval(node.astext()))
self.add_meta(meta_tag)
self.body.append(self.starttag(node, 'tr', ''))
self.body.append('<th class="docinfo-name">%s:</th>\n<td>'
% self.language.labels[name])
if len(node):
if isinstance(node[0], nodes.Element):
node[0]['classes'].append('first')
if isinstance(node[-1], nodes.Element):
node[-1]['classes'].append('last')
def depart_docinfo_item(self):
self.body.append('</td></tr>\n')
def visit_doctest_block(self, node):
self.body.append(self.starttag(node, 'pre', CLASS='doctest-block'))
def depart_doctest_block(self, node):
self.body.append('\n</pre>\n')
def visit_document(self, node):
self.head.append('<title>%s</title>\n'
% self.encode(node.get('title', '')))
def depart_document(self, node):
self.head_prefix.extend([self.doctype,
self.head_prefix_template %
{'lang': self.settings.language_code}])
self.html_prolog.append(self.doctype)
self.meta.insert(0, self.content_type % self.settings.output_encoding)
self.head.insert(0, self.content_type % self.settings.output_encoding)
if self.math_header:
if self.math_output == 'mathjax':
self.head.extend(self.math_header)
else:
self.stylesheet.extend(self.math_header)
# skip content-type meta tag with interpolated charset value:
self.html_head.extend(self.head[1:])
self.body_prefix.append(self.starttag(node, 'div', CLASS='document'))
self.body_suffix.insert(0, '</div>\n')
self.fragment.extend(self.body) # self.fragment is the "naked" body
self.html_body.extend(self.body_prefix[1:] + self.body_pre_docinfo
+ self.docinfo + self.body
+ self.body_suffix[:-1])
assert not self.context, 'len(context) = %s' % len(self.context)
def visit_emphasis(self, node):
self.body.append(self.starttag(node, 'em', ''))
def depart_emphasis(self, node):
self.body.append('</em>')
def visit_entry(self, node):
atts = {'class': []}
if isinstance(node.parent.parent, nodes.thead):
atts['class'].append('head')
if node.parent.parent.parent.stubs[node.parent.column]:
# "stubs" list is an attribute of the tgroup element
atts['class'].append('stub')
if atts['class']:
tagname = 'th'
atts['class'] = ' '.join(atts['class'])
else:
tagname = 'td'
del atts['class']
node.parent.column += 1
if 'morerows' in node:
atts['rowspan'] = node['morerows'] + 1
if 'morecols' in node:
atts['colspan'] = node['morecols'] + 1
node.parent.column += node['morecols']
self.body.append(self.starttag(node, tagname, '', **atts))
self.context.append('</%s>\n' % tagname.lower())
if len(node) == 0: # empty cell
            self.body.append('&nbsp;')
self.set_first_last(node)
def depart_entry(self, node):
self.body.append(self.context.pop())
def visit_enumerated_list(self, node):
"""
The 'start' attribute does not conform to HTML 4.01's strict.dtd, but
CSS1 doesn't help. CSS2 isn't widely enough supported yet to be
usable.
"""
atts = {}
if 'start' in node:
atts['start'] = node['start']
if 'enumtype' in node:
atts['class'] = node['enumtype']
# @@@ To do: prefix, suffix. How? Change prefix/suffix to a
# single "format" attribute? Use CSS2?
old_compact_simple = self.compact_simple
self.context.append((self.compact_simple, self.compact_p))
self.compact_p = None
self.compact_simple = self.is_compactable(node)
if self.compact_simple and not old_compact_simple:
atts['class'] = (atts.get('class', '') + ' simple').strip()
self.body.append(self.starttag(node, 'ol', **atts))
def depart_enumerated_list(self, node):
self.compact_simple, self.compact_p = self.context.pop()
self.body.append('</ol>\n')
def visit_field(self, node):
self.body.append(self.starttag(node, 'tr', '', CLASS='field'))
def depart_field(self, node):
self.body.append('</tr>\n')
def visit_field_body(self, node):
self.body.append(self.starttag(node, 'td', '', CLASS='field-body'))
self.set_class_on_child(node, 'first', 0)
field = node.parent
if (self.compact_field_list or
isinstance(field.parent, nodes.docinfo) or
field.parent.index(field) == len(field.parent) - 1):
# If we are in a compact list, the docinfo, or if this is
# the last field of the field list, do not add vertical
# space after last element.
self.set_class_on_child(node, 'last', -1)
def depart_field_body(self, node):
self.body.append('</td>\n')
def visit_field_list(self, node):
self.context.append((self.compact_field_list, self.compact_p))
self.compact_p = None
if 'compact' in node['classes']:
self.compact_field_list = True
elif (self.settings.compact_field_lists
and 'open' not in node['classes']):
self.compact_field_list = True
if self.compact_field_list:
for field in node:
field_body = field[-1]
assert isinstance(field_body, nodes.field_body)
children = [n for n in field_body
if not isinstance(n, nodes.Invisible)]
if not (len(children) == 0 or
len(children) == 1 and
isinstance(children[0],
(nodes.paragraph, nodes.line_block))):
self.compact_field_list = False
break
self.body.append(self.starttag(node, 'table', frame='void',
rules='none',
CLASS='docutils field-list'))
self.body.append('<col class="field-name" />\n'
'<col class="field-body" />\n'
'<tbody valign="top">\n')
def depart_field_list(self, node):
self.body.append('</tbody>\n</table>\n')
self.compact_field_list, self.compact_p = self.context.pop()
def visit_field_name(self, node):
atts = {}
if self.in_docinfo:
atts['class'] = 'docinfo-name'
else:
atts['class'] = 'field-name'
if ( self.settings.field_name_limit
and len(node.astext()) > self.settings.field_name_limit):
atts['colspan'] = 2
self.context.append('</tr>\n'
+ self.starttag(node.parent, 'tr', '',
CLASS='field')
                                + '<td>&nbsp;</td>')
else:
self.context.append('')
self.body.append(self.starttag(node, 'th', '', **atts))
def depart_field_name(self, node):
self.body.append(':</th>')
self.body.append(self.context.pop())
def visit_figure(self, node):
atts = {'class': 'figure'}
if node.get('width'):
atts['style'] = 'width: %s' % node['width']
if node.get('align'):
atts['class'] += " align-" + node['align']
self.body.append(self.starttag(node, 'div', **atts))
def depart_figure(self, node):
self.body.append('</div>\n')
def visit_footer(self, node):
self.context.append(len(self.body))
def depart_footer(self, node):
start = self.context.pop()
footer = [self.starttag(node, 'div', CLASS='footer'),
'<hr class="footer" />\n']
footer.extend(self.body[start:])
footer.append('\n</div>\n')
self.footer.extend(footer)
self.body_suffix[:0] = footer
del self.body[start:]
def visit_footnote(self, node):
self.body.append(self.starttag(node, 'table',
CLASS='docutils footnote',
frame="void", rules="none"))
self.body.append('<colgroup><col class="label" /><col /></colgroup>\n'
'<tbody valign="top">\n'
'<tr>')
self.footnote_backrefs(node)
def footnote_backrefs(self, node):
backlinks = []
backrefs = node['backrefs']
if self.settings.footnote_backlinks and backrefs:
if len(backrefs) == 1:
self.context.append('')
self.context.append('</a>')
self.context.append('<a class="fn-backref" href="#%s">'
% backrefs[0])
else:
i = 1
for backref in backrefs:
backlinks.append('<a class="fn-backref" href="#%s">%s</a>'
% (backref, i))
i += 1
self.context.append('<em>(%s)</em> ' % ', '.join(backlinks))
self.context += ['', '']
else:
self.context.append('')
self.context += ['', '']
# If the node does not only consist of a label.
if len(node) > 1:
# If there are preceding backlinks, we do not set class
# 'first', because we need to retain the top-margin.
if not backlinks:
node[1]['classes'].append('first')
node[-1]['classes'].append('last')
def depart_footnote(self, node):
self.body.append('</td></tr>\n'
'</tbody>\n</table>\n')
def visit_footnote_reference(self, node):
href = '#' + node['refid']
format = self.settings.footnote_references
if format == 'brackets':
suffix = '['
self.context.append(']')
else:
assert format == 'superscript'
suffix = '<sup>'
self.context.append('</sup>')
self.body.append(self.starttag(node, 'a', suffix,
CLASS='footnote-reference', href=href))
def depart_footnote_reference(self, node):
self.body.append(self.context.pop() + '</a>')
def visit_generated(self, node):
pass
def depart_generated(self, node):
pass
def visit_header(self, node):
self.context.append(len(self.body))
def depart_header(self, node):
start = self.context.pop()
header = [self.starttag(node, 'div', CLASS='header')]
header.extend(self.body[start:])
header.append('\n<hr class="header"/>\n</div>\n')
self.body_prefix.extend(header)
self.header.extend(header)
del self.body[start:]
def visit_image(self, node):
atts = {}
uri = node['uri']
# place SVG and SWF images in an <object> element
types = {'.svg': 'image/svg+xml',
'.swf': 'application/x-shockwave-flash'}
ext = os.path.splitext(uri)[1].lower()
if ext in ('.svg', '.swf'):
atts['data'] = uri
atts['type'] = types[ext]
else:
atts['src'] = uri
atts['alt'] = node.get('alt', uri)
# image size
if 'width' in node:
atts['width'] = node['width']
if 'height' in node:
atts['height'] = node['height']
if 'scale' in node:
if (PIL and not ('width' in node and 'height' in node)
and self.settings.file_insertion_enabled):
imagepath = urllib.request.url2pathname(uri)
try:
img = PIL.Image.open(
imagepath.encode(sys.getfilesystemencoding()))
except (IOError, UnicodeEncodeError):
pass # TODO: warn?
else:
self.settings.record_dependencies.add(
imagepath.replace('\\', '/'))
if 'width' not in atts:
atts['width'] = str(img.size[0])
if 'height' not in atts:
atts['height'] = str(img.size[1])
del img
for att_name in 'width', 'height':
if att_name in atts:
match = re.match(r'([0-9.]+)(\S*)$', atts[att_name])
assert match
atts[att_name] = '%s%s' % (
float(match.group(1)) * (float(node['scale']) / 100),
match.group(2))
style = []
for att_name in 'width', 'height':
if att_name in atts:
if re.match(r'^[0-9.]+$', atts[att_name]):
# Interpret unitless values as pixels.
atts[att_name] += 'px'
style.append('%s: %s;' % (att_name, atts[att_name]))
del atts[att_name]
if style:
atts['style'] = ' '.join(style)
if (isinstance(node.parent, nodes.TextElement) or
(isinstance(node.parent, nodes.reference) and
not isinstance(node.parent.parent, nodes.TextElement))):
# Inline context or surrounded by <a>...</a>.
suffix = ''
else:
suffix = '\n'
if 'align' in node:
atts['class'] = 'align-%s' % node['align']
self.context.append('')
if ext in ('.svg', '.swf'): # place in an object element,
# do NOT use an empty tag: incorrect rendering in browsers
self.body.append(self.starttag(node, 'object', suffix, **atts) +
node.get('alt', uri) + '</object>' + suffix)
else:
self.body.append(self.emptytag(node, 'img', suffix, **atts))
def depart_image(self, node):
self.body.append(self.context.pop())
def visit_inline(self, node):
self.body.append(self.starttag(node, 'span', ''))
def depart_inline(self, node):
self.body.append('</span>')
def visit_label(self, node):
# Context added in footnote_backrefs.
self.body.append(self.starttag(node, 'td', '%s[' % self.context.pop(),
CLASS='label'))
def depart_label(self, node):
# Context added in footnote_backrefs.
self.body.append(']%s</td><td>%s' % (self.context.pop(), self.context.pop()))
def visit_legend(self, node):
self.body.append(self.starttag(node, 'div', CLASS='legend'))
def depart_legend(self, node):
self.body.append('</div>\n')
def visit_line(self, node):
self.body.append(self.starttag(node, 'div', suffix='', CLASS='line'))
if not len(node):
self.body.append('<br />')
def depart_line(self, node):
self.body.append('</div>\n')
def visit_line_block(self, node):
self.body.append(self.starttag(node, 'div', CLASS='line-block'))
def depart_line_block(self, node):
self.body.append('</div>\n')
def visit_list_item(self, node):
self.body.append(self.starttag(node, 'li', ''))
if len(node):
node[0]['classes'].append('first')
def depart_list_item(self, node):
self.body.append('</li>\n')
def visit_literal(self, node):
# special case: "code" role
classes = node.get('classes', [])
if 'code' in classes:
# filter 'code' from class arguments
node['classes'] = [cls for cls in classes if cls != 'code']
self.body.append(self.starttag(node, 'code', ''))
return
self.body.append(
self.starttag(node, 'tt', '', CLASS='docutils literal'))
text = node.astext()
for token in self.words_and_spaces.findall(text):
if token.strip():
# Protect text like "--an-option" and the regular expression
# ``[+]?(\d+(\.\d*)?|\.\d+)`` from bad line wrapping
if self.sollbruchstelle.search(token):
self.body.append('<span class="pre">%s</span>'
% self.encode(token))
else:
self.body.append(self.encode(token))
elif token in ('\n', ' '):
# Allow breaks at whitespace:
self.body.append(token)
else:
# Protect runs of multiple spaces; the last space can wrap:
                self.body.append('&nbsp;' * (len(token) - 1) + ' ')
self.body.append('</tt>')
# Content already processed:
raise nodes.SkipNode
def depart_literal(self, node):
# skipped unless literal element is from "code" role:
self.body.append('</code>')
def visit_literal_block(self, node):
self.body.append(self.starttag(node, 'pre', CLASS='literal-block'))
def depart_literal_block(self, node):
self.body.append('\n</pre>\n')
def visit_math(self, node, math_env=''):
# If the method is called from visit_math_block(), math_env != ''.
# As there is no native HTML math support, we provide alternatives:
# LaTeX and MathJax math_output modes simply wrap the content,
# HTML and MathML math_output modes also convert the math_code.
if self.math_output not in ('mathml', 'html', 'mathjax', 'latex'):
self.document.reporter.error(
'math-output format "%s" not supported '
'falling back to "latex"'% self.math_output)
self.math_output = 'latex'
#
# HTML container
tags = {# math_output: (block, inline, class-arguments)
'mathml': ('div', '', ''),
'html': ('div', 'span', 'formula'),
'mathjax': ('div', 'span', 'math'),
'latex': ('pre', 'tt', 'math'),
}
tag = tags[self.math_output][math_env == '']
clsarg = tags[self.math_output][2]
# LaTeX container
wrappers = {# math_mode: (inline, block)
'mathml': (None, None),
'html': ('$%s$', '\\begin{%s}\n%s\n\\end{%s}'),
'mathjax': ('\(%s\)', '\\begin{%s}\n%s\n\\end{%s}'),
'latex': (None, None),
}
wrapper = wrappers[self.math_output][math_env != '']
# get and wrap content
math_code = node.astext().translate(unichar2tex.uni2tex_table)
if wrapper and math_env:
math_code = wrapper % (math_env, math_code, math_env)
elif wrapper:
math_code = wrapper % math_code
# settings and conversion
if self.math_output in ('latex', 'mathjax'):
math_code = self.encode(math_code)
if self.math_output == 'mathjax' and not self.math_header:
if self.math_output_options:
self.mathjax_url = self.math_output_options[0]
self.math_header = [self.mathjax_script % self.mathjax_url]
elif self.math_output == 'html':
if self.math_output_options and not self.math_header:
self.math_header = [self.stylesheet_call(
utils.find_file_in_dirs(s, self.settings.stylesheet_dirs))
for s in self.math_output_options[0].split(',')]
# TODO: fix display mode in matrices and fractions
math2html.DocumentParameters.displaymode = (math_env != '')
math_code = math2html.math2html(math_code)
elif self.math_output == 'mathml':
self.doctype = self.doctype_mathml
self.content_type = self.content_type_mathml
try:
mathml_tree = parse_latex_math(math_code, inline=not(math_env))
math_code = ''.join(mathml_tree.xml())
except SyntaxError as err:
err_node = self.document.reporter.error(err, base_node=node)
self.visit_system_message(err_node)
self.body.append(self.starttag(node, 'p'))
self.body.append(','.join(err.args))
self.body.append('</p>\n')
self.body.append(self.starttag(node, 'pre',
CLASS='literal-block'))
self.body.append(self.encode(math_code))
self.body.append('\n</pre>\n')
self.depart_system_message(err_node)
raise nodes.SkipNode
# append to document body
if tag:
self.body.append(self.starttag(node, tag,
suffix='\n'*bool(math_env),
CLASS=clsarg))
self.body.append(math_code)
if math_env:
self.body.append('\n')
if tag:
self.body.append('</%s>\n' % tag)
# Content already processed:
raise nodes.SkipNode
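    # Illustrative note (comment added for clarity, not in the original writer): the
    # output mode above comes from the "--math-output" setting; a value such as
    #   'MathJax /usr/share/javascript/mathjax/MathJax.js'
    # selects MathJax and makes the second word override `self.mathjax_url`.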
def depart_math(self, node):
pass # never reached
def visit_math_block(self, node):
# print node.astext().encode('utf8')
math_env = pick_math_environment(node.astext())
self.visit_math(node, math_env=math_env)
def depart_math_block(self, node):
pass # never reached
def visit_meta(self, node):
meta = self.emptytag(node, 'meta', **node.non_default_attributes())
self.add_meta(meta)
def depart_meta(self, node):
pass
def add_meta(self, tag):
self.meta.append(tag)
self.head.append(tag)
def visit_option(self, node):
if self.context[-1]:
self.body.append(', ')
self.body.append(self.starttag(node, 'span', '', CLASS='option'))
def depart_option(self, node):
self.body.append('</span>')
self.context[-1] += 1
def visit_option_argument(self, node):
self.body.append(node.get('delimiter', ' '))
self.body.append(self.starttag(node, 'var', ''))
def depart_option_argument(self, node):
self.body.append('</var>')
def visit_option_group(self, node):
atts = {}
if ( self.settings.option_limit
and len(node.astext()) > self.settings.option_limit):
atts['colspan'] = 2
            self.context.append('</tr>\n<tr><td>&nbsp;</td>')
else:
self.context.append('')
self.body.append(
self.starttag(node, 'td', CLASS='option-group', **atts))
self.body.append('<kbd>')
self.context.append(0) # count number of options
def depart_option_group(self, node):
self.context.pop()
self.body.append('</kbd></td>\n')
self.body.append(self.context.pop())
def visit_option_list(self, node):
self.body.append(
self.starttag(node, 'table', CLASS='docutils option-list',
frame="void", rules="none"))
self.body.append('<col class="option" />\n'
'<col class="description" />\n'
'<tbody valign="top">\n')
def depart_option_list(self, node):
self.body.append('</tbody>\n</table>\n')
def visit_option_list_item(self, node):
self.body.append(self.starttag(node, 'tr', ''))
def depart_option_list_item(self, node):
self.body.append('</tr>\n')
def visit_option_string(self, node):
pass
def depart_option_string(self, node):
pass
def visit_organization(self, node):
self.visit_docinfo_item(node, 'organization')
def depart_organization(self, node):
self.depart_docinfo_item()
def should_be_compact_paragraph(self, node):
"""
Determine if the <p> tags around paragraph ``node`` can be omitted.
"""
if (isinstance(node.parent, nodes.document) or
isinstance(node.parent, nodes.compound)):
# Never compact paragraphs in document or compound.
return False
for key, value in node.attlist():
if (node.is_not_default(key) and
not (key == 'classes' and value in
([], ['first'], ['last'], ['first', 'last']))):
# Attribute which needs to survive.
return False
first = isinstance(node.parent[0], nodes.label) # skip label
for child in node.parent.children[first:]:
# only first paragraph can be compact
if isinstance(child, nodes.Invisible):
continue
if child is node:
break
return False
parent_length = len([n for n in node.parent if not isinstance(
n, (nodes.Invisible, nodes.label))])
if ( self.compact_simple
or self.compact_field_list
or self.compact_p and parent_length == 1):
return True
return False
def visit_paragraph(self, node):
if self.should_be_compact_paragraph(node):
self.context.append('')
else:
self.body.append(self.starttag(node, 'p', ''))
self.context.append('</p>\n')
def depart_paragraph(self, node):
self.body.append(self.context.pop())
def visit_problematic(self, node):
if node.hasattr('refid'):
self.body.append('<a href="#%s">' % node['refid'])
self.context.append('</a>')
else:
self.context.append('')
self.body.append(self.starttag(node, 'span', '', CLASS='problematic'))
def depart_problematic(self, node):
self.body.append('</span>')
self.body.append(self.context.pop())
def visit_raw(self, node):
if 'html' in node.get('format', '').split():
t = isinstance(node.parent, nodes.TextElement) and 'span' or 'div'
if node['classes']:
self.body.append(self.starttag(node, t, suffix=''))
self.body.append(node.astext())
if node['classes']:
self.body.append('</%s>' % t)
# Keep non-HTML raw text out of output:
raise nodes.SkipNode
def visit_reference(self, node):
atts = {'class': 'reference'}
if 'refuri' in node:
atts['href'] = node['refuri']
if ( self.settings.cloak_email_addresses
and atts['href'].startswith('mailto:')):
atts['href'] = self.cloak_mailto(atts['href'])
self.in_mailto = True
atts['class'] += ' external'
else:
assert 'refid' in node, \
'References must have "refuri" or "refid" attribute.'
atts['href'] = '#' + node['refid']
atts['class'] += ' internal'
if not isinstance(node.parent, nodes.TextElement):
assert len(node) == 1 and isinstance(node[0], nodes.image)
atts['class'] += ' image-reference'
self.body.append(self.starttag(node, 'a', '', **atts))
def depart_reference(self, node):
self.body.append('</a>')
if not isinstance(node.parent, nodes.TextElement):
self.body.append('\n')
self.in_mailto = False
def visit_revision(self, node):
self.visit_docinfo_item(node, 'revision', meta=False)
def depart_revision(self, node):
self.depart_docinfo_item()
def visit_row(self, node):
self.body.append(self.starttag(node, 'tr', ''))
node.column = 0
def depart_row(self, node):
self.body.append('</tr>\n')
def visit_rubric(self, node):
self.body.append(self.starttag(node, 'p', '', CLASS='rubric'))
def depart_rubric(self, node):
self.body.append('</p>\n')
def visit_section(self, node):
self.section_level += 1
self.body.append(
self.starttag(node, 'div', CLASS='section'))
def depart_section(self, node):
self.section_level -= 1
self.body.append('</div>\n')
def visit_sidebar(self, node):
self.body.append(
self.starttag(node, 'div', CLASS='sidebar'))
self.set_first_last(node)
self.in_sidebar = True
def depart_sidebar(self, node):
self.body.append('</div>\n')
self.in_sidebar = False
def visit_status(self, node):
self.visit_docinfo_item(node, 'status', meta=False)
def depart_status(self, node):
self.depart_docinfo_item()
def visit_strong(self, node):
self.body.append(self.starttag(node, 'strong', ''))
def depart_strong(self, node):
self.body.append('</strong>')
def visit_subscript(self, node):
self.body.append(self.starttag(node, 'sub', ''))
def depart_subscript(self, node):
self.body.append('</sub>')
def visit_substitution_definition(self, node):
"""Internal only."""
raise nodes.SkipNode
def visit_substitution_reference(self, node):
self.unimplemented_visit(node)
def visit_subtitle(self, node):
if isinstance(node.parent, nodes.sidebar):
self.body.append(self.starttag(node, 'p', '',
CLASS='sidebar-subtitle'))
self.context.append('</p>\n')
elif isinstance(node.parent, nodes.document):
self.body.append(self.starttag(node, 'h2', '', CLASS='subtitle'))
self.context.append('</h2>\n')
self.in_document_title = len(self.body)
elif isinstance(node.parent, nodes.section):
tag = 'h%s' % (self.section_level + self.initial_header_level - 1)
self.body.append(
self.starttag(node, tag, '', CLASS='section-subtitle') +
self.starttag({}, 'span', '', CLASS='section-subtitle'))
self.context.append('</span></%s>\n' % tag)
def depart_subtitle(self, node):
self.body.append(self.context.pop())
if self.in_document_title:
self.subtitle = self.body[self.in_document_title:-1]
self.in_document_title = 0
self.body_pre_docinfo.extend(self.body)
self.html_subtitle.extend(self.body)
del self.body[:]
def visit_superscript(self, node):
self.body.append(self.starttag(node, 'sup', ''))
def depart_superscript(self, node):
self.body.append('</sup>')
def visit_system_message(self, node):
self.body.append(self.starttag(node, 'div', CLASS='system-message'))
self.body.append('<p class="system-message-title">')
backref_text = ''
if len(node['backrefs']):
backrefs = node['backrefs']
if len(backrefs) == 1:
backref_text = ('; <em><a href="#%s">backlink</a></em>'
% backrefs[0])
else:
i = 1
backlinks = []
for backref in backrefs:
backlinks.append('<a href="#%s">%s</a>' % (backref, i))
i += 1
backref_text = ('; <em>backlinks: %s</em>'
% ', '.join(backlinks))
if node.hasattr('line'):
line = ', line %s' % node['line']
else:
line = ''
self.body.append('System Message: %s/%s '
'(<tt class="docutils">%s</tt>%s)%s</p>\n'
% (node['type'], node['level'],
self.encode(node['source']), line, backref_text))
def depart_system_message(self, node):
self.body.append('</div>\n')
def visit_table(self, node):
self.context.append(self.compact_p)
self.compact_p = True
classes = ' '.join(['docutils', self.settings.table_style]).strip()
self.body.append(
self.starttag(node, 'table', CLASS=classes, border="1"))
def depart_table(self, node):
self.compact_p = self.context.pop()
self.body.append('</table>\n')
def visit_target(self, node):
if not ('refuri' in node or 'refid' in node
or 'refname' in node):
self.body.append(self.starttag(node, 'span', '', CLASS='target'))
self.context.append('</span>')
else:
self.context.append('')
def depart_target(self, node):
self.body.append(self.context.pop())
def visit_tbody(self, node):
self.write_colspecs()
self.body.append(self.context.pop()) # '</colgroup>\n' or ''
self.body.append(self.starttag(node, 'tbody', valign='top'))
def depart_tbody(self, node):
self.body.append('</tbody>\n')
def visit_term(self, node):
self.body.append(self.starttag(node, 'dt', ''))
def depart_term(self, node):
"""
Leave the end tag to `self.visit_definition()`, in case there's a
classifier.
"""
pass
def visit_tgroup(self, node):
# Mozilla needs <colgroup>:
self.body.append(self.starttag(node, 'colgroup'))
# Appended by thead or tbody:
self.context.append('</colgroup>\n')
node.stubs = []
def depart_tgroup(self, node):
pass
def visit_thead(self, node):
self.write_colspecs()
self.body.append(self.context.pop()) # '</colgroup>\n'
# There may or may not be a <thead>; this is for <tbody> to use:
self.context.append('')
self.body.append(self.starttag(node, 'thead', valign='bottom'))
def depart_thead(self, node):
self.body.append('</thead>\n')
def visit_title(self, node):
"""Only 6 section levels are supported by HTML."""
check_id = 0 # TODO: is this a bool (False) or a counter?
close_tag = '</p>\n'
if isinstance(node.parent, nodes.topic):
self.body.append(
self.starttag(node, 'p', '', CLASS='topic-title first'))
elif isinstance(node.parent, nodes.sidebar):
self.body.append(
self.starttag(node, 'p', '', CLASS='sidebar-title'))
elif isinstance(node.parent, nodes.Admonition):
self.body.append(
self.starttag(node, 'p', '', CLASS='admonition-title'))
elif isinstance(node.parent, nodes.table):
self.body.append(
self.starttag(node, 'caption', ''))
close_tag = '</caption>\n'
elif isinstance(node.parent, nodes.document):
self.body.append(self.starttag(node, 'h1', '', CLASS='title'))
close_tag = '</h1>\n'
self.in_document_title = len(self.body)
else:
assert isinstance(node.parent, nodes.section)
h_level = self.section_level + self.initial_header_level - 1
atts = {}
if (len(node.parent) >= 2 and
isinstance(node.parent[1], nodes.subtitle)):
atts['CLASS'] = 'with-subtitle'
self.body.append(
self.starttag(node, 'h%s' % h_level, '', **atts))
atts = {}
if node.hasattr('refid'):
atts['class'] = 'toc-backref'
atts['href'] = '#' + node['refid']
if atts:
self.body.append(self.starttag({}, 'a', '', **atts))
close_tag = '</a></h%s>\n' % (h_level)
else:
close_tag = '</h%s>\n' % (h_level)
self.context.append(close_tag)
def depart_title(self, node):
self.body.append(self.context.pop())
if self.in_document_title:
self.title = self.body[self.in_document_title:-1]
self.in_document_title = 0
self.body_pre_docinfo.extend(self.body)
self.html_title.extend(self.body)
del self.body[:]
def visit_title_reference(self, node):
self.body.append(self.starttag(node, 'cite', ''))
def depart_title_reference(self, node):
self.body.append('</cite>')
def visit_topic(self, node):
self.body.append(self.starttag(node, 'div', CLASS='topic'))
self.topic_classes = node['classes']
def depart_topic(self, node):
self.body.append('</div>\n')
self.topic_classes = []
def visit_transition(self, node):
self.body.append(self.emptytag(node, 'hr', CLASS='docutils'))
def depart_transition(self, node):
pass
def visit_version(self, node):
self.visit_docinfo_item(node, 'version', meta=False)
def depart_version(self, node):
self.depart_docinfo_item()
def unimplemented_visit(self, node):
raise NotImplementedError('visiting unimplemented node type: %s'
% node.__class__.__name__)
class SimpleListChecker(nodes.GenericNodeVisitor):
"""
    Raise `nodes.NodeFound` if a non-simple list item is encountered.
Here "simple" means a list item containing nothing other than a single
paragraph, a simple list, or a paragraph followed by a simple list.
"""
def default_visit(self, node):
raise nodes.NodeFound
def visit_bullet_list(self, node):
pass
def visit_enumerated_list(self, node):
pass
def visit_list_item(self, node):
children = []
for child in node.children:
if not isinstance(child, nodes.Invisible):
children.append(child)
if (children and isinstance(children[0], nodes.paragraph)
and (isinstance(children[-1], nodes.bullet_list)
or isinstance(children[-1], nodes.enumerated_list))):
children.pop()
if len(children) <= 1:
return
else:
raise nodes.NodeFound
def visit_paragraph(self, node):
raise nodes.SkipNode
def invisible_visit(self, node):
"""Invisible nodes should be ignored."""
raise nodes.SkipNode
visit_comment = invisible_visit
visit_substitution_definition = invisible_visit
visit_target = invisible_visit
visit_pending = invisible_visit
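# Illustrative sketch, not part of the original writer: a checker like the one
# above is normally driven by walking a candidate list node and treating
# `nodes.NodeFound` as "this list is not simple enough to compact".  The helper
# name below is hypothetical; the real writer wires this into its own
# list-compaction logic.
def _example_is_simple_list(node, document):
    """Return True if `node` contains only simple list items (sketch)."""
    try:
        node.walk(SimpleListChecker(document))
    except nodes.NodeFound:
        return False
    return True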
|
py | 1a429bcffe9247b636c561f73508f3625de78b81 |
import os
from celery import Celery
from django.apps import apps, AppConfig
from django.conf import settings
if not settings.configured:
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local') # pragma: no cover
app = Celery('project_dashboard')
class CeleryConfig(AppConfig):
name = 'project_dashboard.taskapp'
verbose_name = 'Celery Config'
def ready(self):
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
installed_apps = [app_config.name for app_config in apps.get_app_configs()]
app.autodiscover_tasks(lambda: installed_apps, force=True)
if hasattr(settings, 'RAVEN_CONFIG'):
# Celery signal registration
from raven import Client as RavenClient
from raven.contrib.celery import register_signal as raven_register_signal
from raven.contrib.celery import register_logger_signal as raven_register_logger_signal
raven_client = RavenClient(dsn=settings.RAVEN_CONFIG['dsn'])
raven_register_logger_signal(raven_client)
raven_register_signal(raven_client)
@app.task(bind=True)
def debug_task(self):
print(f'Request: {self.request!r}') # pragma: no cover
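# Illustrative only, not part of the original module: with the Celery app
# configured above, tasks declared via `@app.task` are usually enqueued with
# `.delay()` / `.apply_async()`.  Wrapped in a function so importing this
# module does not enqueue anything; it also assumes a reachable broker.
def _example_enqueue_debug_task():
    """Sketch: enqueue the debug task and return its AsyncResult id."""
    result = debug_task.delay()   # send the task to the configured broker
    return result.id              # can be used later to poll for the result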
|
py | 1a429c061c0f4b23220650eacf56774b3721ca6b | from __future__ import unicode_literals
import importlib
import json
import logging
import random
import re
import string
import urlparse
from django.conf import settings
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponse
import urllib3
logger = logging.getLogger(__name__)
content_types = {
'image': [
'website.content_types.Imgur',
'website.content_types.Image'
],
'video': [
'website.content_types.YouTube',
'website.content_types.Video'
],
'link': [
'website.content_types.Link'
],
'gifv': [
'website.content_types.GifV'
]
}
def get_class_from_string(class_name):
module_name, cls_name = class_name.rsplit(".", 1)
module = importlib.import_module(module_name)
return getattr(module, cls_name)
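# Illustrative sketch, not part of the original module: get_class_from_string
# resolves the dotted paths stored in the `content_types` table above.  The
# path used here is taken from that table; whether the import succeeds depends
# on the surrounding project being installed.
def _example_resolve_detector():
    cls = get_class_from_string('website.content_types.Image')
    return cls()   # instantiate the detector, just as detect_link_type() does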
def detect_post_type(url=None):
# Default
if not url:
return 'text'
else:
return detect_link_type(url)
def find_urls(text):
"""Find any URLs in the given text, return a list of them"""
    return re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|'
                      r'(?:%[0-9a-fA-F][0-9a-fA-F]))+', text)
def imgur_preprocessor(url):
url_data = urlparse.urlparse(url)
logger.info('imgurl preprocessor')
logger.info((url_data.netloc == 'imgur.com' or
url_data.netloc == 'i.imgur.com'))
logger.info(url[-4:] == '.gif')
if ((url_data.netloc == 'imgur.com' or
url_data.netloc == 'i.imgur.com') and url[-4:] == '.gif'):
# Switch to gifv
url += 'v'
logger.info('new url {}'.format(url))
return url
def detect_link_type(url):
"""Given a link, get the HEAD and match to known types"""
logger.info("Detecting content type of {}".format(url))
# Get mime type of remote url
try:
http = urllib3.PoolManager()
response = http.request('HEAD', url)
content_type = response.headers.get('content-type')
except Exception as e:
logger.warning("Could not detect content type. Defaulting to "
"link for url: {}, exception: {}".format(url, e))
return 'link'
# Find list of content detectors based on mime
if ('text/html' in content_type and
'imgur.com' in url and
url[-5:] == '.gifv'):
key = 'gifv'
elif content_type in settings.MIME_IMAGES:
key = 'image'
elif content_type in settings.MIME_VIDEO or 'youtube.com' in url:
key = 'video'
elif url:
key = 'link'
else:
return 'text'
logger.info('content type is {}'.format(key))
# Go through content detectors in order, returning if any matches
for content_type in content_types[key]:
cls = get_class_from_string(content_type)()
detected_type = cls.detect(url, content_type)
if detected_type:
return detected_type
def url_filter(text):
"""Given a block of text, add HTML for links and embedded content."""
attachment_type = None
attachment_url = None
# Search for images to render
urls = find_urls(text)
# Render the first
logger.debug('Looking for image links in message {}'.format(
text))
if urls and detect_link_type(urls[0]) == 'image':
logger.info('found image link in message: {}'.format(urls[0]))
attachment_type = 'image'
attachment_url = urls[0]
return {
'message': text,
'attachment_type': attachment_type,
'attachment_url': attachment_url
}
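# Illustrative sketch, not part of the original module: url_filter() returns a
# plain dict, so callers typically unpack it as below.  The URL is a made-up
# example, and the image detection performs a network HEAD request at runtime.
def _example_url_filter_usage():
    result = url_filter('look at this http://example.com/cat.png')
    # result == {'message': <original text>,
    #            'attachment_type': 'image' or None,
    #            'attachment_url': first image URL or None}
    return result['attachment_type'], result['attachment_url']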
def render_to_json(request, data):
# msgs = {}
# messages_list = messages.get_messages(request)
# count = 0
# for message in messages_list:
# msgs[count] = {'message': message.message, 'level': message.level}
# count += 1
# data['messages'] = msgs
return HttpResponse(
json.dumps(data, ensure_ascii=False, cls=DjangoJSONEncoder),
content_type=request.is_ajax() and "application/json" or "text/html"
)
def generate_pk(length=32):
return ''.join(random.SystemRandom().choice(
'abcdef' + string.digits) for _ in range(length))
|
py | 1a429c65c66874e4b23ed40e79ad35711d43af40 | from arm.logicnode.arm_nodes import *
class ExpressionNode(ArmLogicTreeNode):
"""Evaluate a Haxe expression and get its output.
@output Result: the result of the expression."""
bl_idname = 'LNExpressionNode'
bl_label = 'Expression'
arm_version = 1
property0: StringProperty(name='', default='')
def init(self, context):
super(ExpressionNode, self).init(context)
self.add_input('ArmNodeSocketAction', 'In')
self.add_output('ArmNodeSocketAction', 'Out')
self.add_output('NodeSocketShader', 'Result')
def draw_buttons(self, context, layout):
layout.prop(self, 'property0')
add_node(ExpressionNode, category=PKG_AS_CATEGORY, section='haxe')
|
py | 1a429ce17e74b0daad40875e75e2a6cb5d74a5ee | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['VpnSiteArgs', 'VpnSite']
@pulumi.input_type
class VpnSiteArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
address_space: Optional[pulumi.Input['AddressSpaceArgs']] = None,
bgp_properties: Optional[pulumi.Input['BgpSettingsArgs']] = None,
device_properties: Optional[pulumi.Input['DevicePropertiesArgs']] = None,
id: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
is_security_site: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
site_key: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_wan: Optional[pulumi.Input['SubResourceArgs']] = None,
vpn_site_links: Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]]] = None,
vpn_site_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a VpnSite resource.
:param pulumi.Input[str] resource_group_name: The resource group name of the VpnSite.
:param pulumi.Input['AddressSpaceArgs'] address_space: The AddressSpace that contains an array of IP address ranges.
:param pulumi.Input['BgpSettingsArgs'] bgp_properties: The set of bgp properties.
:param pulumi.Input['DevicePropertiesArgs'] device_properties: The device properties.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] ip_address: The ip-address for the vpn-site.
:param pulumi.Input[bool] is_security_site: IsSecuritySite flag.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] site_key: The key for vpn-site that can be used for connections.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input['SubResourceArgs'] virtual_wan: The VirtualWAN to which the vpnSite belongs.
:param pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]] vpn_site_links: List of all vpn site links.
:param pulumi.Input[str] vpn_site_name: The name of the VpnSite being created or updated.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if address_space is not None:
pulumi.set(__self__, "address_space", address_space)
if bgp_properties is not None:
pulumi.set(__self__, "bgp_properties", bgp_properties)
if device_properties is not None:
pulumi.set(__self__, "device_properties", device_properties)
if id is not None:
pulumi.set(__self__, "id", id)
if ip_address is not None:
pulumi.set(__self__, "ip_address", ip_address)
if is_security_site is not None:
pulumi.set(__self__, "is_security_site", is_security_site)
if location is not None:
pulumi.set(__self__, "location", location)
if site_key is not None:
pulumi.set(__self__, "site_key", site_key)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if virtual_wan is not None:
pulumi.set(__self__, "virtual_wan", virtual_wan)
if vpn_site_links is not None:
pulumi.set(__self__, "vpn_site_links", vpn_site_links)
if vpn_site_name is not None:
pulumi.set(__self__, "vpn_site_name", vpn_site_name)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The resource group name of the VpnSite.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="addressSpace")
def address_space(self) -> Optional[pulumi.Input['AddressSpaceArgs']]:
"""
The AddressSpace that contains an array of IP address ranges.
"""
return pulumi.get(self, "address_space")
@address_space.setter
def address_space(self, value: Optional[pulumi.Input['AddressSpaceArgs']]):
pulumi.set(self, "address_space", value)
@property
@pulumi.getter(name="bgpProperties")
def bgp_properties(self) -> Optional[pulumi.Input['BgpSettingsArgs']]:
"""
The set of bgp properties.
"""
return pulumi.get(self, "bgp_properties")
@bgp_properties.setter
def bgp_properties(self, value: Optional[pulumi.Input['BgpSettingsArgs']]):
pulumi.set(self, "bgp_properties", value)
@property
@pulumi.getter(name="deviceProperties")
def device_properties(self) -> Optional[pulumi.Input['DevicePropertiesArgs']]:
"""
The device properties.
"""
return pulumi.get(self, "device_properties")
@device_properties.setter
def device_properties(self, value: Optional[pulumi.Input['DevicePropertiesArgs']]):
pulumi.set(self, "device_properties", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> Optional[pulumi.Input[str]]:
"""
The ip-address for the vpn-site.
"""
return pulumi.get(self, "ip_address")
@ip_address.setter
def ip_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ip_address", value)
@property
@pulumi.getter(name="isSecuritySite")
def is_security_site(self) -> Optional[pulumi.Input[bool]]:
"""
IsSecuritySite flag.
"""
return pulumi.get(self, "is_security_site")
@is_security_site.setter
def is_security_site(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_security_site", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="siteKey")
def site_key(self) -> Optional[pulumi.Input[str]]:
"""
The key for vpn-site that can be used for connections.
"""
return pulumi.get(self, "site_key")
@site_key.setter
def site_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "site_key", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="virtualWan")
def virtual_wan(self) -> Optional[pulumi.Input['SubResourceArgs']]:
"""
The VirtualWAN to which the vpnSite belongs.
"""
return pulumi.get(self, "virtual_wan")
@virtual_wan.setter
def virtual_wan(self, value: Optional[pulumi.Input['SubResourceArgs']]):
pulumi.set(self, "virtual_wan", value)
@property
@pulumi.getter(name="vpnSiteLinks")
def vpn_site_links(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]]]:
"""
List of all vpn site links.
"""
return pulumi.get(self, "vpn_site_links")
@vpn_site_links.setter
def vpn_site_links(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]]]):
pulumi.set(self, "vpn_site_links", value)
@property
@pulumi.getter(name="vpnSiteName")
def vpn_site_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the VpnSite being created or updated.
"""
return pulumi.get(self, "vpn_site_name")
@vpn_site_name.setter
def vpn_site_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vpn_site_name", value)
class VpnSite(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
address_space: Optional[pulumi.Input[pulumi.InputType['AddressSpaceArgs']]] = None,
bgp_properties: Optional[pulumi.Input[pulumi.InputType['BgpSettingsArgs']]] = None,
device_properties: Optional[pulumi.Input[pulumi.InputType['DevicePropertiesArgs']]] = None,
id: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
is_security_site: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
site_key: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_wan: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
vpn_site_links: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]]] = None,
vpn_site_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
VpnSite Resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['AddressSpaceArgs']] address_space: The AddressSpace that contains an array of IP address ranges.
:param pulumi.Input[pulumi.InputType['BgpSettingsArgs']] bgp_properties: The set of bgp properties.
:param pulumi.Input[pulumi.InputType['DevicePropertiesArgs']] device_properties: The device properties.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] ip_address: The ip-address for the vpn-site.
:param pulumi.Input[bool] is_security_site: IsSecuritySite flag.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The resource group name of the VpnSite.
:param pulumi.Input[str] site_key: The key for vpn-site that can be used for connections.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_wan: The VirtualWAN to which the vpnSite belongs.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]] vpn_site_links: List of all vpn site links.
:param pulumi.Input[str] vpn_site_name: The name of the VpnSite being created or updated.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: VpnSiteArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
VpnSite Resource.
:param str resource_name: The name of the resource.
:param VpnSiteArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(VpnSiteArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
address_space: Optional[pulumi.Input[pulumi.InputType['AddressSpaceArgs']]] = None,
bgp_properties: Optional[pulumi.Input[pulumi.InputType['BgpSettingsArgs']]] = None,
device_properties: Optional[pulumi.Input[pulumi.InputType['DevicePropertiesArgs']]] = None,
id: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
is_security_site: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
site_key: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_wan: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
vpn_site_links: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]]] = None,
vpn_site_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = VpnSiteArgs.__new__(VpnSiteArgs)
__props__.__dict__["address_space"] = address_space
__props__.__dict__["bgp_properties"] = bgp_properties
__props__.__dict__["device_properties"] = device_properties
__props__.__dict__["id"] = id
__props__.__dict__["ip_address"] = ip_address
__props__.__dict__["is_security_site"] = is_security_site
__props__.__dict__["location"] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["site_key"] = site_key
__props__.__dict__["tags"] = tags
__props__.__dict__["virtual_wan"] = virtual_wan
__props__.__dict__["vpn_site_links"] = vpn_site_links
__props__.__dict__["vpn_site_name"] = vpn_site_name
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20200501:VpnSite"), pulumi.Alias(type_="azure-native:network:VpnSite"), pulumi.Alias(type_="azure-nextgen:network:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180401:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180401:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180601:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180601:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180701:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180701:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180801:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180801:VpnSite"), pulumi.Alias(type_="azure-native:network/v20181001:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20181001:VpnSite"), pulumi.Alias(type_="azure-native:network/v20181101:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20181101:VpnSite"), pulumi.Alias(type_="azure-native:network/v20181201:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20181201:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190201:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190201:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190401:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190401:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190601:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190601:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190701:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190701:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190801:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190801:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190901:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190901:VpnSite"), pulumi.Alias(type_="azure-native:network/v20191101:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20191101:VpnSite"), pulumi.Alias(type_="azure-native:network/v20191201:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20191201:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200301:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200301:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200401:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200401:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200601:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200601:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200701:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200701:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200801:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200801:VpnSite"), pulumi.Alias(type_="azure-native:network/v20201101:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20201101:VpnSite"), pulumi.Alias(type_="azure-native:network/v20210201:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20210201:VpnSite"), pulumi.Alias(type_="azure-native:network/v20210301:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20210301:VpnSite")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(VpnSite, __self__).__init__(
'azure-native:network/v20200501:VpnSite',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'VpnSite':
"""
Get an existing VpnSite resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = VpnSiteArgs.__new__(VpnSiteArgs)
__props__.__dict__["address_space"] = None
__props__.__dict__["bgp_properties"] = None
__props__.__dict__["device_properties"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["ip_address"] = None
__props__.__dict__["is_security_site"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["site_key"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
__props__.__dict__["virtual_wan"] = None
__props__.__dict__["vpn_site_links"] = None
return VpnSite(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="addressSpace")
def address_space(self) -> pulumi.Output[Optional['outputs.AddressSpaceResponse']]:
"""
The AddressSpace that contains an array of IP address ranges.
"""
return pulumi.get(self, "address_space")
@property
@pulumi.getter(name="bgpProperties")
def bgp_properties(self) -> pulumi.Output[Optional['outputs.BgpSettingsResponse']]:
"""
The set of bgp properties.
"""
return pulumi.get(self, "bgp_properties")
@property
@pulumi.getter(name="deviceProperties")
def device_properties(self) -> pulumi.Output[Optional['outputs.DevicePropertiesResponse']]:
"""
The device properties.
"""
return pulumi.get(self, "device_properties")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> pulumi.Output[Optional[str]]:
"""
The ip-address for the vpn-site.
"""
return pulumi.get(self, "ip_address")
@property
@pulumi.getter(name="isSecuritySite")
def is_security_site(self) -> pulumi.Output[Optional[bool]]:
"""
IsSecuritySite flag.
"""
return pulumi.get(self, "is_security_site")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the VPN site resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="siteKey")
def site_key(self) -> pulumi.Output[Optional[str]]:
"""
The key for vpn-site that can be used for connections.
"""
return pulumi.get(self, "site_key")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualWan")
def virtual_wan(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
"""
The VirtualWAN to which the vpnSite belongs.
"""
return pulumi.get(self, "virtual_wan")
@property
@pulumi.getter(name="vpnSiteLinks")
def vpn_site_links(self) -> pulumi.Output[Optional[Sequence['outputs.VpnSiteLinkResponse']]]:
"""
List of all vpn site links.
"""
return pulumi.get(self, "vpn_site_links")
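# Illustrative usage sketch, not part of the generated code: a VpnSite is
# normally declared inside a Pulumi program roughly as below.  The resource
# group, virtual WAN id and address prefix are placeholders, not values taken
# from this module.
def _example_declare_vpn_site():
    return VpnSite(
        "exampleVpnSite",
        resource_group_name="example-rg",
        location="westeurope",
        ip_address="203.0.113.10",
        address_space=AddressSpaceArgs(address_prefixes=["10.0.0.0/24"]),
        virtual_wan=SubResourceArgs(
            id="/subscriptions/<sub-id>/resourceGroups/example-rg/"
               "providers/Microsoft.Network/virtualWans/example-wan"),
    )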
|
py | 1a429eaacb6c237dc607f862fbd60a203e2b1a4d | import sys
from sys import argv
from subprocess import call
import threading
import webbrowser
import os
from shutil import copy, move, rmtree
from os.path import join, dirname, realpath, exists
from glob import glob
import re
from setuptools import setup, find_packages, Command
directory = dirname(realpath(__file__))
sys.path.insert(0, join(directory, 'escher'))
version = __import__('version').__version__
full_version = __import__('version').__full_version__
package = __import__('version').package
port = 8789
setup(
name='Escher',
version=full_version,
author=package['author'],
url=package['homepage'],
description=package['description'],
keywords=', '.join(package['keywords']),
license=package['license'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Visualization',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Operating System :: OS Independent'
],
packages=find_packages(),
include_package_data=True,
data_files=[
(
'share/jupyter/nbextensions/escher',
[
'escher/static/extension.js',
'escher/static/escher.min.js',
'escher/static/escher.min.js.map',
]
),
(
'etc/jupyter/nbconfig/notebook.d',
['escher.json'],
)
],
install_requires=[
'Jinja2>=2.7.3,<3',
'pytest>=4.0.1,<5',
'cobra>=0.5.0',
'jsonschema>=2.4.0,<3',
'ipywidgets>=7.1.0,<8',
'pandas>=0.18'
],
extras_require={
'docs': ['sphinx>=1.2', 'sphinx-rtd-theme>=0.1.6'],
},
)
|
py | 1a429eacc40f25cf2496861a8e6bd6b978b595c5 | import re
import sys
from django import VERSION
from django.conf import settings as django_settings
from django.contrib import admin
from django.db import connection
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.views import debug
from celery import current_app
from redis import ConnectionError
from kitsune.sumo.redis_utils import redis_client
def settings(request):
"""Admin view that displays the django settings."""
settings = debug.get_safe_settings()
sorted_settings = [{"key": key, "value": settings[key]} for key in sorted(settings.keys())]
return render_to_response(
"kadmin/settings.html",
{"pythonpath": sys.path, "settings": sorted_settings, "title": "Settings"},
RequestContext(request, {}),
)
admin.site.register_view("settings", view=settings, name="Settings")
def celery_settings(request):
"""Admin view that displays the celery configuration."""
capital = re.compile("^[A-Z]")
settings = [key for key in dir(current_app.conf) if capital.match(key)]
sorted_settings = [
{
"key": key,
"value": "*****" if "password" in key.lower() else getattr(current_app.conf, key),
}
for key in sorted(settings)
]
return render_to_response(
"kadmin/settings.html",
{"settings": sorted_settings, "title": "Celery Settings"},
RequestContext(request, {}),
)
admin.site.register_view("celery", view=celery_settings, name="Celery Settings")
def env(request):
"""Admin view that displays env info."""
return render_to_response(
"kadmin/env_view.html",
{"request": request, "pythonver": sys.version, "djangover": VERSION},
)
admin.site.register_view("env", view=env, name="Environment")
def schema_version(request):
"""Admin view that displays the current schema_version."""
cursor = connection.cursor()
cursor.execute("SELECT version FROM schema_version")
version = [x for x in cursor][0][0]
return render_to_response(
"kadmin/schema.html",
{"schema_version": version, "title": "Schema Version"},
RequestContext(request, {}),
)
admin.site.register_view("schema", view=schema_version, name="Database Schema Version")
def redis_info(request):
"""Admin view that displays redis INFO+CONFIG output for all backends."""
redis_info = {}
for key in list(django_settings.REDIS_BACKENDS.keys()):
redis_info[key] = {}
client = redis_client(key)
redis_info[key]["connection"] = django_settings.REDIS_BACKENDS[key]
try:
cfg = client.config_get()
redis_info[key]["config"] = [{"key": k, "value": cfg[k]} for k in sorted(cfg)]
info = client.info()
redis_info[key]["info"] = [{"key": k, "value": info[k]} for k in sorted(info)]
except ConnectionError:
redis_info[key]["down"] = True
return render_to_response(
"kadmin/redis.html",
{"redis_info": redis_info, "title": "Redis Information"},
RequestContext(request, {}),
)
admin.site.register_view("redis", view=redis_info, name="Redis Information")
|
py | 1a429f6c6a164ad44743aaca55e48283d40dd1d9 | # ------------------------------------------------------------------------
# Copyright 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
import os
def run_test(env, xml_file, test):
"""
Run test with the given SCons Environment, dumping Valgrind
results to the given XML file. If no Valgrind run is desired
simply pass in an empty string or None for the xml_file
parameter.
Note that the test path should not include the build directory
where binaries are placed. The build directory will be prepended
to the test path automatically.
"""
build_dir = env.get('BUILD_DIR')
result_dir = os.path.join(build_dir, 'test_out/')
if not os.path.isdir(result_dir):
os.makedirs(result_dir)
# Dump test report in XML format to the results directory.
env.AppendENVPath('GTEST_OUTPUT', ['xml:' + result_dir])
# Make sure the Google Test libraries are in the dynamic
# linker/loader path.
env.AppendENVPath('LD_LIBRARY_PATH', [build_dir])
env.AppendENVPath('LD_LIBRARY_PATH', ['./extlibs/gtest/gtest-1.7.0/lib/.libs'])
test_cmd = os.path.join(build_dir, test)
have_valgrind = False
if env.get('TARGET_OS') not in ['windows']:
have_valgrind = True
if xml_file and have_valgrind:
# Environment variables to be made available during the
# Valgrind run.
valgrind_environment = ''
# GLib uses a custom memory allocation scheme that can
# sometimes confuse Valgrind. Configure GLib to be Valgrind
# friendly.
valgrind_environment += 'G_DEBUG=gc-friendly G_SLICE=always-malloc'
# Valgrind suppressions file.
suppression_file = env.File('#tools/valgrind/iotivity.supp').srcnode().path
# Set up to run the test under Valgrind.
test_cmd = '%s valgrind --leak-check=full --suppressions=%s --xml=yes --xml-file=%s %s' % (valgrind_environment, suppression_file, xml_file, test_cmd)
ut = env.Command('ut', None, test_cmd)
env.AlwaysBuild('ut')
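# Illustrative only, not part of the original helper: a SConscript would
# typically call run_test() with the build environment, a Valgrind XML output
# path and the test binary path (both paths below are hypothetical).
def _example_invoke(env):
    """Sketch of driving run_test from a build script."""
    run_test(env,
             'resource/unittests/test_out/sample_test.memcheck.xml',
             'resource/unittests/sample_test')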
|
py | 1a42a14da60309b92d94fad968f028b1bfe1e0f7 | from test.integration.base import DBTIntegrationTest, use_profile
from dbt import deprecations
import dbt.exceptions
class BaseTestDeprecations(DBTIntegrationTest):
def setUp(self):
super().setUp()
deprecations.reset_deprecations()
@property
def schema(self):
return "deprecation_test_012"
@staticmethod
def dir(path):
return path.lstrip("/")
class TestDeprecations(BaseTestDeprecations):
@property
def models(self):
return self.dir("models")
@use_profile('postgres')
def test_postgres_deprecations_fail(self):
self.run_dbt(strict=True, expect_pass=False)
@use_profile('postgres')
def test_postgres_deprecations(self):
self.assertEqual(deprecations.active_deprecations, set())
self.run_dbt(strict=False)
expected = {'adapter:already_exists'}
self.assertEqual(expected, deprecations.active_deprecations)
class TestMaterializationReturnDeprecation(BaseTestDeprecations):
@property
def models(self):
return self.dir('custom-models')
@property
def project_config(self):
return {
'config-version': 2,
'macro-paths': [self.dir('custom-materialization-macros')],
}
@use_profile('postgres')
def test_postgres_deprecations_fail(self):
# this should fail at runtime
self.run_dbt(strict=True, expect_pass=False)
@use_profile('postgres')
def test_postgres_deprecations(self):
self.assertEqual(deprecations.active_deprecations, set())
self.run_dbt(strict=False)
expected = {'materialization-return'}
self.assertEqual(expected, deprecations.active_deprecations)
class TestModelsKeyMismatchDeprecation(BaseTestDeprecations):
@property
def models(self):
return self.dir('models-key-mismatch')
@use_profile('postgres')
def test_postgres_deprecations_fail(self):
# this should fail at compile_time
with self.assertRaises(dbt.exceptions.CompilationException) as exc:
self.run_dbt(strict=True)
exc_str = ' '.join(str(exc.exception).split()) # flatten all whitespace
self.assertIn('"seed" is a seed node, but it is specified in the models section', exc_str)
@use_profile('postgres')
def test_postgres_deprecations(self):
self.assertEqual(deprecations.active_deprecations, set())
self.run_dbt(strict=False)
expected = {'models-key-mismatch'}
self.assertEqual(expected, deprecations.active_deprecations)
class TestAdapterMacroDeprecation(BaseTestDeprecations):
@property
def models(self):
return self.dir('adapter-macro-models')
@property
def project_config(self):
return {
'config-version': 2,
'macro-paths': [self.dir('adapter-macro-macros')]
}
@use_profile('postgres')
def test_postgres_adapter_macro(self):
self.assertEqual(deprecations.active_deprecations, set())
self.run_dbt(strict=False)
expected = {'adapter-macro'}
self.assertEqual(expected, deprecations.active_deprecations)
@use_profile('postgres')
def test_postgres_adapter_macro_fail(self):
self.assertEqual(deprecations.active_deprecations, set())
with self.assertRaises(dbt.exceptions.CompilationException) as exc:
self.run_dbt(strict=True)
exc_str = ' '.join(str(exc.exception).split()) # flatten all whitespace
assert 'The "adapter_macro" macro has been deprecated' in exc_str
@use_profile('redshift')
def test_redshift_adapter_macro(self):
self.assertEqual(deprecations.active_deprecations, set())
# picked up the default -> error
with self.assertRaises(dbt.exceptions.CompilationException) as exc:
self.run_dbt(strict=False, expect_pass=False)
exc_str = ' '.join(str(exc.exception).split()) # flatten all whitespace
assert 'not allowed' in exc_str # we saw the default macro
class TestAdapterMacroDeprecationPackages(BaseTestDeprecations):
@property
def models(self):
return self.dir('adapter-macro-models-package')
@property
def project_config(self):
return {
'config-version': 2,
'macro-paths': [self.dir('adapter-macro-macros')]
}
@use_profile('postgres')
def test_postgres_adapter_macro_pkg(self):
self.assertEqual(deprecations.active_deprecations, set())
self.run_dbt(strict=False)
expected = {'adapter-macro'}
self.assertEqual(expected, deprecations.active_deprecations)
@use_profile('postgres')
def test_postgres_adapter_macro_pkg_fail(self):
self.assertEqual(deprecations.active_deprecations, set())
with self.assertRaises(dbt.exceptions.CompilationException) as exc:
self.run_dbt(strict=True)
exc_str = ' '.join(str(exc.exception).split()) # flatten all whitespace
assert 'The "adapter_macro" macro has been deprecated' in exc_str
@use_profile('redshift')
def test_redshift_adapter_macro_pkg(self):
self.assertEqual(deprecations.active_deprecations, set())
# picked up the default -> error
with self.assertRaises(dbt.exceptions.CompilationException) as exc:
self.run_dbt(strict=False, expect_pass=False)
exc_str = ' '.join(str(exc.exception).split()) # flatten all whitespace
assert 'not allowed' in exc_str # we saw the default macro
|
py | 1a42a2c55d2d5ea1bcb984429ed074fc8bf37e7d | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import setuptools
BASE_DIR = os.path.dirname(__file__)
VERSION_FILENAME = os.path.join(
BASE_DIR, "src", "opentelemetry", "test", "version.py"
)
PACKAGE_INFO = {}
with open(VERSION_FILENAME) as f:
exec(f.read(), PACKAGE_INFO)
setuptools.setup(version=PACKAGE_INFO["__version__"])
|
py | 1a42a49302e54a2093eefe2b7226982a2bc67688 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import json
import os
from parlai.core.message import Message
from parlai.core.opt import Opt
from parlai.core.teachers import FixedDialogTeacher, DialogTeacher, ParlAIDialogTeacher
from parlai.tasks.wrapper.agents import AbstractWrapperTeacher
from parlai.utils.io import PathManager
from .build import build
def get_sentence_tokenizer():
"""
Loads the nltk sentence tokenizer.
"""
try:
import nltk
except ImportError:
raise ImportError('Please install nltk (e.g. pip install nltk).')
# nltk-specific setup
st_path = 'tokenizers/punkt/{0}.pickle'.format('english')
try:
sent_tok = nltk.data.load(st_path)
except LookupError:
nltk.download('punkt')
sent_tok = nltk.data.load(st_path)
return sent_tok
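# Illustrative sketch, not part of the original file: the nltk tokenizer
# returned above splits raw text into sentence strings, which is how the
# Sentence teachers below build their label candidates.
def _example_sentence_split():
    sent_tok = get_sentence_tokenizer()
    # e.g. -> ['First sentence.', 'Second one?']
    return sent_tok.tokenize('First sentence. Second one?')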
class IndexTeacher(FixedDialogTeacher):
"""
Hand-written SQuAD teacher, which loads the json squad data and implements its own
`act()` method for interacting with student agent, rather than inheriting from the
core Dialog Teacher. This code is here as an example of rolling your own without
inheritance.
This teacher also provides access to the "answer_start" indices that specify the
location of the answer in the context.
"""
def __init__(self, opt, shared=None):
build(opt)
super().__init__(opt, shared)
if self.datatype.startswith('train'):
suffix = 'train'
else:
suffix = 'dev'
datapath = os.path.join(opt['datapath'], 'SQuAD', suffix + '-v1.1.json')
self.data = self._setup_data(datapath)
self.id = 'squad'
self.reset()
def num_examples(self):
return len(self.examples)
def num_episodes(self):
return self.num_examples()
def get(self, episode_idx, entry_idx=None):
article_idx, paragraph_idx, qa_idx = self.examples[episode_idx]
article = self.squad[article_idx]
paragraph = article['paragraphs'][paragraph_idx]
qa = paragraph['qas'][qa_idx]
question = qa['question']
answers = []
answer_starts = []
for a in qa['answers']:
answers.append(a['text'])
answer_starts.append(a['answer_start'])
context = paragraph['context']
action = {
'id': 'squad',
'text': context + '\n' + question,
'labels': answers,
'episode_done': True,
'answer_starts': answer_starts,
}
return action
def _setup_data(self, path):
with PathManager.open(path) as data_file:
self.squad = json.load(data_file)['data']
self.examples = []
for article_idx in range(len(self.squad)):
article = self.squad[article_idx]
for paragraph_idx in range(len(article['paragraphs'])):
paragraph = article['paragraphs'][paragraph_idx]
num_questions = len(paragraph['qas'])
for qa_idx in range(num_questions):
self.examples.append((article_idx, paragraph_idx, qa_idx))
class DefaultTeacher(DialogTeacher):
"""
This version of SQuAD inherits from the core Dialog Teacher, which just requires it
to define an iterator over its data `setup_data` in order to inherit basic metrics,
a default `act` function.
For SQuAD, this does not efficiently store the paragraphs in memory.
"""
def __init__(self, opt, shared=None):
self.datatype = opt['datatype']
build(opt)
if opt['datatype'].startswith('train'):
suffix = 'train'
else:
suffix = 'dev'
opt['datafile'] = os.path.join(opt['datapath'], 'SQuAD', suffix + '-v1.1.json')
self.id = 'squad'
super().__init__(opt, shared)
def setup_data(self, path):
print('loading: ' + path)
with PathManager.open(path) as data_file:
self.squad = json.load(data_file)['data']
for article in self.squad:
# each paragraph is a context for the attached questions
for paragraph in article['paragraphs']:
# each question is an example
for qa in paragraph['qas']:
question = qa['question']
answers = tuple(a['text'] for a in qa['answers'])
context = paragraph['context']
yield (context + '\n' + question, answers), True
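# Illustrative sketch, not part of the original teacher: each item yielded by
# setup_data() above is ((text, labels), new_episode).  The helper below only
# unpacks one such item to show its shape; it makes no ParlAI calls.
def _example_unpack_squad_item(item):
    (text, answers), new_episode = item
    context, question = text.rsplit('\n', 1)
    return {'context': context, 'question': question,
            'answers': list(answers), 'new_episode': new_episode}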
class OpensquadTeacher(DialogTeacher):
"""
This version of SQuAD inherits from the core Dialog Teacher, which just requires it
to define an iterator over its data `setup_data` in order to inherit basic metrics,
a default `act` function.
Note: This teacher omits the context paragraph
"""
def __init__(self, opt, shared=None):
self.datatype = opt['datatype']
build(opt)
if opt['datatype'].startswith('train'):
suffix = 'train'
else:
suffix = 'dev'
opt['datafile'] = os.path.join(opt['datapath'], 'SQuAD', suffix + '-v1.1.json')
self.id = 'squad'
super().__init__(opt, shared)
def setup_data(self, path):
print('loading: ' + path)
with PathManager.open(path) as data_file:
self.squad = json.load(data_file)['data']
for article in self.squad:
# each paragraph is a context for the attached questions
for paragraph in article['paragraphs']:
# each question is an example
for qa in paragraph['qas']:
question = qa['question']
answers = (a['text'] for a in qa['answers'])
yield (question, answers), True
class TitleTeacher(DefaultTeacher):
"""
    This version of SQuAD inherits from the Default Teacher.
    The only difference is that the 'text' field of an observation will
    contain the title of the article separated by a newline from the
    paragraph and the query.
    Note: The title will contain underscores, as it is part of the link for
the Wikipedia page; i.e., the article is at the site:
https://en.wikipedia.org/wiki/{TITLE}
Depending on your task, you may wish to remove underscores.
"""
def __init__(self, opt, shared=None):
self.id = 'squad_title'
build(opt)
super().__init__(opt, shared)
def setup_data(self, path):
print('loading: ' + path)
with PathManager.open(path) as data_file:
self.squad = json.load(data_file)['data']
for article in self.squad:
title = article['title']
# each paragraph is a context for the attached questions
for paragraph in article['paragraphs']:
# each question is an example
for qa in paragraph['qas']:
question = qa['question']
answers = (a['text'] for a in qa['answers'])
context = paragraph['context']
yield ('\n'.join([title, context, question]), answers), True
class FulldocTeacher(ParlAIDialogTeacher):
def __init__(self, opt, shared=None):
build(opt)
opt = copy.deepcopy(opt)
if opt['datatype'].startswith('train'):
suffix = 'train'
else:
suffix = 'valid'
datafile = os.path.join(
opt['datapath'], 'SQuAD-fulldoc', "squad_fulldocs." + suffix + ":ordered"
)
opt['parlaidialogteacher_datafile'] = datafile
super().__init__(opt, shared)
self.id = 'squad-fulldoc'
self.reset()
class SentenceTeacher(IndexTeacher):
"""
Teacher where the label(s) are the sentences that contain the true answer.
Some punctuation may be removed from the context and the answer for
tokenization purposes.
If `include_context` is False, the teacher returns action dict in the
following format:
{
'context': <context>,
'text': <question>,
'labels': <sentences containing the true answer>,
'label_candidates': <all sentences in the context>,
'episode_done': True,
'answer_starts': <index of start of answer in context>
}
Otherwise, the 'text' field contains <context>\n<question> and there is
no separate context field.
"""
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
self.sent_tok = get_sentence_tokenizer()
self.include_context = opt.get('include_context', False)
@staticmethod
def add_cmdline_args(argparser):
agent = argparser.add_argument_group('SQuAD Sentence Teacher Arguments')
agent.add_argument(
'--include-context',
type='bool',
default=False,
help='include context within text instead of as a ' 'separate field',
)
def get(self, episode_idx, entry_idx=None):
article_idx, paragraph_idx, qa_idx = self.examples[episode_idx]
article = self.squad[article_idx]
paragraph = article['paragraphs'][paragraph_idx]
qa = paragraph['qas'][qa_idx]
context = paragraph['context']
question = qa['question']
answers = [a['text'] for a in qa['answers']]
# remove '.', '?', '!' from answers for proper sentence
# tokenization
edited_answers = []
for answer in answers:
new_answer = answer.replace('.', '').replace('?', '').replace('!', '')
context = context.replace(answer, new_answer)
edited_answers.append(new_answer)
edited_sentences = self.sent_tok.tokenize(context)
labels = []
label_starts = []
for sentence in edited_sentences:
for answer in edited_answers:
if answer in sentence and sentence not in labels:
labels.append(sentence)
label_starts.append(context.index(sentence))
break
action = {
'context': context,
'text': question,
'labels': labels,
'label_candidates': edited_sentences,
'episode_done': True,
'answer_starts': label_starts,
}
if self.include_context:
action['text'] = action['context'] + '\n' + action['text']
del action['context']
return action
class FulldocsentenceTeacher(FulldocTeacher):
"""
Teacher which contains the question as the text, the sentences as the label
candidates, and the label as the sentence containing the answer.
Some punctuation may be removed for tokenization purposes.
If `include_context` is False, the teacher returns action dict in the
following format:
{
'context': <context>,
'text': <question>,
'labels': <sentences containing the true answer>,
'label_candidates': <all sentences in the context>,
'episode_done': True,
'answer_starts': <index of start of answer in context>
}
Otherwise, the 'text' field contains <context>\n<question> and there is
no separate context field.
"""
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
self.sent_tok = get_sentence_tokenizer()
self.include_context = opt.get('include_context', False)
@staticmethod
def add_cmdline_args(argparser):
agent = argparser.add_argument_group('SQuAD Fulldoc Sentence Teacher Arguments')
agent.add_argument(
'--include-context',
type='bool',
default=False,
help='include context within text instead of as a ' 'separate field',
)
def get(self, episode_idx, entry_idx=None):
action = {}
episode = self.episodes[episode_idx][entry_idx]
context = ' '.join(episode['text'].split('\n')[:-1]).replace(
'\xa0', ' '
) # get rid of non breaking space characters
question = episode['text'].split('\n')[-1]
label_field = 'labels' if 'labels' in episode else 'eval_labels'
answers = []
for answer in episode[label_field]:
new_answer = answer.replace('.', '').replace('?', '').replace('!', '')
context = context.replace(answer, new_answer)
answers.append(new_answer)
sentences = self.sent_tok.tokenize(context)
labels = []
label_starts = []
for sentence in sentences:
for answer in answers:
if answer in sentence and sentence not in labels:
labels.append(sentence)
label_starts.append(context.index(sentence))
action = {
'context': context,
'text': question,
label_field: labels,
'answer_starts': label_starts,
'label_candidates': sentences,
'episode_done': episode['episode_done'],
}
if self.include_context:
action['text'] = action['context'] + '\n' + action['text']
del action['context']
return action
class SquadQATeacher(AbstractWrapperTeacher):
"""
Wrapper Teacher over SQuAD to get only the passage, and ignore the question.
"""
@classmethod
def add_cmdline_args(cls, parser):
parser.set_defaults(wrapper_task='squad')
def __init__(self, opt: Opt, shared=None):
super().__init__(opt, shared)
def _edit_action(self, act: Message) -> Message:
"""
        SQuAD returns both the passage and the question; only the passage is required for this task.
"""
passage = act['text'].split('\n')[0]
act.force_set('text', passage)
return act
|
py | 1a42a4bf1ed34b6d4dbcc807f4df07b3f1a3941b | # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import unicode_literals
from botocore.vendored import requests
from botocore.vendored.requests.packages import urllib3
def _exception_from_packed_args(exception_cls, args=None, kwargs=None):
# This is helpful for reducing Exceptions that only accept kwargs as
# only positional arguments can be provided for __reduce__
# Ideally, this would also be a class method on the BotoCoreError
# but instance methods cannot be pickled.
if args is None:
args = ()
if kwargs is None:
kwargs = {}
return exception_cls(*args, **kwargs)
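# Illustrative sketch, not part of botocore: BotoCoreError.__reduce__ (below)
# delegates to _exception_from_packed_args so that keyword-only exceptions
# survive pickling.  A round trip looks roughly like this.
def _example_pickle_round_trip():
    import pickle
    err = DataNotFoundError(data_path='foo/bar')   # defined later in this module
    restored = pickle.loads(pickle.dumps(err))
    return isinstance(restored, DataNotFoundError) and restored.kwargs == err.kwargs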
class BotoCoreError(Exception):
"""
The base exception class for BotoCore exceptions.
:ivar msg: The descriptive message associated with the error.
"""
fmt = 'An unspecified error occurred'
def __init__(self, **kwargs):
msg = self.fmt.format(**kwargs)
Exception.__init__(self, msg)
self.kwargs = kwargs
def __reduce__(self):
return _exception_from_packed_args, (self.__class__, None, self.kwargs)
class DataNotFoundError(BotoCoreError):
"""
The data associated with a particular path could not be loaded.
:ivar data_path: The data path that the user attempted to load.
"""
fmt = 'Unable to load data for: {data_path}'
class UnknownServiceError(DataNotFoundError):
"""Raised when trying to load data for an unknown service.
:ivar service_name: The name of the unknown service.
"""
fmt = (
"Unknown service: '{service_name}'. Valid service names are: "
"{known_service_names}")
class ApiVersionNotFoundError(BotoCoreError):
"""
The data associated with either the API version or a compatible one
could not be loaded.
:ivar data_path: The data path that the user attempted to load.
:ivar api_version: The API version that the user attempted to load.
"""
fmt = 'Unable to load data {data_path} for: {api_version}'
class HTTPClientError(BotoCoreError):
fmt = 'An HTTP Client raised an unhandled exception: {error}'
def __init__(self, request=None, response=None, **kwargs):
self.request = request
self.response = response
super(HTTPClientError, self).__init__(**kwargs)
def __reduce__(self):
return _exception_from_packed_args, (
self.__class__, (self.request, self.response), self.kwargs)
class ConnectionError(BotoCoreError):
fmt = 'An HTTP Client failed to establish a connection: {error}'
class InvalidIMDSEndpointError(BotoCoreError):
fmt = 'Invalid endpoint EC2 Instance Metadata endpoint: {endpoint}'
class EndpointConnectionError(ConnectionError):
fmt = 'Could not connect to the endpoint URL: "{endpoint_url}"'
class SSLError(ConnectionError, requests.exceptions.SSLError):
fmt = 'SSL validation failed for {endpoint_url} {error}'
class ConnectionClosedError(HTTPClientError):
fmt = (
'Connection was closed before we received a valid response '
'from endpoint URL: "{endpoint_url}".')
class ReadTimeoutError(HTTPClientError, requests.exceptions.ReadTimeout,
urllib3.exceptions.ReadTimeoutError):
fmt = 'Read timeout on endpoint URL: "{endpoint_url}"'
class ConnectTimeoutError(ConnectionError, requests.exceptions.ConnectTimeout):
fmt = 'Connect timeout on endpoint URL: "{endpoint_url}"'
class ProxyConnectionError(ConnectionError, requests.exceptions.ProxyError):
fmt = 'Failed to connect to proxy URL: "{proxy_url}"'
class NoCredentialsError(BotoCoreError):
"""
No credentials could be found.
"""
fmt = 'Unable to locate credentials'
class PartialCredentialsError(BotoCoreError):
"""
Only partial credentials were found.
:ivar cred_var: The missing credential variable name.
"""
fmt = 'Partial credentials found in {provider}, missing: {cred_var}'
class CredentialRetrievalError(BotoCoreError):
"""
Error attempting to retrieve credentials from a remote source.
:ivar provider: The name of the credential provider.
:ivar error_msg: The msg explaining why credentials could not be
retrieved.
"""
fmt = 'Error when retrieving credentials from {provider}: {error_msg}'
class UnknownSignatureVersionError(BotoCoreError):
"""
Requested Signature Version is not known.
:ivar signature_version: The name of the requested signature version.
"""
fmt = 'Unknown Signature Version: {signature_version}.'
class ServiceNotInRegionError(BotoCoreError):
"""
The service is not available in requested region.
:ivar service_name: The name of the service.
:ivar region_name: The name of the region.
"""
fmt = 'Service {service_name} not available in region {region_name}'
class BaseEndpointResolverError(BotoCoreError):
"""Base error for endpoint resolving errors.
Should never be raised directly, but clients can catch
this exception if they want to generically handle any errors
during the endpoint resolution process.
"""
class NoRegionError(BaseEndpointResolverError):
"""No region was specified."""
fmt = 'You must specify a region.'
class UnknownEndpointError(BaseEndpointResolverError, ValueError):
"""
Could not construct an endpoint.
:ivar service_name: The name of the service.
:ivar region_name: The name of the region.
"""
fmt = (
'Unable to construct an endpoint for '
'{service_name} in region {region_name}')
class UnknownFIPSEndpointError(BaseEndpointResolverError):
"""
Could not construct a FIPS endpoint.
:ivar service_name: The name of the service.
:ivar region_name: The name of the region.
"""
fmt = (
'The provided FIPS pseudo-region "{region_name}" is not known for '
'the service "{service_name}". A FIPS compliant endpoint cannot be '
'constructed.'
)
class ProfileNotFound(BotoCoreError):
"""
The specified configuration profile was not found in the
configuration file.
:ivar profile: The name of the profile the user attempted to load.
"""
fmt = 'The config profile ({profile}) could not be found'
class ConfigParseError(BotoCoreError):
"""
The configuration file could not be parsed.
:ivar path: The path to the configuration file.
"""
fmt = 'Unable to parse config file: {path}'
class ConfigNotFound(BotoCoreError):
"""
The specified configuration file could not be found.
:ivar path: The path to the configuration file.
"""
fmt = 'The specified config file ({path}) could not be found.'
class MissingParametersError(BotoCoreError):
"""
One or more required parameters were not supplied.
:ivar object: The object that has missing parameters.
This can be an operation or a parameter (in the
case of inner params). The str() of this object
will be used so it doesn't need to implement anything
other than str().
:ivar missing: The names of the missing parameters.
"""
fmt = ('The following required parameters are missing for '
'{object_name}: {missing}')
class ValidationError(BotoCoreError):
"""
An exception occurred validating parameters.
Subclasses must accept a ``value`` and ``param``
argument in their ``__init__``.
:ivar value: The value that was being validated.
:ivar param: The parameter that failed validation.
:ivar type_name: The name of the underlying type.
"""
fmt = ("Invalid value ('{value}') for param {param} "
"of type {type_name} ")
class ParamValidationError(BotoCoreError):
fmt = 'Parameter validation failed:\n{report}'
# These exceptions subclass from ValidationError so that code
# can just 'except ValidationError' to catch any possibly validation
# error.
class UnknownKeyError(ValidationError):
"""
Unknown key in a struct parameter.
:ivar value: The value that was being checked.
:ivar param: The name of the parameter.
:ivar choices: The valid choices the value can be.
"""
fmt = ("Unknown key '{value}' for param '{param}'. Must be one "
"of: {choices}")
class RangeError(ValidationError):
"""
A parameter value was out of the valid range.
:ivar value: The value that was being checked.
:ivar param: The parameter that failed validation.
:ivar min_value: The specified minimum value.
:ivar max_value: The specified maximum value.
"""
fmt = ('Value out of range for param {param}: '
'{min_value} <= {value} <= {max_value}')
class UnknownParameterError(ValidationError):
"""
Unknown top level parameter.
:ivar name: The name of the unknown parameter.
:ivar operation: The name of the operation.
:ivar choices: The valid choices the parameter name can be.
"""
fmt = (
"Unknown parameter '{name}' for operation {operation}. Must be one "
"of: {choices}"
)
class InvalidRegionError(ValidationError, ValueError):
"""
Invalid region_name provided to client or resource.
:ivar region_name: region_name that was being validated.
"""
fmt = (
"Provided region_name '{region_name}' doesn't match a supported format."
)
class AliasConflictParameterError(ValidationError):
"""
Error when an alias is provided for a parameter as well as the original.
:ivar original: The name of the original parameter.
:ivar alias: The name of the alias
:ivar operation: The name of the operation.
"""
fmt = (
"Parameter '{original}' and its alias '{alias}' were provided "
"for operation {operation}. Only one of them may be used."
)
class UnknownServiceStyle(BotoCoreError):
"""
Unknown style of service invocation.
:ivar service_style: The style requested.
"""
fmt = 'The service style ({service_style}) is not understood.'
class PaginationError(BotoCoreError):
fmt = 'Error during pagination: {message}'
class OperationNotPageableError(BotoCoreError):
fmt = 'Operation cannot be paginated: {operation_name}'
class ChecksumError(BotoCoreError):
"""The expected checksum did not match the calculated checksum.
"""
fmt = ('Checksum {checksum_type} failed, expected checksum '
'{expected_checksum} did not match calculated checksum '
'{actual_checksum}.')
class UnseekableStreamError(BotoCoreError):
"""Need to seek a stream, but stream does not support seeking.
"""
fmt = ('Need to rewind the stream {stream_object}, but stream '
'is not seekable.')
class WaiterError(BotoCoreError):
"""Waiter failed to reach desired state."""
fmt = 'Waiter {name} failed: {reason}'
def __init__(self, name, reason, last_response):
super(WaiterError, self).__init__(name=name, reason=reason)
self.last_response = last_response
class IncompleteReadError(BotoCoreError):
"""HTTP response did not return expected number of bytes."""
fmt = ('{actual_bytes} read, but total bytes '
'expected is {expected_bytes}.')
class InvalidExpressionError(BotoCoreError):
"""Expression is either invalid or too complex."""
fmt = 'Invalid expression {expression}: Only dotted lookups are supported.'
class UnknownCredentialError(BotoCoreError):
"""Tried to insert before/after an unregistered credential type."""
fmt = 'Credential named {name} not found.'
class WaiterConfigError(BotoCoreError):
"""Error when processing waiter configuration."""
fmt = 'Error processing waiter config: {error_msg}'
class UnknownClientMethodError(BotoCoreError):
"""Error when trying to access a method on a client that does not exist."""
fmt = 'Client does not have method: {method_name}'
class UnsupportedSignatureVersionError(BotoCoreError):
"""Error when trying to use an unsupported Signature Version."""
fmt = 'Signature version is not supported: {signature_version}'
class ClientError(Exception):
MSG_TEMPLATE = (
'An error occurred ({error_code}) when calling the {operation_name} '
'operation{retry_info}: {error_message}')
def __init__(self, error_response, operation_name):
retry_info = self._get_retry_info(error_response)
error = error_response.get('Error', {})
msg = self.MSG_TEMPLATE.format(
error_code=error.get('Code', 'Unknown'),
error_message=error.get('Message', 'Unknown'),
operation_name=operation_name,
retry_info=retry_info,
)
super(ClientError, self).__init__(msg)
self.response = error_response
self.operation_name = operation_name
def _get_retry_info(self, response):
retry_info = ''
if 'ResponseMetadata' in response:
metadata = response['ResponseMetadata']
if metadata.get('MaxAttemptsReached', False):
if 'RetryAttempts' in metadata:
retry_info = (' (reached max retries: %s)' %
metadata['RetryAttempts'])
return retry_info
def __reduce__(self):
# Subclasses of ClientError's are dynamically generated and
# cannot be pickled unless they are attributes of a
# module. So at the very least return a ClientError back.
return ClientError, (self.response, self.operation_name)
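# Illustrative only (not part of botocore): the ``error_response`` dict passed to
# ClientError is roughly shaped like the following; the error code and message are
# made-up examples and the exact keys vary by service.
#
#   {
#       'Error': {'Code': 'NoSuchBucket', 'Message': 'The specified bucket does not exist'},
#       'ResponseMetadata': {'MaxAttemptsReached': True, 'RetryAttempts': 2},
#   }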
class EventStreamError(ClientError):
pass
class UnsupportedTLSVersionWarning(Warning):
"""Warn when an openssl version that uses TLS 1.2 is required"""
pass
class ImminentRemovalWarning(Warning):
pass
class InvalidDNSNameError(BotoCoreError):
"""Error when virtual host path is forced on a non-DNS compatible bucket"""
fmt = (
'Bucket named {bucket_name} is not DNS compatible. Virtual '
'hosted-style addressing cannot be used. The addressing style '
'can be configured by removing the addressing_style value '
'or setting that value to \'path\' or \'auto\' in the AWS Config '
'file or in the botocore.client.Config object.'
)
class InvalidS3AddressingStyleError(BotoCoreError):
"""Error when an invalid path style is specified"""
fmt = (
'S3 addressing style {s3_addressing_style} is invalid. Valid options '
'are: \'auto\', \'virtual\', and \'path\''
)
class UnsupportedS3ArnError(BotoCoreError):
"""Error when S3 ARN provided to Bucket parameter is not supported"""
fmt = (
'S3 ARN {arn} provided to "Bucket" parameter is invalid. Only '
'ARNs for S3 access-points are supported.'
)
class UnsupportedS3ControlArnError(BotoCoreError):
"""Error when S3 ARN provided to S3 control parameter is not supported"""
fmt = (
'S3 ARN "{arn}" provided is invalid for this operation. {msg}'
)
class InvalidHostLabelError(BotoCoreError):
"""Error when an invalid host label would be bound to an endpoint"""
fmt = (
'Invalid host label to be bound to the hostname of the endpoint: '
'"{label}".'
)
class UnsupportedOutpostResourceError(BotoCoreError):
"""Error when S3 Outpost ARN provided to Bucket parameter is incomplete"""
fmt = (
'S3 Outpost ARN resource "{resource_name}" provided to "Bucket" '
'parameter is invalid. Only ARNs for S3 Outpost arns with an '
'access-point sub-resource are supported.'
)
class UnsupportedS3ConfigurationError(BotoCoreError):
"""Error when an unsupported configuration is used with access-points"""
fmt = (
'Unsupported configuration when using S3: {msg}'
)
class UnsupportedS3AccesspointConfigurationError(BotoCoreError):
"""Error when an unsupported configuration is used with access-points"""
fmt = (
'Unsupported configuration when using S3 access-points: {msg}'
)
class InvalidEndpointDiscoveryConfigurationError(BotoCoreError):
"""Error when invalid value supplied for endpoint_discovery_enabled"""
fmt = (
'Unsupported configuration value for endpoint_discovery_enabled. '
'Expected one of ("true", "false", "auto") but got {config_value}.'
)
class UnsupportedS3ControlConfigurationError(BotoCoreError):
"""Error when an unsupported configuration is used with S3 Control"""
fmt = (
'Unsupported configuration when using S3 Control: {msg}'
)
class InvalidRetryConfigurationError(BotoCoreError):
"""Error when invalid retry configuration is specified"""
fmt = (
'Cannot provide retry configuration for "{retry_config_option}". '
'Valid retry configuration options are: \'max_attempts\''
)
class InvalidMaxRetryAttemptsError(InvalidRetryConfigurationError):
"""Error when invalid retry configuration is specified"""
fmt = (
'Value provided to "max_attempts": {provided_max_attempts} must '
'be an integer greater than or equal to {min_value}.'
)
class InvalidRetryModeError(InvalidRetryConfigurationError):
"""Error when invalid retry mode configuration is specified"""
fmt = (
'Invalid value provided to "mode": "{provided_retry_mode}" must '
'be one of: "legacy", "standard", "adaptive"'
)
class InvalidS3UsEast1RegionalEndpointConfigError(BotoCoreError):
"""Error for invalid s3 us-east-1 regional endpoints configuration"""
fmt = (
'S3 us-east-1 regional endpoint option '
'{s3_us_east_1_regional_endpoint_config} is '
'invalid. Valid options are: "legacy", "regional"'
)
class InvalidSTSRegionalEndpointsConfigError(BotoCoreError):
"""Error when invalid sts regional endpoints configuration is specified"""
fmt = (
'STS regional endpoints option {sts_regional_endpoints_config} is '
'invalid. Valid options are: "legacy", "regional"'
)
class StubResponseError(BotoCoreError):
fmt = 'Error getting response stub for operation {operation_name}: {reason}'
class StubAssertionError(StubResponseError, AssertionError):
pass
class UnStubbedResponseError(StubResponseError):
pass
class InvalidConfigError(BotoCoreError):
fmt = '{error_msg}'
class InfiniteLoopConfigError(InvalidConfigError):
fmt = (
'Infinite loop in credential configuration detected. Attempting to '
'load from profile {source_profile} which has already been visited. '
'Visited profiles: {visited_profiles}'
)
class RefreshWithMFAUnsupportedError(BotoCoreError):
fmt = 'Cannot refresh credentials: MFA token required.'
class MD5UnavailableError(BotoCoreError):
fmt = "This system does not support MD5 generation."
class MetadataRetrievalError(BotoCoreError):
fmt = "Error retrieving metadata: {error_msg}"
class UndefinedModelAttributeError(Exception):
pass
class MissingServiceIdError(UndefinedModelAttributeError):
fmt = (
"The model being used for the service {service_name} is missing the "
"serviceId metadata property, which is required."
)
def __init__(self, **kwargs):
msg = self.fmt.format(**kwargs)
Exception.__init__(self, msg)
self.kwargs = kwargs
class SSOError(BotoCoreError):
fmt = "An unspecified error happened when resolving SSO credentials"
class SSOTokenLoadError(SSOError):
fmt = "Error loading SSO Token: {error_msg}"
class UnauthorizedSSOTokenError(SSOError):
fmt = (
"The SSO session associated with this profile has expired or is "
"otherwise invalid. To refresh this SSO session run aws sso login "
"with the corresponding profile."
)
class CapacityNotAvailableError(BotoCoreError):
fmt = (
'Insufficient request capacity available.'
)
class InvalidProxiesConfigError(BotoCoreError):
fmt = (
'Invalid configuration value(s) provided for proxies_config.'
)
|
py | 1a42a4d85b151c12a95e0f7f6d2c877c8a24cacf | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the msvs.py file. """
import nnabt.generator.msvs as msvs
import unittest
try:
from StringIO import StringIO # Python 2
except ImportError:
from io import StringIO # Python 3
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO()
def test_GetLibraries(self):
self.assertEqual(msvs._GetLibraries({}), [])
self.assertEqual(msvs._GetLibraries({"libraries": []}), [])
self.assertEqual(
msvs._GetLibraries({"other": "foo", "libraries": ["a.lib"]}), ["a.lib"]
)
self.assertEqual(msvs._GetLibraries({"libraries": ["-la"]}), ["a.lib"])
self.assertEqual(
msvs._GetLibraries(
{
"libraries": [
"a.lib",
"b.lib",
"c.lib",
"-lb.lib",
"-lb.lib",
"d.lib",
"a.lib",
]
}
),
["c.lib", "b.lib", "d.lib", "a.lib"],
)
if __name__ == "__main__":
unittest.main()
|
py | 1a42a50f8b85194abaedfe3f48003757cbb29411 | """
==========================================
Using cloudknot to run pyAFQ on AWS batch:
==========================================
One of the purposes of ``pyAFQ`` is to analyze large-scale openly-available datasets,
such as those in the `Human Connectome Project <https://www.humanconnectome.org/>`_.
To analyze these datasets, large amounts of compute are needed. One way to gain access
to massive computational power is by using cloud computing. Here, we will demonstrate
how to use ``pyAFQ`` in the Amazon Web Services cloud.
We will rely on the `AWS Batch Service <https://aws.amazon.com/batch/>`_ , and we will
submit work into AWS Batch using software that our group developed called
`Cloudknot <https://nrdg.github.io/cloudknot/>`_.
"""
##########################################################################
# Import cloudknot and set the AWS region within which computations will take place. Setting a
# region is important, because if the data that you are analyzing is stored in
# `AWS S3 <https://aws.amazon.com/s3/>`_ in a particular region, it is best to run the computation
# in that region as well. That is because AWS charges for inter-region transfer of data.
import cloudknot as ck
ck.set_region('us-east-1')
##########################################################################
# Define the function to use
# --------------------------
# ``Cloudknot`` uses the single program multiple data paradigm of computing. This means that the same
# function will be run on multiple different inputs. For example, a ``pyAFQ`` processing function run
# on multiple different subjects in a dataset.
# Below, we define the function that we will use. Notice that ``Cloudknot`` functions include the
# import statements of the dependencies used. This is necessary so that ``Cloudknot`` knows
# what dependencies to install into AWS Batch to run this function.
def afq_process_subject(subject):
# define a function that each job will run
# In this case, each process does a single subject
import logging
import s3fs
# all imports must be at the top of the function
# cloudknot installs the appropriate packages from pip
import AFQ.data as afqd
import AFQ.api as api
import AFQ.definitions.mask as afm
# set logging level to your choice
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
# Download the given subject to your local machine from s3
study_ixi = afqd.S3BIDSStudy(
"my_study",
"my_study_bucket",
"my_study_prefix",
subjects=[subject],
anon=False)
study_ixi.download(
"local_bids_dir",
include_derivs=["pipeline_name"])
# you can optionally provide your own segmentation file
# in this case, we look for a file with suffix 'seg'
# in the 'pipeline_name' pipeline,
# and we consider all non-zero labels to be a part of the brain
brain_mask = afm.LabelledMaskFile(
'seg', {'scope': 'pipeline_name'}, exclusive_labels=[0])
# define the api AFQ object
myafq = api.AFQ(
        "local_bids_dir",
dmriprep="pipeline_name",
brain_mask=brain_mask,
viz_backend='plotly', # this will generate both interactive html and GIFs
scalars=["dki_fa", "dki_md"])
    # export_all runs the entire pipeline and creates many useful derivatives
myafq.export_all()
# upload the results to some location on s3
myafq.upload_to_s3(
s3fs.S3FileSystem(),
f"my_study_bucket/my_study_prefix/derivatives/afq")
##########################################################################
# Here we provide a list of subjects that we have selected to process
# to randomly select 3 subjects without replacement, instead do:
# subjects = [[1], [2], [3]]
# see the docstring for S3BIDSStudy.__init__ for more information
subjects = [123456, 123457, 123458]
##########################################################################
# Defining a ``Knot`` instance
# ---------------------------------
# We instantiate a class instance of the :class:`ck.Knot` class. This object will be used to run your jobs.
# The object is instantiated with the `'AmazonS3FullAccess'` policy, so that it can write the results
# out to S3, into a bucket that you have write permissions on.
# Setting the `bid_percentage` key-word makes AWS Batch use
# `spot EC2 instances <https://aws.amazon.com/ec2/spot/>`_ for the computation.
# This can result in substantial cost-savings, as spot compute instances can cost
# much less than on-demand instances. However, note that spot instances can also
# be evicted, so if completing all of the work is very time-sensitive, do not set this
# key-word argument. Using the `image_github_installs` key-word argument will
# install pyAFQ from GitHub. You can also specify other forks and branches to
# install from.
knot = ck.Knot(
name='afq_process_subject-201009-0',
func=afq_process_subject,
base_image='python:3.8',
image_github_installs="https://github.com/yeatmanlab/pyAFQ.git",
pars_policies=('AmazonS3FullAccess',),
bid_percentage=100)
##########################################################################
# Launching the computation
# --------------------------------
# The :meth:`map` method of the :class:`Knot` object maps each of the inputs provided
# as a sequence onto the function and executes the function on each one of them in
# parallel.
result_futures = knot.map(subjects)
##########################################################################
# Once computations have started, you can call the following
# function to view the progress of jobs::
#
# knot.view_jobs()
#
# You can also view the status of a specific job::
#
# knot.jobs[0].status
##########################################################################
# When all jobs are finished, remember to use the :meth:`clobber` method to
# destroy all of the AWS resources created by the :class:`Knot`
result_futures.result()
knot.clobber(clobber_pars=True, clobber_repo=True, clobber_image=True)
##########################################################################
# In a second :class:`Knot` object, we use a function that takes the resulting profiles of each subject
# and combines them into one csv file.
def afq_combine_profiles(dummy_argument):
from AFQ.api import download_and_combine_afq_profiles
download_and_combine_afq_profiles(
"temp", "my_study_bucket", "my_study_prefix/derivatives/afq")
knot2 = ck.Knot(
name='afq_combine_subjects-201009-0',
func=afq_combine_profiles,
base_image='python:3.8',
image_github_installs="https://github.com/yeatmanlab/pyAFQ.git",
pars_policies=('AmazonS3FullAccess',),
bid_percentage=100)
##########################################################################
# This knot is called with a dummy argument, which is not used within the function itself. The
# `job_type` key-word argument is used to signal to ``Cloudknot`` that only one job is submitted
# rather than the default array of jobs.
result_futures2 = knot2.map(["dummy_argument"], job_type="independent")
result_futures2.result()
knot2.clobber(clobber_pars=True, clobber_repo=True, clobber_image=True)
|
py | 1a42a6474c8b72c1cd0e3e31a09bdc24d96f4f4e | # Shell command to regenerate the Qt resource module from resources.qrc (run in a shell, not as Python):
# pyrcc4 -o resources.py resources.qrc |
py | 1a42a6ab5d04c168599c509de2edc66e1f30b7e4 | from datetime import datetime
from typing import Optional
from app.domain.user import CreateUser as DomainCreateUser
from app.domain.user import UpdateUser as DomainUpdateUser
from app.domain.user import User as DomainUser
from app.repository.database.crud import CRUDBase
from app.repository.models.users import User
from app.security import get_string_hash
from sqlalchemy.orm import Session
class UsersRepo(CRUDBase[User, DomainCreateUser, DomainUpdateUser]):
def get_by_email(self, db: Session, *, email: str) -> Optional[DomainUser]:
query = db.query(self.db_model).filter(User.email == email)
db_obj = self._filter_soft_deleted(query).first()
if not db_obj:
return None
return self._create_object(db_obj=db_obj)
def create(self, db: Session, *, obj_in: DomainCreateUser) -> DomainUser:
create_data = obj_in.dict()
create_data.pop("password")
user = self.db_model(**create_data)
if user:
user.last_login_at = datetime.utcnow()
user.password = get_string_hash(obj_in.password)
db.add(user)
db.commit()
db.refresh(user)
return self._create_object(db_obj=user)
users_repo = UsersRepo(db_model=User, domain_model=DomainUser)
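# Hedged usage sketch (not part of the original module): assumes ``db`` is an active
# SQLAlchemy Session and that DomainCreateUser accepts these fields.
#
#   new_user = users_repo.create(
#       db, obj_in=DomainCreateUser(email="user@example.com", password="secret"))
#   found = users_repo.get_by_email(db, email="user@example.com")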
|
py | 1a42a6d3601b5ac57c24e09fde2ce5d0955f73db | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 19-4-24 下午6:42
# @Author : MaybeShewill-CV
# @Site : https://github.com/MaybeShewill-CV/lanenet-lane-detection
# @File : vgg16_based_fcn.py
# @IDE: PyCharm
"""
Implement VGG16 based fcn net for semantic segmentation
"""
import collections
import tensorflow as tf
from config import global_config
from semantic_segmentation_zoo import cnn_basenet
CFG = global_config.cfg
class VGG16FCN(cnn_basenet.CNNBaseModel):
"""
VGG 16 based fcn net for semantic segmentation
"""
def __init__(self, phase):
"""
"""
super(VGG16FCN, self).__init__()
self._phase = phase
self._is_training = self._is_net_for_training()
self._net_intermediate_results = collections.OrderedDict()
def _is_net_for_training(self):
"""
if the net is used for training or not
:return:
"""
if isinstance(self._phase, tf.Tensor):
phase = self._phase
else:
phase = tf.constant(self._phase, dtype=tf.string)
return tf.equal(phase, tf.constant('train', dtype=tf.string))
def _vgg16_conv_stage(self, input_tensor, k_size, out_dims, name,
stride=1, pad='SAME', need_layer_norm=True):
"""
stack conv and activation in vgg16
:param input_tensor:
:param k_size:
:param out_dims:
:param name:
:param stride:
:param pad:
:param need_layer_norm:
:return:
"""
with tf.variable_scope(name):
conv = self.conv2d(
inputdata=input_tensor, out_channel=out_dims,
kernel_size=k_size, stride=stride,
use_bias=False, padding=pad, name='conv'
)
if need_layer_norm:
bn = self.layerbn(inputdata=conv, is_training=self._is_training, name='bn')
relu = self.relu(inputdata=bn, name='relu')
else:
relu = self.relu(inputdata=conv, name='relu')
return relu
def _decode_block(self, input_tensor, previous_feats_tensor,
out_channels_nums, name, kernel_size=4,
stride=2, use_bias=False,
previous_kernel_size=4, need_activate=True):
"""
:param input_tensor:
:param previous_feats_tensor:
:param out_channels_nums:
:param kernel_size:
:param previous_kernel_size:
:param use_bias:
:param stride:
:param name:
:return:
"""
with tf.variable_scope(name_or_scope=name):
deconv_weights_stddev = tf.sqrt(
tf.divide(tf.constant(2.0, tf.float32),
tf.multiply(tf.cast(previous_kernel_size * previous_kernel_size, tf.float32),
tf.cast(tf.shape(input_tensor)[3], tf.float32)))
)
deconv_weights_init = tf.truncated_normal_initializer(
mean=0.0, stddev=deconv_weights_stddev)
deconv = self.deconv2d(
inputdata=input_tensor, out_channel=out_channels_nums, kernel_size=kernel_size,
stride=stride, use_bias=use_bias, w_init=deconv_weights_init,
name='deconv'
)
deconv = self.layerbn(inputdata=deconv, is_training=self._is_training, name='deconv_bn')
deconv = self.relu(inputdata=deconv, name='deconv_relu')
fuse_feats = tf.concat(
[previous_feats_tensor, deconv],
axis=-1, name='fuse_feats'
)
conv_weights_stddev = tf.sqrt(
tf.divide(tf.constant(2.0, tf.float32),
tf.multiply(tf.cast(kernel_size * kernel_size, tf.float32),
tf.cast(tf.shape(fuse_feats)[3], tf.float32)))
)
conv_weights_init = tf.truncated_normal_initializer(
mean=0.0, stddev=conv_weights_stddev)
fuse_feats = self.conv2d(
inputdata=fuse_feats,
out_channel=out_channels_nums,
kernel_size=3,
padding='SAME',
stride=1,
w_init=conv_weights_init,
use_bias=use_bias,
name='fuse_conv'
)
if need_activate:
fuse_feats = self.layerbn(
inputdata=fuse_feats, is_training=self._is_training, name='fuse_gn'
)
fuse_feats = self.relu(inputdata=fuse_feats, name='fuse_relu')
return fuse_feats
def _vgg16_fcn_encode(self, input_tensor, name):
"""
:param input_tensor:
:param name:
:return:
"""
with tf.variable_scope(name_or_scope=name):
# encode stage 1
conv_1_1 = self._vgg16_conv_stage(
input_tensor=input_tensor, k_size=3,
out_dims=64, name='conv1_1',
need_layer_norm=True
)
conv_1_2 = self._vgg16_conv_stage(
input_tensor=conv_1_1, k_size=3,
out_dims=64, name='conv1_2',
need_layer_norm=True
)
self._net_intermediate_results['encode_stage_1_share'] = {
'data': conv_1_2,
'shape': conv_1_2.get_shape().as_list()
}
# encode stage 2
pool1 = self.maxpooling(
inputdata=conv_1_2, kernel_size=2,
stride=2, name='pool1'
)
conv_2_1 = self._vgg16_conv_stage(
input_tensor=pool1, k_size=3,
out_dims=128, name='conv2_1',
need_layer_norm=True
)
conv_2_2 = self._vgg16_conv_stage(
input_tensor=conv_2_1, k_size=3,
out_dims=128, name='conv2_2',
need_layer_norm=True
)
self._net_intermediate_results['encode_stage_2_share'] = {
'data': conv_2_2,
'shape': conv_2_2.get_shape().as_list()
}
# encode stage 3
pool2 = self.maxpooling(
inputdata=conv_2_2, kernel_size=2,
stride=2, name='pool2'
)
conv_3_1 = self._vgg16_conv_stage(
input_tensor=pool2, k_size=3,
out_dims=256, name='conv3_1',
need_layer_norm=True
)
conv_3_2 = self._vgg16_conv_stage(
input_tensor=conv_3_1, k_size=3,
out_dims=256, name='conv3_2',
need_layer_norm=True
)
conv_3_3 = self._vgg16_conv_stage(
input_tensor=conv_3_2, k_size=3,
out_dims=256, name='conv3_3',
need_layer_norm=True
)
self._net_intermediate_results['encode_stage_3_share'] = {
'data': conv_3_3,
'shape': conv_3_3.get_shape().as_list()
}
# encode stage 4
pool3 = self.maxpooling(
inputdata=conv_3_3, kernel_size=2,
stride=2, name='pool3'
)
conv_4_1 = self._vgg16_conv_stage(
input_tensor=pool3, k_size=3,
out_dims=512, name='conv4_1',
need_layer_norm=True
)
conv_4_2 = self._vgg16_conv_stage(
input_tensor=conv_4_1, k_size=3,
out_dims=512, name='conv4_2',
need_layer_norm=True
)
conv_4_3 = self._vgg16_conv_stage(
input_tensor=conv_4_2, k_size=3,
out_dims=512, name='conv4_3',
need_layer_norm=True
)
self._net_intermediate_results['encode_stage_4_share'] = {
'data': conv_4_3,
'shape': conv_4_3.get_shape().as_list()
}
# encode stage 5 for binary segmentation
pool4 = self.maxpooling(
inputdata=conv_4_3, kernel_size=2,
stride=2, name='pool4'
)
conv_5_1_binary = self._vgg16_conv_stage(
input_tensor=pool4, k_size=3,
out_dims=512, name='conv5_1_binary',
need_layer_norm=True
)
conv_5_2_binary = self._vgg16_conv_stage(
input_tensor=conv_5_1_binary, k_size=3,
out_dims=512, name='conv5_2_binary',
need_layer_norm=True
)
conv_5_3_binary = self._vgg16_conv_stage(
input_tensor=conv_5_2_binary, k_size=3,
out_dims=512, name='conv5_3_binary',
need_layer_norm=True
)
self._net_intermediate_results['encode_stage_5_binary'] = {
'data': conv_5_3_binary,
'shape': conv_5_3_binary.get_shape().as_list()
}
# encode stage 5 for instance segmentation
conv_5_1_instance = self._vgg16_conv_stage(
input_tensor=pool4, k_size=3,
out_dims=512, name='conv5_1_instance',
need_layer_norm=True
)
conv_5_2_instance = self._vgg16_conv_stage(
input_tensor=conv_5_1_instance, k_size=3,
out_dims=512, name='conv5_2_instance',
need_layer_norm=True
)
conv_5_3_instance = self._vgg16_conv_stage(
input_tensor=conv_5_2_instance, k_size=3,
out_dims=512, name='conv5_3_instance',
need_layer_norm=True
)
self._net_intermediate_results['encode_stage_5_instance'] = {
'data': conv_5_3_instance,
'shape': conv_5_3_instance.get_shape().as_list()
}
return
def _vgg16_fcn_decode(self, name):
"""
:return:
"""
with tf.variable_scope(name):
# decode part for binary segmentation
with tf.variable_scope(name_or_scope='binary_seg_decode'):
decode_stage_5_binary = self._net_intermediate_results['encode_stage_5_binary']['data']
decode_stage_4_fuse = self._decode_block(
input_tensor=decode_stage_5_binary,
previous_feats_tensor=self._net_intermediate_results['encode_stage_4_share']['data'],
name='decode_stage_4_fuse', out_channels_nums=512, previous_kernel_size=3
)
decode_stage_3_fuse = self._decode_block(
input_tensor=decode_stage_4_fuse,
previous_feats_tensor=self._net_intermediate_results['encode_stage_3_share']['data'],
name='decode_stage_3_fuse', out_channels_nums=256
)
decode_stage_2_fuse = self._decode_block(
input_tensor=decode_stage_3_fuse,
previous_feats_tensor=self._net_intermediate_results['encode_stage_2_share']['data'],
name='decode_stage_2_fuse', out_channels_nums=128
)
decode_stage_1_fuse = self._decode_block(
input_tensor=decode_stage_2_fuse,
previous_feats_tensor=self._net_intermediate_results['encode_stage_1_share']['data'],
name='decode_stage_1_fuse', out_channels_nums=64
)
binary_final_logits_conv_weights_stddev = tf.sqrt(
tf.divide(tf.constant(2.0, tf.float32),
tf.multiply(4.0 * 4.0,
tf.cast(tf.shape(decode_stage_1_fuse)[3], tf.float32)))
)
binary_final_logits_conv_weights_init = tf.truncated_normal_initializer(
mean=0.0, stddev=binary_final_logits_conv_weights_stddev)
binary_final_logits = self.conv2d(
inputdata=decode_stage_1_fuse, out_channel=CFG.TRAIN.CLASSES_NUMS,
kernel_size=1, use_bias=False,
w_init=binary_final_logits_conv_weights_init,
name='binary_final_logits')
self._net_intermediate_results['binary_segment_logits'] = {
'data': binary_final_logits,
'shape': binary_final_logits.get_shape().as_list()
}
with tf.variable_scope(name_or_scope='instance_seg_decode'):
decode_stage_5_instance = self._net_intermediate_results['encode_stage_5_instance']['data']
decode_stage_4_fuse = self._decode_block(
input_tensor=decode_stage_5_instance,
previous_feats_tensor=self._net_intermediate_results['encode_stage_4_share']['data'],
name='decode_stage_4_fuse', out_channels_nums=512, previous_kernel_size=3)
decode_stage_3_fuse = self._decode_block(
input_tensor=decode_stage_4_fuse,
previous_feats_tensor=self._net_intermediate_results['encode_stage_3_share']['data'],
name='decode_stage_3_fuse', out_channels_nums=256)
decode_stage_2_fuse = self._decode_block(
input_tensor=decode_stage_3_fuse,
previous_feats_tensor=self._net_intermediate_results['encode_stage_2_share']['data'],
name='decode_stage_2_fuse', out_channels_nums=128)
decode_stage_1_fuse = self._decode_block(
input_tensor=decode_stage_2_fuse,
previous_feats_tensor=self._net_intermediate_results['encode_stage_1_share']['data'],
name='decode_stage_1_fuse', out_channels_nums=64, need_activate=False)
self._net_intermediate_results['instance_segment_logits'] = {
'data': decode_stage_1_fuse,
'shape': decode_stage_1_fuse.get_shape().as_list()
}
def build_model(self, input_tensor, name, reuse=False):
"""
:param input_tensor:
:param name:
:param reuse:
:return:
"""
with tf.variable_scope(name_or_scope=name, reuse=reuse):
# vgg16 fcn encode part
self._vgg16_fcn_encode(input_tensor=input_tensor, name='vgg16_encode_module')
# vgg16 fcn decode part
self._vgg16_fcn_decode(name='vgg16_decode_module')
return self._net_intermediate_results
if __name__ == '__main__':
"""
test code
"""
test_in_tensor = tf.placeholder(dtype=tf.float32, shape=[1, 256, 512, 3], name='input')
model = VGG16FCN(phase='train')
ret = model.build_model(test_in_tensor, name='vgg16fcn')
for layer_name, layer_info in ret.items():
print('layer name: {:s} shape: {}'.format(layer_name, layer_info['shape']))
|
py | 1a42a73ef063cb1bc6c966c987a031554260236a | import chainer
import chainer.functions as F
import chainer.links as L
class SEBlock(chainer.Chain):
"""A squeeze-and-excitation block.
This block is part of squeeze-and-excitation networks. Channel-wise
multiplication weights are inferred from and applied to input feature map.
Please refer to `the original paper
<https://arxiv.org/pdf/1709.01507.pdf>`_ for more details.
.. seealso::
:class:`chainercv.links.model.senet.SEResNet`
Args:
n_channel (int): The number of channels of the input and output array.
ratio (int): Reduction ratio of :obj:`n_channel` to the number of
hidden layer units.
"""
def __init__(self, n_channel, ratio=16):
super(SEBlock, self).__init__()
reduction_size = n_channel // ratio
with self.init_scope():
self.down = L.Linear(n_channel, reduction_size)
self.up = L.Linear(reduction_size, n_channel)
def forward(self, u):
B, C, H, W = u.shape
z = F.average(u, axis=(2, 3))
x = F.relu(self.down(z))
x = F.sigmoid(self.up(x))
x = F.broadcast_to(x, (H, W, B, C))
x = x.transpose((2, 3, 0, 1))
return u * x
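# Hedged usage sketch (not part of chainercv): feeds a random NCHW batch through the
# block to show that the output keeps the input shape while channels are rescaled;
# the batch size, channel count and ratio below are assumptions.
if __name__ == '__main__':
    import numpy as np
    block = SEBlock(n_channel=32, ratio=16)
    u = chainer.Variable(np.random.rand(2, 32, 8, 8).astype(np.float32))
    y = block(u)
    print(y.shape)  # (2, 32, 8, 8)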
|
py | 1a42a8159b872598116c574d024c3ab884a6df1a | from functools import lru_cache
__all__ = ['factorial', 'mysin']
@lru_cache(maxsize=None) # Note: -> @cache in python >= 3.9
def factorial(n):
return n * factorial(n - 1) if n else 1
def mysin(x, k):
my_sin = -(x ** (2 * k - 1)) / factorial(2 * k - 1) * ((-1) ** k)
return my_sin + mysin(x, k - 1) if k > 1 else my_sin
|
py | 1a42a8359fa64aa00c51119e20b6ead1b38748d7 | from __future__ import print_function
import os.path, json, requests, logging, datetime, argparse, sys
from requests.packages.urllib3.exceptions import InsecureRequestWarning
#suppres warning for certificate
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
#API headers and url
HEADERS = {'content-type': 'application/json'}
TIME_ZONE_CHOICE = [
"Africa/Cairo", "Asia/Dhaka", "Asia/Yekaterinburg","Europe/London",
"Africa/Casablanca", "Asia/Hong_Kong", "Atlantic/Azores", "Europe/Madrid",
"Africa/Harare", "Asia/Irkutsk", "Atlantic/Cape_Verde", "Europe/Moscow",
"Africa/Kinshasa", "Asia/Kabul", "Australia/Adelaide", "Europe/Prague",
"Africa/Nairobi", "Asia/Karachi", "Australia/Brisbane", "Europe/Rome",
"America/Buenos_Aires", "Asia/Katmandu", "Australia/Darwin", "Europe/Warsaw",
"America/Caracas", "Asia/Krasnoyarsk", "Australia/Hobart", "GMT",
"America/Chihuahua", "Asia/Magadan", "Australia/Perth", "Pacific/Auckland",
"America/Lima", "Asia/Muscat", "Australia/Sydney", "Pacific/Fiji"
"America/Mexico_City", "Asia/Rangoon", "Canada/Atlantic", "Pacific/Guam",
"America/Panama", "Asia/Riyadh", "Canada/Central", "Pacific/Midway",
"America/Phoenix", "Asia/Seoul", "Canada/Newfoundland", "Pacific/Tongatapu",
"America/Santiago", "Asia/Singapore", "Etc/UTC+6", "US/Alaska",
"America/Sao_Paulo", "Asia/Taipei", "Etc/UTC-12", "US/Central",
"Asia/Almaty", "Asia/Tehran", "Etc/UTC-2", "US/East-Indiana",
"Asia/Baghdad", "Asia/Tel_Aviv", "Etc/UTC-3", "US/Eastern",
"Asia/Baku", "Asia/Tokyo", "Europe/Athens", "US/Hawaii",
"Asia/Bangkok", "Asia/Vladivostok", "Europe/Bucharest", "US/Mountain",
"Asia/Calcutta", "Asia/Yakutsk", "Europe/Helsinki", "US/Pacific"
]
#exit code standard:
#0 = OK
#1 = argument parser issue
#2 = environment issue such as invalid environment id, invalid password, or invalid scope
#3 = timeout
EXIT_CODE = 0
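# Hedged CLI sketch (not part of the original script): an ADD run might look like the
# following; every value shown is a placeholder.
#
#   python this_script.py ADD --key <api_key> --cid 12345 \
#       --iam arn:aws:iam::111111111111:role/alertlogic-s3-role --ext <external_id> \
#       --cred my-cred --name my-s3-source --pol my-policy --type ELB \
#       --s3 my-log-bucket --dc defender-us-denver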
def get_api_endpoint(target_dc):
if target_dc == "defender-us-denver":
return "https://publicapi.alertlogic.net/api/lm/v1/"
elif target_dc == "defender-us-ashburn":
return "https://publicapi.alertlogic.com/api/lm/v1/"
elif target_dc == "defender-uk-newport":
return "https://publicapi.alertlogic.co.uk/api/lm/v1/"
else:
return False
def get_source_s3(token, endpoint, target_s3, target_cid):
API_ENDPOINT = endpoint + target_cid + "/sources/" + target_s3
REQUEST = requests.get(API_ENDPOINT, headers=HEADERS, auth=(token,''))
print ("Retrieving S3 Log source info status : " + str(REQUEST.status_code), str(REQUEST.reason))
if REQUEST.status_code == 200:
RESULT = json.loads(REQUEST.text)
else:
RESULT = {}
RESULT["s3"] = {}
RESULT["s3"]["id"] = "n/a"
return RESULT
def del_source_s3(token, endpoint, target_s3, target_cid):
API_ENDPOINT = endpoint + target_cid + "/sources/" + target_s3
REQUEST = requests.delete(API_ENDPOINT, headers=HEADERS, auth=(token,''))
print ("Delete S3 log source status : " + str(REQUEST.status_code), str(REQUEST.reason))
def del_s3_policy(token, endpoint, target_policy, target_cid):
API_ENDPOINT = endpoint + target_cid + "/policies/" + target_policy
REQUEST = requests.delete(API_ENDPOINT, headers=HEADERS, auth=(token,''))
print ("Delete S3 collection policy status : " + str(REQUEST.status_code), str(REQUEST.reason))
def del_credentials(token, endpoint, target_cred, target_cid):
API_ENDPOINT = endpoint + target_cid + "/credentials/" + target_cred
REQUEST = requests.delete(API_ENDPOINT, headers=HEADERS, auth=(token,''))
print ("Delete credentials status : " + str(REQUEST.status_code), str(REQUEST.reason))
def post_credentials(token, endpoint, payload, target_cid):
API_ENDPOINT = endpoint + target_cid + "/credentials/iam_role"
REQUEST = requests.post(API_ENDPOINT, headers=HEADERS, auth=(token,''), data=payload)
print ("Create Credentials status : " + str(REQUEST.status_code), str(REQUEST.reason))
if REQUEST.status_code == 201:
RESULT = json.loads(REQUEST.text)
else:
RESULT = {}
RESULT["iam_role"] = {}
RESULT["iam_role"]["id"] = "n/a"
return RESULT
def prep_credentials(iam_arn, iam_ext_id, cred_name):
#Setup dictionary for credentials payload
RESULT = {}
RESULT["iam_role"] = {}
RESULT["iam_role"]["arn"] = str(iam_arn)
RESULT["iam_role"]["external_id"] = str(iam_ext_id)
RESULT["iam_role"]["name"] = str(cred_name)
return RESULT
def post_s3_policy(token, endpoint, payload, target_cid):
API_ENDPOINT = endpoint + target_cid + "/policies/s3"
REQUEST = requests.post(API_ENDPOINT, headers=HEADERS, auth=(token,''), data=payload)
print ("Create S3 policy status : " + str(REQUEST.status_code), str(REQUEST.reason))
if REQUEST.status_code == 201:
RESULT = json.loads(REQUEST.text)
else:
RESULT = {}
RESULT["s3"] = {}
RESULT["s3"]["id"] = "n/a"
return RESULT
def prep_s3_policy(s3_policy_name, s3_policy_type):
#Setup dictionary for s3 collection payload
RESULT = {}
RESULT["s3"] = {}
RESULT["s3"]["name"] = str(s3_policy_name)
RESULT["s3"]["multiline"] = {}
RESULT["s3"]["multiline"]["is_multiline"] = False
if (s3_policy_type == "MsSQL"):
RESULT["s3"]["template_id"] = "3A943EDF-FB2C-1004-963D-005056853D45"
elif (s3_policy_type == "ELB"):
RESULT["s3"]["template_id"] = "A3069F39-FB68-1004-B9EA-005056853D45"
elif (s3_policy_type == "Redshift_Activity"):
RESULT["s3"]["template_id"] = "7B85CAC3-FB68-1004-B9EA-005056853D45"
elif (s3_policy_type == "Redshift_Con"):
RESULT["s3"]["template_id"] = "74173391-FB82-1004-B9EA-005056853D45"
elif (s3_policy_type == "Redshift_User"):
RESULT["s3"]["template_id"] = "D9675D68-FB93-1004-B9EA-005056853D45"
elif (s3_policy_type == "S3_Access"):
RESULT["s3"]["template_id"] = "AB51CD45-FB68-1004-B9EA-005056853D45"
return RESULT
def post_s3_source(token, endpoint, payload, target_cid):
API_ENDPOINT = endpoint + target_cid + "/sources/s3"
REQUEST = requests.post(API_ENDPOINT, headers=HEADERS, auth=(token,''), data=payload)
print ("Create S3 source status : " + str(REQUEST.status_code), str(REQUEST.reason))
if REQUEST.status_code == 201:
RESULT = json.loads(REQUEST.text)
else:
RESULT = {}
RESULT["s3"] = {}
RESULT["s3"]["id"] = "n/a"
return RESULT
def prep_s3_source(source_name, s3_bucket_name, file_pattern, time_zone, credential_id, policy_id):
#Setup dictionary for s3 collection payload
RESULT = {}
RESULT["s3"] = {}
RESULT["s3"]["name"] = str(source_name)
RESULT["s3"]["enabled"] = True
RESULT["s3"]["bucket"] = s3_bucket_name
RESULT["s3"]["file_pattern"] = file_pattern
RESULT["s3"]["time_zone"] = time_zone
RESULT["s3"]["credential_id"] = credential_id
RESULT["s3"]["policy_id"] = policy_id
return RESULT
#MAIN MODULE
if __name__ == '__main__':
EXIT_CODE=0
#Prepare parser and argument
parent_parser = argparse.ArgumentParser()
subparsers = parent_parser.add_subparsers(help="Select mode", dest="mode")
#Add parser for both ADD and DELETE mode
add_parser = subparsers.add_parser("ADD", help="Add CloudTrail collection")
del_parser = subparsers.add_parser("DEL", help="Delete CloudTrail collection")
#Parser argument for Add scope
add_parser.add_argument("--key", required=True, help="User Key for Alert Logic Log Manager API Authentication")
add_parser.add_argument("--cid", required=True, help="Alert Logic Customer CID as target")
add_parser.add_argument("--iam", required=True, help="Cross Account IAM role arn")
add_parser.add_argument("--ext", required=True, help="External ID specified in IAM role trust relationship")
add_parser.add_argument("--cred", required=True, help="Credential name, free form label, not visible in Alert Logic UI")
add_parser.add_argument("--name", required=True, help="S3 source name, free form label")
add_parser.add_argument("--pol", required=True, help="S3 Collection Policy name, free form label")
add_parser.add_argument("--type", required=True, help="S3 Collection Policy Template", choices=["MsSQL", "ELB", "Redshift_Activity", "Redshift_Con", "Redshift_User", "S3_Access"])
add_parser.add_argument("--s3", required=True, help="S3 bucket name as target for log collection")
add_parser.add_argument("--rgex", required=False, help="File name or Pattern, will use .* if not specified", default=".*")
add_parser.add_argument("--tz", required=False, help="Time zone (https://docs.alertlogic.com/developer/content/z-sandbox/apitest/endpoint/logmgrapi/commonparameters.htm)", choices=TIME_ZONE_CHOICE, default="US/Central")
add_parser.add_argument("--int", required=False, help="Collection interval (in seconds), will use 300 seconds if not specified", default=300)
add_parser.add_argument("--dc", required=True, help="Alert Logic Data center assignment, i.e. defender-us-denver, defender-us-ashburn or defender-uk-newport")
#Parser argument for Delete scope
del_parser.add_argument("--key", required=True, help="User Key for Alert Logic Log Manager API Authentication")
del_parser.add_argument("--cid", required=True, help="Alert Logic Customer CID as target")
del_parser.add_argument("--uid", required=True, help="S3 log source ID that you wish to delete")
del_parser.add_argument("--dc", required=True, help="Alert Logic Data center assignment, i.e. defender-us-denver, defender-us-ashburn or defender-uk-newport")
try:
args = parent_parser.parse_args()
except:
EXIT_CODE = 1
sys.exit(EXIT_CODE)
#Set argument to variables
if args.mode == "ADD":
print ("\n### Starting script - " + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M")) + " - Deployment Mode = ADD ###\n")
APIKEY = args.key
TARGET_CID = args.cid
TARGET_IAM_ROLE_ARN = args.iam
TARGET_EXT_ID = args.ext
TARGET_CRED_NAME = args.cred
TARGET_NAME = args.name
TARGET_S3_POL = args.pol
TARGET_S3_NAME = args.s3
TARGET_S3_TYPE = args.type
TARGET_S3_REGEX = args.rgex
TARGET_TIME_ZONE = args.tz
TARGET_INTERVAL = args.int
TARGET_DEFENDER = args.dc
#get API endpoint
ALERT_LOGIC_LM = get_api_endpoint(TARGET_DEFENDER)
if ALERT_LOGIC_LM != False:
#Create credentials using the IAM role ARN and external ID
print ("### Creating IAM Role Link ###")
CRED_PAYLOAD = prep_credentials(TARGET_IAM_ROLE_ARN, TARGET_EXT_ID, TARGET_CRED_NAME)
CRED_RESULT = post_credentials(APIKEY, ALERT_LOGIC_LM, str(json.dumps(CRED_PAYLOAD, indent=4)), TARGET_CID)
CRED_ID = str(CRED_RESULT["iam_role"]["id"])
if CRED_ID != "n/a":
print ("Cred ID : " + CRED_ID)
#Prep the S3 Collection Policy payload
print ("### Creating S3 Collection Policy ###")
S3_POLICY_PAYLOAD = prep_s3_policy(TARGET_S3_POL, TARGET_S3_TYPE)
S3_POLICY_RESULT = post_s3_policy(APIKEY, ALERT_LOGIC_LM, str(json.dumps(S3_POLICY_PAYLOAD, indent=4)), TARGET_CID)
S3_POLICY_ID = str(S3_POLICY_RESULT["s3"]["id"])
if S3_POLICY_ID != "n/a":
print ("S3 Collection Policy ID : " + S3_POLICY_ID)
#Prep the S3 Log Source payload
print ("### Creating S3 Log Source ###")
S3_SOURCE_PAYLOAD = prep_s3_source(TARGET_NAME, TARGET_S3_NAME, TARGET_S3_REGEX, TARGET_TIME_ZONE, CRED_ID, S3_POLICY_ID)
S3_SOURCE_RESULT = post_s3_source(APIKEY, ALERT_LOGIC_LM, str(json.dumps(S3_SOURCE_PAYLOAD, indent=4)), TARGET_CID)
S3_SOURCE_ID = str(S3_SOURCE_RESULT["s3"]["id"])
if S3_SOURCE_ID != "n/a":
print ("S3 Source ID : " + S3_SOURCE_ID)
else:
EXIT_CODE=2
print ("### Failed to create S3 Log Source, see response code + reason above, stopping .. ###")
else:
EXIT_CODE=2
print ("### Failed to create S3 Collection Policy, see response code + reason above, stopping .. ###")
else:
EXIT_CODE=2
print ("### Failed to create credentials, see response code + reason above, stopping .. ###")
else:
EXIT_CODE=2
print ("Invalid data center assignment, use -h for more details, stopping ...")
elif args.mode == "DEL":
print ("\n### Starting script - " + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M")) + " - Deployment Mode = DEL ###\n")
APIKEY = args.key
TARGET_CID = args.cid
TARGET_S3_SOURCE_ID = args.uid
TARGET_DEFENDER = args.dc
#get API endpoint
ALERT_LOGIC_LM = get_api_endpoint(TARGET_DEFENDER)
S3_SOURCE_RESULT = get_source_s3(APIKEY, ALERT_LOGIC_LM, TARGET_S3_SOURCE_ID, TARGET_CID)
if S3_SOURCE_RESULT["s3"]["id"] != "n/a":
#Get the credentials ID and Policy ID
TARGET_CRED_ID = S3_SOURCE_RESULT["s3"]["credential_id"]
TARGET_POLICY_ID = S3_SOURCE_RESULT["s3"]["policy_id"]
#Delete S3 log source
del_source_s3(APIKEY, ALERT_LOGIC_LM, TARGET_S3_SOURCE_ID, TARGET_CID)
#Delete S3 collection policy
del_s3_policy(APIKEY, ALERT_LOGIC_LM, TARGET_POLICY_ID, TARGET_CID)
#Delete S3 credentials
del_credentials(APIKEY, ALERT_LOGIC_LM, TARGET_CRED_ID, TARGET_CID)
else:
EXIT_CODE=2
print ("Failed to find the S3 log source ID, see response code + reason above, stopping ..")
print ("\n### Script stopped - " + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M")) + "###\n")
sys.exit(EXIT_CODE) |
py | 1a42a95a06745d9da4ce99d311a65fe234bf0a51 | from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.http.response import HttpResponseRedirect
from django.views import View
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from shop.models import Customer
class Signup(View):
def get(self, request):
current_user = request.user
if current_user.id:
messages.success(request, 'Already logged in!!!')
return redirect('ShopHome')
return render(request, 'signup.html')
def post(self, request):
email = request.POST['email']
fname = request.POST['fname']
lname = request.POST['lname']
phone = request.POST['phone']
house = request.POST['house']
street = request.POST['street']
state = request.POST['state']
city = request.POST['city']
pin = request.POST['pin']
pass1 = request.POST['pass1']
pass2 = request.POST['pass2']
uname = email
print(uname, fname, lname, email, phone, pass1)
values = {
'email': email,
'fname': fname,
'lname': lname,
'phone': phone,
'house': house,
'street': street,
'state': state,
'city': city,
'pin': pin,
}
# Form Validations
if User.objects.filter(email=email):
messages.success(request, "E-mail Already Registered!!!")
return render(request, 'signup.html', values)
        if len(fname) > 10 or len(lname) > 10:
messages.success(request, "First or Last Name too long!!!")
return render(request, 'signup.html', values)
if not fname.isalpha() or not lname.isalpha():
messages.warning(request,"Name must contain only letters.")
return render(request, 'signup.html', values)
if len(str(phone))!=10:
messages.warning(request,"Phone number must contain 10 digits.")
return render(request, 'signup.html', values)
if len(pass1)<5:
messages.warning(request, "Password too short!!! It must have atleast 5 characters.")
return render(request, 'signup.html', values)
if pass1!=pass2:
messages.warning(request, "Passwords don't match!!!")
return render(request, 'signup.html', values)
new_user = User.objects.create_user(
uname,
email=email,
password=pass1,
first_name=fname.capitalize(),
last_name=lname.capitalize(),
)
new_user.save()
customer = Customer(
user=new_user,
phone=phone,
house_no=house,
street=street.capitalize(),
state=state.capitalize(),
city=city.capitalize(),
pin=pin,
)
customer.save()
user = authenticate(request, username=email, password=pass1)
if user is not None:
login(request, user)
messages.success(request, "Account Created Successfully!!!")
return redirect('ShopHome')
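# Hedged wiring sketch (not part of the original module): a urls.py entry for this
# view might look like the following; the route and name are assumptions.
#
#   from django.urls import path
#   urlpatterns = [path('signup/', Signup.as_view(), name='signup')]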
|
py | 1a42aa13fd87bd79f0f35cda582a6e0214851274 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) Flo Developers 2013-2018
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool limiting together/eviction with the wallet."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class MempoolLimitTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-maxmempool=5", "-spendzeroconfchange=0"]]
def run_test(self):
txouts = gen_return_txouts()
relayfee = self.nodes[0].getnetworkinfo()['relayfee']
txids = []
utxos = create_confirmed_utxos(relayfee, self.nodes[0], 91)
#create a mempool tx that will be evicted
us0 = utxos.pop()
inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
outputs = {self.nodes[0].getnewaddress() : 0.01}
tx = self.nodes[0].createrawtransaction(inputs, outputs)
self.nodes[0].settxfee(relayfee) # specifically fund this tx with low fee
txF = self.nodes[0].fundrawtransaction(tx)
self.nodes[0].settxfee(0) # return to automatic fee selection
txFS = self.nodes[0].signrawtransaction(txF['hex'])
txid = self.nodes[0].sendrawtransaction(txFS['hex'])
relayfee = self.nodes[0].getnetworkinfo()['relayfee']
base_fee = relayfee*100
for i in range (3):
txids.append([])
txids[i] = create_lots_of_big_transactions(self.nodes[0], txouts, utxos[30*i:30*i+30], 30, (i+1)*base_fee)
# by now, the tx should be evicted, check confirmation state
assert(txid not in self.nodes[0].getrawmempool())
txdata = self.nodes[0].gettransaction(txid)
assert(txdata['confirmations'] == 0) #confirmation should still be 0
if __name__ == '__main__':
MempoolLimitTest().main()
|
py | 1a42aac5a8c4c986066c58a9b175a5f0ccd56101 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-01 08:58
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wordcompletion', '0008_auto_20180601_0808'),
]
operations = [
migrations.CreateModel(
name='Warehouses',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.CharField(default=b'7b02a98f8ceb4f3499ee20c290c4c3b4', max_length=50)),
('name', models.CharField(max_length=255)),
('registered_name', models.CharField(max_length=255)),
('seller_id', models.CharField(max_length=255, null=True)),
('address', django.contrib.postgres.fields.jsonb.JSONField()),
('incoming_center', models.CharField(max_length=40)),
('rto_center', models.CharField(max_length=40)),
('dto_center', models.CharField(max_length=40)),
('active', models.BooleanField(default=True)),
('pin_code', models.CharField(max_length=6)),
],
),
migrations.DeleteModel(
name='BooksCatalog',
),
]
|
py | 1a42ab013e278832fe6c8eed20f4a4c879f4d8cf | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
class FocalLoss(nn.Module):
"""Multi-class Focal loss implementation.
Args:
gamma (float): The larger the gamma, the smaller
the loss weight of easier samples.
weight (float): A manual rescaling weight given to each
class.
ignore_index (int): Specifies a target value that is ignored
and does not contribute to the input gradient.
"""
def __init__(self, gamma=2, weight=None, ignore_index=-100):
super().__init__()
self.gamma = gamma
self.weight = weight
self.ignore_index = ignore_index
def forward(self, input, target):
logit = F.log_softmax(input, dim=1)
pt = torch.exp(logit)
logit = (1 - pt)**self.gamma * logit
loss = F.nll_loss(
logit, target, self.weight, ignore_index=self.ignore_index)
return loss
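# Hedged usage sketch (not part of the original module): random logits and labels just
# to exercise the loss; the shapes and gamma value below are assumptions.
if __name__ == '__main__':
    logits = torch.randn(4, 3, requires_grad=True)  # (batch, num_classes)
    labels = torch.tensor([0, 2, 1, 2])
    loss = FocalLoss(gamma=2)(logits, labels)
    loss.backward()
    print(float(loss))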
|
py | 1a42abded11582153985a7a00fc3bff7d595aa73 | from __future__ import print_function
import pandas as pd
import numpy as np
import sys
from os import listdir, getcwd
from os.path import isdir, join, dirname, abspath
from pandas import concat
from nilmtk.utils import get_module_directory, check_directory_exists
from nilmtk.datastore import Key
from nilmtk.measurement import LEVEL_NAMES
from nilm_metadata import convert_yaml_to_hdf5
"""
DATASET STRUCTURE:
------------------
On extracting all the dataset values, we should arrive at a similar directory structure as
mentioned.
ECO Dataset will have a folder '<i>_sm_csv' and '<i>_plug_csv' where i is the building no.
Originally, the expected folder structure was:
- <i>_sm_csv has a folder <i>
- <i>_plug_csv has folders 01, 02,....<n> where n is the plug numbers.
This version also supports the following structure, which can be created by unpacking the
ZIP files uniformly, creating a folder for each one:
- <i>_sm_csv has a folder <i>
- <i>_plug_csv has a folder <i>, and <i>_plug_csv/<i> has folders 01, 02,....<n>,
where n is the plug numbers.
Each folder has a CSV file as per each day, with each day csv file containing
86400 entries.
"""
plugs_column_name = {1: ('power', 'active')}
def convert_eco(dataset_loc, hdf_filename, timezone):
"""
Parameters:
-----------
dataset_loc: str
The root directory where the dataset is located.
hdf_filename: str
The location where the hdf_filename is present.
The directory location has to contain the
hdf5file name for the converter to work.
timezone: str
specifies the timezone of the dataset.
"""
# Creating a new HDF File
store = pd.HDFStore(hdf_filename, 'w', complevel=9, complib='blosc')
check_directory_exists(dataset_loc)
directory_list = [i for i in listdir(dataset_loc) if '.txt' not in i]
directory_list.sort()
print(directory_list)
found_any_sm = False
found_any_plug = False
# Traversing every folder
for folder in directory_list:
if folder[0] == '.' or folder[-3:] == '.h5':
print('Skipping ', folder)
continue
#Building number and meter_flag
building_no = int(folder[:2])
meter_flag = None
if 'sm_csv' in folder:
meter_flag = 'sm'
elif 'plugs' in folder:
meter_flag = 'plugs'
else:
print('Skipping folder', folder)
continue
print('Computing for folder', folder)
dir_list = [i for i in listdir(join(dataset_loc, folder)) if isdir(join(dataset_loc,folder,i))]
dir_list.sort()
if meter_flag == 'plugs' and len(dir_list) < 3:
# Try harder to find the subfolders
folder = join(folder, folder[:2])
dir_list = [i for i in listdir(join(dataset_loc, folder)) if isdir(join(dataset_loc,folder,i))]
print('Current dir list:', dir_list)
for fl in dir_list:
print('Computing for folder ', fl)
fl_dir_list = [i for i in listdir(join(dataset_loc,folder,fl)) if '.csv' in i]
fl_dir_list.sort()
if meter_flag == 'sm':
for fi in fl_dir_list:
found_any_sm = True
df = pd.read_csv(join(dataset_loc,folder,fl,fi), names=[i for i in range(1,17)], dtype=np.float32)
for phase in range(1,4):
key = str(Key(building=building_no, meter=phase))
df_phase = df.loc[:,[1+phase, 5+phase, 8+phase, 13+phase]]
# get reactive power
power = df_phase.loc[:, (1+phase, 13+phase)].values
reactive = power[:,0] * np.tan(power[:,1] * np.pi / 180)
df_phase['Q'] = reactive
df_phase.index = pd.DatetimeIndex(start=fi[:-4], freq='s', periods=86400, tz='GMT')
df_phase = df_phase.tz_convert(timezone)
sm_column_name = {
1+phase:('power', 'active'),
5+phase:('current', ''),
8+phase:('voltage', ''),
13+phase:('phase_angle', ''),
'Q': ('power', 'reactive'),
}
df_phase.columns = pd.MultiIndex.from_tuples(
sm_column_name[col] for col in df_phase.columns
)
power_active = df_phase['power', 'active']
tmp_before = np.size(power_active)
df_phase = df_phase[power_active != -1]
power_active = df_phase['power', 'active']
tmp_after = np.size(power_active)
if tmp_before != tmp_after:
print('Removed missing measurements - Size before: ' + str(tmp_before) + ', size after: ' + str(tmp_after))
df_phase.columns.set_names(LEVEL_NAMES, inplace=True)
if not key in store:
store.put(key, df_phase, format='Table')
else:
store.append(key, df_phase, format='Table')
store.flush()
print('Building', building_no, ', Meter no.', phase,
'=> Done for ', fi[:-4])
else:
#Meter number to be used in key
meter_num = int(fl) + 3
key = str(Key(building=building_no, meter=meter_num))
current_folder = join(dataset_loc,folder,fl)
if not fl_dir_list:
raise RuntimeError("No CSV file found in " + current_folder)
                #Getting dataframe for each csv file separately
for fi in fl_dir_list:
found_any_plug = True
df = pd.read_csv(join(current_folder, fi), names=[1], dtype=np.float64)
                    df.index = pd.date_range(start=fi[:-4].replace('.', ':'), freq='s', periods=86400, tz='GMT')
df.columns = pd.MultiIndex.from_tuples(plugs_column_name.values())
df = df.tz_convert(timezone)
df.columns.set_names(LEVEL_NAMES, inplace=True)
tmp_before = np.size(df.power.active)
df = df[df.power.active != -1]
tmp_after = np.size(df.power.active)
if (tmp_before != tmp_after):
print('Removed missing measurements - Size before: ' + str(tmp_before) + ', size after: ' + str(tmp_after))
# If table not present in hdf5, create or else append to existing data
if not key in store:
store.put(key, df, format='Table')
print('Building',building_no,', Meter no.',meter_num,'=> Done for ',fi[:-4])
else:
store.append(key, df, format='Table')
store.flush()
print('Building',building_no,', Meter no.',meter_num,'=> Done for ',fi[:-4])
if not found_any_plug or not found_any_sm:
raise RuntimeError('The files were not found! Please check the folder structure. Extract each ZIP file into a folder with its base name (e.g. extract "01_plugs_csv.zip" into a folder named "01_plugs_csv", etc.)')
print("Data storage completed.")
store.close()
# Adding the metadata to the HDF5file
print("Proceeding to Metadata conversion...")
meta_path = join(
get_module_directory(),
'dataset_converters',
'eco',
'metadata'
)
convert_yaml_to_hdf5(meta_path, hdf_filename)
print("Completed Metadata conversion.")
|
py | 1a42acfbed1be55977cbb860baada0ea06f160cb | from typing import Any, Callable, MutableMapping
import toml
def ManipulateFile(toml_file_path:str,
new_toml: MutableMapping[Any,Any]):
try:
with open(toml_file_path, "w") as file:
toml.dump(new_toml,file)
    except Exception:
pass
def TomlBaseManipulation(toml_file_path: str,ManipulationFunction: Callable[[Any],None]):
try:
toml_full_dict=toml.load(toml_file_path)
ManipulationFunction(toml_full_dict)
ManipulateFile(toml_file_path,toml_full_dict)
    except Exception:
pass |
py | 1a42ad73ba1095d3b17fbad4b0bb70ec0337374c | # Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import grpc_testing
from grpc_testing._channel import _channel_rpc
from grpc_testing._channel import _multi_callable
# All serializer and deserializer parameters are not (yet) used by this
# test infrastructure.
# pylint: disable=unused-argument
class TestingChannel(grpc_testing.Channel):
def __init__(self, time, state):
self._time = time
self._state = state
def subscribe(self, callback, try_to_connect=False):
raise NotImplementedError()
def unsubscribe(self, callback):
raise NotImplementedError()
def unary_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return _multi_callable.UnaryUnary(method, self._state)
def unary_stream(self,
method,
request_serializer=None,
response_deserializer=None):
return _multi_callable.UnaryStream(method, self._state)
def stream_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return _multi_callable.StreamUnary(method, self._state)
def stream_stream(self,
method,
request_serializer=None,
response_deserializer=None):
return _multi_callable.StreamStream(method, self._state)
def _close(self):
# TODO(https://github.com/grpc/grpc/issues/12531): Decide what
# action to take here, if any?
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._close()
return False
def close(self):
self._close()
def take_unary_unary(self, method_descriptor, timeout=None):
'''Take a unary request and return a unary response.'''
return _channel_rpc.unary_unary(self._state, method_descriptor, timeout)
def take_unary_stream(self, method_descriptor, timeout=None):
'''Take a unary request and return a streaming response.'''
return _channel_rpc.unary_stream(self._state, method_descriptor, timeout)
def take_stream_unary(self, method_descriptor, timeout=None):
'''Take a streaming request and return a unary response.'''
return _channel_rpc.stream_unary(self._state, method_descriptor, timeout)
def take_stream_stream(self, method_descriptor, timeout=None):
'''Take a streaming request and return a streaming response.'''
return _channel_rpc.stream_stream(self._state, method_descriptor, timeout)
# pylint: enable=unused-argument
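
# Typical test-side usage (a sketch; `DESCRIPTOR` and `METHOD_DESCRIPTOR` are
# placeholders for protobuf-generated descriptors, and the application stub call
# is assumed to run on another thread):
#
#     channel = grpc_testing.channel(DESCRIPTOR.services_by_name.values(),
#                                    grpc_testing.strict_real_time())
#     invocation_metadata, request, rpc = channel.take_unary_unary(METHOD_DESCRIPTOR)
#     # ...drive `rpc` (send the response and status) to finish the call under test.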
|
py | 1a42ae01bb8bcd1ee2a3e4688e1227caf3ad7686 | import re
class StringCal:
def __init__(self, formula:str, **variables):
self.formula = formula.replace(' ', '')
self.variables = variables
def get_coefficient(self) -> dict:
coefficients = {}
term = ""
for f in self.formula + ';':
            variable_term = re.compile(r'(\W?[0-9]*)([a-zA-Z]+)')
            constant_term = re.compile(r'(\W?[0-9]+)(\W)')
            is_coefficientOne = re.compile(r'(\W?)([a-zA-Z]+)')
term += f
variable_term = variable_term.match(term)
constant_term = constant_term.match(term)
if variable_term == None and constant_term == None:
continue
elif variable_term != None:
variable = variable_term.group(2)
coefficient = variable_term.group(1)
if is_coefficientOne.match(variable_term.group()):
coefficient += '1'
try:
coefficients[variable] = eval(str(coefficients[variable]) + coefficient)
except KeyError:
coefficients[variable] = int(coefficient)
term = ""
elif constant_term != None:
constant = constant_term.group(1)
try:
coefficients['constant'] = eval(str(coefficients['constant']) + constant)
except KeyError:
coefficients['constant'] = int(constant)
term = constant_term.group(2)
return coefficients
def simplify(self) -> str:
simplified_formula = ""
no_plus_minus = re.compile('[0-9]+')
coefficients = self.get_coefficient()
for variable in coefficients:
coefficient = str(coefficients[variable])
if no_plus_minus.match(coefficient) != None and simplified_formula != '':
coefficient = '+' + coefficient
if variable == 'constant':
simplified_formula += coefficient
else:
simplified_formula += coefficient + variable
return simplified_formula
def define(self, **kwargs) -> int:
formula = self.formula
if kwargs != {}:
self.variables = kwargs
for var in self.variables:
var_value = str(self.variables[var])
formula = formula.replace(var, '*' + var_value)
return eval(formula)
if __name__ == '__main__':
formula = StringCal(formula='2x+3x+1-3+3y',x=1,y=1)
print(formula.define())
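    # Additional illustration (not in the original): simplify() collects like
    # terms into a single expression string.
    print(formula.simplify())  # -> 5x-2+3y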
|
py | 1a42ae2a1c784bafb08e0faeff6f9f01264f7ef2 | from tensor2tensor.utils import registry
from tensor2tensor.data_generators import translate, problem, text_encoder, generator_utils
from tensor2tensor.data_generators.translate_encs import TranslateEncsWmt32k
_ENCS_TRAIN_DATASETS = [
[("http://czeng57m.tar"),
("tsv", 3, 2, "czeng57m/*train.gz")],
[
"http://data.statmt.org/wmt17/translation-task/training-parallel-nc-v12.tgz", # pylint: disable=line-too-long
("training/news-commentary-v12.cs-en.en",
"training/news-commentary-v12.cs-en.cs")
],
[
"http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz",
("commoncrawl.cs-en.en", "commoncrawl.cs-en.cs")
],
[
"http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz",
("training/europarl-v7.cs-en.en", "training/europarl-v7.cs-en.cs")
],
]
_ENCS_TEST_DATASETS = [
[
"http://data.statmt.org/wmt17/translation-task/dev.tgz",
("dev/newstest2013.en", "dev/newstest2013.cs")
],
]
@registry.register_problem
class TranslateEncsWmtCzeng57m32k(translate.TranslateProblem):
"""Problem spec for WMT English-Czech translation."""
@property
def targeted_vocab_size(self):
return 2**15 # 32768
@property
def vocab_name(self):
return "vocab.encs"
def generator(self, data_dir, tmp_dir, train):
datasets = _ENCS_TRAIN_DATASETS if train else _ENCS_TEST_DATASETS
tag = "train" if train else "dev"
vocab_datasets = []
data_path = translate.compile_data(tmp_dir, datasets,
"czeng57m_encs_tok_%s" % tag)
# CzEng contains 100 gz files with tab-separated columns, so let's expect
# it is the first dataset in datasets and use the newly created *.lang{1,2}
# files for vocab construction.
if datasets[0][0].endswith("czeng57m.tar"):
vocab_datasets.append([
datasets[0][0],
["czeng57m_encs_tok_%s.lang1" % tag,
"czeng57m_encs_tok_%s.lang2" % tag]
])
datasets = datasets[1:]
vocab_datasets += [[item[0], [item[1][0], item[1][1]]] for item in datasets]
symbolizer_vocab = generator_utils.get_or_generate_vocab(
data_dir, tmp_dir, self.vocab_file, self.targeted_vocab_size,
vocab_datasets)
return translate.token_generator(data_path + ".lang1", data_path + ".lang2",
symbolizer_vocab, text_encoder.EOS_ID)
@property
def input_space_id(self):
return problem.SpaceID.EN_TOK
@property
def target_space_id(self):
return problem.SpaceID.CS_TOK
@registry.register_problem
class TranslateMedical8lang(TranslateEncsWmt32k):
@property
def vocab_filename(self):
return "vocab_medical8lang.%d" % self.approx_vocab_size
@registry.register_problem
class TranslateEnde(TranslateEncsWmtCzeng57m32k):
@property
def vocab_filename(self):
return "vocab.ende.%d" % self.approx_vocab_size
@registry.register_problem
class TranslateEnru(TranslateEncsWmtCzeng57m32k):
@property
def vocab_filename(self):
return "vocab.enru.%d" % self.approx_vocab_size
@registry.register_problem
class TranslateEnpl(TranslateEncsWmtCzeng57m32k):
@property
def vocab_filename(self):
return "vocab.enpl.%d" % self.approx_vocab_size
|
py | 1a42af979ab6057c3eaf5e0b24b695dc471e18ce | # Copyright 2019 Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
from tools.statics import PACKAGES_CONFIG_PATH
class PackageConfigReader(object):
def __init__(self, config_file=PACKAGES_CONFIG_PATH):
self.config_file = config_file
self.config = None
self._read_file()
def _read_file(self):
with open(self.config_file, 'r') as fp:
            self.config = yaml.safe_load(fp)
def get(self, package_operation_type):
return self._get_packages_by_type(package_operation_type)
def _get_packages_by_type(self, package_operation_type):
if self.config is None:
return []
if package_operation_type == 'install':
return self._get_installed_packages()
elif package_operation_type == 'uninstall':
return self._get_uninstalled_packages()
raise Exception('Never here')
def _get_uninstalled_packages(self):
return sorted([p for p in self.config if not self._is_install_package(p)])
def _get_installed_packages(self):
return sorted([p for p in self.config if self._is_install_package(p)])
def _is_install_package(self, package):
if self.config[package] is None:
return True
if self.config[package].get('uninstall') is True:
return False
return True
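
# Example (a sketch; the package names and the path are illustrative assumptions).
# The packages YAML maps each package name to either nothing (install it) or a
# mapping containing 'uninstall: true' (uninstall it), e.g.:
#
#     vim:
#     emacs:
#       uninstall: true
#
#     reader = PackageConfigReader('/etc/packages.yaml')
#     reader.get('install')      # -> ['vim']
#     reader.get('uninstall')    # -> ['emacs']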
|
py | 1a42b014ff86eb995e4839d009024d6433b145bf | # -*- coding: utf-8 -*-
model = {
'ng ': 0,
'ang': 1,
' na': 2,
' sa': 3,
'an ': 4,
'nan': 5,
'sa ': 6,
'na ': 7,
' ma': 8,
' ca': 9,
'ay ': 10,
'n g': 11,
' an': 12,
'ong': 13,
' ga': 14,
'at ': 15,
' pa': 16,
'ala': 17,
' si': 18,
'a n': 19,
'ga ': 20,
'g n': 21,
'g m': 22,
'ito': 23,
'g c': 24,
'man': 25,
'san': 26,
'g s': 27,
'ing': 28,
'to ': 29,
'ila': 30,
'ina': 31,
' di': 32,
' ta': 33,
'aga': 34,
'iya': 35,
'aca': 36,
'g t': 37,
' at': 38,
'aya': 39,
'ama': 40,
'lan': 41,
'a a': 42,
'qui': 43,
'a c': 44,
'a s': 45,
'nag': 46,
' ba': 47,
'g i': 48,
'tan': 49,
"'t ": 50,
' cu': 51,
'aua': 52,
'g p': 53,
' ni': 54,
'os ': 55,
"'y ": 56,
'a m': 57,
' n ': 58,
'la ': 59,
' la': 60,
'o n': 61,
'yan': 62,
' ay': 63,
'usa': 64,
'cay': 65,
'on ': 66,
'ya ': 67,
' it': 68,
'al ': 69,
'apa': 70,
'ata': 71,
't n': 72,
'uan': 73,
'aha': 74,
'asa': 75,
'pag': 76,
' gu': 77,
'g l': 78,
'di ': 79,
'mag': 80,
'aba': 81,
'g a': 82,
'ara': 83,
'a p': 84,
'in ': 85,
'ana': 86,
'it ': 87,
'si ': 88,
'cus': 89,
'g b': 90,
'uin': 91,
'a t': 92,
'as ': 93,
'n n': 94,
'hin': 95,
' hi': 96,
"a't": 97,
'ali': 98,
' bu': 99,
'gan': 100,
'uma': 101,
'a d': 102,
'agc': 103,
'aqu': 104,
'g d': 105,
' tu': 106,
'aon': 107,
'ari': 108,
'cas': 109,
'i n': 110,
'niy': 111,
'pin': 112,
'a i': 113,
'gca': 114,
'siy': 115,
"a'y": 116,
'yao': 117,
'ag ': 118,
'ca ': 119,
'han': 120,
'ili': 121,
'pan': 122,
'sin': 123,
'ual': 124,
'n s': 125,
'nam': 126,
' lu': 127,
'can': 128,
'dit': 129,
'gui': 130,
'y n': 131,
'gal': 132,
'hat': 133,
'nal': 134,
' is': 135,
'bag': 136,
'fra': 137,
' fr': 138,
' su': 139,
'a l': 140,
' co': 141,
'ani': 142,
' bi': 143,
' da': 144,
'alo': 145,
'isa': 146,
'ita': 147,
'may': 148,
'o s': 149,
'sil': 150,
'una': 151,
' in': 152,
' pi': 153,
'l n': 154,
'nil': 155,
'o a': 156,
'pat': 157,
'sac': 158,
't s': 159,
' ua': 160,
'agu': 161,
'ail': 162,
'bin': 163,
'dal': 164,
'g h': 165,
'ndi': 166,
'oon': 167,
'ua ': 168,
' ha': 169,
'ind': 170,
'ran': 171,
's n': 172,
'tin': 173,
'ulo': 174,
'eng': 175,
'g f': 176,
'ini': 177,
'lah': 178,
'lo ': 179,
'rai': 180,
'rin': 181,
'ton': 182,
'g u': 183,
'inu': 184,
'lon': 185,
"o'y": 186,
't a': 187,
' ar': 188,
'a b': 189,
'ad ': 190,
'bay': 191,
'cal': 192,
'gya': 193,
'ile': 194,
'mat': 195,
'n a': 196,
'pau': 197,
'ra ': 198,
'tay': 199,
'y m': 200,
'ant': 201,
'ban': 202,
'i m': 203,
'nas': 204,
'nay': 205,
'no ': 206,
'sti': 207,
' ti': 208,
'ags': 209,
'g g': 210,
'ta ': 211,
'uit': 212,
'uno': 213,
' ib': 214,
' ya': 215,
'a u': 216,
'abi': 217,
'ati': 218,
'cap': 219,
'ig ': 220,
'is ': 221,
"la'": 222,
' do': 223,
' pu': 224,
'api': 225,
'ayo': 226,
'gos': 227,
'gul': 228,
'lal': 229,
'tag': 230,
'til': 231,
'tun': 232,
'y c': 233,
'y s': 234,
'yon': 235,
'ano': 236,
'bur': 237,
'iba': 238,
'isi': 239,
'lam': 240,
'nac': 241,
'nat': 242,
'ni ': 243,
'nto': 244,
'od ': 245,
'pa ': 246,
'rgo': 247,
'urg': 248,
' m ': 249,
'adr': 250,
'ast': 251,
'cag': 252,
'gay': 253,
'gsi': 254,
'i p': 255,
'ino': 256,
'len': 257,
'lin': 258,
'm g': 259,
'mar': 260,
'nah': 261,
"to'": 262,
' de': 263,
'a h': 264,
'cat': 265,
'cau': 266,
'con': 267,
'iqu': 268,
'lac': 269,
'mab': 270,
'min': 271,
'og ': 272,
'par': 273,
'sal': 274,
' za': 275,
'ao ': 276,
'doo': 277,
'ipi': 278,
'nod': 279,
'nte': 280,
'uha': 281,
'ula': 282,
' re': 283,
'ill': 284,
'lit': 285,
'mac': 286,
'nit': 287,
"o't": 288,
'or ': 289,
'ora': 290,
'sum': 291,
'y p': 292,
' al': 293,
' mi': 294,
' um': 295,
'aco': 296,
'ada': 297,
'agd': 298,
'cab': 299,
}
|
py | 1a42b01859fecb4fda725a7ed28a187a02da52fb | # Copyright (c) 2018, Neil Booth
#
# All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# and warranty status of this software.
"""Merkle trees, branches, proofs and roots."""
from asyncio import Event
from math import ceil, log
from lbry.wallet.server.hash import double_sha256
class Merkle:
"""Perform merkle tree calculations on binary hashes using a given hash
function.
If the hash count is not even, the final hash is repeated when
calculating the next merkle layer up the tree.
"""
def __init__(self, hash_func=double_sha256):
self.hash_func = hash_func
def tree_depth(self, hash_count):
return self.branch_length(hash_count) + 1
def branch_length(self, hash_count):
"""Return the length of a merkle branch given the number of hashes."""
if not isinstance(hash_count, int):
raise TypeError('hash_count must be an integer')
if hash_count < 1:
raise ValueError('hash_count must be at least 1')
return ceil(log(hash_count, 2))
def branch_and_root(self, hashes, index, length=None):
"""Return a (merkle branch, merkle_root) pair given hashes, and the
index of one of those hashes.
"""
hashes = list(hashes)
if not isinstance(index, int):
raise TypeError('index must be an integer')
# This also asserts hashes is not empty
if not 0 <= index < len(hashes):
raise ValueError(f"index '{index}/{len(hashes)}' out of range")
natural_length = self.branch_length(len(hashes))
if length is None:
length = natural_length
else:
if not isinstance(length, int):
raise TypeError('length must be an integer')
if length < natural_length:
raise ValueError('length out of range')
hash_func = self.hash_func
branch = []
for _ in range(length):
if len(hashes) & 1:
hashes.append(hashes[-1])
branch.append(hashes[index ^ 1])
index >>= 1
hashes = [hash_func(hashes[n] + hashes[n + 1])
for n in range(0, len(hashes), 2)]
return branch, hashes[0]
def root(self, hashes, length=None):
"""Return the merkle root of a non-empty iterable of binary hashes."""
branch, root = self.branch_and_root(hashes, 0, length)
return root
def root_from_proof(self, hash, branch, index):
"""Return the merkle root given a hash, a merkle branch to it, and
its index in the hashes array.
branch is an iterable sorted deepest to shallowest. If the
returned root is the expected value then the merkle proof is
verified.
The caller should have confirmed the length of the branch with
branch_length(). Unfortunately this is not easily done for
bitcoin transactions as the number of transactions in a block
is unknown to an SPV client.
"""
hash_func = self.hash_func
for elt in branch:
if index & 1:
hash = hash_func(elt + hash)
else:
hash = hash_func(hash + elt)
index >>= 1
if index:
raise ValueError('index out of range for branch')
return hash
def level(self, hashes, depth_higher):
"""Return a level of the merkle tree of hashes the given depth
higher than the bottom row of the original tree."""
size = 1 << depth_higher
root = self.root
return [root(hashes[n: n + size], depth_higher)
for n in range(0, len(hashes), size)]
def branch_and_root_from_level(self, level, leaf_hashes, index,
depth_higher):
"""Return a (merkle branch, merkle_root) pair when a merkle-tree has a
level cached.
To maximally reduce the amount of data hashed in computing a
        merkle branch, cache a tree of depth N at level N // 2.
level is a list of hashes in the middle of the tree (returned
by level())
leaf_hashes are the leaves needed to calculate a partial branch
up to level.
depth_higher is how much higher level is than the leaves of the tree
index is the index in the full list of hashes of the hash whose
merkle branch we want.
"""
if not isinstance(level, list):
raise TypeError("level must be a list")
if not isinstance(leaf_hashes, list):
raise TypeError("leaf_hashes must be a list")
leaf_index = (index >> depth_higher) << depth_higher
leaf_branch, leaf_root = self.branch_and_root(
leaf_hashes, index - leaf_index, depth_higher)
index >>= depth_higher
level_branch, root = self.branch_and_root(level, index)
# Check last so that we know index is in-range
if leaf_root != level[index]:
raise ValueError('leaf hashes inconsistent with level')
return leaf_branch + level_branch, root
class MerkleCache:
"""A cache to calculate merkle branches efficiently."""
def __init__(self, merkle, source_func):
"""Initialise a cache hashes taken from source_func:
async def source_func(index, count):
...
"""
self.merkle = merkle
self.source_func = source_func
self.length = 0
self.depth_higher = 0
self.initialized = Event()
def _segment_length(self):
return 1 << self.depth_higher
def _leaf_start(self, index):
"""Given a level's depth higher and a hash index, return the leaf
index and leaf hash count needed to calculate a merkle branch.
"""
depth_higher = self.depth_higher
return (index >> depth_higher) << depth_higher
def _level(self, hashes):
return self.merkle.level(hashes, self.depth_higher)
async def _extend_to(self, length):
"""Extend the length of the cache if necessary."""
if length <= self.length:
return
# Start from the beginning of any final partial segment.
# Retain the value of depth_higher; in practice this is fine
start = self._leaf_start(self.length)
hashes = await self.source_func(start, length - start)
self.level[start >> self.depth_higher:] = self._level(hashes)
self.length = length
async def _level_for(self, length):
"""Return a (level_length, final_hash) pair for a truncation
of the hashes to the given length."""
if length == self.length:
return self.level
level = self.level[:length >> self.depth_higher]
leaf_start = self._leaf_start(length)
count = min(self._segment_length(), length - leaf_start)
hashes = await self.source_func(leaf_start, count)
level += self._level(hashes)
return level
async def initialize(self, length):
"""Call to initialize the cache to a source of given length."""
self.length = length
self.depth_higher = self.merkle.tree_depth(length) // 2
self.level = self._level(await self.source_func(0, length))
self.initialized.set()
def truncate(self, length):
"""Truncate the cache so it covers no more than length underlying
hashes."""
if not isinstance(length, int):
raise TypeError('length must be an integer')
if length <= 0:
raise ValueError('length must be positive')
if length >= self.length:
return
length = self._leaf_start(length)
self.length = length
self.level[length >> self.depth_higher:] = []
async def branch_and_root(self, length, index):
"""Return a merkle branch and root. Length is the number of
hashes used to calculate the merkle root, index is the position
of the hash to calculate the branch of.
index must be less than length, which must be at least 1."""
if not isinstance(length, int):
raise TypeError('length must be an integer')
if not isinstance(index, int):
raise TypeError('index must be an integer')
if length <= 0:
raise ValueError('length must be positive')
if index >= length:
raise ValueError('index must be less than length')
await self.initialized.wait()
await self._extend_to(length)
leaf_start = self._leaf_start(index)
count = min(self._segment_length(), length - leaf_start)
leaf_hashes = await self.source_func(leaf_start, count)
if length < self._segment_length():
return self.merkle.branch_and_root(leaf_hashes, index)
level = await self._level_for(length)
return self.merkle.branch_and_root_from_level(
level, leaf_hashes, index, self.depth_higher)
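
if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module; the leaf values are
    # arbitrary example data): build a branch for one leaf and verify that the
    # recomputed root matches.
    merkle = Merkle()
    leaves = [double_sha256(bytes([i])) for i in range(5)]
    branch, root = merkle.branch_and_root(leaves, 2)
    assert merkle.root_from_proof(leaves[2], branch, 2) == root
    print('merkle proof verified')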
|
py | 1a42b1e788fb0f9d1e7ad6e6d2f4be8aa7dca06b | """StorageTableSeeder Seeder."""
from masoniteorm.seeds import Seeder
from app.Storage import Storage
class StorageTableSeeder(Seeder):
def run(self):
"""Run the database seeds."""
Storage.create({
"storage_name": "blank",
"storage_brand": "blank",
"storage_type": "blank",
"storage_size": "blank",
"storage_price": 0,
"storage_img": "blank"
})
# NVMe
Storage.create({
"storage_name": "WD Black SN850",
"storage_brand": "Western Digital",
"storage_type": "NVMe SSD",
"storage_size": "1TB",
"storage_price": 164,
"storage_img": "https://i.imgur.com/QJVjs8j.jpg"
})
Storage.create({
"storage_name": "Samsung 970 Evo Plus",
"storage_brand": "Samsung",
"storage_type": "NVMe SSD",
"storage_size": "1TB",
"storage_price": 138,
"storage_img": "https://i.imgur.com/lhV4mhF.jpg"
})
Storage.create({
"storage_name": "Sabrent Rocket 4 Plus",
"storage_brand": "Sabrent",
"storage_type": "NVMe SSD",
"storage_size": "1TB",
"storage_price": 159,
"storage_img": "https://i.imgur.com/Ax9v8w4.jpg"
})
Storage.create({
"storage_name": "Samsung 980 Pro",
"storage_brand": "Samsung",
"storage_type": "NVMe SSD",
"storage_size": "1TB",
"storage_price": 184,
"storage_img": "https://i.imgur.com/HEWuIQF.jpg"
})
Storage.create({
"storage_name": "Crucial P5 Plus",
"storage_brand": "Crucial",
"storage_type": "NVMe SSD",
"storage_size": "1TB",
"storage_price": 149,
"storage_img": "https://i.imgur.com/XI7G1bA.jpg"
})
# SATA SSD
Storage.create({
"storage_name": "Samsung 870 EVO",
"storage_brand": "Samsung",
"storage_type": "SATA SSD",
"storage_size": "1TB",
"storage_price": 119,
"storage_img": "https://i.imgur.com/N2CWMLW.jpg"
})
Storage.create({
"storage_name": "Crucial MX500",
"storage_brand": "Crucial",
"storage_type": "SATA SSD",
"storage_size": "1TB",
"storage_price": 99,
"storage_img": "https://i.imgur.com/MUvepKg.jpg"
})
|
py | 1a42b1eced9ce46e6b1f264b56da0f61627031a8 | import re
from .helpers import fuzzy
class ProcessSpeech:
def __init__(self, pa, localize, command, default_cast):
self.pa = pa
self.command = command
self.localize = localize
self.device = default_cast
self.tv_keys = localize["shows"] + localize["season"]["keywords"] + localize["episode"]["keywords"]
self.music_keys = localize["music"] + localize["artists"] + localize["albums"] + localize["tracks"]
self.process_command()
@property
def results(self):
results = {}
for x in ["media", "device", "season", "episode", "latest", "unwatched", "random", "ondeck", "control", "library"]:
results[x] = getattr(self, x, None)
return results
def process_command(self):
controls = self.localize["controls"]
pre_command = self.command
for control in controls:
ctrl = [controls[control]] if isinstance(controls[control], str) else controls[control]
for c in ctrl:
if self.command.startswith(c):
control_check = self.command.replace(c, "").strip()
if control_check == "":
self.control = control
return
device = fuzzy(control_check, self.pa.device_names)
self.find_replace("separator")
if device[0] in ["watched", "deck", "on watched", "on deck"]:
continue
elif device[1] > 60 and self.command.replace(device[0].lower(), "").strip() == c:
self.device = device[0]
self.control = control
return
self.command = pre_command
self.library = self.get_library()
self.find_replace("play_start")
for item in ["random", "latest", "unwatched", "ondeck"]:
setattr(self, item, self.find_replace(item))
for item in ["season", "episode"]:
if self.find_replace(item, False):
self.library = self.pa.media["shows"]
setattr(self, item, self.get_season_episode_num(self.localize[item]))
self.find_replace(item)
for item in ["artist", "album", "track", "playlist"]:
if self.find_replace(f"music_{item}"):
self.library = self.pa.media[f"{item}s"]
self.get_media_and_device()
def get_library(self):
cmd = self.command
for device in self.pa.device_names:
if device.lower() in cmd:
cmd = cmd.replace(device.lower(), "")
if any(word in cmd for word in self.tv_keys):
return self.pa.media["shows"]
if any(word in cmd for word in self.music_keys):
return self.pa.media["tracks"]
for item in ["movies", "artists", "albums", "tracks", "playlists"]:
if any(word in cmd for word in self.localize[item]):
return self.pa.media[item]
def is_device(self, media_list, separator):
split = self.command.split(separator)
full_score = fuzzy(self.command, media_list)[1]
split_score = fuzzy(self.command.replace(split[-1], "")[0], media_list)[1]
cast_score = fuzzy(split[-1], self.pa.device_names)[1]
return full_score < split_score or full_score < cast_score
def get_media_and_device(self):
media = None
for separator in self.localize["separator"]["keywords"]:
if separator in self.command:
self.find_replace("separator", True, separator)
if self.command.strip().startswith(separator + " "):
self.device = self.command.replace(separator, "").strip()
return
separator = f" {separator} "
if separator in self.command:
for item in ["show", "movie", "artist", "album", "track", "playlist", "all"]:
if item == "all" or self.library == self.pa.media[f"{item}s"]:
self.device = self.is_device(self.pa.media[f"{item}_titles"], separator)
if self.device:
split = self.command.split(separator)
self.command = self.command.replace(separator + split[-1], "")
self.device = split[-1]
self.find_replace("shows")
self.find_replace("movies")
for key in self.music_keys:
if not self.command.replace(key, ""):
self.command = self.command.replace(key, "")
lib = None if not getattr(self, "library", None) else getattr(self, "library")[0]
if self.find_replace("music_separator", False) and getattr(lib, "type", None) in ["artist", "album", "track", None]:
self.media = self.media_by_artist(lib) or self.command
else:
self.media = self.command
def media_by_artist(self, lib):
artist_media = None
for separator in self.localize["music_separator"]["keywords"]:
if separator in self.command:
self.find_replace("music_separator", True, separator)
split = self.command.split(f" {separator} ")
artist = fuzzy(split[-1], self.pa.media["artist_titles"])
if artist[1] > 60:
artist_albums = self.pa.server.search(artist[0], "album")
artist_album_titles = [x.title for x in artist_albums]
artist_tracks = self.pa.server.search(artist[0], "track")
artist_track_tracks = [x.title for x in artist_tracks]
if not lib:
artist_media = fuzzy(split[0], artist_album_titles + artist_track_tracks)
if artist_media[1] > 60:
return next((x for x in artist_albums + artist_tracks if artist_media[0] in getattr(x, "title", "")), None)
elif lib.type == "album":
artist_media = fuzzy(split[0], artist_album_titles)
if artist_media[1] > 60:
return next((x for x in artist_albums if artist_media[0] in getattr(x, "title", "")), None)
elif lib.type == "track":
artist_media = fuzzy(split[0], artist_track_tracks)
if artist_media[1] > 60:
return next((x for x in artist_tracks if artist_media[0] in getattr(x, "title", "")), None)
return self.command
def find_replace(self, item, replace=True, replacement=""):
item = self.localize[item]
if isinstance(item, str):
item = {"keywords": [item]}
elif isinstance(item, list):
item = {"keywords": item}
if all(keyword not in self.command for keyword in item["keywords"]):
return False
if replace:
if replacement:
replacement = f" {replacement} "
for keyword in item["keywords"]:
self.command = f" {self.command} "
for pre in item.get("pre", []):
self.command = self.command.replace(f"{pre} {keyword}", replacement)
for post in item.get("post", []):
self.command = self.command.replace(f"{keyword} {post}", replacement)
if keyword in self.command:
self.command = self.command.replace(f" {keyword} ", replacement)
self.command = self.command.strip()
self.command = " ".join(self.command.split())
return True
def convert_ordinals(self, item):
match = ""
matched = ""
ordinals = self.localize["ordinals"]
for word in item["keywords"]:
for ordinal in ordinals.keys():
if ordinal not in ("pre", "post") and ordinal in self.command:
match_before = re.search(fr"({ordinal})\s*({word})", self.command)
match_after = re.search(fr"({word})\s*({ordinal})", self.command)
if match_before:
match = match_before
matched = match.group(1)
if match_after:
match = match_after
matched = match.group(2)
if match:
replacement = match.group(0).replace(matched, ordinals[matched])
self.command = self.command.replace(match.group(0), replacement)
for pre in ordinals["pre"]:
if f"{pre} {match.group(0)}" in self.command:
self.command = self.command.replace(f"{pre} {match.group(0)}", replacement)
for post in ordinals["post"]:
if f"{match.group(0)} {post}" in self.command:
self.command = self.command.replace(f"{match.group(0)} {post}", replacement)
return self.command.strip()
def get_season_episode_num(self, item):
self.command = self.convert_ordinals(item)
phrase = ""
number = None
for keyword in item["keywords"]:
if keyword in self.command:
phrase = keyword
for pre in item["pre"]:
if pre in self.command:
regex = fr"(\d+\s+)({pre}\s+)({phrase}\s+)"
if re.search(regex, self.command):
self.command = re.sub(regex, fr"{phrase} \1 ", self.command)
else:
self.command = re.sub(
fr"({pre}\s+)({phrase}\s+)(\d+\s+)",
fr"{phrase} \3",
self.command,
)
self.command = re.sub(
fr"({phrase}\s+)(\d+\s+)({pre}\s+)",
fr"{phrase} \2",
self.command,
)
for post in item["post"]:
if post in self.command:
regex = fr"({phrase}\s+)({post}\s+)(\d+\s+)"
if re.search(regex, self.command):
self.command = re.sub(regex, fr"{phrase} \3", self.command)
else:
self.command = re.sub(
fr"(\d+\s+)({phrase}\s+)({post}\s+)",
fr"{phrase} \1",
self.command,
)
self.command = re.sub(
fr"({phrase}\s+)(\d+\s+)({post}\s+)",
fr" {phrase} \2",
self.command,
)
match = re.search(fr"(\d+)\s*({phrase}|^)|({phrase}|^)\s*(\d+)", self.command)
if match:
number = match.group(1) or match.group(4)
self.command = self.command.replace(match.group(0), "").strip()
return number
|
py | 1a42b21afd4f0cf385f7d0e96c76e7a6322af481 | import math
import time
import torch
import numpy as np
import pandas as pd
from torch import nn
import editdistance as ed
import soundfile as sf
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
#PRESERVE_INDICES = len(['<pad>', '<space>'])
PRESERVE_INDICES = len(['<pad>', '<space>', '<eos>'])
#IGNORE_INDICES = [0, 1, 41]
IGNORE_INDICES = [0, 1, 2, 42]
SEP = '\t'
class Timer():
''' Timer for recording training time distribution. '''
def __init__(self):
self.prev_t = time.time()
self.clear()
def set(self):
self.prev_t = time.time()
def cnt(self,mode):
self.time_table[mode] += time.time()-self.prev_t
self.set()
if mode =='bw':
self.click += 1
def show(self):
total_time = sum(self.time_table.values())
self.time_table['avg'] = total_time/self.click
self.time_table['rd'] = 100*self.time_table['rd']/total_time
self.time_table['fw'] = 100*self.time_table['fw']/total_time
self.time_table['bw'] = 100*self.time_table['bw']/total_time
msg = '{avg:.3f} sec/step (rd {rd:.1f}% | fw {fw:.1f}% | bw {bw:.1f}%)'.format(**self.time_table)
self.clear()
return msg
def clear(self):
self.time_table = {'rd':0,'fw':0,'bw':0}
self.click = 0
# Reference : https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/e2e_asr.py#L168
def init_weights(module):
# Exceptions
if type(module) == nn.Embedding:
module.weight.data.normal_(0, 1)
else:
for p in module.parameters():
data = p.data
if data.dim() == 1:
# bias
data.zero_()
elif data.dim() == 2:
# linear weight
n = data.size(1)
stdv = 1. / math.sqrt(n)
data.normal_(0, stdv)
elif data.dim() in [3,4]:
# conv weight
n = data.size(1)
for k in data.size()[2:]:
n *= k
stdv = 1. / math.sqrt(n)
data.normal_(0, stdv)
else:
raise NotImplementedError
def init_gate(bias):
n = bias.size(0)
start, end = n // 4, n // 2
bias.data[start:end].fill_(1.)
return bias
def freq_loss(pred, label, sample_rate, n_mels, loss, differential_loss, emphasize_linear_low, p=1):
"""
Args:
pred: model output
label: target
loss: `l1` or `mse`
differential_loss: use differential loss or not, see here `https://arxiv.org/abs/1909.10302`
emphasize_linear_low: emphasize the low-freq. part of linear spectrogram or not
Return:
loss
"""
# ToDo : Tao
# pred -> BxTxD predicted mel-spec or linear-spec
# label-> same shape
# return loss for loss.backward()
if loss == 'l1':
criterion = torch.nn.functional.l1_loss
elif loss == 'mse':
criterion = torch.nn.functional.mse_loss
else:
raise NotImplementedError
cutoff_freq = 3000
# Repeat for postnet
#_, chn, _, dim = pred.shape
dim = pred.shape[-1]
#label = label.unsqueeze(1).repeat(1,chn,1,1)
loss_all = criterion(p * pred, p * label)
if dim != n_mels and emphasize_linear_low:
# Linear
n_priority_freq = int(dim * (cutoff_freq / (sample_rate/2)))
pred_low = pred[:, :, :n_priority_freq]
label_low = label[:, :, :n_priority_freq]
loss_low = criterion(p * pred_low, p * label_low)
#loss_low = torch.nn.functional.mse_loss(p * pred_low, p * label_low)
loss_all = 0.5 * loss_all + 0.5 * loss_low
if dim == n_mels and differential_loss:
pred_diff = pred[:, 1:, :] - pred[:, :-1, :]
label_diff = label[:, 1:, :] - label[:, :-1, :]
loss_all += 0.5 * criterion(p * pred_diff, p * label_diff)
return loss_all
def feat_to_fig(feat):
if feat is None:
return None
# feat TxD tensor
data = _save_canvas(feat.numpy().T)
return torch.FloatTensor(data),"HWC"
def data_to_bar(data, gt_data, tok_size, tick, zero_pad_tok=True):
if len(gt_data) == 0:
return None
# Hack to get discrete bar graph
cnts = [data.count(i)/len(data) for i in range(tok_size)]
gt_cnts = [gt_data.count(i)/len(gt_data) for i in range(tok_size)]
if zero_pad_tok:
cnts[0] = 0
gt_cnts[0] = 0
data = _save_canvas( (cnts,gt_cnts), meta=(range(tok_size),tick))
return torch.FloatTensor(data),"HWC"
def _save_canvas(data, meta=None):
fig, ax = plt.subplots(figsize=(16, 10))
if meta is None:
ax.imshow(data, aspect="auto", origin="lower")
else:
ax.bar(meta[0],data[0],tick_label=meta[1],fc=(0, 0, 1, 0.5))
ax.bar(meta[0],data[1],tick_label=meta[1],fc=(1, 0, 0, 0.5))
fig.canvas.draw()
# Note : torch tb add_image takes color as [0,1]
data = np.array(fig.canvas.renderer._renderer)[:,:,:-1]/255.0
plt.close(fig)
return data
# Reference : https://stackoverflow.com/questions/579310/formatting-long-numbers-as-strings-in-python
def human_format(num):
magnitude = 0
while num >= 1000:
magnitude += 1
num /= 1000.0
# add more suffixes if you need them
return '{:3}{}'.format(num, [' ', 'K', 'M', 'G', 'T', 'P'][magnitude])
def cal_per(pred, truth):
# Calculate error rate of a batch
if pred is None:
return np.nan
elif len(pred.shape)>=3:
pred = pred.argmax(dim=-1)
er = []
for p,t in zip(pred.cpu(),truth.cpu()):
p = p.tolist()
p = [v for i,v in enumerate(p) if (i==0 or v!=p[i-1]) and v not in IGNORE_INDICES] # Trim repeat
t = [v for v in t.tolist() if v not in IGNORE_INDICES]
er.append(float(ed.eval( p,t))/len(t))
return sum(er)/len(er)
def cal_ppx(prob):
prob = prob.cpu()
prob_len = torch.sum(prob.sum(dim=-1)!=0,dim=-1,keepdim=True).float()
entropy = -torch.sum(prob*(prob+1e-10).log2(),dim=-1) # 2-based log
entropy = torch.mean(entropy.sum(dim=-1)/prob_len)
return torch.pow(torch.FloatTensor([2]),entropy)
# Reference :
# https://github.com/jadore801120/attention-is-all-you-need-pytorch/blob/7e14834dd5e48bb1e6c74581c55684405e821298/transformer/Models.py
def get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None):
''' Sinusoid position encoding table '''
def cal_angle(position, hid_idx):
return position / np.power(10000, 2 * (hid_idx // 2) / d_hid)
def get_posi_angle_vec(position):
return [cal_angle(position, hid_j) for hid_j in range(d_hid)]
sinusoid_table = np.array([get_posi_angle_vec(pos_i) for pos_i in range(n_position)])
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
if padding_idx is not None:
# zero vector for padding dimension
sinusoid_table[padding_idx] = 0.
return torch.FloatTensor(sinusoid_table)
def get_audio_feat_mask(actual_lengths, n_frames_per_step, dim):
"""
Return:
mask with 1 for padded part and 0 for non-padded part
"""
# padded length = actual lengths + at least 1 frame padded
padded_lengths = actual_lengths + n_frames_per_step-(actual_lengths%n_frames_per_step)
max_len = torch.max(padded_lengths).item()
if max_len % n_frames_per_step != 0:
max_len += n_frames_per_step - max_len % n_frames_per_step
assert max_len % n_frames_per_step == 0
ids = torch.arange(0, max_len).to(actual_lengths.device)
mask = (ids < padded_lengths.unsqueeze(1)).bool()
mask = ~mask
# (D, B, T)
mask = mask.expand(dim, mask.size(0), mask.size(1))
# (B, T, D)
mask = mask.permute(1, 2, 0)
return mask
def get_seq_mask(lens, max_len=None):
''' Mask for given sequence, return shape [B,T,1]'''
batch_size = len(lens)
max_l = lens.max() if max_len is None else max_len
mask = torch.arange(max_l).unsqueeze(0).repeat(batch_size,1).to(lens.device)>lens.unsqueeze(1)
return mask.unsqueeze(-1)
def read_phn_attr(phn_attr_pth, neg_val=0):
df = pd.read_csv(phn_attr_pth, index_col=0, sep=SEP)
attr = df.to_numpy()
attr[attr==0] = neg_val
attr = np.concatenate([np.zeros((PRESERVE_INDICES, attr.shape[1])), attr])
return attr
def get_audio_duration(path):
y, sr = sf.read(path)
return len(y) / sr
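
if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module; shapes and values are
    # arbitrary): L1 spectrogram loss between a random prediction and target with
    # the differential term enabled.
    pred = torch.rand(2, 50, 80)     # (batch, time, n_mels)
    target = torch.rand(2, 50, 80)
    print(freq_loss(pred, target, sample_rate=16000, n_mels=80, loss='l1',
                    differential_loss=True, emphasize_linear_low=False))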
|
py | 1a42b4a388af5e80b0d53b629516cfc5d51167e0 | """
@author: hugonnet
compile the differences to IceBridge and ICESat into elevation biases, standardized uncertainties, and elevation change biases for all regions and parameters of interest
"""
import os
import pandas as pd
import numpy as np
from pybob.ddem_tools import nmad
from glob import glob
import scipy.interpolate
import scipy.optimize  # used for curve_fit below
from sklearn.linear_model import LinearRegression
from pybob.bob_tools import mkdir_p
import pyddem.fit_tools as ft
# dir_valid = '/home/atom/ongoing/work_worldwide/validation/icesat'
# dir_valid_out = '/home/atom/ongoing/work_worldwide/validation/compiled'
dir_valid = '/data/icesat/travail_en_cours/romain/results/valid'
dir_valid_out = '/data/icesat/travail_en_cours/romain/results/valid_compil_stable'
mkdir_p(dir_valid_out)
list_fn_valid = glob(os.path.join(dir_valid,'*.csv'),recursive=True)
print('Found validation file list:')
print(list_fn_valid)
print('Concatenating data...')
df = pd.DataFrame()
for fn_valid in list_fn_valid:
tmp_df = pd.read_csv(fn_valid)
reg = int(os.path.basename(fn_valid).split('_')[2])
if os.path.basename(fn_valid).split('_')[1] == 'ICESat':
sensor = 'ICS'
else:
sensor = 'IB'
tmp_df = tmp_df.assign(reg=reg,sensor=sensor)
df = df.append(tmp_df)
#we want time series minus validation, easier to conceptualize
df.zsc = -df.zsc
df.dh = -df.dh
df.dh_ref = -df.dh_ref
#glacier only
df = df[np.abs(df.dh_ref)<300]
df = df[df.pos==1]
#remove very large outliers
nmad_gla = nmad(df.zsc)
df=df[np.abs(df.zsc-np.nanmedian(df.zsc))<10*nmad_gla]
def bin_valid_df_by_vals(df,bins,bins_val,list_var=['dh','zsc'],ls_dvardt=True,weight_ib=1./40,return_ls=False):
mid_bin, med, std, dvardt, dvardt_2std, ns_ics, ns_ib = ([] for i in range(7))
for i in range(len(bins)-1):
ind = np.logical_and(bins_val >= bins[i],bins_val < bins[i+1])
df_ind = df[ind]
nics = np.count_nonzero(df_ind.sensor == 'ICS')
nib=np.count_nonzero(df_ind.sensor == 'IB')
ns_ics.append(nics)
ns_ib.append(nib)
mid_bin.append(bins[i] + 0.5*(bins[i+1]-bins[i]))
sub_med = []
sub_std = []
sub_dvardt = []
sub_dvardt_2std = []
sub_mu = []
sub_w = []
sub_t = []
for var in list_var:
if weight_ib is not None:
if nics != 0 or nib !=0:
sub_med.append(np.nansum((np.nanmedian(df_ind[df_ind.sensor=='ICS'][var])*nics,np.nanmedian(df_ind[df_ind.sensor=='IB'][var])*nib*weight_ib))/(nics+nib*weight_ib))
sub_std.append(np.nansum((nmad(df_ind[df_ind.sensor == 'ICS'][var]) * nics,nmad(df_ind[df_ind.sensor == 'IB'][var]) * nib * weight_ib)) / (nics + nib * weight_ib))
else:
sub_med.append(np.nan)
sub_std.append(np.nan)
else:
sub_med.append(np.nanmedian(df_ind[var]))
sub_std.append(nmad(df_ind[var].values))
if ls_dvardt:
list_t = sorted(list(set(list(df_ind.t.values))))
ftime_delta = np.array(
[(np.datetime64(t) - np.datetime64('{}-01-01'.format(int(2000)))).astype(int) / 365.2422 for t in list_t])
mu = []
w = []
for val_t in list_t:
ind_t = df_ind.t.values == val_t
df_ind_t = df_ind[ind_t]
nics_t = np.count_nonzero(df_ind_t.sensor == 'ICS')
nib_t = np.count_nonzero(df_ind_t.sensor == 'IB')
if np.count_nonzero(ind_t) > 20:
med_t = np.nansum((np.nanmedian(df_ind_t[df_ind_t.sensor=='ICS'][var])*nics_t,np.nanmedian(df_ind_t[df_ind_t.sensor=='IB'][var])*nib_t*weight_ib))/(nics_t+nib_t*weight_ib)
mu.append(med_t)
std_t = np.nansum((nmad(df_ind_t[df_ind_t.sensor == 'ICS'][var]) * nics_t,nmad(df_ind_t[df_ind_t.sensor == 'IB'][var]) * nib_t * weight_ib)) / (nics_t + nib_t * weight_ib)
w.append(std_t/np.sqrt(nics_t+nib_t*weight_ib))
else:
mu.append(np.nan)
w.append(np.nan)
mu = np.array(mu)
w = np.array(w)
if np.count_nonzero(~np.isnan(mu)) > 5:
# reg = LinearRegression().fit(ftime_delta[~np.isnan(mu)].reshape(-1, 1),
# mu[~np.isnan(mu)].reshape(-1, 1))
beta1, _ , incert_slope, _, _ = ft.wls_matrix(ftime_delta[~np.isnan(mu)], mu[~np.isnan(mu)], 1. / w[~np.isnan(mu)]**2,
conf_slope=0.95)
# fig = plt.figure()
# plt.scatter(ftime_delta,mu_dh,color='red')
# plt.plot(np.arange(0,10,0.1),reg.predict(np.arange(0,10,0.1).reshape(-1,1)),color='black',label=reg)
# plt.ylim([-20,20])
# plt.text(5,0,str(reg.coef_[0]))
# plt.legend()
# coef = reg.coef_[0][0]
coef = beta1
sub_dvardt.append(coef)
sub_dvardt_2std.append(incert_slope)
else:
sub_dvardt.append(np.nan)
sub_dvardt_2std.append(np.nan)
sub_mu.append(mu)
sub_w.append(w)
sub_t.append(ftime_delta)
med.append(sub_med)
std.append(sub_std)
dvardt.append(sub_dvardt)
dvardt_2std.append(sub_dvardt_2std)
df_out = pd.DataFrame()
df_out = df_out.assign(mid_bin=mid_bin, ns_ics=ns_ics, ns_ib=ns_ib)
for var in list_var:
df_out['med_' + var] = list(zip(*med))[list_var.index(var)]
df_out['nmad_' + var] = list(zip(*std))[list_var.index(var)]
if ls_dvardt:
df_out['d'+var+'_dt'] = list(zip(*dvardt))[list_var.index(var)]
df_out['d'+var+'_dt_2std'] = list(zip(*dvardt_2std))[list_var.index(var)]
if return_ls and ls_dvardt:
df_ls = pd.DataFrame()
for var in list_var:
# print(len(sub_mu))
df_ls['mu_' + var] = sub_mu[list_var.index(var)]
df_ls['w_' + var] = sub_w[list_var.index(var)]
df_ls['t_' + var] = sub_t[list_var.index(var)]
return df_out, df_ls
else:
return df_out
def bin_valid_df_by_season(df,var='dh',weight_ib=1./40):
date=df.t
season_month_bins = np.arange(1,13,1)
mon = pd.DatetimeIndex(date).month.values
med, std, mid_bin, ns_ics, ns_ib = ([] for i in range(5))
for i in range(len(season_month_bins)):
ind = (mon == season_month_bins[i])
df_ind = df[ind]
nics = np.count_nonzero(df_ind.sensor == 'ICS')
nib = np.count_nonzero(df_ind.sensor == 'IB')
ns_ics.append(nics)
ns_ib.append(nib)
# med.append(np.nanmedian(df_ind[var].values))
# std.append(nmad(df_ind[var].values))
if nics != 0 or nib != 0:
med.append(np.nansum((np.nanmedian(df_ind[df_ind.sensor == 'ICS'][var]) * nics,
np.nanmedian(df_ind[df_ind.sensor == 'IB'][var]) * nib * weight_ib)) / (nics + nib * weight_ib))
std.append(np.nansum((nmad(df_ind[df_ind.sensor == 'ICS'][var]) * nics,
nmad(df_ind[df_ind.sensor == 'IB'][var]) * nib * weight_ib)) / (
nics + nib * weight_ib))
else:
med.append(np.nan)
std.append(np.nan)
mid_bin.append(season_month_bins[i])
df_out = pd.DataFrame()
df_out = df_out.assign(seas_dec=mid_bin,ns_ics=ns_ics,ns_ib=ns_ib)
df_out['med_'+var]=med
df_out['nmad_'+var]=std
return df_out
#1/ BEFORE SEASONAL CORRECTIONS
print('Deriving statistics without seasonal corrections')
#normalize elevation by region
list_reg = sorted(list(set(list(df.reg))))
for reg in list_reg:
min_elev = np.nanpercentile(df[df.reg == reg].h,95)
max_elev = np.nanpercentile(df[df.reg == reg].h,5)
df.loc[df.reg == reg,'h'] = (df.loc[df.reg == reg,'h'] - min_elev)/(max_elev-min_elev)
ind_0 = np.logical_and(df.reg==reg,df.h<0)
df.loc[ind_0,'h']=np.nan
ind_1 = np.logical_and(df.reg==reg,df.h>1)
df.loc[ind_1,'h']=np.nan
bin_dt = [0,60,120,180,240,300,360,540,720,900,1080]
dt = bin_valid_df_by_vals(df, bin_dt, np.abs(df.dt))
dt['type'] = 'dt'
bin_t = [np.datetime64('20'+str(i).zfill(2)+'-01-01') for i in range(21)]
t = bin_valid_df_by_vals(df,bin_t,pd.to_datetime(df.t))
t['type'] = 't'
bin_h = np.arange(0,1.1,0.1)
h = bin_valid_df_by_vals(df,bin_h,df.h)
h['type'] = 'h'
bin_dh_tot = [-150,-100,-50,-35,-15,-10,-5,0,5,10,15]
dh_tot = bin_valid_df_by_vals(df, bin_dh_tot, df.dh_tot)
dh_tot['type'] = 'dh_tot'
bin_reg = np.arange(1, 21)
r = bin_valid_df_by_vals(df, bin_reg, df.reg)
r['type'] = 'reg'
bin_dh = np.arange(-12,13,2)
dh = bin_valid_df_by_vals(df, bin_dh, df.dh)
dh['type'] ='dh'
bin_zsc = np.arange(-3,3.1,0.5)
zsc = bin_valid_df_by_vals(df, bin_zsc, df.zsc)
zsc['type'] ='zsc'
bin_all = [min(df.zsc),max(df.zsc)]
a, a_ls = bin_valid_df_by_vals(df,bin_all,df.zsc,return_ls=True)
a['type'] = 'all'
df_north = df[df.reg <=15]
bin_months = np.arange(1, 14, 2)
months = pd.DatetimeIndex(df_north.t).month.values
m_n = bin_valid_df_by_vals(df_north,bin_months,months)
m_n['type'] = 'seas_north'
df_south = df[df.reg > 15]
bin_months = np.arange(1, 14, 2)
months = pd.DatetimeIndex(df_south.t).month.values
m_s = bin_valid_df_by_vals(df_south,bin_months,months)
m_s['type'] = 'seas_south'
df_init = pd.concat([dt,t,h,dh_tot,r,dh,zsc,a,m_n,m_s])
df_init['seas_corr'] = 0
fn_out = os.path.join(dir_valid_out,'valid_ICS_IB_all_bins_all_ls_init.csv')
a_ls.to_csv(fn_out)
#2/ COMPUTE SEASONAL BIASES BY REGION
print('Computing and applying seasonal corrections')
list_s = []
list_s2 = []
for reg in list(set(list(df.reg))):
df_reg = df[df.reg == reg]
# df_reg = df_reg[df_reg.sensor=='ICS']
s = bin_valid_df_by_season(df_reg)
coefs1, _ = scipy.optimize.curve_fit(lambda t, a, b, c: a ** 2 * np.sin(t * 2 * np.pi / 12 + c) + b, s.seas_dec[~np.isnan(s.med_dh)].values,
s.med_dh[~np.isnan(s.med_dh)].values)
s2 = bin_valid_df_by_season(df_reg,var='zsc')
coefs2, _ = scipy.optimize.curve_fit(lambda t, a, b, c: a ** 2 * np.sin(t * 2 * np.pi / 12 + c) + b, s2.seas_dec[~np.isnan(s2.med_zsc)].values,
s2.med_zsc[~np.isnan(s2.med_zsc)].values)
season_month_bins = np.arange(1, 13, 1)
mon = pd.DatetimeIndex(df.t).month.values
for i in range(len(season_month_bins)):
ind = np.logical_and(mon == season_month_bins[i],df.reg==reg)
df.loc[ind,'dh'] -= coefs1[0] ** 2 * np.sin(season_month_bins[i] * 2 * np.pi / 12 + coefs1[2]) + coefs1[1]
df.loc[ind,'zsc'] -= coefs2[0] ** 2 * np.sin(season_month_bins[i] * 2 * np.pi / 12 + coefs2[2]) + coefs2[1]
s['reg'] = reg
s['var'] = 'dh'
s2['reg']=reg
s2['var']='zsc'
s['amp'] = coefs1[0]**2
s['phase'] = coefs1[2]*12/(2*np.pi) % 12
s['h_shift'] = coefs1[1]
s2['amp_zsc'] = coefs2[0]**2
s2['phase_zsc'] = coefs2[2]*12/(2*np.pi) % 12
s2['h_shift_zsc'] = coefs2[1]
list_s.append(s)
list_s2.append(s2)
#
# df_north = df[df.reg <=15]
# df_south = df[df.reg > 15]
#
# s_n_dh = bin_valid_df_by_season(df_north)
# s_n_dh['hemi'] = 'north'
# s_n_dh['var'] = 'dh'
# s_n_zsc = bin_valid_df_by_season(df_north,var='zsc')
# s_n_zsc['hemi'] = 'north'
# s_n_zsc['var'] = 'zsc'
#
# s_s_dh = bin_valid_df_by_season(df_south)
# s_s_dh['hemi'] = 'south'
# s_s_dh['var'] = 'dh'
# s_s_zsc = bin_valid_df_by_season(df_south,var='zsc')
# s_s_zsc['hemi'] = 'south'
# s_s_zsc['var'] = 'zsc'
#
# s_ns = pd.concat([s_n_dh,s_n_zsc,s_s_dh,s_s_zsc])
# fn_seas_ns = os.path.join(dir_valid_out,'valid_ICS_IB_seas_NS.csv')
# s_ns.to_csv(fn_seas_ns)
df_seas = pd.concat(list_s+list_s2)
fn_seas = os.path.join(dir_valid_out,'valid_ICS_IB_seas_corr_final_weight.csv')
df_seas.to_csv(fn_seas)
#
# #3/ AFTER SEASONAL CORRECTIONS
print('Deriving statistics after seasonal corrections')
bin_dt = [0,60,120,180,240,300,360,540,720,900,1080]
dt = bin_valid_df_by_vals(df, bin_dt, np.abs(df.dt))
dt['type'] = 'dt'
bin_t = [np.datetime64('20'+str(i).zfill(2)+'-01-01') for i in range(21)]
t = bin_valid_df_by_vals(df,bin_t,pd.to_datetime(df.t))
t['type'] = 't'
bin_h = np.arange(0,1.1,0.1)
h = bin_valid_df_by_vals(df,bin_h,df.h)
h['type'] = 'h'
bin_dh_tot = [-150,-100,-50,-35,-15,-10,-5,0,5,10,15]
dh_tot = bin_valid_df_by_vals(df, bin_dh_tot, df.dh_tot)
dh_tot['type'] = 'dh_tot'
bin_reg = np.arange(1, 21)
r = bin_valid_df_by_vals(df, bin_reg, df.reg)
r['type'] = 'reg'
bin_dh = np.arange(-12,13,2)
dh = bin_valid_df_by_vals(df, bin_dh, df.dh)
dh['type'] ='dh'
bin_zsc = np.arange(-3,3.1,0.5)
zsc = bin_valid_df_by_vals(df, bin_zsc, df.zsc)
zsc['type'] ='zsc'
bin_all = [min(df.zsc),max(df.zsc)]
a, a_ls = bin_valid_df_by_vals(df,bin_all,df.zsc,return_ls=True)
a['type'] = 'all'
df_north = df[df.reg <=15]
bin_months = np.arange(1, 14, 2)
months = pd.DatetimeIndex(df_north.t).month.values
m_n = bin_valid_df_by_vals(df_north,bin_months,months)
m_n['type'] = 'seas_north'
df_south = df[df.reg > 15]
bin_months = np.arange(1, 14, 2)
months = pd.DatetimeIndex(df_south.t).month.values
m_s = bin_valid_df_by_vals(df_south,bin_months,months)
m_s['type'] = 'seas_south'
df_end = pd.concat([dt,t,h,dh_tot,r,dh,zsc,a,m_n,m_s])
df_end['seas_corr'] = 1
df_out = pd.concat([df_init,df_end])
fn_out = os.path.join(dir_valid_out,'valid_ICS_IB_all_bins_final_weight.csv')
df_out.to_csv(fn_out)
fn_a_ls = os.path.join(dir_valid_out,'valid_ICS_IB_all_bins_final_weight_all_ls.csv')
a_ls.to_csv(fn_a_ls) |
py | 1a42b51323630125641d4ffb83ab652902b01d26 | from django.urls import path
from wren.users.views import user_detail_view, user_redirect_view, user_update_view
app_name = "users"
urlpatterns = [
path("~redirect/", view=user_redirect_view, name="redirect"),
path("~update/", view=user_update_view, name="update"),
path("<str:username>/", view=user_detail_view, name="detail"),
]
|
py | 1a42b52c9c74cd38581352f272b70c58586b36fc | """
Default configurations for action recognition TA3N lightning
"""
import os
from yacs.config import CfgNode as CN
# -----------------------------------------------------------------------------
# Config definition
# -----------------------------------------------------------------------------
_C = CN()
_C.TO_VALIDATE = False # choices = [True, False]
# -----------------------------------------------------------------------------
# Paths
# -----------------------------------------------------------------------------
_C.PATHS = CN()
_C.PATHS.PATH_DATA_ROOT = "data/" # directory where the feature pickles are stored. Depends on users
_C.PATHS.PATH_LABELS_ROOT = "annotations/" # directory where the annotations are stored. Depends on users
_C.PATHS.PATH_EXP_ROOT="model/action-model/" # directory where the checkpoints are to be stored. Depends on users
_C.PATHS.DATASET_SOURCE="source_train" # depends on users
_C.PATHS.DATASET_TARGET="target_train" # depends on users
# training
_C.PATHS.PATH_DATA_SOURCE=os.path.join(_C.PATHS.PATH_DATA_ROOT, _C.PATHS.DATASET_SOURCE)
_C.PATHS.PATH_DATA_TARGET=os.path.join(_C.PATHS.PATH_DATA_ROOT, _C.PATHS.DATASET_TARGET)
_C.PATHS.TRAIN_SOURCE_LIST=os.path.join(_C.PATHS.PATH_LABELS_ROOT, 'EPIC_100_uda_source_train.pkl') # '/domain_adaptation_source_train_pre-release_v3.pkl'
_C.PATHS.TRAIN_TARGET_LIST=os.path.join(_C.PATHS.PATH_LABELS_ROOT, 'EPIC_100_uda_target_train_timestamps.pkl') # '/domain_adaptation_target_train_pre-release_v6.pkl'
_C.PATHS.VAL_LIST=os.path.join(_C.PATHS.PATH_LABELS_ROOT, "EPIC_100_uda_target_test_timestamps.pkl")
_C.PATHS.PATH_EXP=os.path.join(_C.PATHS.PATH_EXP_ROOT, "Testexp")
# validation
_C.PATHS.VAL_DATASET_SOURCE="source_val" # depends on users
_C.PATHS.VAL_DATASET_TARGET="target_val" # depends on users
_C.PATHS.PATH_VAL_DATA_SOURCE=os.path.join(_C.PATHS.PATH_DATA_ROOT, _C.PATHS.VAL_DATASET_SOURCE)
_C.PATHS.PATH_VAL_DATA_TARGET=os.path.join(_C.PATHS.PATH_DATA_ROOT, _C.PATHS.VAL_DATASET_TARGET)
_C.PATHS.VAL_SOURCE_LIST=os.path.join(_C.PATHS.PATH_LABELS_ROOT, "EPIC_100_uda_source_val.pkl")
_C.PATHS.VAL_TARGET_LIST=os.path.join(_C.PATHS.PATH_LABELS_ROOT, "EPIC_100_uda_target_val.pkl")
# -----------------------------------------------------------------------------
# Dataset
# -----------------------------------------------------------------------------
_C.DATASET = CN()
_C.DATASET.DATASET = "epic" # dataset choices = [hmdb_ucf, hmdb_ucf_small, ucf_olympic]
_C.DATASET.NUM_CLASSES = "97,300"
_C.DATASET.NUM_SOURCE= 16115 # number of training data (source)
_C.DATASET.NUM_TARGET= 26115 # number of training data (target)
_C.DATASET.MODALITY = "RGB" # choices = [ALL, RGB, Audio, Flow]
_C.DATASET.FRAME_TYPE = "feature" # choices = [frame]
_C.DATASET.NUM_SEGMENTS = 5 # sample frame # of each video for training
_C.DATASET.VAL_SEGMENTS = 5 # sample frame # of each video for training
_C.DATASET.BASELINE_TYPE = "video" # choices = ['frame', 'tsn']
_C.DATASET.FRAME_AGGREGATION = "trn-m" # method to integrate the frame-level features. choices = [avgpool, trn, trn-m, rnn, temconv]
# ---------------------------------------------------------------------------- #
# Model
# ---------------------------------------------------------------------------- #
_C.MODEL = CN()
_C.MODEL.ADD_FC = 1 # number of shared features
_C.MODEL.FC_DIM = 512 # dimension of shared features
_C.MODEL.ARCH = "TBN" # choices = [resnet50]
_C.MODEL.USE_TARGET = "uSv" # choices = [uSv, Sv, none]
_C.MODEL.SHARE_PARAMS = "Y" # choices = [Y, N]
_C.MODEL.PRED_NORMALIZE = "N" # choices = [Y, N]
_C.MODEL.WEIGHTED_CLASS_LOSS_DA = "N" # choices = [Y, N]
_C.MODEL.WEIGHTED_CLASS_LOSS = "N" # choices = [Y, N]
_C.MODEL.DROPOUT_I = 0.5
_C.MODEL.DROPOUT_V = 0.5
_C.MODEL.NO_PARTIALBN = True
# DA configs
if _C.MODEL.USE_TARGET == "none":
_C.MODEL.EXP_DA_NAME="baseline"
else:
_C.MODEL.EXP_DA_NAME="DA"
_C.MODEL.DIS_DA = "DAN" # choices = [DAN, CORAL, JAN]
_C.MODEL.ADV_POS_0 = "Y" # discriminator for relation features. choices = [Y, N]
_C.MODEL.ADV_DA = "RevGrad" # choices = [None]
_C.MODEL.ADD_LOSS_DA = "attentive_entropy" # choices = [None, target_entropy, attentive_entropy]
_C.MODEL.ENS_DA = None # choices = [None, MCD]
# Attention configs
_C.MODEL.USE_ATTN = "TransAttn" # choices = [None, TransAttn, general]
_C.MODEL.USE_ATTN_FRAME = None # choices = [None, TransAttn, general]
_C.MODEL.USE_BN = None # choices = [None, AdaBN, AutoDIAL]
_C.MODEL.N_ATTN = 1
_C.MODEL.PLACE_DIS = ["Y", "Y", "N"]
_C.MODEL.PLACE_ADV = ["Y", "Y", "Y"]
# ---------------------------------------------------------------------------- #
# Hyperparameters
# ---------------------------------------------------------------------------- #
_C.HYPERPARAMETERS = CN()
_C.HYPERPARAMETERS.ALPHA = 0
_C.HYPERPARAMETERS.BETA = [0.75, 0.75, 0.5]
_C.HYPERPARAMETERS.GAMMA = 0.003 # U->H: 0.003 | H->U: 0.3
_C.HYPERPARAMETERS.MU = 0
# ---------------------------------------------------------------------------- #
# Trainer
# ---------------------------------------------------------------------------- #
_C.TRAINER = CN()
_C.TRAINER.TRAIN_METRIC = "all" # choices = [noun, verb]
_C.TRAINER.FC_DIM = 512 # dimension of shared features
_C.TRAINER.ARCH = "TBN" # choices = [resnet50]
_C.TRAINER.USE_TARGET = "uSv" # choices = [uSv, Sv, none]
_C.TRAINER.SHARE_PARAMS = "Y" # choices = [Y, N]
_C.TRAINER.PRETRAIN_SOURCE = False
_C.TRAINER.VERBOSE = True
_C.TRAINER.DANN_WARMUP = True
# Learning configs
_C.TRAINER.LOSS_TYPE = 'nll'
_C.TRAINER.LR = 0.003
_C.TRAINER.LR_DECAY = 10
_C.TRAINER.LR_ADAPTIVE = None # choices = [None, loss, dann]
_C.TRAINER.LR_STEPS = [10, 20]
_C.TRAINER.MOMENTUM = 0.9
_C.TRAINER.WEIGHT_DECAY = 0.0001
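# per-loader batch sizes; the middle (target) entry is scaled by NUM_TARGET/NUM_SOURCE so both domains are traversed in a comparable number of iterations per epoch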
_C.TRAINER.BATCH_SIZE = [128, int(128*_C.DATASET.NUM_TARGET/_C.DATASET.NUM_SOURCE), 128]
_C.TRAINER.OPTIMIZER_NAME = "SGD" # choices = [SGD, Adam]
_C.TRAINER.CLIP_GRADIENT = 20
_C.TRAINER.PRETRAINED = None
_C.TRAINER.RESUME = ""
_C.TRAINER.RESUME_HP = ""
_C.TRAINER.MIN_EPOCHS = 25
_C.TRAINER.MAX_EPOCHS = 30
_C.TRAINER.ACCELERATOR = "ddp"
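# the experiment folder name below encodes optimizer, parameter sharing, learning rate, batch size, segment count and the DA hyperparameters, so each setting gets its own directory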
_C.PATHS.EXP_PATH = os.path.join(_C.PATHS.PATH_EXP + '_' + _C.TRAINER.OPTIMIZER_NAME + '-share_params_' + _C.MODEL.SHARE_PARAMS + '-lr_' + str(_C.TRAINER.LR) + '-bS_' + str(_C.TRAINER.BATCH_SIZE[0]), _C.DATASET.DATASET + '-'+ str(_C.DATASET.NUM_SEGMENTS) + '-alpha_' + str(_C.HYPERPARAMETERS.ALPHA) + '-beta_' + str(_C.HYPERPARAMETERS.BETA[0])+ '_'+ str(_C.HYPERPARAMETERS.BETA[1])+'_'+ str(_C.HYPERPARAMETERS.BETA[2])+"_gamma_" + str(_C.HYPERPARAMETERS.GAMMA) + "_mu_" + str(_C.HYPERPARAMETERS.MU))
# ---------------------------------------------------------------------------- #
# Tester
# ---------------------------------------------------------------------------- #
_C.TESTER = CN()
_C.TESTER.TEST_TARGET_DATA = os.path.join(_C.PATHS.PATH_DATA_ROOT, "target_test")
_C.TESTER.WEIGHTS = os.path.join(_C.PATHS.EXP_PATH , "checkpoint.pth.tar")
_C.TESTER.NOUN_WEIGHTS = None
_C.TESTER.BATCH_SIZE = 512
_C.TESTER.NOUN_TARGET_DATA = None
_C.TESTER.RESULT_JSON = "test.json"
_C.TESTER.TEST_SEGMENTS = 5 # sample frame # of each video for testing
_C.TESTER.SAVE_SCORES = os.path.join(_C.PATHS.EXP_PATH , "scores")
_C.TESTER.SAVE_CONFUSION = os.path.join(_C.PATHS.EXP_PATH , "confusion_matrix")
_C.TESTER.VERBOSE = True
# ---------------------------------------------------------------------------- #
# Miscellaneous configs
# ---------------------------------------------------------------------------- #
_C.MODEL.N_RNN = 1
_C.MODEL.RNN_CELL = "LSTM"
_C.MODEL.N_DIRECTIONS = 1
_C.MODEL.N_TS = 5
_C.MODEL.TENSORBOARD = True
_C.MODEL.FLOW_PREFIX = ""
_C.TRAINER.JOBS = 2
_C.TRAINER.EF = 1
_C.TRAINER.PF = 50
_C.TRAINER.SF = 50
_C.TRAINER.COPY_LIST = ["N", "N"]
_C.TRAINER.SAVE_MODEL = True
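# get_cfg_defaults() below hands back a detached clone of this template, so callers can
# override it per experiment without mutating the module-level defaults. Illustrative
# yacs-style usage (the YAML filename is hypothetical):
#   cfg = get_cfg_defaults()
#   cfg.merge_from_file("configs/my_experiment.yaml")
#   cfg.freeze()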
def get_cfg_defaults():
return _C.clone() |
py | 1a42b5babe4ef543a1f1819572d05f4f43f3698b | # coding=utf-8
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for fit_gmm_pair."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
from ott.tools.gaussian_mixture import fit_gmm
from ott.tools.gaussian_mixture import fit_gmm_pair
from ott.tools.gaussian_mixture import gaussian_mixture
from ott.tools.gaussian_mixture import gaussian_mixture_pair
from ott.tools.gaussian_mixture import probabilities
class FitGmmPairTest(parameterized.TestCase):
def setUp(self):
super().setUp()
mean_generator0 = jnp.array([[2., -1.],
[-2., 0.],
[4., 3.]])
cov_generator0 = jnp.array([[[0.2, 0.], [0., 0.1]],
[[0.6, 0.], [0., 0.3]],
[[0.5, 0.4], [0.4, 0.5]]])
weights_generator0 = jnp.array([0.3, 0.3, 0.4])
gmm_generator0 = (
gaussian_mixture.GaussianMixture.from_mean_cov_component_weights(
mean=mean_generator0,
cov=cov_generator0,
component_weights=weights_generator0))
# shift the means to the right by varying amounts
mean_generator1 = mean_generator0 + jnp.array([[1., -0.5],
[-1., -1.],
[-1., 0.]])
cov_generator1 = cov_generator0
weights_generator1 = weights_generator0 + jnp.array([0., 0.1, -0.1])
gmm_generator1 = (
gaussian_mixture.GaussianMixture.from_mean_cov_component_weights(
mean=mean_generator1,
cov=cov_generator1,
component_weights=weights_generator1))
self.epsilon = 1.e-2
self.rho = 0.1
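    # standard unbalanced-OT reparameterization of the marginal penalty: tau = rho / (rho + epsilon)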
self.tau = self.rho / (self.rho + self.epsilon)
key = jax.random.PRNGKey(0)
self.key, subkey0, subkey1 = jax.random.split(key, num=3)
self.samples_gmm0 = gmm_generator0.sample(key=subkey0, size=2000)
self.samples_gmm1 = gmm_generator1.sample(key=subkey1, size=2000)
@parameterized.named_parameters(
('balanced_unweighted', True, False),
('balanced_weighted', True, True),
('unbalanced_unweighted', False, False),
('unbalanced_weighted', False, True))
def test_fit_gmm(self, balanced, weighted):
# dumb integration test that makes sure nothing crashes
if balanced:
tau = 1.
else:
tau = self.tau
if weighted:
weights0 = jnp.ones(self.samples_gmm0.shape[0])
      weights1 = jnp.ones(self.samples_gmm1.shape[0])
weights_pooled = jnp.concatenate([weights0, weights1], axis=0)
else:
weights0 = None
weights1 = None
weights_pooled = None
# Fit a GMM to the pooled samples
samples = jnp.concatenate([self.samples_gmm0, self.samples_gmm1])
gmm_init = fit_gmm.initialize(
key=self.key,
points=samples,
point_weights=weights_pooled,
n_components=3,
verbose=False)
gmm = fit_gmm.fit_model_em(
gmm=gmm_init,
points=samples,
point_weights=None,
steps=20)
# use the same mixture model for gmm0 and gmm1 initially
pair_init = gaussian_mixture_pair.GaussianMixturePair(
gmm0=gmm, gmm1=gmm, epsilon=self.epsilon, tau=tau)
fit_model_em_fn = fit_gmm_pair.get_fit_model_em_fn(
weight_transport=0.1,
jit=True)
fit_model_em_fn(pair=pair_init,
points0=self.samples_gmm0,
points1=self.samples_gmm1,
point_weights0=weights0,
point_weights1=weights1,
em_steps=1,
m_steps=10,
verbose=False)
if __name__ == '__main__':
absltest.main()
|
py | 1a42b609cf2579ad84ee043fa2a219de584d5418 | # python setup.py sdist bdist_egg upload
from setuptools import setup, find_packages
setup(
name='sonm',
version='0.0.6',
description='Wrapper for SONM API https://github.com/sonm-io',
author='Telminov Sergey',
author_email='[email protected]',
url='https://github.com/telminov/sonm',
include_package_data=True,
packages=find_packages(),
data_files=[('', ['sonm/templates/bid.jinja2', 'sonm/templates/task.jinja2'])],
license='The MIT License',
install_requires=[
'jinja2',
]
)
|
py | 1a42b674bcedf861cfd154ea81ead92b0cc87227 | """Tests for recovering after errors."""
from pytype.tests import test_base
class RecoveryTests(test_base.TargetIndependentTest):
"""Tests for recovering after errors.
The type inferencer can warn about bad code, but it should never blow up.
These tests check that we don't faceplant when we encounter difficult code.
"""
def testBadSubtract(self):
ty = self.Infer("""
def f():
t = 0.0
return t - ("bla" - t)
""", report_errors=False)
self.assertTypesMatchPytd(ty, """
from typing import Any
def f() -> Any
""")
def testInheritFromInstance(self):
ty = self.Infer("""
class Foo(3):
pass
""", report_errors=False)
self.assertTypesMatchPytd(ty, """
class Foo(?):
pass
""")
def testNameError(self):
ty = self.Infer("""
x = foobar
class A(x):
pass
pow(A(), 2)
""", report_errors=False)
self.assertTypesMatchPytd(ty, """
x = ... # type: ?
class A(?):
pass
""")
def testObjectAttr(self):
self.assertNoCrash(self.Check, """
object.bla(int)
""")
def testAttrError(self):
ty = self.Infer("""
class A:
pass
x = A.x
class B:
pass
y = "foo".foo()
object.bar(int)
class C:
pass
""", report_errors=False)
self.assertTypesMatchPytd(ty, """
class A:
pass
x = ... # type: ?
class B:
pass
y = ... # type: ?
class C:
pass
""")
def testWrongCall(self):
ty = self.Infer("""
def f():
pass
f("foo")
x = 3
""", report_errors=False)
self.assertTypesMatchPytd(ty, """
def f() -> None: ...
x = ... # type: int
""")
def testDuplicateIdentifier(self):
ty = self.Infer("""
class A(object):
def __init__(self):
self.foo = 3
def foo(self):
pass
""")
self.assertTypesMatchPytd(ty, """
from typing import Any
class A(object):
foo = ... # type: Any
""")
def testMethodWithUnknownDecorator(self):
_, errors = self.InferWithErrors("""\
from nowhere import decorator
class Foo(object):
@decorator
def f():
name_error
""", deep=True)
self.assertErrorLogIs(errors, [
(1, "import-error"),
(5, "name-error"),
])
def testAssertInConstructor(self):
self.Check("""\
class Foo(object):
def __init__(self):
self._bar = "foo"
assert False
def __str__(self):
return self._bar
""")
@test_base.skip("Line 7, in __str__: No attribute '_bar' on Foo'")
def testConstructorInfiniteLoop(self):
self.Check("""\
class Foo(object):
def __init__(self):
self._bar = "foo"
while True: pass
def __str__(self):
return self._bar
""")
def testAttributeAccessInImpossiblePath(self):
_, errors = self.InferWithErrors("""\
x = 3.14 if __random__ else 42
if isinstance(x, int):
if isinstance(x, float):
x.upper # not reported
3 in x
""")
self.assertErrorLogIs(errors, [
(5, "unsupported-operands"),
])
def testBinaryOperatorOnImpossiblePath(self):
_, errors = self.InferWithErrors("""\
x = "" if __random__ else []
if isinstance(x, list):
if isinstance(x, str):
x / x
""")
self.assertErrorLogIs(errors, [
(4, "unsupported-operands"),
])
test_base.main(globals(), __name__ == "__main__")
|
py | 1a42b78bd4bbd77b6bf2a912a6013bde23da382f | import ConfigParser
import requests
from requests.auth import HTTPBasicAuth
def update_ip():
cp = ConfigParser.ConfigParser()
cp.read('config.ini')
try:
label = cp.get('Main', 'label')
username = cp.get('Main', 'username')
password = cp.get('Main', 'password')
except ConfigParser.NoSectionError as e:
print "Please make sure you have a config.ini file in this directory, with label, username, and password defined."
return
response = requests.get('https://updates.opendns.com/nic/update?hostname=%s' % label, auth=HTTPBasicAuth(username, password))
if response.status_code == requests.codes.ok:
print "Successfully updated IP:", response.text
else:
print "Failed to update IP:", response
print "Make sure your password doesn't have any special characters in it, other than underscore. Try using underscore for the special character."
if __name__ == '__main__':
update_ip()
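
# Example of the config.ini this script expects (all values below are illustrative):
#   [Main]
#   label = my-network-label
#   username = [email protected]
#   password = my_opendns_password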
|
py | 1a42b824b9f50ff7309c0c8a39742db885f6e966 | def binary_search(data: [int], key: int) -> int:
lower_bound = 0
upper_bound = len(data) - 1
mid = 0
while True:
if lower_bound > upper_bound:
return -1
        # Note: computing (lower_bound + upper_bound) / 2 directly can overflow the int range in some languages
        mid = lower_bound + (upper_bound - lower_bound) // 2  # floor division keeps mid an integer index
if data[mid] == key:
return mid
elif data[mid] < key:
lower_bound = mid + 1
else:
upper_bound = mid - 1 |