ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M)
---|---|---
py | 1a423982b4b451821e61f259bd81de4d518cd264 | import os
from flask import Flask
from controller.routes import api
app = Flask(__name__)
app.register_blueprint(api)
app.run(port=5000, debug=True) |
py | 1a423bb2e8a91135a397e5d048f1f32caa331084 | # encoding: utf-8
# cython: profile=False
# cython: embedsignature=True
"""
Implementation of DirichletDistribution.
The Dirichlet distribution makes use of path counts from a DFA. Consider
two representations of the DFA for the golden mean process.
     0   1
 0   0   1
 1   0  -1
and
     0   1
 0   0   1
 1   0   2
 2   2   2
In the first, we have an incomplete DFA, whereas the second DFA is complete.
The first represents forbidden transitions with a special node, -1. There is no
explicit treatment of this node, and so, when counting paths, one must deal
with the reality that paths can terminate. For the second DFA, node 2 is an
explicit node that receives all forbidden transitions. Self loops keep all
subsequent transitions at node 2. In this case, paths, even forbidden paths,
do not terminate prematurely. It should not be surprising that supporting
incomplete DFAs makes the code *more* complex. For example, the expression
for the likelihood that we use during inference is valid only if the path has
a nonzero probability, and so, the implementation must first check that the
path is valid.
Further complicating matters is that this implementation will infer transition
probabilities for every node that has more than one outgoing edge. Without
a mechanism for declaring which edges should be inferred and which should be
fixed, this means that doing inference on a complete DFA will yield undesirable
results---as forbidden edges will be inferred to have some nonzero probability.
What this means for the current implementation:
If one is attempting to do inference on an HMM that does not have full
support from each state, then one should pass in an incomplete DFA of its
support, rather than a complete DFA. The expression for the likelihood
(and thus, evidence) still only holds for words with nonzero probability.
Edges to implicit, forbidden nodes will have probability 0, which is fixed
and not inferred by the algorithm.
A goal for the future is to extend it so that one can declare which edges are
fixed and which should be inferred, and how the parameters might be related
to one another.
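As a concrete illustration (a sketch only, assuming NumPy), the two encodings
above can be written directly as transition matrices in the form this module
expects (rows are nodes, columns are symbols, entries are next nodes):
    import numpy as np
    # Incomplete DFA: -1 marks the forbidden transition out of node 1 on symbol 1.
    incomplete = np.array([[0,  1],
                           [0, -1]])
    # Complete DFA: node 2 is an explicit garbage node with self loops.
    complete = np.array([[0, 1],
                         [0, 2],
                         [2, 2]])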
"""
from __future__ import absolute_import
from __future__ import division
import cython
#cimport cython
import numpy as np
#cimport numpy as np
from copy import deepcopy
from .counts import path_counts
from .exceptions import InvalidInitialNode
from itertools import product
BTYPE = np.bool_  # the np.bool alias was removed in newer NumPy releases
#ctypedef bint BTYPE_t
ITYPE = np.int64
#ctypedef np.int64_t ITYPE_t
__all__ = ['DirichletDistribution', 'Infer']
import dit
class DirichletDistribution(object):
"""
A barebones representation of a product of Dirichlet distributions.
"""
### Public
nodes = None
symbols = None
final_node = None
valid_initial_nodes = None
node_paths = None
nNodes = None
nSymbols = None
nInitial = None
nEdges = None
prng = None
### Private
tmatrix = None
edges = None
edge_alphas = None
edge_counts = None
node_alphas = None
node_counts = None
_temp = None
def __init__(self, tmatrix, data=None, node_path=False, prng=None, out_arrays=None):
#np.ndarray[ITYPE_t, ndim=2, mode="c"] tmatrix,
#np.ndarray[ITYPE_t, ndim=1, mode="c"] data,
#BTYPE_t node_path=False,
#out_arrays=None):
# In what follows, we use these variables:
# n : number of nodes
# k : number of symbols
# L : length of data
# nInitial : number of valid initial nodes
# nEdges : number of edges
if prng is None:
prng = np.random.RandomState()
self.prng = prng
if data is None:
data = np.array((), dtype=int)
# shape: (n, k)
# axes: (node, symbol)
# Each element is the next node.
self.tmatrix = tmatrix
# shape: (nEdges, 2)
# axes: (edge index, (node, symbol))
self.edges = np.dstack(np.nonzero(tmatrix != -1))[0]
# shape : (n,)
# axes: (node,)
# Not strictly necessary since the nodes are integers from zero.
self.nodes = np.arange(tmatrix.shape[0])
# shape: (k,)
# axes: (symbol,)
self.symbols = np.arange(tmatrix.shape[1])
counts, final, node_paths = path_counts(tmatrix, data,
node_path, out_arrays)
# shape: (n, n, k)
# axes: (initialNode, node, symbol)
# Use float to support average counts.
self.edge_counts = counts.astype(float)
# shape: (n, n, k)
# axes: (initialNode, node, symbol)
# Start with uniform prior.
self.edge_alphas = np.zeros(counts.shape, dtype=float) + 1
self._update_node_alphacounts()
# shape: (n,)
# axes: (initial node,)
# Each element is the final node.
self.final_node = final
# shape: (nInitial,)
# axes: (initial node,)
# Each element is a valid initial node.
self.valid_initial_nodes = np.array(np.nonzero(final != -1)[0])
# Eventually, we will need to determine which nodes have edges that
# are to be inferred. Presently, this is every node since we cannot
# have fixed edges with this algorithm. This will affect self._temp.
# shape: (nNodes, L+1)
# axes: (initialNode, time)
self.node_paths = node_paths
# The first row is for numerator terms
# The second row is for denominator terms
shape = (2, len(self.edges) + len(self.nodes))
self._temp = np.empty(shape, dtype=float)
self.nNodes = tmatrix.shape[0]
self.nSymbols = tmatrix.shape[1]
self.nInitial = len(self.valid_initial_nodes)
self.nEdges = self.edges.shape[0]
def _update_node_alphacounts(self, alpha=True, counts=True):
"""
Recalculates `node_alphas` and `node_counts`.
This must be called any time `edge_alphas` or `edge_counts` is updated.
They are used to calculate the evidence.
Practically, the node counts are the number of times each node was
visited by some symbol. Effectively:
node_count(initial_node, node)
= \sum_{symbol} edge_count(initialNode, node, symbol)
"""
# axes: (initialNode, node)
# Each element is the count/alpha value.
# Recall edge_counts and edge_alphas have:
# shape: (n, n, k)
# axes: (initialNode, node, symbol)
# For the counts, if an edge was not traversed, then its count is
# zero and will not affect the sum along axis=2. When we consider
# the alphas, we must make sure that the alphas corresponding to
# nonedges (assuming incomplete DFAs) do not affect the node alpha,
# that is, the sum along axis=2. So we exclude nonedges from the sum.
# This means the minimum node alpha for every (initial node, node) pair
# is 1, even for nodes which have no edges that need to be inferred.
# However, this is not a problem since algorithms, like the evidence,
# will not query for those alpha values (since they use self.edges).
#
# The reason it is done this way is to simplify the data structure.
# Technically, you only need priors for edges that are to be inferred.
# As of now, the implementation is that these arrays will have fixed
# size, no matter how many edges need to be inferred. An alternative
# way to do this is to make axis=1 sparse and with size equal to the
# number of edges to be inferred. We would then need to use a lookup to
# match indexes along axis=1 to the edges.
if alpha:
condition = self.tmatrix != -1
self.node_alphas = np.where(condition, self.edge_alphas, 0).sum(axis=2)
if counts:
self.node_counts = self.edge_counts.sum(axis=2)
def add_counts_from(self, data):
"""
Adds additional counts from `data`.
"""
# For each symbol, add the count and update the final node.
for symbol in data:
for initial_node in self.valid_initial_nodes:
final_node = self.final_node[initial_node]
self.final_node[initial_node] = self.tmatrix[final_node, symbol]
self.edge_counts[initial_node, final_node, symbol] += 1
self.valid_initial_nodes = np.array(np.nonzero(self.final_node != -1)[0])
self._update_node_alphacounts()
def log_evidence(self, initial_node):
"""
Returns the log evidence of the data using `initial_node` as the initial node.
Parameters
----------
initial_node : int
An initial node.
Returns
-------
log_evid : float
The base-2 log evidence of the data given the initial node. When
its value is -inf, then it is not possible to generate the given
data from the initial node. When its value is 0, then the given
data is the only possible data that could be generated from the
initial node.
"""
if self.final_node[initial_node] == -1:
# Then the data cannot be generated by this node.
#
# The form we use for the likelihood is valid only if the
# probability of the data is nonzero. The reason is that it
# requires edge counts for every edge, and we only obtain counts on
# allowed edges. We could, alternatively, work with complete DFAs,
# and then we *would* have counts for transitions following a
# forbidden transition. In this case, the transition matrix would
# have no entries equal to -1, and one of the states would be the
# garbage state. But this doesn't work for other reasons. See the
# module docstring.
log_evid = -np.inf
else:
from scipy.special import gammaln
# shape: (2, nEdges + nNodes)
temp = self._temp
# It is no problem to iterate through nodes which only have
# one edge, since the edge and node counts/alphas will cancel out.
# Once we allow nodes with fixed probs, we will need to iterate
# only through inferred edges and nodes with inferred edges.
# Now iterate through every edge (u, x)
edges = self.edges
nEdges = self.nEdges
ealphas = self.edge_alphas
ecounts = self.edge_counts
for i in range(nEdges):
u = edges[i, 0]
x = edges[i, 1]
temp[0, i] = ealphas[initial_node, u, x] + \
ecounts[initial_node, u, x]
temp[1, i] = ealphas[initial_node, u, x]
# Similarly, iterate through every node (u, *)
nalphas = self.node_alphas
ncounts = self.node_counts
for i in range(self.nNodes):
temp[0, i + nEdges] = nalphas[initial_node, i]
temp[1, i + nEdges] = nalphas[initial_node, i] + \
ncounts[initial_node, i]
gammaln(temp, temp)
temp[1] *= -1
log_evid = temp.sum()
# Return base-2 logarithms.
return log_evid / np.log(2)
def log_evidence_array(self):
"""
Returns an array of the log evidence of each node.
"""
nNodes = self.nNodes
log_evid = np.empty(nNodes)
for i in range(nNodes):
log_evid[i] = self.log_evidence(i)
return log_evid
def sample_uhmm(self, initial_node, size=None, prng=None):
"""
Returns a uHMM sampled from the posterior.
Parameters
----------
initial_node : int
The initial node.
size : int
The number of uHMMs to return.
prng : np.RandomState
A pseudorandom number generator, compatible with NumPy RandomState.
Returns
-------
trans : NumPy array
The transition probabilities of the uHMM. If `size` is None, then
return a single transition matrix, shape (n, k). Otherwise, return
`size` transition matrices in an array of shape (`size`, n, k).
Raises
------
InvalidInitialNode
If `initial_node` is not a valid initial node.
Notes
-----
The final node can be obtained from self.final_node[initial_node].
"""
if prng is None:
prng = self.prng
final_node = self.final_node[initial_node]
if final_node == -1:
raise InvalidInitialNode(initial_node)
post = self.edge_alphas[initial_node] + self.edge_counts[initial_node]
condition = self.tmatrix != -1
if size is None:
shape = (1,) + self.tmatrix.shape
else:
shape = (size,) + self.tmatrix.shape
trans = np.zeros(shape, dtype=float)
for n in range(shape[0]):
for i in range(shape[1]):
cond = condition[i]
trans[n, i, cond] = prng.dirichlet(post[i, cond])
if size is None:
trans = trans[0]
return trans
def pm_uhmm(self, initial_node):
"""
Returns the posterior mean uHMM for the specified initial node.
Parameters
----------
initial_node : int
The initial node.
Returns
-------
trans : NumPy array
The transition probabilities of the uHMM.
Raises
------
InvalidInitialNode
If `initial_node` is not a valid initial node.
Notes
-----
The final node can be obtained from self.final_node[initial_node].
"""
final_node = self.final_node[initial_node]
if final_node == -1:
raise InvalidInitialNode(initial_node)
# This is a vectorized version of pm_edge_probability().
# An edge is a node and symbol: s, x
# alpha(s, x|s_i) + counts(s, x|s_i)
trans = self.edge_alphas[initial_node] + \
self.edge_counts[initial_node]
# Now, we divide each row of trans by its normalization constant:
#
# \sum_x (alpha(s, x | s_i) + counts(s, x | s_i))
#
# The node_* arrays have row/cols (initial_node, node). So we need
# to associate their columns to the rows of trans. This is achieved
# by dividing trans by a column vector. Before the [:, np.newaxis],
# we have arrays of shape (n,). Afterwards, we have shape (n,1)
trans /= (self.node_alphas[initial_node] + \
self.node_counts[initial_node])[:, np.newaxis]
# It is necessary to explicitly mark forbidden transitions as having
# zero probability since the alphas are nonzero for all transitions.
condition = self.tmatrix == -1
trans[condition] = 0
return trans
def pm_uhmm_array(self):
"""
Returns an array of the posterior mean uHMMs.
"""
uhmms = np.zeros((self.nInitial, self.nNodes, self.nSymbols))
for i, initial_node in enumerate(self.valid_initial_nodes):
uhmms[i] = self.pm_uhmm(initial_node)
return uhmms
def _ntm(self, trans):
n = trans.shape[0]
ntm = np.zeros((n,n), dtype=float)
edges = self.edges
tmatrix = self.tmatrix
for i in range(len(edges)):
u = edges[i, 0]
x = edges[i, 1]
v = tmatrix[u, x]
ntm[u, v] += trans[u, x]
return ntm
def get_updated_prior(self):
"""
Returns a new DirichletDistribution that incorporates observed counts.
"""
new = deepcopy(self)
# Transfer edge counts to edge alphas.
new.edge_alphas += new.edge_counts
new.edge_counts *= 0
new._update_node_alphacounts()
# To sample from the posterior, P( \theta | D, \sigma) we must keep the
# same valid_initial_nodes. Note that the edge counts are zero in the
# updated posterior. This suggests that the final_nodes should be
# equal to the valid_initial_nodes since there is no data (e.g. no
# edge counts). But doing this would not allow us to properly add new
# counts later, since we *must* know the final state from all data seen (even if
# the counts in the updated prior are now zero).
return new
def predictive_probability(self, x, initial_node):
"""
Returns the mean predictive probability of `x` given `initial_node`.
That is, we calculate::
\Pr(x | D, \sigma) = \int \Pr( x | D, \theta, \sigma)
\Pr( \theta | D, \sigma) d \theta
This is a calculation from the posterior predictive distribution.
Parameters
----------
x : iterable
The new data used to calculate the predictive probability.
initial_node : int
The initial node.
Returns
-------
p : float
The base-2 logarithm of the mean predictive probability of `x`.
Raises
------
InvalidInitialNode
If `initial_node` is not a valid initial node.
"""
new = self.get_updated_prior()
new.add_counts_from(x)
return new.log_evidence(initial_node)
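# A minimal usage sketch (not part of the original module): the incomplete
# golden-mean DFA from the module docstring with a short symbol sequence.
# The data values are assumed to be symbol indices of an integer dtype,
# matching the default `np.array((), dtype=int)` used above.
#
#     tmatrix = np.array([[0, 1], [0, -1]])
#     data = np.array([0, 1, 0, 0, 1], dtype=np.int64)
#     dd = DirichletDistribution(tmatrix, data)
#     dd.log_evidence_array()   # base-2 log evidence for each initial node
#     dd.pm_uhmm(0)             # posterior mean transition probabilities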
class DirichletDistributionCP(DirichletDistribution):
"""
A Dirichlet distribution for Cartesian product inference.
Importantly, the node/edge alpha and counts are not directly used to
determine the posterior without first transforming them into the
constituent parts of the Cartesian product.
"""
### Public
nodes = None
symbols = None
final_node = None
valid_initial_nodes = None
node_paths = None
nMatrices = None
nNodes = None
nSymbols = None
nInitial = None
nEdges = None
prng = None
### Private
tmatrices = None
tmatrix = None
edges = None
_temp = None
def __init__(self, tmatrices, data=None, node_path=False, prng=None, out_arrays=None):
tmatrix = self._build_tmatrix(tmatrices, data)
base = super(DirichletDistributionCP, self)
base.__init__(tmatrix, data, node_path, prng, out_arrays)
def _build_tmatrix(self, tmatrices, data):
# list of length m
# elements are arrays of shape: (n_i, k_i)
# axes: (node, symbol) for the ith tmatrix.
# Each element is the next node.
self.tmatrices = tmatrices
self.nMatrices = len(tmatrices)
self.nNodes_array = np.array([tmatrix.shape[0] for tmatrix in tmatrices])
nNodes = np.prod(self.nNodes_array)
self.nodes = np.arange(nNodes)
self.node_tuples = list(product(*[range(n) for n in self.nNodes_array]))
self.node_tuples_index = dict(zip(self.node_tuples, self.nodes))
self.nSymbols_array = np.array([tmatrix.shape[1] for tmatrix in tmatrices])
nSymbols = np.prod(self.nSymbols_array)
self.symbols = np.arange(nSymbols)
self.symbol_tuples = list(product(*[range(n) for n in self.nSymbols_array]))
self.symbol_tuples_index = dict(zip(self.symbol_tuples, self.symbols))
shape = np.array([m.shape for m in self.tmatrices]).prod(axis=0)
tmatrix = np.zeros(shape, dtype=int) - 1
# Quick hack for now...generate the data for each tmatrix.
# This requires a scan of the data for each tmatrix. Slow.
# In principle, we can generate the counts/alphas with one scan,
# and then propagate these values through summations to the counts
# and alphas for each individual tmatrix.
self.dd = []
symbols = self.symbol_tuples
for i,m in enumerate(tmatrices):
if data is not None:
data_ = np.array([symbols[sym][i] for sym in data])
else:
data_ = None
self.dd.append( DirichletDistribution(m, data_) )
for edges in product(*[dd.edges for dd in self.dd]):
v = tuple(self.tmatrices[i][u, x] for i, (u, x) in enumerate(edges))
u, x = zip(*edges)
uu = self.node_tuples_index[u]
vv = self.node_tuples_index[v]
xx = self.symbol_tuples_index[x]
tmatrix[uu, xx] = vv
return tmatrix
def log_evidence(self, initial_node):
"""
Returns the log evidence of the data using `initial_node` as the initial node.
Parameters
----------
initial_node : int
An initial node.
Returns
-------
log_evid : float
The base-2 log evidence of the data given the initial node. When
its value is -inf, then it is not possible to generate the given
data from the initial node. When its value is 0, then the given
data is the only possible data that could be generated from the
initial node.
"""
base = 2
ops = dit.math.get_ops(base)
node = self.node_tuples[initial_node]
log_evids = np.array([self.dd[i].log_evidence(node[i])
for i in range(self.nMatrices)])
log_evid = ops.mult_reduce(log_evids)
return log_evid
def sample_uhmm(self, initial_node, size=None, prng=None):
"""
Returns a uHMM sampled from the posterior.
Parameters
----------
initial_node : int
The initial node.
size : int
The number of uHMMs to return.
prng : np.RandomState
A pseudorandom number generator, compatible with NumPy RandomState.
Returns
-------
trans : NumPy array
The transition probabilities of the uHMM. If `size` is None, then
return a single transition matrix, shape (n, k). Otherwise, return
`size` transition matrices in an array of shape (`size`, n, k).
Raises
------
InvalidInitialNode
If `initial_node` is not a valid initial node.
Notes
-----
The final node can be obtained from self.final_node[initial_node].
"""
if prng is None:
prng = self.prng
final_node = self.final_node[initial_node]
if final_node == -1:
raise InvalidInitialNode(initial_node)
inodes = self.node_tuples[initial_node]
uhmms = [self.dd[i].sample_uhmm(inodes[i], prng=prng)
for i in range(self.nMatrices)]
trans = uhmms[0]
for uhmm in uhmms[1:]:
trans = np.kron(trans, uhmm)
return trans
def pm_uhmm(self, initial_node):
"""
Returns the posterior mean uHMM for the specified initial node.
Parameters
----------
initial_node : int
The initial node.
Returns
-------
trans : NumPy array
The transition probabilities of the uHMM.
Raises
------
InvalidInitialNode
If `initial_node` is not a valid initial node.
Notes
-----
The final node can be obtained from self.final_node[initial_node].
"""
final_node = self.final_node[initial_node]
if final_node == -1:
raise InvalidInitialNode(initial_node)
inodes = self.node_tuples[initial_node]
pm_uhmms = [self.dd[i].pm_uhmm(inodes[i])
for i in range(self.nMatrices)]
trans = pm_uhmms[0]
for pm_uhmm in pm_uhmms[1:]:
trans = np.kron(trans, pm_uhmm)
return trans
class Infer(object):
"""
New methods are those which require a distribution over start nodes.
"""
prng = None
posterior = None
inode_prior = None
inode_posterior = None
# The final node distribution is a deterministic function of the initial
# node posterior distribution. It is a "posterior". For the prior, the
# fnode_prior would be equal to inode_prior, and so, we do not include it
# here.
fnode_dist = None
_nodedist_class = dit.ScalarDistribution
_symboldist_class = dit.ScalarDistribution
_posterior_class = DirichletDistribution
def __init__(self, tmatrix, data=None, inode_prior=None, node_path=False, prng=None, out_arrays=None, options=None):
"""
inode_prior is the initial node prior distribution.
"""
# Allow the user to customize the classes used internally.
if options is not None:
attrs = ['nodedist_class', 'symboldist_class', 'posterior_class']
for attr in attrs:
_attr = '_' + attr
setattr(self, _attr, options.get(attr, getattr(self, _attr)))
if prng is None:
prng = np.random.RandomState()
self.prng = prng
self.posterior = self._posterior_class(
tmatrix, data, node_path, self.prng, out_arrays
)
self._inode_init(inode_prior)
self._fnode_init()
def _inode_init(self, inode_prior):
#
# Set up initial node prior distribution.
#
if inode_prior is None:
outcomes = self.posterior.nodes
n = self.posterior.nNodes
pmf = [1 / n] * n
inode_prior = self._nodedist_class(outcomes, pmf)
else:
# Assumes:
# 1) the distribution is normalized
# 2) sample space is range(n)
if inode_prior.is_log():
inode_prior.set_base('linear')
# If the initial node dist does not assign positive probability to
# any of the valid initial nodes, then the evidence (averaged over
# the prior of nodes) will be 0, and the posterior over nodes is
# not defined. So we make sure that some probability is assigned
# to at least one valid initial node.
zero = inode_prior.ops.zero
for node in self.posterior.valid_initial_nodes:
if inode_prior[node] > zero:
break
else:
msg = "`inode_prior` does not assign probability to a valid node."
raise Exception(msg)
# There is no reason to make it sparse, except to match the posterior.
inode_prior.make_sparse()
self.inode_prior = inode_prior
#
# Calculate initial node posterior distribution. For state s and data x,
#
# p(s|x) = p(x|s) p(s) / p(x)
#
# where p(x) = \sum_s p(x|s) p(s)
#
base = 2
ops = dit.math.get_ops(base)
p_xgs = self.posterior.log_evidence_array()
# Need to use dense version of the prior's pmf
p_s = dit.copypmf(inode_prior, base=base, mode='dense')
p_sgx = ops.mult(p_xgs, p_s)
p_x = ops.add_reduce(p_sgx)
ops.mult_inplace(p_sgx, ops.invert(p_x))
# No need to sort since the prior was already sorted.
nodes = self.posterior.nodes
d = self._nodedist_class(nodes, p_sgx, base=base, sort=False)
d.set_base('linear')
d.make_sparse()
self.inode_posterior = d
def _fnode_init(self):
# This averages over initial nodes. Recall, due to unifilarity, for any
# given initial node, there is exactly one final node.
#
# p(s_f | x) = \sum_{s_i} p(s_f | x, s_i) p(s_i | x)
#
# where p(s_f | x, s_i) equals 1 for the unique final node reached from s_i and 0 otherwise.
#
ops = dit.math.LogOperations('e')
pmf = np.zeros(self.posterior.nNodes, dtype=float)
for initial_node in self.posterior.valid_initial_nodes:
p = self.inode_posterior[initial_node]
final_node = self.posterior.final_node[initial_node]
pmf[final_node] += p
nodes = self.posterior.nodes
d = self._nodedist_class(nodes, pmf, base='linear', validate=False)
d.make_sparse()
self.fnode_dist = d
def add_counts_from(self, data):
"""
Adds additional counts from `data`.
"""
self.posterior.add_counts_from(data)
self._inode_init(self.inode_prior)
self._fnode_init()
def get_updated_prior(self):
"""
Returns a new Infer that incorporates observed counts.
"""
posterior = self.posterior
try:
self.posterior = None
new = deepcopy(self)
finally:
self.posterior = posterior
new.posterior = posterior.get_updated_prior()
# The difference here is that we must use the inode_posterior as our
# new initial distribution.
new._inode_init(self.inode_posterior)
# There is no need to reinit the fnode_dist since
# new.posterior.valid_initial_nodes and new.posterior.final_node are
# the same as in `self.posterior`.
return new
def pm_next_symbol_dist(self):
# This averages over initial nodes.
#
# p(x | D) = \sum_{s_i} p( x | D, s_i) p(s_i | D)
#
# where
#
# p(x | D, s_i) = \int dtheta p(x | theta, D, s_i) p( theta | D, s_i)
#
# p(x | theta, D, s_i) = \sum_{s_f} p( x, s_f | theta, D, s_i)
# = p( x | theta, delta(s_i, D) )
#
# but note, this last equation is not really necessary for unifilar
# HMMs because the symbol x uniquely identifies the next state s_f.
# So we have:
#
# p(x | D, s_i) = \int dtheta p(x | theta, delta(s_i, D)) p(theta | D, s_i)
#
# Thus,
#
# p(x | D, s_i) = posterior mean of edge (x, delta(s_i, D))
#
# So for each valid initial node, we grab the row from the posterior
# mean uHMM corresponding to its final node. These are the mean
# probabilities of each symbol. This gives us a matrix of shape
# (number of valid initial nodes, symbols). We construct a column
# vector of the probability of each valid initial node and multiply
# it elementwise on the rows (with broadcasting) to the mean
# probabilities. Then, we sum the rows to get the final p(x | D).
shape = (self.posterior.nInitial, self.posterior.nSymbols)
probs = np.zeros(shape, dtype=float)
# We must work with valid initial nodes since we are indexing with
# the final node.
for i, initial_node in enumerate(self.posterior.valid_initial_nodes):
pm_uhmm = self.posterior.pm_uhmm(initial_node)
final_node = self.posterior.final_node[initial_node]
probs[i] = pm_uhmm[final_node]
weights = dit.copypmf(self.inode_posterior, 'linear', 'sparse')
weights = np.array([weights]).transpose()
probs *= weights
pmf = probs.sum(axis=0)
d = self._symboldist_class(self.posterior.symbols, pmf)
d.make_sparse()
return d
def log_evidence(self, initial_node=None):
"""
Returns the log evidence of the data using `initial_node` as the initial node.
p(D | s) if initial_node is not None
\sum_s p(D|s) p(s) if initial_node is None
Parameters
----------
initial_node : int, None
An initial node. If `None`, then the expected log evidence is
returned, where the expectation is over the initial node prior
distribution.
Returns
-------
log_evid : float
The base-2 log evidence of the data.
"""
if initial_node is not None:
return self.posterior.log_evidence(initial_node)
base = 2
ops = dit.math.get_ops(base)
p_s = dit.copypmf(self.inode_prior, base=base, mode='dense')
evidences = self.posterior.log_evidence_array()
log_evid = ops.add_reduce(ops.mult(evidences, p_s))
return log_evid
def sample_uhmm(self, initial_node=None, size=None, prng=None):
"""
Returns uHMM transition matrices sampled from the posterior.
Parameters
----------
initial_node : int
The initial node. If `None`, then the initial node is sampled from
the initial node posterior distribution.
size : int
The number of uHMMs to return.
prng : np.RandomState
A pseudorandom number generator, compatible with NumPy RandomState.
Returns
-------
inodes : int or NumPy array
The initial nodes. If `size` is None, then return the integer
corresponding to the sampled initial node. Otherwise, a NumPy array
of shape (`size`,) containing the sampled initial nodes.
trans : NumPy array
The transition probabilities of the uHMM. If `size` is None, then
return a single transition matrix, shape (n, k). Otherwise, return
`size` transition matrices in an array of shape (`size`, n, k).
Raises
------
InvalidInitialNode
If `initial_node` is not a valid initial node.
"""
if prng is None:
prng = self.prng
single = False
if size is None:
size = 1
single = True
n, k = self.posterior.nNodes, self.posterior.nSymbols
uhmms = np.zeros((size, n, k))
if initial_node is None:
inodes = self.inode_posterior.rand(size, prng=prng)
else:
inodes = [initial_node] * size
for i, inode in enumerate(inodes):
uhmms[i] = self.posterior.sample_uhmm(inode, prng=prng)
if single:
inodes = inodes[0]
uhmms = uhmms[0]
return inodes, uhmms
# Depends on CMPy
def sample_stationary_distributions(self, n=None, prng=None):
from cmpy.math import stationary_distribution
if prng is None:
prng = self.prng
if n is None:
single = True
n = 1
else:
single = False
inodes = self.inode_posterior.rand(n, prng=prng)
# n is likely to be small...let's build it up.
pis = []
posterior = self.posterior
for initial_node in inodes:
tmatrix = posterior.sample_uhmm(initial_node, prng=prng)
ntm = posterior._ntm(tmatrix)
pi = stationary_distribution(ntm, logs=False)
pis.append(pi)
if single:
sdists = pis[0]
else:
sdists = np.array(pis)
return sdists
def predictive_probability(self, x, initial_node=None):
"""
Returns the mean predictive probability of `x` given `initial_node`.
That is, we calculate:
\Pr(x | D, \sigma) = \int \Pr( x | D, \theta, \sigma)
\Pr( \theta | D, \sigma) d \theta
This is a calculation from the posterior predictive distribution. When
`initial_node` is `None`, then we calculate:
\Pr(x | D) = \sum \Pr(x | D, \sigma) \Pr( \sigma | D)
Parameters
----------
x : iterable
The new data used to calculate the predictive probability.
initial_node : int or None
The initial node. If `None`, then the predictive probability is
averaged over the initial node posterior distribution.
Returns
-------
p : float
The base-2 logarithm of the mean predictive probability of `x`.
Raises
------
InvalidInitialNode
If `initial_node` is not a valid initial node.
"""
new = self.get_updated_prior()
new.add_counts_from(x)
return new.log_evidence(initial_node)
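# A minimal usage sketch (not part of the original module), reusing the
# incomplete golden-mean DFA above; it assumes a uniform prior over initial
# nodes (the default when `inode_prior` is None).
#
#     infer = Infer(np.array([[0, 1], [0, -1]]),
#                   data=np.array([0, 1, 0, 0, 1], dtype=np.int64))
#     infer.inode_posterior        # posterior over initial nodes
#     infer.fnode_dist             # induced distribution over final nodes
#     infer.pm_next_symbol_dist()  # mean predictive distribution of the next symbol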
class InferCP(Infer):
_posterior_class = DirichletDistributionCP
|
py | 1a423c849692b18d5f8674ea0ee8439b4141ee99 | import sys
exec("print('ALICE');"*int(input()))
|
py | 1a423c93038b5e012834c9864f18363bcdaf9f1d | from collections import OrderedDict
from functools import partial
from matplotlib.figure import Figure
from PyQt5 import QtWidgets, QtCore, QtGui
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from .model import AxesSet
from .widgets import *
class AxPositioningEditor(QtWidgets.QWidget):
"""
main widget for editing axes positions
Example:
>>> from matplotlib import pyplot as plt
>>> fig = plt.figure()
>>> w, h = fig.get_size_inches()
>>> AxPositioningEditor((w, h), bounds=[])
"""
position_codes = ['S', 'N', 'W', 'E', 'SW', 'NW', 'NE', 'SE', 'C']
position_names = [
'lower center',
'top center',
'left center',
'right center',
'lower left',
'upper left',
'upper right',
'lower right',
'center']
click_axes_data = dict(w=.3, h=.3)
def __init__(self, figsize, bounds=(), anchor='C', dpi=150):
super().__init__()
self.figsize = figsize
w, h = self.figsize
self.figure = Figure(figsize=(w, h))
self.dpi = dpi
self.settings = dict(guides=False,
guides_selected=False,
relative=True)
self.guides_subsetting_fields = []
self.axes = AxesSet(self.figure, bounds, anchor)
self.build()
self.canvas.mpl_connect('button_release_event', self.draw_axes)
self.pointing_axes = False
def build(self):
"""build the widget"""
self.setMinimumWidth(600)
self.setMinimumHeight(350)
layout = QtWidgets.QHBoxLayout(self)
layout.setContentsMargins(5, 5, 5, 5)
figure_layout = QtWidgets.QVBoxLayout()
layout.addLayout(figure_layout)
self.build_figure(figure_layout)
self.build_tools(layout)
self.msg_label = QtWidgets.QLabel()
self.msg_label.setContentsMargins(5, 5, 5, 5)
figure_layout.addWidget(self.msg_label)
self.draw()
self.set_message(None)
def build_figure(self, layout):
"""build the figure area"""
figure_scroll_area = QtWidgets.QScrollArea()
figure_scroll_area.setAlignment(QtCore.Qt.AlignCenter)
# create canvas
self.canvas = FigureCanvas(self.figure)
# update the canvas size based on the figure size
self.update_canvas_size()
figure_scroll_area.setWidget(self.canvas)
layout.addWidget(figure_scroll_area)
def build_tools(self, layout):
"""build the tools area"""
tools_widget = QtWidgets.QTabWidget()
tools_widget.setFixedWidth(320)
layout.addWidget(tools_widget)
fw = QtWidgets.QWidget()
figsize_layout = QtWidgets.QFormLayout(fw)
self.figure_fields = dict()
w, h = self.figsize
self.figure_fields['w'] = f = QtWidgets.QLineEdit('{:.2f}'.format(w))
f.setValidator(QtGui.QDoubleValidator(0, 1000, 2))
figsize_layout.addRow('Width', f)
self.figure_fields['h'] = f = QtWidgets.QLineEdit('{:.2f}'.format(h))
f.setValidator(QtGui.QDoubleValidator(0, 1000, 2))
figsize_layout.addRow('Height', f)
b = QtWidgets.QPushButton('Apply')
b.clicked.connect(self.set_figsize)
figsize_layout.addRow('', b)
tools_widget.addTab(fw, 'Figure')
tools_widget.addTab(self.build_positions_tab(), 'Positions')
w = AddAxesWidget(self.figure)
w.newbounds.connect(self.set_axes)
w.axes_added.connect(lambda x: self.add_axes_at_position(**x))
w.click_axes.connect(self.click_new_axes)
tools_widget.addTab(w, 'Add axes')
tools_widget.addTab(self.build_settings_tab(), 'Settings')
def build_settings_tab(self):
sw = QtWidgets.QWidget()
settings_layout = QtWidgets.QVBoxLayout(sw)
settings_layout.addWidget(QtWidgets.QLabel('Axes anchor'))
dropdown = QtWidgets.QComboBox()
dropdown.addItems(self.position_names)
dropdown.currentIndexChanged.connect(lambda x: self.update_anchor(self.position_codes[x]))
dropdown.setCurrentIndex(self.position_codes.index(self.axes.anchor))
settings_layout.addWidget(dropdown)
settings_layout.addWidget(hline())
cb = QtWidgets.QCheckBox('show guides')
cb.setChecked(self.settings.get('guides'))
cb.stateChanged.connect(self.set_show_guides)
settings_layout.addWidget(cb)
f = QtWidgets.QFrame()
l = QtWidgets.QVBoxLayout(f)
l.setContentsMargins(10, 5, 5, 5)
cb2 = QtWidgets.QCheckBox('for selected axes only')
cb2.setChecked(self.settings['guides_selected'])
cb2.stateChanged.connect(self.set_guides_selected)
cb2.setEnabled(self.settings['guides'])
self.guides_subsetting_fields.append(cb2)
l.addWidget(cb2)
settings_layout.addWidget(f)
cb3 = QtWidgets.QCheckBox('absolute positions (dots)')
cb3.setChecked(not self.settings['relative'])
cb3.stateChanged.connect(self.set_absolute)
settings_layout.addWidget(cb3)
settings_layout.addItem(QtWidgets.QSpacerItem(
0, 0,
QtWidgets.QSizePolicy.Maximum,
QtWidgets.QSizePolicy.Expanding))
return sw
def build_positions_tab(self):
w = QtWidgets.QWidget()
layout = QtWidgets.QVBoxLayout(w)
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(5)
# main buttons
button_layout = QtWidgets.QHBoxLayout()
button_layout.setContentsMargins(0, 0, 0, 0)
clear_figure_button = QtWidgets.QPushButton('Clear figure')
clear_figure_button.clicked.connect(self.clear_figure)
button_layout.addWidget(clear_figure_button)
select_all_button = QtWidgets.QPushButton('Select all')
select_all_button.clicked.connect(self.select_all_axes)
button_layout.addWidget(select_all_button)
select_none_button = QtWidgets.QPushButton('Clear selection')
select_none_button.clicked.connect(self.select_none_axes)
button_layout.addWidget(select_none_button)
layout.addLayout(button_layout)
# actions
action_layout = QtWidgets.QHBoxLayout()
layout.addLayout(action_layout)
action_layout.addItem(QtWidgets.QSpacerItem(
0, 0,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Maximum))
action_layout.addWidget(QtWidgets.QLabel('Actions'))
self.actions_dropdown = QtWidgets.QComboBox()
self.actions_dropdown.addItems(sorted(self.axes_actions.keys()))
action_layout.addWidget(self.actions_dropdown)
execute_action_button = QtWidgets.QPushButton('Apply')
execute_action_button.clicked.connect(self.execute_current_action)
action_layout.addWidget(execute_action_button)
self.axtable = AxesPositionsWidget(self.axes)
self.axtable.changed.connect(self.set_ax_position)
self.axtable.selected.connect(self.select_axes)
self.axtable.invalid_value.connect(self.reset_value)
self.axtable.moved.connect(self.move_axes)
layout.addWidget(self.axtable)
return w
def update_canvas_size(self):
w, h = self.figsize
self.figure.set_size_inches(w, h)
self.figure.set_dpi(self.dpi)
screenwidth, screenheight = w * self.dpi, h * self.dpi
self.canvas.resize(int(.5 * screenwidth), int(.5 * screenheight))  # QWidget.resize expects ints
def set_figsize(self):
w = self.figure_fields['w'].text()
h = self.figure_fields['h'].text()
try:
w = float(w)
h = float(h)
except ValueError:
w, h = self.figure.get_size_inches()
self.figure_fields['w'].setText('{:.2f}'.format(w))
self.figure_fields['h'].setText('{:.2f}'.format(h))
else:
self.figsize = w, h
self.figure.set_size_inches(*self.figsize)
self.update_canvas_size()
self.draw(posfields=True)
def reset_value(self, row, col, attr):
ax = self.axes.names[row]
self.axtable.blockSignals(True)
self.axtable.item(row, col).setText('{:.3f}'.format(getattr(ax, attr)))
self.axtable.blockSignals(False)
def get_bounds(self):
"""returns a list of axes bounds as [(x, y, w, h)]"""
bounds = []
for n, a in self.axes.items():
bounds.append(a.bounds)
return bounds
def as_dict(self):
return dict(bounds=self.get_bounds(), figsize=self.figsize)
# ---------
# edit axes
# ---------
def draw_axes(self, event):
"""create an axes at the click location if self.pointing_axes is enabled"""
if self.pointing_axes:
x, y = self.figure.transFigure.inverted().transform((event.x, event.y))
a = self.add_axes_at_position(x, y, **self.click_axes_data)
self.pointing_axes = False
# clear the message widget
self.set_message(None)
def set_show_guides(self, b):
self.settings['guides'] = bool(b)
for item in self.guides_subsetting_fields:
item.setEnabled(b)
self.draw(posfields=False)
def set_guides_selected(self, b):
self.settings['guides_selected'] = bool(b)
self.draw(posfields=False)
def set_absolute(self, b):
self.settings['relative'] = not bool(b)
self.draw(posfields=True)
def click_new_axes(self, data):
self.pointing_axes = True
self.set_message('Click in the figure to place a new axes at that position')
self.click_axes_data = data
def add_axes_at_position(self, x, y, w=.4, h=.4, n=None, draw=True):
"""add axes at specified location in Figure coordinates"""
self.axes.add(x, y, w, h, apply_anchor=True)
if draw:
self.draw(posfields=True)
def add_axes(self, bounds, draw=True):
self.axes.add(*bounds)
if draw:
self.draw(posfields=True)
def set_axes(self, bounds, draw=True):
"""set several axes from a list of bounds"""
for bnd in bounds:
self.axes.add(*bnd)
if draw:
self.draw(posfields=True)
def set_ax_position(self, row, attr, value):
"""
set the position of an axes from the attribute name
:param row: row index of the axes in the positions table
:param attr: name of the position attribute
:param value: value of the position attribute
"""
axname = self.axes.names[row]
self.axes.set_property(str(axname), attr, value, relative=self.settings['relative'])
self.draw(posfields=True)
def delete_axes(self, name, redraw=True):
"""delete an axes from the editor"""
self.axes.pop(str(name))
if redraw:
self.draw(posfields=True)
def move_axes(self, rows, ind):
if ind in rows or ind-1 in rows:
return
names = self.axes.names
def keyfn(v):
if v in rows:
return 1
elif v < ind:
return 0
else:
return 2
indices = sorted(list(range(len(names))), key=keyfn)
self.axes.change_order([names[i] for i in indices])
self.draw(posfields=True)
# -----------
# update gui
# -----------
def set_message(self, msg, level='INFO'):
"""
set a message in the message window
hide the messages if msg is None
:param msg: message text
:param level: level (see logging levels) of the message
"""
if msg is None:
self.msg_label.setText('')
self.msg_label.hide()
else:
self.msg_label.show()
styles = dict(
DEBUG='background-color: rgb(100, 250, 100)',
INFO='',
WARNING='background-color: rgb(250, 230, 150)',
ERROR='background-color: rgb(255, 150, 100)',
)
self.msg_label.setStyleSheet(styles[level])
self.msg_label.setText(msg)
def add_message(self, msg):
"""add to the end of the message (keep formatting)"""
txt = self.msg_label.text()
self.msg_label.setText(txt+'\n'+msg)
def draw(self, posfields=False):
"""redraw the contents"""
self.figure.clear()
for name, a in self.axes.items():
a.format_placeholder(name)
self.figure.add_axes(a)
if self.settings['guides']:
self.axes.plot_guides(selected=self.settings['guides_selected'],
relative=self.settings['relative'])
self.canvas.draw_idle()
if posfields:
self.axtable.clear()
self.axtable.fill(self.axes, relative=self.settings['relative'])
def update_anchor(self, pos, redraw=True):
"""set the position reference anchor of the axes to a new location"""
for name, a in self.axes.items():
a.set_anchor(pos)
self.axes.anchor = pos
if redraw:
self.draw(posfields=True)
# ------------------------------------
# selecting axes and executing actions
# ------------------------------------
def execute_current_action(self):
if not self.axes.any_selected():
return
action = self.actions_dropdown.currentText()
fn = getattr(self, self.axes_actions[str(action)])
fn(self.axes.selected_names, self.axes.selected)
def select_axes(self, key, b=True):
self.axes.select(str(key), b)
self.draw()
def clear_figure(self):
self.figure.clear()
for k in list(self.axes.keys()):
self.delete_axes(k, redraw=False)
self.draw(posfields=True)
def select_all_axes(self):
self.axes.select_all()
self.draw(posfields=True)
def select_none_axes(self):
self.axes.select_none()
self.draw(posfields=True)
# --------------
# Define actions
# --------------
axes_actions = {
'delete': 'delete_axes_objects',
'align X': 'axes_equal_x',
'align Y': 'axes_equal_y',
'equal width': 'axes_equal_w',
'equal height': 'axes_equal_h',
'equal aspect': 'axes_equal_aspect',
'join': 'axes_join',
'split': 'axes_split'
}
def delete_axes_objects(self, names, axes, redraw=True):
for n in names:
self.axes.pop(n)
if redraw:
self.draw(posfields=True)
def axes_equal_x(self, names, axes, redraw=True):
x = axes.pop(0).x
for a in axes:
a.x = x
if redraw:
self.draw(posfields=True)
def axes_equal_y(self, names, axes, redraw=True):
y = axes.pop(0).y
for a in axes:
a.y = y
if redraw:
self.draw(posfields=True)
def axes_equal_w(self, names, axes, redraw=True):
w = axes.pop(0).w
for a in axes:
a.w = w
if redraw:
self.draw(posfields=True)
def axes_equal_h(self, names, axes, redraw=True):
h = axes.pop(0).h
for a in axes:
a.h = h
if redraw:
self.draw(posfields=True)
def axes_equal_aspect(self, names, axes, redraw=True):
A = axes.pop(0).aspect
for a in axes:
a.aspect = A
if redraw:
self.draw(posfields=True)
def axes_join(self, names, axes, redraw=True):
"""join axes within bounding box of all selected axes"""
# store anchor
anchor = self.axes.anchor
# update anchor to lower left during processing
self.update_anchor('SW', redraw=False)
# determine bounding box
xll = min(a.x for a in axes)
yll = min(a.y for a in axes)
xur = max(a.x + a.w for a in axes)
yur = max(a.y + a.h for a in axes)
# redefine first axes position to bounding box
axes[0].set_position((xll, yll, xur-xll, yur-yll))
# delete other axes
self.delete_axes_objects(names[1:], axes[1:], redraw=False)
# update the anchor to the original
self.update_anchor(anchor, redraw=redraw)
def axes_split(self, names, axes, redraw=True):
"""
split axes in two parts based on a given ratio
"""
def show_error(msg):
m = QtWidgets.QMessageBox()
m.setText(msg)
m.exec()
# create dialog to input ratio, spacing and h/v split
dialog = SplitDialog()
if dialog.exec() != QtWidgets.QDialog.Accepted:
return
ratio, spacing, horizontal = dialog.get_data()
if ratio < 0 or ratio > 1:
show_error('ratio must be between 0 and 1')
return
for a in axes:
try:
new_bounds = a.split(ratio, spacing, wsplit=horizontal)
except ValueError as e:
show_error(str(e))
return
else:
# create 2nd axes and copy selected state
new_ax = self.axes.add(*new_bounds, anchor=a.get_anchor())
new_ax._selected = a._selected
if redraw:
self.draw(posfields=True)
|
py | 1a423ca54ed38b8d57e3875f8dcda15af7e42796 | import os
from configparser import ConfigParser
from nipype.utils import config as nuc
from pkg_resources import resource_filename
def get_fitlins_config():
"""Construct Nipype configuration object with precedence:
- Local config (``./nipype.cfg``)
- Global config (``$HOME/.nipype/nipype.cfg`` or ``$NIPYPE_CONFIG_DIR/nipype.cfg``)
- FitLins config (``<fitlins_install_dir>/data/nipype.cfg``)
- Nipype default config (defined in ``nipype/utils/config.py``)
"""
config = nuc.NipypeConfig()
config.set_default_config()
fitlins_config_file = resource_filename('fitlins', 'data/nipype.cfg')
global_config_file = os.path.join(
os.path.expanduser(os.getenv("NIPYPE_CONFIG_DIR", default="~/.nipype")), "nipype.cfg"
)
local_config_file = "nipype.cfg"
fitlins_conf = ConfigParser()
fitlins_conf.read([fitlins_config_file, global_config_file, local_config_file])
config.update_config(fitlins_conf)
return config
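# A minimal usage sketch (not part of the original module); the section and
# option names below are illustrative and assume the standard Nipype config
# layout rather than anything FitLins-specific.
#
#     config = get_fitlins_config()
#     config.get('execution', 'stop_on_first_crash')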
|
py | 1a423ce2b066a950fbb7133724f1fe4db3de8c91 | import importlib
from abc import ABC, abstractmethod
from textwrap import dedent
from collections.abc import Sequence
from astropy.table import Table
from sunpy.util.util import get_width
__all__ = ['BaseQueryResponse', 'BaseClient']
class BaseQueryResponse(Sequence):
"""
An Abstract Base Class for results returned from BaseClient.
Notes
-----
* A QueryResponse object must be able to be instantiated with only one
iterable argument. (i.e. the ``__init__`` must only have one required
argument).
* The `client` property must be settable.
* The base class does not prescribe how you store the results from your
client, only that it must be possible to represent them as an astropy
table in the ``build_table`` method.
* `__getitem__` **must** return an instance of the type it was called on.
I.e. it must always return an object of ``type(self)``.
"""
@abstractmethod
def build_table(self):
"""
Return an `astropy.table.Table` representation of the query response.
"""
@property
@abstractmethod
def client(self):
"""
An instance of `BaseClient` used to generate the results.
Generally this is used to fetch the results later.
.. note::
In general, this doesn't have to be the same instance of
``BaseClient``, this is left to the client developer. If there is a
significant connection overhead in creating an instance of a client
you might want it to be the same instance as used for the search.
"""
@client.setter
@abstractmethod
def client(self, value):
pass
@property
@abstractmethod
def blocks(self):
"""
A `collections.abc.Sequence` object which contains the records
contained within the Query Response.
"""
@abstractmethod
def response_block_properties(self):
"""
Returns a set of class attributes on all the response blocks.
Returns
-------
s : `set`
Set of strings containing attribute names in the response blocks.
"""
def __str__(self):
"""Print out human-readable summary of records retrieved"""
return '\n'.join(self.build_table().pformat(show_dtype=False))
def __repr__(self):
"""Print out human-readable summary of records retrieved"""
return object.__repr__(self) + "\n" + str(self)
def _repr_html_(self):
return self.build_table()._repr_html_()
def show(self, *cols):
"""
Returns response tables with desired columns for the Query.
Parameters
----------
\\*cols : `tuple`
Name of columns to be shown.
Returns
-------
`astropy.table.Table`
A table showing values for specified columns.
"""
table = self.build_table()
if len(cols) == 0:
return table
return table[list(cols)]
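# A minimal usage sketch (not part of the original module); the column names
# are hypothetical and depend on the table a concrete client builds.
#
#     results = client.search(...)   # some BaseClient subclass
#     results.show('Start Time', 'Instrument')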
def _print_client(client, html=False):
"""
Given a `BaseClient` instance, print out each registered attribute.
Parameters
----------
client : `sunpy.net.base_client.BaseClient`
The instance class to print for.
html : bool
If `True`, return an HTML table instead.
Returns
-------
`str`
String with the client.
"""
width = -1 if html else get_width()
class_name = f"{client.__module__+'.' or ''}{client.__class__.__name__}"
attrs = client.register_values()
lines = []
t = Table(names=["Attr Type", "Name", "Description"],
dtype=["U80", "U80", "U80"])
for client_key in attrs.keys():
# Work around for * attrs having one length.
if len(attrs[client_key]) == 1 and attrs[client_key][0] == "*":
t.add_row((client_key.__name__, "All", "All valid values"))
continue
for name, desc in attrs[client_key]:
t.add_row((client_key.__name__, name, desc))
lines = [class_name, dedent(client.__doc__.partition("\n\n")[0])]
if html:
lines = [f"<p>{line}</p>" for line in lines]
lines.extend(t.pformat_all(show_dtype=False, max_width=width, align="<", html=html))
return '\n'.join(lines)
class BaseClient(ABC):
"""
This defines the Abstract Base Class for each download client.
The BaseClient has several abstract methods that ensure that any subclass enforces the bare minimum API.
These are `search`, `fetch` and `_can_handle_query`.
The last one ensures that each download client can be registered with Fido.
Most download clients should subclass `~sunpy.net.dataretriever.GenericClient`.
If the structure of `~sunpy.net.dataretriever.GenericClient`
is not useful you should use `~sunpy.net.BaseClient`.
`~sunpy.net.vso.VSOClient` and `~sunpy.net.jsoc.JSOCClient`
are examples of download clients that subclass ``BaseClient``.
"""
_registry = dict()
def __init_subclass__(cls, *args, **kwargs):
"""
An __init_subclass__ hook initializes all of the subclasses of a given class.
So for each subclass, it will call this block of code on import.
This replicates some metaclass magic without the need to be aware of metaclasses.
Here we use this to register each subclass in a dict that has the `_can_handle_query` attribute.
This is then passed into the UnifiedDownloaderFactory so we can register them.
This means that Fido can use the clients internally.
"""
super().__init_subclass__(**kwargs)
# We do not want to register GenericClient since its a dummy client.
if cls.__name__ in ('GenericClient',):
return
cls._registry[cls] = cls._can_handle_query
if hasattr(cls, "_attrs_module"):
from sunpy.net import attrs
name, module = cls._attrs_module()
module_obj = importlib.import_module(module)
existing_mod = getattr(attrs, name, None)
if existing_mod and existing_mod is not module_obj:
raise NameError(f"{name} has already been registered as an attrs name.")
setattr(attrs, name, module_obj)
if name not in attrs.__all__:
attrs.__all__.append(name)
# Register client attrs after it has registered its own attrs
from sunpy.net import attr
values = cls.register_values()
# If the client has no support, we won't try to register attrs
if values:
attr.Attr.update_values({cls: values})
def __repr__(self):
"""
Returns the normal repr plus the pretty client __str__.
"""
return object.__repr__(self) + "\n" + str(self)
def __str__(self):
"""
This enables the "pretty" printing of BaseClient.
"""
return _print_client(self)
def _repr_html_(self):
"""
This enables the "pretty" printing of the BaseClient with html.
"""
return _print_client(self, html=True)
@abstractmethod
def search(self, *args, **kwargs):
"""
This enables the user to search for data using the client.
Must return a subclass of `BaseQueryResponse`.
"""
@abstractmethod
def fetch(self, *query_results, path=None, overwrite=False, progress=True,
max_conn=5, downloader=None, wait=True, **kwargs):
"""
This enables the user to fetch the data using the client, after a search.
Parameters
----------
query_results:
Results to download.
path : `str` or `pathlib.Path`, optional
Path to the download directory
overwrite : `bool`, optional
Replace files with the same name if True.
progress : `bool`, optional
Print progress info to terminal.
max_conn : `int`, optional
Maximum number of download connections.
downloader : `parfive.Downloader`, optional
The download manager to use.
wait : `bool`, optional
If `False` ``downloader.download()`` will not be called. Only has
any effect if `downloader` is not `None`.
Returns
-------
`parfive.Results`
The results object, can be `None` if ``wait`` is `False`.
"""
@classmethod
@abstractmethod
def _can_handle_query(cls, *query):
"""
This enables the client to register what kind of searches it can handle, to prevent Fido
using the incorrect client.
"""
@staticmethod
def check_attr_types_in_query(query, required_attrs={}, optional_attrs={}):
"""
Check a query against required and optional attributes.
Returns `True` if *query* contains all the attrs in *required_attrs*,
and if *query* contains only attrs found in *required_attrs* or *optional_attrs*.
"""
query_attrs = {type(x) for x in query}
all_attrs = required_attrs.union(optional_attrs)
return required_attrs.issubset(query_attrs) and query_attrs.issubset(all_attrs)
@classmethod
def register_values(cls, *query):
"""
This enables the client to register what kind of Attrs it can use directly.
Returns
-------
`dict`
A dictionary with key values of Attrs and the values are a tuple of
("Attr Type", "Name", "Description").
"""
return {}
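# A minimal sketch (not part of sunpy) of how a subclass can satisfy the
# abstract API described above; ``ExampleClient`` and the chosen attrs are
# purely illustrative.
#
#     from sunpy.net import attrs as a
#
#     class ExampleClient(BaseClient):
#         def search(self, *query):
#             ...  # build and return a BaseQueryResponse subclass
#
#         def fetch(self, *query_results, path=None, **kwargs):
#             ...  # download the records and return a parfive.Results
#
#         @classmethod
#         def _can_handle_query(cls, *query):
#             return cls.check_attr_types_in_query(
#                 query,
#                 required_attrs={a.Time},
#                 optional_attrs={a.Instrument})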
|
py | 1a423d1a15aff1bd482853351fe581b856c08ff6 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class NetworkInterfaceLoadBalancersOperations(object):
"""NetworkInterfaceLoadBalancersOperations operations.
You should not instantiate this class directly. Instead, create a client instance, which will create this class for you and attach it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2020-07-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2020-07-01"
self.config = config
def list(
self, resource_group_name, network_interface_name, custom_headers=None, raw=False, **operation_config):
"""List all load balancers in a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of LoadBalancer
:rtype:
~azure.mgmt.network.v2020_07_01.models.LoadBalancerPaged[~azure.mgmt.network.v2020_07_01.models.LoadBalancer]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.LoadBalancerPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/loadBalancers'}
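# A minimal usage sketch (not part of the generated code); the client attribute
# name and resource names below are illustrative only.
#
#     for lb in network_client.network_interface_load_balancers.list(
#             'my-resource-group', 'my-nic'):
#         print(lb.name)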
|
py | 1a423ef4f4358fd9b24a3590ef074035f2d2c062 | # !/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import string
import struct
import hashlib
__all__ = ['ciphers']
cached_tables = {}
if hasattr(string, 'maketrans'):
maketrans = string.maketrans
translate = string.translate
else:
maketrans = bytes.maketrans
translate = bytes.translate
def get_table(key):
m = hashlib.md5()
m.update(key)
s = m.digest()
a, b = struct.unpack('<QQ', s)
table = maketrans(b'', b'')
table = [table[i: i + 1] for i in range(len(table))]
for i in range(1, 1024):
table.sort(key=lambda x: int(a % (ord(x) + i)))
return table
def init_table(key):
if key not in cached_tables:
encrypt_table = b''.join(get_table(key))
decrypt_table = maketrans(encrypt_table, maketrans(b'', b''))
cached_tables[key] = [encrypt_table, decrypt_table]
return cached_tables[key]
class TableCipher(object):
def __init__(self, cipher_name, key, iv, op):
self._encrypt_table, self._decrypt_table = init_table(key)
self._op = op
def update(self, data):
if self._op:
return translate(data, self._encrypt_table)
else:
return translate(data, self._decrypt_table)
def clean(self):
pass
class NoneCipher(object):
def __init__(self, cipher_name, key, iv, op):
pass
def update(self, data):
return data
def clean(self):
pass
ciphers = {
'none': (16, 0, NoneCipher),
'table': (16, 0, TableCipher)
}
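# Minimal usage sketch: the table cipher is a fixed byte-substitution table derived
# from the key, so decrypting an encrypted payload with the same key is the identity.
#
#   enc = TableCipher('table', b'secret-key', b'', 1)   # op=1 -> encrypt
#   dec = TableCipher('table', b'secret-key', b'', 0)   # op=0 -> decrypt
#   assert dec.update(enc.update(b'hello world')) == b'hello world'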
def test_table_result():
from shadowsocks.common import ord
target1 = [
[60, 53, 84, 138, 217, 94, 88, 23, 39, 242, 219, 35, 12, 157, 165, 181,
255, 143, 83, 247, 162, 16, 31, 209, 190, 171, 115, 65, 38, 41, 21,
245, 236, 46, 121, 62, 166, 233, 44, 154, 153, 145, 230, 49, 128, 216,
173, 29, 241, 119, 64, 229, 194, 103, 131, 110, 26, 197, 218, 59, 204,
56, 27, 34, 141, 221, 149, 239, 192, 195, 24, 155, 170, 183, 11, 254,
213, 37, 137, 226, 75, 203, 55, 19, 72, 248, 22, 129, 33, 175, 178,
10, 198, 71, 77, 36, 113, 167, 48, 2, 117, 140, 142, 66, 199, 232,
243, 32, 123, 54, 51, 82, 57, 177, 87, 251, 150, 196, 133, 5, 253,
130, 8, 184, 14, 152, 231, 3, 186, 159, 76, 89, 228, 205, 156, 96,
163, 146, 18, 91, 132, 85, 80, 109, 172, 176, 105, 13, 50, 235, 127,
0, 189, 95, 98, 136, 250, 200, 108, 179, 211, 214, 106, 168, 78, 79,
74, 210, 30, 73, 201, 151, 208, 114, 101, 174, 92, 52, 120, 240, 15,
169, 220, 182, 81, 224, 43, 185, 40, 99, 180, 17, 212, 158, 42, 90, 9,
191, 45, 6, 25, 4, 222, 67, 126, 1, 116, 124, 206, 69, 61, 7, 68, 97,
202, 63, 244, 20, 28, 58, 93, 134, 104, 144, 227, 147, 102, 118, 135,
148, 47, 238, 86, 112, 122, 70, 107, 215, 100, 139, 223, 225, 164,
237, 111, 125, 207, 160, 187, 246, 234, 161, 188, 193, 249, 252],
[151, 205, 99, 127, 201, 119, 199, 211, 122, 196, 91, 74, 12, 147, 124,
180, 21, 191, 138, 83, 217, 30, 86, 7, 70, 200, 56, 62, 218, 47, 168,
22, 107, 88, 63, 11, 95, 77, 28, 8, 188, 29, 194, 186, 38, 198, 33,
230, 98, 43, 148, 110, 177, 1, 109, 82, 61, 112, 219, 59, 0, 210, 35,
215, 50, 27, 103, 203, 212, 209, 235, 93, 84, 169, 166, 80, 130, 94,
164, 165, 142, 184, 111, 18, 2, 141, 232, 114, 6, 131, 195, 139, 176,
220, 5, 153, 135, 213, 154, 189, 238, 174, 226, 53, 222, 146, 162,
236, 158, 143, 55, 244, 233, 96, 173, 26, 206, 100, 227, 49, 178, 34,
234, 108, 207, 245, 204, 150, 44, 87, 121, 54, 140, 118, 221, 228,
155, 78, 3, 239, 101, 64, 102, 17, 223, 41, 137, 225, 229, 66, 116,
171, 125, 40, 39, 71, 134, 13, 193, 129, 247, 251, 20, 136, 242, 14,
36, 97, 163, 181, 72, 25, 144, 46, 175, 89, 145, 113, 90, 159, 190,
15, 183, 73, 123, 187, 128, 248, 252, 152, 24, 197, 68, 253, 52, 69,
117, 57, 92, 104, 157, 170, 214, 81, 60, 133, 208, 246, 172, 23, 167,
160, 192, 76, 161, 237, 45, 4, 58, 10, 182, 65, 202, 240, 185, 241,
79, 224, 132, 51, 42, 126, 105, 37, 250, 149, 32, 243, 231, 67, 179,
48, 9, 106, 216, 31, 249, 19, 85, 254, 156, 115, 255, 120, 75, 16]]
target2 = [
[124, 30, 170, 247, 27, 127, 224, 59, 13, 22, 196, 76, 72, 154, 32,
209, 4, 2, 131, 62, 101, 51, 230, 9, 166, 11, 99, 80, 208, 112, 36,
248, 81, 102, 130, 88, 218, 38, 168, 15, 241, 228, 167, 117, 158, 41,
10, 180, 194, 50, 204, 243, 246, 251, 29, 198, 219, 210, 195, 21, 54,
91, 203, 221, 70, 57, 183, 17, 147, 49, 133, 65, 77, 55, 202, 122,
162, 169, 188, 200, 190, 125, 63, 244, 96, 31, 107, 106, 74, 143, 116,
148, 78, 46, 1, 137, 150, 110, 181, 56, 95, 139, 58, 3, 231, 66, 165,
142, 242, 43, 192, 157, 89, 175, 109, 220, 128, 0, 178, 42, 255, 20,
214, 185, 83, 160, 253, 7, 23, 92, 111, 153, 26, 226, 33, 176, 144,
18, 216, 212, 28, 151, 71, 206, 222, 182, 8, 174, 205, 201, 152, 240,
155, 108, 223, 104, 239, 98, 164, 211, 184, 34, 193, 14, 114, 187, 40,
254, 12, 67, 93, 217, 6, 94, 16, 19, 82, 86, 245, 24, 197, 134, 132,
138, 229, 121, 5, 235, 238, 85, 47, 103, 113, 179, 69, 250, 45, 135,
156, 25, 61, 75, 44, 146, 189, 84, 207, 172, 119, 53, 123, 186, 120,
171, 68, 227, 145, 136, 100, 90, 48, 79, 159, 149, 39, 213, 236, 126,
52, 60, 225, 199, 105, 73, 233, 252, 118, 215, 35, 115, 64, 37, 97,
129, 161, 177, 87, 237, 141, 173, 191, 163, 140, 234, 232, 249],
[117, 94, 17, 103, 16, 186, 172, 127, 146, 23, 46, 25, 168, 8, 163, 39,
174, 67, 137, 175, 121, 59, 9, 128, 179, 199, 132, 4, 140, 54, 1, 85,
14, 134, 161, 238, 30, 241, 37, 224, 166, 45, 119, 109, 202, 196, 93,
190, 220, 69, 49, 21, 228, 209, 60, 73, 99, 65, 102, 7, 229, 200, 19,
82, 240, 71, 105, 169, 214, 194, 64, 142, 12, 233, 88, 201, 11, 72,
92, 221, 27, 32, 176, 124, 205, 189, 177, 246, 35, 112, 219, 61, 129,
170, 173, 100, 84, 242, 157, 26, 218, 20, 33, 191, 155, 232, 87, 86,
153, 114, 97, 130, 29, 192, 164, 239, 90, 43, 236, 208, 212, 185, 75,
210, 0, 81, 227, 5, 116, 243, 34, 18, 182, 70, 181, 197, 217, 95, 183,
101, 252, 248, 107, 89, 136, 216, 203, 68, 91, 223, 96, 141, 150, 131,
13, 152, 198, 111, 44, 222, 125, 244, 76, 251, 158, 106, 24, 42, 38,
77, 2, 213, 207, 249, 147, 113, 135, 245, 118, 193, 47, 98, 145, 66,
160, 123, 211, 165, 78, 204, 80, 250, 110, 162, 48, 58, 10, 180, 55,
231, 79, 149, 74, 62, 50, 148, 143, 206, 28, 15, 57, 159, 139, 225,
122, 237, 138, 171, 36, 56, 115, 63, 144, 154, 6, 230, 133, 215, 41,
184, 22, 104, 254, 234, 253, 187, 226, 247, 188, 156, 151, 40, 108,
51, 83, 178, 52, 3, 31, 255, 195, 53, 235, 126, 167, 120]]
encrypt_table = b''.join(get_table(b'foobar!'))
decrypt_table = maketrans(encrypt_table, maketrans(b'', b''))
for i in range(0, 256):
assert (target1[0][i] == ord(encrypt_table[i]))
assert (target1[1][i] == ord(decrypt_table[i]))
encrypt_table = b''.join(get_table(b'barfoo!'))
decrypt_table = maketrans(encrypt_table, maketrans(b'', b''))
for i in range(0, 256):
assert (target2[0][i] == ord(encrypt_table[i]))
assert (target2[1][i] == ord(decrypt_table[i]))
def test_encryption():
from shadowsocks.crypto import util
cipher = TableCipher('table', b'test', b'', 1)
decipher = TableCipher('table', b'test', b'', 0)
util.run_cipher(cipher, decipher)
if __name__ == '__main__':
test_table_result()
test_encryption()
|
py | 1a4240f6f69f9f667e2c0a0b29d7dabeb7be5242 | import inspect
def get_classname(o):
""" Returns the classname of an object r a class
:param o:
:return:
"""
if inspect.isclass(o):
target = o
elif callable(o):
target = o
else:
target = o.__class__
try:
return target.__qualname__
except AttributeError: # pragma: no cover
return target.__name__
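# Informal examples for get_classname (same conventions as the fqn doctests below):
#
#   get_classname(dict)        # -> 'dict'        (a class)
#   get_classname({})          # -> 'dict'        (an instance falls back to its class)
#   get_classname(str.upper)   # -> 'str.upper'   (a callable, via __qualname__ when present)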
def fqn(o):
"""Returns the fully qualified class name of an object or a class
:param o: object or class
:return: class name
>>> import django_db_logging
>>> fqn('str')
Traceback (most recent call last):
...
ValueError: Invalid argument `str`
>>> class A(object):
... def method(self):
... pass
>>> str(fqn(A))
'django_db_logging.utils.A'
>>> str(fqn(A()))
'django_db_logging.utils.A'
>>> str(fqn(A.method))
'django_db_logging.utils.A.method'
>>> str(fqn(django_db_logging))
'django_db_logging'
"""
parts = []
if hasattr(o, '__module__'):
parts.append(o.__module__)
parts.append(get_classname(o))
elif inspect.ismodule(o):
return o.__name__
if not parts:
raise ValueError("Invalid argument `%s`" % o)
return ".".join(parts)
|
py | 1a424142bbb907614c77dd095f8f572a00fd6b27 | import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf
import keras
from google.colab.patches import cv2_imshow
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Layer, Activation, Dense, Flatten, Dropout, Lambda, Conv2D, MaxPooling2D, UpSampling2D, Conv2DTranspose, SpatialDropout2D
from tensorflow.keras import losses
from tensorflow.keras import backend as K
from tensorflow.keras.utils import to_categorical
from src.helper_functions.utils import *
from keras.datasets import cifar10
def build_decoder(inp_shape):
""" this function builds the decoder
output: the decoder model
"""
# input image shape
inp = tf.keras.layers.Input(shape=inp_shape)
# layer 1
x = Conv2DTranspose(64, (3, 3), activation='relu')(inp)
x = tf.keras.layers.BatchNormalization()(x)
x = residual(x,64)
# layer 2
x = Conv2DTranspose(64, (3, 3), activation='relu')(x)
x = tf.keras.layers.BatchNormalization()(x)
x = residual(x,32)
# layer 3
x = Conv2DTranspose(128, (3, 3), activation='relu')(x)
x = tf.keras.layers.BatchNormalization()(x)
x = residual(x,32)
# final layer
x = Conv2DTranspose(3, (3, 3),activation='tanh')(x)
# return the model
return tf.keras.models.Model(inp, x)
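# Usage sketch (the latent shape below is illustrative; it must match whatever the
# corresponding encoder emits, and `residual` is assumed to come from
# src.helper_functions.utils, imported above):
#
#   decoder = build_decoder(inp_shape=(24, 24, 64))
#   decoder.summary()   # last layer is Conv2DTranspose(3, tanh) -> 3-channel image in [-1, 1]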
|
py | 1a42418ea36bb58de354ac5e1ebebce5be2db41c | import os
from conan.tools.build import build_jobs
from conan.tools.files.files import load_toolchain_args
from conan.tools.microsoft.subsystems import subsystem_path, deduce_subsystem
from conans.client.build import join_arguments
class Autotools(object):
def __init__(self, conanfile, namespace=None):
self._conanfile = conanfile
toolchain_file_content = load_toolchain_args(self._conanfile.generators_folder,
namespace=namespace)
self._configure_args = toolchain_file_content.get("configure_args")
self._make_args = toolchain_file_content.get("make_args")
def configure(self, build_script_folder=None):
"""
http://jingfenghanmax.blogspot.com.es/2010/09/configure-with-host-target-and-build.html
https://gcc.gnu.org/onlinedocs/gccint/Configure-Terms.html
"""
source = self._conanfile.source_folder
if build_script_folder:
source = os.path.join(self._conanfile.source_folder, build_script_folder)
configure_cmd = "{}/configure".format(source)
subsystem = deduce_subsystem(self._conanfile, scope="build")
configure_cmd = subsystem_path(subsystem, configure_cmd)
cmd = "{} {}".format(configure_cmd, self._configure_args)
self._conanfile.output.info("Calling:\n > %s" % cmd)
self._conanfile.run(cmd)
def make(self, target=None):
make_program = self._conanfile.conf.get("tools.gnu:make_program",
default="mingw32-make" if self._use_win_mingw() else "make")
str_args = self._make_args
jobs = ""
if "-j" not in str_args and "nmake" not in make_program.lower():
njobs = build_jobs(self._conanfile)
if njobs:
jobs = "-j{}".format(njobs)
command = join_arguments([make_program, target, str_args, jobs])
self._conanfile.run(command)
def install(self):
self.make(target="install")
def _use_win_mingw(self):
if hasattr(self._conanfile, 'settings_build'):
os_build = self._conanfile.settings_build.get_safe('os')
else:
os_build = self._conanfile.settings.get_safe("os")
if os_build == "Windows":
compiler = self._conanfile.settings.get_safe("compiler")
sub = self._conanfile.settings.get_safe("os.subsystem")
if sub in ("cygwin", "msys2", "msys") or compiler == "qcc":
return False
else:
if self._conanfile.win_bash:
return False
return True
return False
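# Typical use inside a conanfile's build() method (sketch; assumes the matching
# AutotoolsToolchain/AutotoolsDeps generators already wrote the toolchain args file
# that the constructor loads via load_toolchain_args):
#
#   def build(self):
#       autotools = Autotools(self)
#       autotools.configure()   # runs <source>/configure with the generated args
#       autotools.make()        # honours tools.gnu:make_program and adds -j<n>
#       autotools.install()     # same as make(target="install")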
|
py | 1a4243b5f5adfcfa67819fe6ce697a4143cbf51e | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class to hold a library of OpDefs and use it to create Brain operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import tf_contextlib
def _Attr(op_def, name):
for attr in op_def.attr:
if attr.name == name:
return attr
raise TypeError("Inconsistent OpDef for '%s', missing attr '%s'" %
(op_def.name, name))
def _AttrValue(attr_protos, name):
if name in attr_protos:
return attr_protos[name]
raise TypeError("Inconsistent OpDef, missing attr '%s' from '%s'." %
(name, attr_protos))
def _SatisfiesTypeConstraint(dtype, attr_def, param_name):
if attr_def.HasField("allowed_values"):
allowed_list = attr_def.allowed_values.list.type
if dtype not in allowed_list:
raise TypeError(
"Value passed to parameter '%s' has DataType %s not in list of "
"allowed values: %s" %
(param_name, dtypes.as_dtype(dtype).name,
", ".join(dtypes.as_dtype(x).name for x in allowed_list)))
def _IsListParameter(arg):
if arg.number_attr:
return True
elif arg.type_list_attr:
return True
return False
def _NumTypeFields(arg):
num = 0
if arg.type != types_pb2.DT_INVALID: num += 1
if arg.type_attr: num += 1
if arg.type_list_attr: num += 1
return num
def _IsListValue(v):
return isinstance(v, (list, tuple))
def _Flatten(l):
"""Converts [1, 2, [3, 4], [5]] to [1, 2, 3, 4, 5]."""
# [1, 2, [3, 4], [5]] -> [[1], [2], [3, 4], [5]]
l_of_l = [x if _IsListValue(x) else [x] for x in l]
# [[1], [2], [3, 4], [5]] -> [1, 2, 3, 4, 5]
return [item for sublist in l_of_l for item in sublist]
def _Restructure(l, structure):
"""Returns the elements of list l structured according to the given structure.
A structure is represented by a list whose elements are either
`None` or a non-negative integer. `None` corresponds to a single
element in the output list, and an integer N corresponds to a nested
list of length N.
The function returns a data structure whose shape is given by
`structure`, and whose elements are taken from `l`. If `structure`
is a singleton, the function returns the single data structure
implied by the 0th element of `structure`. For example:
_Restructure(["foo", "bar", "baz", "qux"], [None, 2, None])
-> ["foo", ["bar", "baz"], "qux"]
_Restructure(["foo"], [None]) -> "foo"
_Restructure(["foo"], [1]) -> ["foo"]
_Restructure([], [0]) -> []
Args:
l: A list.
structure: A list whose elements are either `None` or a non-negative
integer.
Returns:
The elements of `l`, restructured according to `structure`. If
`structure` is a list of length 1, this function returns the
single data structure implied by `structure[0]`.
"""
result = []
current_index = 0
for element in structure:
if element is None:
result.append(l[current_index])
current_index += 1
else:
result.append(l[current_index:current_index+element])
current_index += element
if len(result) == 1:
return result[0]
else:
return tuple(result)
def _MakeFloat(v, arg_name):
if not isinstance(v, compat.real_types):
raise TypeError("Expected float for argument '%s' not %s." %
(arg_name, repr(v)))
return float(v)
def _MakeInt(v, arg_name):
if isinstance(v, six.string_types):
raise TypeError("Expected int for argument '%s' not %s." %
(arg_name, repr(v)))
try:
return int(v)
except (ValueError, TypeError):
raise TypeError("Expected int for argument '%s' not %s." %
(arg_name, repr(v)))
def _MakeStr(v, arg_name):
if not isinstance(v, compat.bytes_or_text_types):
raise TypeError("Expected string for argument '%s' not %s." %
(arg_name, repr(v)))
return compat.as_bytes(v) # Convert unicode strings to bytes.
def _MakeBool(v, arg_name):
if not isinstance(v, bool):
raise TypeError("Expected bool for argument '%s' not %s." %
(arg_name, repr(v)))
return v
def _MakeType(v, attr_def):
try:
v = dtypes.as_dtype(v).base_dtype
except TypeError:
raise TypeError("Expected DataType for argument '%s' not %s." %
(attr_def.name, repr(v)))
i = v.as_datatype_enum
_SatisfiesTypeConstraint(i, attr_def, param_name=attr_def.name)
return i
def _MakeShape(v, arg_name):
"""Convert v into a TensorShapeProto."""
# Args:
# v: A TensorShapeProto, a list of ints, or a tensor_shape.TensorShape.
# arg_name: String, for error messages.
# Returns:
# A TensorShapeProto.
if isinstance(v, tensor_shape_pb2.TensorShapeProto):
for d in v.dim:
if d.name:
logging.warning("Warning: TensorShapeProto with a named dimension: %s",
str(v))
break
return v
try:
return tensor_shape.as_shape(v).as_proto()
except TypeError as e:
raise TypeError("Error converting %s to a TensorShape: %s" % (arg_name, e))
except ValueError as e:
raise ValueError("Error converting %s to a TensorShape: %s" % (arg_name, e))
def _MakeTensor(v, arg_name):
"""Ensure v is a TensorProto."""
if isinstance(v, tensor_pb2.TensorProto):
return v
raise TypeError(
"Don't know how to convert %s to a TensorProto for argument '%s'" %
(repr(v), arg_name))
def _MakeFunc(v, arg_name):
"""Ensure v is a func."""
if isinstance(v, attr_value_pb2.NameAttrList):
return v
fn_attr = attr_value_pb2.NameAttrList()
if isinstance(v, compat.bytes_or_text_types):
fn_attr.name = v
elif hasattr(v, "add_to_graph"):
v.add_to_graph(ops.get_default_graph())
fn_attr.name = v.name
else:
raise TypeError("Don't know how to convert {} to a func for "
"argument {}".format(v, arg_name))
return fn_attr
class _OpInfo(object):
"""All per-Op state we would like to precompute/validate."""
def __init__(self, op_def):
self.op_def = op_def
# TODO(josh11b): SWIG the ValidateOpDef() function from C++ and call it
# here, instead of these checks.
for arg in list(op_def.input_arg) + list(op_def.output_arg):
num_type_fields = _NumTypeFields(arg)
if num_type_fields != 1:
raise TypeError("Arg '%s' of '%s' must have one type field not %d" %
(arg.name, op_def.name, num_type_fields))
if arg.type_attr:
attr_type = _Attr(op_def, arg.type_attr).type
if attr_type != "type":
raise TypeError("Attr '%s' of '%s' used as a type_attr "
"but has type %s" %
(arg.type_attr, op_def.name, attr_type))
if arg.type_list_attr:
attr_type = _Attr(op_def, arg.type_list_attr).type
if attr_type != "list(type)":
raise TypeError(
"Attr '%s' of '%s' used as a type_list_attr but has type %s" %
(arg.type_attr, op_def.name, attr_type))
if arg.number_attr:
attr_type = _Attr(op_def, arg.number_attr).type
if attr_type != "int":
raise TypeError(
"Attr '%s' of '%s' used as a number_attr but has type %s" %
(arg.number_attr, op_def.name, attr_type))
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _MaybeColocateWith(inputs):
"""A context manager for (maybe) colocating with a list of input tensors.
Args:
inputs: A list of `Tensor` or `Operation` objects.
Returns:
A context manager.
"""
if not inputs:
yield
else:
# NOTE(mrry): The `ops.colocate_with()` function accepts only a single
# op or tensor, so we create one context manager per element in the list.
with ops.colocate_with(inputs[0]), _MaybeColocateWith(inputs[1:]):
yield
# pylint: enable=g-doc-return-or-yield
class OpDefLibrary(object):
"""Holds a collection of OpDefs, can add the corresponding Ops to a graph."""
def __init__(self):
self._ops = {}
# pylint: disable=invalid-name
def add_op(self, op_def):
"""Register an OpDef. May call apply_op with the name afterwards."""
if not isinstance(op_def, op_def_pb2.OpDef):
raise TypeError("%s is %s, not an op_def_pb2.OpDef" %
(op_def, type(op_def)))
if not op_def.name:
raise ValueError("%s missing name." % op_def)
if op_def.name in self._ops:
raise RuntimeError("Op name %s registered twice." % op_def.name)
self._ops[op_def.name] = _OpInfo(op_def)
def add_op_list(self, op_list):
"""Register the OpDefs from an OpList."""
if not isinstance(op_list, op_def_pb2.OpList):
raise TypeError("%s is %s, not an op_def_pb2.OpList" %
(op_list, type(op_list)))
for op_def in op_list.op:
self.add_op(op_def)
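  # Usage sketch (names illustrative; inside TensorFlow this library is normally
  # populated from generated OpList protos rather than by hand):
  #
  #   op_lib = OpDefLibrary()
  #   op_lib.add_op_list(op_def_pb2.OpList(op=[my_op_def]))   # my_op_def: an op_def_pb2.OpDef
  #   y = op_lib.apply_op("MyOp", input=x, name="my_op")      # keyword args follow the OpDef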
def apply_op(self, op_type_name, name=None, **keywords):
# pylint: disable=g-doc-args
"""Add a node invoking a registered Op to a graph.
Example usage:
# input1 and input2 can be Tensors or anything ops.convert_to_tensor()
# will convert to a Tensor.
op_def_library.apply_op("op", input1=input1, input2=input2)
# Can specify a node name.
op_def_library.apply_op("op", input1=input1, name="node_name")
# Must use keyword arguments, with the names specified in the OpDef.
op_def_library.apply_op("op", input_name=input, attr_name=attr)
All attrs must either be inferred from an input or specified.
(If inferred, the attr must not be specified.) If an attr has a default
value specified in the Op's OpDef, then you may pass None as the value
of that attr to get the default.
Args:
op_type_name: string. Must match the name field of a registered Op.
name: string. Optional name of the created op.
**keywords: input Tensor and attr arguments specified by name,
and optional parameters to pass when constructing the Operation.
Returns:
The Tensor(s) representing the output of the operation, or the Operation
itself if there are no outputs.
Raises:
RuntimeError: On some errors.
TypeError: On some errors.
ValueError: On some errors.
"""
output_structure, is_stateful, op = self._apply_op_helper(
op_type_name, name, **keywords)
if output_structure:
outputs = op.outputs
res = _Restructure(ops.convert_n_to_tensor(outputs), output_structure)
if isinstance(res, list) and not res and is_stateful:
return op
else:
return res
else:
return op
def _apply_op_helper(self, op_type_name, name=None, **keywords):
"""Implementation of apply_op that returns output_structure, op."""
op_info = self._ops.get(op_type_name, None)
if op_info is None:
raise RuntimeError("Unrecognized Op name " + op_type_name)
op_def = op_info.op_def
# Determine the graph context.
try:
# Need to flatten all the arguments into a list.
# pylint: disable=protected-access
g = ops._get_graph_from_inputs(_Flatten(keywords.values()))
# pylint: enable=protected-access
except AssertionError as e:
raise RuntimeError(
"Cannot determine graph for Op '%s' due to: %s"
% (op_type_name, e.message))
# Default name if not specified.
if name is None:
name = op_type_name
# Check for deprecation
deprecation_version = op_def.deprecation.version
if deprecation_version:
producer = g.graph_def_versions.producer
if producer >= deprecation_version:
raise NotImplementedError(
("Op %s is not available in GraphDef version %d. "
"It has been removed in version %d. %s.") %
(op_type_name, producer, deprecation_version,
op_def.deprecation.explanation))
# Fill in the list of default types for all "type" attrs. This
# will be used to choose a preferred dtype to convert to in the
# absence of input type information.
#
# TODO(b/31302892): Currently the defaults don't work in the right
# way if you have two inputs, one of whose type resolution depends
# on the other. Handling this will require restructuring this code
# significantly.
default_type_attr_map = {}
for attr_def in op_def.attr:
if attr_def.type != "type":
continue
key = attr_def.name
if attr_def.HasField("default_value"):
default_type_attr_map[key] = dtypes.as_dtype(
attr_def.default_value.type)
# Requires that op_def has passed validation (using the C++
# ValidateOpDef() from ../framework/op_def_util.h).
attrs = {}
inputs = []
input_types = []
with g.as_default(), ops.name_scope(name) as scope:
# Perform input type inference
inferred_from = {}
for input_arg in op_def.input_arg:
input_name = input_arg.name
if input_name in keywords:
values = keywords.pop(input_name)
elif input_name + "_" in keywords:
# Handle the case where the name is a keyword or built-in
# for Python so we use the name + _ instead.
input_name += "_"
values = keywords.pop(input_name)
else:
raise TypeError("No argument for input " + input_name)
# Goals:
# * Convert values to Tensors if it contains constants.
# * Verify that values is a list if that matches the input_arg's
# type.
# * If the input_arg's type is determined by attrs, either set
# those attrs and validate those attr values are legal (if
# they have not yet been set) or validate the input matches
# the type indicated by the attrs (if they have already been
# inferred via an earlier input).
# * If the input_arg has an explicit type, make sure the input
# conforms.
if _IsListParameter(input_arg):
if not _IsListValue(values):
raise TypeError(
"Expected list for '%s' argument to '%s' Op, not %s." %
(input_name, op_type_name, values))
# In cases where we expect all elements of the list to have the
# same dtype, try to cast non-Tensor elements to that type.
dtype = None
default_dtype = None
if input_arg.type != types_pb2.DT_INVALID:
dtype = input_arg.type
elif input_arg.number_attr:
if input_arg.type_attr in attrs:
dtype = attrs[input_arg.type_attr]
else:
for t in values:
if isinstance(t, ops.Tensor):
dtype = t.dtype
break
# dtype still not found, prefer using the default dtype
# from the attr.
if dtype is None and input_arg.type_attr in default_type_attr_map:
default_dtype = default_type_attr_map[input_arg.type_attr]
try:
if not input_arg.is_ref and dtype:
dtype = dtypes.as_dtype(dtype).base_dtype
values = ops.internal_convert_n_to_tensor(
values,
name=input_arg.name,
dtype=dtype if dtype else None,
preferred_dtype=default_dtype,
as_ref=input_arg.is_ref)
if input_arg.number_attr and len(
set(v.dtype.base_dtype for v in values)) > 1:
raise TypeError() # All types should match.
except (TypeError, ValueError):
# What types does the conversion function think values have?
observed_types = []
for value in values:
try:
converted_value = ops.internal_convert_to_tensor(
value, as_ref=input_arg.is_ref)
observed_types.append(converted_value.dtype.base_dtype.name)
except (TypeError, ValueError):
observed_types.append("<NOT CONVERTIBLE TO TENSOR>")
observed = ", ".join(observed_types)
prefix = (
"Tensors in list passed to '%s' of '%s' Op have types [%s]" %
(input_name, op_type_name, observed))
if input_arg.number_attr:
if input_arg.type != types_pb2.DT_INVALID:
raise TypeError("%s that do not match expected type %s." %
(prefix, dtype.name))
elif input_arg.type_attr in attrs:
raise TypeError("%s that do not match type %s inferred from "
"earlier arguments." %
(prefix, dtype.name))
else:
raise TypeError("%s that don't all match." % prefix)
else:
raise TypeError(
"%s that are invalid. Tensors: %s" % (prefix, values))
types = [x.dtype for x in values]
inputs.extend(values)
else:
# In cases where we have an expected type, try to convert non-Tensor
# arguments to that type.
dtype = None
default_dtype = None
if input_arg.type != types_pb2.DT_INVALID:
dtype = input_arg.type
elif input_arg.type_attr in attrs:
dtype = attrs[input_arg.type_attr]
elif input_arg.type_attr in default_type_attr_map:
# The dtype could not be inferred solely from the inputs,
# so we prefer the attr's default, so code that adds a new attr
# with a default is backwards compatible.
default_dtype = default_type_attr_map[input_arg.type_attr]
try:
values = ops.internal_convert_to_tensor(
values,
name=input_arg.name,
dtype=dtype,
as_ref=input_arg.is_ref,
preferred_dtype=default_dtype)
except TypeError as err:
if dtype is None:
raise err
else:
raise TypeError(
"Expected %s passed to parameter '%s' of op '%s', got %s of "
"type '%s' instead. Error: %s" %
(dtypes.as_dtype(dtype).name, input_arg.name, op_type_name,
repr(values), type(values).__name__, err))
except ValueError:
# What type does convert_to_tensor think it has?
try:
observed = ops.internal_convert_to_tensor(
values, as_ref=input_arg.is_ref).dtype.name
except ValueError as err:
raise ValueError(
"Tried to convert '%s' to a tensor and failed. Error: %s" %
(input_name, err))
prefix = ("Input '%s' of '%s' Op has type %s that does not match" %
(input_name, op_type_name, observed))
if input_arg.type != types_pb2.DT_INVALID:
raise TypeError("%s expected type of %s." %
(prefix, dtypes.as_dtype(input_arg.type).name))
else:
# Update the maps with the default, if needed.
k = input_arg.type_attr
if k in default_type_attr_map:
if k not in attrs:
attrs[k] = default_type_attr_map[k]
if k not in inferred_from:
inferred_from[k] = "Default in OpDef"
raise TypeError(
"%s type %s of argument '%s'." %
(prefix, dtypes.as_dtype(attrs[input_arg.type_attr]).name,
inferred_from[input_arg.type_attr]))
types = [values.dtype]
inputs.append(values)
base_types = [x.base_dtype for x in types]
if input_arg.number_attr:
# <number-attr> * <type> or <number-attr> * <type-attr>
if input_arg.number_attr in attrs:
if len(values) != attrs[input_arg.number_attr]:
raise ValueError(
"List argument '%s' to '%s' Op with length %d must match "
"length %d of argument '%s'." %
(input_name, op_type_name, len(values),
attrs[input_arg.number_attr],
inferred_from[input_arg.number_attr]))
else:
attrs[input_arg.number_attr] = len(values)
inferred_from[input_arg.number_attr] = input_name
num_attr = _Attr(op_def, input_arg.number_attr)
if num_attr.has_minimum and len(values) < num_attr.minimum:
raise ValueError(
"List argument '%s' to '%s' Op with length %d shorter "
"than minimum length %d." %
(input_name, op_type_name, len(values), num_attr.minimum))
# All tensors must have the same base type.
if any(bt != base_types[0] for bt in base_types):
raise TypeError(
"All tensors passed to '%s' of '%s' Op "
"must have the same type." %
(input_name, op_type_name))
if input_arg.type != types_pb2.DT_INVALID:
# <number-attr> * <type> case
if base_types and base_types[0] != input_arg.type:
assert False, "Unreachable"
elif input_arg.type_attr in attrs:
# <number-attr> * <type-attr> case, where <type-attr> already
# has an inferred value.
if base_types and base_types[0] != attrs[input_arg.type_attr]:
assert False, "Unreachable"
else:
# <number-attr> * <type-attr> case, where we are now setting
# the <type-attr> based on this input
if not base_types:
raise TypeError(
"Don't know how to infer type variable from empty input "
"list passed to input '%s' of '%s' Op." %
(input_name, op_type_name))
attrs[input_arg.type_attr] = base_types[0]
inferred_from[input_arg.type_attr] = input_name
type_attr = _Attr(op_def, input_arg.type_attr)
_SatisfiesTypeConstraint(base_types[0], type_attr,
param_name=input_name)
elif input_arg.type_attr:
# <type-attr>
attr_value = base_types[0]
if input_arg.type_attr in attrs:
if attrs[input_arg.type_attr] != attr_value:
assert False, "Unreachable"
else:
for base_type in base_types:
_SatisfiesTypeConstraint(base_type,
_Attr(op_def, input_arg.type_attr),
param_name=input_name)
attrs[input_arg.type_attr] = attr_value
inferred_from[input_arg.type_attr] = input_name
elif input_arg.type_list_attr:
# <type-list-attr>
attr_value = base_types
if input_arg.type_list_attr in attrs:
if attrs[input_arg.type_list_attr] != attr_value:
raise TypeError(
"Input '%s' of '%s' Op has type list of %s that does not "
"match type list %s of argument '%s'." %
(input_name, op_type_name,
", ".join(dtypes.as_dtype(x).name for x in attr_value),
", ".join(dtypes.as_dtype(x).name
for x in attrs[input_arg.type_list_attr]),
inferred_from[input_arg.type_list_attr]))
else:
for base_type in base_types:
_SatisfiesTypeConstraint(base_type,
_Attr(op_def, input_arg.type_list_attr),
param_name=input_name)
attrs[input_arg.type_list_attr] = attr_value
inferred_from[input_arg.type_list_attr] = input_name
else:
# single Tensor with specified type
if base_types[0] != input_arg.type:
assert False, "Unreachable"
if input_arg.is_ref:
if not all(x._is_ref_dtype for x in types): # pylint: disable=protected-access
raise TypeError(
("'%s' Op requires that input '%s' be a mutable tensor "
"(e.g.: a tf.Variable)") % (op_type_name, input_name))
input_types.extend(types)
else:
input_types.extend(base_types)
# Process remaining attrs
for attr in op_def.attr:
# Skip attrs that have already had their values inferred
if attr.name in attrs:
if attr.name in keywords:
raise TypeError(
"Should not specify value for inferred attr '%s'." % attr.name)
continue
if attr.name in keywords:
attrs[attr.name] = keywords.pop(attr.name)
elif attr.name + "_" in keywords:
# Attrs whose names match Python keywords have an extra '_'
# appended, so we must check for that as well.
attrs[attr.name] = keywords.pop(attr.name + "_")
else:
raise TypeError("No argument for attr " + attr.name)
# Convert attr values to AttrValue protos.
attr_protos = {}
for attr_def in op_def.attr:
key = attr_def.name
value = attrs[key]
attr_value = attr_value_pb2.AttrValue()
if attr_def.HasField("default_value") and value is None:
attr_value.CopyFrom(attr_def.default_value)
attr_protos[key] = attr_value
continue
if attr_def.type.startswith("list("):
if not _IsListValue(value):
raise TypeError("Expected list for attr " + key)
if attr_def.has_minimum:
if len(value) < attr_def.minimum:
raise ValueError("Attr '%s' of '%s' Op passed list of length %d "
"less than minimum %d." %
(key, op_type_name, len(value),
attr_def.minimum))
attr_value.list.SetInParent()
if attr_def.type == "string":
attr_value.s = _MakeStr(value, key)
if attr_def.HasField("allowed_values"):
if attr_value.s not in attr_def.allowed_values.list.s:
raise ValueError(
"Attr '%s' of '%s' Op passed string '%s' not in: \"%s\"." %
(key, op_type_name, compat.as_text(attr_value.s),
'", "'.join(map(compat.as_text,
attr_def.allowed_values.list.s))))
elif attr_def.type == "list(string)":
attr_value.list.s.extend([_MakeStr(x, key) for x in value])
if attr_def.HasField("allowed_values"):
for x in attr_value.list.s:
if x not in attr_def.allowed_values.list.s:
raise ValueError(
"Attr '%s' of '%s' Op passed string '%s' not in: \"%s\"." %
(key, op_type_name, compat.as_text(x),
'", "'.join(map(compat.as_text,
attr_def.allowed_values.list.s))))
elif attr_def.type == "int":
attr_value.i = _MakeInt(value, key)
if attr_def.has_minimum:
if attr_value.i < attr_def.minimum:
raise ValueError(
"Attr '%s' of '%s' Op passed %d less than minimum %d." %
(key, op_type_name, attr_value.i, attr_def.minimum))
elif attr_def.type == "list(int)":
attr_value.list.i.extend([_MakeInt(x, key) for x in value])
elif attr_def.type == "float":
attr_value.f = _MakeFloat(value, key)
elif attr_def.type == "list(float)":
attr_value.list.f.extend([_MakeFloat(x, key) for x in value])
elif attr_def.type == "bool":
attr_value.b = _MakeBool(value, key)
elif attr_def.type == "list(bool)":
attr_value.list.b.extend([_MakeBool(x, key) for x in value])
elif attr_def.type == "type":
attr_value.type = _MakeType(value, attr_def)
elif attr_def.type == "list(type)":
attr_value.list.type.extend(
[_MakeType(x, attr_def) for x in value])
elif attr_def.type == "shape":
attr_value.shape.CopyFrom(_MakeShape(value, key))
elif attr_def.type == "list(shape)":
attr_value.list.shape.extend(
[_MakeShape(x, key) for x in value])
elif attr_def.type == "tensor":
attr_value.tensor.CopyFrom(_MakeTensor(value, key))
elif attr_def.type == "list(tensor)":
attr_value.list.tensor.extend(
[_MakeTensor(x, key) for x in value])
elif attr_def.type == "func":
attr_value.func.CopyFrom(_MakeFunc(value, key))
elif attr_def.type == "list(func)":
attr_value.list.func.extend([_MakeFunc(x, key) for x in value])
else:
raise TypeError("Unrecognized Attr type " + attr_def.type)
attr_protos[key] = attr_value
del attrs # attrs is no longer authoritative, use attr_protos instead
# Determine output types (possibly using attrs)
output_structure = []
for arg in op_def.output_arg:
if arg.number_attr:
n = _AttrValue(attr_protos, arg.number_attr).i
output_structure.append(n)
elif arg.type_attr:
t = _AttrValue(attr_protos, arg.type_attr)
output_structure.append(None)
elif arg.type_list_attr:
t = _AttrValue(attr_protos, arg.type_list_attr)
output_structure.append(len(t.list.type))
else:
output_structure.append(None)
if keywords:
raise TypeError("apply_op() got unexpected keyword arguments: " +
", ".join(sorted(keywords.keys())))
# NOTE(mrry): We add an explicit colocation constraint between
# the newly created op and any of its reference-typed inputs.
must_colocate_inputs = [val for arg, val in zip(op_def.input_arg, inputs)
if arg.is_ref]
with _MaybeColocateWith(must_colocate_inputs):
# Add Op to graph
op = g.create_op(op_type_name, inputs, dtypes=None, name=scope,
input_types=input_types, attrs=attr_protos,
op_def=op_def)
return output_structure, op_def.is_stateful, op
# pylint: enable=invalid-name
|
py | 1a4244a41b2980a366ea66abf602fb7d2e09a770 | from functools import wraps
from django.conf import settings
from django.http import Http404
from django.shortcuts import render
from django.utils.decorators import available_attrs
import djangosecure.middleware
class AdminAuthMiddleware(object):
def process_request(self, request):
"""
Don't make Django admin visible unless user is already logged into dashboard and is an admin.
"""
if request.path.startswith('/admin/') and not getattr(request.user, 'is_staff', False):
raise Http404
### SSL forwarding ###
def ssl_optional(view_func):
"""
Mark view functions with @ssl_optional to exclude them from automatic forwarding to https:.
"""
def wrapped_view(*args, **kwargs):
return view_func(*args, **kwargs)
wrapped_view.ssl_optional = True
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
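# Usage sketch (hypothetical view): views decorated with @ssl_optional are skipped by
# the SecurityMiddleware subclass below, so they remain reachable over plain HTTP.
#
#   @ssl_optional
#   def health_check(request):
#       return HttpResponse("ok")   # assumes django.http.HttpResponse is imported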
class SecurityMiddleware(djangosecure.middleware.SecurityMiddleware):
"""
Apply the same test as djangosecure.middleware.SecurityMiddleware,
but do it in process_view instead of process_request so we can check
whether the view has been decorated with ssl_optional.
"""
def process_request(self, request):
return
def process_view(self, request, view_func, view_args, view_kwargs):
if getattr(view_func, 'ssl_optional', False):
return
return super(SecurityMiddleware, self).process_request(request)
### read only mode ###
class ReadOnlyMiddleware(object):
def process_exception(self, request, exception):
if settings.READ_ONLY_MODE:
return render(request, 'read_only_mode.html')
|
py | 1a4244ccc40a06d35cc692a5ddb00678f0b36d13 | from kivy.app import App
from kivy.uix.widget import Widget
from kivy.clock import Clock
from kivy.core.window import Window
from random import randint, choice
from math import radians, pi, sin, cos
import kivent_core
import kivent_cymunk
from kivent_core.gameworld import GameWorld
from kivent_core.managers.resource_managers import texture_manager
from kivent_core.systems.renderers import RotateRenderer
from kivent_core.systems.position_systems import PositionSystem2D
from kivent_core.systems.rotate_systems import RotateSystem2D
from kivent_cymunk.interaction import CymunkTouchSystem
from kivy.properties import StringProperty, NumericProperty
from functools import partial
from os.path import dirname, join, abspath
texture_manager.load_atlas(join(dirname(dirname(abspath(__file__))), 'assets',
'background_objects.atlas'))
class TestGame(Widget):
def on_kv_post(self, *args):
self.gameworld.init_gameworld(
['cymunk_physics', 'rotate_renderer', 'rotate', 'position',
'cymunk_touch'],
callback=self.init_game)
def init_game(self):
self.setup_states()
self.set_state()
def destroy_created_entity(self, ent_id, dt):
self.gameworld.remove_entity(ent_id)
self.app.count -= 1
def draw_some_stuff(self):
size = Window.size
w, h = size[0], size[1]
delete_time = 2.5
create_asteroid = self.create_asteroid
destroy_ent = self.destroy_created_entity
for x in range(100):
pos = (randint(0, w), randint(0, h))
ent_id = create_asteroid(pos)
self.app.count += 100
def create_asteroid(self, pos):
x_vel = randint(-500, 500)
y_vel = randint(-500, 500)
angle = radians(randint(-360, 360))
        angular_velocity = radians(randint(-150, 150))  # random spin in either direction
shape_dict = {'inner_radius': 0, 'outer_radius': 22,
'mass': 50, 'offset': (0, 0)}
col_shape = {'shape_type': 'circle', 'elasticity': .5,
'collision_type': 1, 'shape_info': shape_dict, 'friction': 1.0}
col_shapes = [col_shape]
physics_component = {'main_shape': 'circle',
'velocity': (x_vel, y_vel),
'position': pos, 'angle': angle,
'angular_velocity': angular_velocity,
'vel_limit': 250,
'ang_vel_limit': radians(200),
'mass': 50, 'col_shapes': col_shapes}
create_component_dict = {'cymunk_physics': physics_component,
'rotate_renderer': {'texture': 'asteroid1',
'size': (45, 45),
'render': True},
'position': pos, 'rotate': 0, }
component_order = ['position', 'rotate', 'rotate_renderer',
'cymunk_physics',]
return self.gameworld.init_entity(
create_component_dict, component_order)
def update(self, dt):
self.gameworld.update(dt)
def setup_states(self):
self.gameworld.add_state(state_name='main',
systems_added=['rotate_renderer'],
systems_removed=[], systems_paused=[],
systems_unpaused=['rotate_renderer'],
screenmanager_screen='main')
def set_state(self):
self.gameworld.state = 'main'
class DebugPanel(Widget):
fps = StringProperty(None)
def __init__(self, **kwargs):
super(DebugPanel, self).__init__(**kwargs)
Clock.schedule_once(self.update_fps)
def update_fps(self,dt):
self.fps = str(int(Clock.get_fps()))
Clock.schedule_once(self.update_fps, .05)
class YourAppNameApp(App):
count = NumericProperty(0)
if __name__ == '__main__':
YourAppNameApp().run() |
py | 1a4244ce36e7e4aacc9384ffc26927ccc9ad5095 | def save(file, conf):
with open(file, 'w') as configfile:
conf.write(configfile)
def getOpts():
import configparser
import copy
import os
config = configparser.ConfigParser()
file = os.path.abspath(os.path.join('.', 'config.ini'))
DEFAULT_OPTIONS = {
'DEFAULT': {
'limit': 5,
'domain_name': 'https://nyaa.si',
'out_dir': os.path.abspath(os.path.join('.', 'output'))
}
}
if os.path.isfile(file):
config.read(file)
for i in DEFAULT_OPTIONS:
if i not in config:
config[i] = copy.deepcopy(DEFAULT_OPTIONS[i])
for x in DEFAULT_OPTIONS[i]:
if x not in config[i]:
config[i][x] = str(copy.deepcopy(DEFAULT_OPTIONS[i][x]))
save(file, config)
else:
for i in DEFAULT_OPTIONS:
config[i] = DEFAULT_OPTIONS[i]
save(file, config)
options = copy.deepcopy(dict(config['DEFAULT']))
return options
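# Usage sketch: getOpts() reads (or creates) ./config.ini and returns the DEFAULT
# section as a plain dict; note that configparser stores every value as a string.
#
#   opts = getOpts()
#   limit = int(opts['limit'])            # '5' by default
#   print(opts['domain_name'], opts['out_dir'])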
|
py | 1a4245f958949c8e746e2d6d0580ef500ce1f3ce | # Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.tests.unit.virt.hyperv import test_vmutils
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutilsv2
class VMUtilsV2TestCase(test_vmutils.VMUtilsTestCase):
"""Unit tests for the Hyper-V VMUtilsV2 class."""
_DEFINE_SYSTEM = 'DefineSystem'
_DESTROY_SYSTEM = 'DestroySystem'
_DESTROY_SNAPSHOT = 'DestroySnapshot'
_ADD_RESOURCE = 'AddResourceSettings'
_REMOVE_RESOURCE = 'RemoveResourceSettings'
_SETTING_TYPE = 'VirtualSystemType'
_VM_GEN = constants.VM_GEN_2
_VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized'
def setUp(self):
super(VMUtilsV2TestCase, self).setUp()
self._vmutils = vmutilsv2.VMUtilsV2()
self._vmutils._conn = mock.MagicMock()
def test_create_vm(self):
super(VMUtilsV2TestCase, self).test_create_vm()
mock_vssd = self._vmutils._conn.Msvm_VirtualSystemSettingData.new()
self.assertEqual(self._vmutils._VIRTUAL_SYSTEM_SUBTYPE_GEN2,
mock_vssd.VirtualSystemSubType)
self.assertFalse(mock_vssd.SecureBootEnabled)
def test_modify_virt_resource(self):
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
mock_svc.ModifyResourceSettings.return_value = (self._FAKE_JOB_PATH,
mock.MagicMock(),
self._FAKE_RET_VAL)
mock_res_setting_data = mock.MagicMock()
mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
self._vmutils._modify_virt_resource(mock_res_setting_data,
self._FAKE_VM_PATH)
mock_svc.ModifyResourceSettings.assert_called_with(
ResourceSettings=[self._FAKE_RES_DATA])
@mock.patch.object(vmutilsv2, 'wmi', create=True)
@mock.patch.object(vmutilsv2.VMUtilsV2, 'check_ret_val')
def test_take_vm_snapshot(self, mock_check_ret_val, mock_wmi):
self._lookup_vm()
mock_svc = self._get_snapshot_service()
mock_svc.CreateSnapshot.return_value = (self._FAKE_JOB_PATH,
mock.MagicMock(),
self._FAKE_RET_VAL)
self._vmutils.take_vm_snapshot(self._FAKE_VM_NAME)
mock_svc.CreateSnapshot.assert_called_with(
AffectedSystem=self._FAKE_VM_PATH,
SnapshotType=self._vmutils._SNAPSHOT_FULL)
mock_check_ret_val.assert_called_once_with(self._FAKE_RET_VAL,
self._FAKE_JOB_PATH)
@mock.patch.object(vmutilsv2.VMUtilsV2, '_add_virt_resource')
@mock.patch.object(vmutilsv2.VMUtilsV2, '_get_new_setting_data')
@mock.patch.object(vmutilsv2.VMUtilsV2, '_get_nic_data_by_name')
def test_set_nic_connection(self, mock_get_nic_data, mock_get_new_sd,
mock_add_virt_res):
self._lookup_vm()
fake_eth_port = mock_get_new_sd.return_value
self._vmutils.set_nic_connection(self._FAKE_VM_NAME, None, None)
mock_add_virt_res.assert_called_with(fake_eth_port, self._FAKE_VM_PATH)
@mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_vm_disks')
def test_enable_vm_metrics_collection(self, mock_get_vm_disks):
self._lookup_vm()
mock_svc = self._vmutils._conn.Msvm_MetricService()[0]
metric_def = mock.MagicMock()
mock_disk = mock.MagicMock()
mock_disk.path_.return_value = self._FAKE_RES_PATH
mock_get_vm_disks.return_value = ([mock_disk], [mock_disk])
fake_metric_def_paths = ['fake_0', 'fake_0', None]
fake_metric_resource_paths = [self._FAKE_VM_PATH,
self._FAKE_VM_PATH,
self._FAKE_RES_PATH]
metric_def.path_.side_effect = fake_metric_def_paths
self._vmutils._conn.CIM_BaseMetricDefinition.return_value = [
metric_def]
self._vmutils.enable_vm_metrics_collection(self._FAKE_VM_NAME)
calls = [mock.call(Name=def_name)
for def_name in [self._vmutils._METRIC_AGGR_CPU_AVG,
self._vmutils._METRIC_AGGR_MEMORY_AVG]]
self._vmutils._conn.CIM_BaseMetricDefinition.assert_has_calls(calls)
calls = []
for i in range(len(fake_metric_def_paths)):
calls.append(mock.call(
Subject=fake_metric_resource_paths[i],
Definition=fake_metric_def_paths[i],
MetricCollectionEnabled=self._vmutils._METRIC_ENABLED))
mock_svc.ControlMetrics.assert_has_calls(calls, any_order=True)
def _get_snapshot_service(self):
return self._vmutils._conn.Msvm_VirtualSystemSnapshotService()[0]
def _assert_add_resources(self, mock_svc):
getattr(mock_svc, self._ADD_RESOURCE).assert_called_with(
self._FAKE_VM_PATH, [self._FAKE_RES_DATA])
def _assert_remove_resources(self, mock_svc):
getattr(mock_svc, self._REMOVE_RESOURCE).assert_called_with(
[self._FAKE_RES_PATH])
def test_list_instance_notes(self):
vs = mock.MagicMock()
attrs = {'ElementName': 'fake_name',
'Notes': ['4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3']}
vs.configure_mock(**attrs)
self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs]
response = self._vmutils.list_instance_notes()
self.assertEqual([(attrs['ElementName'], attrs['Notes'])], response)
self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
['ElementName', 'Notes'],
VirtualSystemType=self._vmutils._VIRTUAL_SYSTEM_TYPE_REALIZED)
@mock.patch('nova.virt.hyperv.vmutilsv2.VMUtilsV2.check_ret_val')
@mock.patch('nova.virt.hyperv.vmutilsv2.VMUtilsV2._get_wmi_obj')
def _test_create_vm_obj(self, mock_get_wmi_obj, mock_check_ret_val,
vm_path, dynamic_memory_ratio=1.0):
mock_vs_man_svc = mock.MagicMock()
mock_vs_data = mock.MagicMock()
mock_job = mock.MagicMock()
fake_job_path = 'fake job path'
fake_ret_val = 'fake return value'
_conn = self._vmutils._conn.Msvm_VirtualSystemSettingData
mock_check_ret_val.return_value = mock_job
_conn.new.return_value = mock_vs_data
mock_vs_man_svc.DefineSystem.return_value = (fake_job_path,
vm_path,
fake_ret_val)
mock_job.associators.return_value = ['fake vm path']
response = self._vmutils._create_vm_obj(
vs_man_svc=mock_vs_man_svc,
vm_name='fake vm',
vm_gen='fake vm gen',
notes='fake notes',
dynamic_memory_ratio=dynamic_memory_ratio)
if not vm_path:
mock_job.associators.assert_called_once_with(
self._vmutils._AFFECTED_JOB_ELEMENT_CLASS)
_conn.new.assert_called_once_with()
self.assertEqual(mock_vs_data.ElementName, 'fake vm')
mock_vs_man_svc.DefineSystem.assert_called_once_with(
ResourceSettings=[], ReferenceConfiguration=None,
SystemSettings=mock_vs_data.GetText_(1))
mock_check_ret_val.assert_called_once_with(fake_ret_val, fake_job_path)
if dynamic_memory_ratio > 1:
self.assertFalse(mock_vs_data.VirtualNumaEnabled)
mock_get_wmi_obj.assert_called_with('fake vm path')
self.assertEqual(mock_vs_data.Notes, 'fake notes')
self.assertEqual(response, mock_get_wmi_obj())
def test_create_vm_obj(self):
self._test_create_vm_obj(vm_path='fake vm path')
def test_create_vm_obj_no_vm_path(self):
self._test_create_vm_obj(vm_path=None)
def test_create_vm_obj_dynamic_memory(self):
self._test_create_vm_obj(vm_path=None, dynamic_memory_ratio=1.1)
def test_list_instances(self):
vs = mock.MagicMock()
attrs = {'ElementName': 'fake_name'}
vs.configure_mock(**attrs)
self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs]
response = self._vmutils.list_instances()
self.assertEqual([(attrs['ElementName'])], response)
self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
['ElementName'],
VirtualSystemType=self._vmutils._VIRTUAL_SYSTEM_TYPE_REALIZED)
def test_get_attached_disks(self):
mock_scsi_ctrl_path = mock.MagicMock()
expected_query = ("SELECT * FROM %(class_name)s "
"WHERE (ResourceSubType='%(res_sub_type)s' OR "
"ResourceSubType='%(res_sub_type_virt)s' OR "
"ResourceSubType='%(res_sub_type_dvd)s') AND "
"Parent = '%(parent)s'" %
{"class_name":
self._vmutils._RESOURCE_ALLOC_SETTING_DATA_CLASS,
"res_sub_type":
self._vmutils._PHYS_DISK_RES_SUB_TYPE,
"res_sub_type_virt":
self._vmutils._DISK_DRIVE_RES_SUB_TYPE,
"res_sub_type_dvd":
self._vmutils._DVD_DRIVE_RES_SUB_TYPE,
"parent": mock_scsi_ctrl_path.replace("'", "''")})
expected_disks = self._vmutils._conn.query.return_value
ret_disks = self._vmutils.get_attached_disks(mock_scsi_ctrl_path)
self._vmutils._conn.query.assert_called_once_with(expected_query)
self.assertEqual(expected_disks, ret_disks)
def test_get_vm_dvd_disk_paths(self):
mock_vm = self._lookup_vm()
mock_sasd1 = mock.MagicMock(
ResourceSubType=self._vmutils._DVD_DISK_RES_SUB_TYPE,
HostResource=[mock.sentinel.FAKE_DVD_PATH1])
mock_settings = mock.MagicMock()
mock_settings.associators.return_value = [mock_sasd1]
mock_vm.associators.return_value = [mock_settings]
ret_val = self._vmutils.get_vm_dvd_disk_paths(self._FAKE_VM_NAME)
self.assertEqual(mock.sentinel.FAKE_DVD_PATH1, ret_val[0])
|
py | 1a4246a1c5407e1a06881bbe00dbfb7f1d3c5185 | from django.db import models
# Create your models here.
class Assessment(models.Model):
date = models.DateTimeField(auto_now=True)
class SourceFile(models.Model):
assessment = models.ForeignKey(Assessment, on_delete=models.CASCADE)
name = models.CharField(max_length=255)
class Error(models.Model):
source_file = models.ForeignKey(SourceFile, on_delete=models.CASCADE)
begin_line = models.IntegerField()
end_line = models.IntegerField()
priority = models.IntegerField()
category = models.CharField(max_length=255)
source = models.CharField(max_length=255)
text = models.CharField(max_length=255)
|
py | 1a4246a8631ba539cd7ff25e22f35c32f6767003 | import unittest
from biolinkml.generators.jsonldgen import JSONLDGenerator
from biolinkml.generators.markdowngen import MarkdownGenerator
from biolinkml.generators.pythongen import PythonGenerator
from biolinkml.generators.rdfgen import RDFGenerator
from biolinkml.utils.schemaloader import SchemaLoader
from biolinkml.utils.yamlutils import as_yaml
from tests.utils.compare_rdf import compare_rdf
from tests.utils.filters import yaml_filter, json_metadata_filter
from tests.utils.python_comparator import compare_python
from tests.utils.test_environment import TestEnvironmentTestCase
from tests.test_issues.environment import env
class Issue167TestCase(TestEnvironmentTestCase):
env = env
def test_issue_167(self):
""" Test extensions to the four basic types """
env.generate_single_file('issue_167.yaml',
lambda: as_yaml(SchemaLoader(env.input_path('issue_167.yaml')).resolve()),
value_is_returned=True, filtr=yaml_filter)
def test_issue_167b_yaml(self):
""" Annotations yaml example """
env.generate_single_file('issue_167b.yaml',
lambda: as_yaml(SchemaLoader(env.input_path('issue_167b.yaml'),
importmap=env.import_map).resolve()),
value_is_returned=True, filtr=yaml_filter)
def test_issue_167b_python(self):
""" Annotations python example """
env.generate_single_file('issue_167b.py',
lambda: PythonGenerator(env.input_path('issue_167b.yaml'),
importmap=env.import_map).serialize(),
comparator=compare_python, value_is_returned=True)
def test_issue_167b_json(self):
env.generate_single_file('issue_167b.json',
lambda: JSONLDGenerator(env.input_path('issue_167b.yaml'),
importmap=env.import_map).serialize(),
filtr=json_metadata_filter, value_is_returned=True)
def test_issue_167b_rdf(self):
env.generate_single_file('issue_167b.ttl',
lambda: RDFGenerator(env.input_path('issue_167b.yaml'),
importmap=env.import_map).serialize(),
comparator=compare_rdf, value_is_returned=True)
if __name__ == '__main__':
unittest.main()
|
py | 1a42472b5ea388cbb34291d47f23153f9d16318a | from flask import Flask
from flask_wtf.csrf import CSRFProtect
app = Flask(__name__)
csrf = CSRFProtect(app)
@app.route("/")
def pagina_inicial():
return "Laboratorio Pipeline DevOps"
if __name__ == '__main__':
app.run(debug=True)
|
py | 1a424737d103507981933a854b8c3e4b91eaf688 | from . import data_checking
from . import plotting_callbacks
from . import tfbinding_generator
from . import additional_metrics
from . import bam_dataset_generator
from . import MACS_Import
from . import MACS_dataset_gen |
py | 1a4248d38f33ec9d79ee86ca9f263d45feff2cf7 | # Create a list of strings: mutants
mutants = ['charles xavier',
'bobby drake',
'kurt wagner',
'max eisenhardt',
'kitty pryde']
aliases= ['prof x', 'iceman', 'nightcrawler', 'magneto', 'shadowcat']
powers = ['telepathy',
'thermokinesis',
'teleportation',
'magnetokinesis',
'intangibility']
# Create a list of tuples: mutant_data
mutant_data = list(zip(mutants, aliases, powers))
# Print the list of tuples
print(mutant_data)
# Create a zip object using the three lists: mutant_zip
mutant_zip = zip(mutants, aliases, powers)
# Print the zip object
print(mutant_zip)
# Unpack the zip object and print the tuple values
for value1, value2, value3 in mutant_zip:
print(value1, value2, value3)
# Create a zip object from mutants and powers: z1
z1 = zip(mutants, powers)
# Print the tuples in z1 by unpacking with *
print(*z1)
# Re-create a zip object from mutants and powers: z1
z1 = zip(mutants, powers)
# 'Unzip' the tuples in z1 by unpacking with * and zip(): result1, result2
result1, result2 = zip(*z1)
# Check if unpacked tuples are equivalent to original tuples
print(result1 == mutants)
print(result2 == powers)
|
py | 1a4248f6e824c72f475e212f2c9a3f0d70fe7d85 | # Copyright (c) 2007-2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.params import *
from m5.objects.E820 import X86E820Table, X86E820Entry
from m5.objects.SMBios import X86SMBiosSMBiosTable
from m5.objects.IntelMP import X86IntelMPFloatingPointer, X86IntelMPConfigTable
from m5.objects.ACPI import X86ACPIRSDP
from m5.objects.Workload import KernelWorkload
class X86FsWorkload(KernelWorkload):
type = 'X86FsWorkload'
cxx_header = 'arch/x86/fs_workload.hh'
cxx_class = 'X86ISA::FsWorkload'
smbios_table = Param.X86SMBiosSMBiosTable(
X86SMBiosSMBiosTable(), 'table of smbios/dmi information')
intel_mp_pointer = Param.X86IntelMPFloatingPointer(
X86IntelMPFloatingPointer(),
'intel mp spec floating pointer structure')
intel_mp_table = Param.X86IntelMPConfigTable(
X86IntelMPConfigTable(),
'intel mp spec configuration table')
acpi_description_table_pointer = Param.X86ACPIRSDP(
X86ACPIRSDP(), 'ACPI root description pointer structure')
class X86FsLinux(X86FsWorkload):
type = 'X86FsLinux'
cxx_header = 'arch/x86/linux/fs_workload.hh'
cxx_class = 'X86ISA::FsLinux'
e820_table = Param.X86E820Table(
X86E820Table(), 'E820 map of physical memory')
|
py | 1a424933227c950fdd283d5dae6ca61e3b1df040 | import sys
from typing import Iterable, Optional
import numpy as np
import tensorflow as tf
def _as_tensor(x):
if isinstance(x, np.ndarray):
x = tf.convert_to_tensor(x)
return x
def _build_train_step(model, data, jit_compile: bool):
data = tf.nest.map_structure(_as_tensor, data)
@tf.function(jit_compile=jit_compile)
def train_fn():
return model.train_step(data)
return train_fn
def _build_test_step(model, data, jit_compile: bool):
data = tf.nest.map_structure(_as_tensor, data)
@tf.function(jit_compile=jit_compile)
def test_fn():
model.reset_metrics()
return model.test_step(data)
return test_fn
class EpochProgbarLogger(tf.keras.callbacks.Callback):
"""Progress bar that updates at the end of each epoch."""
def __init__(self):
super().__init__()
self.progbar = None
self.epochs = None
self.last_seen = None
def set_params(self, params):
self.epochs = params["epochs"]
def on_train_begin(self, logs=None):
class Universe:
"""Contains everything."""
def __contains__(self, x):
return True
self.progbar = tf.keras.utils.Progbar(target=self.epochs, unit_name="epoch",)
        # progbar uses stateful metrics to determine which metric values to average.
# Since this is only called on_epoch_end, no metrics should be averaged
# i.e. all metrics should be considered 'stateful'.
# don't set stateful_metrics in constructor because it wraps it in `set`.
self.progbar.stateful_metrics = Universe()
def on_epoch_end(self, epoch: int, logs=None):
self.last_seen = epoch + 1
self.progbar.update(epoch + 1, list(logs.items()))
def on_train_end(self, logs=None):
if self.last_seen < self.progbar.target:
if tf.version.VERSION < "2.3":
sys.stdout.write("\n")
else:
self.progbar.update(self.last_seen, finalize=True)
def fit_single(
model: tf.keras.Model,
train_data,
validation_data=None,
epochs: int = 1,
initial_epoch: int = 0,
validation_freq: int = 1,
callbacks: Iterable[tf.keras.callbacks.Callback] = (),
verbose: bool = True,
jit_compile: bool = False,
):
"""
Optimized keras.Model.fit for training on a single graph.
Args:
model: keras model to train.
train_data: (inputs, labels, sample_weight) or dataset with a
single element for training.
validation_data: (inputs, labels, sample_weight) or dataset with a
single element for validation.
epochs: int, maximum number of epochs / steps to train for.
initial_epoch: int, starting epoch.
validation_freq: int, number of training steps/epochs per validation.
callbacks: Iterable of tf.keras.callbacks.Callbacks.
verbose: flag resulting in verbose outputs.
jit_compile: flag indicating whether train/validation steps are compiled
with `jit`. Not all ops are jit compatible, though where they are this may
result in speed-ups.
Returns:
history: `tf.keras.callbacks.History` object.
"""
if isinstance(train_data, tf.data.Dataset):
train_data = tf.data.experimental.get_single_element(train_data)
if isinstance(validation_data, tf.data.Dataset):
validation_data = tf.data.experimental.get_single_element(validation_data)
do_validation = validation_data is not None
params = dict(epochs=epochs, verbose=verbose, steps=1, do_validation=do_validation,)
callbacks = list(callbacks)
if verbose:
callbacks.append(EpochProgbarLogger())
cb = tf.keras.callbacks.CallbackList(
callbacks, add_history=True, add_progbar=False, model=model, **params,
)
del callbacks
train_step = _build_train_step(model, train_data, jit_compile=jit_compile)
if validation_data is None:
validation_step = None
else:
validation_step = _build_test_step(
model, validation_data, jit_compile=jit_compile
)
model.stop_training = False
cb.on_train_begin(logs=None)
# _maybe_load_initial_epoch_from_ckpt behaviour is influenced by
# callbacks.experimental.BackupAndRestore
initial_epoch = model._maybe_load_initial_epoch_from_ckpt( # pylint: disable=protected-access
initial_epoch
)
logs = None
for epoch in range(initial_epoch, epochs):
model.reset_metrics()
cb.on_epoch_begin(epoch, logs=None)
cb.on_train_batch_begin(batch=0)
logs = train_step()
cb.on_train_batch_end(batch=0, logs=logs)
if model.stop_training:
break
# validation
if validation_step is not None and (epoch + 1) % validation_freq == 0:
val_logs = validation_step()
logs.update({f"val_{k}": v for k, v in val_logs.items()})
cb.on_epoch_end(epoch, logs)
if model.stop_training:
break
cb.on_train_end(logs)
return model.history
def fit(
model: tf.keras.Model,
train_data,
validation_data=None,
epochs: int = 1,
initial_epoch: int = 0,
validation_freq: int = 1,
callbacks: Iterable[tf.keras.callbacks.Callback] = (),
steps_per_epoch: Optional[int] = None,
verbose: bool = True,
jit_compile: bool = False,
):
"""
Call `fit_single` or `Model.fit` based on `train_data`.
Delegates to either `graph_tf.train.fit_single` or `tf.keras.Model.fit`.
Args:
model: keras model to train.
train_data: (inputs, labels, sample_weight) or dataset with a
single element for training.
validation_data: (inputs, labels, sample_weight) or dataset with a
single element for validation.
epochs: int, maximum number of steps/epochs to train for.
initial_epoch: int, starting epoch.
validation_freq: int, number of training steps/epochs per validation.
callbacks: Iterable of `tf.keras.callbacks.Callbacks`.
steps_per_epoch: Number of steps per epoch. Must be 1 if specified and
train_data is a not a `tf.data.Dataset`.
verbose: flag resulting in verbose outputs.
jit_compile: used in fit_single. Ignored if more than one example.
Returns:
history: `tf.keras.callbacks.History` object.
"""
if not isinstance(train_data, tf.data.Dataset) or len(train_data) == 1:
assert steps_per_epoch is None or steps_per_epoch == 1
return fit_single(
model=model,
train_data=train_data,
validation_data=validation_data,
epochs=epochs,
initial_epoch=initial_epoch,
validation_freq=validation_freq,
callbacks=callbacks,
verbose=verbose,
jit_compile=jit_compile,
)
return model.fit(
train_data,
validation_data=validation_data,
epochs=epochs,
initial_epoch=initial_epoch,
validation_freq=validation_freq,
callbacks=callbacks,
verbose=verbose,
steps_per_epoch=steps_per_epoch,
)
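# Usage sketch (illustrative only; `my_model`, `inputs`, `labels`, `val_inputs`
# and `val_labels` are placeholders, not names from this module). With a single
# (inputs, labels) example, `fit` dispatches to `fit_single`, which compiles one
# train step and one optional validation step:
#
#   history = fit(
#       my_model,
#       train_data=(inputs, labels),
#       validation_data=(val_inputs, val_labels),
#       epochs=200,
#       validation_freq=10,
#   )
#   print(history.history["loss"][-1])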
|
py | 1a424affb549ba31b6673bf80ae984f6e8086478 | # If executes from local python Kernel
import sys
sys.path.append('./python_env/lib/python3.6/site-packages')
# Import libraries for general use
from unidecode import unidecode # Library to parse format
from bs4 import BeautifulSoup # Library to scripting in HTML
import numpy # Library for math function
import requests # Library for request to a website
# This function returns an array that contains the percentage of the population.
def get_population_percentage(population_array):
# Get the percentage for each item
population_percentage = []
for item in population_array:
population_percentage.append(item/numpy.sum(population_array)*100)
return numpy.array(population_percentage)
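# Worked example (illustrative): get_population_percentage(numpy.array([25, 25, 50]))
# returns array([25., 25., 50.]), i.e. each entry's share of the total in percent.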
# This function creates the output file
def create_output_file(head, data, population_percentage):
    # Concatenate all data: head + rows + new column (percentage)
result = numpy.concatenate((data, population_percentage.T),axis=1)
result = numpy.concatenate((head, result), axis=0)
result = result.astype('str')
# Save the result in to a csv file
return numpy.savetxt('Organización territorial de Chile.csv', result, delimiter=",",fmt="%s")
def get_data():
try:
website_text = requests.get('https://es.wikipedia.org/wiki/Chile').text
except requests.exceptions.RequestException as e:
print(e)
sys.exit(1)
# Parse all data in to HTML format
soup = BeautifulSoup(website_text,'html.parser')
    # Get the table 'Organización territorial ...'
territory_table = soup.find('table',{'class':'wikitable col1izq col2der col3der col4der col5der col6izq'})
# Get all data from table - tag <td>
if territory_table:
list_td = territory_table.find_all('td')
# Data Frames
head = ['Región', 'Población', 'Superficie', 'Densidad', 'Capital', 'Porcentaje']
data = []
row = []
population = []
for cell in list_td:
            if(list_td.index(cell)==5): # skip the 'Mapa administrativo' cell
continue
if cell.find_all('a'): # Get text for columm that contains an '<a>' tag.
a = cell.find_all('a')[0]
row.append(a.get_text())
else:
# For numbers parse into american float format
cell = unidecode(cell.get_text()).replace(" ","").replace(",",".")
# Delete <sub> tag info
if "(" in cell:
cell = cell.split("(")[0]
# Add cell to the row's table
row.append(float(cell))
# Save the population data to calculate percentage
if(len(row) == 2):
population.append(row[1])
# Add row to the table
if len(row) == 5:
data.append(row)
row = []
return numpy.array([head]), numpy.array(data), numpy.array([population])
else:
print("Table not found.")
return sys.exit(1)
if __name__ == '__main__':
head,data,population = get_data()
population_percentage = get_population_percentage(population)
create_output_file(head,data,population_percentage)
|
py | 1a424bfffd5533016e256d8d0ebe5a8956ae6ca6 | # Generated by Django 2.0.6 on 2018-06-29 05:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('images', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Like',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_at', models.DateTimeField(auto_now_add=True)),
('update_at', models.DateTimeField(auto_now=True)),
('creator', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='comment',
name='creator',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='comment',
name='image',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='images.Image'),
),
migrations.AddField(
model_name='image',
name='creator',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='like',
name='image',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='images.Image'),
),
]
|
py | 1a424c4179a59174cbca518fd29e0a71704eef8e | # Copyright (c) 2017, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import os
import itertools
import pandas as pd
import unittest
from coremltools._deps import HAS_SKLEARN
from coremltools.models.utils import evaluate_classifier, macos_version
import pytest
if HAS_SKLEARN:
from coremltools.converters import sklearn as skl_converter
from sklearn.tree import DecisionTreeClassifier
@unittest.skipIf(not HAS_SKLEARN, 'Missing sklearn. Skipping tests.')
class DecisionTreeClassificationBostonHousingScikitNumericTest(unittest.TestCase):
def _check_metrics(self, metrics, params = {}):
self.assertEquals(metrics['num_errors'], 0, msg = 'Failed case %s. Results %s' % (params, metrics))
def _train_convert_evaluate_assert(self, **scikit_params):
scikit_model = DecisionTreeClassifier(random_state = 1, **scikit_params)
scikit_model.fit(self.X, self.target)
# Convert the model
spec = skl_converter.convert(scikit_model, self.feature_names, self.output_name)
if macos_version() >= (10, 13):
# Get predictions
df = pd.DataFrame(self.X, columns=self.feature_names)
df['prediction'] = scikit_model.predict(self.X)
# Evaluate it
metrics = evaluate_classifier(spec, df)
self._check_metrics(metrics, scikit_params)
@unittest.skipIf(not HAS_SKLEARN, 'Missing sklearn. Skipping tests.')
class DecisionTreeBinaryClassificationBostonHousingScikitNumericTest(
DecisionTreeClassificationBostonHousingScikitNumericTest):
@classmethod
def setUpClass(self):
from sklearn.datasets import load_boston
from sklearn.tree import DecisionTreeClassifier
# Load data and train model
scikit_data = load_boston()
self.scikit_data = scikit_data
self.X = scikit_data.data.astype('f').astype('d') ## scikit-learn downcasts data
self.target = 1 * (scikit_data['target'] > scikit_data['target'].mean())
self.feature_names = scikit_data.feature_names
self.output_name = 'target'
def test_simple_binary_classifier(self):
self._train_convert_evaluate_assert()
@pytest.mark.slow
def test_binary_classifier_stress_test(self):
options = dict(
splitter = ['best'],
max_depth = [1, 10, None],
min_samples_split = [2, 10, 0.5],
min_samples_leaf = [1, 5],
min_weight_fraction_leaf = [0.0, 0.5],
max_features = [None, 1, 5],
max_leaf_nodes = [None, 20],
presort = [False, True],
)
# Make a cartesian product of all options
import itertools
product = itertools.product(*options.values())
args = [dict(zip(options.keys(), p)) for p in product]
print("Testing a total of %s cases. This could take a while" % len(args))
for it, arg in enumerate(args):
self._train_convert_evaluate_assert(**arg)
@unittest.skipIf(not HAS_SKLEARN, 'Missing sklearn. Skipping tests.')
class DecisionTreeMultiClassClassificationBostonHousingScikitNumericTest(
DecisionTreeClassificationBostonHousingScikitNumericTest):
@classmethod
def setUpClass(self):
from sklearn.datasets import load_boston
import numpy as np
# Load data and train model
scikit_data = load_boston()
num_classes = 3
self.X = scikit_data.data.astype('f').astype('d') ## scikit-learn downcasts data
t = scikit_data.target
target = np.digitize(t, np.histogram(t, bins = num_classes - 1)[1]) - 1
# Save the data and the model
self.scikit_data = scikit_data
self.target = target
self.feature_names = scikit_data.feature_names
self.output_name = 'target'
def test_simple_multiclass(self):
self._train_convert_evaluate_assert()
@pytest.mark.slow
def test_multiclass_stress_test(self):
options = dict(
splitter = ['best'],
max_depth = [1, 10, None],
min_samples_split = [2, 10, 0.5],
min_samples_leaf = [1, 5],
min_weight_fraction_leaf = [0.0, 0.5],
max_features = [None, 1, 5],
max_leaf_nodes = [None, 20],
presort = [False, True],
)
# Make a cartesian product of all options
product = itertools.product(*options.values())
args = [dict(zip(options.keys(), p)) for p in product]
print("Testing a total of %s cases. This could take a while" % len(args))
for it, arg in enumerate(args):
self._train_convert_evaluate_assert(**arg)
|
py | 1a424d3f7b4af5941326c44f0f4d3e8dc5d0d128 | from os import path
from setuptools import setup, find_packages
import sys
import versioneer
# NOTE: This file must remain Python 2 compatible for the foreseeable future,
# to ensure that we error out properly for people with outdated setuptools
# and/or pip.
min_version = (3, 6)
if sys.version_info < min_version:
error = """
jupyterhub-share-link-serverextension does not support Python {0}.{1}.
Python {2}.{3} and above is required. Check your Python version like so:
python3 --version
This may be due to an out-of-date pip. Make sure you have pip >= 9.0.1.
Upgrade pip like so:
pip install --upgrade pip
""".format(*(sys.version_info[:2] + min_version))
sys.exit(error)
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as readme_file:
readme = readme_file.read()
with open(path.join(here, 'requirements.txt')) as requirements_file:
# Parse requirements.txt, ignoring any commented-out lines.
requirements = [line for line in requirements_file.read().splitlines()
if not line.startswith('#')]
setup(
name='jupyterhub-share-link-serverextension',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="Python package for doing science.",
long_description=readme,
author="JupyterHub Share Link Contributors",
author_email='[email protected]',
url='https://github.com/danielballan/jupyterhub-share-link-serverextension',
python_requires='>={}'.format('.'.join(str(n) for n in min_version)),
packages=find_packages(exclude=['docs', 'tests']),
entry_points={
'console_scripts': [
# 'command = some.module:some_function',
],
},
include_package_data=True,
package_data={
'jupyterhub_share_link_serverextension': [
# When adding files here, remember to update MANIFEST.in as well,
# or else they will not be included in the distribution on PyPI!
# 'path/to/data_file',
]
},
install_requires=requirements,
license="BSD (3-clause)",
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Natural Language :: English',
'Programming Language :: Python :: 3',
],
)
|
py | 1a424d5a46d0ff01d078eb65771d76edd2635771 | """
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from petstore_api.exceptions import ApiAttributeError
class ClassModel(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'_class': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'_class': '_class', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""ClassModel - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
_class (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""ClassModel - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
_class (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
py | 1a424da371a77f49cafcf1b25908551d82fcf1e6 | """
Modified from https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
Edits:
ResNet:
- Changed input layer from 3 channel -> 1 channel (depth images)
- Divided inplanes, planes, and width_per_group by 4
BasicBlock:
- Commented out ValueError triggered by base_width != 64
'To make the number of parameters comparable to point-based methods,
we use ResNet18 with one-fourth filters (ResNet18/4) as the backbone.'
"""
from typing import Type, Any, Callable, Union, List, Optional
import torch
import torch.nn as nn
from torchvision.models.resnet import (
Bottleneck,
conv3x3,
conv1x1
)
class BasicBlock(nn.Module):
expansion: int = 1
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
"""
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
"""
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x: torch.Tensor) -> torch.Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet_4(nn.Module):
def __init__(
self,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
num_classes: int = 1000,
zero_init_residual: bool = False,
groups: int = 1,
width_per_group: int = 64//4,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64//4
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(1, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64//4, layers[0])
self.layer2 = self._make_layer(block, 128//4, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256//4, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512//4, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512//4 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type]
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type]
def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
stride: int = 1, dilate: bool = False) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x: torch.Tensor) -> torch.Tensor:
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self._forward_impl(x)
def resnet18_4() -> ResNet_4:
"""
ResNet18/4: ResNet18 with 1/4 the filters
Note: contains ~0.83M params which is close to the 0.8M params reported in paper
"""
return ResNet_4(block=BasicBlock, layers=[2, 2, 2, 2])
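if __name__ == "__main__":
    # Minimal smoke test (added illustration, not part of the original module):
    # a batch of two single-channel 224x224 depth images yields logits for the
    # default 1000 classes.
    model = resnet18_4()
    dummy_depth = torch.randn(2, 1, 224, 224)
    logits = model(dummy_depth)
    print(logits.shape)  # torch.Size([2, 1000])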
|
py | 1a424efac972af02e2f5ad9168f28a838692fcd0 | from hpc.autoscale.ccbindings.mock import MockClusterBinding
from hpc.autoscale.job.job import Job
from hpc.autoscale.job.schedulernode import SchedulerNode
from hpc.autoscale.node.nodemanager import new_node_manager
def setup_module() -> None:
SchedulerNode.ignore_hostnames = True
def test_placement_group() -> None:
node = SchedulerNode("", {})
node.exists = False
node.placement_group = ""
assert node.placement_group is None
node.placement_group = "a"
assert node.placement_group == "a"
node.placement_group = "0"
assert node.placement_group == "0"
try:
node.placement_group = "."
except Exception:
pass
assert node.placement_group == "0"
node.set_placement_group_escaped(".")
assert node.placement_group == "_"
node.exists = True
try:
node.placement_group = "123"
except Exception:
assert node.placement_group == "_"
def test_custom_node_attrs_and_node_config() -> None:
b = MockClusterBinding()
b.add_nodearray("htc", {}, software_configuration={"myscheduler": {"A": 1}})
b.add_bucket("htc", "Standard_F2", 10, 10)
b.add_node("htc-1", "htc")
node_mgr = new_node_manager({"_mock_bindings": b})
(existing_node,) = node_mgr.get_nodes()
try:
existing_node.node_attribute_overrides["willfail"] = 123
assert False
except TypeError:
pass
result = node_mgr.allocate({"exclusive": True}, node_count=2)
assert result
(node,) = [n for n in result.nodes if not n.exists]
assert node.software_configuration.get("test_thing") is None
node.node_attribute_overrides["Configuration"] = {"test_thing": "is set"}
assert node.software_configuration.get("test_thing") == "is set"
try:
node.software_configuration["willfail"] = 123
assert not node.software_configuration.get("willfail")
except TypeError:
pass
# we won't handle dict merges here.
assert node.software_configuration.get("myscheduler") == {"A": 1}
node.node_attribute_overrides["Configuration"] = {"myscheduler": {"B": 2}}
assert node.software_configuration.get("myscheduler") == {"B": 2}
# if you want to add to the existing software_configuration, use
# the node.software_configuration
node.node_attribute_overrides["Configuration"][
"myscsheduler"
] = node.software_configuration.get("myscheduler", {})
node.node_attribute_overrides["Configuration"]["myscheduler"]["B"] = 2
node.node_attribute_overrides["Configuration"] = {"myscheduler": {"A": 1, "B": 2}}
node.software_configuration["willsucceed"] = 123
node.exists = True
try:
node.software_configuration["willfail"] = 123
assert False
except TypeError:
pass
def test_clone() -> None:
orig = SchedulerNode("lnx0", {"ncpus": 4})
orig.metadata["exists_in_both"] = True
new = orig.clone()
assert new.available["ncpus"] == 4
assert new.resources["ncpus"] == 4
new.available["ncpus"] -= 1
assert new.available["ncpus"] == 3
assert orig.available["ncpus"] == 4
job = Job("1", {"ncpus": 2})
new.decrement(job._constraints, assignment_id=job.name)
assert new.available["ncpus"] == 1
assert orig.available["ncpus"] == 4
assert new.assignments == set(["1"])
assert orig.assignments == set()
orig.metadata["exists_in_orig"] = True
new.metadata["exists_in_new"] = True
assert orig.metadata["exists_in_both"] is True
assert "exists_in_new" not in orig.metadata
assert orig.metadata["exists_in_orig"] is True
assert new.metadata["exists_in_both"] is True
assert new.metadata["exists_in_new"] is True
assert "exists_in_orig" not in new.metadata
|
py | 1a424f06fc3801946d9b5ed38fec62496ac10f28 | """
Copyright (C) 2021 University of Luxembourg
Developed by Dr. Joshua Heneage Dawes.
Module containing classes for construction of iCFTL specifications.
Specifications are constructed hierarchically, as chains of objects.
The root object is always a Specification instance. This can contain configuration information for the specification.
The first object inside the Specification must be a Forall instance. This indicates universal quantification.
There can arbitrarily many Forall instances nested.
The final instance in the chain must be a Constraint instance. This has recursive structure (based on the grammar of iCFTL).
"""
import logging
logger = logging.getLogger("VyPR")
from VyPR.Specifications.predicates import changes, calls, future
from VyPR.Specifications.constraints import (Constraint,
ConstraintBase,
ConcreteStateExpression,
TransitionExpression,
ConcreteStateVariable,
TransitionVariable,
Conjunction,
Disjunction,
Negation,
TimeBetween,
ValueInConcreteStateEqualsConstant,
ValueInConcreteStateLessThanConstant,
ValueInConcreteStateGreaterThanConstant,
DurationOfTransitionLessThanConstant,
DurationOfTransitionGreaterThanConstant,
ConcreteStateBeforeTransition,
ConcreteStateAfterTransition,
NextTransitionFromConcreteState,
NextConcreteStateFromConcreteState,
NextTransitionFromTransition,
NextConcreteStateFromTransition,
TimeBetweenLessThanConstant)
class Specification():
"""
The top-level class for specifications.
"""
def __init__(self):
logger.info("Instantiating new specification...")
self._quantifier = None
def __repr__(self):
"""
Construct the string representation recursively.
"""
return f"{self._quantifier}"
def get_quantifier(self):
return self._quantifier
def get_variable_to_obj_map(self) -> dict:
"""
Traverse the specification in order to construct a map
from each variable to the type of object it will hold
(either a ConcreteState or a Transition instance).
Note: this function should not try to serialise any objects from the specification
because serialisation of a Constraint instance requires calling of this function,
hence the result would be an infinite loop.
"""
logger.info("Deriving map variable names -> variable object from quantifiers")
# initialise an empty map
variable_to_obj = {}
# set the current object to be the top-level specification
current_obj = self
# iterate through the structure, using the type Constraint as a place to stop
logger.info("Traversing specification structure")
while type(current_obj) is not Constraint:
logger.info(f"current_obj = {type(current_obj)}")
# traverse depending on the type of the current object
if type(current_obj) is Specification:
current_obj = current_obj._quantifier
elif type(current_obj) is Forall:
# first, add to the map
# we check the type of the predicate so we know what kind of variable to instantiate
if type(current_obj._predicate) is changes:
variable_to_obj[current_obj._variable] = ConcreteStateVariable(current_obj._variable)
elif type(current_obj._predicate) is calls:
variable_to_obj[current_obj._variable] = TransitionVariable(current_obj._variable)
elif type(current_obj._predicate) is future:
if type(current_obj._predicate._predicate) is changes:
variable_to_obj[current_obj._variable] = ConcreteStateVariable(current_obj._variable)
elif type(current_obj._predicate._predicate) is calls:
variable_to_obj[current_obj._variable] = TransitionVariable(current_obj._variable)
# in the case of a quantifier, the two possibilities are
# that the next item to consider is a quantifier or a constraint
if current_obj._quantifier:
current_obj = current_obj._quantifier
else:
# if we arrive at a constraint, the loop
                # will stop at the next iteration
current_obj = current_obj._constraint
logger.info(f"variable_to_obj = {variable_to_obj}")
return variable_to_obj
def get_variables(self) -> list:
"""
Traverse the specification in order to construct a list of variables.
The order of the list matches the order in which the variables occur in quantifiers.
"""
logger.info("Deriving list of variables from quantifiers")
# initialise an empty list
variables = []
# set the current object to be the top-level specification
current_obj = self
# iterate through the structure, using the type Constraint as a place to stop
logger.info("Traversing specification structure")
while type(current_obj) is not Constraint:
logger.info(f"current_obj = {type(current_obj)}")
# traverse depending on the type of the current object
if type(current_obj) is Specification:
current_obj = current_obj._quantifier
elif type(current_obj) is Forall:
# first, add to the map
# we check the type of the predicate so we know what kind of variable to instantiate
if type(current_obj._predicate) is changes:
variables.append(current_obj._variable)
elif type(current_obj._predicate) is calls:
variables.append(current_obj._variable)
elif type(current_obj._predicate) is future:
if type(current_obj._predicate._predicate) is changes:
variables.append(current_obj._variable)
elif type(current_obj._predicate._predicate) is calls:
variables.append(current_obj._variable)
# in the case of a quantifier, the two possibilities are
# that the next item to consider is a quantifier or a constraint
if current_obj._quantifier:
current_obj = current_obj._quantifier
else:
# if we arrive at a constraint, the loop
                # will stop at the next iteration
current_obj = current_obj._constraint
return variables
def get_function_names_used(self):
"""
Traverse the specification and, each time a predicate is encountered, extract the function
name used and add to the list.
"""
# initialise an empty list of function names
all_function_names = []
# initialise stack wth top-level Specification object for traversal
stack = [self]
# process the stack while it is not empty
while len(stack) > 0:
# get the top element from the stack
top = stack.pop()
# based on the type, add child elements to the stack or add a new function name
# to the list
if type(top) in [changes, calls]:
all_function_names.append(top._during_function)
elif type(top) is future:
stack.append(top.get_predicate())
elif type(top) is Specification:
stack.append(top.get_quantifier())
elif type(top) is Forall:
# add the predicate to the stack
stack.append(top.get_predicate())
# also, carry on traversing the specification
if top.get_quantifier():
stack.append(top.get_quantifier())
else:
stack.append(top.get_constraint())
elif type(top) is Constraint:
stack.append(top.instantiate())
elif type(top) is Conjunction:
stack += top.get_conjuncts()
elif type(top) is Disjunction:
stack += top.get_disjuncts()
elif type(top) is Negation:
stack.append(top.get_operand())
elif type(top) in [ValueInConcreteStateEqualsConstant, ValueInConcreteStateLessThanConstant, ValueInConcreteStateGreaterThanConstant]:
stack.append(top.get_value_expression().get_concrete_state_expression())
elif type(top) in [ConcreteStateBeforeTransition, ConcreteStateAfterTransition]:
stack.append(top.get_transition_expression())
elif type(top) in [DurationOfTransitionLessThanConstant, DurationOfTransitionGreaterThanConstant]:
stack.append(top.get_transition_duration_obj().get_transition_expression())
elif type(top) in [NextTransitionFromConcreteState, NextConcreteStateFromConcreteState]:
stack.append(top.get_predicate())
elif type(top) in [NextTransitionFromTransition, NextConcreteStateFromTransition]:
stack.append(top.get_predicate())
elif type(top) is TimeBetweenLessThanConstant:
# traverse both arguments to the timeBetween operator
stack.append(top.get_time_between_expression().get_lhs_expression())
stack.append(top.get_time_between_expression().get_rhs_expression())
all_function_names = list(set(all_function_names))
return all_function_names
def get_constraint(self):
"""
Traverse the specification until a constraint is reached.
"""
# set the current object to be the first quantifier
current_obj = self._quantifier
# iterate through the structure, using the type Constraint as a place to stop
while type(current_obj) is not Constraint:
# traverse depending on the type of the current object
if type(current_obj) is Specification:
current_obj = current_obj.get_quantifier()
elif type(current_obj) is Forall:
# in the case of a quantifier, the two possibilities are
# that the next item to consider is a quantifier or a constraint
if current_obj.get_quantifier():
current_obj = current_obj.get_quantifier()
else:
# if we arrive at a constraint, the loop
                # will stop at the next iteration
current_obj = current_obj.get_constraint()
return current_obj
def forall(self, **quantified_variable):
"""
**quantified variable must be a dictionary with only one key - the variable being given.
The value associated with the variable must be a Predicate instance.
"""
# if there is more than 1 variable, raise an exception
if len(quantified_variable.keys()) > 1:
raise Exception("A single variable must be given for each level of universal quantification.")
# check the type of the value
predicate = list(quantified_variable.values())[0]
if type(predicate) not in [changes, calls, future]:
raise Exception(f"Type '{type(predicate).__name__}' not supported.")
# make sure the predicate is complete
variable = list(quantified_variable.keys())[0]
if not predicate._during_function:
raise Exception(f"Predicate used for variable {variable} not complete")
logger.info(f"Adding quantifier with arguments {quantified_variable}")
# store the quantifier
self._quantifier = Forall(self, **quantified_variable)
return self._quantifier
class Forall():
"""
The class for representing universal quantification in specifications.
"""
def __init__(self, specification_obj: Specification, **quantified_variable):
self._specification_obj = specification_obj
# we will use the fact that either a constraint or a quantifier is stored
# to determine what the next thing we will see in the structure of the specification is
self._constraint = None
self._quantifier = None
# Note: .keys() does not give a structure with an ordering,
# so normally converting to a list would be problematic
# but here we know that there must be one element
self._variable = list(quantified_variable.keys())[0]
self._predicate = list(quantified_variable.values())[0]
def __repr__(self):
if self._constraint:
# this is the last quantifier, so the next thing to turn into a string is a constraint
return f"forall {self._variable} in {self._predicate}:\n {self._constraint}"
else:
# this is not the last quantifier - there is another nested inside
return f"forall {self._variable} in {self._predicate}:\n{self._quantifier}"
def get_specification_obj(self):
return self._specification_obj
def get_quantifier(self):
return self._quantifier
def get_constraint(self):
return self._constraint
def get_predicate(self):
return self._predicate
def get_variable(self):
return self._variable
def forall(self, **quantified_variable):
"""
**quantified variable must be a dictionary with only one key - the variable being given.
The value associated with the variable must be a Predicate instance.
"""
# if there is more than 1 variable, raise an exception
if len(quantified_variable.keys()) > 1:
raise Exception("A single variable must be given for each level of universal quantification.")
# check the type of the value - this is not the first quantifier,
# so the type must be future
predicate = list(quantified_variable.values())[0]
if type(predicate) is not future:
raise Exception(f"Type '{type(predicate).__name__}' not supported.")
# make sure the predicate is complete
variable = list(quantified_variable.keys())[0]
if not predicate._predicate._during_function:
raise Exception(f"Predicate used for variable {variable} not complete")
logger.info(f"Initialising new instance of Forall with quantified_variable = {quantified_variable}")
# store the quantifier
self._quantifier = Forall(self._specification_obj, **quantified_variable)
return self._quantifier
def check(self, expression):
"""
Instantiate a top-level Constraint instance with the given constraint lambda.
The lambda will later be called and supplied with the necessary variables during instrumentation and monitoring.
"""
# make sure constraint is a lambda
if type(expression) is not type(lambda:0):
raise Exception("Constraint given must be a lambda expression.")
logger.info("Setting self._constraint to new Constraint instance")
self._constraint = Constraint(self._specification_obj, expression)
return self._specification_obj
"""
Syntax sugar functions.
"""
def all_are_true(*conjuncts):
"""
Encode a conjunction.
"""
return Conjunction(*conjuncts)
def one_is_true(*disjuncts):
"""
Encode a disjunction.
"""
return Disjunction(*disjuncts)
def not_true(operand):
"""
Given an operand, instantiate either a single negation,
or another structure by propagating negation through to atomic constraints.
"""
if type(operand) is Conjunction:
# rewrite negation of conjunction as disjunction of negations
return Disjunction(*map(lambda conjunct : not_true(conjunct), operand.get_conjuncts()))
elif type(operand) is Disjunction:
# rewrite negation of disjunction as conjunction of negations
return Conjunction(*map(lambda disjunct : not_true(disjunct), operand.get_disjuncts()))
elif type(operand) is Negation:
# eliminate double negation
return operand.get_operand()
else:
# assume operand is atomic constraint
return Negation(operand)
def timeBetween(concrete_state_expression_1, concrete_state_expression_2):
return TimeBetween(concrete_state_expression_1, concrete_state_expression_2) |
py | 1a424f28ec101ad74dd02d9ec3c04361f182ec8a | # -*- coding: utf-8 -*-
from .fixtures import fixture_data, Amount, Asset, Price
import unittest
class Testcases(unittest.TestCase):
def setUp(self):
fixture_data()
def test_init(self):
# self.assertEqual(1, 1)
Price("0.315 USD/GPH")
Price(1.0, "USD/GOLD")
Price(0.315, base="USD", quote="GPH")
Price(0.315, base=Asset("USD"), quote=Asset("GPH"))
Price(
{
"base": {"amount": 1, "asset_id": "1.3.0"},
"quote": {"amount": 10, "asset_id": "1.3.8"},
}
)
Price(
{
"receives": {"amount": 1, "asset_id": "1.3.0"},
"pays": {"amount": 10, "asset_id": "1.3.8"},
},
base_asset=Asset("1.3.0"),
)
Price(quote="10 GOLD", base="1 USD")
Price("10 GOLD", "1 USD")
Price(Amount("10 GOLD"), Amount("1 USD"))
def test_multiplication(self):
p1 = Price(10.0, "USD/GOLD")
p2 = Price(5.0, "EUR/USD")
p3 = p1 * p2
p4 = p3.as_base("GOLD")
self.assertEqual(p4["quote"]["symbol"], "EUR")
self.assertEqual(p4["base"]["symbol"], "GOLD")
        # 10 USD/GOLD * 5 EUR/USD = 50 EUR/GOLD = 0.02 GOLD/EUR
self.assertEqual(float(p4), 0.02)
# Inline multiplication
p5 = p1
p5 *= p2
p4 = p5.as_base("GOLD")
self.assertEqual(p4["quote"]["symbol"], "EUR")
self.assertEqual(p4["base"]["symbol"], "GOLD")
# 10 USD/GOLD * 0.2 EUR/USD = 2 EUR/GOLD = 0.02 GOLD/EUR
self.assertEqual(float(p4), 0.02)
def test_div(self):
p1 = Price(10.0, "USD/GOLD")
p2 = Price(5.0, "USD/EUR")
# 10 USD/GOLD / 5 USD/EUR = 2 EUR/GOLD
p3 = p1 / p2
p4 = p3.as_base("EUR")
self.assertEqual(p4["base"]["symbol"], "EUR")
self.assertEqual(p4["quote"]["symbol"], "GOLD")
        # 10 USD/GOLD / 5 USD/EUR = 2 EUR/GOLD = 0.5 GOLD/EUR
self.assertEqual(float(p4), 2)
def test_div2(self):
p1 = Price(10.0, "USD/GOLD")
p2 = Price(5.0, "USD/GOLD")
        # 10 USD/GOLD / 5 USD/GOLD = 2 (dimensionless ratio)
p3 = p1 / p2
self.assertTrue(isinstance(p3, (float, int)))
self.assertEqual(float(p3), 2.0)
|
py | 1a424f36720c226cf6125e969b6b1fadf2eb9344 | from enemy import Enemy
class Minion(Enemy):
"""
Minion class inherits from Enemy class.
It is similar in all forms to the enemy class except that it will
move towards bomberman with probability 0.8.
"""
def __init__(self, r, c):
Enemy.__init__(self, r, c)
        self.random_prob = 0.2  # chance of a random move; otherwise moves towards bomberman (probability 0.8)
|
py | 1a424f4426e42c72d73cafd39def67ca7745176d | import torch
import numpy as np
from baseline.utils import lookup_sentence, get_version
from torch.autograd import Variable
import torch.autograd
import torch.nn as nn
import torch.nn.functional
import math
import copy
PYT_MAJOR_VERSION = get_version(torch)
def sequence_mask(lengths):
lens = lengths.cpu()
max_len = torch.max(lens)
# 1 x T
row = torch.arange(0, max_len.item()).type_as(lens).view(1, -1)
# B x 1
col = lens.view(-1, 1)
# Broadcast to B x T, compares increasing number to max
mask = row < col
return mask
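# Worked example (illustrative): for lengths [2, 4] the broadcast comparison
# produces a 2 x 4 boolean mask:
#   sequence_mask(torch.tensor([2, 4]))
#   -> [[True, True, False, False],
#       [True, True, True,  True ]]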
def classify_bt(model, batch_time):
tensor = torch.from_numpy(batch_time) if type(batch_time) == np.ndarray else batch_time
probs = model(torch.autograd.Variable(tensor, requires_grad=False).cuda()).exp().data
probs.div_(torch.sum(probs))
results = []
batchsz = probs.size(0)
for b in range(batchsz):
outcomes = [(model.labels[id_i], prob_i) for id_i, prob_i in enumerate(probs[b])]
results.append(outcomes)
return results
def predict_seq_bt(model, x, xch, lengths):
x_t = torch.from_numpy(x) if type(x) == np.ndarray else x
xch_t = torch.from_numpy(xch) if type(xch) == np.ndarray else xch
len_v = torch.from_numpy(lengths) if type(lengths) == np.ndarray else lengths
x_v = torch.autograd.Variable(x_t, requires_grad=False).cuda()
xch_v = torch.autograd.Variable(xch_t, requires_grad=False).cuda()
#len_v = torch.autograd.Variable(len_t, requires_grad=False)
results = model((x_v, xch_v, len_v))
#print(results)
#if type(x) == np.ndarray:
# # results = results.cpu().numpy()
# # Fix this to not be greedy
# results = np.argmax(results, -1)
return results
def to_scalar(var):
# returns a python float
return var.view(-1).data.tolist()[0]
def argmax(vec):
# return the argmax as a python int
_, idx = torch.max(vec, 1)
return to_scalar(idx)
def log_sum_exp(vec):
max_score = vec[0, argmax(vec)]
max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])
return max_score + torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))
class SequenceCriterion(nn.Module):
def __init__(self, LossFn=nn.NLLLoss):
super(SequenceCriterion, self).__init__()
self.crit = LossFn(ignore_index=0, size_average=False)
def forward(self, inputs, targets):
# This is BxT, which is what we want!
total_sz = targets.nelement()
loss = self.crit(inputs.view(total_sz, -1), targets.view(total_sz))
return loss
class StackedLSTMCell(nn.Module):
def __init__(self, num_layers, input_size, rnn_size, dropout):
super(StackedLSTMCell, self).__init__()
self.dropout = nn.Dropout(dropout)
self.num_layers = num_layers
self.layers = nn.ModuleList()
for i in range(num_layers):
self.layers.append(nn.LSTMCell(input_size=input_size, hidden_size=rnn_size, bias=False))
input_size = rnn_size
def forward(self, input, hidden):
h_0, c_0 = hidden
hs, cs = [], []
for i, layer in enumerate(self.layers):
h_i, c_i = layer(input, (h_0[i], c_0[i]))
input = h_i
if i != self.num_layers - 1:
input = self.dropout(input)
hs += [h_i]
cs += [c_i]
hs = torch.stack(hs)
cs = torch.stack(cs)
return input, (hs, cs)
class StackedGRUCell(nn.Module):
def __init__(self, num_layers, input_size, rnn_size, dropout):
super(StackedGRUCell, self).__init__()
self.dropout = nn.Dropout(dropout)
self.num_layers = num_layers
self.layers = nn.ModuleList()
for i in range(num_layers):
self.layers.append(nn.GRUCell(input_size=input_size, hidden_size=rnn_size))
input_size = rnn_size
def forward(self, input, hidden):
h_0 = hidden
hs = []
for i, layer in enumerate(self.layers):
h_i = layer(input, (h_0[i]))
input = h_i
if i != self.num_layers:
input = self.dropout(input)
hs += [h_i]
hs = torch.stack(hs)
return input, hs
def pytorch_rnn_cell(insz, hsz, rnntype, nlayers, dropout):
if rnntype == 'gru':
rnn = StackedGRUCell(nlayers, insz, hsz, dropout)
else:
rnn = StackedLSTMCell(nlayers, insz, hsz, dropout)
return rnn
def pytorch_embedding(x2vec, finetune=True):
dsz = x2vec.dsz
lut = nn.Embedding(x2vec.vsz + 1, dsz, padding_idx=0)
del lut.weight
lut.weight = nn.Parameter(torch.FloatTensor(x2vec.weights),
requires_grad=finetune)
return lut
def pytorch_activation(name="relu"):
if name == "tanh":
return nn.Tanh()
if name == "hardtanh":
return nn.Hardtanh()
if name == "prelu":
return nn.PReLU()
if name == "sigmoid":
return nn.Sigmoid()
if name == "log_sigmoid":
return nn.LogSigmoid()
return nn.ReLU()
def pytorch_conv1d(in_channels, out_channels, fsz, unif=0, padding=0, initializer=None):
c = nn.Conv1d(in_channels, out_channels, fsz, padding=padding)
if unif > 0:
c.weight.data.uniform_(-unif, unif)
elif initializer == "ortho":
nn.init.orthogonal(c.weight)
elif initializer == "he" or initializer == "kaiming":
nn.init.kaiming_uniform(c.weight)
else:
nn.init.xavier_uniform_(c.weight)
return c
def pytorch_linear(in_sz, out_sz, unif=0, initializer=None):
l = nn.Linear(in_sz, out_sz)
if unif > 0:
l.weight.data.uniform_(-unif, unif)
elif initializer == "ortho":
nn.init.orthogonal(l.weight)
elif initializer == "he" or initializer == "kaiming":
nn.init.kaiming_uniform(l.weight)
else:
nn.init.xavier_uniform_(l.weight)
l.bias.data.zero_()
return l
def pytorch_clone_module(module_, N):
return nn.ModuleList([copy.deepcopy(module_) for _ in range(N)])
def _cat_dir(h):
return torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], dim=-1)
class BiRNNWrapper(nn.Module):
def __init__(self, rnn, nlayers):
super(BiRNNWrapper, self).__init__()
self.rnn = rnn
self.nlayers = nlayers
def forward(self, seq):
output, hidden = self.rnn(seq)
if isinstance(hidden, tuple):
hidden = tuple(_cat_dir(h) for h in hidden)
else:
hidden = _cat_dir(hidden)
return output, hidden
def pytorch_rnn(insz, hsz, rnntype, nlayers, dropout):
if nlayers == 1:
dropout = 0.0
if rnntype == 'gru':
rnn = torch.nn.GRU(insz, hsz, nlayers, dropout=dropout)
elif rnntype == 'blstm':
rnn = torch.nn.LSTM(insz, hsz, nlayers, dropout=dropout, bidirectional=True)
rnn = BiRNNWrapper(rnn, nlayers)
else:
rnn = torch.nn.LSTM(insz, hsz, nlayers, dropout=dropout)
return rnn
class ParallelConv(nn.Module):
def __init__(self, insz, outsz, filtsz, activation_type, pdrop):
super(ParallelConv, self).__init__()
convs = []
outsz_filts = outsz
if type(outsz) == int:
outsz_filts = len(filtsz) * [outsz]
self.outsz = sum(outsz_filts)
for i, fsz in enumerate(filtsz):
pad = fsz//2
conv = nn.Sequential(
nn.Conv1d(insz, outsz_filts[i], fsz, padding=pad),
pytorch_activation(activation_type)
)
convs.append(conv)
# Add the module so its managed correctly
self.convs = nn.ModuleList(convs)
self.conv_drop = nn.Dropout(pdrop)
def forward(self, input_bct):
mots = []
for conv in self.convs:
# In Conv1d, data BxCxT, max over time
conv_out = conv(input_bct)
mot, _ = conv_out.max(2)
mots.append(mot)
mots = torch.cat(mots, 1)
return self.conv_drop(mots)
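# Shape note (illustrative numbers): with insz=300, outsz=100 and filtsz=[3, 4, 5],
# a [B, 300, T] input is passed through three padded Conv1d branches, each
# max-pooled over time to [B, 100], then concatenated (and dropout-masked)
# into a [B, 300] output, which equals self.outsz.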
class Highway(nn.Module):
def __init__(self,
input_size):
super(Highway, self).__init__()
self.proj = nn.Linear(input_size, input_size)
self.transform = nn.Linear(input_size, input_size)
self.transform.bias.data.fill_(-2.0)
def forward(self, input):
proj_result = nn.functional.relu(self.proj(input))
proj_gate = nn.functional.sigmoid(self.transform(input))
gated = (proj_gate * proj_result) + ((1 - proj_gate) * input)
return gated
class LayerNorm(nn.Module):
"""
Applies Layer Normalization over a mini-batch of inputs as described in
the paper `Layer Normalization`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x]} + \epsilon} * \gamma + \beta
This is provided in pytorch's master, and can be replaced in the near future.
For the time, being, this code is adapted from:
http://nlp.seas.harvard.edu/2018/04/03/attention.html
https://github.com/pytorch/pytorch/pull/2019
"""
def __init__(self, num_features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a = nn.Parameter(torch.ones(num_features))
self.b = nn.Parameter(torch.zeros(num_features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = ((x - mean).pow(2).sum(-1, keepdim=True).div(x.size(-1) - 1) + self.eps).sqrt()
        return self.a * (x - mean) / (std + self.eps) + self.b
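# Illustrative sketch (assumption): LayerNorm normalizes over the last
# dimension, so each feature vector is roughly zero-mean/unit-variance before
# the learned scale (a) and shift (b) are applied; the shape is unchanged.
def _example_layer_norm():
    ln = LayerNorm(num_features=16)
    x = torch.randn(4, 10, 16)
    y = ln(x)                                  # same shape: (4, 10, 16)
    return y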
def pytorch_lstm(insz, hsz, rnntype, nlayers, dropout, unif=0, batch_first=False, initializer=None):
if nlayers == 1:
dropout = 0.0
ndir = 2 if rnntype.startswith('b') else 1
#print('ndir: %d, rnntype: %s, nlayers: %d, dropout: %.2f, unif: %.2f' % (ndir, rnntype, nlayers, dropout, unif))
rnn = torch.nn.LSTM(insz, hsz, nlayers, dropout=dropout, bidirectional=True if ndir > 1 else False, batch_first=batch_first)#, bias=False)
if unif > 0:
for weight in rnn.parameters():
weight.data.uniform_(-unif, unif)
elif initializer == "ortho":
nn.init.orthogonal(rnn.weight_hh_l0)
nn.init.orthogonal(rnn.weight_ih_l0)
elif initializer == "he" or initializer == "kaiming":
nn.init.kaiming_uniform(rnn.weight_hh_l0)
nn.init.kaiming_uniform(rnn.weight_ih_l0)
else:
nn.init.xavier_uniform_(rnn.weight_hh_l0)
nn.init.xavier_uniform_(rnn.weight_ih_l0)
return rnn, ndir*hsz
def pytorch_prepare_optimizer(model, **kwargs):
mom = kwargs.get('mom', 0.9)
optim = kwargs.get('optim', 'sgd')
eta = kwargs.get('eta', kwargs.get('lr', 0.01))
decay_rate = float(kwargs.get('decay_rate', 0.0))
decay_type = kwargs.get('decay_type', None)
if optim == 'adadelta':
optimizer = torch.optim.Adadelta(model.parameters(), lr=eta)
elif optim == 'adam':
optimizer = torch.optim.Adam(model.parameters(), lr=eta)
elif optim == 'rmsprop':
optimizer = torch.optim.RMSprop(model.parameters(), lr=eta)
elif optim == 'asgd':
optimizer = torch.optim.ASGD(model.parameters(), lr=eta)
else:
optimizer = torch.optim.SGD(model.parameters(), lr=eta, momentum=mom)
scheduler = None
if decay_rate > 0.0 and decay_type is not None:
if decay_type == 'invtime':
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=decay_rate)
return optimizer, scheduler
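# Illustrative sketch (assumption): the kwargs mirror the keys read above,
# e.g. Adam with a learning rate plus an inverse-time decay schedule. Any
# nn.Module works as the model argument.
def _example_prepare_optimizer(model):
    optimizer, scheduler = pytorch_prepare_optimizer(
        model, optim='adam', eta=0.001, decay_rate=0.05, decay_type='invtime')
    return optimizer, scheduler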
def append2seq(seq, modules):
for i, module in enumerate(modules):
seq.add_module('%s-%d' % (str(module).replace('.', 'dot'), i), module)
def tensor_max(tensor):
return tensor.max()
def tensor_shape(tensor):
return tensor.size()
def tensor_reverse_2nd(tensor):
idx = torch.LongTensor([i for i in range(tensor.size(1)-1, -1, -1)])
return tensor.index_select(1, idx)
def long_0_tensor_alloc(dims, dtype=None):
lt = long_tensor_alloc(dims)
lt.zero_()
return lt
def long_tensor_alloc(dims, dtype=None):
if type(dims) == int or len(dims) == 1:
return torch.LongTensor(dims)
return torch.LongTensor(*dims)
def prepare_src(model, tokens, mxlen=100):
src_vocab = model.get_src_vocab()
length = min(len(tokens), mxlen)
x = torch.LongTensor(length).zero_()
for j in range(length):
word = tokens[j]
if word not in src_vocab:
if word != '':
print(word)
idx = 0
else:
idx = src_vocab[word]
x[j] = idx
return torch.autograd.Variable(x.view(-1, 1))
#def beam_decode_tokens(model, src_tokens, K, idx2word, mxlen=50):
# src = prepare_src(model, src_tokens, mxlen)
# paths, scores = beam_decode(model, src, K)
# path_str = []
# for j, path in enumerate(paths):
# path_str.append([idx2word[i] for i in path])
# return path_str, scores
#return beam_decode(model, src, K)
def show_examples_pytorch(model, es, rlut1, rlut2, embed2, mxlen, sample, prob_clip, max_examples, reverse):
si = np.random.randint(0, len(es))
batch_dict = es[si]
src_array = batch_dict['src']
tgt_array = batch_dict['dst']
src_len = batch_dict['src_len']
if max_examples > 0:
max_examples = min(max_examples, src_array.size(0))
src_array = src_array[0:max_examples]
tgt_array = tgt_array[0:max_examples]
src_len = src_len[0:max_examples]
# TODO: fix this, check for GPU first
src_array = src_array.cuda()
for src_len_i, src_i, tgt_i in zip(src_len, src_array, tgt_array):
print('========================================================================')
src_len_i = torch.ones(1).fill_(src_len_i).type_as(src_len)
sent = lookup_sentence(rlut1, src_i.cpu().numpy(), reverse=reverse)
print('[OP] %s' % sent)
sent = lookup_sentence(rlut2, tgt_i.cpu().numpy())
print('[Actual] %s' % sent)
src_dict = {'src': torch.autograd.Variable(src_i.view(1, -1), requires_grad=False),
'src_len': torch.autograd.Variable(src_len_i, requires_grad=False)}
dst_i = model.run(src_dict)[0][0]
dst_i = [idx.item() for idx in dst_i]
sent = lookup_sentence(rlut2, dst_i)
print('Guess: %s' % sent)
print('------------------------------------------------------------------------')
# Some of this code is borrowed from here:
# https://github.com/rguthrie3/DeepLearningForNLPInPytorch
def argmax(vec):
# return the argmax as a python int
_, idx = torch.max(vec, 1)
return idx.data[0]
# Compute log sum exp in a numerically stable way for the forward algorithm
def log_sum_exp(vec):
max_score = vec[0, argmax(vec)]
max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])
return max_score + torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))
def vec_log_sum_exp(vec, dim):
"""Vectorized version of log-sum-exp
:param vec: Vector
:param dim: What dimension to operate on
:return:
"""
max_scores, idx = torch.max(vec, dim, keepdim=True)
max_scores_broadcast = max_scores.expand_as(vec)
return max_scores + torch.log(torch.sum(torch.exp(vec - max_scores_broadcast), dim, keepdim=True))
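# Illustrative check (assumption): vec_log_sum_exp should agree with a naive
# log(sum(exp(.))) along the same dimension, but without overflowing for
# large scores.
def _example_vec_log_sum_exp():
    vec = torch.randn(3, 7)
    stable = vec_log_sum_exp(vec, dim=1)                      # shape (3, 1)
    naive = torch.log(torch.exp(vec).sum(1, keepdim=True))
    return torch.allclose(stable, naive, atol=1e-5)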
class CRF(nn.Module):
def __init__(self, n_tags, idxs=None):
"""Initialize the object.
:param n_tags: int The number of tags in your output (emission size)
:param idxs: Tuple(int. int) The index of the start and stop symbol
in emissions.
Note:
if idxs is none then the CRF adds these symbols to the emission
vectors and n_tags is assumed to be the number of output tags.
if idxs is not none then the first element is assumed to be the
start index and the second idx is assumed to be the end index. In
this case n_tags is assumed to include the start and end symbols.
"""
super(CRF, self).__init__()
if idxs is None:
self.start_idx = n_tags
self.end_idx = n_tags + 1
self.n_tags = n_tags + 2
self.add_ends = True
else:
self.start_idx, self.end_idx = idxs
self.n_tags = n_tags
self.add_ends = False
self.transitions = nn.Parameter(torch.Tensor(self.n_tags, self.n_tags).zero_())
@staticmethod
def _prep_input(input_):
ends = torch.Tensor(input_.size()[0], 2).fill_(-1000.).to(input_.device)
return torch.cat([input_, ends], dim=1)
def neg_log_loss(self, unary, tags):
if self.add_ends:
unary = CRF._prep_input(unary)
viterbi_score = self.forward(unary)
gold_score = self.score_sentence(unary, tags)
return viterbi_score - gold_score
def score_sentence(self, unary, tags):
# Gives the score of a provided tag sequence
        score = torch.autograd.Variable(torch.Tensor([0]).to(unary.device))
        tags = torch.cat([torch.LongTensor([self.start_idx]).to(tags.device), tags])
for i, unary_t in enumerate(unary):
score = score + self.transitions[tags[i + 1], tags[i]] + unary_t[tags[i + 1]]
score = score + self.transitions[self.end_idx, tags[-1]]
return score
def forward(self, unary):
"""Vectorized forward algorithm for CRF layer
:param unary: The observations
:param transitions: The transitions
:param start_idx: The index of the start position
:param end_idx: The index of the end position
:return: Alphas
"""
# Do the forward algorithm to compute the partition function
init_alphas = torch.Tensor(1, self.n_tags).fill_(-1000.).to(unary.device)
# START_TAG has all of the score.
init_alphas[0][self.start_idx] = 0.
# Wrap in a variable so that we will get automatic backprop
alphas = torch.autograd.Variable(init_alphas)
# Iterate through the sentence
for t, unary_t in enumerate(unary):
emit_scores_transpose = unary_t.view(-1, 1)
next_tag_var = alphas + emit_scores_transpose + self.transitions
scores = vec_log_sum_exp(next_tag_var, 1).transpose(0, 1)
alphas = scores
terminal_var = alphas + self.transitions[self.end_idx]
alpha = log_sum_exp(terminal_var)
return alpha
def decode(self, unary):
if self.add_ends:
unary = CRF._prep_input(unary)
backpointers = []
        inits = torch.Tensor(1, self.n_tags).fill_(-10000.).to(unary.device)
inits[0][self.start_idx] = 0
# alphas at step i holds the viterbi variables for step i-1
alphas = torch.autograd.Variable(inits)
for unary_t in unary:
backpointers_t = [] # holds the backpointers for this step
viterbi_t = [] # holds the viterbi variables for this step
for tag in range(self.n_tags):
next_tag_var = alphas + self.transitions[tag]
best_tag_id = argmax(next_tag_var)
backpointers_t.append(best_tag_id)
viterbi_t.append(next_tag_var[0][best_tag_id])
if PYT_MAJOR_VERSION < 0.4:
alphas = (torch.cat(viterbi_t) + unary_t).view(1, -1)
else:
alphas = (torch.stack(viterbi_t, 0) + unary_t).view(1, -1)
backpointers.append(backpointers_t)
# Transition to STOP_TAG
terminal_var = alphas + self.transitions[self.end_idx]
best_tag_id = argmax(terminal_var)
path_score = terminal_var[0][best_tag_id]
# Follow the back pointers to decode the best path.
best_path = [best_tag_id]
for backpointers_t in reversed(backpointers):
best_tag_id = backpointers_t[best_tag_id]
best_path.append(best_tag_id)
        # Pop off the start tag (we don't want to return that to the caller)
start = best_path.pop()
assert start == self.start_idx
best_path.reverse()
return torch.LongTensor(best_path), path_score
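# Illustrative sketch (not part of the original module): minimal use of the CRF
# layer with add_ends=True, i.e. n_tags counts only the real output tags and the
# start/stop symbols are appended internally. The emission scores and gold tags
# below are random placeholders, and the sketch assumes the CRF's internal
# constants follow the device of `unary` (CPU here).
def _example_crf():
    n_tags, seq_len = 5, 7
    crf = CRF(n_tags)
    unary = torch.randn(seq_len, n_tags)          # per-step emission scores
    gold = torch.randint(0, n_tags, (seq_len,))   # gold tag sequence
    loss = crf.neg_log_loss(unary, gold)          # scalar training loss
    best_path, path_score = crf.decode(unary)     # Viterbi decoding
    return loss, best_path, path_score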
|
py | 1a424fa299c9021d844f8f99b0177f7036f1c6c7 |
import sys, os
sys.path.append(os.path.join(os.path.expanduser("~"), "chipfish"))
import app as chipfish
from glbase_wrapper import location, glload, genelist
draw = 'pdf'
c = chipfish.app()
c.startup(os.path.expanduser("../trk_TEs.txt"))
#annot = glload(os.path.expanduser('~/hg38/hg38_ensembl_v95_enst.glb'))
#annot = annot.renameKey('name', 'gene_symbol')
gllocs = glload('../../te_discovery/te_transcripts/transcript_table_merged.mapped.glb')
locs = ['SOX2', 'NANOG', 'SALL4', 'LIN28A', 'LIN28B', 'SALL1', 'POU5F1A',
'DPPA2', 'DPPA3', 'DPPA5A', 'PRDM14', 'JARID2', 'SALL2', 'SALL3', 'TCF3',
'ZFP42', 'C9ORF135', 'ST6GAL1', 'LRP4', 'MSTO1', 'PRODH',# From Pontis et al., 2019 CSC
'ESRRB', 'LIN28A', 'LIN28B', 'PRDM14',
'POU5F1', 'SOX2', 'NANOG', 'NCOR1', 'NCOR2', 'SALL1', 'KLF4', 'SALL1', 'NR5A1', 'NR5A2', 'NR5A3',
'KLF2', 'KLF5', 'LEFTY1', 'LEFTY2', 'FGF4', 'NODAL',
# Naive-specific genes;
'ESRRB', 'TFCP2L1', 'ZFP42', 'MT1H', 'DPPA3', 'DPPA4', 'DPPA5', 'ZNF486', 'CR1L', 'DNMT3L', 'ZNF534',
        # Differentiation genes;
'GATA2', 'GATA3', 'GATA4', 'SOX17', 'CER1',
# 2C genes
'NR0B1', 'CDX2', 'DUXF3',
# Down in naive:
'SFRP1', 'ZIC2', 'KDR', 'OTX2', 'DUSP6', 'SPRY4', 'THY1', 'ID3', 'ZIC5',
# MA Gang's possibles:
'HNRNPK', 'DDX1', 'DDX50', 'BRCA2', 'BRCA1', 'TOP1', 'RAP3', 'TRIM25', 'HNRNPU',
# Headline genes from Ihry et al., Cell Rep.
    # Significantly down-regulate POU5F1, P<0.05
'TGFBR2',
'GGCT',
'MAML2',
'POU5F1',
'TGFBR1',
'TMEM107',
'ZNF469',
'SMARCA4',
'PROK2',
'PAQR7',
'MINDY4',
# Odd stuff:
'LIN28B-AS1',
# Wang Jichang paper, Fig 3a. These ones have HERV spliced into their message
'SCGB3A2', 'NCR1', 'KLKB1', 'IL34', 'PLP1', 'ESRG', 'RPL39L',
]
locs = genelist(loadable_list=[{'gene_symbol': k} for k in locs])
ll = locs.map(genelist=gllocs, key='gene_symbol')
print(ll)
for gene in ll:
print(gene['name'])
c.draw.setLocation(loc=gene['loc'].expand(len(gene['loc']) / 10))
scale = 1.0
if draw == 'svg':
scale = 0.3
c.draw.exportImage("%s/%s_%s.%s" % (draw, gene['name'], gene['transcript_id'], draw), scale=scale, type=draw) # Cannot draw png and svg interleaved for some reason.
|
py | 1a42503dec5e76f554e894030b29dd539c5d713b | """
This project demonstrates NESTED LOOPS (i.e., loops within loops)
in the context of PRINTING on the CONSOLE.
Authors: David Mutchler, Valerie Galluzzi, Mark Hays, Amanda Stouder,
their colleagues and Christina Rogers.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
def main():
""" Calls the other functions to test them. """
run_test_rectangle_of_stars()
run_test_triangle_of_stars()
run_test_decreasing_exclamation_marks()
run_test_alternating_brackets()
run_test_triangle_same_number_in_each_row()
run_test_triangle_all_numbers_in_each_row()
def run_test_rectangle_of_stars():
""" Tests the rectangle_of_stars function. """
print()
print('--------------------------------------------')
print('Testing the RECTANGLE_OF_STARS function:')
print('--------------------------------------------')
print('Test 1 of rectangle_of_stars: (3, 5)')
rectangle_of_stars(3, 5)
print('Test 2 of rectangle_of_stars: (4, 11)')
rectangle_of_stars(4, 11)
print('Test 3 of rectangle_of_stars: (6, 2)')
rectangle_of_stars(6, 2)
def rectangle_of_stars(r, c):
"""
Prints a rectangle of stars (asterisks), with r rows and c columns.
For example, when r = 3 and c = 5:
*****
*****
*****
Preconditions: r and c are non-negative integers.
"""
# ------------------------------------------------------------------
# DONE: 2. Implement and test this function.
# Some tests are already written for you (above).
#
# *** Unless your instructor directs you otherwise,
# see the video
# nested_loops_in_PRINTING.mp4
# in Preparation for Session 18
# ** NOW **
# and follow along in that video as you do this problem.
# (Pause the video when it completes this problem.)
# ***
#
# IMPLEMENTATION RESTRICTION:
# ** You may NOT use string multiplication **
# in this or the other problems in this module, as doing so
# would defeat the goal of providing practice at loops within loops.
# ------------------------------------------------------------------
for k in range (r):
for i in range (c):
print('*', end = '')
print()
def run_test_triangle_of_stars():
""" Tests the triangle_of_stars function. """
print()
print('-------------------------------------------')
print('Testing the TRIANGLE_OF_STARS function:')
print('-------------------------------------------')
print('Test 1 of triangle_of_stars: (5)')
triangle_of_stars(5)
print('Test 2 of triangle_of_stars: (1)')
triangle_of_stars(1)
print('Test 3 of triangle_of_stars: (3)')
triangle_of_stars(3)
print('Test 4 of triangle_of_stars: (6)')
triangle_of_stars(6)
def triangle_of_stars(r):
"""
Prints a triangle of stars (asterisks), with r rows.
-- The first row is 1 star,
the second is 2 stars,
the third is 3 stars, and so forth.
For example, when r = 5:
*
**
***
****
*****
Precondition: r is a non-negative integer.
"""
# ------------------------------------------------------------------
# DONE: 3. Implement and test this function.
# Some tests are already written for you (above).
#
# *** Unless your instructor directs you otherwise,
# see the video
# nested_loops_in_PRINTING.mp4
# in Preparation for Session 18
# ** NOW **
# and follow along in that video as you do this problem.
# (Continue the video from where you paused it
# in the previous problem.)
# ***
#
# IMPLEMENTATION RESTRICTION:
# ** You may NOT use string multiplication **
# in this or the other problems in this module, as doing so
# would defeat the goal of providing practice at loops within loops.
# ------------------------------------------------------------------
for k in range (r):
for j in range (k+1):
print('*', end = '')
print ()
def run_test_decreasing_exclamation_marks():
""" Tests the decreasing_exclamation_marks function. """
print()
print('----------------------------------------------------------')
print('Testing the DECREASING_EXCLAMATION_MARKS function:')
print('----------------------------------------------------------')
print('Test 1 of decreasing_exclamation_marks: (5, 2)')
decreasing_exclamation_marks(5, 2)
print('Test 2 of decreasing_exclamation_marks: (3, 1)')
decreasing_exclamation_marks(3, 1)
print('Test 3 of decreasing_exclamation_marks: (4, 4)')
decreasing_exclamation_marks(4, 4)
print('Test 4 of decreasing_exclamation_marks: (8, 6)')
decreasing_exclamation_marks(8, 6)
def decreasing_exclamation_marks(m, n):
"""
Prints exclamation marks: m on the first row,
m-1 on the next row, m-2 on the next, etc, until n on the last row.
For example, when m = 5 and n = 2:
!!!!!
!!!!
!!!
!!
Precondition: m and n are positive integers with m >= n.
"""
# ------------------------------------------------------------------
# DONE: 4. Implement and test this function.
# Some tests are already written for you (above).
#
# IMPLEMENTATION RESTRICTION:
# ** You may NOT use string multiplication **
# in this or the other problems in this module, as doing so
# would defeat the goal of providing practice at loops within loops.
# ------------------------------------------------------------------
for k in range (m-n+1):
for i in range (m-k):
print ('!', end = '')
print()
def run_test_alternating_brackets():
""" Tests the alternating_brackets function. """
print()
print('----------------------------------------------------------')
print('Testing the ALTERNATING_BRACKETS function:')
print('----------------------------------------------------------')
print('Test 1 of alternating_brackets: (5, 2)')
alternating_brackets(5, 2)
print('Test 2 of alternating_brackets: (3, 1)')
alternating_brackets(3, 1)
print('Test 3 of alternating_brackets: (4, 4)')
alternating_brackets(4, 4)
print('Test 4 of alternating_brackets: (8, 6)')
alternating_brackets(8, 6)
def alternating_brackets(m, n):
"""
Prints alternating left/right square brackets: m on the first row,
m-1 on the next row, m-2 on the next, etc, until n on the last row.
For example, when m = 5 and n = 2:
[][][
[][]
[][
[]
Precondition: m and n are positive integers with m >= n.
"""
# ------------------------------------------------------------------
# DONE: 5. Implement and test this function.
# Some tests are already written for you (above).
#
# IMPLEMENTATION RESTRICTION:
# ** You may NOT use string multiplication **
# in this or the other problems in this module, as doing so
# would defeat the goal of providing practice at loops within loops.
# ------------------------------------------------------------------
for k in range (m-n+1):
for i in range (m-k):
if (i)%2 == 0:
print ('[', end = '')
else:
print(']', end = '')
print()
def run_test_triangle_same_number_in_each_row():
""" Tests the triangle_same_number_in_each_row function. """
print()
print('----------------------------------------------------------')
print('Testing the TRIANGLE_SAME_NUMBER_IN_EACH_ROW function:')
print('----------------------------------------------------------')
print('Test 1 of triangle_same_number_in_each_row: (5)')
triangle_same_number_in_each_row(5)
print('Test 2 of triangle_same_number_in_each_row: (1)')
triangle_same_number_in_each_row(1)
print('Test 3 of triangle_same_number_in_each_row: (3)')
triangle_same_number_in_each_row(3)
print('Test 4 of triangle_same_number_in_each_row: (6)')
triangle_same_number_in_each_row(6)
def triangle_same_number_in_each_row(r):
"""
Prints a triangle of numbers, with r rows.
The first row is 1, the 2nd is 22, the 3rd is 333, etc.
For example, when r = 5:
1
22
333
4444
55555
Precondition: r is a non-negative integer.
"""
# ------------------------------------------------------------------
# DONE: 6. Implement and test this function.
# Some tests are already written for you (above).
#
# IMPLEMENTATION RESTRICTION:
# ** You may NOT use string multiplication **
# in this or the other problems in this module, as doing so
# would defeat the goal of providing practice at loops within loops.
# ------------------------------------------------------------------
for k in range (r):
for i in range (k+1):
print (k+1, end = '')
print()
def run_test_triangle_all_numbers_in_each_row():
""" Tests the triangle_all_numbers_in_each_row function. """
print()
print('----------------------------------------------------------')
print('Testing the TRIANGLE_ALL_NUMBERS_IN_EACH_ROW function:')
print('----------------------------------------------------------')
print('Test 1 of triangle_all_numbers_in_each_row: (5)')
triangle_all_numbers_in_each_row(5)
print('Test 2 of triangle_all_numbers_in_each_row: (1)')
triangle_all_numbers_in_each_row(1)
print('Test 3 of triangle_all_numbers_in_each_row: (3)')
triangle_all_numbers_in_each_row(3)
print('Test 4 of triangle_all_numbers_in_each_row: (6)')
triangle_all_numbers_in_each_row(6)
def triangle_all_numbers_in_each_row(r):
"""
Prints a triangle of numbers, with r rows.
The first row is 1, the 2nd is 12, the 3rd is 123, etc.
For example, when r = 5:
1
12
123
1234
12345
Precondition: r is a non-negative integer.
"""
# ------------------------------------------------------------------
# DONE: 7. Implement and test this function.
# Some tests are already written for you (above).
#
# IMPLEMENTATION RESTRICTION:
# ** You may NOT use string multiplication **
# in this or the other problems in this module, as doing so
# would defeat the goal of providing practice at loops within loops.
# ------------------------------------------------------------------
for k in range (r):
for i in range (k+1):
print (i+1, end = '')
print()
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
|
py | 1a425075161b233a48b6e9fae9dbe0b8241d84c3 | # -*- coding: utf-8 -*-
"""
Capacity scaling minimum cost flow algorithm.
"""
__author__ = """ysitu <[email protected]>"""
# Copyright (C) 2014 ysitu <[email protected]>
# All rights reserved.
# BSD license.
__all__ = ['capacity_scaling']
from itertools import chain
from math import log
import networkx as nx
from ...utils import BinaryHeap
from ...utils import generate_unique_node
from ...utils import not_implemented_for
from ...utils import arbitrary_element
def _detect_unboundedness(R):
"""Detect infinite-capacity negative cycles.
"""
s = generate_unique_node()
G = nx.DiGraph()
G.add_nodes_from(R)
# Value simulating infinity.
inf = R.graph['inf']
# True infinity.
f_inf = float('inf')
for u in R:
for v, e in R[u].items():
# Compute the minimum weight of infinite-capacity (u, v) edges.
w = f_inf
for k, e in e.items():
if e['capacity'] == inf:
w = min(w, e['weight'])
if w != f_inf:
G.add_edge(u, v, weight=w)
if nx.negative_edge_cycle(G):
raise nx.NetworkXUnbounded(
'Negative cost cycle of infinite capacity found. '
'Min cost flow may be unbounded below.')
@not_implemented_for('undirected')
def _build_residual_network(G, demand, capacity, weight):
"""Build a residual network and initialize a zero flow.
"""
if sum(G.node[u].get(demand, 0) for u in G) != 0:
raise nx.NetworkXUnfeasible("Sum of the demands should be 0.")
R = nx.MultiDiGraph()
R.add_nodes_from((u, {'excess': -G.node[u].get(demand, 0),
'potential': 0}) for u in G)
inf = float('inf')
# Detect selfloops with infinite capacities and negative weights.
for u, v, e in G.selfloop_edges(data=True):
if e.get(weight, 0) < 0 and e.get(capacity, inf) == inf:
raise nx.NetworkXUnbounded(
'Negative cost cycle of infinite capacity found. '
'Min cost flow may be unbounded below.')
# Extract edges with positive capacities. Self loops excluded.
if G.is_multigraph():
edge_list = [(u, v, k, e)
for u, v, k, e in G.edges(data=True, keys=True)
if u != v and e.get(capacity, inf) > 0]
else:
edge_list = [(u, v, 0, e) for u, v, e in G.edges(data=True)
if u != v and e.get(capacity, inf) > 0]
# Simulate infinity with the larger of the sum of absolute node imbalances
# the sum of finite edge capacities or any positive value if both sums are
# zero. This allows the infinite-capacity edges to be distinguished for
# unboundedness detection and directly participate in residual capacity
# calculation.
inf = max(sum(abs(R.node[u]['excess']) for u in R),
2 * sum(e[capacity] for u, v, k, e in edge_list
if capacity in e and e[capacity] != inf)) or 1
for u, v, k, e in edge_list:
r = min(e.get(capacity, inf), inf)
w = e.get(weight, 0)
# Add both (u, v) and (v, u) into the residual network marked with the
# original key. (key[1] == True) indicates the (u, v) is in the
# original network.
R.add_edge(u, v, key=(k, True), capacity=r, weight=w, flow=0)
R.add_edge(v, u, key=(k, False), capacity=0, weight=-w, flow=0)
# Record the value simulating infinity.
R.graph['inf'] = inf
_detect_unboundedness(R)
return R
def _build_flow_dict(G, R, capacity, weight):
"""Build a flow dictionary from a residual network.
"""
inf = float('inf')
flow_dict = {}
if G.is_multigraph():
for u in G:
flow_dict[u] = {}
for v, es in G[u].items():
flow_dict[u][v] = dict(
# Always saturate negative selfloops.
(k, (0 if (u != v or e.get(capacity, inf) <= 0 or
e.get(weight, 0) >= 0) else e[capacity]))
for k, e in es.items())
for v, es in R[u].items():
if v in flow_dict[u]:
flow_dict[u][v].update((k[0], e['flow'])
for k, e in es.items()
if e['flow'] > 0)
else:
for u in G:
flow_dict[u] = dict(
# Always saturate negative selfloops.
(v, (0 if (u != v or e.get(capacity, inf) <= 0 or
e.get(weight, 0) >= 0) else e[capacity]))
for v, e in G[u].items())
flow_dict[u].update((v, e['flow']) for v, es in R[u].items()
for e in es.values() if e['flow'] > 0)
return flow_dict
def capacity_scaling(G, demand='demand', capacity='capacity', weight='weight',
heap=BinaryHeap):
r"""Find a minimum cost flow satisfying all demands in digraph G.
This is a capacity scaling successive shortest augmenting path algorithm.
G is a digraph with edge costs and capacities and in which nodes
have demand, i.e., they want to send or receive some amount of
flow. A negative demand means that the node wants to send flow, a
    positive demand means that the node wants to receive flow. A flow on
the digraph G satisfies all demand if the net flow into each node
is equal to the demand of that node.
Parameters
----------
G : NetworkX graph
DiGraph or MultiDiGraph on which a minimum cost flow satisfying all
demands is to be found.
demand : string
Nodes of the graph G are expected to have an attribute demand
that indicates how much flow a node wants to send (negative
demand) or receive (positive demand). Note that the sum of the
    demands should be 0 otherwise the problem is not feasible. If
this attribute is not present, a node is considered to have 0
demand. Default value: 'demand'.
capacity : string
Edges of the graph G are expected to have an attribute capacity
that indicates how much flow the edge can support. If this
attribute is not present, the edge is considered to have
infinite capacity. Default value: 'capacity'.
weight : string
Edges of the graph G are expected to have an attribute weight
that indicates the cost incurred by sending one unit of flow on
that edge. If not present, the weight is considered to be 0.
Default value: 'weight'.
heap : class
Type of heap to be used in the algorithm. It should be a subclass of
:class:`MinHeap` or implement a compatible interface.
If a stock heap implementation is to be used, :class:`BinaryHeap` is
    recommended over :class:`PairingHeap` for Python implementations without
optimized attribute accesses (e.g., CPython) despite a slower
asymptotic running time. For Python implementations with optimized
attribute accesses (e.g., PyPy), :class:`PairingHeap` provides better
performance. Default value: :class:`BinaryHeap`.
Returns
-------
flowCost : integer
Cost of a minimum cost flow satisfying all demands.
flowDict : dictionary
If G is a digraph, a dict-of-dicts keyed by nodes such that
flowDict[u][v] is the flow on edge (u, v).
If G is a MultiDiGraph, a dict-of-dicts-of-dicts keyed by nodes
so that flowDict[u][v][key] is the flow on edge (u, v, key).
Raises
------
NetworkXError
This exception is raised if the input graph is not directed,
not connected.
NetworkXUnfeasible
This exception is raised in the following situations:
* The sum of the demands is not zero. Then, there is no
flow satisfying all demands.
* There is no flow satisfying all demand.
NetworkXUnbounded
This exception is raised if the digraph G has a cycle of
negative cost and infinite capacity. Then, the cost of a flow
satisfying all demands is unbounded below.
Notes
-----
This algorithm does not work if edge weights are floating-point numbers.
See also
--------
:meth:`network_simplex`
Examples
--------
A simple example of a min cost flow problem.
>>> import networkx as nx
>>> G = nx.DiGraph()
>>> G.add_node('a', demand = -5)
>>> G.add_node('d', demand = 5)
>>> G.add_edge('a', 'b', weight = 3, capacity = 4)
>>> G.add_edge('a', 'c', weight = 6, capacity = 10)
>>> G.add_edge('b', 'd', weight = 1, capacity = 9)
>>> G.add_edge('c', 'd', weight = 2, capacity = 5)
>>> flowCost, flowDict = nx.capacity_scaling(G)
>>> flowCost
24
>>> flowDict # doctest: +SKIP
{'a': {'c': 1, 'b': 4}, 'c': {'d': 1}, 'b': {'d': 4}, 'd': {}}
It is possible to change the name of the attributes used for the
algorithm.
>>> G = nx.DiGraph()
>>> G.add_node('p', spam = -4)
>>> G.add_node('q', spam = 2)
>>> G.add_node('a', spam = -2)
>>> G.add_node('d', spam = -1)
>>> G.add_node('t', spam = 2)
>>> G.add_node('w', spam = 3)
>>> G.add_edge('p', 'q', cost = 7, vacancies = 5)
>>> G.add_edge('p', 'a', cost = 1, vacancies = 4)
>>> G.add_edge('q', 'd', cost = 2, vacancies = 3)
>>> G.add_edge('t', 'q', cost = 1, vacancies = 2)
>>> G.add_edge('a', 't', cost = 2, vacancies = 4)
>>> G.add_edge('d', 'w', cost = 3, vacancies = 4)
>>> G.add_edge('t', 'w', cost = 4, vacancies = 1)
>>> flowCost, flowDict = nx.capacity_scaling(G, demand = 'spam',
... capacity = 'vacancies',
... weight = 'cost')
>>> flowCost
37
>>> flowDict # doctest: +SKIP
{'a': {'t': 4}, 'd': {'w': 2}, 'q': {'d': 1}, 'p': {'q': 2, 'a': 2}, 't': {'q': 1, 'w': 1}, 'w': {}}
"""
R = _build_residual_network(G, demand, capacity, weight)
inf = float('inf')
# Account cost of negative selfloops.
flow_cost = sum(
0 if e.get(capacity, inf) <= 0 or e.get(weight, 0) >= 0
else e[capacity] * e[weight]
for u, v, e in G.selfloop_edges(data=True))
    # Determine the maximum edge capacity.
wmax = max(chain([-inf],
(e['capacity'] for u, v, e in R.edges(data=True))))
if wmax == -inf:
# Residual network has no edges.
return flow_cost, _build_flow_dict(G, R, capacity, weight)
R_node = R.node
R_succ = R.succ
delta = 2 ** int(log(wmax, 2))
while delta >= 1:
# Saturate Δ-residual edges with negative reduced costs to achieve
# Δ-optimality.
for u in R:
p_u = R_node[u]['potential']
for v, es in R_succ[u].items():
for k, e in es.items():
flow = e['capacity'] - e['flow']
if e['weight'] - p_u + R_node[v]['potential'] < 0:
flow = e['capacity'] - e['flow']
if flow >= delta:
e['flow'] += flow
R_succ[v][u][(k[0], not k[1])]['flow'] -= flow
R_node[u]['excess'] -= flow
R_node[v]['excess'] += flow
# Determine the Δ-active nodes.
S = set()
T = set()
S_add = S.add
S_remove = S.remove
T_add = T.add
T_remove = T.remove
for u in R:
excess = R_node[u]['excess']
if excess >= delta:
S_add(u)
elif excess <= -delta:
T_add(u)
# Repeatedly augment flow from S to T along shortest paths until
# Δ-feasibility is achieved.
while S and T:
s = arbitrary_element(S)
t = None
            # Search for a shortest path in terms of reduced costs from s to
# any t in T in the Δ-residual network.
d = {}
pred = {s: None}
h = heap()
h_insert = h.insert
h_get = h.get
h_insert(s, 0)
while h:
u, d_u = h.pop()
d[u] = d_u
if u in T:
# Path found.
t = u
break
p_u = R_node[u]['potential']
for v, es in R_succ[u].items():
if v in d:
continue
wmin = inf
# Find the minimum-weighted (u, v) Δ-residual edge.
for k, e in es.items():
if e['capacity'] - e['flow'] >= delta:
w = e['weight']
if w < wmin:
wmin = w
kmin = k
emin = e
if wmin == inf:
continue
# Update the distance label of v.
d_v = d_u + wmin - p_u + R_node[v]['potential']
if h_insert(v, d_v):
pred[v] = (u, kmin, emin)
if t is not None:
# Augment Δ units of flow from s to t.
while u != s:
v = u
u, k, e = pred[v]
e['flow'] += delta
R_succ[v][u][(k[0], not k[1])]['flow'] -= delta
# Account node excess and deficit.
R_node[s]['excess'] -= delta
R_node[t]['excess'] += delta
if R_node[s]['excess'] < delta:
S_remove(s)
if R_node[t]['excess'] > -delta:
T_remove(t)
# Update node potentials.
d_t = d[t]
for u, d_u in d.items():
R_node[u]['potential'] -= d_u - d_t
else:
# Path not found.
S_remove(s)
delta //= 2
if any(R.node[u]['excess'] != 0 for u in R):
raise nx.NetworkXUnfeasible('No flow satisfying all demands.')
# Calculate the flow cost.
for u in R:
for v, es in R_succ[u].items():
for e in es.values():
flow = e['flow']
if flow > 0:
flow_cost += flow * e['weight']
return flow_cost, _build_flow_dict(G, R, capacity, weight)
|
py | 1a4250769da369e190f1787309b6c5d5c1934a09 | # -*- coding: utf-8 -*-
from nose.plugins.attrib import attr
from unittest import TestCase
import os
class BookTestCase(TestCase):
@attr("skip")
def test_scaffold(self):
assert False
# create temp directory
directory = "../var/tests/book"
if not os.path.exists(directory):
os.makedirs(directory)
# unpack the pattern with some settings
os.system("cd ../var/tests/book && ../../../bin/diamond scaffold --no-interactive analysis")
# assert
assert os.stat("../var/tests/book/Makefile")
# run the makefile
os.system("cd ../var/tests/book && make")
# test for certain files to be built
assert os.stat("../var/tests/book/.build/mybook.pdf")
|
py | 1a4250b698a62e761521b4e9cb9445bff89b2dc4 | import numpy as np
def levenshtein_distance(string1, string2):
m, n = len(string1), len(string2)
matrix = np.zeros((m + 1, n + 1), dtype=np.int32)
# source prefixes can be transformed into empty string by
# dropping all characters
for i in range(m + 1):
matrix[i, 0] = i
# target prefixes can be reached from empty source prefix
# by inserting every character
for j in range(n + 1):
matrix[0, j] = j
    # Fill in the rest of the table; row 0 and column 0 were initialized above.
    for j in range(1, n + 1):
        for i in range(1, m + 1):
if string1[i - 1] == string2[j - 1]:
substitution_cost = 0
else:
substitution_cost = 1
matrix[i, j] = min(matrix[i - 1, j] + 1, # deletion
matrix[i, j - 1] + 1, # insertion
matrix[i - 1, j - 1] + substitution_cost) # substitution
return matrix[m, n]
|
py | 1a425153e14d18ddbe90fa4c3d8aef303b3db3f3 | import pytest
from spytest import st, tgapi, SpyTestDict
import apis.routing.ip as ipapi
import apis.routing.bgp as bgpapi
import apis.switching.vlan as vlanapi
import apis.system.logging as slog_obj
import apis.switching.portchannel as poapi
import BGP.bgplib as bgplib
import utilities.common as utils
vtysh_cli_type = "vtysh"
@pytest.fixture(scope="module", autouse=True)
def bgp_module_hooks(request):
global bgp_cli_type
st.ensure_min_topology('D1D2:1', 'D1T1:1', 'D2T1:1')
bgplib.init_resource_data(st.get_testbed_vars())
#bgp_cli_type = st.get_ui_type()
bgp_cli_type = "click"
if bgp_cli_type == 'click':
bgp_cli_type = 'vtysh'
bgp_pre_config()
yield
bgp_pre_config_cleanup()
# bgp module level pre config function
def bgp_pre_config():
global topo
st.banner("Running with {} CLI RUN".format(bgp_cli_type))
st.banner("BGP MODULE CONFIG - START")
ipapi.clear_ip_configuration(st.get_dut_names(), family='all', thread=True)
vlanapi.clear_vlan_configuration(st.get_dut_names())
poapi.clear_portchannel_configuration(st.get_dut_names())
if not st.is_community_build():
# loopback config
bgplib.l3tc_vrfipv4v6_address_leafspine_loopback_config_unconfig(config='yes', config_type='all')
# TG Configuration
bgplib.l3tc_vrfipv4v6_address_leafspine_tg_config_unconfig(config='yes', config_type='all')
bgplib.l3tc_vrfipv4v6_address_leafspine_tg_bgp_config(config='yes', config_type='all')
st.banner("BGP MODULE CONFIG - END")
# bgp module level pre config cleanup function
def bgp_pre_config_cleanup():
st.banner("BGP MODULE CONFIG CLEANUP - START")
if not st.is_community_build():
# loopback unconfig
bgplib.l3tc_vrfipv4v6_address_leafspine_loopback_config_unconfig(config='no')
# TG uconfiguration
bgplib.l3tc_vrfipv4v6_address_leafspine_tg_config_unconfig(config='no')
bgplib.l3tc_vrfipv4v6_address_leafspine_tg_bgp_config(config='no')
ipapi.clear_ip_configuration(st.get_dut_names(), family='all', thread=True)
vlanapi.clear_vlan_configuration(st.get_dut_names())
poapi.clear_portchannel_configuration(st.get_dut_names())
st.banner("BGP MODULE CONFIG CLEANUP - END")
@pytest.fixture(scope="function")
def bgp_func_hooks(request):
yield
"""
BGP common test cases class - START
add common BGP test case functions here (common to TestBGPRif and other non-RIF test classes)
and pick the specific cases in the derived classes.
This is an abstract class with test cases; tests should not be run from this class
directly, they have to be run from the derived classes.
**DONTs**
*dont* name member functions of TestBGPCommon starting with 'test'. Have test member functions in derived classes only.
*dont* add fixtures in the base class. Add fixtures only in derived classes.
"""
class TestBGPCommon:
def ft_bgp_clear(self):
"""
Validate clear ip bgp & sonic-clear functionality
"""
st.log("Clearing bgp neighbors from sonic cli")
[out, exceptions] = utils.exec_foreach(bgplib.fast_start, topo.dut_list, bgpapi.clear_ip_bgp)
st.log([out, exceptions])
if not utils.poll_wait(bgplib.l3tc_vrfipv4v6_address_leafspine_bgp_check, 20, config_type='all'):
            st.error("Neighbour failed to establish between Spine - Leaf")
st.report_fail('test_case_failed')
st.log("Clearing bgp neighbors from FRR cli")
[out, exceptions] = utils.exec_foreach(bgplib.fast_start, topo.dut_list, bgpapi.clear_bgp_vtysh)
st.log([out, exceptions])
if not utils.poll_wait(bgplib.l3tc_vrfipv4v6_address_leafspine_bgp_check, 20, config_type='all'):
st.error("Neighbour is failed to Establish between Spine - Leaf")
st.report_fail('test_case_failed')
st.report_pass("test_case_passed")
def ft_bgp_peer_traffic_check(self):
"""
Traffic validation between Leaf Routers.
"""
TG_D1 = topo.tg_dut_list_name[0]
TG_D2 = topo.tg_dut_list_name[1]
tg_ob = topo['T1{}P1_tg_obj'.format(TG_D1)]
tg_ob.tg_traffic_control(port_handle=topo["T1{}P1_ipv4_tg_ph".format(TG_D1)], action='clear_stats')
tg_ob.tg_traffic_control(port_handle=topo["T1{}P1_ipv4_tg_ph".format(TG_D2)], action='clear_stats')
bgp_handle = topo['T1{}P1_ipv4_tg_bh'.format(TG_D1)]
tc_fail_flag = 0
spine_as = int(bgplib.data['spine_as'])
        st.log("Advertising Routes from one of the Leaf Routers")
bgp_route = tg_ob.tg_emulation_bgp_route_config(handle=bgp_handle['handle'], mode='add', num_routes='100',
prefix='121.1.1.0', as_path='as_seq:1')
bgp_ctrl = tg_ob.tg_emulation_bgp_control(handle=bgp_handle['handle'], mode='start')
st.log("Check for route count in neighbour, before update delay timer configuration")
bgp_summary_spine_before_timer = bgpapi.show_bgp_ipv4_summary(topo.dut_list[1])
rib_entries_before_update_timer = bgp_summary_spine_before_timer[0]['ribentries']
st.log('RIB entries before update delay configuration : {}'.format(rib_entries_before_update_timer))
        st.log("Configure Update delay timer on one of the Leaf routers")
bgpapi.create_bgp_update_delay(topo.dut_list[0], spine_as, '60',cli_type=bgp_cli_type)
st.log("Do clear ip bgp to validate the update delay timer")
bgpapi.clear_bgp_vtysh(topo.dut_list[0], address_family="ipv4")
if not utils.poll_wait(bgplib.l3tc_vrfipv4v6_address_leafspine_bgp_check, 20, config_type='ipv4'):
            st.error("Neighbour failed to establish between Spine - Leaf after clear ip bgp")
tc_fail_flag = 1
bgp_summary_spine_before_timer = bgpapi.show_bgp_ipv4_summary(topo.dut_list[1])
rib_entries_before_update_timer = bgp_summary_spine_before_timer[0]['ribentries']
st.log('RIB entries before update delay timer expiry : {}'.format(rib_entries_before_update_timer))
if int(rib_entries_before_update_timer) >= 100:
            st.error('Routes advertised to peer DUT, prior to update delay timer expiry')
tc_fail_flag = 1
# Sleep for update delay timer and the check the route count in neighbour
st.wait(60)
bgp_summary_spine_after_update_timer = bgpapi.show_bgp_ipv4_summary(topo.dut_list[1])
rib_entries_after_update_timer = bgp_summary_spine_after_update_timer[0]['ribentries']
st.log('RIB Entries after update delay timer expiry : {}'.format(rib_entries_after_update_timer))
if int(rib_entries_after_update_timer) < 100:
st.error('Routes are not advertised to peer DUT, even after the update delay timer expiry')
tc_fail_flag = 1
st.log("Initiating the Ipv4 traffic for those Routes from another Leaf Router")
src_handle = 'handle'
if tg_ob.tg_type == 'ixia':
src_handle = 'ipv4_handle'
tr1 = tg_ob.tg_traffic_config(port_handle=topo['T1{}P1_ipv4_tg_ph'.format(TG_D2)],
emulation_src_handle=topo['T1{}P1_ipv4_tg_ih'.format(TG_D2)][src_handle],
emulation_dst_handle=bgp_route['handle'], circuit_endpoint_type='ipv4',
mode='create',
transmit_mode='single_burst', pkts_per_burst='2000', length_mode='fixed',
rate_pps=1000)
stream_id1 = tr1['stream_id']
tg_ob.tg_traffic_control(action='run', handle=stream_id1)
tg_ob.tg_traffic_control(action='stop', port_handle=topo['T1{}P1_ipv4_tg_ph'.format(TG_D2)])
st.wait(5)
tg1_stats = tgapi.get_traffic_stats(tg_ob, port_handle=topo["T1{}P1_ipv4_tg_ph".format(TG_D1)])
tg2_stats = tgapi.get_traffic_stats(tg_ob, port_handle=topo["T1{}P1_ipv4_tg_ph".format(TG_D2)])
if not (int(tg2_stats.tx.total_packets) and int(tg1_stats.rx.total_packets)):
            st.error('Received ZERO stats.')
tc_fail_flag = 1
else:
percent_rx = float(int(tg1_stats.rx.total_packets) - int(tg2_stats.tx.total_packets)) / int(
tg2_stats.tx.total_packets) * 100
st.log('tg1_stats.rx.total_packets : {}'.format(tg1_stats.rx.total_packets))
st.log('tg2_stats.tx.total_packets : {}'.format(tg2_stats.tx.total_packets))
st.log('percent_rx : {}'.format(percent_rx))
if int(tg1_stats.rx.total_packets) < int(tg2_stats.tx.total_packets)*0.95:
tc_fail_flag = 1
tg_ob.tg_emulation_bgp_control(handle=bgp_handle['handle'], mode='stop')
bgpapi.create_bgp_update_delay(topo.dut_list[0], spine_as, '0', cli_type=bgp_cli_type)
if tc_fail_flag:
st.report_fail("traffic_verification_failed")
st.report_pass('test_case_passed')
def ft_bgp_graceful_restart_and_aware_routers(self):
"""
Verify the BGP peering between a graceful restart capable and graceful restart aware routers.
"""
st.banner("Verify the BGP peering between a graceful restart capable and graceful restart aware routers.")
        # Getting topo info between a spine and a leaf
info = SpyTestDict()
info = bgplib.get_tg_topology_leafspine_bgp(dut_type = 'spine-leaf', max_tg_links= '0', nodes='2')
# NOTE: D1 is spine and D2 is leaf by default
leaf_name = info['D2']
spine_name = info['D1']
# Configure graceful restart capability on the Leaf router
bgpapi.config_bgp_graceful_restart(leaf_name, local_asn=info['D2_as'], user_command='preserve-fw-state',
config='add', cli_type=bgp_cli_type)
# Verify bgp neighbors
result = bgpapi.verify_bgp_summary(leaf_name, family='ipv4', neighbor=info['D1D2P1_ipv4'], state='Established')
if not result:
bgplib.show_bgp_neighbors([leaf_name, spine_name], af='ipv4')
# Delete the graceful restart capability
bgpapi.config_bgp_graceful_restart(leaf_name, local_asn=info['D2_as'], user_command='preserve-fw-state',
config='delete', cli_type=bgp_cli_type)
if result:
st.log("BGP adjacency verified between graceful restart capable and aware router")
st.report_pass("test_case_passed")
else:
st.log("Failed to form BGP peering between graceful restart capable and aware router")
st.report_fail("bgp_ip_peer_establish_fail", info['D1D2P1_ipv4'])
def ft_bgp_ipv4_no_route_aggregation_for_exact_prefix_match(self):
"""
Verify that when the 'aggregate-address' command creates a summary address, incoming networks that
exactly match that prefix are not aggregated.
"""
st.banner("Verify that when the 'aggregate-address' command creates a summary address, "
"incoming networks that exactly match that prefix are not aggregated.")
        # Getting topo info between a spine and a leaf
aggr_route = "122.1.1.0/24"
info = SpyTestDict()
info = bgplib.get_tg_topology_leafspine_bgp(dut_type='spine-leaf', max_tg_links='1', nodes='2')
# NOTE: D1 is spine and D2 is leaf by default
leaf_name = info['D2']
spine_name = info['D1']
TG_D2 = 'D2'
# Verify bgp neighbors between leaf and Tg
result = bgpapi.verify_bgp_summary(leaf_name, family='ipv4', neighbor=info['T1D2P1_ipv4'], state='Established')
if not result:
bgplib.show_bgp_neighbors([leaf_name, spine_name], af='ipv4')
st.report_fail("test_case_failed")
# Configure the route aggregation on the Leaf router
bgpapi.create_bgp_aggregate_address(leaf_name, local_asn=info['D2_as'], address_range=aggr_route,
family="ipv4", config="add", cli_type=bgp_cli_type)
tg_ob = info['T1{}P1_tg_obj'.format(TG_D2)]
bgp_handle = info['T1{}P1_ipv4_tg_bh'.format(TG_D2)]
st.log("Advertising Routes from the Leaf Router")
bgp_route = tg_ob.tg_emulation_bgp_route_config(handle=bgp_handle['handle'], mode='add', num_routes='4',
prefix='122.1.1.0', as_path='as_seq:1')
st.log("BGPROUTE: "+str(bgp_route))
st.log("Advertise those routes from Ixia")
ctrl1=tg_ob.tg_bgp_routes_control(handle=bgp_handle['handle'], route_handle=bgp_route['handle'],
mode='readvertise')
st.log("TR_CTRL: "+str(ctrl1))
st.wait(5)
# Verify the prefix on spine
entries = bgpapi.get_ip_bgp_route(spine_name, family="ipv4", network=aggr_route)
if not entries:
bgpapi.create_bgp_aggregate_address(leaf_name, local_asn=info['D2_as'], address_range=aggr_route,
family="ipv4", config="delete", cli_type=bgp_cli_type)
st.report_fail("bgp_route_info", aggr_route, "not found")
AS_PATH_STRING = str(entries[0]['as_path'])
asn = AS_PATH_STRING.split(" ")
# If the route is aggregated the as_path will have only the peer-asn if not the whole as_path
if not int(asn[0]) == info['D2_as'] and len(asn) > 1:
bgpapi.create_bgp_aggregate_address(leaf_name, local_asn=info['D2_as'], address_range=aggr_route,
family="ipv4", config="delete", cli_type=bgp_cli_type)
st.report_fail("bgp_aggregation_pass", aggr_route)
bgpapi.create_bgp_aggregate_address(leaf_name, local_asn=info['D2_as'], address_range=aggr_route,
family="ipv4", config="delete", cli_type=bgp_cli_type)
bgp_route1 = tg_ob.tg_emulation_bgp_route_config(handle=bgp_handle['handle'], mode='remove', num_routes='4',
prefix='122.1.1.0', as_path='as_seq:1')
st.report_pass("test_case_passed")
def ft_bgp_ipv4_route_aggregation_atomic_aggregate_without_as_set(self):
"""
Verify that the AGGREGATOR and ATOMIC AGGREGATE attribute is included when an AS_SET is not configured
in aggregation.
"""
st.banner("Verify that the AGGREGATOR and ATOMIC AGGREGATE attribute is included when an AS_SET "
"is not configured in aggregation.")
aggr_route = "123.1.0.0/16"
info = SpyTestDict()
info = bgplib.get_tg_topology_leafspine_bgp(dut_type = 'spine-leaf', max_tg_links='1', nodes='2')
# NOTE: D1 is spine and D2 is leaf by default
leaf_name = info['D2']
spine_name = info['D1']
TG_D2 = 'D2'
# Verify bgp neighbors between leaf and Tg
result = bgpapi.verify_bgp_summary(leaf_name, family='ipv4', neighbor=info['T1D2P1_ipv4'], state='Established')
if not result:
bgplib.show_bgp_neighbors([leaf_name, spine_name], af='ipv4')
st.report_fail("test_case_failed")
# Configure the route aggregation on the Leaf router
bgpapi.create_bgp_aggregate_address(leaf_name, local_asn=info['D2_as'], address_range=aggr_route,
summary="summary-only", family="ipv4", config="add", cli_type=bgp_cli_type)
st.log(" clear the syslog file")
slog_obj.clear_logging(spine_name)
# Enable zebra logs
bgpapi.bgp_debug_config(spine_name, message="updates", prefix=aggr_route)
string = "bgp#supervisord:"
tg_ob=info['T1{}P1_tg_obj'.format(TG_D2)]
bgp_handle = info['T1{}P1_ipv4_tg_bh'.format(TG_D2)]
st.log("Configure routes to be advertised from Ixia")
bgp_route = tg_ob.tg_emulation_bgp_route_config(handle=bgp_handle['handle'], mode='add', num_routes='4',
prefix='123.1.1.0', as_path='as_seq:1')
st.log("Advertise those routes from Ixia")
ctrl1=tg_ob.tg_bgp_routes_control(handle=bgp_handle['handle'], route_handle=bgp_route['handle'],
mode='readvertise')
st.log("TR_CTRL: "+str(ctrl1))
st.wait(5)
st.log("&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&")
st.log(slog_obj.show_logging(spine_name, lines=200))
st.log("&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&")
        st.log("Verify logs on spine to check for the aggregator and atomic-aggregate attributes")
log_msg = slog_obj.get_logging_count(spine_name, filter_list=['{}'.format(string), 'atomic-aggregate',
'aggregated by {}'.format(info['D2_as']),
'path {}'.format(info['D2_as'])])
st.log("&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&")
st.log(log_msg)
st.log("&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&")
bgpapi.create_bgp_aggregate_address(leaf_name, local_asn=info['D2_as'], address_range=aggr_route,
summary="summary-only", family="ipv4", config="delete", cli_type=bgp_cli_type)
bgp_route1 = tg_ob.tg_emulation_bgp_route_config(handle=bgp_handle['handle'], mode='remove', num_routes='4',
prefix='123.1.1.0', as_path='as_seq:1')
if not log_msg:
st.report_fail("bgp_aggregation_fail", aggr_route)
st.report_pass("test_case_passed")
def ft_bgp_ipv6_route_aggregation_with_as_set(self):
"""
Verify that aggregation of ipv6 prefixes occurs correctly with as-set keyword
"""
st.banner("Verify that aggregation of ipv6 prefixes occurs correctly with as-set keyword")
aggr_route = "6002:1::0/64"
info = SpyTestDict()
info = bgplib.get_tg_topology_leafspine_bgp(dut_type = 'spine-leaf', max_tg_links= '1', nodes='2')
# NOTE: D1 is spine and D2 is leaf by default
leaf_name = info['D2']
spine_name = info['D1']
TG_D2 = 'D2'
# Configure the route aggregation on the Leaf router
bgpapi.create_bgp_aggregate_address(leaf_name, local_asn=info['D2_as'], address_range=aggr_route,
summary="summary-only", as_set="as-set", family="ipv6", config="add", cli_type=bgp_cli_type)
tg_ob=info['T1{}P1_tg_obj'.format(TG_D2)]
bgp_handle = info['T1{}P1_ipv6_tg_bh'.format(TG_D2)]
# Starting the BGP device.
bgp_ctrl=tg_ob.tg_emulation_bgp_control(handle=bgp_handle['handle'], mode='start')
st.log("BGPCTRL: "+str(bgp_ctrl))
# Verified at neighbor.
# Verify bgp neighbors between leaf and Tg
result = bgpapi.verify_bgp_summary(leaf_name, family='ipv6', neighbor=info['T1D2P1_ipv6'], state='Established')
if not result:
bgplib.show_bgp_neighbors([leaf_name, spine_name], af='ipv6')
st.report_fail("test_case_failed")
st.log("BGP neighbors established.")
st.log("Advertising Routes from the Leaf Router")
bgp_route_ipv6 = tg_ob.tg_emulation_bgp_route_config(handle=bgp_handle['handle'], mode='add', ip_version='6',
num_routes='4', prefix='6002:1::0', as_path='as_seq:1')
st.log("BGPROUTE: "+str(bgp_route_ipv6))
bgp_ctrl = tg_ob.tg_emulation_bgp_control(handle=bgp_handle['handle'], mode='start')
ctrl1=tg_ob.tg_bgp_routes_control(handle=bgp_handle['handle'], route_handle=bgp_route_ipv6['handle'],
mode='readvertise')
st.log("TR_CTRL: "+str(ctrl1))
st.wait(10)
# Verify the prefix on spine
st.log("Verify the prefix on spine")
entries = bgpapi.get_ip_bgp_route(spine_name, family="ipv6", network="6002:1::/64")
if not entries:
bgpapi.create_bgp_aggregate_address(leaf_name, local_asn=info['D2_as'], address_range=aggr_route,
summary="summary-only", as_set="as-set", family="ipv6",
config="delete", cli_type=bgp_cli_type)
st.report_fail("bgp_route_info", aggr_route, "not found")
AS_PATH_STRING = str(entries[0]['as_path'])
asn = AS_PATH_STRING.split(" ")
# If the route is aggregated the as_path will have the whole as_path because of as-set configuration
if not int(asn[0]) == info['D2_as'] and len(asn) > 1:
bgpapi.create_bgp_aggregate_address(leaf_name, local_asn=info['D2_as'], address_range=aggr_route,
summary="summary-only", as_set="as-set", family="ipv6",
config="delete", cli_type=bgp_cli_type)
st.report_fail("bgp_aggregation_fail", aggr_route)
bgpapi.create_bgp_aggregate_address(leaf_name, local_asn=info['D2_as'], address_range=aggr_route,
summary="summary-only", as_set="as-set", family="ipv6",
config="delete", cli_type=bgp_cli_type)
bgp_route_ipv6_rem = tg_ob.tg_emulation_bgp_route_config(handle=bgp_handle['handle'], mode='remove',
ip_version='6', num_routes='4', prefix='6002:1::0',
as_path='as_seq:1')
st.report_pass("test_case_passed")
def ft_bgp_route_aggregation_4byteASN(self):
"""
        Validate the AS4_Aggregate attribute w.r.t. the BGP 4-byte ASN feature
"""
aggr_route = "151.1.0.0/16"
info = SpyTestDict()
info = bgplib.get_tg_topology_leafspine_bgp(dut_type='spine-leaf', max_tg_links='1', nodes='2')
# NOTE: D1 is spine and D2 is leaf by default
leaf_name = info['D2']
spine_name = info['D1']
TG_D2 = 'D2'
# Verify bgp neighbors between leaf and Tg
if not utils.poll_wait(bgpapi.verify_bgp_summary, 30, leaf_name, family='ipv4', neighbor=info['T1D2P1_ipv4'],
state='Established'):
bgplib.show_bgp_neighbors([leaf_name, spine_name], af='ipv4')
            st.error("Neighbour failed to establish between Leaf - TG")
st.report_fail('test_case_failed')
# Configure the route aggregation on the Leaf router
bgpapi.create_bgp_aggregate_address(leaf_name, local_asn=info['D2_as'], address_range=aggr_route,
summary="summary-only", as_set="as-set", family="ipv4", config="add", cli_type=bgp_cli_type)
tg_ob=info['T1{}P1_tg_obj'.format(TG_D2)]
bgp_handle = info['T1{}P1_ipv4_tg_bh'.format(TG_D2)]
st.log("Advertising Routes from the Leaf Router")
bgp_route = tg_ob.tg_emulation_bgp_route_config(handle=bgp_handle['handle'], mode='add', num_routes='4',
prefix='151.1.1.0', as_path='as_seq:1')
st.log("Advertise those routes from Ixia")
ctrl1=tg_ob.tg_bgp_routes_control(handle=bgp_handle['handle'], route_handle=bgp_route['handle'],
mode='readvertise')
st.log("TR_CTRL: "+str(ctrl1))
st.wait(10)
# Verify the prefix on spine
entries = bgpapi.get_ip_bgp_route(spine_name, family="ipv4", network=aggr_route)
if not entries:
bgpapi.create_bgp_aggregate_address(leaf_name, local_asn=info['D2_as'], address_range=aggr_route,
summary="summary-only", as_set="as-set", family="ipv4",
config="delete", cli_type=bgp_cli_type)
st.report_fail("bgp_route_info", aggr_route, "not found")
AS_PATH_STRING = str(entries[0]['as_path'])
asn = AS_PATH_STRING.split(" ")
# Since as-set is set, as-path will have the whole path for the aggregated route including the 4-byte AS.
if not int(asn[0]) == info['D2_as'] and len(asn)>1:
bgpapi.create_bgp_aggregate_address(leaf_name, local_asn=info['D2_as'], address_range=aggr_route,
summary="summary-only", as_set="as-set", family="ipv4",
config="delete", cli_type=bgp_cli_type)
st.report_fail("bgp_aggregation_fail", aggr_route)
bgpapi.create_bgp_aggregate_address(leaf_name, local_asn=info['D2_as'], address_range=aggr_route,
summary="summary-only", as_set="as-set", family="ipv4",
config="delete", cli_type=bgp_cli_type)
st.report_pass("test_case_passed")
"""
BGP common test cases class - END
"""
"""
BGP Neighbor over regular router interface fixture, class and test cases - START
"""
def bgp_rif_pre_config():
global topo
st.banner("BGP RIF CLASS CONFIG - START")
# underlay config
bgplib.l3tc_underlay_config_unconfig(config='yes', config_type='phy')
bgplib.l3tc_vrfipv4v6_address_leafspine_config_unconfig(config='yes', config_type='all')
# Ping Verification
if not bgplib.l3tc_vrfipv4v6_address_leafspine_ping_test(config_type='all', ping_count=3):
st.error("Ping failed in between Spine - Leaf")
st.report_fail('test_case_failed')
bgplib.l3tc_vrfipv4v6_address_leafspine_bgp_config(config='yes')
bgplib.l3tc_vrfipv4v6_address_leafspine_tg_bgp_config(config='yes', config_type='all', class_reconfig='Yes')
st.wait(10)
# BGP Neighbour Verification
if not utils.poll_wait(bgplib.l3tc_vrfipv4v6_address_leafspine_bgp_check, 10, config_type='all'):
        st.error("Neighbour failed to establish between Spine - Leaf")
st.report_fail('test_case_failed')
st.log("Getting all topology info related to connectivity / TG and other parameters between duts")
topo = bgplib.get_leaf_spine_topology_info()
st.banner("BGP RIF CLASS CONFIG - END")
def bgp_rif_pre_config_cleanup():
st.banner("BGP RIF CLASS CONFIG CLEANUP - START")
bgplib.l3tc_vrfipv4v6_address_leafspine_bgp_config(config='no')
bgplib.l3tc_vrfipv4v6_address_leafspine_config_unconfig(config='no')
# cleanup underlay config
bgplib.l3tc_underlay_config_unconfig(config='no', config_type='phy')
st.banner("BGP RIF CLASS CONFIG CLEANUP - END")
@pytest.fixture(scope='class')
def bgp_rif_class_hook(request):
bgp_rif_pre_config()
yield
bgp_rif_pre_config_cleanup()
# TestBGPRif class
@pytest.mark.usefixtures('bgp_rif_class_hook')
class TestBGPRif(TestBGPCommon):
@pytest.mark.bgp_ft
@pytest.mark.community
@pytest.mark.community_pass
def test_ft_bgp_v6_link_local_bgp(self):
"""
Verify that BGP peer session is established with v6 link local address
"""
        # Getting topo info between a spine and a leaf
info = SpyTestDict()
info = bgplib.get_tg_topology_leafspine_bgp(dut_type='spine-leaf', max_tg_links='0', nodes='2')
# NOTE: D1 is spine and D2 is leaf by default
leaf_name = info['D2']
spine_name = info['D1']
result = bgpapi.create_bgp_neighbor_interface(leaf_name, info['D2_as'], info['D2D1P1'], info['D1_as'], 'ipv6', cli_type=bgp_cli_type)
if not result:
st.error("Failed to enable BGP on interface {}".format(info['D2D1P1']))
st.report_fail('test_case_failed')
result = bgpapi.create_bgp_neighbor_interface(spine_name, info['D1_as'], info['D1D2P1'], info['D2_as'], 'ipv6', cli_type=bgp_cli_type)
if not result:
# Clear the previous config
bgpapi.create_bgp_neighbor_interface(leaf_name, info['D2_as'], info['D2D1P1'], info['D1_as'], 'ipv6', 'no', cli_type=bgp_cli_type)
st.error("Failed to enable BGP on interface {}".format(info['D1D2P1']))
st.report_fail('test_case_failed')
# Verify bgp session on interface
if not utils.poll_wait(bgpapi.verify_bgp_summary, 130, leaf_name, family='ipv6', neighbor=info['D2D1P1'],
state='Established'):
# show neighbors for debug in case of failure and Clear all config
utils.exec_all(True, [[bgpapi.show_bgp_ipv6_neighbor_vtysh, leaf_name], [bgpapi.show_bgp_ipv6_neighbor_vtysh, spine_name]])
bgpapi.create_bgp_neighbor_interface(leaf_name, info['D2_as'], info['D2D1P1'], info['D1_as'], 'ipv6', 'no', cli_type=bgp_cli_type)
bgpapi.create_bgp_neighbor_interface(spine_name, info['D1_as'], info['D1D2P1'], info['D2_as'], 'ipv6', 'no', cli_type=bgp_cli_type)
st.error("BGP Neighbor failed to Establish between DUT and Partner")
st.report_fail('operation_failed')
utils.exec_all(True, [[bgpapi.show_bgp_ipv6_neighbor_vtysh, leaf_name],
[bgpapi.show_bgp_ipv6_neighbor_vtysh, spine_name]])
bgpapi.create_bgp_neighbor_interface(leaf_name, info['D2_as'], info['D2D1P1'], info['D1_as'], 'ipv6', 'no', cli_type=bgp_cli_type)
bgpapi.create_bgp_neighbor_interface(spine_name, info['D1_as'], info['D1D2P1'], info['D2_as'], 'ipv6', 'no', cli_type=bgp_cli_type)
st.report_pass("test_case_passed")
@pytest.mark.bgp_clear
@pytest.mark.bgp_ft
@pytest.mark.community
@pytest.mark.community_pass
def test_ft_bgp_clear(self):
TestBGPCommon.ft_bgp_clear(self)
@pytest.mark.bgp_traffic
@pytest.mark.bgp_ft
@pytest.mark.community
@pytest.mark.community_fail
def test_ft_bgp_peer_traffic_check(self):
TestBGPCommon.ft_bgp_peer_traffic_check(self)
@pytest.mark.bgp_ft
@pytest.mark.community
@pytest.mark.community_pass
def test_ft_bgp_graceful_restart_and_aware_routers(self):
TestBGPCommon.ft_bgp_graceful_restart_and_aware_routers(self)
@pytest.mark.bgp_ft
@pytest.mark.community
@pytest.mark.community_fail
def test_ft_bgp_ipv4_no_route_aggregation_for_exact_prefix_match(self):
TestBGPCommon.ft_bgp_ipv4_no_route_aggregation_for_exact_prefix_match(self)
@pytest.mark.bgp_ft
@pytest.mark.community
@pytest.mark.community_fail
def test_ft_bgp_ipv4_route_aggregation_atomic_aggregate_without_as_set(self):
TestBGPCommon.ft_bgp_ipv4_route_aggregation_atomic_aggregate_without_as_set(self)
@pytest.mark.bgp_ft
@pytest.mark.community
@pytest.mark.community_fail
def test_bgp_route_aggregation_4byteASN(self):
TestBGPCommon.ft_bgp_route_aggregation_4byteASN(self)
@pytest.mark.bgp_ft
def test_ft_bgp_ipv6_route_aggregation_with_as_set(self):
TestBGPCommon.ft_bgp_ipv6_route_aggregation_with_as_set(self)
@pytest.mark.bgp_ft
def test_ft_bgp_v4_dyn_nbr(self):
"""
Verify that BGP peering is formed with dynamic neighbors having a 4-byte ASN
"""
# Get topology info between a spine and a leaf
info = bgplib.get_tg_topology_leafspine_bgp(dut_type='spine-leaf', max_tg_links='0', nodes='2')
# NOTE: D1 is spine, D2 is leaf by default
leaf_name = info['D2']
spine_name = info['D1']
# Configure an ip address on Spine
spine_ipv4 = '45.45.45.45'
ipapi.config_ip_addr_interface(spine_name, info['D1D2P1'], spine_ipv4, 24)
# Configure an ip address on Leaf
leaf_ipv4 = '45.45.45.46'
ipapi.config_ip_addr_interface(leaf_name, info['D2D1P1'], leaf_ipv4, 24)
# if bgp_cli_type == "klish":
# bgpapi.config_bgp_peer_group(leaf_name, info['D2_as'], 'leaf_spine', config="yes", cli_type=vtysh_cli_type)
# Add a listen range on Leaf
listen_range = '45.45.45.0'
bgpapi.config_bgp_listen(leaf_name, info['D2_as'], listen_range, 24, 'leaf_spine', 0, cli_type=bgp_cli_type)
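# For reference, this roughly corresponds to the FRR command "bgp listen range 45.45.45.0/24 peer-group leaf_spine"
# (assumed mapping; the exact CLI rendering depends on the configured cli_type).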
# Add neighbor on Spine
bgpapi.create_bgp_neighbor_use_peergroup(spine_name, info['D1_as'], 'spine_leaf', leaf_ipv4, cli_type=bgp_cli_type)
# Verify bgp neighbors
result = bgpapi.verify_bgp_summary(leaf_name, family='ipv4', neighbor='*'+spine_ipv4, state='Established')
if not result:
bgplib.show_bgp_neighbors([leaf_name, spine_name], af='ipv4')
# Clear applied configs
# Delete listen range
bgpapi.config_bgp_listen(leaf_name, info['D2_as'], listen_range, 24, 'leaf_spine', 0, 'no', cli_type=bgp_cli_type)
# Delete the neighbor from Spine
bgpapi.delete_bgp_neighbor(spine_name, info['D1_as'], leaf_ipv4, info['D2_as'], cli_type=bgp_cli_type)
# Delete ip address from Leaf
ipapi.delete_ip_interface(leaf_name, info['D2D1P1'], leaf_ipv4, 24)
# Delete ip address from Spine
ipapi.delete_ip_interface(spine_name, info['D1D2P1'], spine_ipv4, 24)
# if bgp_cli_type == "klish":
# bgpapi.config_bgp_peer_group(leaf_name, info['D2_as'], 'leaf_spine', config="no", cli_type=bgp_cli_type)
if result:
st.log("BGP adjacency verified")
st.report_pass("test_case_passed")
else:
st.log("Failed to form BGP peering using dynamic ipv4 neighbors")
st.report_fail("test_case_failed")
@pytest.mark.bgp_ft
def test_ft_bgp_v6_dyn_nbr(self):
"""
Verify that ipv6 BGP peering is formed with dynamic neighbors
"""
# Get topology info between a spine and a leaf
info = bgplib.get_tg_topology_leafspine_bgp(dut_type='spine-leaf', max_tg_links='0', nodes='2')
# NOTE: D1 is spine, D2 is leaf by default
leaf_name = info['D2']
spine_name = info['D1']
# Configure an ip address on Spine
spine_ipv6 = '2001::1'
ipapi.config_ip_addr_interface(spine_name, info['D1D2P1'], spine_ipv6, 64, family='ipv6')
# Configure an ip address on Leaf
leaf_ipv6 = '2001::2'
ipapi.config_ip_addr_interface(leaf_name, info['D2D1P1'], leaf_ipv6, 64, family='ipv6')
# Add a listen range on Leaf
listen_range = '2001::0'
bgpapi.config_bgp_listen(leaf_name, info['D2_as'], listen_range, 64, 'leaf_spine6', 0, cli_type=bgp_cli_type)
# Add neighbor on Spine
bgpapi.create_bgp_neighbor_use_peergroup(spine_name, info['D1_as'], 'spine_leaf6', leaf_ipv6, family='ipv6', cli_type=bgp_cli_type)
# Verify dynamic bgp neighbors
result = bgpapi.verify_bgp_summary(leaf_name, family='ipv6', neighbor='*'+spine_ipv6, state='Established')
if not result:
bgplib.show_bgp_neighbors([leaf_name, spine_name], af='ipv6')
# Clear applied configs
# Delete listen range
bgpapi.config_bgp_listen(leaf_name, info['D2_as'], listen_range, 64, 'leaf_spine6', 0, 'no', cli_type=bgp_cli_type)
# Delete the neighbor from Spine
bgpapi.delete_bgp_neighbor(spine_name, info['D1_as'], leaf_ipv6, info['D2_as'], cli_type=bgp_cli_type)
# Delete ip address from Leaf
ipapi.delete_ip_interface(leaf_name, info['D2D1P1'], leaf_ipv6, 64, family='ipv6')
# Delete ip address from Spine
ipapi.delete_ip_interface(spine_name, info['D1D2P1'], spine_ipv6, 64, family='ipv6')
if result:
st.log("BGP adjacency verified")
st.report_pass("test_case_passed")
else:
st.log("Failed to form BGP peering using dynamic ipv6 neighbors")
st.report_fail("test_case_failed")
@pytest.mark.bgp_ft
def test_ft_bgp_v4_max_dyn_nbr(self):
"""
Verify that BGP peering is established with maximum supported dynamic neighbors with maximum listen
ranges at once
"""
# Get topology info between a spine and a leaf
info = bgplib.get_tg_topology_leafspine_bgp(dut_type='spine-leaf', max_tg_links='0', nodes='2')
# NOTE: D1 is spine, D2 is leaf by default
leaf_name = info['D2']
spine_name = info['D1']
result = True
# Set listen limit
# NOTE: Setting a limit to max dynamic neighbors. It can be set to any value, but the test case execution
# time increases
limit = 5
bgpapi.config_bgp_listen(leaf_name,info['D2_as'], 0, 0, 'leaf_spine', limit,cli_type=bgp_cli_type)
# Apply Configs:
# Add IP addresses on leaf and spine
# Add neighbor on spine
# Add listen range on leaf
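# Each iteration uses a distinct /24 ({21..25}.0.5.0/24): .1 on the leaf, .2 on the spine,
# with the same /24 added as a listen range so the spine comes up as a dynamic neighbor.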
for i in range(1, limit+1):
leaf_ipaddr = '{}.0.5.1'.format(20+i)
spine_ipaddr = '{}.0.5.2'.format(20+i)
listen_range = '{}.0.5.0'.format(20+i)
ipapi.config_ip_addr_interface(spine_name, info['D1D2P1'], spine_ipaddr, 24)
ipapi.config_ip_addr_interface(leaf_name, info['D2D1P1'], leaf_ipaddr, 24)
bgpapi.config_bgp_listen(leaf_name, info['D2_as'], listen_range, 24, 'leaf_spine', 0,cli_type=bgp_cli_type)
bgpapi.create_bgp_neighbor_use_peergroup(spine_name, info['D1_as'], 'spine_leaf', leaf_ipaddr)
# Verify dynamic bgp neighbors
result = result & (bgpapi.verify_bgp_summary(leaf_name, family='ipv4', neighbor='*'+spine_ipaddr,
state='Established'))
if not result:
bgplib.show_bgp_neighbors([leaf_name, spine_name], af='ipv4')
# Clear applied configs
# Delete listen limit
bgpapi.config_bgp_listen(leaf_name, info['D2_as'], 0, 0, 'leaf_spine', limit, 'no',cli_type=bgp_cli_type)
for i in range(1, limit+1):
leaf_ipaddr = '{}.0.5.1'.format(20+i)
spine_ipaddr = '{}.0.5.2'.format(20+i)
listen_range = '{}.0.5.0'.format(20+i)
# Delete listen range
bgpapi.config_bgp_listen(leaf_name, info['D2_as'], listen_range, 24, 'leaf_spine', 0, 'no',cli_type=bgp_cli_type)
# Delete the neighbor from Spine
bgpapi.delete_bgp_neighbor(spine_name, info['D1_as'], leaf_ipaddr, info['D2_as'])
# Delete ip address from Leaf
ipapi.delete_ip_interface(leaf_name, info['D2D1P1'], leaf_ipaddr, 24)
# Delete ip address from Spine
ipapi.delete_ip_interface(spine_name, info['D1D2P1'], spine_ipaddr, 24)
if result:
st.log("BGP adjacency verified")
st.report_pass("test_case_passed")
else:
st.log("Failed to form BGP peering using max dynamic ipv4 neighbors")
st.report_fail("test_case_failed")
@pytest.mark.bgp_ft
@pytest.mark.community
@pytest.mark.community_pass
def test_ft_bgp_rmap(self):
"""
Verify route-map application after a route has been installed
"""
# Get topology info between a spine and a leaf
info = bgplib.get_tg_topology_leafspine_bgp(dut_type='spine-leaf', max_tg_links='0', nodes='2')
# NOTE: D1 is spine, D2 is leaf by default
leaf_name = info['D2']
spine_name = info['D1']
network1 = '134.5.6.0/24'
# Advertise a network to peer
bgpapi.advertise_bgp_network(leaf_name, info['D2_as'], network1, cli_type=vtysh_cli_type)
n1 = ipapi.verify_ip_route(spine_name, ip_address=network1)
if n1:
st.log("Advertised route present")
# Create a route-map to deny the network advertisement
ipapi.config_route_map_match_ip_address(leaf_name, 'test-rmap', 'deny', '10', 'test-access-list1')
# Create access-list test-access-list1 and deny the network
ipapi.config_access_list(leaf_name, 'test-access-list1', network1, 'deny')
# Add route-map to advertised network
bgpapi.advertise_bgp_network(leaf_name, info['D2_as'], network1, 'test-rmap', cli_type=vtysh_cli_type)
# Verify the network on spine
n1 = ipapi.verify_ip_route(spine_name, ip_address=network1)
if not n1:
result = True
else:
result = False
# Clear applied configs
ipapi.config_access_list(leaf_name, 'test-access-list1', network1, 'deny', config='no')
ipapi.config_route_map_mode(leaf_name, 'test-rmap', 'permit', '10', config='no')
bgpapi.advertise_bgp_network(leaf_name, info['D2_as'], network1, 'test-rmap', config='no', cli_type=vtysh_cli_type)
if result:
st.report_pass("test_case_passed")
else:
st.report_fail("test_case_failed")
@pytest.mark.bgp_ft
@pytest.mark.community
@pytest.mark.community_fail
def test_ft_bgp_rmap_out(self):
"""
Verify a route-map with multiple match and set options in the out direction
"""
# Get topology info between a spine and a leaf
info = bgplib.get_tg_topology_leafspine_bgp(dut_type='spine-leaf', max_tg_links='0', nodes='2')
# NOTE: D1 is spine, D2 is leaf by default
leaf_name = info['D2']
spine_name = info['D1']
result = True
network1 = '134.5.6.0/24'
network2 = '134.5.7.0/24'
network3 = '134.5.8.0'
# Create route-map and permit network3
ipapi.config_route_map_match_ip_address(leaf_name, 'test-rmap', 'permit', '10', 'test-access-list1')
# Add set option to prepend as-path 200
ipapi.config_route_map_set_aspath(leaf_name, 'test-rmap', 'permit', '10', '200')
# Create access-list test-access-list1
ipapi.config_access_list(leaf_name, 'test-access-list1', network3+'/24', 'permit')
# Advertise two networks from leaf
bgpapi.advertise_bgp_network(leaf_name, info['D2_as'], network1, 'test-rmap', cli_type=vtysh_cli_type)
bgpapi.advertise_bgp_network(leaf_name, info['D2_as'], network2, 'test-rmap', cli_type=vtysh_cli_type)
bgpapi.advertise_bgp_network(leaf_name, info['D2_as'], network3+'/24', 'test-rmap', cli_type=vtysh_cli_type)
# In route-map, deny network1
ipapi.config_route_map_match_ip_address(leaf_name, 'test-rmap', 'deny', '20', 'test-access-list2')
# Create access-list test-access-list2
ipapi.config_access_list(leaf_name, 'test-access-list2', network1, 'deny')
# In route-map, permit network2
ipapi.config_route_map_match_ip_address(leaf_name, 'test-rmap', 'permit', '30', 'test-access-list3')
# Create access-list test-access-list3
ipapi.config_access_list(leaf_name, 'test-access-list3', network2, 'permit')
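# Expected outcome: network3 is advertised with AS 200 prepended (sequence 10), network1 is filtered out
# (sequence 20), and network2 is advertised unchanged (sequence 30).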
# verify that the neighbor has the as-path prepended
output = bgpapi.show_bgp_ipvx_prefix(spine_name, prefix=network3, masklen=24)
st.log(output)
prepend_found = False
for x in output:
as_path = x['peerasn']
as_path = as_path.split()
for each in as_path:
if each == "200":
prepend_found = True
result = result & prepend_found
# verify that network1 is not present in bgp routes
n1 = ipapi.verify_ip_route(spine_name,ip_address=network1)
if not n1:
result = result & True
else:
result = result & False
# verify that network2 is present in bgp routes
n2 = ipapi.verify_ip_route(spine_name,ip_address=network2)
if n2:
result = result & True
else:
result = result & False
# Clear applied configs
ipapi.config_access_list(leaf_name, 'test-access-list3', network2, 'permit', config='no')
ipapi.config_access_list(leaf_name, 'test-access-list2', network1, 'deny', config='no')
ipapi.config_access_list(leaf_name, 'test-access-list1', network3+'/24', 'permit', config='no')
ipapi.config_route_map_mode(leaf_name, 'test-rmap', 'permit', '10', config='no')
bgpapi.advertise_bgp_network(leaf_name, info['D2_as'], network1, 'test-rmap', config='no', cli_type=vtysh_cli_type)
bgpapi.advertise_bgp_network(leaf_name, info['D2_as'], network2, 'test-rmap', config='no', cli_type=vtysh_cli_type)
bgpapi.advertise_bgp_network(leaf_name, info['D2_as'], network3+'/24', 'test-rmap', config='no', cli_type=vtysh_cli_type)
if result:
st.report_pass("test_case_passed")
else:
st.report_fail("test_case_failed")
@pytest.mark.regression
def test_ft_bgp_ebgp_confed(self):
"""
Author : [email protected]
Verify the functionality of route-maps with confederation peers
"""
TG_D1 = topo.tg_dut_list_name[0]
tg_ob = topo['T1{}P1_tg_obj'.format(TG_D1)]
bgp_handle = topo['T1{}P1_ipv4_tg_bh'.format(TG_D1)]
info = bgplib.get_tg_topology_leafspine_bgp(dut_type='spine-leaf', max_tg_links='1', nodes='2')
spine_name = info['D1']
leaf_name = info['D2']
spine_as = info['D1_as']
leaf_as = info['D2_as']
confed_identifier = 65000
tc_fail_flag = 0
bgpapi.config_bgp(leaf_name, config='yes', config_type_list='', local_as=leaf_as,
conf_identf=confed_identifier,cli_type=vtysh_cli_type)
bgpapi.config_bgp(leaf_name, config='yes', config_type_list='', local_as=leaf_as, conf_peers=spine_as,cli_type=vtysh_cli_type)
bgpapi.config_bgp(spine_name, config='yes', config_type_list='', local_as=spine_as,
conf_identf=confed_identifier,cli_type=vtysh_cli_type)
bgpapi.config_bgp(spine_name, config='yes', config_type_list='', local_as=spine_as, conf_peers=leaf_as,cli_type=vtysh_cli_type)
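# Both DUTs share confederation identifier 65000 and list each other as confederation peers,
# so the eBGP session between them behaves as an intra-confederation peering.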
ipapi.config_route_map_match_ip_address(spine_name, 'confed-rmap', 'permit', '10', 'confed-access-list1')
ipapi.config_access_list(spine_name, 'confed-access-list1', '125.5.1.0/16', 'permit')
bgpapi.config_bgp(spine_name, local_as=spine_as, neighbor=info['D2D1P1_ipv4'], config_type_list=["routeMap"],
routeMap='confed-rmap', diRection='out',cli_type=vtysh_cli_type)
bgpapi.create_bgp_next_hop_self(spine_name, spine_as, 'ipv4', info['D2D1P1_ipv4'])
st.log("Advertising the route map matching routes from the Spine DUT i.e. they should "
"be advertised on Leaf node")
tg_ob.tg_emulation_bgp_route_config(handle=bgp_handle['handle'], mode='add', num_routes='20',
prefix='125.5.1.0', as_path='as_seq:1')
tg_ob.tg_emulation_bgp_control(handle=bgp_handle['handle'], mode='start')
n1 = ipapi.verify_ip_route(topo.dut_list[1], ip_address='125.5.5.0/24')
if not n1:
st.error('Route-map matching prefixes from the Spine DUT are not advertised to the leaf DUT.')
tc_fail_flag = 1
st.log("Advertising the route-map non matching routes from the Spine DUT i.e. they should not be "
"advertised on Leaf node.")
tg_ob.tg_emulation_bgp_route_config(handle=bgp_handle['handle'], mode='add', num_routes='20',
prefix='126.5.1.0', as_path='as_seq:1')
tg_ob.tg_emulation_bgp_control(handle=bgp_handle['handle'], mode='start')
n1 = ipapi.verify_ip_route(topo.dut_list[1], ip_address='126.5.5.0/24')
n2 = ipapi.verify_ip_route(topo.dut_list[0], ip_address='126.5.5.0/24')
if (n1 == True) or (n2 == False):
st.error('Route check failed for the scenario: route-map non-matching prefixes from the Spine DUT')
tc_fail_flag = 1
# Unconfig section
tg_ob.tg_emulation_bgp_control(handle=bgp_handle['handle'], mode='stop')
ipapi.config_route_map_mode(topo.dut_list[0], 'confed-rmap', 'permit', '10', config='no')
ipapi.config_access_list(topo.dut_list[0], 'confed-access-list1', '125.5.1.0/16', 'permit', config='no')
bgpapi.config_bgp(topo.dut_list[0], local_as=spine_as, config='no', neighbor=info['D2D1P1_ipv4'],
config_type_list=["routeMap"], routeMap='confed-rmap', diRection='out',cli_type=vtysh_cli_type)
bgpapi.create_bgp_next_hop_self(topo.dut_list[0], spine_as, 'ipv4', info['D2D1P1_ipv4'], 'no', 'no')
if tc_fail_flag:
st.report_fail('test_case_failed')
st.report_pass('test_case_passed')
"""
BGP Neighbor over regular router interface fixture, class and test cases - END
"""
"""
BGP IPv4 and IPv6 router distribution and filtering TCs: Start
"""
@pytest.fixture(scope='class')
def bgp_ipvx_route_adv_filter_fixture(request):
"""
Prepare the base configuration for route advertisement and filtering TCs.
Pick the first spine, the first leaf and the first link between them to build a reduced topology.
The following will eventually be changed to be API based;
it is currently implemented this way to make progress on the TCs.
"""
reduced_topo = dict()
reduced_topo['dut1'] = topo.spine_list[0]
reduced_topo['dut2'] = topo.leaf_list[0]
reduced_topo['dut1_index'] = 1 + topo.dut_list.index(topo.spine_list[0])
reduced_topo['dut2_index'] = 1 + topo.dut_list.index(topo.leaf_list[0])
reduced_topo['dut1_as'] = "{}".format(topo['D{}_as'.format(reduced_topo['dut1_index'])])
reduced_topo['dut2_as'] = "{}".format(topo['D{}_as'.format(reduced_topo['dut2_index'])])
reduced_topo['dut1_addr_ipv4'] = topo[
'D{}D{}P1_ipv4'.format(reduced_topo['dut1_index'], reduced_topo['dut2_index'])]
reduced_topo['dut2_addr_ipv4'] = topo[
'D{}D{}P1_ipv4'.format(reduced_topo['dut2_index'], reduced_topo['dut1_index'])]
reduced_topo['dut1_addr_ipv6'] = topo[
'D{}D{}P1_ipv6'.format(reduced_topo['dut1_index'], reduced_topo['dut2_index'])]
reduced_topo['dut2_addr_ipv6'] = topo[
'D{}D{}P1_ipv6'.format(reduced_topo['dut2_index'], reduced_topo['dut1_index'])]
reduced_topo['dut1_outif'] = topo[
'D{}D{}P1'.format(reduced_topo['dut1_index'], reduced_topo['dut2_index'])]
reduced_topo['dut2_outif'] = topo[
'D{}D{}P1'.format(reduced_topo['dut2_index'], reduced_topo['dut1_index'])]
request.cls.local_topo = reduced_topo
config_items = {}
bgplib.configure_base_for_route_adv_and_filter(reduced_topo['dut1'], reduced_topo['dut2'], reduced_topo,
config_items)
yield reduced_topo
bgplib.unconfigure_base_for_route_adv_and_filter(reduced_topo['dut1'], reduced_topo['dut2'], reduced_topo,
config_items)
@pytest.mark.usefixtures('bgp_rif_class_hook', 'bgp_ipvx_route_adv_filter_fixture')
class TestBGPIPvxRouteAdvertisementFilter:
local_topo = dict()
def configure_base_for_filter_prefix_on_community(self, peer_grp4_name, config, cli_type="vtysh"):
bgpapi.config_bgp(dut=self.local_topo['dut1'], local_as=self.local_topo['dut1_as'], config=config,
config_type_list=["redist"], redistribute='static',cli_type=cli_type)
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'],
neighbor=peer_grp4_name, addr_family='ipv4', config=config,
config_type_list=["routeMap"], routeMap='rmap1', diRection='in',cli_type=cli_type)
@pytest.mark.community
@pytest.mark.community_pass
def test_redistribute_connected_ipv4(self, bgp_ipvx_route_adv_filter_fixture):
bgpapi.config_address_family_redistribute(self.local_topo['dut1'], self.local_topo['dut1_as'],
'ipv4', 'unicast', "connected", config='yes', cli_type=bgp_cli_type)
output = ipapi.fetch_ip_route(self.local_topo['dut1'], match={'type': 'C'}, select=['ip_address'])
list_of_connected_network_on_dut1 = list(x['ip_address'] for x in output)
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut2'], family='ipv4',
select=['network', 'as_path'])
output = [x for x in output if "{}".format(self.local_topo['dut1_as']) in x['as_path']]
list_of_learned_routes_on_dut2_from_dut1 = list(x['network'] for x in output)
st.log('List of connected network on dut1:' + str(list_of_connected_network_on_dut1))
st.log('List of network learnt on dut2 from dut1:' + str(list_of_learned_routes_on_dut2_from_dut1))
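# Pass criterion: every connected network on dut1 must appear among the routes dut2 learned with dut1's AS in the as-path.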
if set(list_of_connected_network_on_dut1).issubset(set(list_of_learned_routes_on_dut2_from_dut1)):
result = True
else:
result = False
bgpapi.config_address_family_redistribute(self.local_topo['dut1'], self.local_topo['dut1_as'],
'ipv4', 'unicast', "connected", config='no', cli_type=bgp_cli_type)
if result:
st.report_pass("operation_successful")
else:
st.report_fail("operation_failed")
@pytest.mark.community
@pytest.mark.community_pass
def test_redistribute_static_ipv4(self, bgp_ipvx_route_adv_filter_fixture):
ipapi.create_static_route(self.local_topo['dut1'], self.local_topo['dut1_outif'], '100.1.1.1/32', family='ipv4')
output = ipapi.fetch_ip_route(self.local_topo['dut1'], match={'type': 'S'}, select=['ip_address'])
list_of_static_network_on_dut1 = list(x['ip_address'] for x in output)
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut2'], family='ipv4',
select=['network', 'as_path'])
output = [x for x in output if "{}".format(self.local_topo['dut1_as']) in x['as_path']]
list_of_learned_routes_on_dut2_by_dut1 = list(x['network'] for x in output)
st.log('List of static route on dut1' + str(list_of_static_network_on_dut1))
st.log('List of network redistributed to dut2 from dut1' + str(list_of_learned_routes_on_dut2_by_dut1))
bgpapi.config_address_family_redistribute(self.local_topo['dut1'], self.local_topo['dut1_as'],
'ipv4', 'unicast', "static", config='yes', cli_type=bgp_cli_type)
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut2'], family='ipv4',
select=['network', 'as_path'])
output = [x for x in output if "{}".format(self.local_topo['dut1_as']) in x['as_path']]
list_of_learned_routes_on_dut2_by_dut1 = list(x['network'] for x in output)
st.log('List of static route on dut1' + str(list_of_static_network_on_dut1))
st.log('List of network redistributed to dut2 from dut1' + str(list_of_learned_routes_on_dut2_by_dut1))
if set(list_of_static_network_on_dut1).issubset(set(list_of_learned_routes_on_dut2_by_dut1)):
st.log('static on dut1 is subset of dut1 learned route on dut2')
result = True
else:
st.log('static on dut1 is not a subset of dut1 learned route on dut2')
result = False
bgpapi.config_address_family_redistribute(self.local_topo['dut1'], self.local_topo['dut1_as'],
'ipv4', 'unicast', "static", config='no', cli_type=bgp_cli_type)
ipapi.delete_static_route(self.local_topo['dut1'], self.local_topo['dut1_outif'], '100.1.1.1/32', family='ipv4')
if result:
st.report_pass("operation_successful")
else:
st.report_fail("operation_failed")
@pytest.mark.community
@pytest.mark.community_pass
def test_distribute_list_in_ipv4(self, bgp_ipvx_route_adv_filter_fixture):
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut2'], family='ipv4',
match={'next_hop': self.local_topo['dut1_addr_ipv4']},
select=['network', 'as_path'])
output = [x for x in output if "{}".format(self.local_topo['dut1_as']) in x['as_path']]
list_of_learned_routes_on_dut2_by_dut1 = list(x['network'] for x in output)
if '102.1.1.0/24' in list_of_learned_routes_on_dut2_by_dut1:
st.log("route learnt")
else:
st.log("route not learnt")
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], addr_family='ipv4',
config='yes',
neighbor=self.local_topo['dut1_addr_ipv4'],
config_type_list=["distribute_list"], distribute_list='11', diRection='in',cli_type=vtysh_cli_type)
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut2'], family='ipv4',
match={'next_hop': self.local_topo['dut1_addr_ipv4']},
select=['network', 'as_path'])
output = [x for x in output if "{}".format(self.local_topo['dut1_as']) in x['as_path']]
list_of_learned_routes_on_dut2_by_dut1 = list(x['network'] for x in output)
if '102.1.1.0/24' in list_of_learned_routes_on_dut2_by_dut1:
st.log("route not suppressed")
result = False
else:
st.log("route suppressed")
result = True
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], addr_family='ipv4',
config='no',
neighbor=self.local_topo['dut1_addr_ipv4'],
config_type_list=["distribute_list"], distribute_list='11', diRection='in',cli_type=vtysh_cli_type)
if result:
st.report_pass("operation_successful")
else:
st.report_fail("operation_failed")
@pytest.mark.community
@pytest.mark.community_pass
def test_filter_list_in_ipv4(self, bgp_ipvx_route_adv_filter_fixture):
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut2'], family='ipv4',
select=['network', 'as_path'])
output = [x for x in output if "{}".format(self.local_topo['dut1_as']) in x['as_path']]
list_of_learned_routes_on_dut2_by_dut1 = list(x['network'] for x in output)
if len(list_of_learned_routes_on_dut2_by_dut1):
st.log("route received for as {}".format(self.local_topo['dut1_as']))
else:
st.log("route not learnt")
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], addr_family='ipv4',
config='yes',
neighbor=self.local_topo['dut1_addr_ipv4'],
config_type_list=["filter_list"], filter_list='FILTER', diRection='in',cli_type=vtysh_cli_type)
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut2'], family='ipv4',
match={'next_hop': self.local_topo['dut1_addr_ipv4']},
select=['network', 'as_path'])
output = [x for x in output if "{}".format(self.local_topo['dut1_as']) in x['as_path']]
list_of_learned_routes_on_dut2_by_dut1 = list(x['network'] for x in output)
if len(list_of_learned_routes_on_dut2_by_dut1) != 0:
st.log("still having routes from as {}".format(self.local_topo['dut1_as']))
result = False
else:
st.log("no routes from as {}".format(self.local_topo['dut1_as']))
result = True
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], addr_family='ipv4',
config='no',
neighbor=self.local_topo['dut1_addr_ipv4'],
config_type_list=["filter_list"], filter_list='FILTER', diRection='in',cli_type=vtysh_cli_type)
if result:
st.report_pass("operation_successful")
else:
st.report_fail("operation_failed")
@pytest.mark.community
@pytest.mark.community_pass
def test_prefix_list_out_ipv4(self, bgp_ipvx_route_adv_filter_fixture):
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut1'], family='ipv4',
match={'next_hop': self.local_topo['dut2_addr_ipv4']},
select=['network', 'as_path'])
output = [x for x in output if "{}".format(self.local_topo['dut2_as']) in x['as_path']]
list_of_learned_routes_on_dut1_by_dut2 = list(x['network'] for x in output)
if '202.1.1.0/24' in list_of_learned_routes_on_dut1_by_dut2:
st.log("route learnt")
else:
st.log("route not learnt")
if bgp_cli_type == "klish":
ipapi.config_ip_prefix_list(self.local_topo['dut2'], 'PREFIXOUT', '202.1.1.0/24', family="ipv4", action="deny", cli_type=bgp_cli_type)
ipapi.config_ip_prefix_list(self.local_topo['dut2'], 'PREFIXOUT', 'any', family="ipv4", action="permit", cli_type=bgp_cli_type)
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], addr_family='ipv4',
config='yes',
neighbor=self.local_topo['dut1_addr_ipv4'],
config_type_list=["prefix_list"], prefix_list='PREFIXOUT', diRection='out',cli_type=bgp_cli_type)
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut1'], family='ipv4',
match={'next_hop': self.local_topo['dut2_addr_ipv4']},
select=['network', 'as_path'])
output = [x for x in output if "{}".format(self.local_topo['dut2_as']) in x['as_path']]
list_of_learned_routes_on_dut1_by_dut2 = list(x['network'] for x in output)
if '202.1.1.0/24' in list_of_learned_routes_on_dut1_by_dut2:
st.log("route not suppressed")
result = False
else:
st.log("route suppressed")
result = True
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], addr_family='ipv4',
config='no',
neighbor=self.local_topo['dut1_addr_ipv4'],
config_type_list=["prefix_list"], prefix_list='PREFIXOUT', diRection='out',cli_type=bgp_cli_type)
if result:
st.report_pass("operation_successful")
else:
st.report_fail("operation_failed")
@pytest.mark.community
@pytest.mark.community_pass
def test_default_originate_ipv4(self, bgp_ipvx_route_adv_filter_fixture):
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut1'], family='ipv4',
match={'next_hop': self.local_topo['dut2_addr_ipv4']},
select=['network', 'as_path'])
output = [x for x in output if "{}".format(self.local_topo['dut2_as']) in x['as_path']]
list_of_learned_routes_on_dut1_by_dut2 = list(x['network'] for x in output)
if '0.0.0.0/0' in list_of_learned_routes_on_dut1_by_dut2:
st.log("route learnt")
else:
st.log("route not learnt")
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], addr_family='ipv4',
config='yes',
neighbor=self.local_topo['dut1_addr_ipv4'],
config_type_list=["default_originate"], routeMap='UseGlobal',cli_type=vtysh_cli_type)
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut1'], family='ipv4',
match={'next_hop': self.local_topo['dut2_addr_ipv4']},
select=['network', 'as_path'])
output = [x for x in output if "{}".format(self.local_topo['dut2_as']) in x['as_path']]
list_of_learned_routes_on_dut1_by_dut2 = list(x['network'] for x in output)
if '0.0.0.0/0' in list_of_learned_routes_on_dut1_by_dut2:
st.log("default route advertised")
result = True
else:
st.log("default route not advertised")
result = False
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], addr_family='ipv4',
config='no',
neighbor=self.local_topo['dut1_addr_ipv4'],
config_type_list=["default_originate"], routeMap='UseGlobal',cli_type=vtysh_cli_type)
if result:
st.report_pass("operation_successful")
else:
st.report_fail("operation_failed")
@pytest.mark.community
@pytest.mark.community_pass
def test_route_map_in_ipv4(self, bgp_ipvx_route_adv_filter_fixture):
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut2'], family='ipv4',
match={'next_hop': self.local_topo['dut1_addr_ipv4']},
select=['network', 'local_pref', 'metric'])
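# Apply the SETPROPS route-map inbound; based on the checks below it is expected (from the fixture's base
# config) to set metric 400 on 102.1.1.0/24 and local-preference 200 on 101.1.1.0/24.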
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], addr_family='ipv4',
config='yes',
neighbor=self.local_topo['dut1_addr_ipv4'],
config_type_list=["routeMap"], routeMap='SETPROPS', diRection='in',cli_type=vtysh_cli_type)
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut2'], family='ipv4',
match={'next_hop': self.local_topo['dut1_addr_ipv4']},
select=['network', 'local_pref', 'metric'])
metric = [x for x in output if x['network'] == '102.1.1.0/24'][0]['metric']
local_pref = [x for x in output if x['network'] == '101.1.1.0/24'][0]['local_pref']
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], addr_family='ipv4',
config='no',
neighbor=self.local_topo['dut1_addr_ipv4'],
config_type_list=["routeMap"], routeMap='SETPROPS', diRection='in',cli_type=vtysh_cli_type)
if metric == '400' and local_pref == '200':
st.report_pass("operation_successful")
else:
st.report_fail("operation_failed")
@pytest.mark.community
@pytest.mark.community_pass
def test_redistribute_connected_ipv6(self, bgp_ipvx_route_adv_filter_fixture):
bgpapi.config_address_family_redistribute(self.local_topo['dut1'], self.local_topo['dut1_as'],
'ipv6', 'unicast', "connected", config='yes', cli_type=bgp_cli_type)
output = ipapi.fetch_ip_route(self.local_topo['dut1'], family='ipv6', match={'type': 'C'},
select=['ip_address'])
output = [x for x in output if not x['ip_address'].startswith('fe80')]
list_of_connected_network_on_dut1 = list(x['ip_address'] for x in output)
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut2'], family='ipv6',
select=['network', 'as_path'])
output = [x for x in output if "{}".format(self.local_topo['dut1_as']) in x['as_path']]
list_of_learned_routes_on_dut2_by_dut1 = list(x['network'] for x in output)
st.log('List of connected network on dut1')
st.log(list_of_connected_network_on_dut1)
st.log('List of network redistributed to dut2 from dut1')
st.log(list_of_learned_routes_on_dut2_by_dut1)
if set(list_of_connected_network_on_dut1).issubset(set(list_of_learned_routes_on_dut2_by_dut1)):
result = True
else:
result = False
bgpapi.config_address_family_redistribute(self.local_topo['dut1'], self.local_topo['dut1_as'],
'ipv6', 'unicast', "connected", config='no', cli_type=bgp_cli_type)
if result:
st.report_pass("operation_successful")
else:
st.report_fail("operation_failed")
def test_redistribute_static_ipv6(self, bgp_ipvx_route_adv_filter_fixture):
ipapi.create_static_route(self.local_topo['dut1'], self.local_topo['dut1_outif'], '100:1::1:1/128',
family='ipv6')
output = ipapi.fetch_ip_route(self.local_topo['dut1'], family='ipv6', match={'type': 'S'},
select=['ip_address'])
list_of_static_network_on_dut1 = list(x['ip_address'] for x in output)
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut2'], family='ipv6',
select=['network', 'as_path'])
output = [x for x in output if "{}".format(self.local_topo['dut1_as']) in x['as_path']]
list_of_learned_routes_on_dut2_by_dut1 = list(x['network'] for x in output)
st.log('List of static route on dut1' + str(list_of_static_network_on_dut1))
st.log('List of network redistributed to dut2 from dut1' + str(list_of_learned_routes_on_dut2_by_dut1))
bgpapi.config_address_family_redistribute(self.local_topo['dut1'], self.local_topo['dut1_as'],
'ipv6', 'unicast', "static", config='yes', cli_type=bgp_cli_type)
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut2'], family='ipv6',
select=['network', 'as_path'])
output = [x for x in output if "{}".format(self.local_topo['dut1_as']) in x['as_path']]
list_of_learned_routes_on_dut2_by_dut1 = list(x['network'] for x in output)
st.log('List of static route on dut1' + str(list_of_static_network_on_dut1))
st.log('List of network redistributed to dut2 from dut1' + str(list_of_learned_routes_on_dut2_by_dut1))
if set(list_of_static_network_on_dut1).issubset(set(list_of_learned_routes_on_dut2_by_dut1)):
st.log('static on dut1 is subset of dut1 learned route on dut2')
result = True
else:
st.log('static on dut1 is not a subset of dut1 learned route on dut2')
result = False
bgpapi.config_address_family_redistribute(self.local_topo['dut1'], self.local_topo['dut1_as'],
'ipv6', 'unicast', "static", config='no', cli_type=bgp_cli_type)
ipapi.delete_static_route(self.local_topo['dut1'], self.local_topo['dut1_outif'], '100:1::1:1/128',
family='ipv6')
if result:
st.report_pass("operation_successful")
else:
st.report_fail("operation_failed")
@pytest.mark.community
@pytest.mark.community_pass
def test_distribute_list_in_ipv6(self, bgp_ipvx_route_adv_filter_fixture):
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut2'], family='ipv6',
match={'next_hop': self.local_topo['dut1_addr_ipv6']},
select=['network', 'as_path'])
output = [x for x in output if "{}".format(self.local_topo['dut1_as']) in x['as_path']]
list_of_learned_routes_on_dut2_by_dut1 = list(x['network'] for x in output)
if '102:1::/64' in list_of_learned_routes_on_dut2_by_dut1:
st.log("route learnt")
else:
st.log("route not learnt")
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], addr_family='ipv6',
config='yes',
neighbor=self.local_topo['dut1_addr_ipv6'],
config_type_list=["distribute_list"], distribute_list='12', diRection='in',cli_type=vtysh_cli_type)
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut2'], family='ipv6',
match={'next_hop': self.local_topo['dut1_addr_ipv6']},
select=['network', 'as_path'])
output = [x for x in output if "{}".format(self.local_topo['dut1_as']) in x['as_path']]
list_of_learned_routes_on_dut2_by_dut1 = list(x['network'] for x in output)
if '102:1::/64' in list_of_learned_routes_on_dut2_by_dut1:
st.log("route not suppressed")
result = False
else:
st.log("route suppressed")
result = True
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], addr_family='ipv6',
config='no',
neighbor=self.local_topo['dut1_addr_ipv6'],
config_type_list=["distribute_list"], distribute_list='12', diRection='in',cli_type=vtysh_cli_type)
if result:
st.report_pass("operation_successful")
else:
st.report_fail("operation_failed")
@pytest.mark.community
@pytest.mark.community_pass
def test_filter_list_in_ipv6(self, bgp_ipvx_route_adv_filter_fixture):
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut2'], family='ipv6',
select=['network', 'as_path'])
output = [x for x in output if "{}".format(self.local_topo['dut1_as']) in x['as_path']]
list_of_learned_routes_on_dut2_by_dut1 = list(x['network'] for x in output)
if len(list_of_learned_routes_on_dut2_by_dut1):
st.log("route received for as {}".format(self.local_topo['dut1_as']))
else:
st.log("route not learnt")
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], addr_family='ipv6',
config='yes',
neighbor=self.local_topo['dut1_addr_ipv6'],
config_type_list=["filter_list"], filter_list='FILTER', diRection='in',cli_type=vtysh_cli_type)
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut2'], family='ipv6',
match={'next_hop': self.local_topo['dut1_addr_ipv6']},
select=['network', 'as_path'])
output = [x for x in output if "{}".format(self.local_topo['dut1_as']) in x['as_path']]
list_of_learned_routes_on_dut2_by_dut1 = list(x['network'] for x in output)
if len(list_of_learned_routes_on_dut2_by_dut1) != 0:
st.log("still having routes from as {}".format(self.local_topo['dut1_as']))
result = False
else:
st.log("no routes from as {}".format(self.local_topo['dut1_as']))
result = True
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], addr_family='ipv6',
config='no',
neighbor=self.local_topo['dut1_addr_ipv6'],
config_type_list=["filter_list"], filter_list='FILTER', diRection='in',cli_type=vtysh_cli_type)
if result:
st.report_pass("operation_successful")
else:
st.report_fail("operation_failed")
@pytest.mark.community
@pytest.mark.community_pass
def test_prefix_list_out_ipv6(self, bgp_ipvx_route_adv_filter_fixture):
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut1'], family='ipv6',
match={'next_hop': self.local_topo['dut2_addr_ipv6']},
select=['network', 'as_path'])
output = [x for x in output if "{}".format(self.local_topo['dut2_as']) in x['as_path']]
list_of_learned_routes_on_dut1_by_dut2 = list(x['network'] for x in output)
if '202:1::/64' in list_of_learned_routes_on_dut1_by_dut2:
st.log("route learnt")
else:
st.log("route not learnt")
if bgp_cli_type == "klish":
ipapi.config_ip_prefix_list(self.local_topo['dut2'], 'PREFIXOUT6', '202:1::/64', family="ipv6", action="deny", cli_type=bgp_cli_type)
ipapi.config_ip_prefix_list(self.local_topo['dut2'], 'PREFIXOUT6', 'any', family="ipv6", action="permit", cli_type=bgp_cli_type)
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], addr_family='ipv6',
config='yes',
neighbor=self.local_topo['dut1_addr_ipv6'],
config_type_list=["prefix_list"], prefix_list='PREFIXOUT6', diRection='out',cli_type=bgp_cli_type)
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut1'], family='ipv6',
match={'next_hop': self.local_topo['dut2_addr_ipv6']},
select=['network', 'as_path'])
output = [x for x in output if "{}".format(self.local_topo['dut2_as']) in x['as_path']]
list_of_learned_routes_on_dut1_by_dut2 = list(x['network'] for x in output)
if '202:1::/64' in list_of_learned_routes_on_dut1_by_dut2:
st.log("route not suppressed")
result = False
else:
st.log("route suppressed")
result = True
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], addr_family='ipv6',
config='no',
neighbor=self.local_topo['dut1_addr_ipv6'],
config_type_list=["prefix_list"], prefix_list='PREFIXOUT6', diRection='out',cli_type=bgp_cli_type)
if result:
st.report_pass("operation_successful")
else:
st.report_fail("operation_failed")
@pytest.mark.community
@pytest.mark.community_pass
def test_filter_list_out_ipv6(self, bgp_ipvx_route_adv_filter_fixture):
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut1'], family='ipv6',
select=['network', 'as_path'])
output = [x for x in output if "{}".format(self.local_topo['dut2_as']) in x['as_path']]
list_of_learned_routes_on_dut2_by_dut1 = list(x['network'] for x in output)
if len(list_of_learned_routes_on_dut2_by_dut1):
st.log("route received for as {}".format(self.local_topo['dut2_as']))
else:
st.log("route not learnt")
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], addr_family='ipv6',
config='yes',
neighbor=self.local_topo['dut1_addr_ipv6'],
config_type_list=["filter_list"], filter_list='FILTER6', diRection='out',cli_type=vtysh_cli_type)
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut1'], family='ipv6',
match={'next_hop': self.local_topo['dut2_addr_ipv6']},
select=['network', 'as_path'])
output = [x for x in output if "{}".format(self.local_topo['dut2_as']) in x['as_path']]
list_of_learned_routes_on_dut2_by_dut1 = list(x['network'] for x in output)
if len(list_of_learned_routes_on_dut2_by_dut1) != 0:
st.log("still having routes from as {}".format(self.local_topo['dut2_as']))
result = False
else:
st.log("no routes from as {}".format(self.local_topo['dut2_as']))
result = True
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], addr_family='ipv6',
config='no',
neighbor=self.local_topo['dut1_addr_ipv6'],
config_type_list=["filter_list"], filter_list='FILTER6', diRection='out',cli_type=vtysh_cli_type)
if result:
st.report_pass("operation_successful")
else:
st.report_fail("operation_failed")
def test_default_originate_ipv6(self, bgp_ipvx_route_adv_filter_fixture):
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut1'], family='ipv6',
match={'next_hop': self.local_topo['dut2_addr_ipv6']},
select=['network', 'as_path'])
output = [x for x in output if "{}".format(self.local_topo['dut2_as']) in x['as_path']]
list_of_learned_routes_on_dut1_by_dut2 = list(x['network'] for x in output)
if '::/0' in list_of_learned_routes_on_dut1_by_dut2:
st.log("route learnt")
else:
st.log("route not learnt")
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], addr_family='ipv6',
config='yes',
neighbor=self.local_topo['dut1_addr_ipv6'],
config_type_list=["default_originate"], routeMap='UseGlobal',cli_type=bgp_cli_type)
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut1'], family='ipv6',
match={'next_hop': self.local_topo['dut2_addr_ipv6']},
select=['network', 'as_path'])
output = [x for x in output if "{}".format(self.local_topo['dut2_as']) in x['as_path']]
list_of_learned_routes_on_dut1_by_dut2 = list(x['network'] for x in output)
if '::/0' in list_of_learned_routes_on_dut1_by_dut2:
st.log("default route advertised")
result = True
else:
st.log("default route not advertised")
result = False
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], addr_family='ipv6',
config='no',
neighbor=self.local_topo['dut1_addr_ipv6'],
config_type_list=["default_originate"], routeMap='UseGlobal',cli_type=bgp_cli_type)
if result:
st.report_pass("operation_successful")
else:
st.report_fail("operation_failed")
@pytest.mark.community
@pytest.mark.community_pass
def test_route_map_in_ipv6(self, bgp_ipvx_route_adv_filter_fixture):
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut2'], family='ipv6',
match={'next_hop': self.local_topo['dut1_addr_ipv6']},
select=['network', 'local_pref', 'metric'])
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], addr_family='ipv6',
config='yes',
neighbor=self.local_topo['dut1_addr_ipv6'],
config_type_list=["routeMap"], routeMap='SETPROPS6', diRection='in',cli_type=vtysh_cli_type)
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut2'], family='ipv6',
match={'next_hop': self.local_topo['dut1_addr_ipv6']},
select=['network', 'local_pref', 'metric'])
metric = bgplib.get_route_attribute(output, 'metric', network='102:1::/64')
local_pref = bgplib.get_route_attribute(output, 'local_pref', network='101:1::/64')
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], addr_family='ipv6',
config='yes',
neighbor=self.local_topo['dut1_addr_ipv6'],
config_type_list=["routeMap"], routeMap='UseGlobal', diRection='in',cli_type=vtysh_cli_type)
if metric == '6400' and local_pref == '6200':
st.report_pass("operation_successful")
else:
st.report_fail("operation_failed")
# testcase: FtOtSoRtBgp4Fn016, Verify the functioning of a route-map to filter incoming IPv4 prefixes
# on community, learnt from dynamic neighbors
@pytest.mark.bgp_rtmap_comm
@pytest.mark.community
@pytest.mark.community_pass
def test_bgp_route_map_with_community(self, bgp_ipvx_route_adv_filter_fixture):
result = True
ipapi.config_route_map(dut=self.local_topo['dut2'], route_map='rmap1', config='yes',
sequence='10', community='100:100')
ipapi.create_static_route(dut=self.local_topo['dut1'], next_hop='blackhole', static_ip='40.1.1.1/32')
self.configure_base_for_filter_prefix_on_community('leaf_spine', 'yes')
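# Flow: dut1 blackholes 40.1.1.1/32 and redistributes static into BGP; dut2 applies rmap1 inbound on the
# leaf_spine peer-group, which (per the route-map configured above) is expected to carry community 100:100 on the prefix.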
# Check the show command in leaf
output = bgpapi.show_bgp_ipvx_prefix(self.local_topo['dut2'], prefix="40.1.1.1",
masklen=32, family='ipv4')
st.log(output)
# there is only one record
for x in output:
if ((x['peerip'].find('11.1.1.2')) != -1) and (x['community'] == '100:100'):
result = True
else:
result = False
self.configure_base_for_filter_prefix_on_community('leaf_spine', 'no')
ipapi.config_route_map(dut=self.local_topo['dut2'], route_map='rmap1', config='no',
community='100:100')
ipapi.delete_static_route(dut=self.local_topo['dut1'], next_hop='blackhole', static_ip='40.1.1.1/32')
if result:
st.report_pass("operation_successful")
else:
st.report_fail("operation_failed")
# testcase: FtOtSoRtBgp4Fn014, Verify that BGP peering with dynamic neighbors is established with the update-source option.
@pytest.mark.bgp_nbr_updsrc
@pytest.mark.community
@pytest.mark.community_pass
def test_bgp_ebgp4_nbr_update_source(self, bgp_ipvx_route_adv_filter_fixture):
result = True
# configure update source for both the duts
# Note: currently the leaf-spine topology has a fixed neighbor formation (peer-groups leaf_spine and spine_leaf).
# Since in SONiC the neighbor must be the same as the update-source, we use this neighbor as the source;
# basically, update-source is applied on the same neighbor that was created by the leaf-spine topology.
bgpapi.config_bgp(dut=self.local_topo['dut1'], local_as=self.local_topo['dut1_as'],
neighbor=self.local_topo['dut2_addr_ipv4'], config='yes',
update_src=self.local_topo['dut1_addr_ipv4'], config_type_list=["update_src"],cli_type=bgp_cli_type)
bgpapi.config_bgp(dut=self.local_topo['dut1'], local_as=self.local_topo['dut1_as'],
neighbor=self.local_topo['dut2_addr_ipv4'], config='yes',
config_type_list=["ebgp_mhop"], ebgp_mhop='2',cli_type=bgp_cli_type)
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'],
neighbor=self.local_topo['dut1_addr_ipv4'], config='yes',
update_src=self.local_topo['dut2_addr_ipv4'],
config_type_list=["update_src"],cli_type=bgp_cli_type)
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'],
neighbor=self.local_topo['dut1_addr_ipv4'], config='yes',
config_type_list=["ebgp_mhop"], ebgp_mhop='2',cli_type=bgp_cli_type)
# clear bgp neighbors before checking for neighbor state again.
bgpapi.clear_ip_bgp_vtysh(dut=self.local_topo['dut1'], value="*")
bgpapi.clear_ip_bgp_vtysh(dut=self.local_topo['dut2'], value="*")
if not utils.poll_wait(bgpapi.verify_bgp_summary, 30, self.local_topo['dut1'], family='ipv4',
neighbor=self.local_topo['dut2_addr_ipv4'], state='Established'):
bgplib.show_bgp_neighbors([self.local_topo['dut1'], self.local_topo['dut2']], af='ipv4')
st.error("BGP Neighbor failed to Establish between DUT1 and DUT2")
st.log("{} - Neighbor {} is failed to Establish".format(self.local_topo['dut1'],
self.local_topo['dut2_addr_ipv4']))
result = False
# cleanup the testcase
bgpapi.config_bgp(dut=self.local_topo['dut1'], local_as=self.local_topo['dut1_as'],
neighbor=self.local_topo['dut2_addr_ipv4'], config='no',
update_src=self.local_topo['dut1_addr_ipv4'],
config_type_list=["update_src"],cli_type=bgp_cli_type)
bgpapi.config_bgp(dut=self.local_topo['dut1'], local_as=self.local_topo['dut1_as'],
neighbor=self.local_topo['dut2_addr_ipv4'], config='no',
config_type_list=["ebgp_mhop"], ebgp_mhop='2',cli_type=bgp_cli_type)
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'],
neighbor=self.local_topo['dut1_addr_ipv4'], config='no',
update_src=self.local_topo['dut2_addr_ipv4'],
config_type_list=["update_src"],cli_type=bgp_cli_type)
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'],
neighbor=self.local_topo['dut1_addr_ipv4'], config='no',
config_type_list=["ebgp_mhop"], ebgp_mhop='2',cli_type=bgp_cli_type)
if result:
st.report_pass("operation_successful")
else:
st.report_fail("operation_failed")
# testcase: FtOtSoRtBgp4Fn015, Verify eBGP authentication.
@pytest.mark.bgp_nbr_auth
def test_bgp_ebgp4_nbr_authentication(self, bgp_ipvx_route_adv_filter_fixture):
result = True
# configure password for both the duts
bgpapi.config_bgp(dut=self.local_topo['dut1'], local_as=self.local_topo['dut1_as'],
neighbor=self.local_topo['dut2_addr_ipv4'], config='yes', password='broadcom',
config_type_list=["pswd"],cli_type=bgp_cli_type)
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'],
neighbor=self.local_topo['dut1_addr_ipv4'], config='yes', password='broadcom',
config_type_list=["pswd"],cli_type=bgp_cli_type)
# clear bgp neighbors before checking for neighbor state again.
bgpapi.clear_ip_bgp_vtysh(dut=self.local_topo['dut1'], value="*")
bgpapi.clear_ip_bgp_vtysh(dut=self.local_topo['dut2'], value="*")
if not utils.poll_wait(bgpapi.verify_bgp_summary, 30, self.local_topo['dut1'], family='ipv4',
neighbor=self.local_topo['dut2_addr_ipv4'], state='Established'):
bgplib.show_bgp_neighbors([self.local_topo['dut1'], self.local_topo['dut2']], af='ipv4')
st.error("BGP Neighbor failed to Establish between DUT1 and DUT2")
st.log("{} - Neighbor {} is failed to Establish".format(self.local_topo['dut1'],
self.local_topo['dut2_addr_ipv4']))
result = False
# Verify neighbors formation after rebooting Dut1
st.log("Verification of neighbor formation after reboot.")
# The below API changes the routing mode to split and saves the SONiC config.
bgpapi.enable_docker_routing_config_mode(dut=self.local_topo['dut1'])
st.vtysh(self.local_topo['dut1'], "copy running-config startup-config")
st.reboot(self.local_topo['dut1'], 'fast')
st.wait(3)
if not utils.poll_wait(bgpapi.verify_bgp_summary, 30, self.local_topo['dut1'], family='ipv4',
neighbor=self.local_topo['dut2_addr_ipv4'], state='Established'):
bgplib.show_bgp_neighbors([self.local_topo['dut1'], self.local_topo['dut2']], af='ipv4')
st.error("BGP Neighbor failed to Establish between DUT1 and DUT2")
st.log("{} - Neighbor {} is failed to Establish".format(self.local_topo['dut1'],
self.local_topo['dut2_addr_ipv4']))
result = False
# cleanup the testcase
bgpapi.config_bgp(dut=self.local_topo['dut1'], local_as=self.local_topo['dut1_as'],
neighbor=self.local_topo['dut2_addr_ipv4'], config='no', password='broadcom',
config_type_list=["pswd"],cli_type=bgp_cli_type)
bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'],
neighbor=self.local_topo['dut1_addr_ipv4'], config='no', password='broadcom',
config_type_list=["pswd"],cli_type=bgp_cli_type)
if result:
st.report_pass("operation_successful")
else:
st.report_fail("operation_failed")
# testcase: FtOtSoRtBgp4Fn015, Verify eBGP traffic for ipv6.
@pytest.mark.bgp_ebgp6_traffic
def test_bgp_ebgp6_traffic(self, bgp_ipvx_route_adv_filter_fixture):
result = True
TG_D1 = topo.tg_dut_list_name[0]
TG_D2 = topo.tg_dut_list_name[1]
tg_ob = topo['T1{}P1_tg_obj'.format(TG_D1)]
bgp_handle = topo['T1{}P1_ipv6_tg_bh'.format(TG_D1)]
tg_d1_ip = topo['T1{}P1_ipv6'.format(TG_D1)]
tg_d2_ip = topo['T1{}P1_ipv6'.format(TG_D2)]
tc_fail_flag = 0
spine_as = int(bgplib.data['spine_as'])
st.log("Advertising 500 IPv6 Routes from TG connected to DUT1")
bgp_route = tg_ob.tg_emulation_bgp_route_config(handle=bgp_handle['handle'], mode='add', ip_version='6',
num_routes='500', prefix='1001::1', as_path='as_seq:1')
bgp_ctrl = tg_ob.tg_emulation_bgp_control(handle=bgp_handle['handle'], mode='start')
# Sleep for the update delay timer and then check the route count on the neighbour
st.wait(15)
if not utils.poll_wait(bgpapi.verify_bgp_neighborship, 120, topo.dut_list[0], family="ipv6", shell="sonic",
neighbor=self.local_topo['dut2_addr_ipv6'], state='Established', asn=self.local_topo['dut1_as']):
utils.exec_all(True, [[bgpapi.show_bgp_ipv6_neighbor_vtysh, topo.dut_list[0]],
[bgpapi.show_bgp_ipv6_neighbor_vtysh, topo.dut_list[1]]])
st.error("BGP Neighbor failed to Establish between DUT1 and TG")
st.log("{} - Neighbor {} is failed to Establish".format(topo.dut_list[0],
self.local_topo['dut2_addr_ipv6']))
result = False
bgp_summary_spine_after_update_timer = bgpapi.show_bgp_ipv6_summary(topo.dut_list[1])
rib_entries_after_update_timer = bgp_summary_spine_after_update_timer[0]['ribentries']
st.log('RIB Entries after update delay timer expiry : {}'.format(rib_entries_after_update_timer))
if int(rib_entries_after_update_timer) < 500:
st.error('Routes are not advertised to peer DUT, even after the update delay timer expiry')
tc_fail_flag = 1
st.log("Initiating the Ipv6 traffic for those Routes from TG connected to DUT2")
src_handle = 'handle'
if tg_ob.tg_type == 'ixia':
src_handle = 'ipv6_handle'
tr1 = tg_ob.tg_traffic_config(port_handle=topo['T1{}P1_ipv6_tg_ph'.format(TG_D2)],
emulation_src_handle=topo['T1{}P1_ipv6_tg_ih'.format(TG_D2)][src_handle],
emulation_dst_handle=bgp_route['handle'], circuit_endpoint_type='ipv6',
mode='create',
transmit_mode='single_burst', pkts_per_burst='2000', length_mode='fixed',
rate_pps=1000)
stream_id1 = tr1['stream_id']
tg_ob.tg_traffic_control(action='run', handle=stream_id1)
st.wait(20)
tg1_stats = tgapi.get_traffic_stats(tg_ob, port_handle=topo["T1{}P1_ipv6_tg_ph".format(TG_D1)])
tg2_stats = tgapi.get_traffic_stats(tg_ob, port_handle=topo["T1{}P1_ipv6_tg_ph".format(TG_D2)])
if not (int(tg2_stats.tx.total_packets) and int(tg1_stats.rx.total_packets)):
st.error('Received ZERO stats.')
tc_fail_flag = 1
else:
percent_rx = float(int(tg1_stats.rx.total_packets) - int(tg2_stats.tx.total_packets)) / int(
tg2_stats.tx.total_packets) * 100
st.log('tg1_stats.rx.total_packets : {}'.format(tg1_stats.rx.total_packets))
st.log('tg2_stats.tx.total_packets : {}'.format(tg2_stats.tx.total_packets))
st.log('percent_rx : {}'.format(percent_rx))
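# Pass criterion: at least 95% of the transmitted packets must be received (up to 5% loss tolerated).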
if int(tg1_stats.rx.total_packets) < int(tg2_stats.tx.total_packets)*0.95:
tc_fail_flag = 1
tg_ob.tg_emulation_bgp_control(handle=bgp_handle['handle'], mode='stop')
if tc_fail_flag:
st.report_fail("traffic_verification_failed")
st.report_pass('test_case_passed')
TG_D1 = topo.tg_dut_list_name[0]
TG_D2 = topo.tg_dut_list_name[1]
tg_ob = topo['T1{}P1_tg_obj'.format(TG_D2)]
bgp_handle = topo['T1{}P1_ipv6_tg_bh'.format(TG_D2)]
tc_fail_flag = 0
leaf_as = int(bgplib.data['leaf_as'])
st.log("Advertising 500 IPv6 Routes from TG connected to DUT2")
bgp_route = tg_ob.tg_emulation_bgp_route_config(handle=bgp_handle['handle'], mode='add', ip_version='6',
num_routes='500', prefix='1002::1', as_path='as_seq:2')
bgp_ctrl = tg_ob.tg_emulation_bgp_control(handle=bgp_handle['handle'], mode='start')
# Check for route count in neighbour, before update delay timer expiry
# Sleep for the update delay timer and then check the route count on the neighbour
st.wait(15)
if not utils.poll_wait(bgpapi.verify_bgp_neighborship, 120, topo.dut_list[0], family="ipv6", shell="sonic",
neighbor=self.local_topo['dut2_addr_ipv6'], state='Established',
asn=self.local_topo['dut1_as']):
utils.exec_all(True, [[bgpapi.show_bgp_ipv6_neighbor_vtysh, topo.dut_list[0]],
[bgpapi.show_bgp_ipv6_neighbor_vtysh, topo.dut_list[1]]])
st.error("BGP Neighbor failed to Establish between DUT1 and TG")
st.log("{} - Neighbor {} is failed to Establish".format(topo.dut_list[0],
self.local_topo['dut2_addr_ipv6']))
result = False
bgp_summary_spine_after_update_timer = bgpapi.show_bgp_ipv6_summary(topo.dut_list[0])
rib_entries_after_update_timer = bgp_summary_spine_after_update_timer[0]['ribentries']
st.log('RIB Entries after update delay timer expiry : {}'.format(rib_entries_after_update_timer))
if int(rib_entries_after_update_timer) < 1000:
st.error('Routes are not advertised to peer DUT, even after the update delay timer expiry')
tc_fail_flag = 1
st.log("Initiating the Ipv6 traffic for those Routes from TG connected to DUT1")
src_handle = 'handle'
if tg_ob.tg_type == 'ixia':
src_handle = 'ipv6_handle'
tr1 = tg_ob.tg_traffic_config(port_handle=topo['T1{}P1_ipv6_tg_ph'.format(TG_D1)],
emulation_src_handle=topo['T1{}P1_ipv6_tg_ih'.format(TG_D1)][src_handle],
emulation_dst_handle=bgp_route['handle'], circuit_endpoint_type='ipv6',
mode='create',
transmit_mode='single_burst', pkts_per_burst='2000', length_mode='fixed',
rate_pps=1000)
stream_id1 = tr1['stream_id']
tg_ob.tg_traffic_control(action='run', handle=stream_id1)
st.wait(20)
tg1_stats = tgapi.get_traffic_stats(tg_ob, port_handle=topo["T1{}P1_ipv6_tg_ph".format(TG_D2)])
tg2_stats = tgapi.get_traffic_stats(tg_ob, port_handle=topo["T1{}P1_ipv6_tg_ph".format(TG_D1)])
if not (int(tg2_stats.tx.total_packets) and int(tg1_stats.rx.total_packets)):
st.error('Received ZERO stats.')
tc_fail_flag = 1
else:
percent_rx = float(int(tg1_stats.rx.total_packets) - int(tg2_stats.tx.total_packets)) / int(
tg2_stats.tx.total_packets) * 100
st.log('tg1_stats.rx.total_packets : {}'.format(tg1_stats.rx.total_packets))
st.log('tg2_stats.tx.total_packets : {}'.format(tg2_stats.tx.total_packets))
st.log('percent_rx : {}'.format(percent_rx))
if int(tg1_stats.rx.total_packets) < int(tg2_stats.tx.total_packets)*0.95:
tc_fail_flag = 1
tg_ob.tg_emulation_bgp_control(handle=bgp_handle['handle'], mode='stop')
if tc_fail_flag:
st.report_fail("traffic_verification_failed")
st.report_pass('test_case_passed')
        # The API below changes the routing mode to split and saves the SONiC config.
bgpapi.enable_docker_routing_config_mode(dut=topo.dut_list[0])
st.vtysh(topo.dut_list[0], "copy running-config startup-config")
st.reboot(topo.dut_list[0], 'fast')
st.wait(3)
if not utils.poll_wait(bgpapi.verify_ipv6_bgp_summary, 120, topo.dut_list[0],
neighbor=self.local_topo['dut2_addr_ipv6'], state='500'):
utils.exec_all(True, [[bgpapi.show_bgp_ipv6_neighbor_vtysh, topo.dut_list[0]],
[bgpapi.show_bgp_ipv6_neighbor_vtysh, topo.dut_list[1]]])
st.error("BGP Neighbor failed to Establish between DUT1 and DUT2")
st.log("{} - Neighbor {} is failed to Establish".format(topo.dut_list[0],
self.local_topo['dut2_addr_ipv6']))
result = False
bgp_summary_spine_after_update_timer = bgpapi.show_bgp_ipv6_summary(topo.dut_list[0])
rib_entries_after_update_timer = bgp_summary_spine_after_update_timer[0]['ribentries']
st.log('RIB Entries after reboot : {}'.format(rib_entries_after_update_timer))
        # Without the BGP helper, no routes sent by DUT2 will be seen on DUT1 after the reboot.
if int(rib_entries_after_update_timer) < 500:
            st.error('Routes from the peer DUT are not present in the RIB after the reboot')
tc_fail_flag = 1
if tc_fail_flag:
st.report_fail("traffic_verification_failed")
st.report_pass('test_case_passed')
# testcase: FtOtSoRtBgpPlFn002, Verify ipv6 route aggregation.
def test_route_aggregate_ipv6(self, bgp_ipvx_route_adv_filter_fixture):
limit = 3
ip6_rt_list = ["2018:3:1::/64", "2018:3:2::/64", "2018:3:3::/64", "2018:3:4::/64"]
ip6_adr_list = ["2019:1::1", "2019:2::1", "2019:3::1", "2019:4::1"]
aggr_addr = "2018:3::/32"
for i in range(0, limit):
ipapi.create_static_route(self.local_topo['dut1'], 'blackhole', ip6_rt_list[i], family='ipv6')
# configure aggregate address prefix
bgpapi.create_bgp_aggregate_address(self.local_topo['dut1'], local_asn=self.local_topo['dut1_as'],
                                            address_range=aggr_addr, summary=True, family="ipv6", config="add", cli_type=bgp_cli_type)
my_cmd = 'router bgp\n'
my_cmd += 'address-family ipv6 unicast\n'
my_cmd += 'redistribute static\n'
my_cmd += 'end'
st.vtysh_config(self.local_topo['dut1'], my_cmd)
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut1'], family='ipv6')
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut2'], family='ipv6',
select=['network', 'as_path', 'next_hop'])
list_of_learned_routes_on_dut2_by_dut = list(x['network'] for x in output)
if set(ip6_rt_list).isdisjoint(set(list_of_learned_routes_on_dut2_by_dut)):
st.log("Routes falling under aggregate prefix are not distributed")
aggregation = True
else:
st.log("Routes falling under aggregate prefix are distributed")
aggregation = False
if (aggr_addr in list_of_learned_routes_on_dut2_by_dut) and aggregation:
st.log("Aggregation happened")
result = True
else:
st.log("Aggregation not happened")
result = False
bgpapi.create_bgp_aggregate_address(self.local_topo['dut1'], local_asn=self.local_topo['dut1_as'],
                                            address_range=aggr_addr, summary=True, family="ipv6", config="delete", cli_type=bgp_cli_type)
for i in range(0, limit):
ipapi.delete_static_route(self.local_topo['dut1'], 'blackhole', ip6_rt_list[i], family='ipv6')
my_cmd = 'router bgp\n'
my_cmd += 'address-family ipv6 unicast\n'
my_cmd += 'no redistribute static\n'
my_cmd += 'end'
st.vtysh_config(self.local_topo['dut1'], my_cmd)
if result:
st.report_pass("test_case_passed")
else:
st.report_fail("test_case_failed")
# testcase: FtOtSoRtBgpPlFn005, verify static blackhole route redistribution with metric set in route-map
def test_static_blackhole_rt_redistribute_with_routemap_ipv6(self, bgp_ipvx_route_adv_filter_fixture):
ipapi.create_static_route(self.local_topo['dut1'], 'Null0', '2012:1::/64', family='ipv6')
ipapi.config_route_map(dut=self.local_topo['dut1'], route_map='rmap_blackhole', config='yes', sequence='10',
metric='50')
my_cmd = 'router bgp\n'
my_cmd += 'address-family ipv6 unicast\n'
my_cmd += 'redistribute static route-map rmap_blackhole\n'
my_cmd += 'end'
st.vtysh_config(self.local_topo['dut1'], my_cmd)
output = bgpapi.fetch_ip_bgp_route(self.local_topo['dut2'], family='ipv6',
select=['network', 'as_path', 'metric'])
metric = bgplib.get_route_attribute(output, 'metric', network = '2012:1::/64')
if metric == '50':
st.log('static blackhole route with metric 50 redistributed from dut1 to dut2')
result = True
else:
st.log('static blackhole route is not learned on dut2')
result = False
my_cmd = 'router bgp\n'
my_cmd += 'address-family ipv6 unicast\n'
my_cmd += 'no redistribute static route-map rmap_blackhole\n'
my_cmd += 'end'
st.vtysh_config(self.local_topo['dut1'], my_cmd)
ipapi.config_route_map(dut=self.local_topo['dut1'], route_map='rmap_blackhole', config='no', sequence='10', metric='50')
ipapi.delete_static_route(self.local_topo['dut1'], 'Null0', '2012:1::/64', family='ipv6')
if result:
st.report_pass("test_case_passed")
else:
st.report_fail("test_case_failed")
"""
BGP IPv4 and IPv6 router distribution and filtering TCs: End
"""
"""
BGP Neighbor over VE over LAG fixture, class and test cases - START
"""
def bgp_ve_lag_pre_config():
global topo
st.banner("BGP VE LAG CLASS CONFIG - START")
# underlay config - configure ve over lag
bgplib.l3tc_underlay_config_unconfig(config='yes', config_type='veLag')
# config ip on underlay interface
bgplib.l3tc_vrfipv4v6_address_leafspine_config_unconfig(config='yes', config_type='all')
# Ping Verification
if not bgplib.l3tc_vrfipv4v6_address_leafspine_ping_test(config_type='all', ping_count=3):
st.error("Ping failed in between Spine - Leaf")
st.report_fail('test_case_failed')
bgplib.l3tc_vrfipv4v6_address_leafspine_bgp_config(config='yes')
bgplib.l3tc_vrfipv4v6_address_leafspine_tg_bgp_config(config='yes', config_type='all', class_reconfig='Yes')
st.wait(10)
# BGP Neighbour Verification
if not utils.poll_wait(bgplib.l3tc_vrfipv4v6_address_leafspine_bgp_check, 10, config_type='all'):
st.error("Neighbour is failed to Establish between Spine - Leaf")
st.report_fail('test_case_failed')
st.log("Getting all topology info related to connectivity / TG and other parameters between duts")
topo = bgplib.get_leaf_spine_topology_info()
st.banner("BGP VE LAG CLASS CONFIG - END")
def bgp_ve_lag_pre_config_cleanup():
st.banner("BGP VE LAG CLASS CONFIG CLEANUP - START")
bgplib.l3tc_vrfipv4v6_address_leafspine_bgp_config(config='no', config_type='veLag')
bgplib.l3tc_vrfipv4v6_address_leafspine_config_unconfig(config='no')
bgplib.l3tc_underlay_config_unconfig(config='no', config_type='veLag')
st.banner("BGP VE LAG CLASS CONFIG CLEANUP - END")
@pytest.fixture(scope='class')
def bgp_ve_lag_class_hook(request):
bgp_ve_lag_pre_config()
yield
bgp_ve_lag_pre_config_cleanup()
# TestBGPVeLag Class
@pytest.mark.usefixtures('bgp_ve_lag_class_hook')
class TestBGPVeLag(TestBGPCommon):
# test v4 and v6 neighbors
@pytest.mark.bgp_clear
@pytest.mark.bgp_ft
def test_ft_bgp_clear(self):
TestBGPCommon.ft_bgp_clear(self)
# tests both v4 and v6 neighbors
@pytest.mark.bgp_traffic
@pytest.mark.bgp_ft
def test_ft_bgp_peer_traffic_check(self):
TestBGPCommon.ft_bgp_peer_traffic_check(self)
"""
BGP Neighbor over VE over LAG fixture, class and test cases - END
"""
"""
BGP Neighbor over L3 over LAG fixture, class and test cases - START
"""
def bgp_l3_lag_pre_config():
global topo
st.banner("BGP L3 OVER LAG CLASS CONFIG - START")
    # underlay config - configure l3 over lag
bgplib.l3tc_underlay_config_unconfig(config='yes', config_type='l3Lag')
# config ip on underlay interface
bgplib.l3tc_vrfipv4v6_address_leafspine_config_unconfig(config='yes', config_type='all')
# Ping Verification
if not bgplib.l3tc_vrfipv4v6_address_leafspine_ping_test(config_type='all', ping_count=3):
st.error("Ping failed in between Spine - Leaf")
st.report_fail('test_case_failed')
bgplib.l3tc_vrfipv4v6_address_leafspine_bgp_config(config='yes')
bgplib.l3tc_vrfipv4v6_address_leafspine_tg_bgp_config(config='yes', config_type='all', class_reconfig='Yes')
st.wait(10)
# BGP Neighbour Verification
if not utils.poll_wait(bgplib.l3tc_vrfipv4v6_address_leafspine_bgp_check, 10, config_type='all'):
st.error("Neighbour is failed to Establish between Spine - Leaf")
st.report_fail('test_case_failed')
st.log("Getting all topology info related to connectivity / TG and other parameters between duts")
topo = bgplib.get_leaf_spine_topology_info()
st.banner("BGP L3 LAG CLASS CONFIG - END")
def bgp_l3_lag_pre_config_cleanup():
st.banner("BGP L3 OVER LAG CLASS CONFIG CLEANUP - START")
bgplib.l3tc_vrfipv4v6_address_leafspine_bgp_config(config='no')
bgplib.l3tc_vrfipv4v6_address_leafspine_config_unconfig(config='no')
bgpapi.cleanup_bgp_config(st.get_dut_names())
ipapi.clear_ip_configuration(st.get_dut_names(), family='all', thread=True)
bgplib.l3tc_underlay_config_unconfig(config='no', config_type='l3Lag')
st.banner("BGP L3 OVER LAG CLASS CONFIG CLEANUP - END")
@pytest.fixture(scope='class')
def bgp_l3_lag_class_hook(request):
bgp_l3_lag_pre_config()
yield
bgp_l3_lag_pre_config_cleanup()
# TestBGPL3Lag Class
@pytest.mark.usefixtures('bgp_l3_lag_class_hook')
class TestBGPL3Lag(TestBGPCommon):
@pytest.mark.bgp_l3lag_traffic
def test_ft_bgp_l3lag_peer_traffic_check(self):
TestBGPCommon.ft_bgp_peer_traffic_check(self)
"""
BGP Neighbor over L3 over LAG fixture, class and test cases - END
"""
|
py | 1a425189439cb30efdad99acf3139eecdd9e0091 | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
import random
import common.layers as layers
from common.utils import load_wav_to_torch, load_filepaths_and_text, to_gpu
class MelAudioLoader(torch.utils.data.Dataset):
"""
    1) loads audio, text pairs
2) computes mel-spectrograms from audio files.
"""
def __init__(self, dataset_path, audiopaths_and_text, args):
self.audiopaths_and_text = load_filepaths_and_text(dataset_path, audiopaths_and_text)
self.max_wav_value = args.max_wav_value
self.sampling_rate = args.sampling_rate
self.stft = layers.TacotronSTFT(
args.filter_length, args.hop_length, args.win_length,
args.n_mel_channels, args.sampling_rate, args.mel_fmin,
args.mel_fmax)
self.segment_length = args.segment_length
random.seed(1234)
random.shuffle(self.audiopaths_and_text)
def get_mel_audio_pair(self, filename):
audio, sampling_rate = load_wav_to_torch(filename)
if sampling_rate != self.stft.sampling_rate:
raise ValueError("{} {} SR doesn't match target {} SR".format(
sampling_rate, self.stft.sampling_rate))
# Take segment
if audio.size(0) >= self.segment_length:
max_audio_start = audio.size(0) - self.segment_length
audio_start = random.randint(0, max_audio_start)
audio = audio[audio_start:audio_start+self.segment_length]
else:
audio = torch.nn.functional.pad(
audio, (0, self.segment_length - audio.size(0)), 'constant').data
audio = audio / self.max_wav_value
audio_norm = audio.unsqueeze(0)
audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)
melspec = self.stft.mel_spectrogram(audio_norm)
melspec = melspec.squeeze(0)
return (melspec, audio, len(audio))
def __getitem__(self, index):
return self.get_mel_audio_pair(self.audiopaths_and_text[index][0])
def __len__(self):
return len(self.audiopaths_and_text)
def batch_to_gpu(batch):
x, y, len_y = batch
x = to_gpu(x).float()
y = to_gpu(y).float()
len_y = to_gpu(torch.sum(len_y))
return ((x, y), y, len_y)
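# A minimal usage sketch (not part of the original module). The dataset path,
# filelist and the argparse-style `args` values below are illustrative
# assumptions, not the project's actual defaults:
#
#   from types import SimpleNamespace
#   args = SimpleNamespace(max_wav_value=32768.0, sampling_rate=22050,
#                          filter_length=1024, hop_length=256, win_length=1024,
#                          n_mel_channels=80, mel_fmin=0.0, mel_fmax=8000.0,
#                          segment_length=16000)
#   dataset = MelAudioLoader('/data/LJSpeech-1.1', 'filelists/train.txt', args)
#   mel, audio, audio_len = dataset[0]  # mel: (n_mel_channels, frames)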
|
py | 1a425278ac72b47d6e46ca28040633ae5f131fc4 | from typing import List, Optional, Tuple
import torch
import torch.nn as nn
from kornia.utils.helpers import _torch_svd_cast
__all__ = ["zca_mean", "zca_whiten", "linear_transform", "ZCAWhitening"]
class ZCAWhitening(nn.Module):
r"""Compute the ZCA whitening matrix transform and the mean vector and applies the transform to the data.
The data tensor is flattened, and the mean :math:`\mathbf{\mu}`
and covariance matrix :math:`\mathbf{\Sigma}` are computed from
the flattened data :math:`\mathbf{X} \in \mathbb{R}^{N \times D}`, where
:math:`N` is the sample size and :math:`D` is flattened dimensionality
(e.g. for a tensor with size 5x3x2x2 :math:`N = 5` and :math:`D = 12`). The ZCA whitening
transform is given by:
.. math::
\mathbf{X}_{\text{zca}} = (\mathbf{X - \mu})(US^{-\frac{1}{2}}U^T)^T
where :math:`U` are the eigenvectors of :math:`\Sigma` and :math:`S` contain the corresponding
eigenvalues of :math:`\Sigma`. After the transform is applied, the output is reshaped to same shape.
args:
dim: Determines the dimension that represents the samples axis.
eps: a small number used for numerical stability.
        unbiased: Whether to use the unbiased estimate of the covariance matrix.
compute_inv: Compute the inverse transform matrix.
detach_transforms: Detaches gradient from the ZCA fitting.
shape:
- x: :math:`(D_0,...,D_{\text{dim}},...,D_N)` is a batch of N-D tensors.
- x_whiten: :math:`(D_0,...,D_{\text{dim}},...,D_N)` same shape as input.
.. note::
See a working example `here <https://colab.sandbox.google.com/github/kornia/tutorials/
blob/master/source/zca_whitening.ipynb>`__.
Examples:
>>> x = torch.tensor([[0,1],[1,0],[-1,0],[0,-1]], dtype = torch.float32)
>>> zca = ZCAWhitening().fit(x)
>>> x_whiten = zca(x)
>>> zca = ZCAWhitening()
>>> x_whiten = zca(x, include_fit = True) # Includes the fitting step
>>> x_whiten = zca(x) # Can run now without the fitting set
>>> # Enable backprop through ZCA fitting process
>>> zca = ZCAWhitening(detach_transforms = False)
>>> x_whiten = zca(x, include_fit = True) # Includes the fitting step
Note:
This implementation uses :py:meth:`~torch.svd` which yields NaNs in the backwards step
if the singular values are not unique. See `here <https://pytorch.org/docs/stable/torch.html#torch.svd>`_ for
more information.
References:
[1] `Stanford PCA & ZCA whitening tutorial <http://ufldl.stanford.edu/tutorial/unsupervised/PCAWhitening/>`_
"""
def __init__(
self,
dim: int = 0,
eps: float = 1e-6,
unbiased: bool = True,
detach_transforms: bool = True,
compute_inv: bool = False,
) -> None:
super().__init__()
self.dim = dim
self.eps = eps
self.unbiased = unbiased
self.detach_transforms = detach_transforms
self.compute_inv = compute_inv
self.fitted = False
def fit(self, x: torch.Tensor):
r"""Fit ZCA whitening matrices to the data.
Args:
x: Input data.
returns:
Returns a fitted ZCAWhiten object instance.
"""
T, mean, T_inv = zca_mean(x, self.dim, self.unbiased, self.eps, self.compute_inv)
self.mean_vector: torch.Tensor = mean
self.transform_matrix: torch.Tensor = T
if T_inv is None:
self.transform_inv: Optional[torch.Tensor] = torch.empty([0])
else:
self.transform_inv = T_inv
if self.detach_transforms:
self.mean_vector = self.mean_vector.detach()
self.transform_matrix = self.transform_matrix.detach()
self.transform_inv = self.transform_inv.detach()
self.fitted = True
return self
def forward(self, x: torch.Tensor, include_fit: bool = False) -> torch.Tensor:
r"""Apply the whitening transform to the data.
Args:
x: Input data.
include_fit: Indicates whether to fit the data as part of the forward pass.
Returns:
The transformed data.
"""
if include_fit:
self.fit(x)
if not self.fitted:
raise RuntimeError("Needs to be fitted first before running. Please call fit or set include_fit to True.")
x_whiten = linear_transform(x, self.transform_matrix, self.mean_vector, self.dim)
return x_whiten
def inverse_transform(self, x: torch.Tensor) -> torch.Tensor:
r"""Apply the inverse transform to the whitened data.
Args:
x: Whitened data.
Returns:
Original data.
"""
if not self.fitted:
raise RuntimeError("Needs to be fitted first before running. Please call fit or set include_fit to True.")
if not self.compute_inv:
raise RuntimeError("Did not compute inverse ZCA. Please set compute_inv to True")
mean_inv: torch.Tensor = -self.mean_vector.mm(self.transform_matrix) # type: ignore
y = linear_transform(x, self.transform_inv, mean_inv) # type: ignore
return y
def zca_mean(
inp: torch.Tensor, dim: int = 0, unbiased: bool = True, eps: float = 1e-6, return_inverse: bool = False
) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
r"""Compute the ZCA whitening matrix and mean vector.
The output can be used with :py:meth:`~kornia.color.linear_transform`.
See :class:`~kornia.color.ZCAWhitening` for details.
Args:
inp: input data tensor.
dim: Specifies the dimension that serves as the samples dimension.
unbiased: Whether to use the unbiased estimate of the covariance matrix.
eps: a small number used for numerical stability.
return_inverse: Whether to return the inverse ZCA transform.
Shapes:
- inp: :math:`(D_0,...,D_{\text{dim}},...,D_N)` is a batch of N-D tensors.
- transform_matrix: :math:`(\Pi_{d=0,d\neq \text{dim}}^N D_d, \Pi_{d=0,d\neq \text{dim}}^N D_d)`
- mean_vector: :math:`(1, \Pi_{d=0,d\neq \text{dim}}^N D_d)`
- inv_transform: same shape as the transform matrix
Returns:
A tuple containing the ZCA matrix and the mean vector. If return_inverse is set to True,
then it returns the inverse ZCA matrix, otherwise it returns None.
.. note::
See a working example `here <https://colab.sandbox.google.com/github/kornia/tutorials/
blob/master/source/zca_whitening.ipynb>`__.
Examples:
>>> x = torch.tensor([[0,1],[1,0],[-1,0],[0,-1]], dtype = torch.float32)
>>> transform_matrix, mean_vector,_ = zca_mean(x) # Returns transformation matrix and data mean
>>> x = torch.rand(3,20,2,2)
>>> transform_matrix, mean_vector, inv_transform = zca_mean(x, dim = 1, return_inverse = True)
>>> # transform_matrix.size() equals (12,12) and the mean vector.size equal (1,12)
"""
if not isinstance(inp, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(inp)}")
if not isinstance(eps, float):
raise TypeError(f"eps type is not a float. Got{type(eps)}")
if not isinstance(unbiased, bool):
raise TypeError(f"unbiased type is not bool. Got{type(unbiased)}")
if not isinstance(dim, int):
raise TypeError(f"Argument 'dim' must be of type int. Got {type(dim)}")
if not isinstance(return_inverse, bool):
raise TypeError(f"Argument return_inverse must be of type bool {type(return_inverse)}")
inp_size = inp.size()
if dim >= len(inp_size) or dim < -len(inp_size):
raise IndexError(
"Dimension out of range (expected to be in range of [{},{}], but got {}".format(
-len(inp_size), len(inp_size) - 1, dim
)
)
if dim < 0:
dim = len(inp_size) + dim
feat_dims = torch.cat([torch.arange(0, dim), torch.arange(dim + 1, len(inp_size))])
new_order: List[int] = torch.cat([torch.tensor([dim]), feat_dims]).tolist()
inp_permute = inp.permute(new_order)
N = inp_size[dim]
feature_sizes = torch.tensor(inp_size[0:dim] + inp_size[dim + 1::])
num_features: int = int(torch.prod(feature_sizes).item())
mean: torch.Tensor = torch.mean(inp_permute, dim=0, keepdim=True)
mean = mean.reshape((1, num_features))
inp_center_flat: torch.Tensor = inp_permute.reshape((N, num_features)) - mean
cov = inp_center_flat.t().mm(inp_center_flat)
if unbiased:
cov = cov / float(N - 1)
else:
cov = cov / float(N)
U, S, _ = _torch_svd_cast(cov)
S = S.reshape(-1, 1)
S_inv_root: torch.Tensor = torch.rsqrt(S + eps)
T: torch.Tensor = (U).mm(S_inv_root * U.t())
T_inv: Optional[torch.Tensor] = None
if return_inverse:
T_inv = (U).mm(torch.sqrt(S + eps) * U.t())
return T, mean, T_inv
def zca_whiten(inp: torch.Tensor, dim: int = 0, unbiased: bool = True, eps: float = 1e-6) -> torch.Tensor:
r"""Apply ZCA whitening transform.
See :class:`~kornia.color.ZCAWhitening` for details.
Args:
inp: input data tensor.
dim: Specifies the dimension that serves as the samples dimension.
unbiased: Whether to use the unbiased estimate of the covariance matrix.
eps: a small number used for numerical stability.
Returns:
Whiten Input data.
.. note::
See a working example `here <https://colab.sandbox.google.com/github/kornia/tutorials/
blob/master/source/zca_whitening.ipynb>`__.
Examples:
>>> x = torch.tensor([[0,1],[1,0],[-1,0]], dtype = torch.float32)
>>> zca_whiten(x)
tensor([[ 0.0000, 1.1547],
[ 1.0000, -0.5773],
[-1.0000, -0.5773]])
"""
if not isinstance(inp, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(inp)}")
if not isinstance(eps, float):
raise TypeError(f"eps type is not a float. Got{type(eps)}")
if not isinstance(unbiased, bool):
raise TypeError(f"unbiased type is not bool. Got{type(unbiased)}")
if not isinstance(dim, int):
raise TypeError(f"Argument 'dim' must be of type int. Got {type(dim)}")
transform, mean, _ = zca_mean(inp, dim, unbiased, eps, False)
inp_whiten = linear_transform(inp, transform, mean, dim)
return inp_whiten
def linear_transform(
inp: torch.Tensor, transform_matrix: torch.Tensor, mean_vector: torch.Tensor, dim: int = 0
) -> torch.Tensor:
r"""
    Given a transformation matrix and a mean vector, this function flattens the
    input tensor along the given dimension, subtracts the mean vector from it,
    computes the dot product with the transformation matrix, and reshapes the
    result back to the original input shape.
.. math::
\mathbf{X}_{T} = (\mathbf{X - \mu})(T)
Args:
inp: Input data :math:`X`.
transform_matrix: Transform matrix :math:`T`.
mean_vector: mean vector :math:`\mu`.
dim: Batch dimension.
Shapes:
- inp: :math:`(D_0,...,D_{\text{dim}},...,D_N)` is a batch of N-D tensors.
- transform_matrix: :math:`(\Pi_{d=0,d\neq \text{dim}}^N D_d, \Pi_{d=0,d\neq \text{dim}}^N D_d)`
- mean_vector: :math:`(1, \Pi_{d=0,d\neq \text{dim}}^N D_d)`
Returns:
Transformed data.
Example:
>>> # Example where dim = 3
>>> inp = torch.ones((10,3,4,5))
>>> transform_mat = torch.ones((10*3*4,10*3*4))
>>> mean = 2*torch.ones((1,10*3*4))
>>> out = linear_transform(inp, transform_mat, mean, 3)
        >>> print(out.shape, out.unique()) # Should be a (10,3,4,5) tensor of -120s
torch.Size([10, 3, 4, 5]) tensor([-120.])
>>> # Example where dim = 0
>>> inp = torch.ones((10,2))
>>> transform_mat = torch.ones((2,2))
>>> mean = torch.zeros((1,2))
>>> out = linear_transform(inp, transform_mat, mean)
        >>> print(out.shape, out.unique()) # Should be a (10,2) tensor of 2s
torch.Size([10, 2]) tensor([2.])
"""
inp_size = inp.size()
if dim >= len(inp_size) or dim < -len(inp_size):
raise IndexError(
"Dimension out of range (expected to be in range of [{},{}], but got {}".format(
-len(inp_size), len(inp_size) - 1, dim
)
)
if dim < 0:
dim = len(inp_size) + dim
feat_dims = torch.cat([torch.arange(0, dim), torch.arange(dim + 1, len(inp_size))])
perm = torch.cat([torch.tensor([dim]), feat_dims])
perm_inv = torch.argsort(perm)
new_order: List[int] = perm.tolist()
inv_order: List[int] = perm_inv.tolist()
feature_sizes = torch.tensor(inp_size[0:dim] + inp_size[dim + 1::])
num_features: int = int(torch.prod(feature_sizes).item())
inp_permute = inp.permute(new_order)
inp_flat = inp_permute.reshape((-1, num_features))
inp_center = inp_flat - mean_vector
inp_transformed = inp_center.mm(transform_matrix)
inp_transformed = inp_transformed.reshape(inp_permute.size())
inp_transformed = inp_transformed.permute(inv_order)
return inp_transformed
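# A minimal end-to-end sketch (tensor shapes below are illustrative assumptions):
# fit the ZCA statistics once on training data with `zca_mean`, then reuse the
# same transform on held-out data via `linear_transform`.
#
#   x_train = torch.randn(100, 3, 8, 8)
#   x_test = torch.randn(20, 3, 8, 8)
#   T, mu, _ = zca_mean(x_train, dim=0)
#   x_test_white = linear_transform(x_test, T, mu, dim=0)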
|
py | 1a4252aaa513dd338112af0574ffa391ec8016f3 | import collections
import claripy
class SimVariable(object):
__slots__ = ['ident', 'name', 'region', 'category']
def __init__(self, ident=None, name=None, region=None, category=None):
"""
:param ident: A unique identifier provided by user or the program. Usually a string.
:param str name: Name of this variable.
"""
self.ident = ident
self.name = name
self.region = region if region is not None else ""
self.category = category
@property
def phi(self):
return False
class SimConstantVariable(SimVariable):
__slots__ = ['value', '_hash']
def __init__(self, ident=None, value=None, region=None):
super(SimConstantVariable, self).__init__(ident=ident, region=region)
self.value = value
self._hash = None
def __repr__(self):
s = "<%s|const %s>" % (self.region, self.value)
return s
def __eq__(self, other):
if not isinstance(other, SimConstantVariable):
return False
if self.value is None or other.value is None:
# they may or may not represent the same constant. return not equal to be safe
return False
return self.ident == other.ident and self.value == other.value and self.region == other.region
def __hash__(self):
if self._hash is None:
self._hash = hash(('const', self.value, self.ident, self.region, self.ident))
return self._hash
class SimTemporaryVariable(SimVariable):
__slots__ = ['tmp_id', '_hash']
def __init__(self, tmp_id):
SimVariable.__init__(self)
self.tmp_id = tmp_id
self._hash = None
def __repr__(self):
s = "<tmp %d>" % (self.tmp_id)
return s
def __hash__(self):
if self._hash is None:
self._hash = hash('tmp_%d' % (self.tmp_id))
return self._hash
def __eq__(self, other):
if isinstance(other, SimTemporaryVariable):
return hash(self) == hash(other)
return False
class SimRegisterVariable(SimVariable):
__slots__ = ['reg', 'size', '_hash']
def __init__(self, reg_offset, size, ident=None, name=None, region=None, category=None):
SimVariable.__init__(self, ident=ident, name=name, region=region, category=category)
self.reg = reg_offset
self.size = size
self._hash = None
def __repr__(self):
ident_str = "[%s]" % self.ident if self.ident else ""
region_str = hex(self.region) if isinstance(self.region, int) else self.region
phi_str = ("phi(%s)|" % (",".join(v.ident for v in self.variables))) if self.phi else "" #pylint:disable=no-member
s = "<%s%s%s|Reg %s, %sB>" % (phi_str, region_str, ident_str, self.reg, self.size)
return s
def __hash__(self):
if self._hash is None:
self._hash = hash(('reg', self.region, self.reg, self.size, self.ident))
return self._hash
def __eq__(self, other):
if isinstance(other, SimRegisterVariable):
return self.ident == other.ident and \
self.name == other.name and \
self.reg == other.reg and \
self.size == other.size and \
self.region == other.region and \
self.phi == other.phi
return False
class SimRegisterVariablePhi(SimRegisterVariable):
__slots__ = ['variables', '_hash']
def __init__(self, ident=None, name=None, region=None, variables=None):
var = next(iter(variables))
reg_offset = var.reg
size = var.size
super(SimRegisterVariablePhi, self).__init__(reg_offset, size, ident=ident, name=name, region=region)
self.variables = set(variables)
self._hash = None
def __hash__(self):
if self._hash is None:
self._hash = hash((self.name, self.region, self.size, self.ident, tuple(self.variables)))
return self._hash
def __eq__(self, other):
if type(other) is not SimRegisterVariablePhi:
return False
return self.ident == other.ident and \
self.variables == other.variables and \
self.name == other.name and \
self.region == other.region and \
self.size == other.size
@property
def phi(self):
return True
class SimMemoryVariable(SimVariable):
__slots__ = ['addr', 'size', '_hash']
def __init__(self, addr, size, ident=None, name=None, region=None, category=None):
SimVariable.__init__(self, ident=ident, name=name, region=region, category=category)
self.addr = addr
if isinstance(size, claripy.ast.BV) and not size.symbolic:
# Convert it to a concrete number
size = size._model_concrete.value
self.size = size
self._hash = None
def __repr__(self):
if type(self.size) is int:
size = '%d' % self.size
else:
size = '%s' % self.size
if type(self.addr) is int:
s = "<%s|Mem %#x %s>" % (self.region, self.addr, size)
else:
s = "<%s|Mem %s %s>" % (self.region, self.addr, size)
return s
def __hash__(self):
if self._hash is not None:
return self._hash
if isinstance(self.addr, AddressWrapper):
addr_hash = hash(self.addr)
elif type(self.addr) is int:
addr_hash = self.addr
elif self.addr._model_concrete is not self.addr:
addr_hash = hash(self.addr._model_concrete)
elif self.addr._model_vsa is not self.addr:
addr_hash = hash(self.addr._model_vsa)
elif self.addr._model_z3 is not self.addr:
addr_hash = hash(self.addr._model_z3)
else:
addr_hash = hash(self.addr)
self._hash = hash((addr_hash, hash(self.size), self.ident))
return self._hash
def __eq__(self, other):
if isinstance(other, SimMemoryVariable):
return self.ident == other.ident and \
self.addr == other.addr and \
self.name == other.name and \
self.size == other.size and \
self.phi == other.phi
return False
class SimMemoryVariablePhi(SimMemoryVariable):
__slots__ = ['variables', '_hash']
def __init__(self, ident=None, name=None, region=None, variables=None):
var = next(iter(variables))
addr = var.addr
size = var.size
super(SimMemoryVariablePhi, self).__init__(addr, size, ident=ident, name=name, region=region)
self.variables = set(variables)
self._hash = None
def __hash__(self):
if self._hash is None:
self._hash = hash((self.name, self.region, self.size, self.ident, tuple(self.variables)))
return self._hash
def __eq__(self, other):
if type(other) is not SimMemoryVariablePhi:
return False
return self.ident == other.ident and \
self.variables == other.variables and \
self.addr == other.addr and \
self.name == other.name and \
self.region == other.region and \
self.size == other.size
@property
def phi(self):
return True
class SimStackVariable(SimMemoryVariable):
__slots__ = ['base', 'offset']
def __init__(self, offset, size, base='sp', base_addr=None, ident=None, name=None, region=None, category=None):
if offset > 0x1000000 and isinstance(offset, int):
# I don't think any positive stack offset will be greater than that...
# convert it to a negative number
mask = (1 << offset.bit_length()) - 1
offset = - ((0 - offset) & mask)
if base_addr is not None:
addr = offset + base_addr
else:
# TODO: this is not optimal
addr = offset
super(SimStackVariable, self).__init__(addr, size, ident=ident, name=name, region=region, category=category)
self.base = base
self.offset = offset
def __repr__(self):
if type(self.size) is int:
size = '%d' % self.size
else:
size = '%s' % self.size
prefix = "%s(stack)" % self.name if self.name is not None else "Stack"
ident = "[%s]" % self.ident if self.ident else ""
region_str = hex(self.region) if isinstance(self.region, int) else self.region
phi_str = "phi|" if self.phi else ""
if type(self.offset) is int:
if self.offset < 0:
offset = "%#x" % self.offset
elif self.offset > 0:
offset = "+%#x" % self.offset
else:
offset = ""
s = "<%s%s%s|%s %s%s, %s B>" % (phi_str, region_str, ident, prefix, self.base, offset, size)
else:
s = "<%s%s%s|%s %s%s, %s B>" % (phi_str, region_str, ident, prefix, self.base, self.addr, size)
return s
def __eq__(self, other):
if type(other) is not SimStackVariable:
return False
return self.ident == other.ident and \
self.name == other.name and \
self.base == other.base and \
self.offset == other.offset and \
self.size == other.size and \
self.phi == other.phi
def __hash__(self):
return hash((self.ident, self.name, self.base, self.offset, self.size, self.phi))
class SimStackVariablePhi(SimStackVariable):
__slots__ = ['variables', '_hash']
def __init__(self, ident=None, name=None, region=None, variables=None):
var = next(iter(variables))
offset = var.addr
size = var.size
super(SimStackVariablePhi, self).__init__(offset, size, ident=ident, name=name, region=region)
self.variables = set(variables)
self._hash = None
def __hash__(self):
if self._hash is None:
self._hash = hash((self.name, self.region, self.size, self.ident, tuple(self.variables)))
return self._hash
def __eq__(self, other):
if type(other) is not SimStackVariablePhi:
return False
return self.ident == other.ident and \
self.variables == other.variables and \
self.addr == other.addr and \
self.name == other.name and \
self.region == other.region and \
self.size == other.size
@property
def phi(self):
return True
class SimVariableSet(collections.MutableSet):
"""
A collection of SimVariables.
"""
def __init__(self):
self.register_variables = set()
# For the sake of performance optimization, all elements in register_variables must be concrete integers which
# representing register offsets..
# There shouldn't be any problem apart from GetI/PutI instructions. We simply ignore them for now.
# TODO: Take care of register offsets that are not aligned to (arch.bytes)
# TODO: arch.bits/what? That number has no power here anymore.
self.register_variable_offsets = set()
# memory_variables holds SimMemoryVariable objects
self.memory_variables = set()
# For the sake of performance, we have another set that stores memory addresses of memory_variables
self.memory_variable_addresses = set()
def add(self, item):
if type(item) is SimRegisterVariable:
if not self.contains_register_variable(item):
self.add_register_variable(item)
elif type(item) is SimMemoryVariable:
if not self.contains_memory_variable(item):
self.add_memory_variable(item)
else:
# TODO:
raise Exception('WTF')
def add_register_variable(self, reg_var):
self.register_variables.add(reg_var)
self.register_variable_offsets.add(reg_var.reg)
def add_memory_variable(self, mem_var):
self.memory_variables.add(mem_var)
base_address = mem_var.addr.address # Dealing with AddressWrapper
for i in range(mem_var.size):
self.memory_variable_addresses.add(base_address + i)
def discard(self, item):
if type(item) is SimRegisterVariable:
if self.contains_register_variable(item):
self.discard_register_variable(item)
elif isinstance(item, SimMemoryVariable):
if self.contains_memory_variable(item):
self.discard_memory_variable(item)
else:
# TODO:
raise Exception('')
def discard_register_variable(self, reg_var):
self.register_variables.remove(reg_var)
self.register_variable_offsets.remove(reg_var.reg)
def discard_memory_variable(self, mem_var):
self.memory_variables.remove(mem_var)
for i in range(mem_var.size):
self.memory_variable_addresses.remove(mem_var.addr.address + i)
def __len__(self):
return len(self.register_variables) + len(self.memory_variables)
def __iter__(self):
for i in self.register_variables: yield i
for i in self.memory_variables: yield i
def add_memory_variables(self, addrs, size):
for a in addrs:
var = SimMemoryVariable(a, size)
self.add_memory_variable(var)
def copy(self):
s = SimVariableSet()
s.register_variables |= self.register_variables
s.register_variable_offsets |= self.register_variable_offsets
s.memory_variables |= self.memory_variables
s.memory_variable_addresses |= self.memory_variable_addresses
return s
def complement(self, other):
"""
Calculate the complement of `self` and `other`.
:param other: Another SimVariableSet instance.
:return: The complement result.
"""
s = SimVariableSet()
s.register_variables = self.register_variables - other.register_variables
s.register_variable_offsets = self.register_variable_offsets - other.register_variable_offsets
s.memory_variables = self.memory_variables - other.memory_variables
s.memory_variable_addresses = self.memory_variable_addresses - other.memory_variable_addresses
return s
def contains_register_variable(self, reg_var):
reg_offset = reg_var.reg
# TODO: Make sure reg_offset is aligned to machine-word length
return reg_offset in self.register_variable_offsets
def contains_memory_variable(self, mem_var):
a = mem_var.addr
if type(a) in (tuple, list): a = a[-1]
return a in self.memory_variable_addresses
def __ior__(self, other):
# other must be a SimVariableSet
self.register_variables |= other.register_variables
self.register_variable_offsets |= other.register_variable_offsets
self.memory_variables |= other.memory_variables
self.memory_variable_addresses |= other.memory_variable_addresses
def __contains__(self, item):
if type(item) is SimRegisterVariable:
return self.contains_register_variable(item)
elif type(item) is SimMemoryVariable:
# TODO: Make it better!
return self.contains_memory_variable(item)
else:
__import__('ipdb').set_trace()
raise Exception("WTF is this variable?")
from .storage.memory import AddressWrapper
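# A hypothetical usage sketch (offsets, sizes and idents are illustrative):
#
#   reg_var = SimRegisterVariable(reg_offset=16, size=8, ident='iv_1')
#   stack_var = SimStackVariable(offset=-0x10, size=4, base='bp', ident='iv_2')
#   var_set = SimVariableSet()
#   var_set.add(reg_var)  # tracked internally by its register offset
#   assert reg_var in var_set and len(var_set) == 1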
|
py | 1a4252ded0a56b76cce0c3576ffc90326fe34909 | """Definitions for command-line (Click) commands for invoking Annif
operations and printing the results to console."""
import collections
import os.path
import re
import sys
import click
import click_log
from flask import current_app
from flask.cli import FlaskGroup, ScriptInfo
import annif
import annif.corpus
import annif.parallel
import annif.project
import annif.registry
from annif.project import Access
from annif.suggestion import SuggestionFilter, ListSuggestionResult
from annif.exception import ConfigurationException, NotSupportedException
logger = annif.logger
click_log.basic_config(logger)
cli = FlaskGroup(create_app=annif.create_app, add_version_option=False)
cli = click.version_option(message='%(version)s')(cli)
def get_project(project_id):
"""
Helper function to get a project by ID and bail out if it doesn't exist"""
try:
return annif.registry.get_project(project_id,
min_access=Access.private)
except ValueError:
click.echo(
"No projects found with id \'{0}\'.".format(project_id),
err=True)
sys.exit(1)
def open_documents(paths, docs_limit):
"""Helper function to open a document corpus from a list of pathnames,
each of which is either a TSV file or a directory of TXT files. The
corpus will be returned as an instance of DocumentCorpus or
LimitingDocumentCorpus."""
def open_doc_path(path):
"""open a single path and return it as a DocumentCorpus"""
if os.path.isdir(path):
return annif.corpus.DocumentDirectory(path, require_subjects=True)
return annif.corpus.DocumentFile(path)
if len(paths) == 0:
logger.warning('Reading empty file')
docs = open_doc_path(os.path.devnull)
elif len(paths) == 1:
docs = open_doc_path(paths[0])
else:
corpora = [open_doc_path(path) for path in paths]
docs = annif.corpus.CombinedCorpus(corpora)
if docs_limit is not None:
docs = annif.corpus.LimitingDocumentCorpus(docs, docs_limit)
return docs
def parse_backend_params(backend_param, project):
"""Parse a list of backend parameters given with the --backend-param
option into a nested dict structure"""
backend_params = collections.defaultdict(dict)
for beparam in backend_param:
backend, param = beparam.split('.', 1)
key, val = param.split('=', 1)
validate_backend_params(backend, beparam, project)
backend_params[backend][key] = val
return backend_params
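# For illustration only (hypothetical backend name and parameter keys): given
# backend_param = ('tfidf.limit=100', 'tfidf.min_df=2') and a project whose
# configured backend is 'tfidf', parse_backend_params returns roughly
# {'tfidf': {'limit': '100', 'min_df': '2'}} -- note that values stay strings.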
def validate_backend_params(backend, beparam, project):
if backend != project.config['backend']:
raise ConfigurationException(
'The backend {} in CLI option "-b {}" not matching the project'
' backend {}.'
.format(backend, beparam, project.config['backend']))
BATCH_MAX_LIMIT = 15
def generate_filter_batches(subjects):
import annif.eval
filter_batches = collections.OrderedDict()
for limit in range(1, BATCH_MAX_LIMIT + 1):
for threshold in [i * 0.05 for i in range(20)]:
hit_filter = SuggestionFilter(subjects, limit, threshold)
batch = annif.eval.EvaluationBatch(subjects)
filter_batches[(limit, threshold)] = (hit_filter, batch)
return filter_batches
def set_project_config_file_path(ctx, param, value):
"""Override the default path or the path given in env by CLI option"""
with ctx.ensure_object(ScriptInfo).load_app().app_context():
if value:
current_app.config['PROJECTS_FILE'] = value
def common_options(f):
"""Decorator to add common options for all CLI commands"""
f = click.option(
'-p', '--projects', help='Set path to projects.cfg',
type=click.Path(dir_okay=False, exists=True),
callback=set_project_config_file_path, expose_value=False,
is_eager=True)(f)
return click_log.simple_verbosity_option(logger)(f)
def backend_param_option(f):
"""Decorator to add an option for CLI commands to override BE parameters"""
return click.option(
'--backend-param', '-b', multiple=True,
help='Override backend parameter of the config file. ' +
'Syntax: "-b <backend>.<parameter>=<value>".')(f)
@cli.command('list-projects')
@common_options
@click_log.simple_verbosity_option(logger, default='ERROR')
def run_list_projects():
"""
List available projects.
"""
template = "{0: <25}{1: <45}{2: <10}{3: <7}"
header = template.format(
"Project ID", "Project Name", "Language", "Trained")
click.echo(header)
click.echo("-" * len(header))
for proj in annif.registry.get_projects(
min_access=Access.private).values():
click.echo(template.format(
proj.project_id, proj.name, proj.language, str(proj.is_trained)))
@cli.command('show-project')
@click.argument('project_id')
@common_options
def run_show_project(project_id):
"""
Show information about a project.
"""
proj = get_project(project_id)
click.echo(f'Project ID: {proj.project_id}')
click.echo(f'Project Name: {proj.name}')
click.echo(f'Language: {proj.language}')
click.echo(f'Access: {proj.access.name}')
click.echo(f'Trained: {proj.is_trained}')
click.echo(f'Modification time: {proj.modification_time}')
@cli.command('clear')
@click.argument('project_id')
@common_options
def run_clear_project(project_id):
"""
Initialize the project to its original, untrained state.
"""
proj = get_project(project_id)
proj.remove_model_data()
@cli.command('loadvoc')
@click.argument('project_id')
@click.argument('subjectfile', type=click.Path(exists=True, dir_okay=False))
@common_options
def run_loadvoc(project_id, subjectfile):
"""
Load a vocabulary for a project.
"""
proj = get_project(project_id)
if annif.corpus.SubjectFileSKOS.is_rdf_file(subjectfile):
# SKOS/RDF file supported by rdflib
subjects = annif.corpus.SubjectFileSKOS(subjectfile, proj.language)
else:
# probably a TSV file
subjects = annif.corpus.SubjectFileTSV(subjectfile)
proj.vocab.load_vocabulary(subjects, proj.language)
@cli.command('train')
@click.argument('project_id')
@click.argument('paths', type=click.Path(exists=True), nargs=-1)
@click.option('--cached/--no-cached', '-c/-C', default=False,
help='Reuse preprocessed training data from previous run')
@click.option('--docs-limit', '-d', default=None,
type=click.IntRange(0, None),
help='Maximum number of documents to use')
@click.option('--jobs',
'-j',
default=0,
help='Number of parallel jobs (0 means choose automatically)')
@backend_param_option
@common_options
def run_train(project_id, paths, cached, docs_limit, jobs, backend_param):
"""
Train a project on a collection of documents.
"""
proj = get_project(project_id)
backend_params = parse_backend_params(backend_param, proj)
if cached:
if len(paths) > 0:
raise click.UsageError(
"Corpus paths cannot be given when using --cached option.")
documents = 'cached'
else:
documents = open_documents(paths, docs_limit)
proj.train(documents, backend_params, jobs)
@cli.command('learn')
@click.argument('project_id')
@click.argument('paths', type=click.Path(exists=True), nargs=-1)
@click.option('--docs-limit', '-d', default=None,
type=click.IntRange(0, None),
help='Maximum number of documents to use')
@backend_param_option
@common_options
def run_learn(project_id, paths, docs_limit, backend_param):
"""
Further train an existing project on a collection of documents.
"""
proj = get_project(project_id)
backend_params = parse_backend_params(backend_param, proj)
documents = open_documents(paths, docs_limit)
proj.learn(documents, backend_params)
@cli.command('suggest')
@click.argument('project_id')
@click.option('--limit', '-l', default=10, help='Maximum number of subjects')
@click.option('--threshold', '-t', default=0.0, help='Minimum score threshold')
@backend_param_option
@common_options
def run_suggest(project_id, limit, threshold, backend_param):
"""
Suggest subjects for a single document from standard input.
"""
project = get_project(project_id)
text = sys.stdin.read()
backend_params = parse_backend_params(backend_param, project)
hit_filter = SuggestionFilter(project.subjects, limit, threshold)
hits = hit_filter(project.suggest(text, backend_params))
for hit in hits.as_list(project.subjects):
click.echo(
"<{}>\t{}\t{}".format(
hit.uri,
'\t'.join(filter(None, (hit.label, hit.notation))),
hit.score))
@cli.command('index')
@click.argument('project_id')
@click.argument('directory', type=click.Path(exists=True, file_okay=False))
@click.option(
'--suffix',
'-s',
default='.annif',
help='File name suffix for result files')
@click.option('--force/--no-force', '-f/-F', default=False,
help='Force overwriting of existing result files')
@click.option('--limit', '-l', default=10, help='Maximum number of subjects')
@click.option('--threshold', '-t', default=0.0, help='Minimum score threshold')
@backend_param_option
@common_options
def run_index(project_id, directory, suffix, force,
limit, threshold, backend_param):
"""
Index a directory with documents, suggesting subjects for each document.
Write the results in TSV files with the given suffix.
"""
project = get_project(project_id)
backend_params = parse_backend_params(backend_param, project)
hit_filter = SuggestionFilter(project.subjects, limit, threshold)
for docfilename, dummy_subjectfn in annif.corpus.DocumentDirectory(
directory, require_subjects=False):
with open(docfilename, encoding='utf-8-sig') as docfile:
text = docfile.read()
subjectfilename = re.sub(r'\.txt$', suffix, docfilename)
if os.path.exists(subjectfilename) and not force:
click.echo(
"Not overwriting {} (use --force to override)".format(
subjectfilename))
continue
with open(subjectfilename, 'w', encoding='utf-8') as subjfile:
results = project.suggest(text, backend_params)
for hit in hit_filter(results).as_list(project.subjects):
line = "<{}>\t{}\t{}".format(
hit.uri,
'\t'.join(filter(None, (hit.label, hit.notation))),
hit.score)
click.echo(line, file=subjfile)
@cli.command('eval')
@click.argument('project_id')
@click.argument('paths', type=click.Path(exists=True), nargs=-1)
@click.option('--limit', '-l', default=10, help='Maximum number of subjects')
@click.option('--threshold', '-t', default=0.0, help='Minimum score threshold')
@click.option('--docs-limit', '-d', default=None,
type=click.IntRange(0, None),
help='Maximum number of documents to use')
@click.option(
'--results-file',
'-r',
type=click.File(
'w',
encoding='utf-8',
errors='ignore',
lazy=True),
help="""Specify file in order to write non-aggregated results per subject.
File directory must exist, existing file will be overwritten.""")
@click.option('--jobs',
'-j',
default=1,
help='Number of parallel jobs (0 means all CPUs)')
@backend_param_option
@common_options
def run_eval(
project_id,
paths,
limit,
threshold,
docs_limit,
results_file,
jobs,
backend_param):
"""
Analyze documents and evaluate the result.
Compare the results of automated indexing against a gold standard. The
path may be either a TSV file with short documents or a directory with
documents in separate files.
"""
project = get_project(project_id)
backend_params = parse_backend_params(backend_param, project)
import annif.eval
eval_batch = annif.eval.EvaluationBatch(project.subjects)
if results_file:
try:
print('', end='', file=results_file)
click.echo('Writing per subject evaluation results to {!s}'.format(
results_file.name))
except Exception as e:
raise NotSupportedException(
"cannot open results-file for writing: " + str(e))
docs = open_documents(paths, docs_limit)
jobs, pool_class = annif.parallel.get_pool(jobs)
project.initialize(parallel=True)
psmap = annif.parallel.ProjectSuggestMap(
project.registry, [project_id], backend_params, limit, threshold)
with pool_class(jobs) as pool:
for hits, uris, labels in pool.imap_unordered(
psmap.suggest, docs.documents):
eval_batch.evaluate(hits[project_id],
annif.corpus.SubjectSet((uris, labels)))
template = "{0:<30}\t{1}"
for metric, score in eval_batch.results(results_file=results_file).items():
click.echo(template.format(metric + ":", score))
@cli.command('optimize')
@click.argument('project_id')
@click.argument('paths', type=click.Path(exists=True), nargs=-1)
@click.option('--docs-limit', '-d', default=None,
type=click.IntRange(0, None),
help='Maximum number of documents to use')
@backend_param_option
@common_options
def run_optimize(project_id, paths, docs_limit, backend_param):
"""
Analyze documents, testing multiple limits and thresholds.
Evaluate the analysis results for a directory with documents against a
gold standard given in subject files. Test different limit/threshold
values and report the precision, recall and F-measure of each combination
of settings.
"""
project = get_project(project_id)
backend_params = parse_backend_params(backend_param, project)
filter_batches = generate_filter_batches(project.subjects)
ndocs = 0
docs = open_documents(paths, docs_limit)
for doc in docs.documents:
raw_hits = project.suggest(doc.text, backend_params)
hits = raw_hits.filter(project.subjects, limit=BATCH_MAX_LIMIT)
assert isinstance(hits, ListSuggestionResult), \
"Optimize should only be done with ListSuggestionResult " + \
"as it would be very slow with VectorSuggestionResult."
gold_subjects = annif.corpus.SubjectSet((doc.uris, doc.labels))
for hit_filter, batch in filter_batches.values():
batch.evaluate(hit_filter(hits), gold_subjects)
ndocs += 1
click.echo("\t".join(('Limit', 'Thresh.', 'Prec.', 'Rec.', 'F1')))
best_scores = collections.defaultdict(float)
best_params = {}
template = "{:d}\t{:.02f}\t{:.04f}\t{:.04f}\t{:.04f}"
# Store the batches in a list that gets consumed along the way
# This way GC will have a chance to reclaim the memory
filter_batches = list(filter_batches.items())
while filter_batches:
params, filter_batch = filter_batches.pop(0)
metrics = ['Precision (doc avg)',
'Recall (doc avg)',
'F1 score (doc avg)']
results = filter_batch[1].results(metrics=metrics)
for metric, score in results.items():
if score >= best_scores[metric]:
best_scores[metric] = score
best_params[metric] = params
click.echo(
template.format(
params[0],
params[1],
results['Precision (doc avg)'],
results['Recall (doc avg)'],
results['F1 score (doc avg)']))
click.echo()
template2 = "Best {:>19}: {:.04f}\tLimit: {:d}\tThreshold: {:.02f}"
for metric in metrics:
click.echo(
template2.format(
metric,
best_scores[metric],
best_params[metric][0],
best_params[metric][1]))
click.echo("Documents evaluated:\t{}".format(ndocs))
@cli.command('hyperopt')
@click.argument('project_id')
@click.argument('paths', type=click.Path(exists=True), nargs=-1)
@click.option('--docs-limit', '-d', default=None,
type=click.IntRange(0, None),
help='Maximum number of documents to use')
@click.option('--trials', '-T', default=10, help='Number of trials')
@click.option('--jobs',
'-j',
default=1,
help='Number of parallel runs (0 means all CPUs)')
@click.option('--metric', '-m', default='NDCG',
help='Metric to optimize (default: NDCG)')
@click.option(
'--results-file',
'-r',
type=click.File(
'w',
encoding='utf-8',
errors='ignore',
lazy=True),
help="""Specify file path to write trial results as CSV.
File directory must exist, existing file will be overwritten.""")
@common_options
def run_hyperopt(project_id, paths, docs_limit, trials, jobs, metric,
results_file):
"""
Optimize the hyperparameters of a project using a validation corpus.
"""
proj = get_project(project_id)
documents = open_documents(paths, docs_limit)
click.echo(f"Looking for optimal hyperparameters using {trials} trials")
rec = proj.hyperopt(documents, trials, jobs, metric, results_file)
click.echo(f"Got best {metric} score {rec.score:.4f} with:")
click.echo("---")
for line in rec.lines:
click.echo(line)
click.echo("---")
if __name__ == '__main__':
cli()
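# A hypothetical command-line session using the commands registered above
# (project IDs, vocabulary and corpus paths are illustrative assumptions):
#
#   annif list-projects
#   annif loadvoc my-project subjects.tsv
#   annif train my-project corpus.tsv
#   echo "A short document about maritime history." | annif suggest my-project
#   annif eval my-project validation.tsv --limit 5 --threshold 0.2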
|
py | 1a425325b4e8ccc08d9c000a480051a02b043dcd | # Generated by Django 2.2.4 on 2020-08-27 03:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blogapp', '0008_auto_20200827_0849'),
]
operations = [
migrations.AddField(
model_name='post',
name='snippet',
field=models.CharField(default='Click the link to read the blog', max_length=255),
),
]
|
py | 1a4253c4b65ace6e30949ace26e1a4047a99d38a | import os
from setup import basedir
class BaseConfig(object):
SECRET_KEY = "SO_SECURE"
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
# SQLALCHEMY_DATABASE_URI = "postgresql://localhost/Cathal"
MONGODB_URI = os.environ['MONGODB_URL']
SQLALCHEMY_TRACK_MODIFICATIONS = True
JSON_AS_ASCII = False
GOOGLE_CLIENT_ID = os.environ['GOOGLE_CLIENT_ID']
GOOGLE_CLIENT_SECRET = os.environ['GOOGLE_CLIENT_SECRET']
class TestingConfig(object):
"""Development configuration."""
TESTING = True
DEBUG = True
WTF_CSRF_ENABLED = False
SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:'
DEBUG_TB_ENABLED = True
PRESERVE_CONTEXT_ON_EXCEPTION = False
|
py | 1a4254b289bef45cd3f0981e203bbeb4d09a0563 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import cslib
def get_drive_mapped_path_dict():
def get_mapped_path_for_drive(drive):
        # use the Windows API (WNetGetConnectionW)
try:
import ctypes
from ctypes import wintypes
mpr = ctypes.WinDLL('mpr')
ERROR_SUCCESS = 0x0000
ERROR_MORE_DATA = 0x00EA
wintypes.LPDWORD = ctypes.POINTER(wintypes.DWORD)
mpr.WNetGetConnectionW.restype = wintypes.DWORD
mpr.WNetGetConnectionW.argtypes = (wintypes.LPCWSTR, wintypes.LPWSTR, wintypes.LPDWORD)
length = (wintypes.DWORD * 1)()
result = mpr.WNetGetConnectionW(drive, None, length)
if result != ERROR_MORE_DATA:
return ''
remote_name = (wintypes.WCHAR * length[0])()
result = mpr.WNetGetConnectionW(drive, remote_name, length)
if result != ERROR_SUCCESS:
return ''
return remote_name.value.replace('\\\\', '')
except Exception:
import libcsbuild
libcsbuild.write_csbuild_log('EXCEPTION_IN_PROCESSING_NETWORK_DRIVE: %s' % drive)
return ''
drive_mapped_path_dict = {}
if not cslib.is_windows():
return drive_mapped_path_dict
import win32api
import libcsbuild
drive_letter_list = [drive_letter.replace('\\', '') for drive_letter in
win32api.GetLogicalDriveStrings().split('\000')[:-1] if drive_letter != '']
for drive_letter in drive_letter_list:
key = get_mapped_path_for_drive(drive_letter)
if key == '':
continue
libcsbuild.write_csbuild_log('network_drive: %s, path: %s' % (drive_letter, key))
drive_mapped_path_dict[key] = drive_letter
libcsbuild.write_csbuild_log(str(drive_mapped_path_dict))
return drive_mapped_path_dict
def convert_network_drive_path(open_file, mapped_dict):
unc_prefix = '\\Device\\Mup'
if not cslib.is_windows() or not open_file.startswith(unc_prefix):
return open_file
for key in mapped_dict.keys():
inx = open_file.find(key)
if inx == -1:
continue
import libcsbuild
libcsbuild.write_csbuild_log(
'%s -> %s' % (open_file, os.path.join(mapped_dict[key], open_file[inx + len(key):])))
open_file = os.path.join(mapped_dict[key], open_file[inx + len(key):])
return open_file
|
py | 1a4254ed21031073d7d1645544435da52197ba06 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import collections
import errno
import os
import sys
import unittest
try:
import fcntl
except ImportError: # pragma: no cover
# Doesn't exist on Windows. See also crbug.com/515704.
pass
from testing_support import auto_stub
from infra.libs.service_utils import daemon
import mock
Stat = collections.namedtuple('Stat', ['st_ino'])
class TestFlock(auto_stub.TestCase):
@unittest.skipIf(sys.platform == 'win32', 'Requires not windows')
def setUp(self):
super(TestFlock, self).setUp()
@contextlib.contextmanager
def _assert_reached(self):
reached = {'yup': False}
yield reached
self.assertTrue(reached['yup'])
def _mock_basic_fs_calls(self):
"""Mocks os.open, os.close as well as os.fstat."""
def _noop_handler(*_args, **_kwargs):
return 1
def _noop_os_close(*_args, **_kwargs):
pass
def _noop_fstat(*_args, **_kwargs):
return Stat(st_ino=45678)
self.mock(os, 'open', _noop_handler)
self.mock(os, 'close', _noop_os_close)
self.mock(os, 'fstat', _noop_fstat)
def _set_lock_status(self, success=True):
"""Mocks os.fcntl and whether the mock succeeds or not."""
def _lock_status(_fd, flags, **_kwargs):
if flags != fcntl.LOCK_UN: # We don't care if unlock fails.
if not success:
raise IOError('Couldn\'t get lock.')
self.mock(fcntl, 'lockf', _lock_status)
def _set_stat_status(self, success=True, matching=True):
"""Mocks os.stat, sets its success and if st_ino matches os.fstat mock."""
def _stat_handler(*_args, **_kwargs):
if not success:
raise OSError('Not found.')
if matching:
return Stat(st_ino=45678)
return Stat(st_ino=67890)
self.mock(os, 'stat', _stat_handler)
def _set_unlink_status(self, success=True):
"""Mocks os.unlink and sets whether it succeeds or not."""
def _unlink_handler(*_args, **_kwargs):
if not success:
raise OSError('Not found.')
self.mock(os, 'unlink', _unlink_handler)
#### Tests.
def testGetLock(self):
self._mock_basic_fs_calls()
self._set_lock_status()
self._set_stat_status()
self._set_unlink_status()
with self._assert_reached() as reached:
with daemon.flock('bogus'):
reached['yup'] = True
def testDontGetLock(self):
self._mock_basic_fs_calls()
self._set_lock_status(success=False)
self._set_stat_status()
self._set_unlink_status()
with self.assertRaises(daemon.LockAlreadyLocked):
with daemon.flock('bogus'):
# Should never reach this.
# pylint: disable=redundant-unittest-assert
self.assertTrue(False) # pragma: no cover
def testFileDeletedAfterLockAcquired(self):
"""Test that we abort if we acquire a lock but the file has been deleted."""
self._mock_basic_fs_calls()
self._set_lock_status()
self._set_stat_status(success=False)
self._set_unlink_status()
with self.assertRaises(daemon.LockAlreadyLocked):
with daemon.flock('bogus'):
# Should never reach this.
# pylint: disable=redundant-unittest-assert
self.assertTrue(False) # pragma: no cover
def testLockfileRecreated(self):
"""Test that we abort if a new lockfile is created under us."""
self._mock_basic_fs_calls()
self._set_lock_status()
self._set_stat_status(matching=False)
self._set_unlink_status()
with self.assertRaises(daemon.LockAlreadyLocked):
with daemon.flock('bogus'):
# Should never reach this.
# pylint: disable=redundant-unittest-assert
self.assertTrue(False) # pragma: no cover
def testDeleteWhenDone(self):
"""Test that we delete the lockfile when we're done."""
data = {'count': 0}
def _mock_unlink(*_args, **_kwargs):
data['count'] += 1
self.mock(os, 'unlink', _mock_unlink)
self._mock_basic_fs_calls()
self._set_lock_status()
self._set_stat_status()
with self._assert_reached() as reached:
with daemon.flock('bogus'):
reached['yup'] = True
self.assertEqual(data['count'], 1)
def testUnlinkFailureDoesntBreak(self):
"""Test that a failing unlink doesn't break us."""
self._mock_basic_fs_calls()
self._set_lock_status()
self._set_stat_status()
self._set_unlink_status(success=False)
with self._assert_reached() as reached:
with daemon.flock('bogus'):
reached['yup'] = True
@mock.patch('os.fork', return_value=0)
@mock.patch('os.setsid')
@mock.patch('os.close')
@mock.patch('os.open')
@mock.patch('os.dup2')
@mock.patch('os.chdir')
@mock.patch('os._exit')
class TestBecomeDaemon(unittest.TestCase):
@unittest.skipIf(sys.platform == 'win32', 'Requires not windows')
def setUp(self):
super(TestBecomeDaemon, self).setUp()
def testClosesFds(self, _mock_exit, _mock_chdir, _mock_dup2, _mock_open,
mock_close, _mock_setsid, _mock_fork):
daemon.become_daemon()
self.assertEqual(2048, mock_close.call_count)
self.assertEqual([((i,),) for i in reversed(range(2048))],
mock_close.call_args_list)
def testClosesFdWithExceptions(self, _mock_exit, _mock_chdir, _mock_dup2,
_mock_open, mock_close, _mock_setsid,
_mock_fork):
daemon.become_daemon(keep_fds={42})
self.assertEqual(2047, mock_close.call_count)
self.assertEqual([((i,),) for i in reversed(range(2048)) if i != 42],
mock_close.call_args_list)
def testClosesFdsKeepingAll(self, _mock_exit, _mock_chdir, _mock_dup2,
_mock_open, mock_close, _mock_setsid,
_mock_fork):
daemon.become_daemon(keep_fds=True)
self.assertEqual(0, mock_close.call_count)
def testClosesInvalidFds(self, _mock_exit, _mock_chdir, _mock_dup2,
_mock_open, mock_close, _mock_setsid, _mock_fork):
mock_close.side_effect = EnvironmentError(errno.EIO, '')
with self.assertRaises(EnvironmentError):
daemon.become_daemon()
mock_close.side_effect = EnvironmentError(errno.EBADF, '')
daemon.become_daemon()
def testOpensDevNull(self, _mock_exit, _mock_chdir, mock_dup2, mock_open,
_mock_close, _mock_setsid, _mock_fork):
handle = object()
mock_open.return_value = handle
daemon.become_daemon()
self.assertEqual([
((handle, 0),),
((handle, 1),),
((handle, 2),),
], mock_dup2.call_args_list)
def testOpensDevNullWithExceptions(self, _mock_exit, _mock_chdir, mock_dup2,
mock_open, _mock_close, _mock_setsid,
_mock_fork):
handle = object()
mock_open.return_value = handle
daemon.become_daemon(keep_fds={1})
self.assertEqual([
((handle, 0),),
((handle, 2),),
], mock_dup2.call_args_list)
def testChangesToRoot(self, _mock_exit, mock_chdir, _mock_dup2, _mock_open,
_mock_close, _mock_setsid, _mock_fork):
daemon.become_daemon()
mock_chdir.assert_called_with('/')
def testForkExitsParent(self, mock_exit, _mock_chdir, _mock_dup2, _mock_open,
_mock_close, _mock_setsid, mock_fork):
mock_fork.return_value = 0
daemon.become_daemon()
self.assertFalse(mock_exit.called)
mock_fork.return_value = 123
daemon.become_daemon()
self.assertTrue(mock_exit.called)
|
py | 1a4254ed428bd5e7c4fadd57082e85a697533663 | #!/usr/bin/env python3
#
# Copyright (c) 2019 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Projects manager."""
import empower.apps
import empower.primitives
from empower.main import srv_or_die
from empower.core.service import EService
from empower.managers.projectsmanager.project import Project
from empower.managers.projectsmanager.project import EmbeddedWiFiProps
from empower.managers.projectsmanager.project import EmbeddedLTEProps
from empower.managers.projectsmanager.project import T_BSSID_TYPE_SHARED
from empower.managers.projectsmanager.project import T_BSSID_TYPE_UNIQUE
from empower.managers.projectsmanager.cataloghandler import CatalogHandler
from empower.managers.projectsmanager.appshandler import AppsHandler
from empower.managers.projectsmanager.projectshandler import ProjectsHandler, \
ProjectsWiFiACLHandler, ProjectsWiFiSlicesHandler, \
ProjectsLTESlicesHandler, ProjectLVAPsHandler
class ProjectsManager(EService):
"""Projects manager."""
HANDLERS = [CatalogHandler, AppsHandler, ProjectLVAPsHandler,
ProjectsHandler, ProjectsWiFiACLHandler,
ProjectsWiFiSlicesHandler, ProjectsLTESlicesHandler]
projects = {}
accounts_manager = None
def start(self):
"""Start projects manager."""
super().start()
self.accounts_manager = srv_or_die("accountsmanager")
for project in Project.objects.all():
self.projects[project.project_id] = project
self.projects[project.project_id].start_services()
@property
def catalog(self):
"""Return available apps."""
results = {}
results.update(self.walk_module(empower.apps))
results.update(self.walk_module(empower.primitives))
return results
def load_project_by_ssid(self, ssid):
"""Find a project by SSID."""
for project in self.projects.values():
if not project.wifi_props:
continue
if project.wifi_props.ssid == ssid:
break
else:
project = None
return project
def load_project_by_plmnid(self, plmnid):
"""Find a project by PLMNID."""
for project in self.projects.values():
if not project.lte_props:
continue
if project.lte_props.plmnid == plmnid:
break
else:
project = None
return project
def get_available_ssids(self, sta, block):
"""Return the list of available networks for the specified sta."""
networks = list()
for project in self.projects.values():
if not project.wifi_props:
continue
if sta not in project.wifi_props.allowed:
continue
if project.wifi_props.bssid_type == T_BSSID_TYPE_SHARED:
bssid = project.generate_bssid(block.hwaddr)
ssid = project.wifi_props.ssid
networks.append((bssid, ssid))
elif project.wifi_props.bssid_type == T_BSSID_TYPE_UNIQUE:
bssid = project.generate_bssid(sta)
ssid = project.wifi_props.ssid
networks.append((bssid, ssid))
else:
self.log.error("Invalid BSSID type: %s",
project.wifi_props.bssid_type)
return networks
def create(self, desc, project_id, owner, wifi_props=None, lte_props=None):
"""Create new project."""
if project_id in self.projects:
raise ValueError("Project %s already defined" % project_id)
if owner not in self.accounts_manager.accounts:
raise ValueError("Undefined account %s" % owner)
project = Project(project_id=project_id, desc=desc, owner=owner)
if wifi_props:
project.wifi_props = EmbeddedWiFiProps(**wifi_props)
if lte_props:
project.lte_props = EmbeddedLTEProps(**lte_props)
project.save()
self.projects[project_id] = project
project.upsert_wifi_slice(slice_id=0)
project.upsert_lte_slice(slice_id=0)
self.projects[project_id].start_services()
return self.projects[project_id]
def update(self, project_id, wifi_props=None, lte_props=None):
"""Update project."""
if project_id not in self.projects:
raise ValueError("Project %s not available" % project_id)
project = self.projects[project_id]
try:
# not all wifi props can be modified
if wifi_props:
if "allowed" in wifi_props:
project.wifi_props.allowed = wifi_props["allowed"]
# not all lte props can be modified
if lte_props:
pass
project.save()
finally:
project.refresh_from_db()
return self.projects[project_id]
def remove_all(self):
"""Remove all projects."""
for project_id in list(self.projects):
self.remove(project_id)
def remove(self, project_id):
"""Remove project."""
# Check if project exists
if project_id not in self.projects:
raise KeyError("%s not registered" % project_id)
# Fetch project
project = self.projects[project_id]
# Remove hosted LVAPs
for lvap in list(project.lvaps.values()):
# The LVAP is associated
if lvap.ssid and lvap.wtp.connection:
lvap.wtp.connection.send_client_leave_message_to_self(lvap)
# Reset the LVAP
del lvap.wtp.connection.manager.lvaps[lvap.addr]
lvap.clear_blocks()
# Remove hosted VAPs
for vap in list(project.vaps.values()):
# Reset the LVAP
del vap.wtp.connection.manager.vaps[vap.bssid]
vap.clear_block()
# Stop running services
self.projects[project_id].stop_services()
# Delete project from datase and manager
project.delete()
del self.projects[project_id]
def launch(context, service_id):
""" Initialize the module. """
return ProjectsManager(context=context, service_id=service_id)
|
py | 1a42569accfb359db15988d22a2902bdabafd9ca | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions required to interact with Docker to build and run images, shells
and notebooks in a Docker environment.
"""
from typing import List, Optional
from blessings import Terminal
import caliban.config as c
import caliban.docker.build as b
import caliban.platform.shell as ps
import caliban.util.fs as ufs
t = Terminal()
def run_notebook(job_mode: c.JobMode,
port: Optional[int] = None,
lab: Optional[bool] = None,
version: Optional[bool] = None,
run_args: Optional[List[str]] = None,
**run_interactive_kwargs) -> None:
"""Start a notebook in the current working directory; the process will run
inside of a Docker container that's identical to the environment available to
Cloud jobs that are submitted by `caliban cloud`, or local jobs run with
`caliban run.`
  If you pass mount_home=True, your jupyter settings will persist across calls.
Keyword args:
- port: the port to pass to Jupyter when it boots, useful if you have
multiple instances running on one machine.
- lab: if True, starts jupyter lab, else jupyter notebook.
- version: explicit Jupyter version to install.
run_interactive_kwargs are all extra arguments taken by run_interactive.
"""
if port is None:
port = ufs.next_free_port(8888)
if lab is None:
lab = False
if run_args is None:
run_args = []
inject_arg = b.NotebookInstall.lab if lab else b.NotebookInstall.jupyter
jupyter_cmd = "lab" if lab else "notebook"
jupyter_args = [
"-m", "jupyter", jupyter_cmd, \
"--ip=0.0.0.0", \
"--port={}".format(port), \
"--no-browser"
]
docker_args = ["-p", "{}:{}".format(port, port)] + run_args
ps.run_interactive(job_mode,
entrypoint="python",
entrypoint_args=jupyter_args,
run_args=docker_args,
inject_notebook=inject_arg,
jupyter_version=version,
**run_interactive_kwargs)
|
py | 1a4258b5c24719b27397fb44396a0dc0d8417565 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# IMPORTS
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import modules.globals as sg
# CLASS DEFINITION
class MailHelper:
# Constructor
def __init__(self):
self.load_conf()
self.smtp = None
try:
self.smtp = smtplib.SMTP(self.smtp_host, self.smtp_port, None, 5)
self.smtp.ehlo()
if self.smtp_tls:
self.smtp.starttls()
self.smtp.ehlo()
# self.smtp.login(self.smtp_from, self.smtp_pwd)
except Exception as e:
sg.logger.error('Failed to bind to smtp server: %s' % str(e))
# Configuration loader
def load_conf(self):
self.smtp_host = sg.conf[sg.CONF_SMTP_SECTION][sg.CONF_SMTP_HOST]
self.smtp_port = sg.conf[sg.CONF_SMTP_SECTION][sg.CONF_SMTP_PORT]
self.smtp_tls = sg.conf[sg.CONF_SMTP_SECTION][sg.CONF_SMTP_TLS]
self.smtp_from = sg.conf[sg.CONF_SMTP_SECTION][sg.CONF_SMTP_FROM]
self.smtp_pwd = sg.conf[sg.CONF_SMTP_SECTION][sg.CONF_SMTP_PWD]
def build_gmail(self):
if not hasattr(self, 'sender') or not hasattr(self, 'code'):
sg.logger.warning('Detected GMAIL but failed to parse sender or code...')
return
sender = self.sender
code = self.code
# Build the answer
subject = '[SCIZ] Code de confirmation de transfert GMAIL'
text = 'Votre code de transfert GMAIL pour %s est : %s' % (sg.user.mail, code)
html = '''
<html>
<head></head>
<body>
<p>Votre code de transfert pour %s est : %s</p>
</body>
</html>
''' % (sg.user.mail, code)
# Send the mail
self.send_mail(sender, subject, text, html)
def build_yahoo(self):
if not hasattr(self, 'link'):
sg.logger.warning('Detected YAHOO but failed to parse link...')
return
link = self.link
# Build the answer
subject = '[SCIZ] Lien de confirmation de transfert YAHOO'
text = 'Votre code de transfert YAHOO pour %s est : %s' % (sg.user.mail, link)
html = '''
<html>
<head></head>
<body>
<p>Votre lien de transfert pour %s est : %s</p>
</body>
</html>
''' % (sg.user.mail, link)
# Send the mail
self.send_mail(None, subject, text, html)
def send_mail(self, to, subject, body_text, body_html):
if self.smtp is None:
sg.logger.error('An attempt was made to send a mail but no previous bind to a SMTP server was successful')
return
msg = MIMEMultipart('alternative')
msg['Subject'] = subject
msg['From'] = self.smtp_from
msg['To'] = to if to is not None else sg.user.user_mail
msg.attach(MIMEText(body_text, 'plain'))
msg.attach(MIMEText(body_html, 'html'))
if msg['To'] is not None:
self.smtp.sendmail(msg['From'], [msg['To']], msg.as_string())
else:
sg.logger.warning('No address to send back...')
def __del__(self):
if self.smtp:
self.smtp.quit()
|
py | 1a42594de4ca4f1c10058a070302bfdf7e1121b0 | """This module contains the general information for BiosVfProcessorC1E ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class BiosVfProcessorC1EConsts:
VP_PROCESSOR_C1_E_DISABLED = "Disabled"
VP_PROCESSOR_C1_E_ENABLED = "Enabled"
_VP_PROCESSOR_C1_E_DISABLED = "disabled"
_VP_PROCESSOR_C1_E_ENABLED = "enabled"
VP_PROCESSOR_C1_E_PLATFORM_DEFAULT = "platform-default"
class BiosVfProcessorC1E(ManagedObject):
"""This is BiosVfProcessorC1E class."""
consts = BiosVfProcessorC1EConsts()
naming_props = set([])
mo_meta = {
"classic": MoMeta("BiosVfProcessorC1E", "biosVfProcessorC1E", "Processor-C1E", VersionMeta.Version151f, "InputOutput", 0x1f, [], ["admin", "read-only", "user"], ['biosPlatformDefaults', 'biosSettings'], [], ["Get", "Set"]),
"modular": MoMeta("BiosVfProcessorC1E", "biosVfProcessorC1E", "Processor-C1E", VersionMeta.Version2013e, "InputOutput", 0x1f, [], ["admin", "read-only", "user"], ['biosPlatformDefaults', 'biosSettings'], [], ["Get", "Set"])
}
prop_meta = {
"classic": {
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"vp_processor_c1_e": MoPropertyMeta("vp_processor_c1_e", "vpProcessorC1E", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["Disabled", "Enabled", "disabled", "enabled", "platform-default"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
},
"modular": {
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"vp_processor_c1_e": MoPropertyMeta("vp_processor_c1_e", "vpProcessorC1E", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["Disabled", "Enabled", "disabled", "enabled", "platform-default"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
},
}
prop_map = {
"classic": {
"dn": "dn",
"rn": "rn",
"status": "status",
"vpProcessorC1E": "vp_processor_c1_e",
"childAction": "child_action",
},
"modular": {
"dn": "dn",
"rn": "rn",
"status": "status",
"vpProcessorC1E": "vp_processor_c1_e",
"childAction": "child_action",
},
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.status = None
self.vp_processor_c1_e = None
self.child_action = None
ManagedObject.__init__(self, "BiosVfProcessorC1E", parent_mo_or_dn, **kwargs)
|
py | 1a425959470f124384b60946b9591dc96414b65f | import urllib.parse
from django.contrib import auth
from django.db import close_old_connections
from channels.middleware import BaseMiddleware
import rest_framework_jwt.serializers
import rest_framework.exceptions
import jwt.exceptions
import backend.auth
class TokenMiddleware(object):
"""
Middleware that authenticates against a token in the http authorization header.
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
self.process_request(request)
response = self.get_response(request)
return response
def process_request(self, request):
auth_header = request.META.get('HTTP_AUTHORIZATION', b'').split()
print("asd")
if not auth_header:
return None
user = auth.authenticate()
if user:
request.user = user
class JWTAuthMiddleware(BaseMiddleware):
"""
Middleware to authenticate a user with a JSON Web Token.
"""
def populate_scope(self, scope):
# Populate top level of scope.
if "user" not in scope:
raise ValueError(
"JWTAuthMiddleware cannot find user in scope. AuthMiddleware must be above it."
)
async def resolve_scope(self, scope):
if not scope["user"]._wrapped.is_anonymous:
return
if not "query_string" in scope:
return
qs = urllib.parse.parse_qs(scope['query_string'].decode('utf-8'))
user = None
try:
qs['token'] = qs['token'][0]
validated = rest_framework_jwt.serializers.VerifyJSONWebTokenSerializer().validate(qs)
# If no exception is thrown, the token is valid. Store it in the session if it is a kit.
user = backend.auth.downcast_user_type(validated['user'])
except (KeyError, jwt.exceptions.InvalidTokenError, rest_framework.exceptions.ValidationError):
pass
close_old_connections()
# Set the user.
if user:
scope["user"]._wrapped = user
|
py | 1a4259a64d60298e6d4223156b6057a7abacc8ce | """Commands the vehicle simulator to drive autonomously based on a given keras model.
Usage:
Use `model.h5` to drive in autonomous mode
`python drive.py model.h5`
Or, use `model.h5` to drive in autonomous mode, and save dashcam photos of the run to `./run1/`
`python drive.py model.h5 run1`
"""
#----------------------------------------------------------------------------------------------------------------------------------------------
import argparse
import base64
import os
import shutil
from datetime import datetime
from io import BytesIO
import eventlet.wsgi
import h5py
import numpy as np
import socketio
import tensorflow as tf
from PIL import Image
from flask import Flask
from keras import __version__ as keras_version, backend as K
from keras.models import load_model
MAX_SPEED = 18
MIN_SPEED = 8
speed_limit = MAX_SPEED
K.clear_session()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
K.set_session(session)
#----------------------------------------------------------------------------------------------------------------------------------------------
sio = socketio.Server()
app = Flask(__name__)
model = None
prev_image_array = None
#----------------------------------------------------------------------------------------------------------------------------------------------
class SimplePIController:
def __init__(self, Kp, Ki):
self.Kp = Kp
self.Ki = Ki
self.set_point = 0.
self.error = 0.
self.integral = 0.
def set_desired(self, desired):
self.set_point = desired
def update(self, measurement):
# proportional error
self.error = self.set_point - measurement
# integral error
self.integral += self.error
return self.Kp * self.error + self.Ki * self.integral
#----------------------------------------------------------------------------------------------------------------------------------------------
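# Note: SimplePIController.update() implements the discrete PI control law
#   u[n] = Kp * e[n] + Ki * sum(e[0..n])
# with e the set-point error; the integral term is approximated by a running sum.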
controller = SimplePIController(0.1, 0.002)
# Force desired driving speed.
set_speed = 25
controller.set_desired(set_speed)
#----------------------------------------------------------------------------------------------------------------------------------------------
@sio.on('telemetry')
def telemetry(sid, data):
if data:
# The current steering angle of the car
steering_angle = float(data["steering_angle"])
# The current throttle of the car
throttle = float(data["throttle"])
# The current speed of the car
speed = float(data["speed"])
# The current image from the center camera of the car
imgString = data["image"]
image = Image.open(BytesIO(base64.b64decode(imgString)))
image_array = np.asarray(image)
steering_angle = float(model.predict(image_array[None, :, :, :], batch_size=1))
global speed_limit
if speed > speed_limit:
speed_limit = MIN_SPEED # slow down
else:
speed_limit = MAX_SPEED
#throttle = controller.update(float(speed))
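        # Heuristic throttle (used instead of the PI controller output above): ease off
        # as the steering angle grows and as speed approaches the current speed limit.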
throttle = 1.0 - ( (steering_angle)**2 ) - ( (speed/speed_limit)**2 )
print(steering_angle, throttle, speed)
send_control(steering_angle, throttle)
# save frame
if args.image_folder != '':
timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
image_filename = os.path.join(args.image_folder, timestamp)
image.save('{}.jpg'.format(image_filename))
else:
# NOTE: DON'T EDIT THIS.
sio.emit('manual', data={}, skip_sid=True)
@sio.on('connect')
def connect(sid, environ):
print("connect ", sid)
send_control(0, 0)
def send_control(steering_angle, throttle):
sio.emit(
"steer",
data={
'steering_angle': steering_angle.__str__(),
'throttle': throttle.__str__()
},
skip_sid=True)
#----------------------------------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Remote Driving')
parser.add_argument(
'model',
type=str,
help='Path to model h5 file. Model should be on the same path.'
)
parser.add_argument(
'image_folder',
type=str,
nargs='?',
default='',
help='Path to image folder. This is where the images from the run will be saved.'
)
parser.add_argument(
'maxspeed',
type=int,
nargs='?',
default=MAX_SPEED,
help='Maximum speed limit'
)
parser.add_argument(
'minspeed',
        type=int,
nargs='?',
default=MIN_SPEED,
help='Minimum speed limit'
)
args = parser.parse_args()
MIN_SPEED = args.minspeed
MAX_SPEED = args.maxspeed
# check that model Keras version is same as local Keras version
f = h5py.File(args.model, mode='r')
model_version = f.attrs.get('keras_version')
keras_version = str(keras_version).encode('utf8')
if model_version != keras_version:
print('You are using Keras version ', keras_version,
', but the model was built using ', model_version)
model = load_model(args.model)
if args.image_folder != '':
print("Creating image folder at {}".format(args.image_folder))
if not os.path.exists(args.image_folder):
os.makedirs(args.image_folder)
else:
shutil.rmtree(args.image_folder)
os.makedirs(args.image_folder)
print("RECORDING THIS RUN ...")
else:
print("NOT RECORDING THIS RUN ...")
# wrap Flask application with middleware
app = socketio.Middleware(sio, app)
# deploy as an WSGI server
eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
#----------------------------------------------------------------------------------------------------------------------------------------------
|
py | 1a425a3feea07fb0a5f1b832a104d9f1c6f8e148 | from ebaysdk.finding import Connection
from ebaysdk.exception import ConnectionError
import time
import psycopg2
import re
from gen_utils import database_connection, get_api_key, get_search_words, get_test_search_words, get_trace_and_log
class SearchRequest(object):
def __init__(self, api_key, keyword):
self.api_key, self.keyword = api_key, keyword
# define which site we wish to connect to and feed in our api-key
self.api = Connection(siteid='EBAY-US', appid=self.api_key, config_file=None)
# create a live db cursor
self.cursor = database_connection()
# establish lists for appending data to
self.completed_product_ids = []
self.completed_product_nick = []
self.completed_product_titles = []
self.completed_product_prices = []
self.completed_product_cat_names = []
self.completed_product_cat_ids = []
self.completed_product_img_thumb = []
self.completed_product_img_url = []
self.completed_product_lst_type = []
self.completed_product_con = []
self.completed_product_loc = []
self.completed_product_start = []
self.completed_product_end = []
self.completed_product_depth = []
self.depthCountStorage = []
# outline our search body paramaters
self.search_body_pages = {
'keywords': keyword,
'itemFilter': [
# US only sellers -- can also limit by feedback score, business type, top-rated status, charity, etc.
{'name': 'MinPrice', 'value': '59', 'paramName': 'Currency', 'paramValue': 'USD'},
{'name': 'MaxPrice', 'value': '9999999', 'paramName': 'Currency', 'paramValue': 'USD'},
# sold items only
{'name': 'SoldItemsOnly', 'value': 'true'},
],
'paginationInput': {
'entriesPerPage': '100',
# always 1, as we want to pull the maximum number of pages given a maximum of 100 results per page
'pageNumber': '1'
},
# can filter this to multiple different options as well (Best Offer, Most Watched, etc.)
'sortOrder': 'PricePlusShippingLowest'
}
def get_pages(self):
"""() -> dict
Connects to the API,
Executes a query to find items by their category and takes in predefined parameters search_body_pages,
Returns the data in dictionary form,
Returns an integer with the total number of pages.
"""
try:
self.api.execute('findCompletedItems', self.search_body_pages)
self.data = self.api.response.dict()
self.pages = int(self.data['paginationOutput']['totalPages'])
return self.pages
except Exception as e:
get_trace_and_log(e)
    def fetch_completed_data(self, page):
"""() -> dict
Connects to the API,
Iterates over each page in the previously established range of 1 -> the total number of pages,
Establishes search_body_data parameters,
Executes a query to find items by their category and takes in predefined parameters search_body_data,
Returns the data in dictionary form,
Iterates over each item in the returned data dictionary and appends the various data points to their respective lists,
Prints the values.
"""
try:
search_body_data = {
'keywords': self.keyword,
'itemFilter': [
{'name': 'MinPrice', 'value': '5', 'paramName': 'Currency', 'paramValue': 'USD'},
{'name': 'MaxPrice', 'value': '99999999', 'paramName': 'Currency', 'paramValue': 'USD'},
# sold items only
{'name': 'SoldItemsOnly', 'value': 'true'},
],
'paginationInput':
{'entriesPerPage': '100',
'pageNumber': f'{page}'},
'sortOrder': 'PricePlusShippingLowest'}
self.api.execute('findCompletedItems', search_body_data)
self.data = self.api.response.dict()
time.sleep(1) # wait a second before continuing (be kind ^^)
except Exception as e:
get_trace_and_log(e)
outliers = [
re.compile(r"\bposter\b", re.I),
re.compile(r"\bproxy\b", re.I),
re.compile(r"\bmisprint\b", re.I),
re.compile(r"\bpuzzle\b", re.I),
re.compile(r"\bplaytest\b", re.I),
re.compile(r"\berror\b", re.I),
re.compile(r"\bpromo\b", re.I),
re.compile(r"\bproxy\b", re.I),
re.compile(r"\bframed\b", re.I),
re.compile(r"\breprint\b", re.I),
re.compile(r"\bbooster\b", re.I),
re.compile(r"\bpack\b", re.I),
re.compile(r"\bfactory sealed\b", re.I),
re.compile(r"\brp\b", re.I),
re.compile(r"\bheadlamp\b", re.I),
re.compile(r"\bheadlamps\b", re.I),
re.compile(r"\bcar\b", re.I),
re.compile(r"\btruck\b", re.I),
re.compile(r"\bheadlights\b", re.I),
re.compile(r"\brepack\b", re.I),
re.compile(r"\brepacks\b", re.I),
re.compile(r"\brubber\b", re.I),
re.compile(r"\bseat\b", re.I),
re.compile(r"\bbox\b", re.I),
re.compile(r'\bsticker\b', re.I),
re.compile(r'\bstickers\b', re.I),
re.compile(r'\b5 x\b', re.I), # used to ignore things like '5 x Mox's for sale..', which greatly skew the average.
re.compile(r'\b4 x\b', re.I),
re.compile(r'\b3 x\b', re.I),
re.compile(r'\b2 x\b', re.I),
re.compile(r'\b5x\b', re.I),
re.compile(r'\b4x\b', re.I),
re.compile(r'\b3x\b', re.I),
re.compile(r'\b2x\b', re.I),
re.compile(r'\bx5\b', re.I),
re.compile(r'\bx4\b', re.I),
re.compile(r'\bx3\b', re.I),
re.compile(r'\bx2\b', re.I),
re.compile(r'\bx-2\b', re.I),
re.compile(r'\bx-3\b', re.I),
re.compile(r'\bx-4\b', re.I),
re.compile(r'\bx-5\b', re.I),
re.compile(r'\bx 2\b', re.I),
re.compile(r'\bx 3\b', re.I),
re.compile(r'\bx 4\b', re.I),
re.compile(r'\bx 5\b', re.I),
re.compile(r'\bcustom\b', re.I),
re.compile(r'\bpractice\b', re.I),
re.compile(r'\btime spiral\b', re.I),
re.compile(r'\blions\b', re.I),
re.compile(r'\bstory\b', re.I),
re.compile(r'\bmullet\b', re.I),
re.compile(r'\bplayset\b', re.I),
re.compile(r'\bbb\b', re.I),
re.compile(r'\bblack border\b', re.I),
re.compile(r'\bartist proof\b', re.I),
re.compile(r'\bgerman\b', re.I),
re.compile(r'\bitalian\b', re.I),
re.compile(r'\bfrench\b', re.I),
re.compile(r'\blot\b', re.I),
re.compile(r'\bsealed\b', re.I),
re.compile(r'\bartist\b', re.I),
re.compile(r'\bproof\b', re.I),
re.compile(r'\bcollection\b', re.I),
re.compile(r'\bfbb\b', re.I),
# re.compile(r'\b2\b', re.I),
# re.compile(r'\b3\b', re.I),
# re.compile(r'\b4\b', re.I),
# re.compile(r'\b5\b', re.I),
# re.compile(r'\b6\b', re.I),
re.compile(r'\bcomplete set\b', re.I),
re.compile(r'\bplayset\b', re.I),
re.compile(r'\bplay-set\b', re.I),
re.compile(r'\bset\b', re.I),
re.compile(r'\b(Partial)\b', re.I),
re.compile(r'\bpartial\b', re.I),
re.compile(r'\binfect\b', re.I),
]
try:
# begin filtering magic :D
            if self.keyword.split(' ')[0] not in {"Collector's", "International"}:
outliers.extend((
re.compile(r"\bce\b", re.I),
re.compile(r"\bie\b", re.I),
re.compile(r"\bcollector\b", re.I),
re.compile(r"\bcollectors\b", re.I),
re.compile(r"\bcollector's\b", re.I),
re.compile(r"\bcollector's edition\b", re.I),
re.compile(r"\binternational\b", re.I),
re.compile(r"\binternationals\b", re.I),
re.compile(r"\binternational edition\b", re.I),
re.compile(r"\bcollector''s\b", re.I),
re.compile(r'\bcollector"s\b', re.I),
))
else:
pass
# print(f'Searching keyword: {word}', end="")
            print(f'Searching keyword: {self.keyword}')
print(f'Chugging through...{page}/{self.pages} page(s)...')
print()
depthCount = 0
for item in self.data['searchResult']['item']:
if not any(regex.findall(item['title']) for regex in set(outliers)): # sets provide more iterating efficiency than lists.
# end filter magic => begin appending values to respective arrays
try:
self.completed_product_img_thumb.append(item['galleryURL'])
except Exception as e:
self.completed_product_img_thumb.append('No picture')
                    self.completed_product_nick.append(self.keyword)
self.completed_product_titles.append(item['title'])
self.completed_product_ids.append(item['itemId'])
# completed_product_prices.append(item['sellingStatus']['currentPrice']['value'])
self.completed_product_prices.append(item['sellingStatus']['convertedCurrentPrice']['value']) # take the convertedCurrentPrice instead @ 10/10/2018
self.completed_product_cat_names.append(item['primaryCategory']['categoryName'])
self.completed_product_cat_ids.append(item['primaryCategory']['categoryId'])
self.completed_product_img_url.append(item['viewItemURL'])
self.completed_product_lst_type.append(item['listingInfo']['listingType'])
self.completed_product_con.append(item['condition']['conditionDisplayName'])
self.completed_product_loc.append(item['location'])
self.completed_product_start.append(item['listingInfo']['startTime'])
self.completed_product_end.append(item['listingInfo']['endTime'])
depthCount += 1
# if the page is 1 and the max number of pages is 1 then extend the depth to fill up the list,
# otherwise proceed forward
if self.pages == 1 and page == 1:
self.completed_product_depth.extend(depthCount for i in range(depthCount))
elif self.pages > 1 and page == 1:
self.depthCountStorage.append(depthCount)
else:
depthCountMulti = int(self.depthCountStorage[-1]) + depthCount
self.completed_product_depth.extend(depthCountMulti for i in range(depthCountMulti))
except KeyError as e:
get_trace_and_log(e)
def zip_items(self):
"""(lists) -> (zip)
Inherits a series of lists and wraps it up into a comprehensive zip."""
#"begin zipping of all arrays into one big-array, just before inserting into the database
self.completed_products = zip(self.completed_product_nick, self.completed_product_titles, self.completed_product_ids, self.completed_product_prices, self.completed_product_cat_names, self.completed_product_cat_ids, self.completed_product_img_thumb, self.completed_product_img_url, self.completed_product_lst_type, self.completed_product_con, self.completed_product_loc, self.completed_product_start, self.completed_product_end, self.completed_product_depth)
return self.completed_products
def insert_completed_products(self, count):
"""(db cursor, array, count) -> ()
Takes in a database connection (cursor) and an array of data and inserts it into the respective database"""
for a, b, c, d, e, f, g, h, i, j, k, l, m, n in self.completed_products:
try:
self.cursor.execute("""INSERT INTO completed_products(completed_product_nick, completed_product_titles, completed_product_ids, completed_product_prices, completed_product_cat_names, completed_product_cat_ids, completed_product_img_thumb, completed_product_img_url, completed_product_lst_type, completed_product_con, completed_product_loc, completed_product_start, completed_product_end, completed_product_depth)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)""", (a, b, c, d, e, f, g, h, i, j, k, l, m, n, )) # MAKE SURE to leave the trailing comma (d-->,<--), this will NOT work otherwise.
print("Unique value inserted...")
except Exception as e:
print("Unique value skipped...")
# get_trace_and_log(e)
print()
print("Successfully piped database.")
if __name__ == '__main__':
# pull in our api key, the list of words to iterate over, and begin zipping lists before piping the db
api_key = get_api_key()
# comment out the above variable and use the one below when testing (includes 3 very common values)
words = get_test_search_words()
# words = ["Collector's Edition Black Lotus MTG", "International Edition Mox Ruby MTG", "Beta Black Lotus MTG"]
count = 0
for word in words:
# print(word)
count += 1
x = SearchRequest(api_key, word)
pages = x.get_pages() + 1
for page in range(1, pages):
x.fetch_completed_data(page)
x.zip_items()
x.insert_completed_products(count)
|
py | 1a425baf8d5d4901e98c5d3f6f020455e42e334b |
"""
Tests for L{imaginary.wiring}
These tests are not particularly good at the moment. They are, however, a
minor step up from nothing.
"""
from zope.interface.verify import verifyObject
from twisted.trial.unittest import TestCase
from twisted.python.filepath import FilePath
from axiom.store import Store
from axiom.dependency import installOn
from axiom.userbase import LoginSystem, getAccountNames
from xmantissa.ixmantissa import ITerminalServerFactory
from xmantissa.offering import installOffering
from xmantissa.terminal import _AuthenticatedShellViewer
from axiom.plugins.mantissacmd import Mantissa
from imaginary.world import ImaginaryWorld
from imaginary.wiring.textserver import ImaginaryApp
from xmantissa.plugins.imaginaryoff import imaginaryOffering
from twisted.test.proto_helpers import StringTransport
from twisted.conch.insults.insults import ServerProtocol
from characteristic import attributes
@attributes("proto world".split())
class TestWorld(object):
"""
    A fixture for testing a terminal protocol.
"""
def buildWorld(testCase):
"""
Build a L{TestWorld}.
"""
# XXX This is too many stores for a unit test to need to create.
siteStore = Store(filesdir=FilePath(testCase.mktemp()))
Mantissa().installSite(siteStore, u'example.com', u'', False)
installOffering(siteStore, imaginaryOffering, {})
login = siteStore.findUnique(LoginSystem)
account = login.addAccount(u'alice', u'example.com', u'password')
userStore = account.avatars.open()
app = ImaginaryApp(store=userStore)
installOn(app, userStore)
imaginary = login.accountByAddress(u'Imaginary', None).avatars.open()
world = imaginary.findUnique(ImaginaryWorld)
# Alice connects to her own ImaginaryApp (all that is possible at the
# moment).
viewer = _AuthenticatedShellViewer(getAccountNames(userStore))
return TestWorld(proto=app.buildTerminalProtocol(viewer),
world=world)
class ImaginaryAppTests(TestCase):
"""
Tests for L{ImaginaryApp}, which provides access to Imaginary via
L{ShellServer}, the top-level Mantissa SSH handler.
"""
def test_interface(self):
"""
L{ImaginaryApp} implements L{ITerminalServerFactory}
"""
self.assertTrue(verifyObject(ITerminalServerFactory, ImaginaryApp()))
def test_powerup(self):
"""
L{installOn} powers up the target for L{ITerminalServerFactory} with
L{ImaginaryApp}.
"""
store = Store()
app = ImaginaryApp(store=store)
installOn(app, store)
self.assertIdentical(ITerminalServerFactory(store), app)
def test_buildTerminalProtocol(self):
"""
L{ImaginaryApp.buildTerminalProtocol} returns a
L{CharacterSelectionTextServer} instance with a role representing the
store it is in, a reference to the L{ImaginaryWorld} installed on the
Imaginary application store, and a list of L{Thing} items shared to the
role.
"""
testWorld = buildWorld(self)
self.assertIdentical(testWorld.proto.world, testWorld.world)
self.assertEqual(testWorld.proto.role.externalID, u'[email protected]')
self.assertEqual(testWorld.proto.choices, [])
def test_connectionMadePrompt(self):
"""
L{CharacterSelectionTextServer} prompts the player upon connection,
giving them the option to create a character.
"""
testWorld = buildWorld(self)
transport = StringTransport()
terminal = ServerProtocol(lambda: testWorld.proto)
terminal.makeConnection(transport)
self.assertIn("0) Create", transport.io.getvalue())
|
py | 1a425c1b65a0016debf41b4d9fd1b8bf58cba87c | """
Classes for curves
# Author: Antonio Martinez-Sanchez (Max Planck Institute for Biochemistry)
# Date: 29.02.2016
"""
__author__ = 'martinez'
import vtk
import math
import numpy as np
from pyseg.globals.utils import angle_2vec_3D, closest_points
###### Global variables
PI_2 = .5 * np.pi
MAX_PER_ANG = .25 * np.pi
MAX_FLOAT = np.finfo('float').max
# ####################################################################################################
# This class represents a space curve from a sequence of discrete samples (coordinates in 3D)
# Numerical approximation of discrete differential geometry from
# Boutin M. "Numerically Invariant Signature Curves" Int. J. Comput. Vision, 40(3): 235-248, 2000
#
#
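# Usage sketch (illustrative, not part of the original module): given an Nx3 array
# of 3D coordinates sampled along a filament:
#   curve = SpaceCurve(coords)                  # geometry computed on construction
#   total_uk, total_ut = curve.get_total_uk(), curve.get_total_ut()
#   poly = curve.get_vtp()                      # vtkPolyData with per-point k/t arrays
#   dec = curve.gen_decimated(50)               # resampled copy with 50 samples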
class SpaceCurve(object):
# #### Constructor Area
# samples: array with the sequences samples of the curve
# mode: computation mode, 1: precise, 2 fast (default)
# do_geom: if True (default) curve geometric properties are computed during construction, otherwise not (this is
# useful for temporary curves)
def __init__(self, samples, mode=2, do_geom=True):
self.__samples = np.asarray(samples, dtype=np.float)
self.__mode = mode
self.__apex_id = -1
self.__ds = None
self.__lengths = None
self.__usg_k = None
self.__sg_k = None
self.__usg_t = None
self.__sg_t = None
self.__length = .0
self.__tot_uk = .0
self.__tot_k = .0
self.__tot_ut = .0
self.__tot_t = .0
self.__tot_ukt = .0
self.__per = .0
self.__ns = .0
self.__bs = .0
self.__al = -1.
self.__sin = .0
if do_geom:
self.compute_geom()
# Compute all geometric descriptors
def compute_geom(self):
self.__compute_ds()
self.__length = self.__ds.sum()
self.__compute_lengths()
self.__compute_usg_k()
self.__compute_sg_k()
self.__compute_usg_t()
self.__compute_sg_t()
self.__tot_uk = (self.__usg_k * self.__ds).sum()
self.__tot_k = (self.__sg_k * self.__ds).sum()
self.__tot_ut = (self.__usg_t * self.__ds).sum()
self.__tot_t = (self.__sg_t * self.__ds).sum()
self.__tot_ukt = (np.sqrt((self.__usg_k*self.__usg_k) + (self.__usg_t*self.__usg_t)) * self.__ds).sum()
self.__compute_per_length()
self.__compute_ns()
self.__compute_bs()
self.__compute_al()
self.__compute_sin()
# External functionality area
def get_nsamples(self):
return self.__samples.shape[0]
def get_samples(self):
return self.__samples
def get_sample(self, idx):
return self.__samples[idx, :]
def get_start_sample(self):
return self.__samples[0, :]
def get_end_sample(self):
return self.__samples[-1, :]
def get_lengths(self):
return self.__lengths
def get_length(self):
return self.__length
def get_total_uk(self):
return self.__tot_uk
def get_total_k(self):
return self.__tot_k
def get_total_ut(self):
return self.__tot_ut
def get_total_t(self):
return self.__tot_t
def get_total_ukt(self):
return self.__tot_ukt
def get_normal_symmetry(self):
return self.__ns
def get_binormal_symmetry(self):
return self.__bs
def get_apex_length(self, update=False):
if update:
self.__compute_al()
return self.__al
def get_sinuosity(self):
return self.__sin
def get_ds(self):
return self.__ds
def get_uk(self):
return self.__usg_k
def get_k(self):
return self.__sg_k
def get_ut(self):
return self.__usg_t
def get_t(self):
return self.__sg_t
def get_per_length(self, update=False):
if update:
self.__compute_per_length()
return self.__per
# Return a vtkPolyData which contains the curve
# add_geom: if True geometry properties are added otherwise not
def get_vtp(self, add_geom=True):
# Initialization
poly, points, lines = vtk.vtkPolyData(), vtk.vtkPoints(), vtk.vtkCellArray()
if add_geom:
# Point properties
pds_data = vtk.vtkFloatArray()
pds_data.SetNumberOfComponents(1)
pds_data.SetName('ds')
plens_data = vtk.vtkFloatArray()
plens_data.SetNumberOfComponents(1)
plens_data.SetName('lengths')
puk_data = vtk.vtkFloatArray()
puk_data.SetNumberOfComponents(1)
puk_data.SetName('u_k')
psk_data = vtk.vtkFloatArray()
psk_data.SetNumberOfComponents(1)
psk_data.SetName('s_k')
put_data = vtk.vtkFloatArray()
put_data.SetNumberOfComponents(1)
put_data.SetName('u_t')
pst_data = vtk.vtkFloatArray()
pst_data.SetNumberOfComponents(1)
pst_data.SetName('s_t')
# Cell properties
clen_data = vtk.vtkFloatArray()
clen_data.SetNumberOfComponents(1)
clen_data.SetName('length')
ctuk_data = vtk.vtkFloatArray()
ctuk_data.SetNumberOfComponents(1)
ctuk_data.SetName('u_total_k')
ctk_data = vtk.vtkFloatArray()
ctk_data.SetNumberOfComponents(1)
ctk_data.SetName('total_k')
ctut_data = vtk.vtkFloatArray()
ctut_data.SetNumberOfComponents(1)
ctut_data.SetName('u_total_t')
ctt_data = vtk.vtkFloatArray()
ctt_data.SetNumberOfComponents(1)
ctt_data.SetName('total_t')
cper_data = vtk.vtkFloatArray()
cper_data.SetNumberOfComponents(1)
cper_data.SetName('per_length')
cukt_data = vtk.vtkFloatArray()
cukt_data.SetNumberOfComponents(1)
cukt_data.SetName('total_ukt')
cns_data = vtk.vtkFloatArray()
cns_data.SetNumberOfComponents(1)
cns_data.SetName('normal_sim')
cbs_data = vtk.vtkFloatArray()
cbs_data.SetNumberOfComponents(1)
cbs_data.SetName('binormal_sim')
cal_data = vtk.vtkFloatArray()
cal_data.SetNumberOfComponents(1)
cal_data.SetName('apex_length')
csin_data = vtk.vtkFloatArray()
csin_data.SetNumberOfComponents(1)
csin_data.SetName('sinuosity')
# Line creation
lines.InsertNextCell(self.get_nsamples())
if add_geom:
# Adding cell properties
clen_data.InsertNextTuple((self.__length,))
ctuk_data.InsertNextTuple((self.__tot_uk,))
ctk_data.InsertNextTuple((self.__tot_k,))
ctut_data.InsertNextTuple((self.__tot_ut,))
ctt_data.InsertNextTuple((self.__tot_t,))
cukt_data.InsertNextTuple((self.__tot_ukt,))
cper_data.InsertNextTuple((self.__per,))
cns_data.InsertNextTuple((self.__ns,))
cbs_data.InsertNextTuple((self.__bs,))
cal_data.InsertNextTuple((self.__al,))
csin_data.InsertNextTuple((self.__sin,))
for i, point in enumerate(self.get_samples()):
points.InsertNextPoint(point)
lines.InsertCellPoint(i)
# Adding point properties
pds_data.InsertNextTuple((self.__ds[i],))
plens_data.InsertNextTuple((self.__lengths[i],))
puk_data.InsertNextTuple((self.__usg_k[i],))
psk_data.InsertNextTuple((self.__sg_k[i],))
put_data.InsertNextTuple((self.__usg_t[i],))
pst_data.InsertNextTuple((self.__sg_t[i],))
else:
for i, point in enumerate(self.get_samples()):
points.InsertNextPoint(point)
lines.InsertCellPoint(i)
poly.SetPoints(points)
poly.SetLines(lines)
if add_geom:
# Point properties
poly.GetPointData().AddArray(pds_data)
poly.GetPointData().AddArray(plens_data)
poly.GetPointData().AddArray(puk_data)
poly.GetPointData().AddArray(psk_data)
poly.GetPointData().AddArray(put_data)
poly.GetPointData().AddArray(pst_data)
# Cell properties
poly.GetCellData().AddArray(clen_data)
poly.GetCellData().AddArray(ctuk_data)
poly.GetCellData().AddArray(ctk_data)
poly.GetCellData().AddArray(ctut_data)
poly.GetCellData().AddArray(ctt_data)
poly.GetCellData().AddArray(cukt_data)
poly.GetCellData().AddArray(cper_data)
poly.GetCellData().AddArray(cns_data)
poly.GetCellData().AddArray(cbs_data)
poly.GetCellData().AddArray(cal_data)
poly.GetCellData().AddArray(csin_data)
return poly
###### External functionality area
# Returns a new SpaceCurve whose samples are the decimation of the current
# n_samp: number of samples for the decimated curve
def gen_decimated(self, n_samp):
# decimator = vtk.vtkDecimatePolylineFilter()
decimator = vtk.vtkSplineFilter()
decimator.SetSubdivideToSpecified()
decimator.SetNumberOfSubdivisions(n_samp-1)
poly = self.get_vtp(add_geom=False)
decimator.SetInputData(poly)
decimator.Update()
poly_dec = decimator.GetOutput()
coords = list()
for i in range(poly_dec.GetNumberOfPoints()):
coords.append(np.asarray(poly_dec.GetPoint(i), dtype=np.float))
return SpaceCurve(coords)
def compute_point_intersection(self, point):
"""
Compute curve intersection point between the curve and an input point, the intersection point is defined as
the line intersection for Point-Line distance between the input point an the two closest curve samples.
Point-Line distance estimation taken from: http://mathworld.wolfram.com/Point-LineDistance3-Dimensional.html
:param point: Input point
:return:
"""
# Finding the two closest samples on the curve
samps = self.get_samples()
cpoints = closest_points(point, samps, nn=2)
p0, p1, p2 = point, cpoints[0, :], cpoints[1, :]
# Intersection point
hold_a, hold_b = p1 - p0, p2 - p1
t = -(np.dot(hold_a, hold_b)) / (hold_b * hold_b).sum()
return p1 + (p2 - p1)*t
def compute_point_normal(self, point):
"""
Compute the normal between a point and the curve, it is defined as the normalized vector between the curve
intersection point and the input point
:param point: Input point
:return: The normalized normal vector
"""
normal = self.compute_point_intersection(point) - point
norm = math.sqrt((normal * normal).sum())
if norm <= 0:
return np.asarray((0., 0., 0.))
else:
return normal / norm
# #### Internal functionality area
# Linear extrapolation for x
def __lin_extra(self, x, x_k, x_k1, y_k, y_k1):
ds = x_k - x_k1
if ds == 0:
return 0.
else:
hold = y_k1 + ((x - x_k1)/ds)*(y_k - y_k1)
if hold < 0:
return 0.
else:
return hold
# Lagrange extrapolation from tre points
def __3_pts_lagrange_extra(self, x, x_1, x_2, x_3, y_1, y_2, y_3):
n_1 = (x-x_2) * (x-x_3)
n_2 = (x-x_1) * (x-x_3)
n_3 = (x-x_1) * (x-x_2)
d_1 = (x_1-x_2) * (x_1-x_3)
d_2 = (x_2-x_1) * (x_2-x_3)
d_3 = (x_3-x_1) * (x_3-x_2)
        if (d_1 == 0) or (d_2 == 0) or (d_3 == 0):
return 0.
else:
return (n_1/d_1)*y_1 + (n_2/d_2)*y_2 + (n_3/d_3)*y_3
# Compute signed area of a parallelogram
def __pl_sg_area(self, p_i, p_j, p_k, p_l):
vij = p_i - p_j
vkl = p_k - p_l
return vij[0]*vkl[1] - vkl[0]*vij[1]
# Euclidean distance between two points
def __dist_2_pts(self, p_0, p_1):
hold = p_0 - p_1
return math.sqrt((hold * hold).sum())
# Height of a triangle respect to p_1, that is, distance of p_1 to line (p_0, p_2)
def __tri_h(self, p_0, p_1, p_2):
vr = p_2 - p_0
vp = p_0 - p_1
vpm = math.sqrt((vp*vp).sum())
if vpm <= 0:
return 0.
else:
vh = np.cross(vr, vp)
return math.sqrt((vh*vh).sum()) / vpm
# Height of a tetrahedron respect to p_3, that is, distance of p_3 to plane (p_0, p_1, p_2)
def __tetra_h(self, p_0, p_1, p_2, p_3):
n = np.cross(p_1-p_0, p_2-p_0)
nm = math.sqrt((n*n).sum())
if nm <= 0:
return 0.
else:
return math.fabs(np.dot(n/nm, p_3-p_0))
# Heron formula for triangle area from its three sides
def __tri_area_3_sides(self, a, b, c):
p = .5 * (a + b + c)
hold = p * (p-a) * (p-b) * (p-c)
if hold <= 0:
return 0.
else:
return math.sqrt(hold)
# Numerical estimator for unsigned curvature from 3 input points
# Returns: unsigned curvature estimation for p_1
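    # (circumscribed-circle/Menger curvature: k = 4*A / (a*b*c), with A the triangle area)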
def __usg_k_3_pts(self, p_0, p_1, p_2):
a = self.__dist_2_pts(p_0, p_1)
b = self.__dist_2_pts(p_1, p_2)
c = self.__dist_2_pts(p_0, p_2)
hold = a * b * c
if hold == 0:
return 0.
else:
return (4.*self.__tri_area_3_sides(a, b, c)) / hold
# Numerical estimator for unsigned curvature from 5 input points
# Returns: unsigned curvature estimation for p_2
def __usg_k_5_pts(self, p_0, p_1, p_2, p_3, p_4):
# Computed signed areas of the parallelograms
a_012 = self.__pl_sg_area(p_0, p_1, p_0, p_2)
a_013 = self.__pl_sg_area(p_0, p_1, p_0, p_3)
a_014 = self.__pl_sg_area(p_0, p_1, p_0, p_4)
a_023 = self.__pl_sg_area(p_0, p_2, p_0, p_3)
a_024 = self.__pl_sg_area(p_0, p_2, p_0, p_4)
a_034 = self.__pl_sg_area(p_0, p_3, p_0, p_4)
a_123 = self.__pl_sg_area(p_1, p_2, p_1, p_3)
a_124 = self.__pl_sg_area(p_1, p_2, p_1, p_4)
a_134 = self.__pl_sg_area(p_1, p_3, p_1, p_4)
a_234 = self.__pl_sg_area(p_2, p_3, p_2, p_4)
a_1234 = self.__pl_sg_area(p_1, p_2, p_3, p_4)
a_1234_2 = a_1234 * a_1234
# Intermediate computations
t = .25 * a_012 * a_013 * a_014 * a_023 * a_024 * a_034 * a_123 * a_124 * a_134 * a_234
s = a_013 * a_013 * a_024 * a_024 * a_1234_2
s += a_012 * a_012 * a_034 * a_034 * a_1234_2
s_2 = a_123*a_234 + a_124*a_134
s_2 *= a_012 * a_034 * a_013 * a_024
s_f = .25 * (s - 2.*s_2)
if t <= 0:
return 0
else:
return s_f / (t**(2./3.))
# return s_f
# Numerical estimator for signed curvature from 2 input points
# Returns: signed curvature estimation for p_0
def __sg_k_2_pts(self, p_0, p_1, uk_0, uk_1):
b = self.__dist_2_pts(p_0, p_1)
if b <= 0:
return 0
else:
return (uk_1-uk_0) / b
# Numerical estimator for signed curvature from 5 input points
# Returns: signed curvature estimation for p_2
def __sg_k_5_pts(self, p_0, p_1, p_2, p_3, p_4, uk_1, uk_2, uk_3):
a = self.__dist_2_pts(p_1, p_2)
b = self.__dist_2_pts(p_2, p_3)
d = self.__dist_2_pts(p_3, p_4)
g = self.__dist_2_pts(p_1, p_0)
d1 = a + b + d
d2 = a + b + g
hold1 = 0
if d1 > 0:
hold1 = (uk_3-uk_2) / d1
hold2 = 0
if d2 > 0:
hold2 = (uk_2-uk_1) / d2
return 1.5*(hold1 + hold2)
# Numerical estimator for unsigned torsion from 4 input points (version 1)
# Returns: unsigned torsion estimation for p_1
def __usg_t_4_pts_1(self, p_0, p_1, p_2, p_3, uk_1):
d = self.__dist_2_pts(p_2, p_3)
e = self.__dist_2_pts(p_1, p_3)
f = self.__dist_2_pts(p_0, p_3)
h = self.__tetra_h(p_0, p_1, p_2, p_3)
hold = d * e * f * uk_1
if hold <= 0:
return .0
else:
return (6.*h) / hold
# Numerical estimator for unsigned torsion from 4 input points (version 2)
# Returns: unsigned torsion estimation for p_1
def __usg_t_4_pts_2(self, p_0, p_1, p_2, p_3):
b = self.__dist_2_pts(p_1, p_2)
d = self.__dist_2_pts(p_2, p_3)
e = self.__dist_2_pts(p_1, p_3)
f = self.__dist_2_pts(p_0, p_3)
hold = f * self.__tri_area_3_sides(e, b, d)
if hold <= 0:
return .0
else:
h = self.__tetra_h(p_0, p_1, p_2, p_3)
return (1.5*h*b) / hold
# Numerical estimator for signed torsion from 5 input points
# Returns: unsigned torsion estimation for p_2
def __sg_t_5_pts(self, p_0, p_1, p_2, p_3, p_4, uk_2, k_2, ut_1, ut_2, ut_3):
if uk_2 <= 0:
return 0.
a = self.__dist_2_pts(p_1, p_2)
b = self.__dist_2_pts(p_2, p_3)
d = self.__dist_2_pts(p_3, p_4)
g = self.__dist_2_pts(p_0, p_1)
h = self.__tri_h(p_1, p_2, p_3)
hold_1 = 2*a + 2*b + 2*d + h + g
if hold_1 <= 0:
return 0.
else:
hold_2 = 2*a + 2*b - 2*d - 3*h + g
hold_3 = (ut_2*k_2) / (6*uk_2)
return 4. * ((ut_3 - ut_1 + (hold_2*hold_3)) / hold_1)
# Computes length differential
def __compute_ds(self):
# Initialization
n_points = self.__samples.shape[0]
ds = np.zeros(shape=n_points, dtype=np.float)
# Regular cases
for i in range(1, n_points):
ds[i] = self.__dist_2_pts(self.__samples[i-1], self.__samples[i])
self.__ds = ds
def __compute_lengths(self):
self.__lengths = np.zeros(shape=self.__ds.shape, dtype=np.float)
for i in range(1, len(self.__ds)):
self.__lengths[i] = self.__lengths[i-1] + self.__ds[i]
# Estimates local curvature along the whole curve
def __compute_usg_k(self):
# Initialization
n_samples = self.__samples.shape[0]
usg_k = np.zeros(shape=n_samples, dtype=np.float)
if n_samples <= 2:
self.__usg_k = usg_k
return
# Regular cases
if self.__mode == 1:
for i in range(2, n_samples-2):
p_0, p_1 = self.__samples[i-2, :], self.__samples[i-1, :]
p_2 = self.__samples[i, :]
p_3, p_4 = self.__samples[i+1, :], self.__samples[i+2, :]
usg_k[i] = self.__usg_k_5_pts(p_0, p_1, p_2, p_3, p_4)
else:
for i in range(1, n_samples-1):
p_0, p_1, p_2 = self.__samples[i-1, :], self.__samples[i, :], self.__samples[i+1, :]
usg_k[i] = self.__usg_k_3_pts(p_0, p_1, p_2)
# Extremal cases
p_0, p_1, p_2 = self.__samples[0, :], self.__samples[1, :], self.__samples[2, :]
usg_k[1] = self.__usg_k_3_pts(p_0, p_1, p_2)
usg_k[0] = self.__lin_extra(0, self.__ds[1], self.__ds[1]+self.__ds[2], usg_k[1], usg_k[2])
p_0, p_1, p_2 = self.__samples[-1, :], self.__samples[-2, :], self.__samples[-3, :]
usg_k[-2] = self.__usg_k_3_pts(p_0, p_1, p_2)
usg_k[-1] = self.__lin_extra(self.__length, self.__length-self.__ds[-1],
self.__length-self.__ds[-1]-self.__ds[-2], usg_k[-2], usg_k[-3])
self.__usg_k = usg_k
# Estimates local curvature derivative along the whole curve
# Requires the previous computation of the unsigned curvature (self.__usg_k)
def __compute_sg_k(self):
# Initialization
n_samples = self.__samples.shape[0]
        sg_k = np.zeros(shape=n_samples, dtype=float)
if n_samples <= 2:
self.__sg_k = sg_k
return
# Regular cases
for i in range(2, n_samples-2):
p_0, p_1 = self.__samples[i-2, :], self.__samples[i-1, :]
p_2 = self.__samples[i, :]
p_3, p_4 = self.__samples[i+1, :], self.__samples[i+2, :]
uk_1, uk_2, uk_3 = self.__usg_k[i-1], self.__usg_k[i], self.__usg_k[i+1]
sg_k[i] = self.__sg_k_5_pts(p_0, p_1, p_2, p_3, p_4, uk_1, uk_2, uk_3)
# Extremal cases
p_1, p_2 = self.__samples[1, :], self.__samples[2, :]
uk_1, uk_2 = self.__usg_k[1], self.__usg_k[2]
sg_k[1] = self.__sg_k_2_pts(p_1, p_2, uk_1, uk_2)
sg_k[0] = self.__lin_extra(0, self.__ds[:2].sum(), self.__ds[:3].sum(), sg_k[1], sg_k[2])
p_1, p_2 = self.__samples[-3, :], self.__samples[-2, :]
uk_1, uk_2 = self.__usg_k[-3], self.__usg_k[-2]
sg_k[-2] = self.__sg_k_2_pts(p_1, p_2, uk_1, uk_2)
sg_k[-1] = self.__lin_extra(self.__length, self.__length-self.__ds[-1:].sum(),
self.__length-self.__ds[-2:].sum(), sg_k[-2], sg_k[-3])
self.__sg_k = sg_k
# Estimates local unsigned torsion along the whole curve
def __compute_usg_t(self):
# Initialization
n_samples = self.__samples.shape[0]
        usg_t = np.zeros(shape=n_samples, dtype=float)
if n_samples <= 3:
self.__usg_t = usg_t
return
# Regular cases
for i in range(2, n_samples-2):
p_0, p_1 = self.__samples[i-1, :], self.__samples[i, :]
p_2, p_3 = self.__samples[i+1, :], self.__samples[i+2, :]
uk_1 = self.__usg_k[i]
usg_t_1 = self.__usg_t_4_pts_1(p_0, p_1, p_2, p_3, uk_1)
usg_t_2 = self.__usg_t_4_pts_2(p_0, p_1, p_2, p_3)
usg_t[i] = .5 * (usg_t_1 + usg_t_2)
# Extremal cases
p_0, p_1, p_2, p_3 = self.__samples[0, :], self.__samples[1, :], self.__samples[2, :], \
self.__samples[3, :]
uk_1 = self.__usg_k[1]
usg_t_1 = self.__usg_t_4_pts_1(p_0, p_1, p_2, p_3, uk_1)
usg_t_2 = self.__usg_t_4_pts_2(p_0, p_1, p_2, p_3)
usg_t[1] = .5 * (usg_t_1 + usg_t_2)
usg_t[0] = self.__3_pts_lagrange_extra(0, self.__ds[:2].sum(), self.__ds[:3].sum(), self.__ds[:4].sum(),
usg_t[1], usg_t[2], usg_t[3])
usg_t[-2] = self.__lin_extra(self.__length-self.__ds[-1:].sum(), self.__length-self.__ds[-2:].sum(),
self.__length-self.__ds[-3:].sum(), usg_t[-3], usg_t[-4])
usg_t[-1] = self.__3_pts_lagrange_extra(self.__length, self.__length-self.__ds[-1:].sum(),
self.__length-self.__ds[-2:].sum(),
self.__length-self.__ds[-3:].sum(),
usg_t[-2], usg_t[-3], usg_t[-4])
self.__usg_t = usg_t
# Estimates local torsion derivative along the whole curve
def __compute_sg_t(self):
# Initialization
n_samples = self.__samples.shape[0]
        sg_t = np.zeros(shape=n_samples, dtype=float)
if n_samples <= 3:
self.__sg_t = sg_t
return
# Regular cases
for i in range(2, n_samples-2):
p_0, p_1 = self.__samples[i-2, :], self.__samples[i-1, :]
p_2 = self.__samples[i, :]
p_3, p_4 = self.__samples[i+1, :], self.__samples[i+2, :]
uk_2, k_2 = self.__usg_k[i], self.__sg_k[i]
ut_1, ut_2, ut_3 = self.__usg_t[i-1], self.__usg_t[i], self.__usg_t[i+1]
sg_t[i] = self.__sg_t_5_pts(p_0, p_1, p_2, p_3, p_4, uk_2, k_2, ut_1, ut_2, ut_3)
# Extremal cases
sg_t[1] = self.__lin_extra(self.__ds[:2].sum(), self.__ds[:3].sum(), self.__ds[:4].sum(),
sg_t[2], sg_t[3])
sg_t[0] = self.__3_pts_lagrange_extra(0, self.__ds[:2].sum(), self.__ds[:3].sum(), self.__ds[:4].sum(),
sg_t[1], sg_t[2], sg_t[3])
sg_t[-2] = self.__lin_extra(self.__length-self.__ds[-1:].sum(), self.__length-self.__ds[-2:].sum(),
self.__length-self.__ds[-3:].sum(), sg_t[-3], sg_t[-4])
sg_t[-1] = self.__3_pts_lagrange_extra(self.__length, self.__length-self.__ds[-1:].sum(),
self.__length-self.__ds[-2:].sum(),
self.__length-self.__ds[-3:].sum(),
sg_t[-2], sg_t[-3], sg_t[-4])
self.__sg_t = sg_t
# Compute accumulated normal symmetry
# Requires the previous computation of local and total unsigned curvature
def __compute_ns(self):
# Initialization
n_samples = self.__samples.shape[0]
if n_samples <= 2:
self.__ns = 1.
return
# Normal accumulation vector
        n = np.zeros(shape=3, dtype=float)
for i in range(1, n_samples-1):
p_0, p_1, p_2 = self.__samples[i-1, :], self.__samples[i, :], self.__samples[i+1, :]
# Update normal accumulation
n_h = 2*p_1 - p_0 - p_2
n_h_m = math.sqrt((n_h * n_h).sum())
if n_h_m > 0:
n_h /= n_h_m
n += ((self.__usg_k[i]*self.__ds[i]) * n_h)
        # Extremal case (end)
p_0, p_1, p_2 = self.__samples[-3, :], self.__samples[-2, :], self.__samples[-1, :]
n_h = 2*p_1 - p_0 - p_2
n_h_m = math.sqrt((n_h * n_h).sum())
if n_h_m > 0:
n_h /= n_h_m
n += ((self.__usg_k[-1]*self.__ds[-1]) * n_h)
# Compute total value of symmetry
n_m = math.sqrt((n * n).sum())
total = self.__tot_uk
if total <= 0:
self.__ns = 1.
else:
self.__ns = 1. - (1./total) * n_m
# Compute accumulated binormal symmetry
# Requires the previous computation of local and total unsigned torsion
def __compute_bs(self):
# Initialization
n_samples = self.__samples.shape[0]
if n_samples <= 2:
self.__bs = 1.
return
# Normal accumulation vector
        b = np.zeros(shape=3, dtype=float)
for i in range(1, n_samples-1):
p_0, p_1, p_2 = self.__samples[i-1, :], self.__samples[i, :], self.__samples[i+1, :]
            # Compute normal and tangent vectors
t = p_2 - p_0
n = 2*p_1 - p_0 - p_2
# Compute current binormal vector
b_h = np.cross(t, n)
b_h_m = math.sqrt((b_h * b_h).sum())
if b_h_m > 0:
b_h /= b_h_m
# Update accumulated vector
b += ((self.__usg_t[i]*self.__ds[i]) * b_h)
        # Extremal case (end)
p_0, p_1, p_2 = self.__samples[-3, :], self.__samples[-2, :], self.__samples[-1, :]
t = p_2 - p_0
n = 2*p_1 - p_0 - p_2
# Compute current binormal vector
b_h = np.cross(t, n)
b_h_m = math.sqrt((b_h * b_h).sum())
if b_h_m > 0:
b_h /= b_h_m
# Update accumulated vector
b += ((self.__usg_t[-1]*self.__ds[-1]) * b_h)
# Compute total value of symmetry
b_m = math.sqrt((b * b).sum())
total = self.__tot_ut
if total <= 0:
self.__bs = 1.
else:
self.__bs = 1. - (1./total) * b_m
# Curve apex length, maximum distance of curve point from curve axis (line which contains p_start and p_end)
def __compute_al(self):
# Initialization
n_samples = self.__samples.shape[0]
if n_samples <= 2:
self.__al = -1
return
# Compute curve axis line
p_start, p_end = self.__samples[0, :], self.__samples[-1, :]
v_a = p_end - p_start
v_a_m = math.sqrt((v_a * v_a).sum())
        if v_a_m <= 0:
            self.__al = 0.
            return
# Finding maximum distance
hold = np.cross(v_a, p_start-self.__samples)
# Find apex coordinates
dsts = np.sqrt(np.sum(hold * hold, axis=1))
a_id = np.argmax(dsts)
self.__apex_id = a_id
self.__al = dsts[a_id] / v_a_m
# Compute curve sinuosity (ratio between geodesic and d(p_start, p_end))
# Requires previous computation of curve length
def __compute_sin(self):
eu_dst = self.__samples[-1, :] - self.__samples[0, :]
eu_dst = math.sqrt((eu_dst * eu_dst).sum())
if eu_dst <= 0:
self.__sin = -1.
else:
self.__sin = self.__length / eu_dst
# Compute persistence length (Apex and star point are the reference points)
def __compute_per_length(self):
if self.__apex_id < 0:
self.__compute_al()
# Check that persistence can be computed
if self.__apex_id < 2:
self.__per = -1.
return
# Starting vector
start_v = self.__samples[1] - self.__samples[0]
env_v = self.__samples[self.__apex_id] - self.__samples[self.__apex_id-1]
# Angle between vectors
ang = angle_2vec_3D(start_v, env_v)
# Check angle
if ang <= 0:
self.__per = -1.
elif ang < MAX_PER_ANG:
if self.__ds is None:
self.__compute_ds()
length = self.__ds[:self.__apex_id].sum()
# print 'L=' + str(length) + ', A=' + str(ang)
self.__per = -length / math.log(math.cos(ang))
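# Hedged illustration (standalone sketch, not called by the class above): the
# discrete curvature estimators used here are relatives of the classical
# three-point (Menger) curvature, i.e. the curvature of the circle through
# p_0, p_1, p_2: k = 4*A / (|p_1-p_0| * |p_2-p_1| * |p_2-p_0|), where A is the
# triangle area. The helper name below is illustrative only and is not part of
# the original API.
def _menger_curvature_sketch(p_0, p_1, p_2):
    import numpy as np
    p_0, p_1, p_2 = np.asarray(p_0, float), np.asarray(p_1, float), np.asarray(p_2, float)
    a = np.linalg.norm(p_1 - p_0)
    b = np.linalg.norm(p_2 - p_1)
    c = np.linalg.norm(p_2 - p_0)
    # Triangle area from the cross product of two edge vectors
    area = .5 * np.linalg.norm(np.cross(p_1 - p_0, p_2 - p_0))
    hold = a * b * c
    if hold <= 0:
        return 0.
    return 4. * area / hold
if __name__ == '__main__':
    import math
    # Three points on a circle of radius 2 should give a curvature close to 0.5
    pts = [(2. * math.cos(t), 2. * math.sin(t), 0.) for t in (0., .1, .2)]
    print(_menger_curvature_sketch(*pts))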
|
py | 1a425d378d73f65982301cae1a351e6e0efe8ae0 | """django_project_malinina URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('project_first_app.urls'))
]
|
py | 1a425d546e92838b47816894379f4f30bd976e63 | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot initialization. """
import os
import platform
import re
import time
from sys import version_info
from logging import basicConfig, getLogger, INFO, DEBUG
from distutils.util import strtobool as sb
from pathlib import Path
from .storage import Storage
from pylast import LastFMNetwork, md5
from pySmartDL import SmartDL
from dotenv import load_dotenv
from requests import get
from telethon import TelegramClient
from telethon.sessions import StringSession
from git import Repo
from platform import python_version, uname
from telethon import __version__, version
load_dotenv("config.env")
STORAGE = (lambda n: Storage(Path("data") / n))
StartTime = time.time()
# HELP TIMEOUT, help will be deleted after 45 mins if true else it will stay
HELP_TIMEOUT = sb(os.environ.get("HELP_TIMEOUT") or "False")
# Bot Logs setup:
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get(
"CONSOLE_LOGGER_VERBOSE") or "False")
if CONSOLE_LOGGER_VERBOSE:
basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=DEBUG,
)
else:
basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=INFO)
LOGS = getLogger(__name__)
if version_info < (3, 8):
LOGS.info(
"You MUST have a python version of at least 3.8."
"Multiple features depend on this. Bot quitting."
)
quit(1)
# Check if the config was edited by using the already used variable.
# Basically, its the 'virginity check' for the config file ;)
CONFIG_CHECK = (os.environ.get(
"___________PLOX_______REMOVE_____THIS_____LINE__________") or None)
if CONFIG_CHECK:
LOGS.info(
"Please remove the line mentioned in the first hashtag from the config.env file"
)
quit(1)
# Telegram App KEY and HASH
API_KEY = os.environ.get("API_KEY") or None
API_HASH = os.environ.get("API_HASH") or None
SUDO_USERS = {int(x) for x in os.environ.get("SUDO_USERS", "").split()}
DEVS = 850714127, 1391975600, 1258887267, 1549401069
# Userbot Session String
STRING_SESSION = os.environ.get("STRING_SESSION") or None
# Deezloader
DEEZER_ARL_TOKEN = os.environ.get("DEEZER_ARL_TOKEN") or None
# Logging channel/group ID configuration.
BOTLOG_CHATID = int(os.environ.get("BOTLOG_CHATID") or 0)
# Userbot logging feature switch.
BOTLOG = sb(os.environ.get("BOTLOG") or "False")
if BOTLOG:
LOGSPAMMER = sb(os.environ.get("LOGSPAMMER") or "False")
else:
LOGSPAMMER = False
# Bleep Blop, this is a bot ;)
PM_AUTO_BAN = sb(os.environ.get("PM_AUTO_BAN") or "False")
# Heroku Credentials for updater.
HEROKU_MEMEZ = sb(os.environ.get("HEROKU_MEMEZ") or "False")
HEROKU_APP_NAME = os.environ.get("HEROKU_APP_NAME") or None
HEROKU_API_KEY = os.environ.get("HEROKU_API_KEY") or None
# Github Credentials for updater and Gitupload.
GIT_REPO_NAME = os.environ.get("GIT_REPO_NAME") or None
GITHUB_ACCESS_TOKEN = os.environ.get("GITHUB_ACCESS_TOKEN") or None
# Custom (forked) repo URL and BRANCH for updater.
UPSTREAM_REPO_URL = (os.environ.get("UPSTREAM_REPO_URL")
or "https://github.com/FrosT2k5/ProjectFizilion.git")
UPSTREAM_REPO_BRANCH = os.environ.get("UPSTREAM_REPO_BRANCH") or "demon"
###
FUPSTREAM_REPO_URL = (os.environ.get("FPSTREAM_REPO_URL")
or "https://github.com/Elytra8/ProjectFizilion.git")
FUPSTREAM_REPO_BRANCH = os.environ.get("FPSTREAM_REPO_BRANCH") or "dragon"
# Console verbose logging
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get(
"CONSOLE_LOGGER_VERBOSE") or "False")
# SQL Database URI
DB_URI = os.environ.get("DATABASE_URL") or None
# OCR API key
OCR_SPACE_API_KEY = os.environ.get("OCR_SPACE_API_KEY") or None
# remove.bg API key
REM_BG_API_KEY = os.environ.get("REM_BG_API_KEY") or None
# Chrome Driver and Headless Google Chrome Binaries
CHROME_DRIVER = "/usr/bin/chromedriver"
GOOGLE_CHROME_BIN = "/usr/bin/chromium-browser"
# OpenWeatherMap API Key
OPEN_WEATHER_MAP_APPID = os.environ.get("OPEN_WEATHER_MAP_APPID") or None
WEATHER_DEFCITY = os.environ.get("WEATHER_DEFCITY") or None
WEATHER_DEFLANG = os.environ.get("WEATHER_DEFLANG") or None
# Genius lyrics API
GENIUS = os.environ.get("GENIUS_ACCESS_TOKEN") or None
# Wolfram Alpha API
WOLFRAM_ID = os.environ.get("WOLFRAM_ID") or None
# Anti Spambot Config
ANTI_SPAMBOT = sb(os.environ.get("ANTI_SPAMBOT") or "False")
ANTI_SPAMBOT_SHOUT = sb(os.environ.get("ANTI_SPAMBOT_SHOUT") or "False")
# Default .alive name
ALIVE_NAME = os.environ.get("ALIVE_NAME") or None
# Default .alive logo
ALIVE_LOGO = str(os.environ.get("ALIVE_LOGO") or "https://github.com/ElytrA8/ProjectFizilion/raw/dragon/resources/glitch.gif")
# Custom Alive Message
ALIVE_MESSAGE = str(os.environ.get("ALIVE_MESSAGE") or "")
# .alive and .help timeout
TIMEOUT = sb(os.environ.get("TIMEOUT") or "True")
# Time & Date - Country and Time Zone
COUNTRY = str(os.environ.get("COUNTRY") or "")
TZ_NUMBER = os.environ.get("TZ_NUMBER") or 1
# Version of Project Fizilion
USERBOT_VERSION = "4.0"
# User Terminal alias
USER_TERM_ALIAS = os.environ.get("USER_TERM_ALIAS") or "dem0n"
# Updater alias
UPDATER_ALIAS = os.environ.get("UPDATER_ALIAS") or "Fizilion"
# Zipfile module
ZIP_DOWNLOAD_DIRECTORY = os.environ.get("ZIP_DOWNLOAD_DIRECTORY") or "./zips"
# Clean Welcome
CLEAN_WELCOME = sb(os.environ.get("CLEAN_WELCOME") or "True")
# Last.fm Module
BIO_PREFIX = os.environ.get("BIO_PREFIX") or None
DEFAULT_BIO = os.environ.get("DEFAULT_BIO") or None
LASTFM_API = os.environ.get("LASTFM_API") or None
LASTFM_SECRET = os.environ.get("LASTFM_SECRET") or None
LASTFM_USERNAME = os.environ.get("LASTFM_USERNAME") or None
LASTFM_PASSWORD_PLAIN = os.environ.get("LASTFM_PASSWORD") or None
LASTFM_PASS = md5(LASTFM_PASSWORD_PLAIN) if LASTFM_PASSWORD_PLAIN else None
if LASTFM_API is not None:
lastfm = LastFMNetwork(
api_key=LASTFM_API,
api_secret=LASTFM_SECRET,
username=LASTFM_USERNAME,
password_hash=LASTFM_PASS,
)
else:
lastfm = None
# Google Drive Module
G_DRIVE_DATA = os.environ.get("G_DRIVE_DATA") or None
G_DRIVE_CLIENT_ID = os.environ.get("G_DRIVE_CLIENT_ID") or None
G_DRIVE_CLIENT_SECRET = os.environ.get("G_DRIVE_CLIENT_SECRET") or None
G_DRIVE_AUTH_TOKEN_DATA = os.environ.get("G_DRIVE_AUTH_TOKEN_DATA") or None
G_DRIVE_FOLDER_ID = os.environ.get("G_DRIVE_FOLDER_ID") or None
GDRIVE_INDEX_URL = os.environ.get("GDRIVE_INDEX_URL") or None
TEMP_DOWNLOAD_DIRECTORY = os.environ.get(
"TMP_DOWNLOAD_DIRECTORY") or "./downloads/"
# Uptobox
USR_TOKEN = os.environ.get("USR_TOKEN_UPTOBOX", None)
#SourceForge
SFUSER = os.environ.get("SFUSER") or "null"
SFPASS = os.environ.get("SFPASS") or "null"
SFDIR = os.environ.get("SFDIR") or "null"
#Mega
MEGA_EMAIL = os.environ.get("MEGA_EMAIL") or None
MEGA_PASSWORD = os.environ.get("MEGA_PASSWORD") or None
# Setting Up CloudMail.ru and MEGA.nz extractor binaries,
# and giving them correct perms to work properly.
if not os.path.exists("bin"):
os.mkdir("bin")
binaries = {
"https://raw.githubusercontent.com/adekmaulana/megadown/master/megadown": "bin/megadown",
"https://raw.githubusercontent.com/yshalsager/cmrudl.py/master/cmrudl.py": "bin/cmrudl",
}
for binary, path in binaries.items():
downloader = SmartDL(binary, path, progress_bar=False)
downloader.start()
os.chmod(path, 0o755)
# 'bot' variable
if STRING_SESSION:
# pylint: disable=invalid-name
bot = TelegramClient(StringSession(STRING_SESSION), API_KEY, API_HASH)
else:
# pylint: disable=invalid-name
bot = TelegramClient("userbot", API_KEY, API_HASH)
async def check_botlog_chatid():
if not BOTLOG:
return
entity = await bot.get_entity(BOTLOG_CHATID)
if entity.default_banned_rights.send_messages:
LOGS.info(
"Your account doesn't have rights to send messages to BOTLOG_CHATID "
"group. Check if you typed the Chat ID correctly.")
quit(1)
with bot:
try:
bot.loop.run_until_complete(check_botlog_chatid())
except BaseException:
LOGS.info(
"BOTLOG_CHATID environment variable isn't a "
"valid entity. Check your environment variables/config.env file."
)
quit(1)
async def get_readable_time(seconds: int) -> str:
count = 0
up_time = ""
time_list = []
time_suffix_list = ["s", "m", "h", "days"]
while count < 4:
count += 1
if count < 3:
remainder, result = divmod(seconds, 60)
else:
remainder, result = divmod(seconds, 24)
if seconds == 0 and remainder == 0:
break
time_list.append(int(result))
seconds = int(remainder)
for x in range(len(time_list)):
time_list[x] = str(time_list[x]) + time_suffix_list[x]
if len(time_list) == 4:
up_time += time_list.pop() + ", "
time_list.reverse()
up_time += ":".join(time_list)
return up_time
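    # Hedged example: awaiting get_readable_time(3661) yields "1h:1m:1s";
    # higher-order units that would be zero are skipped by the early break above.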
# Global Variables
COUNT_MSG = 0
USERS = {}
COUNT_PM = {}
LASTMSG = {}
CMD_HELP = {}
ZALG_LIST = {}
ISAFK = False
AFKREASON = None
DELMSG = False
##Constants
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else uname().node
repo = Repo()
modules = CMD_HELP
uptime = time.strftime('%X')
##
output = (
"` =============================== `\n"
f"`Fizilion is Up and Running.... `\n"
f"`=============================== `\n"
f"•`Telethon : v{version.__version__} `\n"
f"•`Python : v{python_version()} `\n"
f"•`User : {DEFAULTUSER} `\n"
f"•`Running on : {repo.active_branch.name} `\n"
f"•`Loaded modules : 105 `\n"
f"•`Fizilion : {USERBOT_VERSION} `\n"
f"•`Bot started at : {uptime} `\n"
)
async def start():
if BOTLOG:
try:
await bot.send_message(
BOTLOG_CHATID, output
)
except BaseException:
            pass
else:
pass
with bot:
bot.loop.run_until_complete(start())
|
py | 1a425e147b8ac870d84e04eb91652b4e1c14943d | # coding:utf-8
# call the toutiao search api to crawl articles
import sys
import requests
import json
import time
import random
import datetime
import threading
import multiprocessing
import redis
import argparse
r = redis.StrictRedis(host='localhost', port=6379, db=1)
#sleep time in secs
sleep_time = 5
def process(key):
toutiao_data = requests.get(URL%(key)).text
data = json.loads(toutiao_data)
    if int(data.get("return_count", 0)) <= 0:
        return
articles = data.get("data")
for article in articles:
# remove the invalid record
if not article.get("group_id") or not article.get("title"):
continue
key = article.get("group_id")
val = json.dumps(article)
#print key,article.get("title")
r.set(key,val)
def myworker(sub_keywords):
for key in sub_keywords:
print key
time.sleep(sleep_time)
process(key)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
    parser.add_argument("-s","--end_date")
parser.add_argument("-n","--worker_size",
default = 10,
type = int)
parser.add_argument("-k","--keyword_file",
default = "keywords.select",
help = "the keywords file to use")
args = parser.parse_args()
if args.end_date:
end_date = args.end_date
end_timestamp = datetime.datetime.strptime(end_date,"%Y%m%d").strftime("%s")
else:
end_timestamp = 0
URL = "http://www.toutiao.com/search_content/?offset=20&format=json&keyword=%s&autoload=true&count=100"
pool = multiprocessing.Pool()
keywords = [line.split('\t')[0] for line in open(args.keyword_file,'r').readlines()]
batch = len(keywords) / args.worker_size
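    # Hedged example: with 103 keywords and worker_size=10, batch is 10, so
    # workers 0-8 take 10 keywords each and the last worker takes the remaining 13.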
for i in range(args.worker_size):
if i == args.worker_size - 1:
sub_keywords = keywords[i * batch : ]
else:
sub_keywords = keywords[i * batch : i * batch + batch]
pool.apply_async(myworker, (sub_keywords,))
pool.close()
pool.join()
print "------all jobs finished!------"
|
py | 1a425f2c8e40f8808ff99e9e47119f94716ca902 | # guac.py
#
# plays Tic Tac Toe
import json
import time
import arena
import re
HOST = "arena-west1.conix.io"
TOPIC = "realm/s/guac/"
REALM = "realm"
SCENE = "guac"
# Globals (yes, Sharon)
cubes = {} # dict of cube objects to be indexed by tuple (x,y)
# grid elements can be:
# -1: unassigned
# 0: red
# 1: blue
grid = [-1, -1, -1], [-1, -1, -1], [-1, -1, -1]
Xcoords = [1, 2, 3]
Ycoords = [1, 2, 3]
redblue = [(255,0,0),(0,0,255)]
messages = []
def solved():
global grid
if grid[0][0] == 1 and grid[0][1] == 1 and grid[0][2] == 1: return True
if grid[1][0] == 1 and grid[1][1] == 1 and grid[1][2] == 1: return True
if grid[2][0] == 1 and grid[2][1] == 1 and grid[2][2] == 1: return True
if grid[0][0] == 0 and grid[0][1] == 0 and grid[0][2] == 0: return True
if grid[1][0] == 0 and grid[1][1] == 0 and grid[1][2] == 0: return True
if grid[2][0] == 0 and grid[2][1] == 0 and grid[2][2] == 0: return True
if grid[0][0] == 1 and grid[1][0] == 1 and grid[2][0] == 1: return True
if grid[0][1] == 1 and grid[1][1] == 1 and grid[2][1] == 1: return True
if grid[0][2] == 1 and grid[1][2] == 1 and grid[2][2] == 1: return True
if grid[0][0] == 0 and grid[1][0] == 0 and grid[2][0] == 0: return True
if grid[0][1] == 0 and grid[1][1] == 0 and grid[2][1] == 0: return True
if grid[0][2] == 0 and grid[1][2] == 0 and grid[2][2] == 0: return True
if grid[0][0] == 0 and grid[1][1] == 0 and grid[2][2] == 0: return True
if grid[0][0] == 1 and grid[1][1] == 1 and grid[2][2] == 1: return True
if grid[0][2] == 0 and grid[1][1] == 0 and grid[2][0] == 0: return True
if grid[0][2] == 1 and grid[1][1] == 1 and grid[2][0] == 1: return True
return False
def stalemate():
global grid
for x in Xcoords:
for y in Ycoords:
if grid[x - 1][y - 1] == -1:
return False
return True
def initCube(x, y, color):
name = "cube_" + str(x) + "_" + str(y)
cubes[(x,y)]=arena.Object(objType=arena.Shape.cube,
persist=True,
objName=name,
physics=arena.Physics.static,
data='{"collision-listener":"", "material": {"transparent":true,"opacity": 0.5},"impulse":{"on":"mouseup","force":"0 40 0","position": "10 1 1"}}',
location=(x,y,-3),
color=color,
scale=(0.6,0.6,0.6),
clickable=True);
def dropCube(x, y):
cubes[(x,y)].update(physics=arena.Physics.dynamic)
def deleteCube(x, y):
cubes[(x,y)].delete()
def launchCube(x, y):
cubes[(x,y)].update(physics=arena.Physics.dynamic)
cubes[(x,y)].fireEvent(arena.Event.mouseup,(0,0,0),"guacprogram")
def deleteAvocado():
global avocado
avocado.delete()
def drawAvocado():
global avocado
avocado = arena.Object(persist=True,
objName="gltf-model_avocadoman",
objType=arena.Shape.gltf_model,
url="assets/avocadoman/scene.gltf",
location=(-1,0.01,-4),
scale=(0.005,0.005,0.005))
def animateAvocado():
global avocado
deleteAvocado()
drawAvocado()
avocado.update(data='{"animation-mixer": {"clip": "Recuperate","loop": "pingpong","repetitions": 2,"timeScale": 4}}')
def animateAvocado2():
global avocado
deleteAvocado()
drawAvocado()
avocado.update(data='{"animation-mixer": {"clip": "Walking", "loop": "pingpong", "repetitions": 2}}')
counter = 0
def draw_board():
global counter
global grid
counter = 0
grid = [-1, -1, -1], [-1, -1, -1], [-1, -1, -1]
drawAvocado()
for x in Xcoords:
for y in Ycoords:
initCube(x, y, (127,127,127))
def animate_win():
animateAvocado()
for x in Xcoords:
for y in Ycoords:
launchCube(x, y)
time.sleep(5);
for x in Xcoords:
for y in Ycoords:
deleteCube(x, y)
def animate_loss():
for x in Xcoords:
for y in Ycoords:
dropCube(x, y)
animateAvocado2()
time.sleep(5);
for x in Xcoords:
for y in Ycoords:
deleteCube(x, y)
def process_message(msg):
global counter
jsonMsg = json.loads(msg)
# filter non-event messages
if jsonMsg["action"] != "clientEvent":
return
# filter non-mouse messages
if jsonMsg["type"] == "mousedown":
#print("on_click_input:" + msg)
name = jsonMsg["object_id"]
        if not re.match(r"cube_\d_\d", name): # test that object name matches pattern e.g. "cube_1_2"
return
color = redblue[counter % 2]
x = int(name.split("_")[1])
y = int(name.split("_")[2])
if grid[(x - 1)][(y - 1)] != -1:
return
counter = counter + 1
grid[(x - 1)][(y - 1)] = counter % 2
colstring = '#%02x%02x%02x' % color
cubes[(x,y)].update(physics=arena.Physics.static,
data='{"impulse": {"on": "mouseup","force":"0 40 0","position":"10 1 1"},"material": {"color":"'+ colstring+'", "transparent": false, "opacity": 1}}',
clickable=True,
location=(x,y,-3),
scale=(0.6, 0.6, 0.6))
if solved():
print("solved")
animate_win()
draw_board()
if stalemate():
print("stalemate")
animate_loss()
draw_board()
else:
return
# start the fun shall we?
arena.init(HOST, REALM, SCENE, process_message, port=3003)
print("starting main loop")
draw_board()
arena.handle_events()
|
py | 1a42608c3625867d127e2eedb1aa446e20f359b2 | from django import forms
from django.utils.translation import ugettext_lazy as _
from fobi.base import BaseFormFieldPluginForm, get_theme
from pldp.forms import SURVEY_MICROCLIMATE_CHOICES
theme = get_theme(request=None, as_instance=True)
class MicroclimateForm(forms.Form, BaseFormFieldPluginForm):
"""MicroclimateForm."""
plugin_data_fields = [
("label", "What are the current weather conditions?"),
("name", "name"),
("default", ""),
("help_text", ""),
("required", False),
]
label = forms.CharField(label="Label",
required=True,
)
name = forms.CharField(required=True, widget=forms.widgets.HiddenInput())
default = forms.ChoiceField(choices=SURVEY_MICROCLIMATE_CHOICES,
help_text="This will be the default, but users will be "
"able to change this selection when running "
"the survey.",
widget=forms.widgets.Select(
attrs={'class': theme.form_element_html_class}
))
help_text = forms.CharField(
label=_("Help text"),
required=False,
widget=forms.widgets.Textarea(
attrs={'class': theme.form_element_html_class}
)
)
required = forms.BooleanField(label="Required", required=False)
|
py | 1a426123f2cdda54913d55a1cfe93c774e1f54e5 | from qutip.qip.circuit import QubitCircuit
def vcnot_1(para,N):
"""
Nearby qubits, start from 0
"""
qc = QubitCircuit(N)
shape = (N//2,)
para = para.reshape(shape)
i = 0
for j in range(N//2):
qc.add_gate("CRX",2*j,2*j+1,para[j])
return qc
def vcnot_2(para,N):
"""
Long range CNOT, cross half circuit
"""
qc = QubitCircuit(N)
shape = (N//2,)
para = para.reshape(shape)
i = 0
for j in range(N//2):
qc.add_gate("CRX",j,j+N//2,para[j])
return qc
def vcnot_3(para,N):
"""
Nearby qubits, start from 1
"""
qc = QubitCircuit(N)
shape = ((N-1)//2,)
para = para.reshape(shape)
i = 0
for j in range((N-1)//2):
if 2*j<N-2:
qc.add_gate("CRX",2*j+1,2*j+2,para[j])
return qc
def local(para,N):
qc = QubitCircuit(N)
shape = (N,3)
para = para.reshape(shape)
i = 0
for angles in para:
qc.add_gate("RZ", i, None, angles[0])
qc.add_gate("RX", i, None, angles[1])
qc.add_gate("RZ", i, None, angles[2])
i+=1
return qc
def regular(para,N):
qc = QubitCircuit(N)
shape = (N,3)
para = para.reshape(shape)
i = 0
for angles in para:
qc.add_gate("RZ", i, None, angles[0])
qc.add_gate("RX", i, None, angles[1])
qc.add_gate("RZ", i, None, angles[2])
i+=1
for j in range(N-1):
qc.add_gate("CNOT",j,j+1)
return qc
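# Hedged usage sketch (not part of the original module): build the "regular"
# ansatz on 4 qubits with random angles and list its gates. Assumes QuTiP 4.x,
# where QubitCircuit keeps the added gates in its `gates` attribute.
if __name__ == "__main__":
    import numpy as np
    demo_qc = regular(np.random.rand(4 * 3), 4)
    for gate in demo_qc.gates:
        print(gate.name, gate.controls, gate.targets, gate.arg_value)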
def CNN4_1(para,N):
qc = QubitCircuit(N)
shape = (N,3)
para = para.reshape(shape)
i = 0
for angles in para:
qc.add_gate("RZ", i, None, angles[0])
qc.add_gate("RX", i, None, angles[1])
qc.add_gate("RZ", i, None, angles[2])
i+=1
for j in range(N-1):
if j //2 == 0:
qc.add_gate("CNOT",j,j+1)
return qc
def CNN4_2(para,N):
qc = QubitCircuit(N)
shape = (N,3)
para = para.reshape(shape)
i = 0
for angles in para:
qc.add_gate("RZ", i, None, angles[0])
qc.add_gate("RX", i, None, angles[1])
qc.add_gate("RZ", i, None, angles[2])
i+=1
for j in range(N-1):
if j < N/2:
qc.add_gate("CNOT",j,j+N//2)
return qc |
py | 1a42632449720580eb8df2c2deff17261b3bd14e | #!-*- coding:utf-8 -*-
#!/usr/bin/env python
#---------------------------------------------------
#Display the bulletin board
#copyright 2010-2012 ABARS all rights reserved.
#---------------------------------------------------
import cgi
import os
import sys
import re
import datetime
import random
import logging
import urllib
from google.appengine.api.labs import taskqueue
import template_select
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from google.appengine.api import images
from google.appengine.api import memcache
from myapp.Bbs import Bbs
from myapp.Counter import Counter
from myapp.Alert import Alert
from myapp.MappingId import MappingId
from myapp.SetUtf8 import SetUtf8
from myapp.OwnerCheck import OwnerCheck
from myapp.MaintenanceCheck import MaintenanceCheck
from myapp.BbsConst import BbsConst
from myapp.MesThread import MesThread
from myapp.PageGenerate import PageGenerate
from myapp.RecentCommentCache import RecentCommentCache
from myapp.Analyze import Analyze
from myapp.Entry import Entry
from myapp.CssDesign import CssDesign
from myapp.ApiObject import ApiObject
from myapp.CounterWorker import CounterWorker
from myapp.ShowEntry import ShowEntry
from myapp.CategoryList import CategoryList
from myapp.SpamCheck import SpamCheck
class ShowBbs(webapp.RequestHandler):
def get(self,bbs_key):
		#Japanese language support (UTF-8)
SetUtf8.set()
		#Host check
if SpamCheck.is_deny(self.request):
self.response.set_status(401)
return
		#Whether this is the English version
is_english=CssDesign.is_english(self)
		#Whether the service is under maintenance
is_maintenance=0
if(MaintenanceCheck.is_appengine_maintenance()):
is_maintenance=1
		#Fetch the bulletin board
bbs=ShowBbs.get_bbs(self,bbs_key)
if(bbs==None):
return
		#Check whether the board has been deleted
if(bbs.del_flag) :
if(is_english):
Alert.alert_msg_with_write(self,"This bbs was deleted.")
else:
Alert.alert_msg_with_write(self,"この掲示板は削除されました。")
return
		#Get the page number
page = 1
if self.request.get("page"):
try:
page = int(self.request.get("page"))
except:
Alert.alert_msg_with_write(self,"ページ番号が異常です。")
return
if page<1 :
page=1
		#Get the display order
order=ShowBbs.get_order(self,bbs)
		#Get the category
category=""
if(self.request.get("category")):
category=self.request.get("category")
		#Get the thread list
thread_query=ShowBbs.get_thread_query(bbs,category,order)
		#Get the number of illustrations per page
col_num=ShowBbs.get_col_num(bbs,order)
		#Get the thread count and the threads
		count_limit=(BbsConst.PAGE_LIST_COUNT+page)*col_num #only used for page-number generation, so knowing the maximum page number is enough
if(category==""):
threads_num = bbs.cached_threads_num
else:
threads_num = thread_query.count(limit=count_limit)
all_threads = thread_query.fetch(limit=col_num, offset=(page-1)*col_num)
		#Get reply illustrations
all_entries = None
#if(order=="thumbnail"):
# all_entries=ShowBbs.get_illust_reply(bbs,page,col_num)
# if(threads_num<all_entries["count"]):
# threads_num=all_entries["count"]
# all_entries=all_entries["entry"]
		#Get the host URL
host_url=MappingId.mapping_host_with_scheme(self.request)+"/";
		#Build the URLs
mapped_category=urllib.quote(category.encode('utf-8'))
page_url=MappingId.get_usr_url(host_url,bbs)
page_url_base=MappingId.get_usr_url(host_url,bbs)+'?order='+order+'&category='+mapped_category+'&page='
page_url_order_base=MappingId.get_usr_url(host_url,bbs)+'?page=1&category='+mapped_category+'&order='
page_url_category_base=MappingId.get_usr_url(host_url,bbs)+'?page=1&order='+order+"&category="
		#Build the page list
page_list=PageGenerate.generate_page(page,threads_num,col_num)
		#Get the logged-in user
user = users.get_current_user()
logined=0
if(user):
logined=1
owner=user
if(OwnerCheck.check(bbs,user)):
owner=None
		#Get the sidebar comments
side_comment=RecentCommentCache.get_entry(bbs)
side_thread=RecentCommentCache.get_thread(bbs)
		#Get the category list
category_list=None
if(bbs.category_list):
if(bbs.category_list!=""):
category_list=CategoryList.get_category_list(bbs) #bbs.category_list.split(",")
		#Get the page design
css_key=self.request.get("css_key")
design=CssDesign.get_design_object(self,bbs,host_url,0)
		#Build the sidebar list
sidebar_list=ShowBbs.get_sidebar(bbs,category_list,side_comment,side_thread)
		#Whether a new thread can be created
can_create_thread=ShowBbs.get_can_create_thread(bbs,user,logined)
can_create_new_image=ShowBbs.get_can_create_new_image(bbs,owner)
		#Fetch all threads
all_threads_cached=ApiObject.get_cached_object_list(all_threads)
		#Whether to show the comment form
show_comment_form=1
if(bbs.comment_login_require and not(owner)):
show_comment_form=0
		#Full-comment debug
if(self.request.get("full_comment")):
bbs.enable_full_comment=1
		#Make full flat display the default
if(bbs.bbs_mode==BbsConst.BBS_MODE_NO_IMAGE):
bbs.enable_full_flat=0
bbs.enable_full_comment=0
else:
bbs.enable_full_flat=1
			#bbs.enable_full_comment=1 #stop making this the default
		#Fetch all comments
#user_name=""
user_name=ShowEntry.get_user_name(user)
if(bbs.enable_full_comment):
admin_user=OwnerCheck.is_admin(user)
ShowEntry.render_comment_list(self,all_threads_cached,host_url,bbs,show_comment_form,logined,admin_user,user_name,user)
		#Whether the design can be edited
can_edit_design=False
is_admin=OwnerCheck.is_admin(user)
if(owner or (is_admin and bbs.bbs_mode==BbsConst.BBS_MODE_EVERYONE)):
can_edit_design=True
		#Whether to use infinite_scroll
infinite_scroll=False
#if(bbs.bbs_mode!=BbsConst.BBS_MODE_NO_IMAGE):# and design["is_iphone"]):
infinite_scroll=True
		#Second and later pages for infinite_scroll
contents_only=0
if(self.request.get("contents_only")=="1"):
contents_only=1
		#Message
message=memcache.get(BbsConst.OBJECT_BBS_MESSAGE_HEADER+str(bbs.key()))
		#Count-up comment
if(bbs.counter):
bbs.counter.new_day_update()
count_up_comment=None
#if(bbs.dont_count_owner):
# if(owner):
# count_up_comment="管理人"
# else:
# count_up_comment="ユーザ"
		#Category list
show_category_list=False
if(self.request.get("show_category_list")=="1"):
show_category_list=True
		#Anti-spam measures
force_login_to_create_new_image=BbsConst.FORCE_LOGIN_TO_CREATE_NEW_IMAGE
force_login_to_create_new_comment=BbsConst.FORCE_LOGIN_TO_CREATE_NEW_COMMENT
		#Rendering
template_values = {
'host': host_url,
'usrhost': MappingId.get_usr_url(host_url,bbs),
'threads': all_threads_cached,
'all_entries':all_entries,
'bbs':bbs,
'new_url': 'create_new_thread',
'page':page,
'page_url':page_url,
'page_url_base':page_url_base,
'order':order,
'page_url_order_base':page_url_order_base,
'page_list':page_list,
'user':user,
'owner': owner,
'side_comment':side_comment,
'side_thread':side_thread,
'logined':logined,
'can_create_thread':can_create_thread,
'category_list':category_list,
'page_url_category_base':page_url_category_base,
'now_category':category,
'can_create_new_image':can_create_new_image,
'template_path':design["template_path"],
'css_name':design["css_name"],
'is_iphone':design["is_iphone"],
'is_tablet':design["is_tablet"],
'template_base_color':design["template_base_color"],
'sidebar_list': sidebar_list,
'is_maintenance': is_maintenance,
'css_key': css_key,
'redirect_url': self.request.path,
'show_comment_form': show_comment_form,
'user_name': user_name,
'is_admin': is_admin,
'can_edit_design': can_edit_design,
'infinite_scroll': infinite_scroll,
'infinite_scroll_selecter': ".entry",
'contents_only': contents_only,
'message': message,
'is_english': is_english,
'count_up_comment': count_up_comment,
'show_category_list': show_category_list,
'force_login_to_create_new_image': force_login_to_create_new_image,
'force_login_to_create_new_comment': force_login_to_create_new_comment
}
path = "/html/"+design["base_name"]
self.response.out.write(template_select.render(path, template_values))
if(is_maintenance):
return
CounterWorker.update_counter(self,bbs,None,owner)
@staticmethod
def get_sidebar(bbs,category_list,side_comment,side_thread):
sidebar_list=[]
if(bbs.freearea):
sidebar_list.append("free")
if(bbs.amazon):
sidebar_list.append("affiliate")
if(side_thread):
sidebar_list.append("thread")
if(side_comment):
sidebar_list.append("comment")
if(category_list):
sidebar_list.append("category")
if(not bbs.disable_counter):
sidebar_list.append("counter")
sidebar_list.append("menu")
if(bbs.twitter_enable):
sidebar_list.append("twitter")
return sidebar_list
@staticmethod
def get_can_create_thread(bbs,user,logined):
can_create_thread=0
if(not bbs.disable_create_new_thread):
can_create_thread=1
if(bbs.disable_create_new_thread==1 and user):
can_create_thread=1
if(bbs.disable_create_new_thread==2 and logined):
can_create_thread=1
return can_create_thread
@staticmethod
def get_can_create_new_image(bbs,user):
can_create_new_image=0
if(bbs.bbs_mode==1):
can_create_new_image=1
if(bbs.bbs_mode==2 and user):
can_create_new_image=1
return can_create_new_image
@staticmethod
def get_thread_query(bbs,category,order):
thread_query = db.Query(MesThread,keys_only=True)
thread_query.filter('bbs_key =', bbs)
if(bbs.show_only_movie):
if(order=="illust"):
thread_query.filter("illust_mode =",BbsConst.ILLUSTMODE_ILLUST)
else:
thread_query.filter("illust_mode =",BbsConst.ILLUSTMODE_MOPER)
if(category!=""):
thread_query.filter("category =",category)
if(order=="new"):
thread_query.order('-create_date')
else:
if(order=="comment"):
thread_query.order('-comment_cnt')
else:
if(order=="applause"):
thread_query.order('-applause')
else:
thread_query.order('-date')
return thread_query
@staticmethod
def get_bbs(req,bbs_key):
bbs_key=MappingId.mapping(bbs_key)
if(bbs_key==""):
Alert.alert_msg_notfound(req)
return None
bbs=ApiObject.get_cached_object(bbs_key)
if(bbs == None):
Alert.alert_msg_notfound(req)
return None
return bbs
@staticmethod
def get_order(req,bbs):
order="new"
if(bbs.default_order==2):
order="update"
if(bbs.bbs_mode==BbsConst.BBS_MODE_NO_IMAGE):
order="update"
if req.request.get("order"):
order=req.request.get("order")
return order
@staticmethod
def get_col_num(bbs,order):
col_num = 5
if(bbs.page_illust_n):
col_num=bbs.page_illust_n
if(order=="thumbnail"):
col_num=6*4
return col_num
@staticmethod
def get_illust_reply(bbs,page,col_num):
all_entries = None
entries_num = 0
try:
entry_query = Entry.all().filter("bbs_key =", bbs)
entry_query.filter("illust_reply =",1)
entry_query.filter("del_flag =",1)
entry_query.order("-date")
entries_num=entry_query.count()
all_entries=entry_query.fetch(limit=col_num, offset=(page-1)*col_num)
except:
None
return {"entry":all_entries,"count":entries_num}
|
py | 1a4264338f0cfb7d70246d23603cf93f6bff6dae | #
# PySNMP MIB module CISCO-WAN-MG-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-WAN-MG-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:02:23 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion")
ciscoWan, = mibBuilder.importSymbols("CISCOWAN-SMI", "ciscoWan")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
IpAddress, Counter32, TimeTicks, Gauge32, Unsigned32, MibIdentifier, Bits, iso, ObjectIdentity, Counter64, ModuleIdentity, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "Counter32", "TimeTicks", "Gauge32", "Unsigned32", "MibIdentifier", "Bits", "iso", "ObjectIdentity", "Counter64", "ModuleIdentity", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32")
TruthValue, RowStatus, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "RowStatus", "DisplayString", "TextualConvention")
ciscoWanMgMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 351, 150, 10))
ciscoWanMgMIB.setRevisions(('2005-05-27 00:00', '2004-01-20 00:00', '2002-06-14 00:00', '2001-05-25 00:00', '2000-07-19 15:00', '2000-03-27 00:00', '1999-11-27 00:00',))
if mibBuilder.loadTexts: ciscoWanMgMIB.setLastUpdated('200505270000Z')
if mibBuilder.loadTexts: ciscoWanMgMIB.setOrganization('Cisco Systems, Inc.')
ciscoWanMgMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 10, 1))
mediaGateway = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 1))
mediaGatewayController = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 2))
mediaGatewayEndpoint = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 3))
mediaGatewayLine = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 4))
mediaGatewayControllerResolution = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 5))
mediaGatewayDomainName = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 6))
mgName = MibScalar((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 1, 1), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mgName.setStatus('current')
mgAdministrativeState = MibScalar((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("inService", 1), ("commandedOutOfService", 2), ("pendingOutOfService", 3))).clone('commandedOutOfService')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mgAdministrativeState.setStatus('current')
mgAdministrativeStateControl = MibScalar((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("inService", 1), ("forcefulOutOfService", 2), ("gracefulOutOfService", 3))).clone('forcefulOutOfService')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mgAdministrativeStateControl.setStatus('current')
mgShutdownGraceTime = MibScalar((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 65535))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: mgShutdownGraceTime.setStatus('current')
mgSupportedProtocolTable = MibTable((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 1, 7), )
if mibBuilder.loadTexts: mgSupportedProtocolTable.setStatus('current')
mgSupportedProtocolEntry = MibTableRow((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 1, 7, 1), ).setIndexNames((0, "CISCO-WAN-MG-MIB", "mgProtocolNumber"))
if mibBuilder.loadTexts: mgSupportedProtocolEntry.setStatus('current')
mgProtocolNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 1, 7, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)))
if mibBuilder.loadTexts: mgProtocolNumber.setStatus('current')
mgProtocolName = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 1, 7, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mgProtocolName.setStatus('current')
maxConcurrentMgcs = MibScalar((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 2, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setUnits('controllers').setMaxAccess("readonly")
if mibBuilder.loadTexts: maxConcurrentMgcs.setStatus('current')
mgcTable = MibTable((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 2, 1), )
if mibBuilder.loadTexts: mgcTable.setStatus('current')
mgcEntry = MibTableRow((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 2, 1, 1), ).setIndexNames((0, "CISCO-WAN-MG-MIB", "mgcNumber"))
if mibBuilder.loadTexts: mgcEntry.setStatus('current')
mgcNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)))
if mibBuilder.loadTexts: mgcNumber.setStatus('current')
mgcName = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 2, 1, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mgcName.setStatus('current')
mgcDnsResolution = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 2, 1, 1, 3), TruthValue().clone('false')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mgcDnsResolution.setStatus('deprecated')
mgcAssociationState = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("mgcUnassociated", 1), ("mgcAssociated", 2), ("mgcAssociatedCommLoss", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mgcAssociationState.setStatus('deprecated')
mgcAssociationStateControl = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("mgcUnassociate", 1), ("mgcAssociate", 2), ("mgcClear", 3))).clone('mgcUnassociate')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mgcAssociationStateControl.setStatus('deprecated')
mgcUnassociationPolicy = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 2, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("mgcNoAction", 1), ("mgcRelease", 2))).clone('mgcNoAction')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mgcUnassociationPolicy.setStatus('deprecated')
mgcCommLossUnassociationTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 2, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 65535)).clone(-1)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: mgcCommLossUnassociationTimeout.setStatus('deprecated')
mgcRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 2, 1, 1, 8), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mgcRowStatus.setStatus('current')
mgcProtocolTable = MibTable((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 2, 2), )
if mibBuilder.loadTexts: mgcProtocolTable.setStatus('deprecated')
mgcProtocolEntry = MibTableRow((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 2, 2, 1), ).setIndexNames((0, "CISCO-WAN-MG-MIB", "mgcNumber"), (0, "CISCO-WAN-MG-MIB", "mgProtocolNumber"))
if mibBuilder.loadTexts: mgcProtocolEntry.setStatus('deprecated')
mgcProtocolRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 2, 2, 1, 1), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mgcProtocolRowStatus.setStatus('deprecated')
mgEndpointCreationPolicy = MibScalar((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 3, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("dynamic", 1), ("strictDynamic", 2), ("static", 3))).clone('static')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mgEndpointCreationPolicy.setStatus('current')
mgEndpointTable = MibTable((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 3, 1), )
if mibBuilder.loadTexts: mgEndpointTable.setStatus('current')
mgEndpointEntry = MibTableRow((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 3, 1, 1), ).setIndexNames((0, "CISCO-WAN-MG-MIB", "mgEndpointNumber"))
if mibBuilder.loadTexts: mgEndpointEntry.setStatus('current')
mgEndpointNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 3, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)))
if mibBuilder.loadTexts: mgEndpointNumber.setStatus('current')
mgEndpointLineNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mgEndpointLineNumber.setStatus('current')
mgEndpointName = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 3, 1, 1, 3), SnmpAdminString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mgEndpointName.setStatus('current')
mgEndpointSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 3, 1, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setUnits('Kbps').setMaxAccess("readonly")
if mibBuilder.loadTexts: mgEndpointSpeed.setStatus('current')
mgEndpointState = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 3, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("mgEndpointActive", 1), ("mgEndpointFailed", 2), ("mgEndpointDegraded", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mgEndpointState.setStatus('current')
mgEndpointChannelMap = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 3, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mgEndpointChannelMap.setStatus('current')
mgEndpointRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 3, 1, 1, 7), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mgEndpointRowStatus.setStatus('current')
lineAssignmentTable = MibTable((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 4, 1), )
if mibBuilder.loadTexts: lineAssignmentTable.setStatus('current')
lineAssignmentEntry = MibTableRow((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 4, 1, 1), ).setIndexNames((0, "CISCO-WAN-MG-MIB", "lineNumber"))
if mibBuilder.loadTexts: lineAssignmentEntry.setStatus('current')
lineNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 4, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)))
if mibBuilder.loadTexts: lineNumber.setStatus('current')
channelAssignment = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 4, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: channelAssignment.setStatus('current')
lineName = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 4, 1, 1, 3), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lineName.setStatus('current')
mgcResolutionTable = MibTable((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 5, 1), )
if mibBuilder.loadTexts: mgcResolutionTable.setStatus('current')
mgcResolutionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 5, 1, 1), ).setIndexNames((0, "CISCO-WAN-MG-MIB", "mgcResolutionIndex"))
if mibBuilder.loadTexts: mgcResolutionEntry.setStatus('current')
mgcResolutionIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 5, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)))
if mibBuilder.loadTexts: mgcResolutionIndex.setStatus('current')
mgcResolutionName = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 5, 1, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mgcResolutionName.setStatus('current')
mgcResolutionIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 5, 1, 1, 3), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mgcResolutionIpAddress.setStatus('current')
mgcResolutionCommState = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 5, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("csActive", 1), ("csInactive", 2))).clone('csInactive')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mgcResolutionCommState.setStatus('current')
mgcResolutionPreference = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 5, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mgcResolutionPreference.setStatus('current')
mgcResolutionRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 5, 1, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mgcResolutionRowStatus.setStatus('current')
mgcDnsResolutionFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 5, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("internal", 1), ("external", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mgcDnsResolutionFlag.setStatus('current')
mgDomainNameTable = MibTable((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 6, 1), )
if mibBuilder.loadTexts: mgDomainNameTable.setStatus('current')
mgDomainNameEntry = MibTableRow((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 6, 1, 1), ).setIndexNames((0, "CISCO-WAN-MG-MIB", "mgDomainNameIndex"))
if mibBuilder.loadTexts: mgDomainNameEntry.setStatus('current')
mgDomainNameIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 6, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)))
if mibBuilder.loadTexts: mgDomainNameIndex.setStatus('current')
mgDomainName = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 6, 1, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mgDomainName.setStatus('current')
mgDnsResolutionType = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 6, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("internalOnly", 1), ("externalOnly", 2), ("internalFirst", 3), ("externalFirst", 4))).clone('internalOnly')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mgDnsResolutionType.setStatus('current')
mgDomainNameRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 6, 1, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mgDomainNameRowStatus.setStatus('current')
mgEndpointExtTable = MibTable((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 3, 3), )
if mibBuilder.loadTexts: mgEndpointExtTable.setStatus('current')
mgEndpointExtEntry = MibTableRow((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 3, 3, 1), )
mgEndpointEntry.registerAugmentions(("CISCO-WAN-MG-MIB", "mgEndpointExtEntry"))
mgEndpointExtEntry.setIndexNames(*mgEndpointEntry.getIndexNames())
if mibBuilder.loadTexts: mgEndpointExtEntry.setStatus('current')
mgEndpointRepetition = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 150, 10, 1, 3, 3, 1, 1), Unsigned32().clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mgEndpointRepetition.setStatus('current')
mgMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 10, 3))
mgMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 10, 3, 1))
mgMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 10, 3, 2))
mgMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 351, 150, 10, 3, 1, 1)).setObjects(("CISCO-WAN-MG-MIB", "mediaGatewayGroup"), ("CISCO-WAN-MG-MIB", "mediaGatewayControllerGroup"), ("CISCO-WAN-MG-MIB", "mediaGatewayEndpointGroup"), ("CISCO-WAN-MG-MIB", "mediaGatewayLineGroup"), ("CISCO-WAN-MG-MIB", "mediaGatewayControllerResolutionGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mgMIBCompliance = mgMIBCompliance.setStatus('deprecated')
mgMIBCompliance1 = ModuleCompliance((1, 3, 6, 1, 4, 1, 351, 150, 10, 3, 1, 2)).setObjects(("CISCO-WAN-MG-MIB", "mediaGatewayGroup"), ("CISCO-WAN-MG-MIB", "mediaGatewayControllerGroup1"), ("CISCO-WAN-MG-MIB", "mediaGatewayEndpointGroup"), ("CISCO-WAN-MG-MIB", "mediaGatewayLineGroup"), ("CISCO-WAN-MG-MIB", "mediaGatewayDomainNameGroup"), ("CISCO-WAN-MG-MIB", "mediaGatewayControllerResolutionGroup1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mgMIBCompliance1 = mgMIBCompliance1.setStatus('deprecated')
mgMIBCompliance2 = ModuleCompliance((1, 3, 6, 1, 4, 1, 351, 150, 10, 3, 1, 3)).setObjects(("CISCO-WAN-MG-MIB", "mediaGatewayGroup"), ("CISCO-WAN-MG-MIB", "mediaGatewayControllerGroup2"), ("CISCO-WAN-MG-MIB", "mediaGatewayEndpointGroup"), ("CISCO-WAN-MG-MIB", "mediaGatewayLineGroup"), ("CISCO-WAN-MG-MIB", "mediaGatewayDomainNameGroup"), ("CISCO-WAN-MG-MIB", "mediaGatewayControllerResolutionGroup1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mgMIBCompliance2 = mgMIBCompliance2.setStatus('deprecated')
mgMIBCompliance3 = ModuleCompliance((1, 3, 6, 1, 4, 1, 351, 150, 10, 3, 1, 4)).setObjects(("CISCO-WAN-MG-MIB", "mediaGatewayGroup"), ("CISCO-WAN-MG-MIB", "mediaGatewayControllerGroup2"), ("CISCO-WAN-MG-MIB", "mediaGatewayEndpointGroup"), ("CISCO-WAN-MG-MIB", "mediaGatewayLineGroup"), ("CISCO-WAN-MG-MIB", "mediaGatewayDomainNameGroup"), ("CISCO-WAN-MG-MIB", "mediaGatewayControllerResolutionGroup1"), ("CISCO-WAN-MG-MIB", "mediaGatewayEndptRepetitionGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mgMIBCompliance3 = mgMIBCompliance3.setStatus('current')
mediaGatewayGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 351, 150, 10, 3, 2, 1)).setObjects(("CISCO-WAN-MG-MIB", "mgName"), ("CISCO-WAN-MG-MIB", "mgAdministrativeState"), ("CISCO-WAN-MG-MIB", "mgAdministrativeStateControl"), ("CISCO-WAN-MG-MIB", "mgShutdownGraceTime"), ("CISCO-WAN-MG-MIB", "mgProtocolName"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mediaGatewayGroup = mediaGatewayGroup.setStatus('current')
mediaGatewayControllerGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 351, 150, 10, 3, 2, 2)).setObjects(("CISCO-WAN-MG-MIB", "maxConcurrentMgcs"), ("CISCO-WAN-MG-MIB", "mgcName"), ("CISCO-WAN-MG-MIB", "mgcDnsResolution"), ("CISCO-WAN-MG-MIB", "mgcAssociationState"), ("CISCO-WAN-MG-MIB", "mgcAssociationStateControl"), ("CISCO-WAN-MG-MIB", "mgcUnassociationPolicy"), ("CISCO-WAN-MG-MIB", "mgcCommLossUnassociationTimeout"), ("CISCO-WAN-MG-MIB", "mgcRowStatus"), ("CISCO-WAN-MG-MIB", "mgcProtocolRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mediaGatewayControllerGroup = mediaGatewayControllerGroup.setStatus('deprecated')
mediaGatewayEndpointGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 351, 150, 10, 3, 2, 3)).setObjects(("CISCO-WAN-MG-MIB", "mgEndpointCreationPolicy"), ("CISCO-WAN-MG-MIB", "mgEndpointName"), ("CISCO-WAN-MG-MIB", "mgEndpointLineNumber"), ("CISCO-WAN-MG-MIB", "mgEndpointSpeed"), ("CISCO-WAN-MG-MIB", "mgEndpointState"), ("CISCO-WAN-MG-MIB", "mgEndpointChannelMap"), ("CISCO-WAN-MG-MIB", "mgEndpointRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mediaGatewayEndpointGroup = mediaGatewayEndpointGroup.setStatus('current')
mediaGatewayLineGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 351, 150, 10, 3, 2, 4)).setObjects(("CISCO-WAN-MG-MIB", "channelAssignment"), ("CISCO-WAN-MG-MIB", "lineName"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mediaGatewayLineGroup = mediaGatewayLineGroup.setStatus('current')
mediaGatewayControllerResolutionGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 351, 150, 10, 3, 2, 5)).setObjects(("CISCO-WAN-MG-MIB", "mgcResolutionName"), ("CISCO-WAN-MG-MIB", "mgcResolutionIpAddress"), ("CISCO-WAN-MG-MIB", "mgcResolutionCommState"), ("CISCO-WAN-MG-MIB", "mgcResolutionPreference"), ("CISCO-WAN-MG-MIB", "mgcResolutionRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mediaGatewayControllerResolutionGroup = mediaGatewayControllerResolutionGroup.setStatus('deprecated')
mediaGatewayControllerGroup1 = ObjectGroup((1, 3, 6, 1, 4, 1, 351, 150, 10, 3, 2, 6)).setObjects(("CISCO-WAN-MG-MIB", "maxConcurrentMgcs"), ("CISCO-WAN-MG-MIB", "mgcName"), ("CISCO-WAN-MG-MIB", "mgcAssociationState"), ("CISCO-WAN-MG-MIB", "mgcAssociationStateControl"), ("CISCO-WAN-MG-MIB", "mgcUnassociationPolicy"), ("CISCO-WAN-MG-MIB", "mgcCommLossUnassociationTimeout"), ("CISCO-WAN-MG-MIB", "mgcRowStatus"), ("CISCO-WAN-MG-MIB", "mgcProtocolRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mediaGatewayControllerGroup1 = mediaGatewayControllerGroup1.setStatus('deprecated')
mediaGatewayControllerResolutionGroup1 = ObjectGroup((1, 3, 6, 1, 4, 1, 351, 150, 10, 3, 2, 7)).setObjects(("CISCO-WAN-MG-MIB", "mgcResolutionName"), ("CISCO-WAN-MG-MIB", "mgcResolutionIpAddress"), ("CISCO-WAN-MG-MIB", "mgcResolutionCommState"), ("CISCO-WAN-MG-MIB", "mgcResolutionPreference"), ("CISCO-WAN-MG-MIB", "mgcResolutionRowStatus"), ("CISCO-WAN-MG-MIB", "mgcDnsResolutionFlag"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mediaGatewayControllerResolutionGroup1 = mediaGatewayControllerResolutionGroup1.setStatus('current')
mediaGatewayDomainNameGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 351, 150, 10, 3, 2, 8)).setObjects(("CISCO-WAN-MG-MIB", "mgDomainName"), ("CISCO-WAN-MG-MIB", "mgDnsResolutionType"), ("CISCO-WAN-MG-MIB", "mgDomainNameRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mediaGatewayDomainNameGroup = mediaGatewayDomainNameGroup.setStatus('current')
mediaGatewayControllerGroup2 = ObjectGroup((1, 3, 6, 1, 4, 1, 351, 150, 10, 3, 2, 9)).setObjects(("CISCO-WAN-MG-MIB", "maxConcurrentMgcs"), ("CISCO-WAN-MG-MIB", "mgcName"), ("CISCO-WAN-MG-MIB", "mgcRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mediaGatewayControllerGroup2 = mediaGatewayControllerGroup2.setStatus('current')
mediaGatewayEndptRepetitionGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 351, 150, 10, 3, 2, 10)).setObjects(("CISCO-WAN-MG-MIB", "mgEndpointRepetition"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mediaGatewayEndptRepetitionGroup = mediaGatewayEndptRepetitionGroup.setStatus('current')
mibBuilder.exportSymbols("CISCO-WAN-MG-MIB", mgEndpointCreationPolicy=mgEndpointCreationPolicy, mgProtocolNumber=mgProtocolNumber, mgcNumber=mgcNumber, mediaGatewayGroup=mediaGatewayGroup, mgcResolutionIpAddress=mgcResolutionIpAddress, mgEndpointSpeed=mgEndpointSpeed, mgcDnsResolution=mgcDnsResolution, mgAdministrativeStateControl=mgAdministrativeStateControl, mgMIBConformance=mgMIBConformance, mgName=mgName, mgDomainNameTable=mgDomainNameTable, mgMIBCompliance3=mgMIBCompliance3, mgMIBCompliance=mgMIBCompliance, lineAssignmentEntry=lineAssignmentEntry, mgcTable=mgcTable, mediaGatewayControllerResolutionGroup=mediaGatewayControllerResolutionGroup, mgEndpointExtTable=mgEndpointExtTable, mgEndpointExtEntry=mgEndpointExtEntry, mgcProtocolRowStatus=mgcProtocolRowStatus, mgcAssociationState=mgcAssociationState, mgcDnsResolutionFlag=mgcDnsResolutionFlag, mediaGateway=mediaGateway, mediaGatewayEndpoint=mediaGatewayEndpoint, mgSupportedProtocolTable=mgSupportedProtocolTable, mgDomainName=mgDomainName, ciscoWanMgMIBObjects=ciscoWanMgMIBObjects, mgEndpointLineNumber=mgEndpointLineNumber, mgMIBCompliance2=mgMIBCompliance2, channelAssignment=channelAssignment, mgMIBCompliance1=mgMIBCompliance1, mediaGatewayDomainNameGroup=mediaGatewayDomainNameGroup, mgcResolutionPreference=mgcResolutionPreference, mgcProtocolEntry=mgcProtocolEntry, mediaGatewayLine=mediaGatewayLine, mgcResolutionName=mgcResolutionName, mediaGatewayLineGroup=mediaGatewayLineGroup, mediaGatewayControllerGroup=mediaGatewayControllerGroup, mgEndpointRowStatus=mgEndpointRowStatus, mgDomainNameEntry=mgDomainNameEntry, mediaGatewayDomainName=mediaGatewayDomainName, mgEndpointRepetition=mgEndpointRepetition, mgDomainNameIndex=mgDomainNameIndex, mgShutdownGraceTime=mgShutdownGraceTime, mgcEntry=mgcEntry, mgcAssociationStateControl=mgcAssociationStateControl, mgAdministrativeState=mgAdministrativeState, mgcRowStatus=mgcRowStatus, mgEndpointChannelMap=mgEndpointChannelMap, mgDomainNameRowStatus=mgDomainNameRowStatus, mgcUnassociationPolicy=mgcUnassociationPolicy, mediaGatewayControllerGroup1=mediaGatewayControllerGroup1, ciscoWanMgMIB=ciscoWanMgMIB, mediaGatewayEndpointGroup=mediaGatewayEndpointGroup, mediaGatewayController=mediaGatewayController, mediaGatewayControllerResolution=mediaGatewayControllerResolution, mgcResolutionCommState=mgcResolutionCommState, mgcResolutionRowStatus=mgcResolutionRowStatus, mediaGatewayControllerGroup2=mediaGatewayControllerGroup2, mgProtocolName=mgProtocolName, mgSupportedProtocolEntry=mgSupportedProtocolEntry, mgEndpointEntry=mgEndpointEntry, mgDnsResolutionType=mgDnsResolutionType, mgEndpointName=mgEndpointName, mgcResolutionTable=mgcResolutionTable, mgEndpointState=mgEndpointState, mediaGatewayControllerResolutionGroup1=mediaGatewayControllerResolutionGroup1, mgcProtocolTable=mgcProtocolTable, lineAssignmentTable=lineAssignmentTable, PYSNMP_MODULE_ID=ciscoWanMgMIB, mgcResolutionIndex=mgcResolutionIndex, mgcCommLossUnassociationTimeout=mgcCommLossUnassociationTimeout, mgcResolutionEntry=mgcResolutionEntry, mgMIBGroups=mgMIBGroups, mgEndpointNumber=mgEndpointNumber, mgcName=mgcName, mgMIBCompliances=mgMIBCompliances, mediaGatewayEndptRepetitionGroup=mediaGatewayEndptRepetitionGroup, mgEndpointTable=mgEndpointTable, maxConcurrentMgcs=maxConcurrentMgcs, lineName=lineName, lineNumber=lineNumber)
|
py | 1a4264d3780ba560a80a6f226f4b69710e00ae24 | """
ASGI config for kcc_store project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kcc_store.settings')
application = get_asgi_application()
|
py | 1a4265e59f3141c165b68b79ebb3b1d2261061aa | from __future__ import absolute_import
import os
import sys
import weakref
from pyramid.httpexceptions import HTTPException
from sentry_sdk.hub import Hub, _should_send_default_pii
from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
from sentry_sdk._compat import reraise
from sentry_sdk.integrations import Integration
from sentry_sdk.integrations._wsgi_common import RequestExtractor
from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
class PyramidIntegration(Integration):
identifier = "pyramid"
transaction_style = None
def __init__(self, transaction_style="route_name"):
TRANSACTION_STYLE_VALUES = ("route_name", "route_pattern")
if transaction_style not in TRANSACTION_STYLE_VALUES:
raise ValueError(
"Invalid value for transaction_style: %s (must be in %s)"
% (transaction_style, TRANSACTION_STYLE_VALUES)
)
self.transaction_style = transaction_style
@staticmethod
def setup_once():
from pyramid.router import Router
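        # Two monkey-patches below: Router.handle_request is wrapped so
        # exceptions raised while handling a request are captured, and
        # Router.__call__ is wrapped in SentryWsgiMiddleware to instrument
        # the WSGI layer as well.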
old_handle_request = Router.handle_request
def sentry_patched_handle_request(self, request, *args, **kwargs):
hub = Hub.current
integration = hub.get_integration(PyramidIntegration)
if integration is None:
return old_handle_request(self, request, *args, **kwargs)
with hub.configure_scope() as scope:
scope.add_event_processor(
_make_event_processor(weakref.ref(request), integration)
)
try:
return old_handle_request(self, request, *args, **kwargs)
except Exception:
exc_info = sys.exc_info()
_capture_exception(exc_info)
reraise(*exc_info)
Router.handle_request = sentry_patched_handle_request
old_wsgi_call = Router.__call__
def sentry_patched_wsgi_call(self, environ, start_response):
hub = Hub.current
integration = hub.get_integration(PyramidIntegration)
if integration is None:
return old_wsgi_call(self, environ, start_response)
return SentryWsgiMiddleware(lambda *a, **kw: old_wsgi_call(self, *a, **kw))(
environ, start_response
)
Router.__call__ = sentry_patched_wsgi_call
def _capture_exception(exc_info, **kwargs):
if issubclass(exc_info[0], HTTPException):
return
hub = Hub.current
if hub.get_integration(PyramidIntegration) is None:
return
event, hint = event_from_exception(
exc_info,
client_options=hub.client.options,
mechanism={"type": "pyramid", "handled": False},
)
hub.capture_event(event, hint=hint)
class PyramidRequestExtractor(RequestExtractor):
def url(self):
return self.request.path_url
def env(self):
return self.request.environ
def cookies(self):
return self.request.cookies
def raw_data(self):
return self.request.text
def form(self):
return {
key: value
for key, value in self.request.POST.items()
if not getattr(value, "filename", None)
}
def files(self):
return {
key: value
for key, value in self.request.POST.items()
if getattr(value, "filename", None)
}
def size_of_file(self, postdata):
file = postdata.file
try:
return os.fstat(file.fileno()).st_size
except Exception:
return 0
def _make_event_processor(weak_request, integration):
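    # The request is held through a weakref so the processor attached to the
    # scope does not keep the request object alive after the response is sent.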
def event_processor(event, hint):
request = weak_request()
if request is None:
return event
if "transaction" not in event:
try:
if integration.transaction_style == "route_name":
event["transaction"] = request.matched_route.name
elif integration.transaction_style == "route_pattern":
event["transaction"] = request.matched_route.pattern
except Exception:
pass
with capture_internal_exceptions():
PyramidRequestExtractor(request).extract_into_event(event)
if _should_send_default_pii():
with capture_internal_exceptions():
user_info = event.setdefault("user", {})
if "id" not in user_info:
user_info["id"] = request.authenticated_userid
return event
return event_processor
|
py | 1a42664a1e357506b87714f0327a87e20ec59e78 | """
Hexpatch
========
Patch a binary file from a simple description, using non-overlapping longest-match context matches.
Useful for instruction-patching executables; if codegen has not changed too much, even different versions will match.
Patch file format
-----------------
- Line-based text file
- comments lines start with # at character 1
- empty lines are ignored
Pairs of lines of what remains form the patch patterns, in hexadecimal. Pattern and replacement don't have
to be the same size, allowing for insertions.
Example:
```
# replace a jump instruction
ab 00 aa bb 75 33 55
ab 00 aa bb ec 33 55
```
"""
import sys
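# Typical invocation (illustrative; the file names are placeholders):
#   python hexpatch.py changes.patch original.bin patched.bin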
def main(patch, left, right=None):
if right is None:
right = left + ".patched"
with open(left, "rb") as f:
source = f.read()
patterns = []
with open(patch, "rt") as f:
def dataline(it) -> str:
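            # Return the next non-empty, non-comment line; the StopIteration
            # raised at end of file terminates the pattern-reading loop below.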
while True:
l = next(it).strip()
if l and not l.startswith('#'):
return l
liter = iter(f)
        try:
            # read pattern/replacement line pairs until the file is exhausted
            while True:
                a = bytes.fromhex(dataline(liter))
                b = bytes.fromhex(dataline(liter))
                patterns.append((a, b))
        except StopIteration:
            pass
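    # Longest patterns first, so a short pattern cannot pre-empt a longer,
    # more specific one that matches at the same offset.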
patterns.sort(key=lambda p: len(p[0]), reverse=True)
modified = bytearray(source)
wp = 0
while wp < len(modified):
found = False
for pat, rep in patterns:
try:
loc = modified.index(pat, wp)
except ValueError:
continue
modified[loc:loc + len(pat)] = rep
            wp = loc + len(rep)  # resume scanning just after the replaced bytes
found = True
break
if not found:
break
with open(right, "wb") as f:
f.write(modified)
if __name__ == "__main__":
main(*sys.argv[1:])
|
py | 1a426696eb2dac6b5082e7c54c996bcf971fc975 | import os
import logging
import multiprocessing as mp
# from hexrd.utils.decorators import memoized
from hexrd import imageseries
from .config import Config
from .instrument import Instrument
from .findorientations import FindOrientationsConfig
from .fitgrains import FitGrainsConfig
from .material import MaterialConfig
logger = logging.getLogger('hexrd.config')
class RootConfig(Config):
@property
def analysis_name(self):
return str(self.get('analysis_name', default='analysis'))
@analysis_name.setter
def analysis_name(self, val):
self.set('analysis_name', val)
@property
def analysis_dir(self):
return os.path.join(self.working_dir, self.analysis_name)
@property
def find_orientations(self):
return FindOrientationsConfig(self)
@property
def fit_grains(self):
return FitGrainsConfig(self)
@property
def instrument(self):
if not hasattr(self, '_instr_config'):
instr_file = self.get('instrument')
instr_file = self.check_filename(instr_file, self.working_dir)
self._instr_config = Instrument(self, instr_file)
return self._instr_config
@instrument.setter
def instrument(self, instr_config):
self._instr_config = instr_config
@property
def material(self):
if not hasattr(self, '_material_config'):
self._material_config = MaterialConfig(self)
# !!! must make matl beam energy consistent with the instrument
beam_energy = self.instrument.hedm.beam_energy
self._material_config.beam_energy = beam_energy
return self._material_config
@material.setter
def material(self, material_config):
self._material_config = material_config
@property
def analysis_id(self):
return '_'.join(
[self.analysis_name.strip().replace(' ', '-'),
self.material.active.strip().replace(' ', '-')]
)
@property
def multiprocessing(self):
# determine number of processes to run in parallel
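        # 'all' -> every core, 'half' -> half the cores, a non-negative int is
        # clamped to [1, ncpus], a negative int means "all but |n|" cores, and
        # any other value falls back to ncpus - 1 with a warning.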
multiproc = self.get('multiprocessing', default=-1)
ncpus = mp.cpu_count()
if multiproc == 'all':
res = ncpus
elif multiproc == 'half':
temp = ncpus // 2
res = temp if temp else 1
elif isinstance(multiproc, int):
if multiproc >= 0:
if multiproc > ncpus:
logger.warning(
                        'Requested %d processes, %d available',
multiproc, ncpus
)
res = ncpus
else:
res = multiproc if multiproc else 1
else:
temp = ncpus + multiproc
if temp < 1:
logger.warning(
'Cannot use less than 1 process, requested %d of %d',
temp, ncpus
)
res = 1
else:
res = temp
else:
temp = ncpus - 1
logger.warning(
"Invalid value %s for multiprocessing",
multiproc
)
res = temp
return res
@multiprocessing.setter
def multiprocessing(self, val):
if val in ('half', 'all', -1):
self.set('multiprocessing', val)
        elif (val >= 0 and val <= mp.cpu_count()):
self.set('multiprocessing', int(val))
else:
raise RuntimeError(
'"multiprocessing": must be 1:%d, got %s'
% (mp.cpu_count(), val)
)
@property
def working_dir(self):
try:
temp = self.get('working_dir')
if not os.path.exists(temp):
raise IOError(
                    '"working_dir": "%s" does not exist' % temp
)
return temp
except RuntimeError:
temp = os.getcwd()
was_dirty = self.dirty
self.working_dir = temp
if not was_dirty:
self._dirty = False
logger.info(
'"working_dir" not specified, defaulting to "%s"' % temp
)
return temp
@working_dir.setter
def working_dir(self, val):
val = os.path.abspath(val)
if not os.path.isdir(val):
raise IOError('"working_dir": "%s" does not exist' % val)
self.set('working_dir', val)
@property
def image_series(self):
"""Return the imageseries dictionary."""
if not hasattr(self, '_image_dict'):
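            # Built lazily: each entry of image_series:data is opened with the
            # declared format, wrapped in an OmegaImageSeries and keyed by panel.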
self._image_dict = dict()
fmt = self.get('image_series:format')
imsdata = self.get('image_series:data')
for ispec in imsdata:
fname = self.check_filename(ispec['file'], self.working_dir)
args = ispec['args']
ims = imageseries.open(fname, fmt, **args)
oms = imageseries.omega.OmegaImageSeries(ims)
try:
panel = ispec['panel']
                except KeyError:
panel = oms.metadata['panel']
self._image_dict[panel] = oms
return self._image_dict
@image_series.setter
def image_series(self, ims_dict):
self._image_dict = ims_dict
|
py | 1a4266e82ab54246fc8e01d978a4239acc245b6d | import sys
sys.path.append(".")
from query_representation.query import *
from evaluation.eval_fns import *
from cardinality_estimation.featurizer import *
from cardinality_estimation.algs import *
from cardinality_estimation.fcnn import FCNN
from cardinality_estimation.mscn import MSCN
import glob
import argparse
import random
import json
import klepto
from sklearn.model_selection import train_test_split
import pdb
import copy
def eval_alg(alg, eval_funcs, qreps, samples_type):
    '''
    Evaluate a trained estimator on `qreps` with every evaluation function,
    printing mean / median / 99th-percentile errors for this sample split.
    '''
np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})
start = time.time()
alg_name = alg.__str__()
exp_name = alg.get_exp_name()
ests = alg.test(qreps)
for efunc in eval_funcs:
rdir = None
if args.result_dir is not None:
rdir = os.path.join(args.result_dir, exp_name)
make_dir(rdir)
errors = efunc.eval(qreps, ests, args=args, samples_type=samples_type,
result_dir=rdir, user = args.user, db_name = args.db_name,
db_host = args.db_host, port = args.port,
num_processes = args.num_eval_processes,
alg_name = alg_name)
print("{}, {}, {}, #samples: {}, {}: mean: {}, median: {}, 99p: {}"\
.format(args.db_name, samples_type, alg, len(errors),
efunc.__str__(),
np.round(np.mean(errors),3),
np.round(np.median(errors),3),
np.round(np.percentile(errors,99),3)))
print("all loss computations took: ", time.time()-start)
def get_alg(alg):
if alg == "saved":
assert args.model_dir is not None
return SavedPreds(model_dir=args.model_dir)
elif alg == "postgres":
return Postgres()
elif alg == "true":
return TrueCardinalities()
elif alg == "true_rank":
return TrueRank()
elif alg == "true_random":
return TrueRandom()
elif alg == "true_rank_tables":
return TrueRankTables()
elif alg == "random":
return Random()
elif alg == "rf":
return RandomForest(grid_search = False,
n_estimators = 100,
max_depth = 10,
lr = 0.01)
elif alg == "xgb":
return XGBoost(grid_search=False, tree_method="hist",
subsample=1.0, n_estimators = 100,
max_depth=10, lr = 0.01)
elif alg == "fcnn":
return FCNN(max_epochs = args.max_epochs, lr=args.lr,
mb_size = args.mb_size,
weight_decay = args.weight_decay,
load_query_together = args.load_query_together,
result_dir = args.result_dir,
num_hidden_layers=args.num_hidden_layers,
eval_epoch = args.eval_epoch,
optimizer_name=args.optimizer_name,
clip_gradient=args.clip_gradient,
loss_func_name = args.loss_func_name,
hidden_layer_size = args.hidden_layer_size)
elif alg == "mscn":
return MSCN(max_epochs = args.max_epochs, lr=args.lr,
load_padded_mscn_feats = args.load_padded_mscn_feats,
mb_size = args.mb_size,
weight_decay = args.weight_decay,
load_query_together = args.load_query_together,
result_dir = args.result_dir,
# num_hidden_layers=args.num_hidden_layers,
eval_epoch = args.eval_epoch,
optimizer_name=args.optimizer_name,
clip_gradient=args.clip_gradient,
loss_func_name = args.loss_func_name,
hidden_layer_size = args.hidden_layer_size)
else:
assert False
def get_query_fns():
fns = list(glob.glob(args.query_dir + "/*"))
skipped_templates = []
train_qfns = []
test_qfns = []
val_qfns = []
if args.train_test_split_kind == "template":
# the train/test split will be on the template names
sorted_fns = copy.deepcopy(fns)
sorted_fns.sort()
train_tmps, test_tmps = train_test_split(sorted_fns,
test_size=args.test_size,
random_state=args.diff_templates_seed)
for qi,qdir in enumerate(fns):
if ".json" in qdir:
continue
template_name = os.path.basename(qdir)
if args.query_templates != "all":
query_templates = args.query_templates.split(",")
if template_name not in query_templates:
skipped_templates.append(template_name)
continue
# let's first select all the qfns we are going to load
qfns = list(glob.glob(qdir+"/*.pkl"))
qfns.sort()
if args.num_samples_per_template == -1:
qfns = qfns
elif args.num_samples_per_template < len(qfns):
qfns = qfns[0:args.num_samples_per_template]
else:
assert False
if args.train_test_split_kind == "template":
cur_val_fns = []
if qdir in train_tmps:
cur_train_fns = qfns
cur_test_fns = []
elif qdir in test_tmps:
cur_train_fns = []
cur_test_fns = qfns
else:
assert False
elif args.train_test_split_kind == "query":
if args.val_size == 0:
cur_val_fns = []
else:
cur_val_fns, qfns = train_test_split(qfns,
test_size=1-args.val_size,
random_state=args.seed)
cur_train_fns, cur_test_fns = train_test_split(qfns,
test_size=args.test_size,
random_state=args.seed)
train_qfns += cur_train_fns
val_qfns += cur_val_fns
test_qfns += cur_test_fns
print("Skipped templates: ", " ".join(skipped_templates))
if args.train_test_split_kind == "query":
print("""Selected {} train queries, {} test queries, and {} val queries"""\
.format(len(train_qfns), len(test_qfns), len(val_qfns)))
elif args.train_test_split_kind == "template":
train_tmp_names = [os.path.basename(tfn) for tfn in train_tmps]
test_tmp_names = [os.path.basename(tfn) for tfn in test_tmps]
print("""Selected {} train templates, {} test templates"""\
.format(len(train_tmp_names), len(test_tmp_names)))
print("""Training templates: {}\nEvaluation templates: {}""".\
format(",".join(train_tmp_names), ",".join(test_tmp_names)))
# going to shuffle all these lists, so queries are evenly distributed. Plan
# Cost functions for some of these templates take a lot longer; so when we
# compute them in parallel, we want the queries to be shuffled so the
    # workload is divided evenly
random.shuffle(train_qfns)
random.shuffle(test_qfns)
random.shuffle(val_qfns)
return train_qfns, test_qfns, val_qfns
def load_qdata(fns):
qreps = []
for qfn in fns:
qrep = load_qrep(qfn)
# TODO: can do checks like no queries with zero cardinalities etc.
qreps.append(qrep)
template_name = os.path.basename(os.path.dirname(qfn))
qrep["name"] = os.path.basename(qfn)
qrep["template_name"] = template_name
return qreps
def get_featurizer(trainqs, valqs, testqs):
featurizer = Featurizer(args.user, args.pwd, args.db_name,
args.db_host, args.port)
featdata_fn = os.path.join(args.query_dir, "featdata.json")
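    # Column statistics are expensive to collect, so they are cached next to
    # the queries in featdata.json and reloaded unless --regen_featstats is set.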
if args.regen_featstats or not os.path.exists(featdata_fn):
featurizer.update_column_stats(trainqs+valqs+testqs)
ATTRS_TO_SAVE = ['aliases', 'cmp_ops', 'column_stats', 'joins',
'max_in_degree', 'max_joins', 'max_out_degree', 'max_preds',
'max_tables', 'regex_cols', 'tables']
featdata = {}
for k in dir(featurizer):
if k not in ATTRS_TO_SAVE:
continue
attrvals = getattr(featurizer, k)
if isinstance(attrvals, set):
attrvals = list(attrvals)
featdata[k] = attrvals
f = open(featdata_fn, "w")
json.dump(featdata, f)
f.close()
else:
f = open(featdata_fn, "r")
featdata = json.load(f)
f.close()
featurizer.update_using_saved_stats(featdata)
print("updated featdata from saved file!!")
if args.algs == "mscn":
feat_type = "set"
else:
feat_type = "combined"
# Look at the various keyword arguments to setup() to change the
# featurization behavior; e.g., include certain features etc.
# these configuration properties do not influence the basic statistics
# collected in the featurizer.update_column_stats call; Therefore, we don't
# include this in the cached version
featurizer.setup(ynormalization=args.ynormalization,
featurization_type=feat_type)
featurizer.update_ystats(trainqs+valqs+testqs)
return featurizer
def main():
train_qfns, test_qfns, val_qfns = get_query_fns()
trainqs = load_qdata(train_qfns)
# Note: can be quite memory intensive to load them all; might want to just
# keep around the qfns and load them as needed
valqs = load_qdata(val_qfns)
testqs = load_qdata(test_qfns)
# only needs featurizer for learned models
if args.algs in ["xgb", "fcnn", "mscn"]:
featurizer = get_featurizer(trainqs, valqs, testqs)
else:
featurizer = None
algs = []
for alg_name in args.algs.split(","):
algs.append(get_alg(alg_name))
eval_fns = []
for efn in args.eval_fns.split(","):
eval_fns.append(get_eval_fn(efn))
for alg in algs:
alg.train(trainqs, valqs=valqs, testqs=testqs,
featurizer=featurizer, result_dir=args.result_dir)
eval_alg(alg, eval_fns, trainqs, "train")
if len(valqs) > 0:
eval_alg(alg, eval_fns, valqs, "val")
if len(testqs) > 0:
eval_alg(alg, eval_fns, testqs, "test")
def read_flags():
parser = argparse.ArgumentParser()
parser.add_argument("--query_dir", type=str, required=False,
default="./queries/imdb/")
## db credentials
parser.add_argument("--db_name", type=str, required=False,
default="imdb")
parser.add_argument("--db_host", type=str, required=False,
default="localhost")
parser.add_argument("--user", type=str, required=False,
default="ceb")
parser.add_argument("--pwd", type=str, required=False,
default="password")
parser.add_argument("--port", type=int, required=False,
default=5432)
parser.add_argument("--result_dir", type=str, required=False,
default="results")
parser.add_argument("--query_templates", type=str, required=False,
default="all")
parser.add_argument("--seed", type=int, required=False,
default=13)
parser.add_argument("--num_eval_processes", type=int, required=False,
default=-1, help="""Used for computing plan costs in parallel. -1 use all cpus; -2: use no cpus; else use n cpus. """)
parser.add_argument("--train_test_split_kind", type=str, required=False,
default="query", help="""query OR template.""")
parser.add_argument("--diff_templates_seed", type=int, required=False,
default=1, help="""Seed used when train_test_split_kind == template""")
parser.add_argument("-n", "--num_samples_per_template", type=int,
required=False, default=-1)
parser.add_argument("--test_size", type=float, required=False,
default=0.5)
parser.add_argument("--val_size", type=float, required=False,
default=0.2)
parser.add_argument("--algs", type=str, required=False,
default="postgres")
parser.add_argument("--eval_fns", type=str, required=False,
default="qerr,ppc,plancost")
# featurizer arguments
parser.add_argument("--regen_featstats", type=int, required=False,
default=1)
parser.add_argument("--ynormalization", type=str, required=False,
default="log")
## NN training features
parser.add_argument("--load_padded_mscn_feats", type=int, required=False, default=0, help="""==1 loads all the mscn features with padded zeros in memory -- speeds up training, but can take too much RAM.""")
parser.add_argument("--weight_decay", type=float, required=False,
default=0.0)
parser.add_argument("--max_epochs", type=int,
required=False, default=10)
parser.add_argument("--eval_epoch", type=int,
required=False, default=1)
parser.add_argument("--mb_size", type=int, required=False,
default=1024)
parser.add_argument("--num_hidden_layers", type=int,
required=False, default=2)
parser.add_argument("--hidden_layer_size", type=int,
required=False, default=128)
parser.add_argument("--load_query_together", type=int, required=False,
default=0)
parser.add_argument("--optimizer_name", type=str, required=False,
default="adamw")
parser.add_argument("--clip_gradient", type=float,
required=False, default=20.0)
parser.add_argument("--lr", type=float,
required=False, default=0.0001)
parser.add_argument("--loss_func_name", type=str, required=False,
default="mse")
return parser.parse_args()
if __name__ == "__main__":
args = read_flags()
main()
|
py | 1a4266ef0b05b06900b4b2d3bc2108d0374c44ce | import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import distribution
from chainer.functions.array import broadcast
from chainer.functions.array import where
from chainer.functions.math import clip
from chainer.functions.math import exponential
from chainer.functions.math import sqrt
from chainer import utils
from chainer.utils import argument
class Uniform(distribution.Distribution):
"""Uniform Distribution.
The probability density function of the distribution is expressed as
.. math::
p(x; l, h) = \\begin{cases}
\\frac{1}{h - l} & \\text{if }l \\leq x \\leq h \\\\
0 & \\text{otherwise}
\\end{cases}
Args:
low(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution representing the lower bound :math:`l`.
high(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution representing the higher bound :math:`h`.
"""
def __init__(self, **kwargs):
low, high, loc, scale = None, None, None, None
if kwargs:
low, high, loc, scale = argument.parse_kwargs(
kwargs, ('low', low), ('high', high), ('loc', loc),
('scale', scale))
if not (low is None or high is None) ^ (loc is None or scale is None):
raise ValueError(
"Either `low, high` or `loc, scale` (not both) must have a "
"value.")
with chainer.using_config('enable_backprop', True):
if low is None:
self.__loc = chainer.as_variable(loc)
self.__scale = chainer.as_variable(scale)
self.__low = self.__loc
self.__high = self.__loc + self.__scale
else:
self.__low = chainer.as_variable(low)
self.__high = chainer.as_variable(high)
self.__loc = self.__low
self.__scale = self.__high - self.__low
@property
def low(self):
return self.__low
@property
def high(self):
return self.__high
@property
def loc(self):
return self.__loc
@property
def scale(self):
return self.__scale
@property
def batch_shape(self):
return self.low.shape
def cdf(self, x):
return clip.clip((x - self.loc) / self.scale, 0., 1.)
@property
def entropy(self):
return exponential.log(self.scale)
@property
def event_shape(self):
return ()
def icdf(self, x):
return x * self.scale + self.loc
def log_prob(self, x):
if not isinstance(x, chainer.Variable):
x = chainer.Variable(x)
xp = backend.get_array_module(x)
logp = broadcast.broadcast_to(
-exponential.log(self.scale), x.shape)
return where.where(
utils.force_array(
(x.data >= self.low.data) & (x.data <= self.high.data)),
logp, xp.array(-xp.inf, logp.dtype))
@property
def mean(self):
return (self.high + self.low) / 2
def sample_n(self, n):
xp = backend.get_array_module(self.low)
if xp is cuda.cupy:
eps = xp.random.uniform(
0, 1, (n,) + self.low.shape, dtype=self.low.dtype)
else:
eps = xp.random.uniform(
0, 1, (n,) + self.low.shape).astype(self.low.dtype)
noise = self.icdf(eps)
return noise
@property
def stddev(self):
return sqrt.sqrt(self.variance)
@property
def support(self):
return "[low, high]"
@property
def variance(self):
return self.scale ** 2 / 12
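# Usage sketch (illustrative, not part of the original module):
#     u = Uniform(low=numpy.float32(0.), high=numpy.float32(2.))
#     x = u.sample_n(5)       # 5 draws per batch element
#     lp = u.log_prob(x)      # equals -log(high - low) inside the support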
@distribution.register_kl(Uniform, Uniform)
def _kl_uniform_uniform(dist1, dist2):
xp = backend.get_array_module(dist1.low)
is_inf = xp.logical_or(dist1.high.data > dist2.high.data,
dist1.low.data < dist2.low.data)
kl = - exponential.log(dist1.high - dist1.low) \
+ exponential.log(dist2.high - dist2.low)
inf = xp.array(xp.inf, dist1.high.dtype)
return where.where(is_inf, inf, kl)
|
py | 1a426791c3be4cb0ce6eba5e9d36b5bd58ecc87d | from rewards.aws.boost import download_past_boosts
if __name__ == "__main__":
download_past_boosts()
|
py | 1a42680335848613f2712783109dfa7bbf23ed90 | #!/usr/bin/env python
import json
import yaml
import urllib
import os
import sys
from jsonref import JsonRef # type: ignore
import click
from openapi2jsonschema.log import info, debug, error
from openapi2jsonschema.util import (
additional_properties,
replace_int_or_string,
allow_null_optional_fields,
change_dict_values,
append_no_duplicates,
)
from openapi2jsonschema.errors import UnsupportedError
@click.command()
@click.option(
"-o",
"--output",
default="schemas",
metavar="PATH",
help="Directory to store schema files",
)
@click.option(
"-p",
"--prefix",
default="_definitions.json",
help="Prefix for JSON references (only for OpenAPI versions before 3.0)",
)
@click.option(
"--stand-alone", is_flag=True, help="Whether or not to de-reference JSON schemas"
)
@click.option(
"--expanded", is_flag=True, help="Expand Kubernetes schemas by API version"
)
@click.option(
"--kubernetes", is_flag=True, help="Enable Kubernetes specific processors"
)
@click.option(
"--strict",
is_flag=True,
help="Prohibits properties not in the schema (additionalProperties: false)",
)
@click.argument("schema", metavar="SCHEMA_URL")
def default(output, schema, prefix, stand_alone, expanded, kubernetes, strict):
"""
Converts a valid OpenAPI specification into a set of JSON Schema files
"""
info("Downloading schema")
if sys.version_info < (3, 0):
response = urllib.urlopen(schema)
else:
if os.path.isfile(schema):
schema = "file://" + os.path.realpath(schema)
req = urllib.request.Request(schema)
response = urllib.request.urlopen(req)
info("Parsing schema")
# Note that JSON is valid YAML, so we can use the YAML parser whether
# the schema is stored in JSON or YAML
data = yaml.load(response.read(), Loader=yaml.SafeLoader)
if "swagger" in data:
version = data["swagger"]
elif "openapi" in data:
version = data["openapi"]
if not os.path.exists(output):
os.makedirs(output)
if version < "3":
with open("%s/_definitions.json" % output, "w") as definitions_file:
info("Generating shared definitions")
definitions = data["definitions"]
if kubernetes:
definitions["io.k8s.apimachinery.pkg.util.intstr.IntOrString"] = {
"oneOf": [{"type": "string"}, {"type": "integer"}]
}
# Although the kubernetes api does not allow `number` as valid
            # Quantity type - almost all kubernetes tooling
# recognizes it is valid. For this reason, we extend the API definition to
# allow `number` values.
definitions["io.k8s.apimachinery.pkg.api.resource.Quantity"] = {
"oneOf": [{"type": "string"}, {"type": "number"}]
}
# For Kubernetes, populate `apiVersion` and `kind` properties from `x-kubernetes-group-version-kind`
for type_name in definitions:
type_def = definitions[type_name]
if "x-kubernetes-group-version-kind" in type_def:
for kube_ext in type_def["x-kubernetes-group-version-kind"]:
if expanded and "apiVersion" in type_def["properties"]:
api_version = (
kube_ext["group"] + "/" +
kube_ext["version"]
if kube_ext["group"]
else kube_ext["version"]
)
append_no_duplicates(
type_def["properties"]["apiVersion"],
"enum",
api_version,
)
if "kind" in type_def["properties"]:
kind = kube_ext["kind"]
append_no_duplicates(
type_def["properties"]["kind"], "enum", kind
)
if strict:
definitions = additional_properties(definitions)
definitions_file.write(json.dumps(
{"definitions": definitions}, indent=2))
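    # Second pass (looks like a fork-specific addition): rewrite the $ref
    # values in every definition via ovirt_change_array, which is not among
    # the imports above and presumably ships with this fork, then overwrite
    # _definitions.json with the result.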
with open("%s/_definitions.json" % output, 'w') as definitions_file:
definitions = data['definitions']
updated = ovirt_change_array(definitions, prefix, version, False)
definitions_file.write(json.dumps({"definitions": updated}, indent=2))
types = []
info("Generating individual schemas")
if version < "3":
components = updated #data['definitions']
else:
components = data["components"]["schemas"]
for title in components:
kind = title.split(".")[-1].lower()
if kubernetes:
group = title.split(".")[-3].lower()
api_version = title.split(".")[-2].lower()
specification = components[title]
specification["$schema"] = "http://json-schema.org/schema#"
specification.setdefault("type", "object")
if strict:
specification["additionalProperties"] = False
if kubernetes and expanded:
if group in ["core", "api"]:
full_name = "%s-%s" % (kind, api_version)
else:
full_name = "%s-%s-%s" % (kind, group, api_version)
else:
full_name = kind
types.append(title)
try:
debug("Processing %s" % full_name)
# These APIs are all deprecated
if kubernetes:
if title.split(".")[3] == "pkg" and title.split(".")[2] == "kubernetes":
raise UnsupportedError(
"%s not currently supported, due to use of pkg namespace"
% title
)
# This list of Kubernetes types carry around jsonschema for Kubernetes and don't
# currently work with openapi2jsonschema
if (
kubernetes
and stand_alone
and kind
in [
"jsonschemaprops",
"jsonschemapropsorarray",
"customresourcevalidation",
"customresourcedefinition",
"customresourcedefinitionspec",
"customresourcedefinitionlist",
"customresourcedefinitionspec",
"jsonschemapropsorstringarray",
"jsonschemapropsorbool",
]
):
raise UnsupportedError("%s not currently supported" % kind)
updated = change_dict_values(specification, prefix, version)
specification = updated
if stand_alone:
base = "file://%s/%s/" % (os.getcwd(), output)
specification = JsonRef.replace_refs(
specification, base_uri=base)
if "additionalProperties" in specification:
if specification["additionalProperties"]:
updated = change_dict_values(
specification["additionalProperties"], prefix, version
)
specification["additionalProperties"] = updated
if strict and "properties" in specification:
updated = additional_properties(specification["properties"])
specification["properties"] = updated
if kubernetes and "properties" in specification:
updated = replace_int_or_string(specification["properties"])
updated = allow_null_optional_fields(updated)
specification["properties"] = updated
with open("%s/%s.json" % (output, full_name), "w") as schema_file:
debug("Generating %s.json" % full_name)
schema_file.write(json.dumps(specification, indent=2))
except Exception as e:
            error("An error occurred processing %s: %s" % (kind, e))
with open("%s/all.json" % output, "w") as all_file:
info("Generating schema for all types")
contents = {"oneOf": []}
for title in types:
if version < "3":
contents["oneOf"].append(
{"$ref": "%s#/definitions/%s" % (prefix, title)}
)
else:
contents["oneOf"].append(
{"$ref": (title.replace("#/components/schemas/", "") + ".json")}
)
all_file.write(json.dumps(contents, indent=2))
if __name__ == "__main__":
default()
|
py | 1a426850a6b98c48676272e56d5edc550351c21a | from typing import Text, List, Tuple
from rasa.core.domain import Domain
from rasa.core.training.story_conflict import (
StoryConflict,
find_story_conflicts,
_get_previous_event,
)
from rasa.core.training.generator import TrainingDataGenerator, TrackerWithCachedStates
from rasa.validator import Validator
from rasa.importers.rasa import RasaFileImporter
from tests.core.conftest import DEFAULT_STORIES_FILE, DEFAULT_DOMAIN_PATH_WITH_SLOTS
async def _setup_trackers_for_testing(
domain_path: Text, training_data_file: Text
) -> Tuple[List[TrackerWithCachedStates], Domain]:
importer = RasaFileImporter(
domain_path=domain_path, training_data_paths=[training_data_file],
)
validator = await Validator.from_importer(importer)
trackers = TrainingDataGenerator(
validator.story_graph,
domain=validator.domain,
remove_duplicates=False,
augmentation_factor=0,
).generate()
return trackers, validator.domain
async def test_find_no_conflicts():
trackers, domain = await _setup_trackers_for_testing(
DEFAULT_DOMAIN_PATH_WITH_SLOTS, DEFAULT_STORIES_FILE
)
# Create a list of `StoryConflict` objects
conflicts = find_story_conflicts(trackers, domain, 5)
assert conflicts == []
async def test_find_conflicts_in_short_history():
trackers, domain = await _setup_trackers_for_testing(
"data/test_domains/default.yml", "data/test_stories/stories_conflicting_1.md"
)
# `max_history = 3` is too small, so a conflict must arise
conflicts = find_story_conflicts(trackers, domain, 3)
assert len(conflicts) == 1
# With `max_history = 4` the conflict should disappear
conflicts = find_story_conflicts(trackers, domain, 4)
assert len(conflicts) == 0
async def test_find_conflicts_checkpoints():
trackers, domain = await _setup_trackers_for_testing(
"data/test_domains/default.yml", "data/test_stories/stories_conflicting_2.md"
)
# Create a list of `StoryConflict` objects
conflicts = find_story_conflicts(trackers, domain, 5)
assert len(conflicts) == 1
assert conflicts[0].conflicting_actions == ["utter_goodbye", "utter_default"]
async def test_find_conflicts_or():
trackers, domain = await _setup_trackers_for_testing(
"data/test_domains/default.yml", "data/test_stories/stories_conflicting_3.md"
)
# Create a list of `StoryConflict` objects
conflicts = find_story_conflicts(trackers, domain, 5)
assert len(conflicts) == 1
assert conflicts[0].conflicting_actions == ["utter_default", "utter_goodbye"]
async def test_find_conflicts_slots_that_break():
trackers, domain = await _setup_trackers_for_testing(
"data/test_domains/default.yml", "data/test_stories/stories_conflicting_4.md"
)
# Create a list of `StoryConflict` objects
conflicts = find_story_conflicts(trackers, domain, 5)
assert len(conflicts) == 1
assert conflicts[0].conflicting_actions == ["utter_default", "utter_greet"]
async def test_find_conflicts_slots_that_dont_break():
trackers, domain = await _setup_trackers_for_testing(
"data/test_domains/default.yml", "data/test_stories/stories_conflicting_5.md"
)
# Create a list of `StoryConflict` objects
conflicts = find_story_conflicts(trackers, domain, 5)
assert len(conflicts) == 0
async def test_find_conflicts_multiple_stories():
trackers, domain = await _setup_trackers_for_testing(
"data/test_domains/default.yml", "data/test_stories/stories_conflicting_6.md"
)
# Create a list of `StoryConflict` objects
conflicts = find_story_conflicts(trackers, domain, 5)
assert len(conflicts) == 1
assert "and 2 other trackers" in str(conflicts[0])
async def test_add_conflicting_action():
sliced_states = [
None,
{},
{"intent_greet": 1.0, "prev_action_listen": 1.0},
{"prev_utter_greet": 1.0, "intent_greet": 1.0},
]
conflict = StoryConflict(sliced_states)
conflict.add_conflicting_action("utter_greet", "xyz")
conflict.add_conflicting_action("utter_default", "uvw")
assert conflict.conflicting_actions == ["utter_greet", "utter_default"]
async def test_has_prior_events():
sliced_states = [
None,
{},
{"intent_greet": 1.0, "prev_action_listen": 1.0},
{"prev_utter_greet": 1.0, "intent_greet": 1.0},
]
conflict = StoryConflict(sliced_states)
assert conflict.conflict_has_prior_events
async def test_get_previous_event():
assert _get_previous_event({"prev_utter_greet": 1.0, "intent_greet": 1.0}) == (
"action",
"utter_greet",
)
assert _get_previous_event({"intent_greet": 1.0, "prev_utter_greet": 1.0}) == (
"action",
"utter_greet",
)
assert _get_previous_event({"intent_greet": 1.0, "prev_action_listen": 1.0}) == (
"intent",
"greet",
)
async def test_has_no_prior_events():
sliced_states = [None]
conflict = StoryConflict(sliced_states)
assert not conflict.conflict_has_prior_events
|
py | 1a42685ac921b7a52e0171cb3378207341a2b509 | import logging
from collections import namedtuple, defaultdict
from enum import Enum
from itertools import product
from gym import Env
import gym
from gym.utils import seeding
import numpy as np
class Action(Enum):
NONE = 0
NORTH = 1
SOUTH = 2
WEST = 3
EAST = 4
LOAD = 5
class Player:
def __init__(self):
self.controller = None
self.position = None
self.level = None
self.field_size = None
self.score = None
self.reward = 0
self.history = None
self.current_step = None
def setup(self, position, level, field_size):
self.history = []
self.position = position
self.level = level
self.field_size = field_size
self.score = 0
def set_controller(self, controller):
self.controller = controller
def step(self, obs):
return self.controller._step(obs)
@property
def name(self):
if self.controller:
return self.controller.name
else:
return "Player"
class ForagingEnv(Env):
"""
A class that contains rules/actions for the game level-based foraging.
"""
metadata = {"render.modes": ["human"]}
action_set = [Action.NORTH, Action.SOUTH, Action.WEST, Action.EAST, Action.LOAD]
Observation = namedtuple(
"Observation",
["field", "actions", "players", "game_over", "sight", "current_step", "button"],
)
PlayerObservation = namedtuple(
"PlayerObservation", ["position", "level", "history", "reward", "is_self"]
) # reward is available only if is_self
def __init__(
self,
players,
max_player_level,
field_size,
sight,
max_episode_steps,
normalize_reward=True,
):
assert players>1, "Need at least 2 players"
self.logger = logging.getLogger(__name__)
self.seed()
self.players = [Player() for _ in range(players)]
self.field = np.zeros(field_size, np.int32)
        self.max_food = 1
self._food_spawned = 0.0
self.max_player_level = max_player_level
self.sight = sight
        self.force_coop = True
self._game_over = None
self.button_pressed = False
self._button_loc = None
self._food_loc = None
self.action_space = gym.spaces.Tuple(tuple([gym.spaces.Discrete(6)] * len(self.players)))
self.observation_space = gym.spaces.Tuple(tuple([self._get_observation_space()] * len(self.players)))
# New stuff
# self.share_observation_space = gym.spaces.Tuple(tuple([self._get_observation_space()] * len(self.players)))
self.share_observation_space = gym.spaces.Tuple(tuple([self._get_shared_observation_space()] * len(self.players)))
self._rendering_initialized = False
self._valid_actions = None
self._max_episode_steps = max_episode_steps
self._normalize_reward = normalize_reward
self.viewer = None
self.n_agents = len(self.players)
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _get_observation_space(self):
"""The Observation Space for each agent.
- all of the board (board_size^2) with foods
- player description (x, y, level)*player_count
"""
field_x = self.field.shape[1]
field_y = self.field.shape[0]
# field_size = field_x * field_y
max_food = self.max_food
max_food_level = self.max_player_level * len(self.players)
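        # Flat vector layout (see _make_gym_obs): [row, col, level] per food,
        # then [row, col, level] per player (self first), then the button's
        # [row, col]; -1 entries mean "not currently visible".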
min_obs = [-1, -1, 0] * max_food + [0, 0, 1] * len(self.players) + [0, 0] * 1
max_obs = [field_x, field_y, max_food_level] * max_food + [
field_x,
field_y,
self.max_player_level,
] * len(self.players) + [field_x, field_y] * 1
return gym.spaces.Box(np.array(min_obs), np.array(max_obs), dtype=np.float32)
    def _get_shared_observation_space(self):
        """The shared observation space.
        The per-agent observation boxes concatenated end to end, one copy per
        player, e.g. for use by a centralised critic.
        """
shared_obs_space_min = self.observation_space[0].low
shared_obs_space_high = self.observation_space[0].high
for obs_space in self.observation_space[1:]:
shared_obs_space_min = np.append(shared_obs_space_min, obs_space.low)
shared_obs_space_high = np.append(shared_obs_space_high, obs_space.high)
return gym.spaces.Box(shared_obs_space_min, shared_obs_space_high, dtype=np.float32)
@classmethod
def from_obs(cls, obs):
players = []
for p in obs.players:
player = Player()
player.setup(p.position, p.level, obs.field.shape)
player.score = p.score if p.score else 0
players.append(player)
env = cls(players, None, None, None, None)
env.field = np.copy(obs.field)
env.current_step = obs.current_step
env.sight = obs.sight
env._gen_valid_moves()
return env
@property
def field_size(self):
return self.field.shape
@property
def rows(self):
return self.field_size[0]
@property
def cols(self):
return self.field_size[1]
@property
def game_over(self):
return self._game_over
def _gen_valid_moves(self):
self._valid_actions = {
player: [
action for action in Action if self._is_valid_action(player, action)
]
for player in self.players
}
def neighborhood(self, row, col, distance=1, ignore_diag=False):
if not ignore_diag:
return self.field[
max(row - distance, 0) : min(row + distance + 1, self.rows),
max(col - distance, 0) : min(col + distance + 1, self.cols),
]
return (
self.field[
max(row - distance, 0) : min(row + distance + 1, self.rows), col
].sum()
+ self.field[
row, max(col - distance, 0) : min(col + distance + 1, self.cols)
].sum()
)
def adjacent_food(self, row, col):
return (
self.field[max(row - 1, 0), col]
+ self.field[min(row + 1, self.rows - 1), col]
+ self.field[row, max(col - 1, 0)]
+ self.field[row, min(col + 1, self.cols - 1)]
)
def adjacent_food_location(self, row, col):
if row > 1 and self.field[row - 1, col] > 0:
return row - 1, col
elif row < self.rows - 1 and self.field[row + 1, col] > 0:
return row + 1, col
elif col > 1 and self.field[row, col - 1] > 0:
return row, col - 1
elif col < self.cols - 1 and self.field[row, col + 1] > 0:
return row, col + 1
def adjacent_players(self, row, col):
return [
player
for player in self.players
if abs(player.position[0] - row) == 1
and player.position[1] == col
or abs(player.position[1] - col) == 1
and player.position[0] == row
]
def spawn_food(self, max_food, max_level):
food_count = 0
attempts = 0
min_level = 2*max_level if self.force_coop else 1 # The fruit is 2x larger than max_level
while food_count < max_food and attempts < 1000:
attempts += 1
row = self.np_random.randint(1, self.rows - 1)
col = self.np_random.randint(1, self.cols - 1)
# check if it has neighbors:
if (
self.neighborhood(row, col).sum() > 0
or self.neighborhood(row, col, distance=2, ignore_diag=True) > 0
or not self._is_empty_location(row, col)
):
continue
self.field[row, col] = min_level
food_count += 1
self._food_loc = (row, col)
self._food_spawned = self.field.sum()
def _is_empty_location(self, row, col):
if self.field[row, col] != 0:
return False
for a in self.players:
if a.position and row == a.position[0] and col == a.position[1]:
return False
return True
def spawn_players(self, max_player_level):
for player in self.players:
attempts = 0
player.reward = 0
while attempts < 1000:
row = self.np_random.randint(0, self.rows - 1)
col = self.np_random.randint(0, self.cols - 1)
if self._is_empty_location(row, col):
player.setup(
(row, col),
self.np_random.randint(1, max_player_level),
self.field_size,
)
break
attempts += 1
def _is_valid_action(self, player, action):
if action == Action.NONE:
return True
elif action == Action.NORTH:
return (
player.position[0] > 0
and self.field[player.position[0] - 1, player.position[1]] == 0
)
elif action == Action.SOUTH:
return (
player.position[0] < self.rows - 1
and self.field[player.position[0] + 1, player.position[1]] == 0
)
elif action == Action.WEST:
return (
player.position[1] > 0
and self.field[player.position[0], player.position[1] - 1] == 0
)
elif action == Action.EAST:
return (
player.position[1] < self.cols - 1
and self.field[player.position[0], player.position[1] + 1] == 0
)
elif action == Action.LOAD:
return self.adjacent_food(*player.position) > 0
self.logger.error("Undefined action {} from {}".format(action, player.name))
raise ValueError("Undefined action")
def _transform_to_neighborhood(self, center, sight, position):
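        # Translate an absolute grid position into the coordinate frame of the
        # sight window centred on `center`; callers treat out-of-range results
        # as "not visible".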
return (
position[0] - center[0] + min(sight, center[0]),
position[1] - center[1] + min(sight, center[1]),
)
def get_valid_actions(self) -> list:
return list(product(*[self._valid_actions[player] for player in self.players]))
def _make_obs(self, player):
return self.Observation(
actions=self._valid_actions[player],
players=[
self.PlayerObservation(
position=self._transform_to_neighborhood(
player.position, self.sight, a.position
),
level=a.level,
is_self=a == player,
history=a.history,
reward=a.reward if a == player else None,
)
for a in self.players
if (
min(
self._transform_to_neighborhood(
player.position, self.sight, a.position
)
)
>= 0
)
and max(
self._transform_to_neighborhood(
player.position, self.sight, a.position
)
)
<= 2 * self.sight
],
# todo also check max?
field=np.copy(self.neighborhood(*player.position, self.sight)),
game_over=self.game_over,
sight=self.sight,
current_step=self.current_step,
button=self._transform_to_neighborhood(player.position, self.sight, self._button_loc)
if (min(self._transform_to_neighborhood(player.position, self.sight, self._button_loc)) >= 0)
and (max(self._transform_to_neighborhood( player.position, self.sight, self._button_loc))<= 2 * self.sight)
else [-1, -1]
)
def _make_gym_obs(self, observations):
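        # Flatten the Observation namedtuples into the fixed-size float vectors
        # described in _get_observation_space, plus per-player rewards and
        # done flags.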
def make_obs_array(observation):
obs = np.zeros(self.observation_space[0].shape, dtype=np.float32)
# obs[: observation.field.size] = observation.field.flatten()
# self player is always first
seen_players = [p for p in observation.players if p.is_self] + [
p for p in observation.players if not p.is_self
]
for i in range(self.max_food):
obs[3 * i] = -1
obs[3 * i + 1] = -1
obs[3 * i + 2] = 0
for i, (y, x) in enumerate(zip(*np.nonzero(observation.field))):
obs[3 * i] = y
obs[3 * i + 1] = x
obs[3 * i + 2] = observation.field[y, x]
for i in range(len(self.players)):
obs[self.max_food * 3 + 3 * i] = -1
obs[self.max_food * 3 + 3 * i + 1] = -1
obs[self.max_food * 3 + 3 * i + 2] = 0
for i, p in enumerate(seen_players):
obs[self.max_food * 3 + 3 * i] = p.position[0]
obs[self.max_food * 3 + 3 * i + 1] = p.position[1]
obs[self.max_food * 3 + 3 * i + 2] = p.level
obs[-2:] = np.array(observation.button)
return obs
def get_player_reward(observation):
for p in observation.players:
if p.is_self:
return p.reward
nobs = tuple([make_obs_array(obs) for obs in observations])
nreward = [[get_player_reward(obs)] for obs in observations]
ndone = [obs.game_over for obs in observations]
# ninfo = [{'observation': obs} for obs in observations]
ninfo = {}
return nobs, nreward, ndone, ninfo
def reset(self):
self.field = np.zeros(self.field_size, np.int32)
self.spawn_players(self.max_player_level)
player_levels = sorted([player.level for player in self.players])
self.spawn_food(
self.max_food, max_level=sum(player_levels[:3])
)
self.spawn_button()
# print(self._button_loc)
self.current_step = 0
self._game_over = False
self._gen_valid_moves()
observations = [self._make_obs(player) for player in self.players]
nobs, nreward, ndone, ninfo = self._make_gym_obs(observations)
return nobs
def spawn_button(self):
attempts = 0
while attempts < 1000:
attempts += 1
row = self.np_random.randint(1, self.rows - 1)
col = self.np_random.randint(1, self.cols - 1)
# check if it has neighbors:
if (
self.neighborhood(row, col).sum() > 0
or self.neighborhood(row, col, distance=2, ignore_diag=True) > 0
or not self._is_empty_location(row, col)
):
continue
self._button_loc = np.array([row, col])
return
def step(self, actions):
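        # Per-step resolution order: validate actions, resolve movement
        # collisions, handle the button (halves the food level once), then
        # process loading attempts and hand out (optionally normalised) rewards.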
self.current_step += 1
for p in self.players:
p.reward = 0
actions = [
Action(a) if Action(a) in self._valid_actions[p] else Action.NONE
for p, a in zip(self.players, actions)
]
# check if actions are valid
for i, (player, action) in enumerate(zip(self.players, actions)):
if action not in self._valid_actions[player]:
self.logger.info(
"{}{} attempted invalid action {}.".format(
player.name, player.position, action
)
)
actions[i] = Action.NONE
loading_players = set()
# move players
# if two or more players try to move to the same location they all fail
collisions = defaultdict(list)
# so check for collisions
for player, action in zip(self.players, actions):
if action == Action.NONE:
collisions[player.position].append(player)
elif action == Action.NORTH:
collisions[(player.position[0] - 1, player.position[1])].append(player)
elif action == Action.SOUTH:
collisions[(player.position[0] + 1, player.position[1])].append(player)
elif action == Action.WEST:
collisions[(player.position[0], player.position[1] - 1)].append(player)
elif action == Action.EAST:
collisions[(player.position[0], player.position[1] + 1)].append(player)
elif action == Action.LOAD:
collisions[player.position].append(player)
loading_players.add(player)
# and do movements for non colliding players
for k, v in collisions.items():
if len(v) > 1: # make sure no more than an player will arrive at location
continue
v[0].position = k
# process the button
if not self.button_pressed:
for player in self.players:
if player.position[0] == self._button_loc[0] and player.position[1] == self._button_loc[1]:
self.field[self._food_loc] = int(self.field[self._food_loc]/2)
self.button_pressed = True
# finally process the loadings:
while loading_players:
# find adjacent food
player = loading_players.pop()
frow, fcol = self.adjacent_food_location(*player.position)
food = self.field[frow, fcol]
adj_players = self.adjacent_players(frow, fcol)
adj_players = [
p for p in adj_players if p in loading_players or p is player
]
adj_player_level = sum([a.level for a in adj_players])
loading_players = loading_players - set(adj_players)
if adj_player_level < food:
# failed to load
continue
# else the food was loaded and each player scores points
for a in adj_players:
a.reward = float(a.level * food)
if self._normalize_reward:
a.reward = a.reward / float(
adj_player_level * self._food_spawned
) # normalize reward
# and the food is removed
self.field[frow, fcol] = 0
self._game_over = (
self.field.sum() == 0 or self._max_episode_steps <= self.current_step
)
self._gen_valid_moves()
for p in self.players:
p.score += p.reward
observations = [self._make_obs(player) for player in self.players]
return self._make_gym_obs(observations)
def _init_render(self):
from .rendering_subgoal import Viewer
self.viewer = Viewer((self.rows, self.cols))
self._rendering_initialized = True
def render(self, mode="human"):
if not self._rendering_initialized:
self._init_render()
return self.viewer.render(self, return_rgb_array=mode == "rgb_array")
def close(self):
if self.viewer:
self.viewer.close() |
py | 1a42689d50c366c941759c76bbcc03f3f5fd7afb | def run(db, args):
print("TODO") |
py | 1a4268b304e7d7e1864c852529eb912d8534e437 | # -*- coding: utf-8 -*-
from PySide6.QtWidgets import (
QApplication)
from PySide6.QtGui import (
QFontMetrics,
QTextOption)
from PySide6.QtCore import (
QEvent)
from .textline import SourceTextLineBase
from .textviewer import TextViewer
__all__ = ["SourceViewer"]
class SourceTextLine(SourceTextLineBase):
def __init__(self, text, font, option):
super().__init__(text, font, option)
def rehighlight(self):
formats = self._commonHighlightFormats()
if formats:
self._layout.setFormats(formats)
class SourceViewer(TextViewer):
def __init__(self, parent=None):
super().__init__(parent)
self._panel = None
self._blockEventFilter = False
self.verticalScrollBar().valueChanged.connect(
self._onVScrollBarValueChanged)
settings = QApplication.instance().settings()
settings.tabSizeChanged.connect(self.delayUpdateSettings)
settings.showWhitespaceChanged.connect(self.delayUpdateSettings)
settings.diffViewFontChanged.connect(self.delayUpdateSettings)
def toTextLine(self, text):
return SourceTextLine(text, self._font, self._option)
def setPanel(self, panel):
if self._panel:
if panel != self._panel:
self._panel.removeEventFilter(self)
else:
return
self._panel = panel
if panel:
self._updatePanelGeo()
panel.installEventFilter(self)
else:
self.setViewportMargins(0, 0, 0, 0)
@property
def panel(self):
return self._panel
def reloadSettings(self):
settings = QApplication.instance().settings()
self.updateFont(settings.diffViewFont())
fm = QFontMetrics(self._font)
tabSize = settings.tabSize()
tabstopWidth = fm.horizontalAdvance(' ') * tabSize
self._option = QTextOption()
self._option.setTabStopDistance(tabstopWidth)
if settings.showWhitespace():
flags = self._option.flags()
self._option.setFlags(flags | QTextOption.ShowTabsAndSpaces)
self.reloadBugPattern()
def _onVScrollBarValueChanged(self, value):
if self._panel:
self._panel.update()
def _updatePanelGeo(self):
if self._panel:
rc = self.rect()
width = self._panel.width()
self.setViewportMargins(width + 1, 0, 0, 0)
self._panel.setGeometry(rc.left() + 1,
rc.top() + 1,
width,
self.viewport().height())
def _reloadTextLine(self, textLine):
# reload bugPattern
super()._reloadTextLine(textLine)
if isinstance(textLine, SourceTextLineBase):
textLine.setDefOption(self._option)
textLine.setFont(self._font)
def resizeEvent(self, event):
if event.oldSize().height() != event.size().height():
self._blockEventFilter = True
self._updatePanelGeo()
self._blockEventFilter = False
super().resizeEvent(event)
def eventFilter(self, obj, event):
if not self._blockEventFilter and \
obj == self._panel and \
event.type() == QEvent.Resize:
self._updatePanelGeo()
return True
return super().eventFilter(obj, event)
|
py | 1a4269086b649490bbf0010250cd6daaca0f7cc9 | # Generated by Django 3.2 on 2021-07-06 19:24
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('product_name', models.CharField(max_length=50)),
('desc', models.CharField(max_length=300)),
('pub_date', models.DateField()),
],
),
]
|
py | 1a42691db8141bfba21776bb145cf2ab6e5e1816 | """Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
In earlier versions of Python (up to 1.5a3), scripts or modules that
needed to use site-specific modules would place ``import site''
somewhere near the top of their code. Because of the automatic
import, this is no longer necessary (but code that does it still
works).
This will append site-specific paths to the module search path. On
Unix (including Mac OSX), it starts with sys.prefix and
sys.exec_prefix (if different) and appends
lib/python<version>/site-packages as well as lib/site-python.
On other platforms (such as Windows), it tries each of the
prefixes directly, as well as with lib/site-packages appended. The
resulting directories, if they exist, are appended to sys.path, and
also inspected for path configuration files.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.5/site-packages
with three subdirectories, foo, bar and spam, and two path
configuration files, foo.pth and bar.pth. Assume foo.pth contains the
following:
# foo package configuration
foo
bar
bletch
and bar.pth contains:
# bar package configuration
bar
Then the following directories are added to sys.path, in this order:
/usr/local/lib/python2.5/site-packages/bar
/usr/local/lib/python2.5/site-packages/foo
Note that bletch is omitted because it doesn't exist; bar precedes foo
because bar.pth comes alphabetically before foo.pth; and spam is
omitted because it is not mentioned in either path configuration file.
After these path manipulations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import sys
import os
import __builtin__
import traceback
# Prefixes for site-packages; add additional prefixes like /usr/local here
PREFIXES = [sys.prefix, sys.exec_prefix]
# Enable per user site-packages directory
# set it to False to disable the feature or True to force the feature
ENABLE_USER_SITE = None
# for distutils.commands.install
# These values are initialized by the getuserbase() and getusersitepackages()
# functions, through the main() function when Python starts.
USER_SITE = None
USER_BASE = None
def makepath(*paths):
dir = os.path.join(*paths)
try:
dir = os.path.abspath(dir)
except OSError:
pass
return dir, os.path.normcase(dir)
def abs__file__():
"""Set all module' __file__ attribute to an absolute path"""
for m in sys.modules.values():
if hasattr(m, '__loader__'):
continue # don't mess with a PEP 302-supplied __file__
try:
prev = m.__file__
new = os.path.abspath(m.__file__)
if prev != new:
m.__file__ = new
except (AttributeError, OSError):
pass
def removeduppaths():
""" Remove duplicate entries from sys.path along with making them
absolute"""
# This ensures that the initial path provided by the interpreter contains
# only absolute pathnames, even if we're running from the build directory.
L = []
known_paths = set()
for dir in sys.path:
# Filter out duplicate paths (on case-insensitive file systems also
# if they only differ in case); turn relative paths into absolute
# paths.
dir, dircase = makepath(dir)
if not dircase in known_paths:
L.append(dir)
known_paths.add(dircase)
sys.path[:] = L
return known_paths
def _init_pathinfo():
"""Return a set containing all existing directory entries from sys.path"""
d = set()
for dir in sys.path:
try:
if os.path.isdir(dir):
dir, dircase = makepath(dir)
d.add(dircase)
except TypeError:
continue
return d
def addpackage(sitedir, name, known_paths):
"""Process a .pth file within the site-packages directory:
For each line in the file, either combine it with sitedir to a path
and add that to known_paths, or execute it if it starts with 'import '.
"""
if known_paths is None:
_init_pathinfo()
reset = 1
else:
reset = 0
fullname = os.path.join(sitedir, name)
try:
f = open(fullname, "rU")
except IOError:
return
with f:
for n, line in enumerate(f):
if line.startswith("#"):
continue
try:
if line.startswith(("import ", "import\t")):
exec line
continue
line = line.rstrip()
dir, dircase = makepath(sitedir, line)
if not dircase in known_paths and os.path.exists(dir):
sys.path.append(dir)
known_paths.add(dircase)
except Exception as err:
print >>sys.stderr, "Error processing line {:d} of {}:\n".format(
n+1, fullname)
for record in traceback.format_exception(*sys.exc_info()):
for line in record.splitlines():
print >>sys.stderr, ' '+line
print >>sys.stderr, "\nRemainder of file ignored"
break
if reset:
known_paths = None
return known_paths
def addsitedir(sitedir, known_paths=None):
"""Add 'sitedir' argument to sys.path if missing and handle .pth files in
'sitedir'"""
if known_paths is None:
known_paths = _init_pathinfo()
reset = 1
else:
reset = 0
sitedir, sitedircase = makepath(sitedir)
if not sitedircase in known_paths:
sys.path.append(sitedir) # Add path component
try:
names = os.listdir(sitedir)
except os.error:
return
dotpth = os.extsep + "pth"
names = [name for name in names if name.endswith(dotpth)]
for name in sorted(names):
addpackage(sitedir, name, known_paths)
if reset:
known_paths = None
return known_paths
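# Illustrative use of addsitedir() (the directory name below is hypothetical):
#
#   addsitedir('/opt/extra/site-packages')
#
# would append that directory to sys.path (if not already present) and then
# process any *.pth files found inside it via addpackage().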
def check_enableusersite():
"""Check if user site directory is safe for inclusion
The function tests for the command line flag (including environment var),
process uid/gid equal to effective uid/gid.
None: Disabled for security reasons
False: Disabled by user (command line option)
True: Safe and enabled
"""
if sys.flags.no_user_site:
return False
if hasattr(os, "getuid") and hasattr(os, "geteuid"):
# check process uid == effective uid
if os.geteuid() != os.getuid():
return None
if hasattr(os, "getgid") and hasattr(os, "getegid"):
# check process gid == effective gid
if os.getegid() != os.getgid():
return None
return True
def getuserbase():
"""Returns the `user base` directory path.
The `user base` directory can be used to store data. If the global
variable ``USER_BASE`` is not initialized yet, this function will also set
it.
"""
global USER_BASE
if USER_BASE is not None:
return USER_BASE
from sysconfig import get_config_var
USER_BASE = get_config_var('userbase')
return USER_BASE
def getusersitepackages():
"""Returns the user-specific site-packages directory path.
If the global variable ``USER_SITE`` is not initialized yet, this
function will also set it.
"""
global USER_SITE
user_base = getuserbase() # this will also set USER_BASE
if USER_SITE is not None:
return USER_SITE
from sysconfig import get_path
import os
if sys.platform == 'darwin':
from sysconfig import get_config_var
if get_config_var('PYTHONFRAMEWORK'):
USER_SITE = get_path('purelib', 'osx_framework_user')
return USER_SITE
USER_SITE = get_path('purelib', '%s_user' % os.name)
return USER_SITE
def addusersitepackages(known_paths):
"""Add a per user site-package to sys.path
Each user has its own python directory with site-packages in the
home directory.
"""
# get the per user site-package path
# this call will also make sure USER_BASE and USER_SITE are set
user_site = getusersitepackages()
if ENABLE_USER_SITE and os.path.isdir(user_site):
addsitedir(user_site, known_paths)
return known_paths
def getsitepackages():
"""Returns a list containing all global site-packages directories
(and possibly site-python).
For each directory present in the global ``PREFIXES``, this function
will find its `site-packages` subdirectory depending on the system
environment, and will return a list of full paths.
"""
is_pypy = '__pypy__' in sys.builtin_module_names
sitepackages = []
seen = set()
for prefix in PREFIXES:
if not prefix or prefix in seen:
continue
seen.add(prefix)
if sys.platform in ('os2emx', 'riscos'):
sitepackages.append(os.path.join(prefix, "Lib", "site-packages"))
elif is_pypy:
from distutils.sysconfig import get_python_lib
sitedir = get_python_lib(standard_lib=False, prefix=prefix)
sitepackages.append(sitedir)
elif os.sep == '/':
sitepackages.append(os.path.join(prefix, "lib",
"python" + sys.version[:3],
"site-packages"))
sitepackages.append(os.path.join(prefix, "lib", "site-python"))
else:
sitepackages.append(prefix)
sitepackages.append(os.path.join(prefix, "lib", "site-packages"))
return sitepackages
def addsitepackages(known_paths):
"""Add site-packages (and possibly site-python) to sys.path"""
for sitedir in getsitepackages():
if os.path.isdir(sitedir):
addsitedir(sitedir, known_paths)
return known_paths
def setBEGINLIBPATH():
"""The OS/2 EMX port has optional extension modules that do double duty
as DLLs (and must use the .DLL file extension) for other extensions.
The library search path needs to be amended so these will be found
during module import. Use BEGINLIBPATH so that these are at the start
of the library search path.
"""
dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload")
libpath = os.environ['BEGINLIBPATH'].split(';')
if libpath[-1]:
libpath.append(dllpath)
else:
libpath[-1] = dllpath
os.environ['BEGINLIBPATH'] = ';'.join(libpath)
def setquit():
"""Define new builtins 'quit' and 'exit'.
These are objects which make the interpreter exit when called.
The repr of each object contains a hint at how it works.
"""
if os.sep == ':':
eof = 'Cmd-Q'
elif os.sep == '\\':
eof = 'Ctrl-Z plus Return'
else:
eof = 'Ctrl-D (i.e. EOF)'
class Quitter(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return 'Use %s() or %s to exit' % (self.name, eof)
def __call__(self, code=None):
# Shells like IDLE catch the SystemExit, but listen when their
# stdin wrapper is closed.
try:
sys.stdin.close()
except:
pass
raise SystemExit(code)
__builtin__.quit = Quitter('quit')
__builtin__.exit = Quitter('exit')
class _Printer(object):
"""interactive prompt objects for printing the license text, a list of
contributors and the copyright notice."""
MAXLINES = 23
def __init__(self, name, data, files=(), dirs=()):
self.__name = name
self.__data = data
self.__files = files
self.__dirs = dirs
self.__lines = None
def __setup(self):
if self.__lines:
return
data = None
for dir in self.__dirs:
for filename in self.__files:
filename = os.path.join(dir, filename)
try:
fp = file(filename, "rU")
data = fp.read()
fp.close()
break
except IOError:
pass
if data:
break
if not data:
data = self.__data
self.__lines = data.split('\n')
self.__linecnt = len(self.__lines)
def __repr__(self):
self.__setup()
if len(self.__lines) <= self.MAXLINES:
return "\n".join(self.__lines)
else:
return "Type %s() to see the full %s text" % ((self.__name,)*2)
def __call__(self):
self.__setup()
prompt = 'Hit Return for more, or q (and Return) to quit: '
lineno = 0
while 1:
try:
for i in range(lineno, lineno + self.MAXLINES):
print self.__lines[i]
except IndexError:
break
else:
lineno += self.MAXLINES
key = None
while key is None:
key = raw_input(prompt)
if key not in ('', 'q'):
key = None
if key == 'q':
break
##def setcopyright():
## """Set 'copyright' and 'credits' in __builtin__"""
## __builtin__.copyright = _Printer("copyright", sys.copyright)
## if sys.platform[:4] == 'java':
## __builtin__.credits = _Printer(
## "credits",
## "Jython is maintained by the Jython developers (www.jython.org).")
## else:
## __builtin__.credits = _Printer("credits", """\
## Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
## for supporting Python development. See www.python.org for more information.""")
## here = os.path.dirname(os.__file__)
## __builtin__.license = _Printer(
## "license", "See http://www.python.org/%.3s/license.html" % sys.version,
## ["LICENSE.txt", "LICENSE"],
## [os.path.join(here, os.pardir), here, os.curdir])
def setcopyright():
# XXX this is the PyPy-specific version. Should be unified with the above.
__builtin__.copyright = _Printer("copyright", sys.copyright)
__builtin__.credits = _Printer(
"credits",
"PyPy is maintained by the PyPy developers: http://pypy.org/")
__builtin__.license = _Printer(
"license",
"See https://bitbucket.org/pypy/pypy/src/default/LICENSE")
class _Helper(object):
"""Define the builtin 'help'.
This is a wrapper around pydoc.help (with a twist).
"""
def __repr__(self):
return "Type help() for interactive help, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
def sethelper():
__builtin__.help = _Helper()
def aliasmbcs():
"""On Windows, some default encodings are not provided by Python,
while they are always available as "mbcs" in each locale. Make
them usable by aliasing to "mbcs" in such a case."""
if sys.platform == 'win32':
import locale, codecs
enc = locale.getdefaultlocale()[1]
if enc is not None and enc.startswith('cp'): # "cp***" ?
try:
codecs.lookup(enc)
except LookupError:
import encodings
encodings._cache[enc] = encodings._unknown
encodings.aliases.aliases[enc] = 'mbcs'
def setencoding():
"""Set the string encoding used by the Unicode implementation. The
default is 'ascii', but if you're willing to experiment, you can
change this."""
encoding = "ascii" # Default value set by _PyUnicode_Init()
if 0:
# Enable to support locale aware default string encodings.
import locale
loc = locale.getdefaultlocale()
if loc[1]:
encoding = loc[1]
if 0:
# Enable to switch off string to Unicode coercion and implicit
# Unicode to string conversion.
encoding = "undefined"
if encoding != "ascii":
# On Non-Unicode builds this will raise an AttributeError...
sys.setdefaultencoding(encoding) # Needs Python Unicode build !
def execsitecustomize():
"""Run custom site specific code, if available."""
try:
import sitecustomize
except ImportError:
pass
except Exception:
if sys.flags.verbose:
sys.excepthook(*sys.exc_info())
else:
print >>sys.stderr, \
"'import sitecustomize' failed; use -v for traceback"
def execusercustomize():
"""Run custom user specific code, if available."""
try:
import usercustomize
except ImportError:
pass
except Exception:
if sys.flags.verbose:
sys.excepthook(*sys.exc_info())
else:
print>>sys.stderr, \
"'import usercustomize' failed; use -v for traceback"
def import_builtin_stuff():
"""PyPy specific: pre-import a few built-in modules, because
some programs actually rely on them to be in sys.modules :-("""
# encodings is imported sometimes but not always by app_main
import encodings
import exceptions
if 'zipimport' in sys.builtin_module_names:
import zipimport
def main():
global ENABLE_USER_SITE
import_builtin_stuff()
abs__file__()
known_paths = removeduppaths()
if ENABLE_USER_SITE is None:
ENABLE_USER_SITE = check_enableusersite()
known_paths = addusersitepackages(known_paths)
known_paths = addsitepackages(known_paths)
if sys.platform == 'os2emx':
setBEGINLIBPATH()
setquit()
setcopyright()
sethelper()
aliasmbcs()
setencoding()
execsitecustomize()
if ENABLE_USER_SITE:
execusercustomize()
# Remove sys.setdefaultencoding() so that users cannot change the
# encoding after initialization. The test for presence is needed when
# this module is run as a script, because this code is executed twice.
if hasattr(sys, "setdefaultencoding"):
del sys.setdefaultencoding
main()
def _script():
help = """\
%s [--user-base] [--user-site]
Without arguments print some useful information
With arguments print the value of USER_BASE and/or USER_SITE separated
by '%s'.
Exit codes with --user-base or --user-site:
0 - user site directory is enabled
1 - user site directory is disabled by user
      2 - user site directory is disabled by super user
or for security reasons
>2 - unknown error
"""
args = sys.argv[1:]
if not args:
print "sys.path = ["
for dir in sys.path:
print " %r," % (dir,)
print "]"
print "USER_BASE: %r (%s)" % (USER_BASE,
"exists" if os.path.isdir(USER_BASE) else "doesn't exist")
print "USER_SITE: %r (%s)" % (USER_SITE,
"exists" if os.path.isdir(USER_SITE) else "doesn't exist")
print "ENABLE_USER_SITE: %r" % ENABLE_USER_SITE
sys.exit(0)
buffer = []
if '--user-base' in args:
buffer.append(USER_BASE)
if '--user-site' in args:
buffer.append(USER_SITE)
if buffer:
print os.pathsep.join(buffer)
if ENABLE_USER_SITE:
sys.exit(0)
elif ENABLE_USER_SITE is False:
sys.exit(1)
elif ENABLE_USER_SITE is None:
sys.exit(2)
else:
sys.exit(3)
else:
import textwrap
print textwrap.dedent(help % (sys.argv[0], os.pathsep))
sys.exit(10)
if __name__ == '__main__':
_script()
|
py | 1a426929c05ad36363decb5e3552b8daf6fb81f3 | from InitProb import *
from collections import defaultdict
PNFile = "lsup"
OVERRIDE = 1
vertices = [[1, 1], [0, 3], [-1, 1], [-1, -1], [1, -1], [0.5, 0], [0, 0.75], [-0.5, 0], [0, 1]]
edgelists = [[0,1,2,3,4], [5,6,7,8]]
trpl = [[1, 1], [0, 3], [-1, 1]]
c = .1
box = [np.array([ 0.08*c, 0.08*c]),\
np.array([ -0.08*c, 0.08*c]),\
np.array([ -0.08*c,-0.08*c]),\
np.array([ 0.08*c,-0.08*c])]
#def clip(PND, aL):
#Instead of modifying the polychain data, just reproduce it based on PND.PCC
if __name__ == "__main__" or OVERRIDE:
OFF = PolyNodeData()
OFF.loadOFF("lsup.off")
acuteAngles = [] #This will be a list of lists that we fill with vertex idx for each polychain
PND = PolyNodeData()
PND.loadPND(PNFile + ".poly", PNFile + ".node")
aL = identifyAcute(PND)
# L = aL[0]
# A = aL[-3][0]
# out = []
#
# for aI in L:
# for b in box:
# out.append((PND.nodes[aI]+b).tolist())
#
# for b in box:
# out.append((PND.nodes[A]+b).tolist())
#
#
# OFFOut = OFFData()
#
# OFFOut.vertices = out
# OFFOut.elements = [[i*4 + j + 518 for j in range(4)] for i in range(len(OFFOut.vertices)//4)]
# #OFFOut.elements = [[i*4 + j for j in range(4)] for i in range(len(OFFOut.vertices)//4)]
# OFFOut.NV = len(out)
# OFFOut.NE = len(OFFOut.elements)
#
# OFFOut.export("marks.off")
# exportToOFF(vertices, edgelists, "House.off")
# #VVV All meant to determine boundary from a tri/quadrangulation
#
# D = defaultdict(lambda: 0)
# OFF = importOFF("superior.off")
#
# #Determine Boundary and Holes
#
# for ele in OFF.elements:
# for idx in range(len(ele)):
# D[str({ele[idx], ele[(idx+1) % len(ele)]})] += 1
#
# unsortedBoundary = set()
#
# for edge in D:
# if D[edge] == 1:
# unsortedBoundary.add(edge)
|
py | 1a4269bdc044c18e3672c1a71779a35722b324c6 | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# flake8: noqa
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import mock
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# to support markdown
from recommonmark.parser import CommonMarkParser
sys.path.insert(0, os.path.abspath("../"))
DEPLOY = os.environ.get("READTHEDOCS") == "True"
# -- Project information -----------------------------------------------------
try:
import torch # noqa
except ImportError:
for m in [
"torch",
"torchvision",
"torch.nn",
"torch.nn.parallel",
"torch.distributed",
"torch.multiprocessing",
"torch.autograd",
"torch.autograd.function",
"torch.nn.modules",
"torch.nn.modules.utils",
"torch.utils",
"torch.utils.data",
"torch.onnx",
"torchvision",
"torchvision.ops",
]:
sys.modules[m] = mock.Mock(name=m)
for m in [
"cv2",
"scipy",
"portalocker",
"detectron2._C",
"pycocotools",
"pycocotools.mask",
"pycocotools.coco",
"pycocotools.cocoeval",
"google",
"google.protobuf",
"google.protobuf.internal",
"onnx",
"caffe2",
"caffe2.proto",
"caffe2.python",
"caffe2.python.utils",
"caffe2.python.onnx",
"caffe2.python.onnx.backend",
]:
sys.modules[m] = mock.Mock(name=m)
sys.modules["cv2"].__version__ = "3.4"
import detectron2 # isort: skip
project = "detectron2"
copyright = "2019-2020, detectron2 contributors"
author = "detectron2 contributors"
# The short X.Y version
version = detectron2.__version__
# The full version, including alpha/beta/rc tags
release = version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = "1.7"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
]
# -- Configurations for plugins ------------
napoleon_google_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_special_with_doc = True
napoleon_numpy_docstring = False
napoleon_use_rtype = False
autodoc_inherit_docstrings = False
autodoc_member_order = "bysource"
if DEPLOY:
intersphinx_timeout = 10
else:
# skip this when building locally
intersphinx_timeout = 0.1
intersphinx_mapping = {
"python": ("https://docs.python.org/3.6", None),
"numpy": ("https://docs.scipy.org/doc/numpy/", None),
"torch": ("https://pytorch.org/docs/master/", None),
}
# -------------------------
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
source_parsers = {".md": CommonMarkParser}
source_suffix = [".rst", ".md"]
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "build", "README.md"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "detectron2doc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "detectron2.tex", "detectron2 Documentation", "detectron2 contributors", "manual")
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "detectron2", "detectron2 Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"detectron2",
"detectron2 Documentation",
author,
"detectron2",
"One line description of project.",
"Miscellaneous",
)
]
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
_DEPRECATED_NAMES = set()
def autodoc_skip_member(app, what, name, obj, skip, options):
# we hide something deliberately
if getattr(obj, "__HIDE_SPHINX_DOC__", False):
return True
# Hide some names that are deprecated or not intended to be used
if name in _DEPRECATED_NAMES:
return True
return None
def url_resolver(url):
if ".html" not in url:
url = url.replace("../", "")
return "https://github.com/facebookresearch/detectron2/blob/master/" + url
else:
if DEPLOY:
return "http://detectron2.readthedocs.io/" + url
else:
return "/" + url
def setup(app):
from recommonmark.transform import AutoStructify
app.connect("autodoc-skip-member", autodoc_skip_member)
# app.connect('autodoc-skip-member', autodoc_skip_member)
app.add_config_value(
"recommonmark_config",
{
"url_resolver": url_resolver,
"enable_math": True,
"enable_inline_math": True,
"enable_eval_rst": True,
},
True,
)
app.add_transform(AutoStructify)
|
py | 1a4269e0bed1c53b3536a29b0009126cb7cf8c6d | # -*- coding: utf-8 -*-
u"""Simplify rendering jinja2
:copyright: Copyright (c) 2015 Bivio Software, Inc. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkinspect
from pykern import pkio
from pykern import pkresource
from pykern.pkdebug import pkdc, pkdp
import jinja2
#: Implicit extension including '.' added to resources
RESOURCE_SUFFIX = '.jinja'
def render_file(filename, j2_ctx, output=None, strict_undefined=False, jinja_env=None):
"""Render filename as template with j2_ctx.
Args:
basename (str): name without jinja extension
j2_ctx (dict): how to replace values in Jinja2 template
output (str): file name of output; if None, return str
strict_undefined (bool): set `jinja2.StrictUndefined` if True
jinja_env (dict): add values to jinja2 environment
Returns:
str: rendered template
"""
t = pkio.read_text(filename)
kw = dict(
trim_blocks=True,
lstrip_blocks=True,
keep_trailing_newline=True,
extensions=['jinja2.ext.do'],
)
if strict_undefined:
kw['undefined'] = jinja2.StrictUndefined
if jinja_env:
kw.update(jinja_env)
je = jinja2.Environment(**kw)
res = je.from_string(t).render(j2_ctx)
if output:
pkio.write_text(output, res)
return res
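# A minimal usage sketch for render_file(); the file names here are hypothetical:
#
#   render_file(
#       'greeting.txt.jinja',      # template containing e.g. "Hello {{ name }}!"
#       {'name': 'world'},         # j2_ctx used to fill in the template
#       output='greeting.txt',     # omit output to get the rendered string back
#   )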
def render_resource(basename, *args, **kwargs):
"""Render a pkresource as a jinja template.
Args:
basename (str): name without `RESOURCE_SUFFIX`
args (list): see func:`render_file` for rest of args and return
"""
return render_file(
pkresource.filename(
basename + RESOURCE_SUFFIX,
pkinspect.caller_module(),
),
*args,
**kwargs
)
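# render_resource() differs from render_file() only in how the template is found:
# it resolves basename + RESOURCE_SUFFIX ('.jinja') against the calling module's
# package resources. Hypothetical example, assuming a packaged "report.txt.jinja":
#
#   render_resource('report.txt', {'title': 'Monthly report'})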
|
py | 1a426ab8d5d7de02b845b5e6ad314a95c37452dc | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Libraries
import sys
import time
import logging
import platform
import os
import random
# Modules
import core.log
from core.data.wordlist import *
from core.data.crossword import *
from core.data.constants import *
from core.helpers.parse import *
from core.implements.basic_backtracking import *
from core.implements.fc_backtracking import *
from core.implements.live_backtracking import *
from cli.arguments.parsers import DEFAULT_PARSER
from cli.arguments.constants import *
from cli.printers.crossword import *
# Constants
LOGGER = logging.getLogger(__name__)
# Functions
"""
Takes the system arguments vector and tries to parse the arguments in it given
the argument parser specified and returns the namespace generated
@param parser the ArgumentParser objects to use to parse the arguments
"""
def parseArguments(parser):
return parser.parse_args()
"""
Given the origin of the data for the wordlist, loads the wordlist and returns
it, while giving some information about it if it's required
@param origin the source to load the wordlist from
@return wordlist valid object (or None if couldn't load)
"""
def loadWordlist(origin):
LOGGER.info("-> Loading wordlist (from %s)",origin)
wordlist = WordList(origin)
if args.timers > 1: time_load_wordlist_start = time.time()
wordlist.read()
if args.timers > 2:
LOGGER.info("--> Read in %f seconds",time.time()-\
time_load_wordlist_start)
time_load_wordlist_start_parse = time.time()
wordlist.parse()
if args.timers > 2:
LOGGER.info("--> Parsed in %f seconds",time.time()-\
time_load_wordlist_start_parse)
if args.timers > 1:
LOGGER.info("--> Loaded in %f seconds",time.time()-\
time_load_wordlist_start)
if args.show_wordlist:
LOGGER.info(wordlist)
return wordlist
"""
Given the origin of the data for the crossword, loads the crossword and returns
it, while giving some information about it if it's required
@param origin the source to load the crossword from
@return crossword valid object (or None if couldn't load)
"""
def loadCrossword(origin):
crossword = Crossword(origin)
LOGGER.info("-> Loading crossword (from %s)",origin)
if args.timers > 1: time_load_crossword_start = time.time()
crossword.read().parse()
if args.timers > 1:
LOGGER.info("--> Loaded in %f seconds",time.time()-\
time_load_crossword_start)
if args.show_crossword:
LOGGER.info(crossword)
return crossword
"""
Retrieves the algorithm object to use depending on the arguments
@return algorithm callable object
"""
def selectAlgorithm():
alg = None
LOGGER.info("Chose %s algorithm"%args.algorithm)
if args.algorithm == ALG_BACKTRACKING_SIMPLE:
alg = CrosswordBasicBacktracking(wordlist.getList(),
crossword.getConstraints())
elif args.algorithm == ALG_BACKTRACKING_FC:
alg = CrosswordForwardCheckingBacktracking(wordlist.getList(),
crossword.getConstraints())
elif args.algorithm == ALG_BACKTRACKING_LIVE:
crossword_printer = CrosswordPrinter(crossword,args.frames)
crossword_printer.setStyle(args.style)
alg = CrosswordLiveBacktracking(wordlist.getList(),
crossword.getConstraints(),crossword_printer)
return alg
"""
Given the solution returned from the crossword, searches over the internet for
the definitions of the words appearing in the solution and shows the user the
definitions so they can solve the crossword themselves
@param solution solution to show hints
"""
def playGame(solution):
from bs4 import BeautifulSoup
import mwapi
LOGGER.info("---- GAME MODE ----")
LOGGER.info("I want to play a game...")
session = mwapi.Session('https://ca.wiktionary.org')
for word_i in range(len(solution)):
word = "".join(list(map(chr,solution[word_i]))).lower()
var = crossword.getVariableString(word_i)
resp = session.get(action='query',prop='extracts',titles=word)\
["query"]["pages"]
pages = list(resp.keys())
try:
extract = resp[pages[0]]["extract"]
except:
extract = None
parser = None
if extract:
parser = BeautifulSoup(extract,"html.parser").findAll("li")
definition = ""
if parser != None:
valid_defs = []
for info in parser:
text = info.getText()
if "Pronúncia" in text \
or "Exemples" in text \
or "Etimologia" in text \
or "Per a més informació vegeu" in text\
or len(text.split()) < 4:
continue
else:
valid_defs.append(text)
if len(valid_defs):
definition = random.choice(valid_defs)
if definition == "":
definition = word + " (no hem trobat cap definició)"
LOGGER.info("%s: %s",var,definition)
"""
Given a solution from the crossword, tries to print it over the screen, or logs
that no solution was found if necessary
@param solution solution to print
"""
def showSolution(solution):
if solution == None:
LOGGER.info("The algorithm hasn't found any valid solution :(")
else:
printer = CrosswordPrinter(crossword)
printer.setStyle(args.style)
if args.solution:
if args.play:
print(printer)
playGame(solution)
elif args.algorithm != ALG_BACKTRACKING_LIVE:
printer.printSolution(solution)
else:
LOGGER.info("The algorithm has found a valid solution :)")
if __name__ == "__main__":
# Prepare coding
if platform.system() == "Windows":
os.system("chcp 65001")
# Parse arguments
args = parseArguments(DEFAULT_PARSER)
# Set default tablesets
args.style = CHAR_TABLESETS[args.style]
# Welcome
LOGGER.info("Welcome to Crossword solver")
# Load data
LOGGER.info("Loading crossword and wordlist")
if args.timers > 0: time_load_start = time.time()
# Datasets
if args.wordlist == None:
args.wordlist = ITEMSET_BYNAME[args.itemset]["wordlist"]
if args.crossword == None:
args.crossword = ITEMSET_BYNAME[args.itemset]["crossword"]
# Wordlist
wordlist = loadWordlist(args.wordlist)
# Crossword
crossword = loadCrossword(args.crossword)
# Loading ended
if args.timers > 0:
time_load_end = time.time()
LOGGER.info("Loaded all in %f seconds",
time_load_end-time_load_start)
else:
LOGGER.info("Loaded all data succesfully")
# Choose algorithm
alg = selectAlgorithm()
# Solve the problem
LOGGER.info("Started backtracking algorithm")
if args.timers > 0: time_alg_start = time.time()
solution = alg(crossword.getVariables())
if args.timers > 0:
time_alg_end = time.time()
LOGGER.info("Ended alg. in %f seconds",
time_alg_end-time_alg_start)
else:
LOGGER.info("Ended backtracking algorithm")
# Solution
if args.timers > 0:
LOGGER.info("TOTAL TIME: %f seconds",time_alg_end-time_load_start)
showSolution(solution)
LOGGER.info("Thanks for trusting our app ;)")
|
py | 1a426b5a9a570229e6e0db267f5804b6459577c0 | """Test suite main conftest."""
import transaction
import pytest
from mock import Mock
from pyramid.decorator import reify
from pyramid.request import Request
from pyramid import testing
from zope.sqlalchemy import register
import pyramid_basemodel
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.pool import NullPool
from pyramid_localize import build_localize_config
from pyramid_localize.models import Language
def web_request_func():
"""Mock web request for views testing."""
# pylint:disable=import-outside-toplevel
from pyramid_localize.request import LocalizeRequestMixin
from pyramid_localize.request import database_locales
from pyramid_localize.request import locale_id
from pyramid_localize.request import locales
class TestRequest(LocalizeRequestMixin, Request): # pylint:disable=too-many-ancestors
"""Test request object."""
@reify
def _database_locales(self):
return database_locales(self)
@reify
def locale_id(self):
"""Returns a database locale id."""
return locale_id(self)
def locales(self, *args, **kwargs):
"""Return all availablee locales."""
return locales(self, *args, **kwargs)
request = TestRequest({})
localize_config = build_localize_config(
{
"localize.locales.available": ["en", "pl", "de", "cz"],
"localize.domain": "test",
}
)
configurator = testing.setUp()
request.registry = configurator.registry # pylint:disable=attribute-defined-outside-init
request.registry["localize"] = localize_config
return request
@pytest.fixture
def web_request():
"""Mock web request for views testing."""
return web_request_func()
@pytest.fixture
def locale_negotiator_request():
"""Request for locale_negotiator tests."""
request = Mock()
mock_configuration = {
"cookies": {"_LOCALE_": "cz"},
"_LOCALE_": "fr",
"accept_language.best_match.return_value": "de",
"path": "/pl/page",
"registry": {
"localize": build_localize_config(
{
"localize.locales.available": ["en", "pl", "de", "cz", "fr"],
"localize.locales.default": "en",
}
)
},
}
request.configure_mock(**mock_configuration)
return request
@pytest.fixture
def db_session(request):
"""Session for SQLAlchemy."""
from pyramid_localize.models import Base # pylint:disable=import-outside-toplevel
engine = create_engine("sqlite:///localize.sqlite", echo=False, poolclass=NullPool)
pyramid_basemodel.Session = scoped_session(sessionmaker())
register(pyramid_basemodel.Session)
pyramid_basemodel.bind_engine(engine, pyramid_basemodel.Session, should_create=True, should_drop=True)
def destroy():
transaction.commit()
Base.metadata.drop_all(engine)
request.addfinalizer(destroy)
return pyramid_basemodel.Session
@pytest.fixture
def db_locales(db_session): # pylint:disable=redefined-outer-name
"""Add Languages to db_session."""
for locale in ["pl", "cz", "fr"]:
locale_object = Language(name=locale, native_name=locale, language_code=locale)
db_session.add(locale_object)
transaction.commit()
@pytest.fixture
def request_i18n():
"""Create request with i18n subscribers on."""
config = testing.setUp()
config.scan("pyramid_localize.subscribers.i18n")
request = Request({})
request.registry = config.registry
return request
@pytest.fixture
def request_fake():
"""Create request with fake i18n subscribers on."""
config = testing.setUp()
config.scan("pyramid_localize.subscribers.fake")
request = Request({})
request.registry = config.registry
return request
|
py | 1a426c1b8ad7a33016bca5a53e457c469956d160 | import os
from pypaper import latex_tools as lt
test_dir = os.path.dirname(__file__)
def test_compile_bibtex():
ffp = test_dir + "/test_data_files/sample.bib"
citations = ["Safak:1992ub", "Vesic:1975"]
not_cited = ["Rodriguez:2000sr"]
bibtex_str = lt.compile_bibtex(citations, ffp)
print(bibtex_str)
for cite in citations:
assert cite in bibtex_str, cite
for cite in not_cited:
assert cite not in bibtex_str
# can not find
unlisted_citations = ["Rathje:2017ip"]
bibtex_str = lt.compile_bibtex(unlisted_citations, ffp)
for cite in unlisted_citations:
assert cite not in bibtex_str
def test_extract_citations():
ffp = test_dir + "/test_data_files/sample_latex.tex"
expected_citations = ['Vesic:1975', 'Chatzigogos:2008uv', 'Safak:1992ub', 'Raychowdhury:2009hw']
citations = lt.extract_citation_keys_from_latex(latex_ffp=ffp)
assert len(expected_citations) == len(citations)
for ec in expected_citations:
assert ec in citations
def test_extract_multi_citations():
ffp = test_dir + "/test_data_files/sample_latex_w_eg_and_multiple.tex"
expected_citations = ['Vesic:1975', 'Chatzigogos:2008uv', 'Safak:1992ub', 'Raychowdhury:2009hw', "NIST:2013ssi",
'Taylor:1979uc', 'Gajan:2008cs', 'Deng:2012ta']
citations = lt.extract_citation_keys_from_latex(latex_ffp=ffp)
print(citations)
assert len(expected_citations) == len(citations)
for ec in expected_citations:
assert ec in citations, ec
if __name__ == '__main__':
test_extract_multi_citations()
|
py | 1a426cfd99594d665c7f81c98a822893c8a296f0 | # coding=utf-8
# Kevin Manfredy Axpuac Juárez - 15006597
# Miguel Angel Lemus Morales - 14003328
# Archivo Principal
from help import Help
from instructions import Inst
from playerVsPlayer import PlayerVsPlayer
from playerVsMachine import PlayerVsMachine
from machineVsMachine import MachineVsMachine
# menu de el juego
def menu():
try:
while True:
print('\n====================================')
print('*** Bienvenido al juego CONNECT4 ***')
print('====================================')
while True:
print('(1) INSTRUCCIONES')
print('(2) PLAYER vs PLAYER')
print('(3) PLAYER vs MACHINE')
print('(4) MACHINE vs MACHINE')
print('(5) HELP')
print('(6) EXIT')
try:
option = int(input("Ingrese una opcion: "))
if option == 1:
Inst()
elif option == 2:
PlayerVsPlayer()
elif option == 3:
PlayerVsMachine()
elif option == 4:
MachineVsMachine()
elif option == 5:
Help()
elif option == 6:
print('\nGracias por visitar Connect4 !!! ')
print('Saliendo del juego ...')
break
else:
print('\nERROR: Opcion invalida! Solo hay opciones 1, 2, 3, 4, 5 y 6\n')
except ValueError:
print('\nERROR: Opcion invalida! No ingreso un numero entero\n')
break
except:
print()
menu()
|
py | 1a426d9b2d016853dd0e6318b6f91145513d8bc4 | import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import timeit
class AlexNet(nn.Module):
def __init__(self, num_classes=1000):
super(AlexNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
num_classes = 1000
num_batches = 10
batch_size = 120
image_w = 128
image_h = 128
num_repeat = 20
cuda_available = torch.cuda.is_available()
print("===================================================")
print("Cuda Available : {}".format(cuda_available))
print("===================================================")
def train(model):
model.train(True)
loss_fn = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.001)
one_hot_indices = torch.LongTensor(batch_size) \
.random_(0, num_classes) \
.view(batch_size, 1)
for _ in range(num_batches):
# generate random inputs and labels
inputs = torch.randn(batch_size, 3, image_w, image_h)
labels = torch.zeros(batch_size, num_classes) \
.scatter_(1, one_hot_indices, 1)
# run forward pass
optimizer.zero_grad()
if cuda_available:
outputs = model(inputs.to('cuda:0'))
else:
outputs = model(inputs)
# print("Output-device {}".format(outputs.device))
# run backward pass
labels = labels.to(outputs.device)
loss_fn(outputs, labels).backward()
optimizer.step()
stmt = "train(model)"
setup = None
if cuda_available:
setup = "model = AlexNet(num_classes=num_classes).to('cuda:0')"
else:
setup = "model = AlexNet(num_classes=num_classes)"
stats = []
for i in range(10):
rn_run_times = timeit.repeat(stmt, setup, number=1, repeat=num_repeat,
globals=globals())
rn_mean, rn_std = np.mean(rn_run_times), np.std(rn_run_times)
stats.append(rn_mean)
print("Single Node Training Time:", rn_mean)
stats_ar = np.array(stats)
mean = stats_ar.mean()
print(" Mean Training Time {}".format(mean))
with open('stats_alexnet_s_v1.csv', 'a+') as fp:
fp.write(str(mean) + "\n")
|
py | 1a426e0b22dd27319e6bd64563c880a679a1055f | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_splashscreen.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
f="Montserrat ExtraBold"
f1="Bahnschrift"
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(600, 400)
MainWindow.setMinimumSize(QtCore.QSize(600, 400))
MainWindow.setMaximumSize(QtCore.QSize(600, 400))
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName("horizontalLayout")
self.Splashscreen = QtWidgets.QFrame(self.centralwidget)
self.Splashscreen.setStyleSheet("background-color: rgb(255, 255, 255);\n"
"border-radius: 20px;\n"
"")
self.Splashscreen.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.Splashscreen.setFrameShadow(QtWidgets.QFrame.Raised)
self.Splashscreen.setObjectName("Splashscreen")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.Splashscreen)
self.horizontalLayout_2.setContentsMargins(15, 15, 15, 15)
self.horizontalLayout_2.setSpacing(0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.Title = QtWidgets.QFrame(self.Splashscreen)
self.Title.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.Title.setFrameShadow(QtWidgets.QFrame.Raised)
self.Title.setObjectName("Title")
self.verticalLayout = QtWidgets.QVBoxLayout(self.Title)
self.verticalLayout.setContentsMargins(8, 0, 0, 8)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName("verticalLayout")
self.lb_title = QtWidgets.QLabel(self.Title)
font = QtGui.QFont()
font.setFamily(f)
font.setPointSize(18)
font.setBold(True)
font.setWeight(75)
self.lb_title.setFont(font)
self.lb_title.setStyleSheet("color: rgb(0, 0, 97);")
self.lb_title.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft)
self.lb_title.setObjectName("lb_title")
self.verticalLayout.addWidget(self.lb_title)
self.lb_version = QtWidgets.QLabel(self.Title)
font = QtGui.QFont()
font.setFamily(f)
font.setPointSize(12)
self.lb_version.setFont(font)
self.lb_version.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.lb_version.setObjectName("lb_version")
self.verticalLayout.addWidget(self.lb_version)
self.blank1 = QtWidgets.QFrame(self.Title)
self.blank1.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.blank1.setFrameShadow(QtWidgets.QFrame.Raised)
self.blank1.setObjectName("blank1")
self.verticalLayout.addWidget(self.blank1)
self.lb_status = QtWidgets.QLabel(self.Title)
font = QtGui.QFont()
font.setFamily(f)
font.setPointSize(12)
self.lb_status.setFont(font)
self.lb_status.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.lb_status.setObjectName("lb_status")
self.verticalLayout.addWidget(self.lb_status)
self.blank2 = QtWidgets.QFrame(self.Title)
self.blank2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.blank2.setFrameShadow(QtWidgets.QFrame.Raised)
self.blank2.setObjectName("blank2")
self.verticalLayout.addWidget(self.blank2)
self.lb_year = QtWidgets.QLabel(self.Title)
font = QtGui.QFont()
font.setFamily(f)
font.setPointSize(12)
self.lb_year.setFont(font)
self.lb_year.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft)
self.lb_year.setObjectName("lb_year")
self.verticalLayout.addWidget(self.lb_year)
self.logo = QtWidgets.QFrame(self.Title)
self.logo.setMaximumSize(QtCore.QSize(16777215, 40))
self.logo.setStyleSheet("border-radius: 0px;\n"
"background-image: url(:/Image/Image/SPACE_AC.png);")
self.logo.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.logo.setFrameShadow(QtWidgets.QFrame.Raised)
self.logo.setObjectName("logo")
self.verticalLayout.addWidget(self.logo)
self.horizontalLayout_2.addWidget(self.Title)
self.InfoBar = QtWidgets.QFrame(self.Splashscreen)
self.InfoBar.setMinimumSize(QtCore.QSize(320, 0))
self.InfoBar.setMaximumSize(QtCore.QSize(320, 16777215))
self.InfoBar.setStyleSheet("background-image: url(:/Image/Image/SPACE_SPLASH2.png);\n"
"border-radius: 12px;")
self.InfoBar.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.InfoBar.setFrameShadow(QtWidgets.QFrame.Raised)
self.InfoBar.setObjectName("InfoBar")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.InfoBar)
self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_3.setSpacing(0)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.horizontalLayout_2.addWidget(self.InfoBar)
self.horizontalLayout.addWidget(self.Splashscreen)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.lb_title.setText(_translate("MainWindow", "ALIEN SAT GCS"))
self.lb_version.setText(_translate("MainWindow", "Ver.0.1.0 (WAREDTANS)"))
self.lb_status.setText(_translate("MainWindow", "Scanning all COM Ports..."))
self.lb_year.setText(_translate("MainWindow", "2018 - 2021 : SPACE AC"))
import ui_splashscreen_resource_rc
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
py | 1a426ee69e4ec9ee987aca6269c5845b887b4e3a | from tabulate import tabulate
import requests
import argparse
import pprint
import json
import os
class EnvDefault(argparse.Action):
def __init__(self, envvar, required=True, default=None, **kwargs):
if not default and envvar:
if envvar in os.environ:
default = os.environ[envvar]
if required and default:
required = False
super(EnvDefault, self).__init__(default=default, required=required,
**kwargs)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
def _parse_args():
parser = argparse.ArgumentParser()
    parser.add_argument('-pn', '--package-number', type=str, required=True, help='Package number; you may also set the PKG environment variable',
action=EnvDefault, envvar="PKG")
parser.add_argument('-ih', '--is-heb', action='store_true', required=False, help='Print in hebrew')
parser.add_argument('-pj', '--print-json', action='store_true', required=False, help='Print json instead of table')
parser.add_argument('-cf', '--cookie-file', type=str, required=False, default='cookie.json', help='Cookie file')
return parser.parse_args()
def _get_state_list(item_code, request_key, verification_key, is_heb):
url = "https://mypost.israelpost.co.il/umbraco/Surface/ItemTrace/GetItemTrace"
payload = f"itemCode={item_code}{'&lcid=1037' if is_heb else ''}&__RequestVerificationToken={request_key}"
headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Cookie': f'__RequestVerificationToken={verification_key};',
}
response = requests.request("POST", url, headers=headers, data=payload)
return json.loads(response.text)
def _check_response(response):
if response['ReturnCode'] != 0:
print(f"Got the following error: {response['ErrorDescription']}")
return False
else:
return True
def _print_response(response, print_json):
if print_json:
pprint.pprint(response)
else:
print(tabulate(tabular_data=response['Result']['itemcodeinfo']['InfoLines'],
headers=response['Result']['itemcodeinfo']['ColumnHeaders'],
tablefmt="plain",
stralign='right'))
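# The file passed via --cookie-file must provide the two tokens used above.
# An illustrative cookie.json (placeholder values, not real tokens):
#
#   {
#       "request_key": "<__RequestVerificationToken form field value>",
#       "verification_key": "<__RequestVerificationToken cookie value>"
#   }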
if __name__ == "__main__":
args = _parse_args()
cookie = json.load(open(args.cookie_file))
post_response = _get_state_list(args.package_number, cookie['request_key'], cookie['verification_key'], args.is_heb)
if _check_response(post_response):
_print_response(post_response, args.print_json)
else:
print('Failed to get package information')
|
py | 1a426f3ad6c6af137ebb644c9662910d7c883d04 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from .ps_program_builder import *
from .public import *
__all__ = [
'PsProgramBuilder', 'GeoPsProgramBuilder', 'CpuSyncPsProgramBuilder',
'CpuAsyncPsProgramBuilder', 'GpuPsProgramBuilder',
'HeterAsyncPsProgramBuilder', 'FlPsProgramBuilder'
]
class PsProgramBuilderFactory(object):
def __init__(self):
pass
def _create_ps_program_builder(self, pass_ctx):
attrs = pass_ctx._attrs
if attrs['ps_mode'] == DistributedMode.GEO:
return globals()['GeoPsProgramBuilder'](pass_ctx)
elif attrs['use_ps_gpu']:
return globals()['GpuPsProgramBuilder'](pass_ctx)
elif attrs['is_heter_ps_mode']:
return globals()['HeterAsyncPsProgramBuilder'](pass_ctx)
elif 'is_fl_ps_mode' in attrs and attrs[
'is_fl_ps_mode'] == DistributedMode.FL:
return globals()['FlPsProgramBuilder'](pass_ctx)
else:
return globals()['CpuSyncPsProgramBuilder'](pass_ctx)
|
py | 1a426fa85fbb0da0e3f52629b19b56809eaa7ac4 | # SPDX-FileCopyrightText: Copyright 2022, Siavash Ameli <[email protected]>
# SPDX-License-Identifier: BSD-3-Clause
# SPDX-FileType: SOURCE
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the license found in the LICENSE.txt file in the root directory
# of this source tree.
__all__ = ['get_data_type_name']
# ==================
# get data type name
# ==================
def get_data_type_name(data):
"""
Returns the typename of data as string.
"""
if data.dtype in [b'float32', 'float32']:
data_type_name = b'float32'
elif data.dtype in [b'float64', 'float64']:
data_type_name = b'float64'
elif data.dtype in [b'float128', 'float128']:
data_type_name = b'float128'
elif data.dtype in [b'int32', 'int32']:
data_type_name = b'int32'
elif data.dtype in [b'int64', 'int64']:
data_type_name = b'int64'
else:
raise TypeError('Data type should be "float32", "float64", ' +
'"float128", "int32", or "int64".')
return data_type_name
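# Illustrative behaviour (doctest-style, kept as a comment):
#
#   >>> import numpy
#   >>> get_data_type_name(numpy.zeros(3))                  # float64 by default
#   b'float64'
#   >>> get_data_type_name(numpy.arange(3, dtype='int32'))
#   b'int32'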
|
py | 1a426fe20555001a0abe38ba2dde3bfaa7b2513d | # import the necessary packages
from PIL import Image, ImageOps
import pytesseract
from pytesseract import Output
import argparse
import cv2
import os
import json
def process_list(text: str) -> dict:
final = {}
items = text.split("\n")[4:][:-1]
for item in items:
if item == "":
continue
x = item.split(" ")
itemtype = x[1].lower()
if itemtype not in final:
final[itemtype] = {}
final[itemtype][x[0]] = {}
return final
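# process_list() assumes the OCR text of the list screen looks roughly like:
# four header lines, then one line per item starting with "<id> <type> ...",
# then one trailing line; everything except the item lines is discarded.
# Hypothetical input lines such as
#
#   A1B2C3 WEAPON ...
#   D4E5F6 ARMOR ...
#
# would yield {"weapon": {"A1B2C3": {}}, "armor": {"D4E5F6": {}}}.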
def process_query(text: str, existing: dict) -> dict:
current = ("", "")
lines = text.split("\n")
for line in lines:
if line != "":
stuff = line.split(": ")
if stuff[0] == "ID":
for cat in existing:
for item in existing[cat]:
if item == stuff[1]:
current = (cat, stuff[1])
break
if stuff[0] == "LOCATION":
cat, id = current
if cat == "" or id == "":
continue
if id not in existing[cat]:
print("warning - item found in query, not in list", id)
existing[cat][id]["location"] = stuff[1]
return existing
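# Hypothetical query sketch (layout assumed): lines such as "ID: AB123" and
# "LOCATION: Shelf 3" attach a location to the matching item already present
# in `existing`.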
def print_results(items: dict):
for category in items:
print(category.upper())
for item in items[category]:
print(" ├──" + item)
for prop in items[category][item]:
print(" ├── {}: {}".format(prop.upper(), items[category][item][prop]))
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("image",
help="path to input image to be OCR'd")
ap.add_argument("-t", "--type", default="list")
ap.add_argument("-d", "--debug", dest="debug", action="store_true",
help="whether to save results of ocr to file")
ap.add_argument("-p", "--pretty-print", dest="prettyprint", action="store_true",
help="whether to print nice data, or produce json")
ap.set_defaults(debug=False, prettyprint=False)
args = vars(ap.parse_args())
# load the example image and convert it to grayscale
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
filename = "{}.png".format(os.getpid())
cv2.imwrite(filename, gray)
# load the image as a PIL/Pillow image and apply some basic transformations.
img = Image.open(filename)
os.remove(filename)
img = img.crop((300, 120, 2000, 1300))
img = ImageOps.invert(img)
res = {}
if args["type"] == "list":
res = process_list(pytesseract.image_to_string(img))
f = open("list.json", "w+")
f.write(json.dumps(res))
f.close()
if args["type"] == "query":
f = open("list.json", "r")
existing = json.loads(f.read())
res = process_query(pytesseract.image_to_string(img), existing)
if args["prettyprint"]:
print_results(res)
else:
print(json.dumps(res, indent=2))
if args["debug"]:
img.save(filename)
img = cv2.imread(filename)
os.remove(filename)
d = pytesseract.image_to_data(img, output_type=Output.DICT)
n_boxes = len(d['level'])
for i in range(n_boxes):
(x, y, w, h) = (d['left'][i], d['top'][i], d['width'][i], d['height'][i])
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 1)
cv2.imwrite("f.png", img) |
py | 1a4270837ff34fdce1154425356ce85caba2fdf3 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
from sparseml.optim import PruningLossSensitivityAnalysis
def test_ks_loss_sensitivity_analysis():
test_samples = [
{
"id": "165",
"name": "input.conv.weight",
"index": 0,
"sparse_measurements": [
[0.0, [0.0, 0.0]],
[0.2, [0.0, 0.0]],
[0.4, [0.0, 0.0]],
[0.6, [1.4423741959035397e-05, 1.4478888260782696e-05]],
[0.7, [0.0003933242114726454, 0.0004009161493740976]],
[0.8, [0.002293953439220786, 0.002319519640877843]],
[0.85, [0.0038978520315140486, 0.003912879154086113]],
[0.9, [0.0024482859298586845, 0.0024934178218245506]],
[0.95, [0.0034274826757609844, 0.003474951023235917]],
[0.99, [0.01961200125515461, 0.01976676657795906]],
],
"averages": {
"0.0": 0.0,
"0.2": 0.0,
"0.4": 0.0,
"0.6": 1.4451315109909046e-05,
"0.7": 0.0003971201804233715,
"0.8": 0.0023067365400493145,
"0.85": 0.0039053655928000808,
"0.9": 0.0024708518758416176,
"0.95": 0.0034512168494984508,
"0.99": 0.019689383916556835,
},
"sparse_average": 0.321111756313514,
"sparse_integral": 0.0010827882658031743,
"sparse_comparison": 0.0024708518758416176,
},
{
"id": "168",
"name": "sections.0.0.depth.conv.weight",
"index": 1,
"sparse_measurements": [
[0.0, [0.0, 0.0]],
[0.2, [0.0, 0.0]],
[0.4, [0.0, 0.0]],
[0.6, [0.001039206050336361, 0.0010454836301505566]],
[0.7, [0.0013909710105508566, 0.001424945192411542]],
[0.8, [0.005448495503515005, 0.005359217524528503]],
[0.85, [0.0024713557213544846, 0.0024402134586125612]],
[0.9, [0.002173610497266054, 0.002142299897968769]],
[0.95, [0.00811902154237032, 0.008098462596535683]],
[0.99, [0.06605849775951356, 0.06613280531018972]],
],
"averages": {
"0.0": 0.0,
"0.2": 0.0,
"0.4": 0.0,
"0.6": 0.0010423448402434587,
"0.7": 0.0014079581014811993,
"0.8": 0.005403856514021754,
"0.85": 0.002455784589983523,
"0.9": 0.0021579551976174116,
"0.95": 0.008108742069453001,
"0.99": 0.06609565153485164,
},
"sparse_average": 0.32383361464238264,
"sparse_integral": 0.002619930187938736,
"sparse_comparison": 0.0021579551976174116,
},
{
"id": "171",
"name": "sections.0.0.point.conv.weight",
"index": 2,
"sparse_measurements": [
[0.0, [0.0, 0.0]],
[0.2, [0.0, 0.0]],
[0.4, [0.0, 0.0]],
[0.6, [-9.29515908687506e-10, -1.6410388603560477e-09]],
[0.7, [5.841321808475186e-07, 6.879848797325394e-07]],
[0.8, [0.00011716883454937488, 0.00011542218999238685]],
[0.85, [3.637020199676044e-05, 3.672009552246891e-05]],
[0.9, [0.00020571750064846128, 0.0002049835748039186]],
[0.95, [0.0002617501886561513, 0.00026932702166959643]],
[0.99, [0.0006772654596716166, 0.0006722339312545955]],
],
"averages": {
"0.0": 0.0,
"0.2": 0.0,
"0.4": 0.0,
"0.6": -1.2852773845217769e-09,
"0.7": 6.36058530290029e-07,
"0.8": 0.00011629551227088086,
"0.85": 3.6545148759614676e-05,
"0.9": 0.00020535053772618994,
"0.95": 0.00026553860516287386,
"0.99": 0.000674749695463106,
},
"sparse_average": 0.3195649557136318,
"sparse_integral": 4.6324591947619074e-05,
"sparse_comparison": 0.00020535053772618994,
},
]
analysis = PruningLossSensitivityAnalysis()
for test in test_samples:
for sparse_measure in test["sparse_measurements"]:
for meas in sparse_measure[1]:
analysis.add_result(
test["id"],
test["name"],
test["index"],
sparse_measure[0],
meas,
baseline=False,
)
comp = analysis.results[-1]
assert test["id"] == comp.id_
assert test["name"] == comp.name
assert test["index"] == comp.index
assert len(test["sparse_measurements"]) == len(comp.sparse_measurements)
assert len(test["averages"]) == len(comp.averages)
assert abs(test["sparse_average"] - comp.sparse_average) < 1e-5
assert abs(test["sparse_integral"] - comp.sparse_integral) < 1e-5
assert abs(test["sparse_comparison"] - comp.sparse_comparison()) < 1e-5
path = os.path.join(tempfile.gettempdir(), "ks-sens-analysis.json")
analysis.save_json(path)
json_analysis = analysis.load_json(path)
for index, test in enumerate(test_samples):
comp = json_analysis.results[index]
assert test["id"] == comp.id_
assert test["name"] == comp.name
assert test["index"] == comp.index
assert len(test["sparse_measurements"]) == len(comp.sparse_measurements)
assert len(test["averages"]) == len(comp.averages)
assert abs(test["sparse_average"] - comp.sparse_average) < 1e-5
assert abs(test["sparse_integral"] - comp.sparse_integral) < 1e-5
assert abs(test["sparse_comparison"] - comp.sparse_comparison()) < 1e-5
path = os.path.join(
tempfile.gettempdir(), "ks-sens-analysis-integral-normalized.png"
)
analysis.plot(path, plot_integral=True)
assert os.path.exists(path)
path = os.path.join(
tempfile.gettempdir(), "ks-sens-analysis-integral-normalized.png"
)
analysis.plot(path, plot_integral=True)
assert os.path.exists(path)
path = os.path.join(tempfile.gettempdir(), "ks-sens-analysis-integral.png")
analysis.plot(path, plot_integral=True, normalize=False)
assert os.path.exists(path)
path = os.path.join(tempfile.gettempdir(), "ks-sens-analysis-avg-normalized.png")
analysis.plot(path, plot_integral=False)
assert os.path.exists(path)
path = os.path.join(tempfile.gettempdir(), "ks-sens-analysis-avg.png")
analysis.plot(path, plot_integral=False, normalize=False)
assert os.path.exists(path)
|
py | 1a4270f4a6fd8effdeac28112bd20509f4273d3a | """Generate MTurk hits for top images."""
import argparse
import pathlib
from typing import Sequence
from src import milannotations
from src.mturk import hits
from src.utils.typing import Layer
parser = argparse.ArgumentParser(description='generate mturk hits')
parser.add_argument('dataset', help='name of top images dataset')
parser.add_argument('hits_csv_file', type=pathlib.Path, help='output csv file')
parser.add_argument(
'--dataset-path',
type=pathlib.Path,
help='directory containing dataset (default: .zoo/datasets/<dataset>)')
parser.add_argument(
'--limit',
type=int,
help='only generate hits for this many units (default: None)')
parser.add_argument('--host-url',
default='https://unitname.csail.mit.edu/dissect',
help='host url for top images (default: csail url)')
parser.add_argument('--no-validate-urls',
action='store_true',
help='do not validate urls')
parser.add_argument('--no-display-progress',
action='store_true',
help='do not show progress bar')
args = parser.parse_args()
dataset = milannotations.load(args.dataset,
path=args.dataset_path,
display_progress=not args.no_display_progress)
if not isinstance(dataset, milannotations.TopImagesDataset):
raise ValueError(f'bad dataset type: {type(dataset).__name__}')
base_url = f'{args.host_url.strip("/")}/{args.dataset}'
def generate_urls(layer: Layer, unit: int, k: int) -> Sequence[str]:
"""Generate top image URLs."""
return [
f'{base_url}/{layer}/unit_{unit}/image_{index}.png'
for index in range(k)
]
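# For example (hypothetical layer/unit), generate_urls('layer4', 2, 3) yields
# ['<base_url>/layer4/unit_2/image_0.png', ..., '.../image_2.png'].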
args.hits_csv_file.parent.mkdir(parents=True, exist_ok=True)
hits.generate_hits_csv(dataset,
args.hits_csv_file,
generate_urls,
limit=args.limit,
validate_urls=not args.no_validate_urls,
display_progress=not args.no_display_progress)
|
py | 1a42715dc11314a0188dedfaf2ecd0fe982217ef | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
from op_test import OpTest
paddle.enable_static()
class TestMaxMinAPI(unittest.TestCase):
def setUp(self):
self.init_case()
self.cal_np_out_and_gradient()
self.place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
def init_case(self):
self.x_np = np.array([[0.2, 0.3, 0.5, 0.9], [0.1, 0.2, 0.6, 0.7]])
self.shape = [2, 4]
self.dtype = 'float64'
self.axis = None
self.keepdim = False
    # If there are multiple minimum or maximum elements, max/min is not
    # differentiable, so its gradient check is not supported by the unittest
    # framework; thus we calculate the gradient with a numpy function.
def cal_np_out_and_gradient(self):
def _cal_np_out_and_gradient(func):
            if func == 'max':
                out = np.max(self.x_np, axis=self.axis, keepdims=self.keepdim)
            elif func == 'min':
                out = np.min(self.x_np, axis=self.axis, keepdims=self.keepdim)
            else:
                print('This unittest only tests max/min, but got', func)
self.np_out[func] = out
grad = np.zeros(self.shape)
out_b = np.broadcast_to(out, self.shape)
grad[self.x_np == out_b] = 1
self.np_grad[func] = grad
self.np_out = dict()
self.np_grad = dict()
_cal_np_out_and_gradient('max')
_cal_np_out_and_gradient('min')
def _choose_paddle_func(self, func, x):
        if func == 'max':
            out = paddle.max(x, self.axis, self.keepdim)
        elif func == 'min':
            out = paddle.min(x, self.axis, self.keepdim)
        else:
            print('This unittest only tests max/min, but got', func)
return out
    # We check that the output of the paddle API matches numpy in static graph mode.
def test_static_graph(self):
def _test_static_graph(func):
startup_program = fluid.Program()
train_program = fluid.Program()
with fluid.program_guard(startup_program, train_program):
x = fluid.data(name='input', dtype=self.dtype, shape=self.shape)
x.stop_gradient = False
out = self._choose_paddle_func(func, x)
exe = fluid.Executor(self.place)
res = exe.run(fluid.default_main_program(),
feed={'input': self.x_np},
fetch_list=[out])
self.assertTrue((np.array(res[0]) == self.np_out[func]).all())
_test_static_graph('max')
_test_static_graph('min')
    # As gradients are easy to compute in dygraph, we check the gradient of the
    # paddle API against numpy in dygraph mode.
def test_dygraph(self):
def _test_dygraph(func):
paddle.disable_static()
x = paddle.to_tensor(
self.x_np, dtype=self.dtype, stop_gradient=False)
out = self._choose_paddle_func(func, x)
grad_tensor = paddle.ones_like(x)
paddle.autograd.backward([out], [grad_tensor], True)
self.assertEqual(np.allclose(self.np_out[func], out.numpy()), True)
self.assertEqual(np.allclose(self.np_grad[func], x.grad), True)
paddle.enable_static()
_test_dygraph('max')
_test_dygraph('min')
# test multiple minimum or maximum elements
class TestMaxMinAPI2(TestMaxMinAPI):
def init_case(self):
self.x_np = np.array([[0.2, 0.3, 0.9, 0.9], [0.1, 0.1, 0.6, 0.7]])
self.shape = [2, 4]
self.dtype = 'float64'
self.axis = None
self.keepdim = False
# test different axis
class TestMaxMinAPI3(TestMaxMinAPI):
def init_case(self):
self.x_np = np.array([[0.2, 0.3, 0.9, 0.9], [0.1, 0.1, 0.6, 0.7]])
self.shape = [2, 4]
self.dtype = 'float64'
self.axis = 0
self.keepdim = False
# test keepdim = True
class TestMaxMinAPI4(TestMaxMinAPI):
def init_case(self):
self.x_np = np.array([[0.2, 0.3, 0.9, 0.9], [0.1, 0.1, 0.6, 0.7]])
self.shape = [2, 4]
self.dtype = 'float64'
self.axis = 1
self.keepdim = True
# test axis is tuple
class TestMaxMinAPI5(TestMaxMinAPI):
def init_case(self):
self.x_np = np.array(
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]).astype(np.int32)
self.shape = [2, 2, 2]
self.dtype = 'int32'
self.axis = (0, 1)
self.keepdim = False
|
py | 1a4271d95298d8e84c60d04cfb346c664e98444a | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for reduction ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numbers
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# The maximum input rank to test.
_MAX_RANK = 5
def _powerset(iterable):
"""Helper for generating all possible reduction_axes arguments.
Example:
powerset([0,1,2]): () (0,) (1,) (2,) (0,1) (0,2) (1,2) (0,1,2)
Args:
iterable: An iterable of items to generate the powerset of.
Returns:
The powerset of all items in iterable.
"""
s = list(iterable)
return itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(len(s) + 1))
class ReducedShapeTest(test.TestCase):
def _check(self, shape, axes, result):
output = math_ops.reduced_shape(shape, axes=axes)
self.assertAllEqual(output.eval(), result)
@test_util.run_deprecated_v1
def testSimple(self):
with self.cached_session():
self._check([3], [], [3])
self._check([3], [0], [1])
self._check([5, 3], [], [5, 3])
self._check([5, 3], [0], [1, 3])
self._check([5, 3], [1], [5, 1])
self._check([5, 3], [0, 1], [1, 1])
@test_util.run_deprecated_v1
def testZeros(self):
"""Check that reduced_shape does the right thing with zero dimensions."""
with self.cached_session():
self._check([0], [], [0])
self._check([0], [0], [1])
self._check([0, 3], [], [0, 3])
self._check([0, 3], [0], [1, 3])
self._check([0, 3], [1], [0, 1])
self._check([0, 3], [0, 1], [1, 1])
self._check([3, 0], [], [3, 0])
self._check([3, 0], [0], [1, 0])
self._check([3, 0], [1], [3, 1])
self._check([3, 0], [0, 1], [1, 1])
@test_util.run_deprecated_v1
def testNegAxes(self):
with self.cached_session():
self._check([10, 10, 10], [-1], [10, 10, 1])
self._check([10, 10, 10], [-1, 2], [10, 10, 1])
self._check([10, 10, 10], [-1, -1], [10, 10, 1])
self._check([10, 10, 10], [-1, 0], [1, 10, 1])
self._check([10, 10, 10], [-3], [1, 10, 10])
class ReductionUnknownShape(test.TestCase):
@test_util.run_deprecated_v1
def testBasic(self):
with self.cached_session():
for dtype, reductions in [(dtypes.float32,
(math_ops.reduce_sum, math_ops.reduce_mean,
math_ops.reduce_prod, math_ops.reduce_max,
math_ops.reduce_min,
math_ops.reduce_euclidean_norm)),
(dtypes.bool, (math_ops.reduce_all,
math_ops.reduce_any))]:
for reduction in reductions:
x = array_ops.placeholder(
dtype=dtype, shape=None) # Some tensor w/ unknown shape.
y = reduction(x)
self.assertEqual(y.shape, ())
class BaseReductionTest(test.TestCase):
def _tf_reduce(self, x, reduction_axes, keepdims):
raise NotImplementedError()
def _np_reduce(self, x, reduction_axes, keepdims):
raise NotImplementedError()
def _makeIncremental(self, shape, dtype):
data = np.arange(np.prod(shape)).reshape(shape).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
data -= 2j * data
return data
def _makeRandom(self, shape, dtype):
data = np.random.rand(*shape).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
data -= 2j * data
return data
def _compare(self, x, reduction_axes, keepdims, feed_dict=None):
np_ans = self._np_reduce(x, reduction_axes, keepdims)
with self.cached_session(use_gpu=True) as sess:
tf_ans = self._tf_reduce(x, reduction_axes, keepdims)
out = sess.run(tf_ans, feed_dict)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes, feed_dict=None):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareAll(x, reduction_axes[0])
self._compare(x, reduction_axes, keepdims=False, feed_dict=feed_dict)
self._compare(x, reduction_axes, keepdims=True, feed_dict=feed_dict)
def _compareAllAxes(self, x, feed_dict=None):
self._compareAll(x, None)
for axes in _powerset(range(x.ndim)):
self._compareAll(x, axes, feed_dict)
def _compareGradient(self, x, reduction_axes, rtol=1e-8, atol=1e-8):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareGradient(x, reduction_axes[0], rtol=rtol, atol=atol)
with self.cached_session(use_gpu=True):
t = ops.convert_to_tensor(x)
su = self._tf_reduce(t, reduction_axes, False)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, x.shape, su, su.get_shape().as_list(), x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=rtol, atol=atol)
def _compareGradientAxes(self, x, rtol=1e-8, atol=1e-8):
self._compareGradient(x, None, rtol=rtol, atol=atol)
self._compareGradient(x, [], rtol=rtol, atol=atol)
self._compareGradient(x, 0, rtol=rtol, atol=atol)
self._compareGradient(x, [1], rtol=rtol, atol=atol)
self._compareGradient(x, [2], rtol=rtol, atol=atol)
self._compareGradient(x, [1, 2], rtol=rtol, atol=atol)
self._compareGradient(x, [0, 1, 2, 3], rtol=rtol, atol=atol)
class SumReductionTest(BaseReductionTest):
def _tf_reduce(self, x, reduction_axes, keepdims):
return math_ops.reduce_sum(x, reduction_axes, keepdims)
def _np_reduce(self, x, reduction_axes, keepdims):
if isinstance(reduction_axes, list) or isinstance(reduction_axes,
np.ndarray):
reduction_axes = tuple(reduction_axes)
return np.sum(x, axis=reduction_axes, keepdims=keepdims)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True) as sess:
v = math_ops.reduce_sum([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
@test_util.run_deprecated_v1
def testInt32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.int32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat16(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float16)
self._compareAllAxes(np_arr)
# test that mean doesn't overflow
# only on GPU, since it has the more accurate implementation
if not test.is_gpu_available():
return
arr = np.ones([68000], dtype=np.float16)
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
tf_arr = variables.Variable(arr)
variables.global_variables_initializer().run()
tf_mean = math_ops.reduce_mean(tf_arr, 0, False)
tf_out_mean = self.evaluate(tf_mean)
self.assertAllClose(tf_out_mean, 1.)
@test_util.run_deprecated_v1
def testFloat32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float32)
self._compareAllAxes(np_arr)
for _ in range(10):
size_x = int(2**np.random.uniform(0, 15))
size_y = int(2**np.random.uniform(0, 15))
if size_x * size_y > 1e7:
size_y = int(1e7 / size_x)
arr = np.ones([size_x, size_y], dtype=np.float32)
col_sum = np.sum(arr, axis=0)
row_sum = np.sum(arr, axis=1)
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
tf_row_sum = self._tf_reduce(arr, 1, False)
tf_col_sum = self._tf_reduce(arr, 0, False)
tf_out_row, tf_out_col = self.evaluate([tf_row_sum, tf_col_sum])
self.assertAllClose(col_sum, tf_out_col)
self.assertAllClose(row_sum, tf_out_row)
for size_x in [1, 3, 16, 33]:
for size_y in [1, 3, 16, 33]:
for size_z in [1, 3, 16, 33]:
arr = np.ones([size_x, size_y, size_z], dtype=np.float32)
sum_y = np.sum(arr, axis=1)
sum_xz = np.sum(arr, axis=(0, 2))
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
tf_sum_xz = self._tf_reduce(arr, [0, 2], False)
tf_sum_y = self._tf_reduce(arr, 1, False)
tf_out_sum_xz, tf_out_sum_y = self.evaluate([tf_sum_xz, tf_sum_y])
self.assertAllClose(sum_y, tf_out_sum_y)
self.assertAllClose(sum_xz, tf_out_sum_xz)
@test_util.run_deprecated_v1
def testFloat64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex128(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testInvalidIndex(self):
np_arr = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
input_tensor = ops.convert_to_tensor(np_arr)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
math_ops.reduce_sum(input_tensor, [-3])
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
math_ops.reduce_sum(input_tensor, [2])
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
math_ops.reduce_sum(input_tensor, [0, 2])
@test_util.run_deprecated_v1
def testPartialShapes(self):
np.random.seed(1618)
# Input shape is unknown.
reduction_axes = [1, 2]
c_unknown = array_ops.placeholder(dtypes.float32)
s_unknown = math_ops.reduce_sum(c_unknown, reduction_axes)
self.assertEqual(tensor_shape.unknown_shape(), s_unknown.get_shape())
np_input = np.random.randn(3, 3, 3)
self._compareAll(np_input, reduction_axes, {c_unknown: np_input})
# Input shape only has known rank.
c_known_rank = array_ops.placeholder(dtypes.float32)
c_known_rank.set_shape(tensor_shape.unknown_shape(rank=3))
s_known_rank = math_ops.reduce_sum(
c_known_rank, reduction_axes, keepdims=True)
self.assertEqual(3, s_known_rank.get_shape().rank)
np_input = np.random.randn(3, 3, 3)
self._compareAll(np_input, reduction_axes, {c_known_rank: np_input})
# Reduction indices are unknown.
unknown_indices = array_ops.placeholder(dtypes.int32)
c_unknown_indices = constant_op.constant([[10.0], [20.0]])
s_unknown_indices = math_ops.reduce_sum(
c_unknown_indices, unknown_indices, keepdims=False)
self.assertEqual(tensor_shape.unknown_shape(),
s_unknown_indices.get_shape())
s_unknown_indices_keep = math_ops.reduce_sum(
c_unknown_indices, unknown_indices, keepdims=True)
self.assertEqual(2, s_unknown_indices_keep.get_shape().rank)
@test_util.run_deprecated_v1
def testWrongShapeForReductionIndices(self):
reduction_axes = [[1], [2]]
c_unknown = array_ops.placeholder(dtypes.float32)
with self.assertRaisesWithPredicateMatch(ValueError,
".*must be at most rank 1.*"):
math_ops.reduce_sum(c_unknown, reduction_axes)
# Int64??
@test_util.run_deprecated_v1
def testGradient(self):
for dtype in [
dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128
]:
x = self._makeIncremental([2, 3, 4, 2], dtype)
self._compareGradientAxes(x)
@test_util.run_deprecated_v1
def testHighRank(self):
# Do a bunch of random high dimensional reductions
np.random.seed(42)
for _ in range(20):
rank = np.random.randint(4, 10 + 1)
axes, = np.nonzero(np.random.randint(2, size=rank))
shape = tuple(np.random.randint(1, 3 + 1, size=rank))
data = np.random.randint(1024, size=shape)
self._compareAll(data, axes)
# Check some particular axis patterns
for rank in 4, 7, 10:
shape = tuple(np.random.randint(1, 3 + 1, size=rank))
data = np.random.randint(1024, size=shape)
for axes in ([], np.arange(rank), np.arange(0, rank, 2),
np.arange(1, rank, 2)):
self._compareAll(data, axes)
@test_util.run_deprecated_v1
def testExpand(self):
# Reduce an empty tensor to a nonempty tensor
x = np.zeros((5, 0))
self._compareAll(x, [1])
@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.session(use_gpu=True):
x = array_ops.zeros([0, 3])
y = math_ops.reduce_sum(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
@test_util.run_deprecated_v1
def testDegenerate(self):
with self.session(use_gpu=True):
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64,
dtypes.complex64, dtypes.complex128):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_sum(x, [0])
self.assertAllEqual(y.eval(), np.zeros(9938))
class MeanReductionTest(BaseReductionTest):
def _tf_reduce(self, x, reduction_axes, keepdims):
return math_ops.reduce_mean(x, reduction_axes, keepdims)
def _np_reduce(self, x, reduction_axes, keepdims):
if isinstance(reduction_axes, list) or isinstance(reduction_axes,
np.ndarray):
reduction_axes = tuple(reduction_axes)
elif isinstance(reduction_axes, numbers.Integral):
reduction_axes = (reduction_axes,)
if reduction_axes is None:
count = np.prod(x.shape)
else:
count = np.prod([x.shape[ax] for ax in reduction_axes])
# np.mean automatically converts integer inputs to float, while TensorFlow's
# reduce_mean does not. For integer inputs, we emulate TensorFlow's behavior
# using np.sum and truncating division.
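    # For example, for x = np.array([1, 2], dtype=np.int32) reduced over all
    # axes, np.mean gives 1.5 while this emulation gives np.sum(x) // 2 == 1.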
np_sum = np.sum(x, axis=reduction_axes, keepdims=keepdims)
if np.issubdtype(x.dtype, np.integer):
return np_sum // count
return np_sum / count
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True) as sess:
v = math_ops.reduce_mean([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
@test_util.run_deprecated_v1
def testInt32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.int32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testUint8(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeRandom((2,) * rank, dtypes.uint8)
self._compareAllAxes(np_arr)
# This tests the issue reported in b/145030710.
@test_util.run_deprecated_v1
def testSizeOverflowUint8(self):
np_arr = self._makeRandom((2**8,), dtypes.uint8)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testSizeOverflowInt8(self):
np_arr = self._makeRandom((2**7,), dtypes.int8)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testSizeOverflowUint16(self):
np_arr = self._makeRandom((2**16,), dtypes.uint16)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testSizeOverflowInt16(self):
np_arr = self._makeRandom((2**15,), dtypes.int16)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex128(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testGradient(self):
s = [2, 3, 4, 2]
for dtype in [dtypes.float32, dtypes.float64]:
x = self._makeIncremental(s, dtype)
self._compareGradientAxes(x, rtol=1e-3, atol=1e-3)
@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.session(use_gpu=True):
x = array_ops.zeros([0, 3])
y = math_ops.reduce_mean(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
@test_util.run_deprecated_v1
def testDegenerate(self):
with self.session(use_gpu=True):
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_mean(x, [0]).eval()
self.assertEqual(y.shape, (9938,))
self.assertTrue(np.all(np.isnan(y)))
class EuclideanNormReductionTest(BaseReductionTest):
def _tf_reduce(self, x, reduction_axes, keepdims):
return math_ops.reduce_euclidean_norm(x, reduction_axes, keepdims)
def _np_reduce(self, x, reduction_axes, keepdims):
if isinstance(reduction_axes, list) or isinstance(reduction_axes,
np.ndarray):
reduction_axes = tuple(reduction_axes)
np_fro = np.sqrt(
np.sum(x * np.conj(x), axis=reduction_axes, keepdims=keepdims))
if np.issubdtype(x.dtype, np.integer):
np_fro = np.floor(np_fro)
return np_fro
@test_util.run_deprecated_v1
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True):
v = math_ops.reduce_mean([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
@test_util.run_deprecated_v1
def testSingleton(self):
for dtype in [np.float32, np.float64]:
np_arr = np.array([-1.]).astype(dtype)
self._compareAll(np_arr, None)
@test_util.run_deprecated_v1
def testInt32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.int32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex128(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
self._compareAllAxes(np_arr)
with self.session(use_gpu=True):
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_euclidean_norm(x, [0]).eval()
self.assertEqual(y.shape, (9938,))
self.assertAllEqual(y, np.zeros(9938))
@test_util.run_deprecated_v1
def testGradient(self):
shape = [2, 3, 4, 2]
for dtype in [dtypes.float32, dtypes.float64]:
      # A zero-valued entry would give a NaN gradient if no reduction happens,
      # e.g. `tf.math.reduce_sum([0, 1], axis=[])`, so add one to avoid it.
x = self._makeIncremental(shape, dtype) + 1.0
self._compareGradientAxes(x, rtol=1e-2, atol=1e-2)
class ProdReductionTest(BaseReductionTest):
def _tf_reduce(self, x, reduction_axes, keepdims):
return math_ops.reduce_prod(x, reduction_axes, keepdims)
def _np_reduce(self, x, reduction_axes, keepdims):
if isinstance(reduction_axes, list) or isinstance(reduction_axes,
np.ndarray):
reduction_axes = tuple(reduction_axes)
return np.prod(x, axis=reduction_axes, keepdims=keepdims)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True) as sess:
v = math_ops.reduce_prod([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
@test_util.run_deprecated_v1
def testInt32(self):
# Numpy automatically upgrades the type of np.prod from int32 to int64, so
# Numpy does not overflow an int32 np.prod while TensorFlow does. To avoid
# overflow, divide the incremental int32 array by 2.
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.int32) / 2
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex128(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testGradientWithZeros(self):
s = [2, 3, 4, 2]
x = self._makeIncremental(s, dtypes.float32) / 20.
# No zeros in input
self._compareGradientAxes(x, rtol=1e-3, atol=1e-3)
# Zero at beginning
x1 = x.copy()
x1[:, :, 0, :] = 0
self._compareGradientAxes(x1, rtol=1e-3, atol=1e-3)
# Zero at end
x2 = x.copy()
x2[:, :, -1, :] = 0
self._compareGradientAxes(x2, rtol=1e-3, atol=1e-3)
# Zero in middle
x3 = x.copy()
x3[:, :, 2, :] = 0
self._compareGradientAxes(x3, rtol=1e-3, atol=1e-3)
# All zeros
x4 = x.copy()
x4[:, :, :, :] = 0
self._compareGradientAxes(x4, rtol=1e-3, atol=1e-3)
@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.session(use_gpu=True):
x = array_ops.zeros([0, 3])
y = math_ops.reduce_prod(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
@test_util.run_deprecated_v1
def testDegenerate(self):
with self.session(use_gpu=True):
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_prod(x, [0])
self.assertAllEqual(y.eval(), np.ones(9938))
class MinReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.amin(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.amin(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_min(x, reduction_axes, keepdims)
out = self.evaluate(tf_ans)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True) as sess:
v = math_ops.reduce_min([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(1, 31).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testDoubleReduce3D(self):
# Create a 3D array of doubles and reduce across all possible
# dimensions
np_arr = np.arange(1, 31).reshape([2, 3, 5]).astype(np.float64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
@test_util.run_deprecated_v1
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t, [1, 2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient2(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t, [1])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient3(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t, [2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 3, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient4(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [1], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.cached_session():
x = array_ops.zeros([0, 3])
y = math_ops.reduce_min(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
class MaxReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.amax(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.amax(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_max(x, reduction_axes, keepdims)
out = self.evaluate(tf_ans)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True) as sess:
v = math_ops.reduce_max([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
def testInt64Reduce3D(self):
# Create a 3D array of int64s and reduce across all possible
# dimensions
np_arr = np.arange(-31, -1).reshape([2, 3, 5]).astype(np.int64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(-31, -1).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testDoubleReduce3D(self):
# Create a 3D array of doubles and reduce across all possible
# dimensions
np_arr = np.arange(-31, -1).reshape([2, 3, 5]).astype(np.float64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
@test_util.run_deprecated_v1
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_max(t, [1, 2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient2(self):
s = [2, 3, 4, 2]
x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_max(t, [1])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient3(self):
s = [2, 3, 4, 2]
x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_max(t, [2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 3, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient4(self):
s = [2, 3, 4, 2]
x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_max(t)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [1], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.cached_session():
x = array_ops.zeros([0, 3])
y = math_ops.reduce_max(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
class AllReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.all(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.all(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_all(x, reduction_axes, keepdims)
out = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.session(use_gpu=True) as sess:
v = math_ops.reduce_all([True, True],
constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, True)
def testAll3D(self):
# Create a 3D array of bools and reduce across all possible
# dimensions
np_arr = (np.random.uniform(0, 1, 30) > 0.1).reshape([2, 3, 5])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testEmpty(self):
self._compareAll([], [0])
class AnyReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.any(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.any(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_any(x, reduction_axes, keepdims)
out = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.session(use_gpu=True) as sess:
v = math_ops.reduce_any([True, True],
constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, True)
def testAll3D(self):
# Create a 3D array of bools and reduce across all possible
# dimensions
np_arr = (np.random.uniform(0, 1, 30) > 0.9).reshape([2, 3, 5])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testEmpty(self):
self._compareAll([], [0])
class CountNonzeroReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False, zero=0,
feed_dict=None):
np_ans = (x != zero).astype(np.int32)
if reduction_axes is None:
np_ans = np.sum(np_ans, keepdims=keepdims)
else:
reduction_axes = np.array(reduction_axes).astype(np.int32)
for ra in reduction_axes.ravel()[::-1]:
np_ans = np.sum(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu) as sess:
tf_ans = math_ops.count_nonzero(x, reduction_axes, keepdims)
out = sess.run(tf_ans, feed_dict)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes, feed_dict=None):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareAll(x, reduction_axes[0])
self._compare(x, reduction_axes, False, use_gpu=True, feed_dict=feed_dict)
self._compare(x, reduction_axes, False, use_gpu=False, feed_dict=feed_dict)
self._compare(x, reduction_axes, True, use_gpu=True, feed_dict=feed_dict)
self._compare(x, reduction_axes, True, use_gpu=False, feed_dict=feed_dict)
@test_util.run_deprecated_v1
def testBoolReduce1D(self):
    # Create a 1D array of booleans
np_arr = np.asarray([False, False, True, False, False, True])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
@test_util.run_deprecated_v1
def testFloatReduce1D(self):
# Create a 1D array of floats
np_arr = np.asarray([0.0, 1.0, -1.0, 0.0, 0.0, 3.0]).astype(np.float32)
self._compareAll(np_arr, [0])
@test_util.run_deprecated_v1
def testFloatReduce4D(self):
# Create a 4D array of floats and reduce across some
# dimensions
np_arr = np.floor(np.arange(0.0, 210.0) / 100.0).reshape([2, 3, 5,
7]).astype(
np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
# Need specialization for reduce(4D, [0, 2])
# self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
self._compareAll(np_arr, [1, 2, 3])
self._compareAll(np_arr, [0, 1, 2, 3])
@test_util.run_deprecated_v1
def testExpand(self):
# Reduce an empty tensor to a nonempty tensor
x = np.zeros((5, 0))
self._compareAll(x, [1])
@test_util.run_deprecated_v1
def testDegenerate(self):
for use_gpu in False, True:
with self.cached_session(use_gpu=use_gpu):
for dtype in (dtypes.bool,):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.count_nonzero(x, [0])
self.assertAllEqual(y.eval(), np.zeros(9938))
def testStringReduce(self):
# Test case for GitHub issue 18712
with self.cached_session() as sess:
v = math_ops.count_nonzero(constant_op.constant(["test"]))
self.assertAllClose(self.evaluate(v), 1)
@test_util.run_deprecated_v1
def testStringReduce1D(self):
# Create a 1D array of strings
x = np.asarray(["", "", "a", "", "", "b"])
self._compare(x, None, keepdims=False, zero=np.str(""))
self._compare(x, [], keepdims=False, zero=np.str(""))
self._compare(x, [0], keepdims=False, zero=np.str(""))
self._compare(x, None, keepdims=True, zero=np.str(""))
self._compare(x, [], keepdims=True, zero=np.str(""))
self._compare(x, [0], keepdims=True, zero=np.str(""))
@test_util.run_deprecated_v1
def testStringReduce2D(self):
# Create a 2D array of strings
x = np.asarray([["", "", "a", "", "", "b"],
["", "c", "", "d", "", ""],
["e", "", "f", "", "", ""]])
self._compare(x, None, keepdims=False, zero=np.str(""))
self._compare(x, [], keepdims=False, zero=np.str(""))
self._compare(x, [0], keepdims=False, zero=np.str(""))
self._compare(x, [1], keepdims=False, zero=np.str(""))
self._compare(x, [0, 1], keepdims=False, zero=np.str(""))
self._compare(x, None, keepdims=True, zero=np.str(""))
self._compare(x, [], keepdims=True, zero=np.str(""))
self._compare(x, [0], keepdims=True, zero=np.str(""))
self._compare(x, [0, 1], keepdims=True, zero=np.str(""))
if __name__ == "__main__":
test.main()
|
py | 1a4272713e09b2dc79daecfaad596b053f16b842 | # Copyright 2020 (c) Cognizant Digital Business, Evolutionary AI. All rights reserved. Issued under the Apache 2.0 License.
import argparse
def predict(start_date: str,
end_date: str,
path_to_ips_file: str,
            output_file_path: str) -> None:
"""
Generates and saves a file with daily new cases predictions for the given countries, regions and intervention
    plans, between start_date and end_date, inclusive.
    :param start_date: day from which to start making predictions, as a string, format YYYY-MM-DD
:param end_date: day on which to stop making predictions, as a string, format YYYY-MM-DDD
:param path_to_ips_file: path to a csv file containing the intervention plans between inception date (Jan 1 2020)
and end_date, for the countries and regions for which a prediction is needed
    :param output_file_path: path to the file to which to save the predictions
:return: Nothing. Saves the generated predictions to an output_file_path CSV file
with columns "CountryName,RegionName,Date,PredictedDailyNewCases"
"""
# !!! YOUR CODE HERE !!!
    raise NotImplementedError
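    # Hypothetical sketch (illustration only, not the official baseline): a
    # trivial predictor could read the intervention plan and emit zero new
    # cases for each (CountryName, RegionName, Date) in the requested window.
    # This assumes the IPS file exposes those columns; replace with a real model.
    #
    #   import pandas as pd
    #   ips = pd.read_csv(path_to_ips_file, parse_dates=["Date"])
    #   mask = (ips["Date"] >= start_date) & (ips["Date"] <= end_date)
    #   preds = ips.loc[mask, ["CountryName", "RegionName", "Date"]].copy()
    #   preds["PredictedDailyNewCases"] = 0.0
    #   preds.to_csv(output_file_path, index=False)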
# !!! PLEASE DO NOT EDIT. THIS IS THE OFFICIAL COMPETITION API !!!
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--start_date",
dest="start_date",
type=str,
required=True,
help="Start date from which to predict, included, as YYYY-MM-DD. For example 2020-08-01")
parser.add_argument("-e", "--end_date",
dest="end_date",
type=str,
required=True,
help="End date for the last prediction, included, as YYYY-MM-DD. For example 2020-08-31")
parser.add_argument("-ip", "--interventions_plan",
dest="ip_file",
type=str,
required=True,
help="The path to an intervention plan .csv file")
parser.add_argument("-o", "--output_file",
dest="output_file",
type=str,
required=True,
help="The path to the CSV file where predictions should be written")
args = parser.parse_args()
print(f"Generating predictions from {args.start_date} to {args.end_date}...")
predict(args.start_date, args.end_date, args.ip_file, args.output_file)
print("Done!")
|
py | 1a427347552af23f98a5f3c02479460aab79f34f | from google.appengine.ext import vendor
# add any library installed in the lib folder
vendor.add('lib')
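# The 'lib' directory is the conventional App Engine vendoring target; it is
# typically populated before deployment with something like
# `pip install -t lib -r requirements.txt` (assumed setup, not shown here).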
|
py | 1a4273697cc6521a0824b79e2a10e59666a87465 | import json
import public_config as c
import logging
import argparse
import shutil
from tinydb import TinyDB, Query
from juriscraper.pacer import (
DocketReport,
PacerSession,
PossibleCaseNumberApi,
FreeOpinionReport,
)
logging.basicConfig(level=logging.DEBUG)
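# district_dict appears to map two-digit FJC/IDB district codes to PACER court
# identifiers for the New England district courts used in this project
# (e.g. "00" -> "med", D. Maine; "01" -> "mad", D. Mass.; "10" -> "vtd", D. Vt.).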
district_dict = {
"00": "med",
"01": "mad",
"02": "nhd",
"03": "rid",
"05": "ctd",
"10": "vtd",
}
class PlymouthState(object):
logging.info("Initializing Plymouth State object")
s = PacerSession(username=c.PACER_USERNAME, password=c.PACER_PASSWORD)
results = []
def get_pacer_case_ids(self):
"""Find PACER Case IDs from iQuery
:return: None
"""
q = Query()
db = TinyDB("db/master.json")
fjc_table = db.table("fjc")
for row in fjc_table.search((q.PACER_CASE_ID == "")):
report = PossibleCaseNumberApi(row["COURT"], self.s)
report.query(row["DOCKET_NO"])
data = report.data(office_number=row["OFFICE"], docket_number_letters="cv")
fjc_table.update(
{"PACER_CASE_ID": data["pacer_case_id"], "TITLE": data["title"]},
doc_ids=[row.doc_id],
)
def get_docket_json(self):
"""Download docket to disk from Pacer
:return: None
"""
q = Query()
db = TinyDB("db/master.json")
fjc_table = db.table("fjc")
for row in fjc_table.search(~(q.PACER_CASE_ID == "") & (q.JSON == "False")):
rep = DocketReport(row["COURT"], self.s)
rep.query(
row["PACER_CASE_ID"],
show_parties_and_counsel=True,
show_terminated_parties=True,
show_list_of_member_cases=True,
include_pdf_headers=True,
show_multiple_docs=False,
)
with open(
"downloads/json/pacer_docket_%s.json" % row["PACER_CASE_ID"], "w"
) as write_file:
json.dump(rep.data, write_file, indent=4, sort_keys=True, default=str)
with open(
"downloads/html/pacer_docket_%s.html" % row["PACER_CASE_ID"], "w"
) as file:
file.write(rep.response.text)
fjc_table.update(
{
"JSON": "True",
"pacer_doc_id": rep.data["docket_entries"][0]["pacer_doc_id"],
},
doc_ids=[row.doc_id],
)
logging.info("Finished collecting JSON and HTML")
def download_pdfs(self):
"""Download the first (presumably complaint) PDF to downlaods dir.
:return: None
"""
q = Query()
db = TinyDB("db/master.json")
fjc_table = db.table("fjc")
for row in fjc_table.search((q.JSON == "True") & (q.PDF == "False")):
logging.info(
"Collecting PDF #%s, in %s" % (row["PACER_CASE_ID"], row["TITLE"])
)
report = FreeOpinionReport(row["COURT"], self.s)
r = report.download_pdf(row["PACER_CASE_ID"], row["pacer_doc_id"])
with open(
"downloads/pdf/pacer_complaint_%s.pdf" % row["PACER_CASE_ID"], "w"
) as file:
file.write(r.content)
fjc_table.update(
{"PDF": "True"}, doc_ids=[row.doc_id],
)
logging.info(
"Collected PDF #%s, in %s" % (row["PACER_CASE_ID"], row["TITLE"])
)
def get_pacer_ids():
"""Use PACER iQuery to Identify PACER unique IDs
:return: None
"""
logging.info("Begin collecting PACER CASE IDS")
p = PlymouthState()
p.get_pacer_case_ids()
def download_json_html():
"""Scrape HTML and JSON from Pacer
    Save the juriscraper responses to the downloads/json and downloads/html dirs.
:return: None
"""
logging.info("Begin collecting Dockets")
p = PlymouthState()
p.get_docket_json()
def get_pdfs():
"""Collect PDF from Pacer
:return: None
"""
logging.info("Begin collecting PDFS")
p = PlymouthState()
p.download_pdfs()
def zip_files():
"""Zip the HTML, PDF and JSON Directories
:return: None
"""
shutil.make_archive("downloads/zip/html_files", "zip", "downloads/html/")
shutil.make_archive("downloads/zip/pdf_files", "zip", "downloads/pdf/")
shutil.make_archive("downloads/zip/json_files", "zip", "downloads/json/")
class Command(object):
help = "Collect cases for Plymouth State client project"
VALID_ACTIONS = {
"get-pacer-ids": get_pacer_ids,
"get-dockets": download_json_html,
"get-pdfs": get_pdfs,
"zip-files": zip_files,
}
parser = argparse.ArgumentParser(description="Process Plymouth State")
parser.add_argument("-a", "--action", help="Must choose an action", required=True)
args = vars(parser.parse_args())
VALID_ACTIONS[args["action"]]()
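# Example invocations (assuming this module is saved as, e.g., plymouth.py):
#
#   python plymouth.py --action get-pacer-ids
#   python plymouth.py --action get-dockets
#   python plymouth.py --action get-pdfs
#   python plymouth.py --action zip-files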
|
py | 1a427460f5976f42ee8304e0c75a2a2d2aa1f7bf | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 16 18:36:15 2016
@author: kyle
"""
import numpy as np
import pandas as pd
from sklearn.model_selection import cross_val_score
from sklearn.svm import LinearSVC
# classification step
# target contains the labels in order
target = pd.read_table('/media/kyle/My Passport/sample_files_183/labels_183.txt', header=None)
del target[0]
label_cv = target[1]
# convert label to binary
label_bin = []
for i in label_cv:
if i == "t2d":
label_bin.append(1)
else:
label_bin.append(0)
label_bin = np.array(label_bin)  # 1-D label vector for scikit-learn
# load the precomputed similarity matrix (assumes it was built and saved by an
# earlier step of the pipeline)
sim_mat = np.load("sim_mat_183.npy")
clf = LinearSVC(penalty="l1", dual=False)
scores_accuracy = cross_val_score(clf, X=sim_mat, y=label_bin, cv=5, n_jobs=3, scoring="accuracy")
scores_f1 = cross_val_score(clf, sim_mat, label_bin, cv=5, n_jobs=3, scoring="f1")
scores_rocauc = cross_val_score(clf, sim_mat, label_bin, cv=5, n_jobs=3, scoring="roc_auc")
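# Report the cross-validated metrics (mean +/- standard deviation over the 5 folds).
print("accuracy: %.3f +/- %.3f" % (scores_accuracy.mean(), scores_accuracy.std()))
print("f1:       %.3f +/- %.3f" % (scores_f1.mean(), scores_f1.std()))
print("roc_auc:  %.3f +/- %.3f" % (scores_rocauc.mean(), scores_rocauc.std()))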
np.save("sim_mat_183.npy", sim_mat)
|