# ===== the-stack_0_9159 =====
from __future__ import print_function
import argparse
import torch
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import torch.utils.data
from models import vgg, resnet, densenet, base_cnn, TDNN
# Training settings
parser = argparse.ArgumentParser(description='Test architectures with dummy data')
parser.add_argument('--model', choices=['cnn', 'vgg', 'resnet', 'densenet', 'tdnn'], default='resnet')
parser.add_argument('--nclasses', type=int, default=10, metavar='N', help='number of classes')
args = parser.parse_args()
if args.model == 'cnn':
model = base_cnn.CNN(n_classes=args.nclasses)
elif args.model == 'vgg':
model = vgg.VGG('VGG11', n_classes=args.nclasses)
elif args.model == 'resnet':
model = resnet.ResNet18(n_classes=args.nclasses)
elif args.model == 'densenet':
model = densenet.DenseNet121(n_classes=args.nclasses)
elif args.model == 'tdnn':
model = TDNN.TDNN(n_classes=args.nclasses)
print('\n', model, '\n')
print('\n\nNumber of parameters: {}\n'.format(sum(p.numel() for p in model.parameters())))
batch = torch.rand(3, 1, 257, 257)
out = model(batch)
print(out.size(), '\n')
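# Usage sketch (not part of the original script; the file name is an assumption):
#   python test_architectures.py --model vgg --nclasses 10
# This prints the chosen model, its parameter count, and the output size for a
# dummy batch of three 1x257x257 inputs.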
# ===== the-stack_0_9160 =====
#!/usr/bin/env python
"""
fitpack (dierckx in netlib) --- A Python-C wrapper to FITPACK (by P. Dierckx).
FITPACK is a collection of FORTRAN programs for curve and surface
fitting with splines and tensor product splines.
See
http://www.cs.kuleuven.ac.be/cwis/research/nalag/research/topics/fitpack.html
or
http://www.netlib.org/dierckx/index.html
Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the SciPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
TODO: Make interfaces to the following fitpack functions:
For univariate splines: cocosp, concon, fourco, insert
For bivariate splines: profil, regrid, parsur, surev
"""
from __future__ import division, print_function, absolute_import
__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
'bisplrep', 'bisplev', 'insert']
__version__ = "$Revision$"[10:-1]
from . import _fitpack
from numpy import atleast_1d, array, ones, zeros, sqrt, ravel, transpose, \
dot, sin, cos, pi, arange, empty, iinfo, intc, asarray
myasarray = atleast_1d
# Try to replace _fitpack interface with
# f2py-generated version
from . import dfitpack
def _intc_overflow(x, msg=None):
"""Cast the value to an intc and raise an OverflowError if the value
cannot fit.
"""
if x > iinfo(intc).max:
if msg is None:
msg = '%r cannot fit into an intc' % x
raise OverflowError(msg)
return intc(x)
_iermess = {0:["""\
The spline has a residual sum of squares fp such that abs(fp-s)/s<=0.001""",None],
-1:["""\
The spline is an interpolating spline (fp=0)""",None],
-2:["""\
The spline is weighted least-squares polynomial of degree k.
fp gives the upper bound fp0 for the smoothing factor s""",None],
1:["""\
The required storage space exceeds the available storage space.
Probable causes: data (x,y) size is too small or smoothing parameter s is too small (fp>s).""",ValueError],
2:["""\
A theoretically impossible result occurred when finding a smoothing spline
with fp = s. Probable cause: s too small. (abs(fp-s)/s>0.001)""",ValueError],
3:["""\
The maximal number of iterations (20) allowed for finding a smoothing
spline with fp=s has been reached. Probable cause: s too small.
(abs(fp-s)/s>0.001)""",ValueError],
10:["""\
Error on input data""",ValueError],
'unknown':["""\
An error occurred""",TypeError]}
_iermess2 = {0:["""\
The spline has a residual sum of squares fp such that abs(fp-s)/s<=0.001""",None],
-1:["""\
The spline is an interpolating spline (fp=0)""",None],
-2:["""\
The spline is weighted least-squares polynomial of degree kx and ky.
fp gives the upper bound fp0 for the smoothing factor s""",None],
-3:["""\
Warning. The coefficients of the spline have been computed as the minimal
norm least-squares solution of a rank deficient system.""",None],
1:["""\
The required storage space exceeds the available storage space.
Probable causes: nxest or nyest too small or s is too small. (fp>s)""",ValueError],
2:["""\
A theoretically impossible result occurred when finding a smoothing spline
with fp = s. Probable causes: s too small or badly chosen eps.
(abs(fp-s)/s>0.001)""",ValueError],
3:["""\
The maximal number of iterations (20) allowed for finding a smoothing
spline with fp=s has been reached. Probable cause: s too small.
(abs(fp-s)/s>0.001)""",ValueError],
4:["""\
No more knots can be added because the number of B-spline coefficients
already exceeds the number of data points m. Probable causes: either
s or m too small. (fp>s)""",ValueError],
5:["""\
No more knots can be added because the additional knot would coincide
with an old one. Probable cause: s too small or too large a weight
to an inaccurate data point. (fp>s)""",ValueError],
10:["""\
Error on input data""",ValueError],
11:["""\
lwrk2 too small, i.e. there is not enough workspace for computing
the minimal least-squares solution of a rank deficient system of linear
equations.""",ValueError],
'unknown':["""\
An error occurred""",TypeError]}
_parcur_cache = {'t': array([],float), 'wrk': array([],float),
'iwrk':array([],intc), 'u': array([],float),'ub':0,'ue':1}
def splprep(x,w=None,u=None,ub=None,ue=None,k=3,task=0,s=None,t=None,
full_output=0,nest=None,per=0,quiet=1):
"""
Find the B-spline representation of an N-dimensional curve.
Given a list of N rank-1 arrays, `x`, which represent a curve in
N-dimensional space parametrized by `u`, find a smooth approximating
spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.
Parameters
----------
x : array_like
A list of sample vector arrays representing the curve.
w : array_like
Strictly positive rank-1 array of weights the same length as `x[0]`.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the `x` values have standard-deviation given by
the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.
u : array_like, optional
An array of parameter values. If not given, these values are
calculated automatically as ``M = len(x[0])``, where
v[0] = 0
v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)
u[i] = v[i] / v[M-1]
ub, ue : int, optional
The end-points of the parameters interval. Defaults to
u[0] and u[-1].
k : int, optional
Degree of the spline. Cubic splines are recommended.
Even values of `k` should be avoided especially with a small s-value.
``1 <= k <= 5``, default is 3.
task : int, optional
If task==0 (default), find t and c for a given smoothing factor, s.
If task==1, find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1
for the same set of data.
If task=-1 find the weighted least square spline for a given set of
knots, t.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
where g(x) is the smoothed interpolation of (x,y). The user can
use `s` to control the trade-off between closeness and smoothness
of fit. Larger `s` means more smoothing while smaller values of `s`
indicate less smoothing. Recommended values of `s` depend on the
weights, w. If the weights represent the inverse of the
standard-deviation of y, then a good `s` value should be found in
the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
data points in x, y, and w.
t : int, optional
The knots needed for task=-1.
full_output : int, optional
If non-zero, then return optional outputs.
nest : int, optional
An over-estimate of the total number of knots of the spline to
help in determining the storage space. By default nest=m+2*k.
A value of nest=m+k+1 is always large enough.
per : int, optional
If non-zero, data points are considered periodic with period
``x[m-1] - x[0]`` and a smooth periodic spline approximation is
returned. Values of ``y[m-1]`` and ``w[m-1]`` are not used.
quiet : int, optional
Non-zero to suppress messages.
Returns
-------
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
u : array
An array of the values of the parameter.
fp : float
The weighted sum of squared residuals of the spline approximation.
ier : int
An integer flag about splprep success. Success is indicated
if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
Otherwise an error is raised.
msg : str
A message corresponding to the integer flag, ier.
See Also
--------
splrep, splev, sproot, spalde, splint,
bisplrep, bisplev
UnivariateSpline, BivariateSpline
Notes
-----
See `splev` for evaluation of the spline and its derivatives.
References
----------
.. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines, Computer Graphics and Image Processing",
20 (1982) 171-184.
.. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines", report tw55, Dept. Computer Science,
K.U.Leuven, 1981.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
"""
if task <= 0:
_parcur_cache = {'t': array([],float), 'wrk': array([],float),
'iwrk':array([],intc),'u': array([],float),
'ub':0,'ue':1}
x = myasarray(x)
idim,m = x.shape
if per:
for i in range(idim):
if x[i][0] != x[i][-1]:
if quiet < 2:
print('Warning: Setting x[%d][%d]=x[%d][0]' % (i, m-1, i))
x[i][-1] = x[i][0]
if not 0 < idim < 11:
raise TypeError('0 < idim < 11 must hold')
if w is None:
w = ones(m, float)
else:
w = myasarray(w)
ipar = (u is not None)
if ipar:
_parcur_cache['u'] = u
if ub is None:
_parcur_cache['ub'] = u[0]
else:
_parcur_cache['ub'] = ub
if ue is None:
_parcur_cache['ue'] = u[-1]
else:
_parcur_cache['ue'] = ue
else:
_parcur_cache['u'] = zeros(m,float)
if not (1 <= k <= 5):
raise TypeError('1 <= k= %d <=5 must hold' % k)
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if (not len(w) == m) or (ipar == 1 and (not len(u) == m)):
raise TypeError('Mismatch of input dimensions')
if s is None:
s = m-sqrt(2*m)
if t is None and task == -1:
raise TypeError('Knots must be given for task=-1')
if t is not None:
_parcur_cache['t'] = myasarray(t)
n = len(_parcur_cache['t'])
if task == -1 and n < 2*k+2:
raise TypeError('There must be at least 2*k+2 knots for task=-1')
if m <= k:
raise TypeError('m > k must hold')
if nest is None:
nest = m+2*k
if (task >= 0 and s == 0) or (nest < 0):
if per:
nest = m+2*k
else:
nest = m+k+1
nest = max(nest,2*k+3)
u = _parcur_cache['u']
ub = _parcur_cache['ub']
ue = _parcur_cache['ue']
t = _parcur_cache['t']
wrk = _parcur_cache['wrk']
iwrk = _parcur_cache['iwrk']
t,c,o = _fitpack._parcur(ravel(transpose(x)),w,u,ub,ue,k,task,ipar,s,t,
nest,wrk,iwrk,per)
_parcur_cache['u'] = o['u']
_parcur_cache['ub'] = o['ub']
_parcur_cache['ue'] = o['ue']
_parcur_cache['t'] = t
_parcur_cache['wrk'] = o['wrk']
_parcur_cache['iwrk'] = o['iwrk']
ier,fp,n = o['ier'],o['fp'],len(t)
u = o['u']
c.shape = idim,n-k-1
tcku = [t,list(c),k],u
if ier <= 0 and not quiet:
print(_iermess[ier][0])
print("\tk=%d n=%d m=%d fp=%f s=%f" % (k,len(t),m,fp,s))
if ier > 0 and not full_output:
if ier in [1,2,3]:
print("Warning: "+_iermess[ier][0])
else:
try:
raise _iermess[ier][1](_iermess[ier][0])
except KeyError:
raise _iermess['unknown'][1](_iermess['unknown'][0])
if full_output:
try:
return tcku,fp,ier,_iermess[ier][0]
except KeyError:
return tcku,fp,ier,_iermess['unknown'][0]
else:
return tcku
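# Illustrative usage sketch (not part of the original module): fit a closed
# parametric curve with splprep and resample it with splev. The helper name and
# the sample data are assumptions added for clarity.
def _example_splprep_usage():
    theta = arange(0, 2*pi, 0.1)
    pts = [cos(theta), sin(theta)]             # one coordinate array per dimension
    (t, c, k), u = splprep(pts, s=0)           # interpolating parametric spline
    u_fine = arange(0, 1.01, 0.01)
    x_new, y_new = splev(u_fine, (t, c, k))    # evaluate both coordinates at new parameter values
    return x_new, y_new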
_curfit_cache = {'t': array([],float), 'wrk': array([],float),
'iwrk':array([],intc)}
def splrep(x,y,w=None,xb=None,xe=None,k=3,task=0,s=None,t=None,
full_output=0,per=0,quiet=1):
"""
Find the B-spline representation of 1-D curve.
Given the set of data points ``(x[i], y[i])`` determine a smooth spline
approximation of degree k on the interval ``xb <= x <= xe``.
Parameters
----------
x, y : array_like
The data points defining a curve y = f(x).
w : array_like
Strictly positive rank-1 array of weights the same length as x and y.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the y values have standard-deviation given by the
vector d, then w should be 1/d. Default is ones(len(x)).
xb, xe : float
The interval to fit. If None, these default to x[0] and x[-1]
respectively.
k : int
The order of the spline fit. It is recommended to use cubic splines.
Even order splines should be avoided especially with small s values.
1 <= k <= 5
task : {1, 0, -1}
If task==0 find t and c for a given smoothing factor, s.
If task==1 find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1 for the same
set of data (t will be stored and used internally)
If task=-1 find the weighted least square spline for a given set of
knots, t. These should be interior knots as knots on the ends will be
added automatically.
s : float
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s where g(x)
is the smoothed interpolation of (x,y). The user can use s to control
the tradeoff between closeness and smoothness of fit. Larger s means
more smoothing while smaller values of s indicate less smoothing.
Recommended values of s depend on the weights, w. If the weights
represent the inverse of the standard-deviation of y, then a good s
value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is
the number of datapoints in x, y, and w. default : s=m-sqrt(2*m) if
weights are supplied. s = 0.0 (interpolating) if no weights are
supplied.
t : int
The knots needed for task=-1. If given then task is automatically set
to -1.
full_output : bool
If non-zero, then return optional outputs.
per : bool
If non-zero, data points are considered periodic with period x[m-1] -
x[0] and a smooth periodic spline approximation is returned. Values of
y[m-1] and w[m-1] are not used.
quiet : bool
Non-zero to suppress messages.
Returns
-------
tck : tuple
(t,c,k) a tuple containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
fp : array, optional
The weighted sum of squared residuals of the spline approximation.
ier : int, optional
An integer flag about splrep success. Success is indicated if ier<=0.
If ier in [1,2,3] an error occurred but was not raised. Otherwise an
error is raised.
msg : str, optional
A message corresponding to the integer flag, ier.
See Also
--------
UnivariateSpline, BivariateSpline
splprep, splev, sproot, spalde, splint
bisplrep, bisplev
Notes
-----
See splev for evaluation of the spline and its derivatives. Uses the
FORTRAN routine curfit from FITPACK.
References
----------
Based on algorithms described in [1], [2], [3], and [4]:
.. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
integration of experimental data using spline functions",
J.Comp.Appl.Maths 1 (1975) 165-184.
.. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
1286-1304.
.. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
.. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 10, 10)
>>> y = np.sin(x)
>>> tck = splrep(x, y)
>>> x2 = np.linspace(0, 10, 200)
>>> y2 = splev(x2, tck)
>>> plt.plot(x, y, 'o', x2, y2)
"""
if task <= 0:
_curfit_cache = {}
x,y = map(myasarray,[x,y])
m = len(x)
if w is None:
w = ones(m,float)
if s is None:
s = 0.0
else:
w = myasarray(w)
if s is None:
s = m-sqrt(2*m)
if not len(w) == m:
raise TypeError('len(w)=%d is not equal to m=%d' % (len(w),m))
if (m != len(y)) or (m != len(w)):
raise TypeError('Lengths of the first three arguments (x,y,w) must be equal')
if not (1 <= k <= 5):
raise TypeError('Given degree of the spline (k=%d) is not supported. (1<=k<=5)' % k)
if m <= k:
raise TypeError('m > k must hold')
if xb is None:
xb = x[0]
if xe is None:
xe = x[-1]
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if t is not None:
task = -1
if task == -1:
if t is None:
raise TypeError('Knots must be given for task=-1')
numknots = len(t)
_curfit_cache['t'] = empty((numknots + 2*k+2,),float)
_curfit_cache['t'][k+1:-k-1] = t
nest = len(_curfit_cache['t'])
elif task == 0:
if per:
nest = max(m+2*k,2*k+3)
else:
nest = max(m+k+1,2*k+3)
t = empty((nest,),float)
_curfit_cache['t'] = t
if task <= 0:
if per:
_curfit_cache['wrk'] = empty((m*(k+1)+nest*(8+5*k),),float)
else:
_curfit_cache['wrk'] = empty((m*(k+1)+nest*(7+3*k),),float)
_curfit_cache['iwrk'] = empty((nest,),intc)
try:
t = _curfit_cache['t']
wrk = _curfit_cache['wrk']
iwrk = _curfit_cache['iwrk']
except KeyError:
raise TypeError("must call with task=1 only after"
" call with task=0,-1")
if not per:
n,c,fp,ier = dfitpack.curfit(task, x, y, w, t, wrk, iwrk, xb, xe, k, s)
else:
n,c,fp,ier = dfitpack.percur(task, x, y, w, t, wrk, iwrk, k, s)
tck = (t[:n],c[:n],k)
if ier <= 0 and not quiet:
print(_iermess[ier][0])
print("\tk=%d n=%d m=%d fp=%f s=%f" % (k,len(t),m,fp,s))
if ier > 0 and not full_output:
if ier in [1,2,3]:
print("Warning: "+_iermess[ier][0])
else:
try:
raise _iermess[ier][1](_iermess[ier][0])
except KeyError:
raise _iermess['unknown'][1](_iermess['unknown'][0])
if full_output:
try:
return tck,fp,ier,_iermess[ier][0]
except KeyError:
return tck,fp,ier,_iermess['unknown'][0]
else:
return tck
def _ntlist(l): # return non-trivial list
return l
# if len(l)>1: return l
# return l[0]
def splev(x, tck, der=0, ext=0):
"""
Evaluate a B-spline or its derivatives.
Given the knots and coefficients of a B-spline representation, evaluate
the value of the smoothing polynomial and its derivatives. This is a
wrapper around the FORTRAN routines splev and splder of FITPACK.
Parameters
----------
x : array_like
A 1-D array of points at which to return the value of the smoothed
spline or its derivatives. If `tck` was returned from `splprep`,
then the parameter values, u should be given.
tck : tuple
A sequence of length 3 returned by `splrep` or `splprep` containing
the knots, coefficients, and degree of the spline.
der : int
The order of derivative of the spline to compute (must be less than
or equal to k).
ext : int
Controls the value returned for elements of ``x`` not in the
interval defined by the knot sequence.
* if ext=0, return the extrapolated value.
* if ext=1, return 0
* if ext=2, raise a ValueError
The default value is 0.
Returns
-------
y : ndarray or list of ndarrays
An array of values representing the spline function evaluated at
the points in ``x``. If `tck` was returned from splrep, then this
is a list of arrays representing the curve in N-dimensional space.
See Also
--------
splprep, splrep, sproot, spalde, splint
bisplrep, bisplev
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
"""
t,c,k = tck
try:
c[0][0]
parametric = True
except:
parametric = False
if parametric:
return list(map(lambda c, x=x, t=t, k=k, der=der: splev(x, [t,c,k], der, ext), c))
else:
if not (0 <= der <= k):
raise ValueError("0<=der=%d<=k=%d must hold" % (der,k))
if not ext in (0,1,2):
raise ValueError("ext not in (0, 1, 2)")
x = asarray(x)
shape = x.shape
x = atleast_1d(x)
y, ier = _fitpack._spl_(x, der, t, c, k, ext)
if ier == 10:
raise ValueError("Invalid input data")
if ier == 1:
raise ValueError("Found x value not in the domain")
if ier:
raise TypeError("An error occurred")
return y.reshape(shape)
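# Illustrative usage sketch (not part of the original module): evaluate a
# fitted spline and its first derivative. The helper name is an assumption.
def _example_splev_usage():
    x_pts = arange(0, 10, 0.5)
    tck = splrep(x_pts, sin(x_pts))
    y_fit = splev(x_pts, tck)            # spline values at the data points
    dy_fit = splev(x_pts, tck, der=1)    # first derivative, approximates cos(x)
    return y_fit, dy_fit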
def splint(a,b,tck,full_output=0):
"""
Evaluate the definite integral of a B-spline.
Given the knots and coefficients of a B-spline, evaluate the definite
integral of the smoothing polynomial between two given points.
Parameters
----------
a, b : float
The end-points of the integration interval.
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the spline (see `splev`).
full_output : int, optional
Non-zero to return optional output.
Returns
-------
integral : float
The resulting integral.
wrk : ndarray
An array containing the integrals of the normalized B-splines
defined on the set of knots.
See Also
--------
splprep, splrep, sproot, spalde, splev
bisplrep, bisplev
UnivariateSpline, BivariateSpline
References
----------
.. [1] P.W. Gaffney, "The calculation of indefinite integrals of b-splines",
J. Inst. Maths Applics, 17, p.37-41, 1976.
.. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
"""
t,c,k = tck
try:
c[0][0]
parametric = True
except:
parametric = False
if parametric:
return _ntlist(list(map(lambda c,a=a,b=b,t=t,k=k:splint(a,b,[t,c,k]),c)))
else:
aint,wrk = _fitpack._splint(t,c,k,a,b)
if full_output:
return aint,wrk
else:
return aint
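# Illustrative usage sketch (not part of the original module): the definite
# integral of sin(x) over [0, pi] is 2; splint on an interpolating spline of a
# fine sample comes close to that value. The helper name is an assumption.
def _example_splint_usage():
    x_pts = arange(0, pi + 0.01, 0.01)
    tck = splrep(x_pts, sin(x_pts))
    return splint(0, pi, tck)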
def sproot(tck,mest=10):
"""
Find the roots of a cubic B-spline.
Given the knots (>=8) and coefficients of a cubic B-spline return the
roots of the spline.
Parameters
----------
tck : tuple
A tuple (t,c,k) containing the vector of knots,
the B-spline coefficients, and the degree of the spline.
The number of knots must be >= 8, and the degree must be 3.
The knots must be a monotonically increasing sequence.
mest : int
An estimate of the number of zeros (Default is 10).
Returns
-------
zeros : ndarray
An array giving the roots of the spline.
See also
--------
splprep, splrep, splint, spalde, splev
bisplrep, bisplev
UnivariateSpline, BivariateSpline
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
"""
t,c,k = tck
if k != 3:
raise ValueError("sproot works only for cubic (k=3) splines")
try:
c[0][0]
parametric = True
except:
parametric = False
if parametric:
return _ntlist(list(map(lambda c,t=t,k=k,mest=mest:sproot([t,c,k],mest),c)))
else:
if len(t) < 8:
raise TypeError("The number of knots %d>=8" % len(t))
z,ier = _fitpack._sproot(t,c,k,mest)
if ier == 10:
raise TypeError("Invalid input data. t1<=..<=t4<t5<..<tn-3<=..<=tn must hold.")
if ier == 0:
return z
if ier == 1:
print("Warning: the number of zeros exceeds mest")
return z
raise TypeError("Unknown error")
def spalde(x,tck):
"""
Evaluate all derivatives of a B-spline.
Given the knots and coefficients of a cubic B-spline compute all
derivatives up to order k at a point (or set of points).
Parameters
----------
x : array_like
A point or a set of points at which to evaluate the derivatives.
Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
tck : tuple
A tuple (t,c,k) containing the vector of knots,
the B-spline coefficients, and the degree of the spline.
Returns
-------
results : {ndarray, list of ndarrays}
An array (or a list of arrays) containing all derivatives
up to order k inclusive for each point `x`.
See Also
--------
splprep, splrep, splint, sproot, splev, bisplrep, bisplev,
UnivariateSpline, BivariateSpline
References
----------
.. [1] de Boor C : On calculating with b-splines, J. Approximation Theory
6 (1972) 50-62.
.. [2] Cox M.G. : The numerical evaluation of b-splines, J. Inst. Maths
applics 10 (1972) 134-149.
.. [3] Dierckx P. : Curve and surface fitting with splines, Monographs on
Numerical Analysis, Oxford University Press, 1993.
"""
t,c,k = tck
try:
c[0][0]
parametric = True
except:
parametric = False
if parametric:
return _ntlist(list(map(lambda c,x=x,t=t,k=k:spalde(x,[t,c,k]),c)))
else:
x = myasarray(x)
if len(x) > 1:
return list(map(lambda x,tck=tck:spalde(x,tck),x))
d,ier = _fitpack._spalde(t,c,k,x[0])
if ier == 0:
return d
if ier == 10:
raise TypeError("Invalid input data. t(k)<=x<=t(n-k+1) must hold.")
raise TypeError("Unknown error")
# def _curfit(x,y,w=None,xb=None,xe=None,k=3,task=0,s=None,t=None,
# full_output=0,nest=None,per=0,quiet=1):
_surfit_cache = {'tx': array([],float),'ty': array([],float),
'wrk': array([],float), 'iwrk':array([],intc)}
def bisplrep(x,y,z,w=None,xb=None,xe=None,yb=None,ye=None,kx=3,ky=3,task=0,
s=None,eps=1e-16,tx=None,ty=None,full_output=0,
nxest=None,nyest=None,quiet=1):
"""
Find a bivariate B-spline representation of a surface.
Given a set of data points (x[i], y[i], z[i]) representing a surface
z=f(x,y), compute a B-spline representation of the surface. Based on
the routine SURFIT from FITPACK.
Parameters
----------
x, y, z : ndarray
Rank-1 arrays of data points.
w : ndarray, optional
Rank-1 array of weights. By default ``w=np.ones(len(x))``.
xb, xe : float, optional
End points of approximation interval in `x`.
By default ``xb = x.min(), xe=x.max()``.
yb, ye : float, optional
End points of approximation interval in `y`.
By default ``yb=y.min(), ye = y.max()``.
kx, ky : int, optional
The degrees of the spline (1 <= kx, ky <= 5).
Third order (kx=ky=3) is recommended.
task : int, optional
If task=0, find knots in x and y and coefficients for a given
smoothing factor, s.
If task=1, find knots and coefficients for another value of the
smoothing factor, s. bisplrep must have been previously called
with task=0 or task=1.
If task=-1, find coefficients for a given set of knots tx, ty.
s : float, optional
A non-negative smoothing factor. If weights correspond
to the inverse of the standard-deviation of the errors in z,
then a good s-value should be found in the range
``(m-sqrt(2*m),m+sqrt(2*m))`` where m=len(x).
eps : float, optional
A threshold for determining the effective rank of an
over-determined linear system of equations (0 < eps < 1).
`eps` is not likely to need changing.
tx, ty : ndarray, optional
Rank-1 arrays of the knots of the spline for task=-1
full_output : int, optional
Non-zero to return optional outputs.
nxest, nyest : int, optional
Over-estimates of the total number of knots. If None then
``nxest = max(kx+sqrt(m/2),2*kx+3)``,
``nyest = max(ky+sqrt(m/2),2*ky+3)``.
quiet : int, optional
Non-zero to suppress printing of messages.
Returns
-------
tck : array_like
A list [tx, ty, c, kx, ky] containing the knots (tx, ty) and
coefficients (c) of the bivariate B-spline representation of the
surface along with the degree of the spline.
fp : ndarray
The weighted sum of squared residuals of the spline approximation.
ier : int
An integer flag about bisplrep success. Success is indicated if
ier<=0. If ier in [1,2,3] an error occurred but was not raised.
Otherwise an error is raised.
msg : str
A message corresponding to the integer flag, ier.
See Also
--------
splprep, splrep, splint, sproot, splev
UnivariateSpline, BivariateSpline
Notes
-----
See `bisplev` to evaluate the value of the B-spline given its tck
representation.
References
----------
.. [1] Dierckx P.:An algorithm for surface fitting with spline functions
Ima J. Numer. Anal. 1 (1981) 267-283.
.. [2] Dierckx P.:An algorithm for surface fitting with spline functions
report tw50, Dept. Computer Science,K.U.Leuven, 1980.
.. [3] Dierckx P.:Curve and surface fitting with splines, Monographs on
Numerical Analysis, Oxford University Press, 1993.
"""
x,y,z = map(myasarray,[x,y,z])
x,y,z = map(ravel,[x,y,z]) # ensure 1-d arrays.
m = len(x)
if not (m == len(y) == len(z)):
raise TypeError('len(x)==len(y)==len(z) must hold.')
if w is None:
w = ones(m,float)
else:
w = myasarray(w)
if not len(w) == m:
raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
if xb is None:
xb = x.min()
if xe is None:
xe = x.max()
if yb is None:
yb = y.min()
if ye is None:
ye = y.max()
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if s is None:
s = m-sqrt(2*m)
if tx is None and task == -1:
raise TypeError('Knots_x must be given for task=-1')
if tx is not None:
_surfit_cache['tx'] = myasarray(tx)
nx = len(_surfit_cache['tx'])
if ty is None and task == -1:
raise TypeError('Knots_y must be given for task=-1')
if ty is not None:
_surfit_cache['ty'] = myasarray(ty)
ny = len(_surfit_cache['ty'])
if task == -1 and nx < 2*kx+2:
raise TypeError('There must be at least 2*kx+2 knots_x for task=-1')
if task == -1 and ny < 2*ky+2:
raise TypeError('There must be at least 2*ky+2 knots_y for task=-1')
if not ((1 <= kx <= 5) and (1 <= ky <= 5)):
raise TypeError('Given degree of the spline (kx,ky=%d,%d) is not supported. (1<=k<=5)' % (kx,ky))
if m < (kx+1)*(ky+1):
raise TypeError('m >= (kx+1)(ky+1) must hold')
if nxest is None:
nxest = int(kx+sqrt(m/2))
if nyest is None:
nyest = int(ky+sqrt(m/2))
nxest,nyest = max(nxest,2*kx+3),max(nyest,2*ky+3)
if task >= 0 and s == 0:
nxest = int(kx+sqrt(3*m))
nyest = int(ky+sqrt(3*m))
if task == -1:
_surfit_cache['tx'] = myasarray(tx)
_surfit_cache['ty'] = myasarray(ty)
tx,ty = _surfit_cache['tx'],_surfit_cache['ty']
wrk = _surfit_cache['wrk']
iwrk = _surfit_cache['iwrk']
u,v,km,ne = nxest-kx-1,nyest-ky-1,max(kx,ky)+1,max(nxest,nyest)
bx,by = kx*v+ky+1,ky*u+kx+1
b1,b2 = bx,bx+v-ky
if bx > by:
b1,b2 = by,by+u-kx
msg = "Too many data points to interpolate"
lwrk1 = _intc_overflow(u*v*(2+b1+b2)+2*(u+v+km*(m+ne)+ne-kx-ky)+b2+1, msg=msg)
lwrk2 = _intc_overflow(u*v*(b2+1)+b2, msg=msg)
tx,ty,c,o = _fitpack._surfit(x,y,z,w,xb,xe,yb,ye,kx,ky,task,s,eps,
tx,ty,nxest,nyest,wrk,lwrk1,lwrk2)
_curfit_cache['tx'] = tx
_curfit_cache['ty'] = ty
_curfit_cache['wrk'] = o['wrk']
ier,fp = o['ier'],o['fp']
tck = [tx,ty,c,kx,ky]
ierm = min(11,max(-3,ier))
if ierm <= 0 and not quiet:
print(_iermess2[ierm][0])
print("\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" % (kx,ky,len(tx),
len(ty),m,fp,s))
if ierm > 0 and not full_output:
if ier in [1,2,3,4,5]:
print("Warning: "+_iermess2[ierm][0])
print("\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" % (kx,ky,len(tx),
len(ty),m,fp,s))
else:
try:
raise _iermess2[ierm][1](_iermess2[ierm][0])
except KeyError:
raise _iermess2['unknown'][1](_iermess2['unknown'][0])
if full_output:
try:
return tck,fp,ier,_iermess2[ierm][0]
except KeyError:
return tck,fp,ier,_iermess2['unknown'][0]
else:
return tck
def bisplev(x,y,tck,dx=0,dy=0):
"""
Evaluate a bivariate B-spline and its derivatives.
Return a rank-2 array of spline function values (or spline derivative
values) at points given by the cross-product of the rank-1 arrays `x` and
`y`. In special cases, return an array or just a float if either `x` or
`y` or both are floats. Based on BISPEV from FITPACK.
Parameters
----------
x, y : ndarray
Rank-1 arrays specifying the domain over which to evaluate the
spline or its derivative.
tck : tuple
A sequence of length 5 returned by `bisplrep` containing the knot
locations, the coefficients, and the degree of the spline:
[tx, ty, c, kx, ky].
dx, dy : int, optional
The orders of the partial derivatives in `x` and `y` respectively.
Returns
-------
vals : ndarray
The B-spline or its derivative evaluated over the set formed by
the cross-product of `x` and `y`.
See Also
--------
splprep, splrep, splint, sproot, splev
UnivariateSpline, BivariateSpline
Notes
-----
See `bisplrep` to generate the `tck` representation.
References
----------
.. [1] Dierckx P. : An algorithm for surface fitting
with spline functions
Ima J. Numer. Anal. 1 (1981) 267-283.
.. [2] Dierckx P. : An algorithm for surface fitting
with spline functions
report tw50, Dept. Computer Science,K.U.Leuven, 1980.
.. [3] Dierckx P. : Curve and surface fitting with splines,
Monographs on Numerical Analysis, Oxford University Press, 1993.
"""
tx,ty,c,kx,ky = tck
if not (0 <= dx < kx):
raise ValueError("0 <= dx = %d < kx = %d must hold" % (dx,kx))
if not (0 <= dy < ky):
raise ValueError("0 <= dy = %d < ky = %d must hold" % (dy,ky))
x,y = map(myasarray,[x,y])
if (len(x.shape) != 1) or (len(y.shape) != 1):
raise ValueError("First two entries should be rank-1 arrays.")
z,ier = _fitpack._bispev(tx,ty,c,kx,ky,x,y,dx,dy)
if ier == 10:
raise ValueError("Invalid input data")
if ier:
raise TypeError("An error occurred")
z.shape = len(x),len(y)
if len(z) > 1:
return z
if len(z[0]) > 1:
return z[0]
return z[0][0]
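# Illustrative usage sketch (not part of the original module): fit scattered
# samples of z = cos(x)*sin(y) with bisplrep and evaluate the surface on a
# coarse grid with bisplev. The helper name and sample layout are assumptions.
def _example_bisplev_usage():
    xs = arange(0, 3.0, 0.25)
    x_pts = array([xi for xi in xs for _ in xs])   # flattened grid coordinates
    y_pts = array([yi for _ in xs for yi in xs])
    z_pts = cos(x_pts) * sin(y_pts)
    tck = bisplrep(x_pts, y_pts, z_pts, s=0.1)
    return bisplev(arange(0, 3.0, 0.5), arange(0, 3.0, 0.5), tck)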
def dblint(xa,xb,ya,yb,tck):
"""Evaluate the integral of a spline over area [xa,xb] x [ya,yb].
Parameters
----------
xa, xb : float
The end-points of the x integration interval.
ya, yb : float
The end-points of the y integration interval.
tck : list [tx, ty, c, kx, ky]
A sequence of length 5 returned by bisplrep containing the knot
locations tx, ty, the coefficients c, and the degrees kx, ky
of the spline.
Returns
-------
integ : float
The value of the resulting integral.
"""
tx,ty,c,kx,ky = tck
return dfitpack.dblint(tx,ty,c,kx,ky,xa,xb,ya,yb)
def insert(x,tck,m=1,per=0):
"""
Insert knots into a B-spline.
Given the knots and coefficients of a B-spline representation, create a
new B-spline with a knot inserted `m` times at point `x`.
This is a wrapper around the FORTRAN routine insert of FITPACK.
Parameters
----------
x (u) : array_like
A 1-D point at which to insert a new knot(s). If `tck` was returned
from ``splprep``, then the parameter values, u should be given.
tck : tuple
A tuple (t,c,k) returned by ``splrep`` or ``splprep`` containing
the vector of knots, the B-spline coefficients,
and the degree of the spline.
m : int, optional
The number of times to insert the given knot (its multiplicity).
Default is 1.
per : int, optional
If non-zero, the input spline is considered periodic.
Returns
-------
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the new spline.
``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline.
In case of a periodic spline (``per != 0``) there must be
either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x``
or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``.
Notes
-----
Based on algorithms from [1]_ and [2]_.
References
----------
.. [1] W. Boehm, "Inserting new knots into b-spline curves.",
Computer Aided Design, 12, p.199-201, 1980.
.. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on
Numerical Analysis", Oxford University Press, 1993.
"""
t,c,k = tck
try:
c[0][0]
parametric = True
except:
parametric = False
if parametric:
cc = []
for c_vals in c:
tt, cc_val, kk = insert(x, [t, c_vals, k], m)
cc.append(cc_val)
return (tt, cc, kk)
else:
tt, cc, ier = _fitpack._insert(per, t, c, k, x, m)
if ier == 10:
raise ValueError("Invalid input data")
if ier:
raise TypeError("An error occurred")
return (tt, cc, k)
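# Illustrative usage sketch (not part of the original module): insert one extra
# knot into an existing spline representation without changing its shape. The
# helper name is an assumption.
def _example_insert_usage():
    x_pts = arange(0, 10, 0.5)
    tck = splrep(x_pts, sin(x_pts))
    return insert(5.25, tck)   # same curve, one additional knot at x=5.25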
# ===== the-stack_0_9161 =====
# Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.
# Python
#import urlparse
import logging
# Django
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ObjectDoesNotExist
#from django import settings as tower_settings
# AWX
from awx.api.versioning import reverse
from awx.main.models import prevent_search, UnifiedJobTemplate, UnifiedJob
from awx.main.models.notifications import (
NotificationTemplate,
JobNotificationMixin
)
from awx.main.models.base import BaseModel, CreatedModifiedModel, VarsDictProperty
from awx.main.models.rbac import (
ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
ROLE_SINGLETON_SYSTEM_AUDITOR
)
from awx.main.fields import ImplicitRoleField, AskForField
from awx.main.models.mixins import (
ResourceMixin,
SurveyJobTemplateMixin,
SurveyJobMixin,
RelatedJobsMixin,
)
from awx.main.models.jobs import LaunchTimeConfigBase, LaunchTimeConfig, JobTemplate
from awx.main.models.credential import Credential
from awx.main.redact import REPLACE_STR
from awx.main.fields import JSONField
from copy import copy
from urlparse import urljoin
__all__ = ['WorkflowJobTemplate', 'WorkflowJob', 'WorkflowJobOptions', 'WorkflowJobNode', 'WorkflowJobTemplateNode',]
logger = logging.getLogger('awx.main.models.workflow')
class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig):
class Meta:
abstract = True
app_label = 'main'
success_nodes = models.ManyToManyField(
'self',
blank=True,
symmetrical=False,
related_name='%(class)ss_success',
)
failure_nodes = models.ManyToManyField(
'self',
blank=True,
symmetrical=False,
related_name='%(class)ss_failure',
)
always_nodes = models.ManyToManyField(
'self',
blank=True,
symmetrical=False,
related_name='%(class)ss_always',
)
unified_job_template = models.ForeignKey(
'UnifiedJobTemplate',
related_name='%(class)ss',
blank=False,
null=True,
default=None,
on_delete=models.SET_NULL,
)
def get_parent_nodes(self):
'''Returns queryset containing all parents of this node'''
success_parents = getattr(self, '%ss_success' % self.__class__.__name__.lower()).all()
failure_parents = getattr(self, '%ss_failure' % self.__class__.__name__.lower()).all()
always_parents = getattr(self, '%ss_always' % self.__class__.__name__.lower()).all()
return (success_parents | failure_parents | always_parents).order_by('id')
@classmethod
def _get_workflow_job_field_names(cls):
'''
Return field names that should be copied from template node to job node.
'''
return ['workflow_job', 'unified_job_template',
'extra_data', 'survey_passwords',
'inventory', 'credentials', 'char_prompts']
def create_workflow_job_node(self, **kwargs):
'''
Create a new workflow job node based on this workflow node.
'''
create_kwargs = {}
for field_name in self._get_workflow_job_field_names():
if field_name == 'credentials':
continue
if field_name in kwargs:
create_kwargs[field_name] = kwargs[field_name]
elif hasattr(self, field_name):
create_kwargs[field_name] = getattr(self, field_name)
new_node = WorkflowJobNode.objects.create(**create_kwargs)
if self.pk:
allowed_creds = self.credentials.all()
else:
allowed_creds = []
for cred in allowed_creds:
new_node.credentials.add(cred)
return new_node
class WorkflowJobTemplateNode(WorkflowNodeBase):
FIELDS_TO_PRESERVE_AT_COPY = [
'unified_job_template', 'workflow_job_template', 'success_nodes', 'failure_nodes',
'always_nodes', 'credentials', 'inventory', 'extra_data', 'survey_passwords',
'char_prompts'
]
REENCRYPTION_BLACKLIST_AT_COPY = ['extra_data', 'survey_passwords']
workflow_job_template = models.ForeignKey(
'WorkflowJobTemplate',
related_name='workflow_job_template_nodes',
on_delete=models.CASCADE,
)
def get_absolute_url(self, request=None):
return reverse('api:workflow_job_template_node_detail', kwargs={'pk': self.pk}, request=request)
def create_wfjt_node_copy(self, user, workflow_job_template=None):
'''
Copy this node to a new WFJT, leaving out related fields the user
is not allowed to access
'''
create_kwargs = {}
allowed_creds = []
for field_name in self._get_workflow_job_field_names():
if field_name == 'credentials':
for cred in self.credentials.all():
if user.can_access(Credential, 'use', cred):
allowed_creds.append(cred)
continue
item = getattr(self, field_name, None)
if item is None:
continue
if field_name == 'inventory':
if not user.can_access(item.__class__, 'use', item):
continue
if field_name in ['unified_job_template']:
if not user.can_access(item.__class__, 'start', item, validate_license=False):
continue
create_kwargs[field_name] = item
create_kwargs['workflow_job_template'] = workflow_job_template
new_node = self.__class__.objects.create(**create_kwargs)
for cred in allowed_creds:
new_node.credentials.add(cred)
return new_node
class WorkflowJobNode(WorkflowNodeBase):
job = models.OneToOneField(
'UnifiedJob',
related_name='unified_job_node',
blank=True,
null=True,
default=None,
on_delete=models.SET_NULL,
)
workflow_job = models.ForeignKey(
'WorkflowJob',
related_name='workflow_job_nodes',
blank=True,
null=True,
default=None,
on_delete=models.CASCADE,
)
ancestor_artifacts = JSONField(
blank=True,
default={},
editable=False,
)
do_not_run = models.BooleanField(
default=False,
help_text=_("Indidcates that a job will not be created when True. Workflow runtime "
"semantics will mark this True if the node is in a path that will "
"decidedly not be ran. A value of False means the node may not run."),
)
def get_absolute_url(self, request=None):
return reverse('api:workflow_job_node_detail', kwargs={'pk': self.pk}, request=request)
def prompts_dict(self, *args, **kwargs):
r = super(WorkflowJobNode, self).prompts_dict(*args, **kwargs)
# Explanation - WFJT extra_vars still break pattern, so they are not
# put through prompts processing, but inventory is only accepted
# if JT prompts for it, so it goes through this mechanism
if self.workflow_job and self.workflow_job.inventory_id:
# workflow job inventory takes precedence
r['inventory'] = self.workflow_job.inventory
return r
def get_job_kwargs(self):
'''
In advance of creating a new unified job as part of a workflow,
this method builds the attributes to use
It alters the node by saving its updated version of
ancestor_artifacts, making it available to subsequent nodes.
'''
# reject/accept prompted fields
data = {}
ujt_obj = self.unified_job_template
if ujt_obj is not None:
# MERGE note: move this to prompts_dict method on node when merging
# with the workflow inventory branch
prompts_data = self.prompts_dict()
if isinstance(ujt_obj, WorkflowJobTemplate):
if self.workflow_job.extra_vars:
prompts_data.setdefault('extra_vars', {})
prompts_data['extra_vars'].update(self.workflow_job.extra_vars_dict)
accepted_fields, ignored_fields, errors = ujt_obj._accept_or_ignore_job_kwargs(**prompts_data)
if errors:
logger.info(_('Bad launch configuration starting template {template_pk} as part of '
'workflow {workflow_pk}. Errors:\n{error_text}').format(
template_pk=ujt_obj.pk,
workflow_pk=self.pk,
error_text=errors))
data.update(accepted_fields) # missing fields are handled in the scheduler
try:
# config saved on the workflow job itself
wj_config = self.workflow_job.launch_config
except ObjectDoesNotExist:
wj_config = None
if wj_config:
accepted_fields, ignored_fields, errors = ujt_obj._accept_or_ignore_job_kwargs(**wj_config.prompts_dict())
accepted_fields.pop('extra_vars', None) # merge handled with other extra_vars later
data.update(accepted_fields)
# build ancestor artifacts, save them to node model for later
aa_dict = {}
is_root_node = True
for parent_node in self.get_parent_nodes():
is_root_node = False
aa_dict.update(parent_node.ancestor_artifacts)
if parent_node.job and hasattr(parent_node.job, 'artifacts'):
aa_dict.update(parent_node.job.artifacts)
if aa_dict and not is_root_node:
self.ancestor_artifacts = aa_dict
self.save(update_fields=['ancestor_artifacts'])
# process password list
password_dict = {}
if '_ansible_no_log' in aa_dict:
for key in aa_dict:
if key != '_ansible_no_log':
password_dict[key] = REPLACE_STR
if self.workflow_job.survey_passwords:
password_dict.update(self.workflow_job.survey_passwords)
if self.survey_passwords:
password_dict.update(self.survey_passwords)
if password_dict:
data['survey_passwords'] = password_dict
# process extra_vars
extra_vars = data.get('extra_vars', {})
if ujt_obj and isinstance(ujt_obj, (JobTemplate, WorkflowJobTemplate)):
if aa_dict:
functional_aa_dict = copy(aa_dict)
functional_aa_dict.pop('_ansible_no_log', None)
extra_vars.update(functional_aa_dict)
if ujt_obj and isinstance(ujt_obj, JobTemplate):
# Workflow Job extra_vars higher precedence than ancestor artifacts
if self.workflow_job and self.workflow_job.extra_vars:
extra_vars.update(self.workflow_job.extra_vars_dict)
if extra_vars:
data['extra_vars'] = extra_vars
# ensure that unified jobs created by WorkflowJobs are marked
data['_eager_fields'] = {'launch_type': 'workflow'}
# Extra processing in the case that this is a slice job
if 'job_slice' in self.ancestor_artifacts and is_root_node:
data['_eager_fields']['allow_simultaneous'] = True
data['_eager_fields']['job_slice_number'] = self.ancestor_artifacts['job_slice']
data['_eager_fields']['job_slice_count'] = self.workflow_job.workflow_job_nodes.count()
data['_prevent_slicing'] = True
return data
class WorkflowJobOptions(BaseModel):
class Meta:
abstract = True
extra_vars = prevent_search(models.TextField(
blank=True,
default='',
))
allow_simultaneous = models.BooleanField(
default=False
)
extra_vars_dict = VarsDictProperty('extra_vars', True)
@property
def workflow_nodes(self):
raise NotImplementedError()
@classmethod
def _get_unified_job_field_names(cls):
return set(f.name for f in WorkflowJobOptions._meta.fields) | set(
# NOTE: if other prompts are added to WFJT, put fields in WJOptions, remove inventory
['name', 'description', 'schedule', 'survey_passwords', 'labels', 'inventory']
)
def _create_workflow_nodes(self, old_node_list, user=None):
node_links = {}
for old_node in old_node_list:
if user:
new_node = old_node.create_wfjt_node_copy(user, workflow_job_template=self)
else:
new_node = old_node.create_workflow_job_node(workflow_job=self)
node_links[old_node.pk] = new_node
return node_links
def _inherit_node_relationships(self, old_node_list, node_links):
for old_node in old_node_list:
new_node = node_links[old_node.pk]
for relationship in ['always_nodes', 'success_nodes', 'failure_nodes']:
old_manager = getattr(old_node, relationship)
for old_child_node in old_manager.all():
new_child_node = node_links[old_child_node.pk]
new_manager = getattr(new_node, relationship)
new_manager.add(new_child_node)
def copy_nodes_from_original(self, original=None, user=None):
old_node_list = original.workflow_nodes.prefetch_related('always_nodes', 'success_nodes', 'failure_nodes').all()
node_links = self._create_workflow_nodes(old_node_list, user=user)
self._inherit_node_relationships(old_node_list, node_links)
def create_relaunch_workflow_job(self):
new_workflow_job = self.copy_unified_job()
if self.unified_job_template_id is None:
new_workflow_job.copy_nodes_from_original(original=self)
return new_workflow_job
class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTemplateMixin, ResourceMixin, RelatedJobsMixin):
SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name', 'organization')]
FIELDS_TO_PRESERVE_AT_COPY = [
'labels', 'instance_groups', 'workflow_job_template_nodes', 'credentials', 'survey_spec'
]
class Meta:
app_label = 'main'
organization = models.ForeignKey(
'Organization',
blank=True,
null=True,
on_delete=models.SET_NULL,
related_name='workflows',
)
inventory = models.ForeignKey(
'Inventory',
related_name='%(class)ss',
blank=True,
null=True,
default=None,
on_delete=models.SET_NULL,
help_text=_('Inventory applied to all job templates in workflow that prompt for inventory.'),
)
ask_inventory_on_launch = AskForField(
blank=True,
default=False,
)
admin_role = ImplicitRoleField(parent_role=[
'singleton:' + ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
'organization.workflow_admin_role'
])
execute_role = ImplicitRoleField(parent_role=[
'admin_role',
'organization.execute_role',
])
read_role = ImplicitRoleField(parent_role=[
'singleton:' + ROLE_SINGLETON_SYSTEM_AUDITOR,
'organization.auditor_role', 'execute_role', 'admin_role'
])
@property
def workflow_nodes(self):
return self.workflow_job_template_nodes
@classmethod
def _get_unified_job_class(cls):
return WorkflowJob
@classmethod
def _get_unified_jt_copy_names(cls):
base_list = super(WorkflowJobTemplate, cls)._get_unified_jt_copy_names()
base_list.remove('labels')
return (base_list |
set(['survey_spec', 'survey_enabled', 'ask_variables_on_launch', 'organization']))
def get_absolute_url(self, request=None):
return reverse('api:workflow_job_template_detail', kwargs={'pk': self.pk}, request=request)
@property
def cache_timeout_blocked(self):
# TODO: don't allow running of job template if same workflow template running
return False
@property
def notification_templates(self):
base_notification_templates = NotificationTemplate.objects.all()
error_notification_templates = list(base_notification_templates
.filter(unifiedjobtemplate_notification_templates_for_errors__in=[self]))
success_notification_templates = list(base_notification_templates
.filter(unifiedjobtemplate_notification_templates_for_success__in=[self]))
any_notification_templates = list(base_notification_templates
.filter(unifiedjobtemplate_notification_templates_for_any__in=[self]))
return dict(error=list(error_notification_templates),
success=list(success_notification_templates),
any=list(any_notification_templates))
def create_unified_job(self, **kwargs):
workflow_job = super(WorkflowJobTemplate, self).create_unified_job(**kwargs)
workflow_job.copy_nodes_from_original(original=self)
return workflow_job
def _accept_or_ignore_job_kwargs(self, **kwargs):
exclude_errors = kwargs.pop('_exclude_errors', [])
prompted_data = {}
rejected_data = {}
errors_dict = {}
# Handle all the fields that have prompting rules
# NOTE: If WFJTs prompt for other things, this logic can be combined with jobs
for field_name, ask_field_name in self.get_ask_mapping().items():
if field_name == 'extra_vars':
accepted_vars, rejected_vars, vars_errors = self.accept_or_ignore_variables(
kwargs.get('extra_vars', {}),
_exclude_errors=exclude_errors,
extra_passwords=kwargs.get('survey_passwords', {}))
if accepted_vars:
prompted_data['extra_vars'] = accepted_vars
if rejected_vars:
rejected_data['extra_vars'] = rejected_vars
errors_dict.update(vars_errors)
continue
if field_name not in kwargs:
continue
new_value = kwargs[field_name]
old_value = getattr(self, field_name)
if new_value == old_value:
continue # no-op case: Counted as neither accepted or ignored
elif getattr(self, ask_field_name):
# accepted prompt
prompted_data[field_name] = new_value
else:
# unprompted - template is not configured to accept field on launch
rejected_data[field_name] = new_value
# Not considered an error for manual launch, to support old
# behavior of putting them in ignored_fields and launching anyway
if 'prompts' not in exclude_errors:
errors_dict[field_name] = _('Field is not configured to prompt on launch.').format(field_name=field_name)
return prompted_data, rejected_data, errors_dict
def can_start_without_user_input(self):
return not bool(self.variables_needed_to_start)
def node_templates_missing(self):
return [node.pk for node in self.workflow_job_template_nodes.filter(
unified_job_template__isnull=True).all()]
def node_prompts_rejected(self):
node_list = []
for node in self.workflow_job_template_nodes.prefetch_related('unified_job_template').all():
ujt_obj = node.unified_job_template
if ujt_obj is None:
continue
prompts_dict = node.prompts_dict()
accepted_fields, ignored_fields, prompts_errors = ujt_obj._accept_or_ignore_job_kwargs(**prompts_dict)
if prompts_errors:
node_list.append(node.pk)
return node_list
'''
RelatedJobsMixin
'''
def _get_related_jobs(self):
return WorkflowJob.objects.filter(workflow_job_template=self)
class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificationMixin, LaunchTimeConfigBase):
class Meta:
app_label = 'main'
ordering = ('id',)
workflow_job_template = models.ForeignKey(
'WorkflowJobTemplate',
related_name='workflow_jobs',
blank=True,
null=True,
default=None,
on_delete=models.SET_NULL,
)
job_template = models.ForeignKey(
'JobTemplate',
related_name='slice_workflow_jobs',
blank=True,
null=True,
default=None,
on_delete=models.SET_NULL,
help_text=_("If automatically created for a sliced job run, the job template "
"the workflow job was created from."),
)
is_sliced_job = models.BooleanField(
default=False
)
@property
def workflow_nodes(self):
return self.workflow_job_nodes
def _get_parent_field_name(self):
if self.job_template_id:
# This is a workflow job which is a container for slice jobs
return 'job_template'
return 'workflow_job_template'
@classmethod
def _get_unified_job_template_class(cls):
return WorkflowJobTemplate
def socketio_emit_data(self):
return {}
def get_absolute_url(self, request=None):
return reverse('api:workflow_job_detail', kwargs={'pk': self.pk}, request=request)
def get_ui_url(self):
return urljoin(settings.TOWER_URL_BASE, '/#/workflows/{}'.format(self.pk))
def notification_data(self):
result = super(WorkflowJob, self).notification_data()
str_arr = ['Workflow job summary:', '']
for node in self.workflow_job_nodes.all().select_related('job'):
if node.job is None:
node_job_description = 'no job.'
else:
node_job_description = ('job #{0}, "{1}", which finished with status {2}.'
.format(node.job.id, node.job.name, node.job.status))
str_arr.append("- node #{0} spawns {1}".format(node.id, node_job_description))
result['body'] = '\n'.join(str_arr)
return result
@property
def task_impact(self):
return 0
def get_ancestor_workflows(self):
"""Returns a list of WFJTs that are indirect parents of this workflow job
say WFJTs are set up to spawn in order of A->B->C, and this workflow job
came from C, then C is the parent and [B, A] will be returned from this.
"""
ancestors = []
wj_ids = set([self.pk])
wj = self.get_workflow_job()
while wj and wj.workflow_job_template_id:
if wj.pk in wj_ids:
logger.critical('Cycles detected in the workflow jobs graph, '
'this is not normal and suggests task manager degeneracy.')
break
wj_ids.add(wj.pk)
ancestors.append(wj.workflow_job_template)
wj = wj.get_workflow_job()
return ancestors
def get_notification_templates(self):
return self.workflow_job_template.notification_templates
def get_notification_friendly_name(self):
return "Workflow Job"
@property
def preferred_instance_groups(self):
return []
@property
def actually_running(self):
# WorkflowJobs don't _actually_ run anything in the dispatcher, so
# there's no point in asking the dispatcher if it knows about this task
return self.status == 'running'
# ===== the-stack_0_9162 =====
from bareosdir import *
from bareos_dir_consts import *
def load_bareos_plugin(context):
DebugMessage(context, 100, "load_bareos_plugin called\n");
events = [];
events.append(bDirEventType['bDirEventJobStart']);
events.append(bDirEventType['bDirEventJobEnd']);
events.append(bDirEventType['bDirEventJobInit']);
events.append(bDirEventType['bDirEventJobRun']);
RegisterEvents(context, events);
return bRCs['bRC_OK'];
def handle_plugin_event(context, event):
if event == bDirEventType['bDirEventJobStart']:
DebugMessage(context, 100, "bDirEventJobStart event triggered\n");
jobname = GetValue(context, brDirVariable['bDirVarJobName']);
DebugMessage(context, 100, "Job " + jobname + " starting\n");
elif event == bDirEventType['bDirEventJobEnd']:
DebugMessage(context, 100, "bDirEventJobEnd event triggered\n");
jobname = GetValue(context, brDirVariable['bDirVarJobName']);
DebugMessage(context, 100, "Job " + jobname + " stopped\n");
elif event == bDirEventType['bDirEventJobInit']:
DebugMessage(context, 100, "bDirEventJobInit event triggered\n");
elif event == bDirEventType['bDirEventJobRun']:
DebugMessage(context, 100, "bDirEventJobRun event triggered\n");
return bRCs['bRC_OK'];
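# Deployment note (an assumption, not taken from this file): a director-side
# Python plugin like this one is typically enabled in the Director resource of
# bareos-dir.conf, roughly along the lines of:
#   Plugin Directory = /usr/lib/bareos/plugins
#   Plugin Names = "python"
# The exact directive names and path depend on the Bareos version and packaging.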
# ===== the-stack_0_9163 =====
################################################################################
# BSD LICENSE
#
# Copyright(c) 2019 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
################################################################################
"""
REST API module
"""
import json
import multiprocessing
import os
import signal
import ssl
from copy import deepcopy
from time import sleep
from flask import Flask
from flask_httpauth import HTTPBasicAuth
from flask_restful import Api, Resource, request
from werkzeug.exceptions import HTTPException
import jsonschema
import caps
import common
import log
import pid_ops
import sstbf
from config import ConfigStore
from stats import StatsStore
TLS_CERT_FILE = 'appqos.crt'
TLS_KEY_FILE = 'appqos.key'
class RestError(HTTPException):
"""
RestError exception base class
"""
def __init__(self, code, description):
HTTPException.__init__(self)
self.code = code
self.description = description
class NotFound(RestError):
"""
NotFound exception
"""
def __init__(self, description="Not Found"):
RestError.__init__(self, 404, description)
class BadRequest(RestError):
"""
BadRequest exception
"""
def __init__(self, description="BadRequest"):
RestError.__init__(self, 400, description)
class InternalError(RestError):
"""
InternalError exception
"""
def __init__(self, description="Internal Server Error"):
RestError.__init__(self, 500, description)
class Server:
"""
REST API server
"""
auth = HTTPBasicAuth()
def __init__(self):
self.process = None
self.app = Flask(__name__)
self.app.config['MAX_CONTENT_LENGTH'] = 2 * 1024
self.api = Api(self.app)
# initialize SSL context
self.context = ssl.SSLContext(ssl.PROTOCOL_TLS)
# allow TLS 1.2 and later
self.context.options |= ssl.OP_NO_SSLv2
self.context.options |= ssl.OP_NO_SSLv3
self.context.options |= ssl.OP_NO_TLSv1
self.context.options |= ssl.OP_NO_TLSv1_1
self.api.add_resource(Apps, '/apps')
self.api.add_resource(App, '/apps/<app_id>')
self.api.add_resource(Pools, '/pools')
self.api.add_resource(Pool, '/pools/<pool_id>')
self.api.add_resource(Stats, '/stats')
self.api.add_resource(Caps, '/caps')
if caps.sstbf_enabled():
self.api.add_resource(Sstbf, '/caps/sstbf')
self.api.add_resource(Reset, '/reset')
self.app.register_error_handler(RestError, Server.error_handler)
def start(self, host, port, debug=False):
"""
Start REST server
Parameters:
host: address to bind to
port: port to bind to
debug(bool): Debug flag
Returns:
0 on success
"""
try:
# check for file existence and type
with open(TLS_CERT_FILE, opener=common.check_link):
pass
with open(TLS_KEY_FILE, opener=common.check_link):
pass
self.context.load_cert_chain(TLS_CERT_FILE, TLS_KEY_FILE)
except (FileNotFoundError, PermissionError) as ex:
log.error("SSL cert or key file, {}".format(str(ex)))
return -1
self.process = multiprocessing.Process(target=self.app.run,
kwargs={'host': host,
'port': port,
'ssl_context': self.context,
'debug': debug,
'use_reloader': False,
'processes': 1})
self.process.start()
return 0
def terminate(self):
"""
Terminates server
"""
os.kill(self.process.pid, signal.SIGINT)
sleep(1)
if self.process.is_alive():
self.process.terminate()
self.process.join()
@staticmethod
def error_handler(error):
"""
Error handler
Parameters:
error: error
"""
common.STATS_STORE.general_stats_inc_num_err()
        response = {'message': error.description}
return json.dumps(response), error.code
@staticmethod
@auth.verify_password
def verify(username, password):
"""
Authenticate user, HTTP Basic Auth
Parameters:
username: Username
password: Password
Returns:
Authentication result (bool)
"""
if not (username and password):
common.STATS_STORE.general_stats_inc_num_invalid_access()
return False
if 'auth' in common.CONFIG_STORE.get_config():
if username == common.CONFIG_STORE.get_config()['auth']['username'] and \
password == common.CONFIG_STORE.get_config()['auth']['password']:
return True
common.STATS_STORE.general_stats_inc_num_invalid_access()
return False
def validate_str_int(string):
"""
Check if string is valid integer
Parameters:
string: string to be validated
"""
try:
int(string)
except ValueError:
return False
return True
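# Added illustration (not part of the original module): validate_str_int() only guards
# the int() conversion used by the resources below, so integer-like strings pass and
# anything else is rejected with a 400 by the calling endpoints.
assert validate_str_int("12") and validate_str_int("-3")
assert not validate_str_int("1.5") and not validate_str_int("abc")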
class App(Resource):
"""
Handle /apps/<app_id> HTTP requests
"""
@staticmethod
@Server.auth.login_required
def get(app_id):
"""
Handles HTTP GET /apps/<app_id> request.
Retrieve single app
Raises NotFound, BadRequest
Parameters:
app_id: Id of app to retrieve
Returns:
response, status code
"""
if not validate_str_int(app_id):
raise BadRequest("APP index {} is invalid.".format(str(app_id)))
data = common.CONFIG_STORE.get_config()
if 'apps' not in data:
raise NotFound("No apps in config file")
try:
app = common.CONFIG_STORE.get_app(data, int(app_id))
app['pool_id'] = common.CONFIG_STORE.app_to_pool(int(app_id))
except:
raise NotFound("APP " + str(app_id) + " not found in config")
return app, 200
@staticmethod
@Server.auth.login_required
def delete(app_id):
"""
Handles HTTP DELETE /apps/<app_id> request.
Deletes single App
Raises NotFound, BadRequest
Parameters:
app_id: Id of app to delete
Returns:
response, status code
"""
if not validate_str_int(app_id):
raise BadRequest("APP index {} is invalid.".format(str(app_id)))
data = deepcopy(common.CONFIG_STORE.get_config())
if 'apps' not in data or 'pools' not in data:
raise NotFound("No apps or pools in config file")
for app in data['apps']:
if app['id'] != int(app_id):
continue
# remove app id from pool
for pool in data['pools']:
if 'apps' not in pool:
continue
if app['id'] in pool['apps']:
pool['apps'].remove(app['id'])
break
# remove app
data['apps'].remove(app)
common.CONFIG_STORE.set_config(data)
res = {'message': "APP " + str(app_id) + " deleted"}
return res, 200
raise NotFound("APP " + str(app_id) + " not found in config")
@staticmethod
@Server.auth.login_required
def put(app_id):
# pylint: disable=too-many-branches
"""
Handles HTTP PUT /apps/<app_id> request.
Modifies an App (e.g.: moves to different pool)
Raises NotFound, BadRequest
Parameters:
app_id: Id of app to modify
Returns:
response, status code
"""
if not validate_str_int(app_id):
raise BadRequest("APP index {} is invalid.".format(str(app_id)))
json_data = request.get_json()
# validate app schema
try:
schema, resolver = ConfigStore.load_json_schema('modify_app.json')
jsonschema.validate(json_data, schema, resolver=resolver)
except jsonschema.ValidationError as error:
raise BadRequest("Request validation failed - %s" % (str(error)))
data = deepcopy(common.CONFIG_STORE.get_config())
if 'apps' not in data or 'pools' not in data:
raise NotFound("No apps or pools in config file")
# move to another pool
for app in data['apps']:
if app['id'] != int(app_id):
continue
if 'pool_id' in json_data:
pool_id = json_data['pool_id']
# remove app id from pool
for pool in data['pools']:
if 'apps' in pool:
if app['id'] in pool['apps']:
pool['apps'].remove(app['id'])
break
# add app id to new pool
for pool in data['pools']:
if pool['id'] == int(pool_id):
if not 'apps' in pool:
pool['apps'] = []
pool['apps'].append(app['id'])
break
# set new cores
if 'cores' in json_data:
app['cores'] = json_data['cores']
# set new name
if 'name' in json_data:
app['name'] = json_data['name']
# set new PIDs
if 'pids' in json_data:
app['pids'] = json_data['pids']
try:
common.CONFIG_STORE.validate(data)
except Exception as ex:
raise BadRequest("APP " + str(app_id) + " not updated, " + str(ex))
else:
common.CONFIG_STORE.set_config(data)
if 'pool_id' in json_data:
common.STATS_STORE.general_stats_inc_apps_moves()
res = {'message': "APP " + str(app_id) + " updated"}
return res, 200
raise NotFound("APP " + str(app_id) + " not found in config")
class Apps(Resource):
"""
Handles /apps HTTP requests
"""
@staticmethod
@Server.auth.login_required
def get():
"""
Handles HTTP GET /apps request.
Get all Apps
Raises NotFound
Returns:
response, status code
"""
data = common.CONFIG_STORE.get_config()
if 'apps' not in data or not data['apps']:
raise NotFound("No apps in config file")
apps = data['apps']
for app in apps:
app['pool_id'] = common.CONFIG_STORE.app_to_pool(app['id'])
return (data['apps']), 200
@staticmethod
@Server.auth.login_required
def post():
# pylint: disable=too-many-branches
"""
Handles HTTP POST /apps request.
Add a new App
Raises NotFound, BadRequest
Returns:
response, status code
"""
json_data = request.get_json()
# validate app schema
try:
schema, resolver = ConfigStore.load_json_schema('add_app.json')
jsonschema.validate(json_data, schema, resolver=resolver)
except jsonschema.ValidationError as error:
raise BadRequest("Request validation failed - %s" % (str(error)))
data = deepcopy(common.CONFIG_STORE.get_config())
if 'pools' not in data:
raise NotFound("No pools in config file")
json_data['id'] = common.CONFIG_STORE.get_new_app_id()
if 'pids' in json_data:
# validate pids
for pid in json_data['pids']:
if not pid_ops.is_pid_valid(pid):
raise BadRequest("New APP not added, invalid PID: " + str(pid))
# if pool_id not provided on app creation
if 'pool_id' not in json_data or not json_data['pool_id']:
json_data['pool_id'] = None
# if apps cores list is a subset of existing pool cores list,
# make existing pool a destination pool for app
if 'cores' in json_data and json_data['cores']:
for core in json_data['cores']:
if not common.PQOS_API.check_core(core):
raise BadRequest("New APP not added, invalid core: " + str(core))
for pool in data['pools']:
if set(json_data['cores']).issubset(pool['cores']):
json_data['pool_id'] = pool['id']
break
# if it is not, make default pool a destination pool
if json_data['pool_id'] is None:
json_data['pool_id'] = 0
if 'cores' in json_data:
json_data.pop('cores')
try:
pool = common.CONFIG_STORE.get_pool(data, json_data['pool_id'])
except Exception as ex:
raise BadRequest("New APP not added, " + str(ex))
# update pool configuration to include new app
if not 'apps' in pool:
pool['apps'] = []
pool['apps'].append(json_data['id'])
json_data.pop('pool_id')
data['apps'].append(json_data)
try:
common.CONFIG_STORE.validate(data)
except Exception as ex:
raise BadRequest("New APP not added, " + str(ex))
else:
common.CONFIG_STORE.set_config(data)
res = {
'id': json_data['id'],
'message': "New APP added to pool {}".format(str(pool['id']))
}
return res, 201
class Pool(Resource):
"""
Handles /pools/<pool_id> HTTP requests
"""
@staticmethod
@Server.auth.login_required
def get(pool_id):
"""
Handles HTTP GET /pools/<pool_id> request.
Retrieve single pool
Raises NotFound, BadRequest
Parameters:
pool_id: Id of pool to retrieve
Returns:
response, status code
"""
if not validate_str_int(pool_id):
raise BadRequest("POOL index {} is invalid.".format(str(pool_id)))
data = deepcopy(common.CONFIG_STORE.get_config())
if 'pools' not in data:
raise NotFound("No pools in config file")
try:
pool = common.CONFIG_STORE.get_pool(data, int(pool_id))
except:
raise NotFound("POOL " + str(pool_id) + " not found in config")
return pool, 200
@staticmethod
@Server.auth.login_required
def delete(pool_id):
"""
        Handles HTTP DELETE /pools/<pool_id> request.
Deletes single Pool
Raises NotFound, BadRequest
Parameters:
pool_id: Id of pool to delete
Returns:
response, status code
"""
if not validate_str_int(pool_id):
raise BadRequest("POOL index {} is invalid.".format(str(pool_id)))
data = deepcopy(common.CONFIG_STORE.get_config())
if 'pools' not in data:
raise NotFound("No pools in config file")
if int(pool_id) == 0:
raise BadRequest("POOL " + str(pool_id) + " is Default, cannot delete")
for pool in data['pools']:
if pool['id'] != int(pool_id):
continue
if 'apps' in pool and pool['apps']:
raise BadRequest("POOL " + str(pool_id) + " is not empty")
            # remove pool
data['pools'].remove(pool)
common.CONFIG_STORE.set_config(data)
res = {'message': "POOL " + str(pool_id) + " deleted"}
return res, 200
raise NotFound("POOL " + str(pool_id) + " not found in config")
@staticmethod
@Server.auth.login_required
def put(pool_id):
# pylint: disable=too-many-branches
"""
Handles HTTP PUT /pools/<pool_id> request.
Modifies a Pool
Raises NotFound, BadRequest
Parameters:
pool_id: Id of pool
Returns:
response, status code
"""
def check_alloc_tech(pool_id, json_data):
alloc_tech = []
if 'cbm' in json_data:
alloc_tech.append(common.CAT_CAP)
if 'mba' in json_data:
alloc_tech.append(common.MBA_CAP)
if not alloc_tech:
return True
return pool_id <= common.PQOS_API.get_max_cos_id(alloc_tech)
if not validate_str_int(pool_id):
raise BadRequest("POOL index {} is invalid.".format(str(pool_id)))
json_data = request.get_json()
        # validate pool schema
try:
schema, resolver = ConfigStore.load_json_schema('modify_pool.json')
jsonschema.validate(json_data, schema, resolver=resolver)
except jsonschema.ValidationError as error:
raise BadRequest("Request validation failed - %s" % (str(error)))
data = deepcopy(common.CONFIG_STORE.get_config())
if 'pools' not in data:
raise NotFound("No pools in config file")
for pool in data['pools']:
if pool['id'] != int(pool_id):
continue
if not check_alloc_tech(int(pool_id), json_data):
raise BadRequest("Pool {} does not support requested technologies!"\
.format(pool_id))
# set new cbm
if 'cbm' in json_data:
cbm = json_data['cbm']
if not isinstance(cbm, int):
cbm = int(cbm, 16)
pool['cbm'] = cbm
# set new mba
if 'mba' in json_data:
pool['mba'] = json_data['mba']
# set new cores
if 'cores' in json_data:
pool['cores'] = json_data['cores']
if 'apps' in pool and pool['apps']:
for app_id in pool['apps']:
for app in data['apps']:
if app['id'] != app_id or 'cores' not in app:
continue
if not set(app['cores']).issubset(pool['cores']):
app.pop('cores')
# set new name
if 'name' in json_data:
pool['name'] = json_data['name']
try:
common.CONFIG_STORE.validate(data)
except Exception as ex:
raise BadRequest("POOL " + str(pool_id) + " not updated, " + str(ex))
else:
common.CONFIG_STORE.set_config(data)
res = {'message': "POOL " + str(pool_id) + " updated"}
return res, 200
raise NotFound("POOL " + str(pool_id) + " not found in config")
class Pools(Resource):
"""
Handles /pools HTTP requests
"""
@staticmethod
@Server.auth.login_required
def get():
"""
Handles HTTP GET /pools request.
Retrieve all pools
Raises NotFound
Returns:
response, status code
"""
data = common.CONFIG_STORE.get_config().copy()
if 'pools' not in data:
raise NotFound("No pools in config file")
return data['pools'], 200
@staticmethod
@Server.auth.login_required
def post():
"""
Handles HTTP POST /pools request.
Add a new Pool
Raises NotFound, BadRequest, InternalError
Returns:
response, status code
"""
json_data = request.get_json()
# validate pool schema
try:
schema, resolver = ConfigStore.load_json_schema('add_pool.json')
jsonschema.validate(json_data, schema, resolver=resolver)
except jsonschema.ValidationError as error:
raise BadRequest("Request validation failed - %s" % (str(error)))
post_data = json_data.copy()
post_data['id'] = common.CONFIG_STORE.get_new_pool_id(post_data)
if post_data['id'] is None:
raise InternalError("New POOL not added, maximum number of POOLS"\
" reached for requested allocation combination")
# convert cbm from string to int
if 'cbm' in post_data:
cbm = post_data['cbm']
if not isinstance(cbm, int):
cbm = int(cbm, 16)
post_data['cbm'] = cbm
data = deepcopy(common.CONFIG_STORE.get_config())
data['pools'].append(post_data)
try:
common.CONFIG_STORE.validate(data)
except Exception as ex:
raise BadRequest("New POOL not added, " + str(ex))
else:
common.CONFIG_STORE.set_config(data)
res = {
'id': post_data['id'],
'message': "New POOL {} added".format(post_data['id'])
}
return res, 201
class Stats(Resource):
"""
Handles /stats HTTP requests
"""
@staticmethod
@Server.auth.login_required
def get():
"""
Handles HTTP GET /stats request.
Retrieve general stats
Returns:
response, status code
"""
res = {
'num_apps_moves': \
common.STATS_STORE.general_stats_get(StatsStore.General.NUM_APPS_MOVES),
'num_err': common.STATS_STORE.general_stats_get(StatsStore.General.NUM_ERR)
}
return res, 200
class Caps(Resource):
"""
Handles /caps HTTP requests
"""
@staticmethod
@Server.auth.login_required
def get():
"""
Handles HTTP GET /caps request.
Retrieve capabilities
Returns:
response, status code
"""
res = {'capabilities': caps.SYSTEM_CAPS}
return res, 200
class Sstbf(Resource):
"""
Handles /caps/sstbf HTTP requests
"""
@staticmethod
@Server.auth.login_required
def get():
"""
Handles HTTP GET /caps/sstbf request.
Retrieve SST-BF capabilities details
Returns:
response, status code
"""
res = {
'configured': sstbf.is_sstbf_configured(),
'hp_cores': sstbf.get_hp_cores(),
'std_cores': sstbf.get_std_cores()
}
return res, 200
@staticmethod
@Server.auth.login_required
def put():
"""
Handles HTTP PUT /caps/sstbf request.
Raises BadRequest, InternalError
Returns:
response, status code
"""
json_data = request.get_json()
        # validate sstbf schema
try:
schema, resolver = ConfigStore.load_json_schema('modify_sstbf.json')
jsonschema.validate(json_data, schema, resolver=resolver)
except jsonschema.ValidationError as error:
raise BadRequest("Request validation failed - %s" % (str(error)))
if not sstbf.configure_sstbf(json_data['configured']) == 0:
raise InternalError("Failed to change SST-BF configured state.")
res = {'message': "SST-BF caps modified"}
return res, 200
class Reset(Resource):
"""
Handles /reset HTTP requests
"""
@staticmethod
@Server.auth.login_required
def post():
"""
Handles HTTP POST /reset request.
Resets configuration, reloads config file
Returns:
response, status code
"""
common.CONFIG_STORE.reset()
res = {'message': "Reset performed. Configuration reloaded."}
return res, 200
|
the-stack_0_9164 | """
UI for Nexus Bus
"""
from tabulate import tabulate
import departure.commons.helpers as helpers
def list_stations(stops):
print(
tabulate(
[
[station[0], station[1]]
for station in sorted(stops.items(), key=lambda k: k[1])
],
headers=["id", "name"],
)
)
def list_services(services):
for i, service in enumerate(services):
print(
f"{helpers.ordinal_en(i + 1)} {service.number} "
f"{service.destination} ({service.operator}) "
f"{service.timeLiteral}"
)
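# Added usage sketch (hypothetical data): any object exposing number/destination/operator/
# timeLiteral attributes can be listed, e.g. a namedtuple built from parsed API results.
#
#   from collections import namedtuple
#   _Svc = namedtuple("Svc", "number destination operator timeLiteral")
#   list_services([_Svc("22", "Wansbeck Road", "Arriva", "5 mins")])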
|
the-stack_0_9169 | """Tests for applications API functionality"""
from decimal import Decimal
from django.core.files.uploadedfile import SimpleUploadedFile
import pytest
from mitol.common.utils import now_in_utc
from applications.api import (
get_or_create_bootcamp_application,
derive_application_state,
get_required_submission_type,
populate_interviews_in_jobma,
)
from applications.constants import (
AppStates,
REVIEW_STATUS_APPROVED,
REVIEW_STATUS_REJECTED,
SUBMISSION_QUIZ,
SUBMISSION_VIDEO,
)
from applications.factories import (
ApplicationStepFactory,
BootcampApplicationFactory,
BootcampRunApplicationStepFactory,
ApplicationStepSubmissionFactory,
)
from applications.models import ApplicationStepSubmission, VideoInterviewSubmission
from ecommerce.factories import LineFactory
from ecommerce.models import Order
from klasses.factories import BootcampRunFactory, InstallmentFactory
from jobma.factories import InterviewFactory, JobFactory
from jobma.models import Interview
from profiles.factories import ProfileFactory, UserFactory, LegalAddressFactory
pytestmark = pytest.mark.django_db
def test_derive_application_state():
"""derive_application_state should return the correct state based on the bootcamp application and related data"""
bootcamp_run = BootcampRunFactory.create()
installment = InstallmentFactory.create(
bootcamp_run=bootcamp_run, amount=Decimal("100")
)
run_steps = BootcampRunApplicationStepFactory.create_batch(
2, bootcamp_run=bootcamp_run
)
app = BootcampApplicationFactory.create(
bootcamp_run=bootcamp_run,
user__profile=None,
user__legal_address=None,
resume_file=None,
linkedin_url=None,
)
assert derive_application_state(app) == AppStates.AWAITING_PROFILE_COMPLETION.value
ProfileFactory.create(user=app.user)
app.refresh_from_db()
assert derive_application_state(app) == AppStates.AWAITING_PROFILE_COMPLETION.value
LegalAddressFactory.create(user=app.user)
app.refresh_from_db()
assert derive_application_state(app) == AppStates.AWAITING_RESUME.value
app.resume_file = SimpleUploadedFile("resume.txt", b"these are the file contents!")
app.save()
app.refresh_from_db()
assert derive_application_state(app) == AppStates.AWAITING_USER_SUBMISSIONS.value
# The resume requirement should be considered fulfilled if the user uploads a resume *or* provides a LinkedIn URL
app.resume_file = None
app.linkedin_url = "http://example.com/linkedin"
app.save()
app.refresh_from_db()
assert derive_application_state(app) == AppStates.AWAITING_USER_SUBMISSIONS.value
first_submission = ApplicationStepSubmissionFactory.create(
bootcamp_application=app, run_application_step=run_steps[0], is_pending=True
)
assert derive_application_state(app) == AppStates.AWAITING_SUBMISSION_REVIEW.value
first_submission.review_status = REVIEW_STATUS_APPROVED
first_submission.save()
# The user should only be allowed to pay after *all* of the required submissions have been reviewed
assert derive_application_state(app) == AppStates.AWAITING_USER_SUBMISSIONS.value
ApplicationStepSubmissionFactory.create(
bootcamp_application=app,
run_application_step=run_steps[1],
review_status=REVIEW_STATUS_APPROVED,
review_status_date=now_in_utc(),
)
assert derive_application_state(app) == AppStates.AWAITING_PAYMENT.value
LineFactory.create(
order__status=Order.FULFILLED,
order__user=app.user,
order__application=app,
order__total_price_paid=installment.amount,
bootcamp_run=app.bootcamp_run,
price=installment.amount,
)
app.refresh_from_db()
assert derive_application_state(app) == AppStates.COMPLETE.value
def test_derive_application_state_rejected():
"""derive_application_state should return the rejected state if any of the user's submissions were rejected"""
run_step = BootcampRunApplicationStepFactory.create()
app = BootcampApplicationFactory.create(
bootcamp_run=run_step.bootcamp_run,
resume_file=SimpleUploadedFile("resume.txt", b"these are the file contents!"),
)
ApplicationStepSubmissionFactory.create(
bootcamp_application=app,
run_application_step=run_step,
review_status=REVIEW_STATUS_REJECTED,
review_status_date=now_in_utc(),
)
assert derive_application_state(app) == AppStates.REJECTED.value
def test_get_or_create_bootcamp_application(mocker):
"""
get_or_create_bootcamp_application should fetch an existing bootcamp application, or create one with the \
application state set properly
"""
patched_derive_state = mocker.patch(
"applications.api.derive_application_state",
return_value=AppStates.COMPLETE.value,
)
users = UserFactory.create_batch(2)
bootcamp_runs = BootcampRunFactory.create_batch(2)
bootcamp_app, created = get_or_create_bootcamp_application(
bootcamp_run_id=bootcamp_runs[0].id, user=users[0]
)
patched_derive_state.assert_called_once_with(bootcamp_app)
assert bootcamp_app.bootcamp_run == bootcamp_runs[0]
assert bootcamp_app.user == users[0]
assert bootcamp_app.state == patched_derive_state.return_value
assert created is True
# The function should just return the existing application if one exists already
existing_app = BootcampApplicationFactory.create(
user=users[1], bootcamp_run=bootcamp_runs[1]
)
bootcamp_app, created = get_or_create_bootcamp_application(
bootcamp_run_id=bootcamp_runs[1].id, user=users[1]
)
assert bootcamp_app == existing_app
assert created is False
def test_get_or_create_bootcamp_application_for_alumni():
"""
get or create bootcamp application for alumni
"""
user = UserFactory.create()
user.profile.can_skip_application_steps = True
user.profile.save()
bootcamp_run = BootcampRunFactory.create()
bootcamp_run.allows_skipped_steps = True
bootcamp_run.save()
bootcamp_app, created = get_or_create_bootcamp_application(
bootcamp_run_id=bootcamp_run.id, user=user
)
assert bootcamp_app.bootcamp_run == bootcamp_run
assert bootcamp_app.user == user
assert bootcamp_app.state == AppStates.AWAITING_PAYMENT.value
assert created is True
def test_get_required_submission_type(awaiting_submission_app):
""" Test that get_required_submission_type returns the correct submission type"""
# New application for a bootcamp with no steps at all
stepless_app = BootcampApplicationFactory.create()
assert get_required_submission_type(stepless_app) is None
# The fixture has 2 steps (Video, Quiz) and first step has been submitted
assert (
get_required_submission_type(awaiting_submission_app.application)
== SUBMISSION_QUIZ
)
# After submitting all required steps, no type should be returned
ApplicationStepSubmissionFactory.create(
bootcamp_application=awaiting_submission_app.application,
run_application_step=awaiting_submission_app.run_steps[1],
)
assert get_required_submission_type(awaiting_submission_app.application) is None
@pytest.fixture
def application():
"""Application for a user"""
yield BootcampApplicationFactory.create()
@pytest.fixture
def job(application): # pylint: disable=redefined-outer-name
"""Make a job"""
yield JobFactory.create(run=application.bootcamp_run)
@pytest.mark.parametrize("interview_exists", [True, False])
@pytest.mark.parametrize("has_interview_link", [True, False])
def test_populate_interviews_in_jobma(
interview_exists, has_interview_link, mocker, application, job
): # pylint: disable=redefined-outer-name,too-many-arguments
"""
populate_interviews_in_jobma should create interviews on Jobma via REST API
for each relevant BootcampRunApplicationStep
"""
video_app_step = ApplicationStepFactory.create(
bootcamp=application.bootcamp_run.bootcamp, submission_type=SUBMISSION_VIDEO
)
# this step should be ignored since it's not a video
quiz_app_step = ApplicationStepFactory.create(
bootcamp=application.bootcamp_run.bootcamp, submission_type=SUBMISSION_QUIZ
)
for step in (video_app_step, quiz_app_step):
BootcampRunApplicationStepFactory.create(
bootcamp_run=application.bootcamp_run, application_step=step
)
new_interview_link = "http://fake.interview.link"
create_interview = mocker.patch(
"applications.api.create_interview_in_jobma", return_value=new_interview_link
)
if interview_exists:
interview = InterviewFactory.create(job=job, applicant=application.user)
if not has_interview_link:
interview.interview_url = None
interview.save()
populate_interviews_in_jobma(application)
# We should be able to run this repeatedly without creating duplicate objects in the database
populate_interviews_in_jobma(application)
if not interview_exists or not has_interview_link:
interview = Interview.objects.get(job=job, applicant=application.user)
create_interview.assert_any_call(interview)
assert create_interview.call_count == 2
video_submission = VideoInterviewSubmission.objects.get(interview=interview)
step_submission = ApplicationStepSubmission.objects.get()
assert step_submission.content_object == video_submission
else:
create_interview.assert_not_called()
|
the-stack_0_9171 | """Support for MQTT switches."""
from __future__ import annotations
import functools
import voluptuous as vol
from homeassistant.components import switch
from homeassistant.components.switch import DEVICE_CLASSES_SCHEMA, SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_DEVICE_CLASS,
CONF_NAME,
CONF_OPTIMISTIC,
CONF_PAYLOAD_OFF,
CONF_PAYLOAD_ON,
CONF_VALUE_TEMPLATE,
STATE_ON,
)
from homeassistant.core import HomeAssistant, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from . import MqttValueTemplate, subscription
from .. import mqtt
from .const import (
CONF_COMMAND_TOPIC,
CONF_ENCODING,
CONF_QOS,
CONF_RETAIN,
CONF_STATE_TOPIC,
PAYLOAD_NONE,
)
from .debug_info import log_messages
from .mixins import (
MQTT_ENTITY_COMMON_SCHEMA,
MqttEntity,
async_setup_entry_helper,
async_setup_platform_helper,
)
MQTT_SWITCH_ATTRIBUTES_BLOCKED = frozenset(
{
switch.ATTR_CURRENT_POWER_W,
switch.ATTR_TODAY_ENERGY_KWH,
}
)
DEFAULT_NAME = "MQTT Switch"
DEFAULT_PAYLOAD_ON = "ON"
DEFAULT_PAYLOAD_OFF = "OFF"
DEFAULT_OPTIMISTIC = False
CONF_STATE_ON = "state_on"
CONF_STATE_OFF = "state_off"
PLATFORM_SCHEMA = mqtt.MQTT_RW_PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
vol.Optional(CONF_PAYLOAD_OFF, default=DEFAULT_PAYLOAD_OFF): cv.string,
vol.Optional(CONF_PAYLOAD_ON, default=DEFAULT_PAYLOAD_ON): cv.string,
vol.Optional(CONF_STATE_OFF): cv.string,
vol.Optional(CONF_STATE_ON): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
}
).extend(MQTT_ENTITY_COMMON_SCHEMA.schema)
DISCOVERY_SCHEMA = PLATFORM_SCHEMA.extend({}, extra=vol.REMOVE_EXTRA)
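# Illustrative only (added, not part of the integration): a minimal configuration dict
# that PLATFORM_SCHEMA above is meant to accept once Home Assistant parses
# configuration.yaml. The topic names and entity name are hypothetical.
#
#   {
#       "platform": "mqtt",
#       "name": "Garage door",
#       "state_topic": "home/garage/door/state",
#       "command_topic": "home/garage/door/set",
#       "payload_on": "ON",
#       "payload_off": "OFF",
#       "optimistic": False,
#   }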
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up MQTT switch through configuration.yaml."""
await async_setup_platform_helper(
hass, switch.DOMAIN, config, async_add_entities, _async_setup_entity
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up MQTT switch dynamically through MQTT discovery."""
setup = functools.partial(
_async_setup_entity, hass, async_add_entities, config_entry=config_entry
)
await async_setup_entry_helper(hass, switch.DOMAIN, setup, DISCOVERY_SCHEMA)
async def _async_setup_entity(
hass, async_add_entities, config, config_entry=None, discovery_data=None
):
"""Set up the MQTT switch."""
async_add_entities([MqttSwitch(hass, config, config_entry, discovery_data)])
class MqttSwitch(MqttEntity, SwitchEntity, RestoreEntity):
"""Representation of a switch that can be toggled using MQTT."""
_entity_id_format = switch.ENTITY_ID_FORMAT
_attributes_extra_blocked = MQTT_SWITCH_ATTRIBUTES_BLOCKED
def __init__(self, hass, config, config_entry, discovery_data):
"""Initialize the MQTT switch."""
self._state = None
self._state_on = None
self._state_off = None
self._optimistic = None
MqttEntity.__init__(self, hass, config, config_entry, discovery_data)
@staticmethod
def config_schema():
"""Return the config schema."""
return DISCOVERY_SCHEMA
def _setup_from_config(self, config):
"""(Re)Setup the entity."""
state_on = config.get(CONF_STATE_ON)
self._state_on = state_on if state_on else config[CONF_PAYLOAD_ON]
state_off = config.get(CONF_STATE_OFF)
self._state_off = state_off if state_off else config[CONF_PAYLOAD_OFF]
self._optimistic = (
config[CONF_OPTIMISTIC] or config.get(CONF_STATE_TOPIC) is None
)
self._value_template = MqttValueTemplate(
self._config.get(CONF_VALUE_TEMPLATE), entity=self
).async_render_with_possible_json_value
def _prepare_subscribe_topics(self):
"""(Re)Subscribe to topics."""
@callback
@log_messages(self.hass, self.entity_id)
def state_message_received(msg):
"""Handle new MQTT state messages."""
payload = self._value_template(msg.payload)
if payload == self._state_on:
self._state = True
elif payload == self._state_off:
self._state = False
elif payload == PAYLOAD_NONE:
self._state = None
self.async_write_ha_state()
if self._config.get(CONF_STATE_TOPIC) is None:
# Force into optimistic mode.
self._optimistic = True
else:
self._sub_state = subscription.async_prepare_subscribe_topics(
self.hass,
self._sub_state,
{
CONF_STATE_TOPIC: {
"topic": self._config.get(CONF_STATE_TOPIC),
"msg_callback": state_message_received,
"qos": self._config[CONF_QOS],
"encoding": self._config[CONF_ENCODING] or None,
}
},
)
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
await subscription.async_subscribe_topics(self.hass, self._sub_state)
if self._optimistic and (last_state := await self.async_get_last_state()):
self._state = last_state.state == STATE_ON
@property
def is_on(self) -> bool | None:
"""Return true if device is on."""
return self._state
@property
def assumed_state(self):
"""Return true if we do optimistic updates."""
return self._optimistic
@property
def device_class(self) -> str | None:
"""Return the device class of the sensor."""
return self._config.get(CONF_DEVICE_CLASS)
async def async_turn_on(self, **kwargs):
"""Turn the device on.
This method is a coroutine.
"""
await self.async_publish(
self._config[CONF_COMMAND_TOPIC],
self._config[CONF_PAYLOAD_ON],
self._config[CONF_QOS],
self._config[CONF_RETAIN],
self._config[CONF_ENCODING],
)
if self._optimistic:
# Optimistically assume that switch has changed state.
self._state = True
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the device off.
This method is a coroutine.
"""
await self.async_publish(
self._config[CONF_COMMAND_TOPIC],
self._config[CONF_PAYLOAD_OFF],
self._config[CONF_QOS],
self._config[CONF_RETAIN],
self._config[CONF_ENCODING],
)
if self._optimistic:
# Optimistically assume that switch has changed state.
self._state = False
self.async_write_ha_state()
|
the-stack_0_9172 | # Copyright [2021] [Dylan Johnson]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
import math
# At the equivalence point (when the analyte has been completely neutralized), the pH of the solution is determined
# by the ionization of the salt in water
# E.G. pH of CH3COOH + NaOH -> CH3COO- Na+ + HOH @ equivalence is determined by:
# CH3COO- + HOH <--> CH3COOH + OH- (here Kb=[CH3COOH][OH-]/[CH3COO-] and Kb is calculated from the Ka of CH3COOH)
# Or HCl + NaOH -> NaCl + HOH @ equivalence is pH=7 (NaCl is strong base / strong acid; therefore neutral)
log = lambda x: math.log(x, 10)
switch = lambda k: 10 ** (-(14 - p(k)))
p = lambda x: -log(x)
p_quad = lambda a, b, c: ((-b + (b * b - 4 * a * c) ** 0.5) / (2 * a))
n_quad = lambda a, b, c: ((-b - (b * b - 4 * a * c) ** 0.5) / (2 * a))
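# Added sanity checks for the helpers above: p() is the -log10 operator, so p(1e-7) is the
# pH of a neutral solution, and switch() converts Ka <-> Kb through Kw = 1e-14.
assert round(p(1e-7), 6) == 7.0
assert abs(switch(1.8e-5) - 1e-14 / 1.8e-5) < 1e-18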
# --------------react----------------------
# Solves Stoichiometric neutralizations in the form: (Can be used for any reaction with 2 reactants and 1-2 products)
# Analyte + Titrant --> Salt + Water
# Or
# Analyte + Titrant --> Salt
# ratio = [1,1,1] or [1,1,1,1]
# NOTE: ALWAYS ASSUMES
# C1 -> [Analyte]
# C2 -> [Titrant]
# V1 -> Volume of Analyte
# V2 -> Volume of Titrant
# Unit -> "mL" or "L"
# Returns {'analyte': mols of Analyte remaining, 'titrant': mols of Titrant remaining, 'salt': mols of Salt remaining,
# 'V': volume of solution in L, 'titrant_mol_needed': moles of Titrant needed to reach equivalence pt,
# 'titrant_vol_needed': volume (in L) of Titrant needed to reach equivalence pt, 'water': mols of water formed}
def react(ratio=[1, 1, 1, 1], C1=5, C2=5, V1=50, V2=25, unit='mL'):
if unit == 'mL':
mol1 = (V1 / 1000) * C1
mol2 = (V2 / 1000) * C2
V = V1 / 1000 + V2 / 1000
elif unit == 'L':
mol1 = V1 * C1
mol2 = V2 * C2
V = V1 + V2
else:
        raise ValueError("unit must be 'mL' or 'L'")
titrant_mol_needed = mol1 / ratio[0] * ratio[1]
titrant_vol_needed = titrant_mol_needed / C2
if len(ratio) == 3 or len(ratio) == 4:
if len(ratio) == 3:
water = 0.0
if (mol1 / ratio[0] <= mol2 / ratio[1]):
analyte = 0.0
titrant = mol2 - mol1 / ratio[0] * ratio[1]
salt = mol1 / ratio[0] * ratio[2]
if len(ratio) == 4:
water = mol1 / ratio[0] * ratio[3]
return {'analyte': analyte, 'titrant': titrant, 'salt': salt, 'V': V, 'water': water,
'titrant_mol_needed': titrant_mol_needed, 'titrant_vol_needed': titrant_vol_needed}
if (mol1 / ratio[0] > mol2 / ratio[1]):
analyte = mol1 - mol2 / ratio[1] * ratio[0]
titrant = 0.0
salt = mol2 / ratio[1] * ratio[2]
if len(ratio) == 4:
water = mol2 / ratio[1] * ratio[3]
return {'analyte': analyte, 'titrant': titrant, 'salt': salt, 'V': V, 'water': water,
'titrant_mol_needed': titrant_mol_needed, 'titrant_vol_needed': titrant_vol_needed}
else:
        raise ValueError('ratio must contain 3 or 4 stoichiometric coefficients')
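# Worked example (added for illustration): titrating 50 mL of 5 M analyte with 25 mL of
# 5 M titrant in a 1:1:1:1 reaction leaves excess analyte and forms 0.125 mol of salt in
# 75 mL (0.075 L) of solution.
_example_rxn = react(ratio=[1, 1, 1, 1], C1=5, C2=5, V1=50, V2=25, unit='mL')
assert abs(_example_rxn['salt'] - 0.125) < 1e-9
assert abs(_example_rxn['V'] - 0.075) < 1e-9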
# acid_or_base 'acid' for buffers where the analyte is an acid, and 'base' for buffers where the analyte is a base
# ka if 'acid', kb if 'base'
def buffer_pH(acid_or_base, k, analyte_mol, salt_mol):
if acid_or_base == 'acid':
return (p(k) + log(salt_mol / analyte_mol))
if acid_or_base == 'base':
return 14 - ((p(k) + log(salt_mol / analyte_mol)))
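# Worked example (added): an equimolar weak acid/conjugate base buffer sits at pH = pKa
# (Henderson-Hasselbalch), e.g. ~4.74 for acetic acid with Ka ~ 1.8e-5.
assert abs(buffer_pH('acid', 1.8e-5, 0.1, 0.1) - p(1.8e-5)) < 1e-9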
# acid_or_base = 'acid' or 'base'
# TAKES CONCENTRATION, NOT MOLES
def strong_pH(acid_or_base, conc):
if acid_or_base == 'acid':
return (p(conc))
if acid_or_base == 'base':
return (14 - p(conc))
# acid_or_base = 'acid' or 'base'
# TAKES CONCENTRATION, NOT MOLES
def weak_pH(acid_or_base, k, conc):
if abs(p_quad(-1, -k, k * conc)) != abs(n_quad(-1, -k, k * conc)):
# print('Error: Quadratic did not yield consistent X value.')
if acid_or_base == 'acid':
H = (k * conc) ** 0.5
return (p(H))
if acid_or_base == 'base':
H = switch((k * conc) ** 0.5)
return (p(H))
else:
if acid_or_base == 'acid':
H = abs(p_quad(-1, -k, k * conc))
return (p(H))
if acid_or_base == 'base':
H = switch(abs(p_quad(-1, -k, k * conc)))
return (p(H))
# TAKES CONCENTRATION, NOT MOLES
# acid_or_base is for analyte
def equivalence_pH(acid_or_base, k, salt_conc):
# weak_pH but convert K to opposite
if acid_or_base == 'acid':
return weak_pH('base', switch(k), salt_conc)
if acid_or_base == 'base':
return weak_pH('acid', switch(k), salt_conc)
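# Worked example (added): 0.1 M of a weak acid with Ka ~ 1.8e-5 gives pH ~ 2.87 via the
# sqrt(Ka*C) approximation used above, while 0.1 M of a strong acid gives pH = 1.
assert abs(weak_pH('acid', 1.8e-5, 0.1) - p((1.8e-5 * 0.1) ** 0.5)) < 1e-9
assert abs(strong_pH('acid', 0.1) - 1.0) < 1e-9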
# rxn_mol -> output of react(), or:
# {'analyte': mols of Analyte remaining, 'titrant': mols of Titrant remaining, 'salt': mols of Salt remaining,
# 'V': volume of solution in L, 'titrant_mol_needed': moles of Titrant needed to reach equivalence pt,
# 'titrant_vol_needed': volume (in L) of Titrant needed to reach equivalence pt}
# Ka/Kb -> constant for analyte (0 for the one that does not apply) (unless strong analyte)
# acid_or_base is for analyte ('acid' or 'base')
# if strong analyte - weak titrant, k is the k of the titrant
# k2 is only for weak-weak titrations, where it is the k of the titrant
def get_pH(rxn_mol, k=1.7e-5, acid_or_base='acid', strong_titrant=True, strong_analyte=False, k2=1.8e-5):
# weak analyte, strong titrant
if not (strong_analyte) and strong_titrant:
# initial pH
if rxn_mol['analyte'] > 0 and rxn_mol['titrant'] == 0 and rxn_mol['salt'] == 0:
return weak_pH(acid_or_base, k, rxn_mol['analyte'] / rxn_mol['V'])
# before equivalence pt (buffer solution)
if rxn_mol['analyte'] > 0 and rxn_mol['titrant'] == 0:
if acid_or_base == 'acid':
return buffer_pH(acid_or_base, k, rxn_mol['analyte'], rxn_mol['salt'])
if acid_or_base == 'base':
return buffer_pH(acid_or_base, k, rxn_mol['analyte'], rxn_mol['salt'])
# @ equivalence pt
if rxn_mol['analyte'] == 0 and rxn_mol['titrant'] == 0:
return equivalence_pH(acid_or_base, k, (rxn_mol['salt'] / rxn_mol['V']))
# past equivalence pt (strong acid/base for this combo)
if rxn_mol['titrant'] > 0 and rxn_mol['analyte'] == 0:
if acid_or_base == 'acid':
return strong_pH('base', rxn_mol['titrant'] / rxn_mol['V'])
if acid_or_base == 'base':
return strong_pH('acid', rxn_mol['titrant'] / rxn_mol['V'])
elif strong_analyte and not (strong_titrant):
# initial pH
if rxn_mol['analyte'] > 0 and rxn_mol['titrant'] == 0 and rxn_mol['salt'] == 0:
return strong_pH(acid_or_base, rxn_mol['analyte'] / rxn_mol['V'])
# before equivalence pt (buffer solution)
if rxn_mol['analyte'] > 0 and rxn_mol['titrant'] == 0:
return strong_pH(acid_or_base, rxn_mol['analyte'] / rxn_mol['V'])
# @ equivalence pt
if rxn_mol['analyte'] == 0 and rxn_mol['titrant'] == 0:
return 7.0
# past equivalence pt (strong acid/base for this combo)
if rxn_mol['titrant'] > 0 and rxn_mol['analyte'] == 0:
if acid_or_base == 'acid':
return weak_pH('base', k, rxn_mol['titrant'] / rxn_mol['V'])
if acid_or_base == 'base':
return weak_pH('acid', k, rxn_mol['titrant'] / rxn_mol['V'])
elif strong_analyte and strong_titrant:
# initial pH
if rxn_mol['analyte'] > 0 and rxn_mol['titrant'] == 0 and rxn_mol['salt'] == 0:
return strong_pH(acid_or_base, rxn_mol['analyte'] / rxn_mol['V'])
# before equivalence pt (buffer solution)
if rxn_mol['analyte'] > 0 and rxn_mol['titrant'] == 0:
return strong_pH(acid_or_base, rxn_mol['analyte'] / rxn_mol['V'])
# @ equivalence pt
if rxn_mol['analyte'] == 0 and rxn_mol['titrant'] == 0:
return 7.0
# past equivalence pt (strong acid/base for this combo)
if rxn_mol['titrant'] > 0 and rxn_mol['analyte'] == 0:
if acid_or_base == 'acid':
return strong_pH('base', rxn_mol['titrant'] / rxn_mol['V'])
if acid_or_base == 'base':
return strong_pH('acid', rxn_mol['titrant'] / rxn_mol['V'])
elif not (strong_analyte) and not (strong_titrant):
print('Weak-Weak titrations have not been implemented yet. They are not good experimental design.')
# # initial pH
# if rxn_mol['analyte']>0 and rxn_mol['titrant']==0 and rxn_mol['salt']==0:
# return weak_pH(acid_or_base,k,rxn_mol['analyte']/rxn_mol['V'])
# # before equivalence pt (buffer solution)
# if rxn_mol['analyte']>0 and rxn_mol['titrant']==0:
# if acid_or_base=='acid':
# return buffer_pH(acid_or_base,k,rxn_mol['analyte'],rxn_mol['salt'])
# if acid_or_base=='base':
# return buffer_pH(acid_or_base,k,rxn_mol['analyte'],rxn_mol['salt'])
# # @ equivalence pt
# if rxn_mol['analyte']==0 and rxn_mol['titrant']==0:
# return 7.0
# # past equivalence pt (strong acid/base for this combo)
# if rxn_mol['titrant']>0 and rxn_mol['analyte']==0:
# if acid_or_base=='acid':
# return strong_pH('base',rxn_mol['titrant']/rxn_mol['V'])
# if acid_or_base=='base':
# return strong_pH('acid',rxn_mol['titrant']/rxn_mol['V'])
# equivalence point of strong-strong if 1:1:1:1 is always pH=7.00
# weak-weak is not a good experimental design
# final_vol is max amount added
# if strong analyte - weak titrant, k (dissociation constant) is the k of the titrant
# k2 is only for weak-weak titrations, where it is the k of the titrant
# acid_or_base is for analyte ('acid' or 'base')
# returns (x,y), where x is a list of the volumes added, and y is a list of the pH at the respective volume added
def plot_titration(initial_vol=0, final_vol=100, increment=0.1, ratio=[1, 1, 1, 1], C1=5, C2=5, V1=50, V2=25, unit='mL',
strong_titrant=True, strong_analyte=False, k=1.7e-5, acid_or_base='acid', k2=1.8e-5):
added = initial_vol
x = []
y = []
while added <= final_vol:
y.append(get_pH(react(ratio=ratio, C1=C1, C2=C2, V1=V1, V2=added, unit=unit),
k=k, acid_or_base=acid_or_base, strong_titrant=strong_titrant, strong_analyte=strong_analyte))
x.append(added)
if y[len(y) - 1] < 0 or y[len(y) - 1] > 14:
del (y[len(y) - 1])
del (x[len(x) - 1])
added += increment
plt.xlabel('Volume Added ' + '(' + unit + ')')
plt.ylabel('pH')
plt.ylim(bottom=0, top=max(y))
plt.plot(x, y)
if acid_or_base == 'acid' and strong_titrant == True and strong_analyte == False:
plt.title('Weak Acid-Strong Base Titration Curve')
if acid_or_base == 'base' and strong_titrant == True and strong_analyte == False:
plt.title('Weak Base-Strong Acid Titration Curve')
tit_mol_needed = react(ratio=ratio, C1=C1, C2=C2, V1=V1, V2=V2, unit=unit)['titrant_mol_needed']
tit_vol_needed = react(ratio=ratio, C1=C1, C2=C2, V1=V1, V2=V2, unit=unit)['titrant_vol_needed'] * 1000
if not (strong_analyte) and strong_titrant:
salt_conc_at_eqvl = react(ratio=ratio, C1=C1, C2=C2, V1=V1, V2=tit_vol_needed, unit='mL')['salt'] / \
react(ratio=ratio, C1=C1, C2=C2, V1=V1, V2=tit_vol_needed, unit='mL')['V']
equ_pH = equivalence_pH(acid_or_base, k, salt_conc_at_eqvl)
elif (strong_analyte and not (strong_titrant)) or (strong_analyte and strong_titrant):
equ_pH = 7.0
print('Initial pH: ' + str(y[0]))
print('pH at Equivalence Point: ' + str(equ_pH))
print('Final pH:', str(y[len(y) - 1]))
print('Volume of Titrant Needed for Equivalence:', str(tit_vol_needed), 'mL')
print('Amount of Titrant Needed for Equivalence:', str(tit_mol_needed), 'mol')
plt.show()
return x, y
# just the default example
# notice the vertical line around equivalence; this is not accurate and should be ignored
plot_titration()
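# A second, hypothetical run (added example): a strong acid analyte titrated with a weak
# base, using dilute 0.1 M solutions. Uncomment to draw it as an additional figure.
# plot_titration(C1=0.1, C2=0.1, V1=50, final_vol=100, strong_analyte=True,
#                strong_titrant=False, k=1.8e-5, acid_or_base='acid')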
|
the-stack_0_9173 | # -*- coding: utf-8 -*-
from unittest import TestCase
from ddt import ddt, data
from pyleecan.Classes.LamSlotMag import LamSlotMag
from pyleecan.Classes.SlotMFlat import SlotMFlat
from pyleecan.Classes.MagnetType12 import MagnetType12
from pyleecan.Methods.Machine.Magnet.comp_surface import comp_surface
from numpy import exp
Mag12_test = list()
# Internal Slot
lam = LamSlotMag(is_internal=True, Rext=0.1325)
lam.slot = SlotMFlat(H0=5e-3, W0=10e-3, Zs=12)
lam.slot.magnet = [MagnetType12(Hmag=5e-3, Wmag=10e-3)]
Mag12_test.append(
{"test_obj": lam, "S_exp": 5.062918e-5, "Ao": 0.078449, "H_exp": 5e-3}
)
# Outward Slot
lam = LamSlotMag(is_internal=False, Rint=0.1325)
lam.slot = SlotMFlat(H0=5e-3, W0=10e-3, Zs=12)
lam.slot.magnet = [MagnetType12(Hmag=5e-3, Wmag=10e-3)]
Mag12_test.append({"test_obj": lam, "S_exp": 4.937e-5, "Ao": 0.072745, "H_exp": 5e-3})
# For AlmostEqual
DELTA = 1e-4
@ddt
class test_Magnet_Type_12_meth(TestCase):
"""unittest for MagnetType12 methods"""
@data(*Mag12_test)
def test_comp_surface(self, test_dict):
"""Check that the computation of the surface is correct"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.magnet[0].comp_surface()
a = result
b = test_dict["S_exp"]
msg = "Return " + str(a) + " expected " + str(b)
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)
# Compare numerical and analytical results
b = comp_surface(test_obj.slot.magnet[0])
msg = "Analytical: " + str(a) + " Numerical " + str(b)
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)
@data(*Mag12_test)
def test_comp_height(self, test_dict):
"""Check that the computation of the height is correct"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.magnet[0].comp_height()
a = result
b = test_dict["H_exp"]
msg = "Return " + str(a) + " expected " + str(b)
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)
@data(*Mag12_test)
def test_comp_angle_op(self, test_dict):
"""Check that the computation of the opening angle is correct"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.magnet[0].comp_angle_opening()
a = result
b = test_dict["Ao"]
msg = "Return " + str(a) + " expected " + str(b)
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)
|
the-stack_0_9174 | # -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
import logging
import json
SEP = "[SEP]"
CLS = "[CLS]"
MASK = "[MASK]"
UNK = "[UNK]"
def _parse_text(file_name: str, word_dict: dict, label_dict: dict, word_index_from: int = 5, label_index_from: int = 3,
n_gram:int=6, m_step:int=6, label_name:str='host_tax_id', lower: bool = True):
"""
Read corpus 读取语料
:param file_name:文件名称
:param word_index_from: 词语开始编号
:param label_index_from: 标签开始编号
:param lower: 转化为小写
:param sent_delimiter: 词语分隔符
:param padding: 是否填充
:return:
"""
words = []
labels = []
if os.path.exists(file_name) is False:
logging.error("File is not exists: {}".format(file_name))
return words, labels
try:
file = open(file_name, 'r', encoding="utf-8")
index = 0
for line in file:
virus_entity = json.loads(line)
if virus_entity is None:
continue
word = []
label = []
CLS = '[CLS]'
genomic_seq = virus_entity['refseq']
if len(genomic_seq) == 0:
continue
# Words
if CLS not in word_dict.keys():
word_dict[CLS] = len(word_dict) + word_index_from
word.append(word_dict[CLS])
            # slide a window of length n_gram over the sequence with stride m_step
for ii in range(0, len(genomic_seq), m_step):
if ii + n_gram <= len(genomic_seq):
char = ''
for jj in range(ii, ii + n_gram):
char += genomic_seq[jj]
if char not in word_dict.keys():
word_dict[char] = len(word_dict) + word_index_from
word.append(word_dict[char])
# Tags
tag = virus_entity[label_name]
if tag not in label_dict.keys():
label_dict[tag] = len(label_dict) + label_index_from
label.append(label_dict[tag])
if len(word) > 0 and len(label) > 0:
words.append(np.array(word))
labels.extend(np.array(label))
# ner_labels.append(ner_tags)
index += 1
if index > 0 and index % 100 == 0:
print(index)
except Exception as e:
logging.error(e)
# print("words: ", words)
# print("labels: ", labels)
# print("ner_labels: ", len(ner_labels), ner_labels)
return words, labels
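# Added illustration of the n_gram/m_step window used above: a genomic string is split
# into overlapping k-mers, e.g. "ATCGATCG" with n_gram=4 and m_step=2 -> ATCG, CGAT, ATCG.
def _kmers(seq, n_gram, m_step):
    return [seq[ii:ii + n_gram] for ii in range(0, len(seq), m_step) if ii + n_gram <= len(seq)]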
def load_data(train_paths: list,
valid_paths: list,
test_paths: list,
num_words=None,
max_seq_len=25,
word_index_from=5,
label_index_from=3,
lower=True,
sent_delimiter='\t',
padding=False,
word_dict: dict = None,
label_dict: dict = None,
add_to_dict=True,
**kwargs):
"""
Load dataset 读取数据集
"""
if word_dict is None:
word_dict = {}
word_dict[SEP] = 1
word_dict[CLS] = 2
word_dict[MASK] = 3
word_dict[UNK] = 4
if label_dict is None:
label_dict = {}
label_dict[CLS] = 1
label_dict[UNK] = 2
    # Load train set
x_train = []
y_train = []
count = 0
if train_paths != None and len(train_paths):
for file_name in train_paths:
words, labels = _parse_text(file_name, word_dict, label_dict, word_index_from=word_index_from, label_index_from=label_index_from, n_gram=5, m_step=3)
x_train.extend(words)
y_train.extend(labels)
x_train = np.array(x_train)
y_train = np.array(y_train)
print("x_train: ", x_train.shape)
print("y_train: ", y_train.shape)
x_valid = []
y_valid = []
count = 0
if valid_paths != None and len(valid_paths) >= 0:
for file_name in valid_paths:
words, labels = _parse_text(file_name, word_dict, label_dict, word_index_from=word_index_from, label_index_from=label_index_from)
x_valid.extend(words)
y_valid.extend(labels)
x_valid = np.array(x_valid)
y_valid = np.array(y_valid)
print("x_valid: ", x_valid.shape)
print("y_valid: ", y_valid.shape)
x_test = []
y_test = []
ner_labels_test = []
count = 0
if test_paths != None and len(test_paths) >= 0:
for file_name in test_paths:
words, labels = _parse_text(file_name, word_dict, label_dict, word_index_from=word_index_from, label_index_from=label_index_from)
x_test.extend(words)
y_test.extend(labels)
# ner_labels_test.extend(ner_labels)
print("Test Counter: ", count)
x_test = np.array(x_test)
y_test = np.array(y_test)
print("x_test: ", x_test.shape)
print("y_test: ", y_test.shape)
print("word_dict: ", len(word_dict))
print("label_dict: ", len(label_dict))
if max_seq_len > 0 and padding:
if len(x_train) > 0:
x_train = _remove_long_seq(max_seq_len, x_train)
if not x_train:
raise ValueError('After filtering for sequences shorter than maxlen=' +
str(max_seq_len) + ', no sequence was kept. '
'Increase maxlen.')
x_train = _pad_sequences(max_seq_len, x_train)
x_train = np.array(x_train)
if len(x_valid) > 0:
x_valid = _remove_long_seq(max_seq_len, x_valid)
if not x_valid:
raise ValueError('After filtering for sequences shorter than maxlen=' +
str(max_seq_len) + ', no sequence was kept. '
'Increase maxlen.')
x_valid = _pad_sequences(max_seq_len, x_valid)
x_valid = np.array(x_valid)
y_valid = np.array(y_valid)
if len(x_test) > 0:
x_test = _remove_long_seq(max_seq_len, x_test)
if not x_test:
raise ValueError('After filtering for sequences shorter than maxlen=' +
str(max_seq_len) + ', no sequence was kept. '
'Increase maxlen.')
x_test = _pad_sequences(max_seq_len, x_test)
x_test = np.array(x_test)
y_test = np.array(y_test)
if not num_words:
num_words = len(word_dict)
num_labels = len(label_dict)
print("x_train: ", x_train.shape)
print("y_train: ", y_train.shape)
print("x_valid: ", x_valid.shape)
print("y_valid: ", y_valid.shape)
print("x_test: ", x_test.shape)
print("y_test: ", y_test.shape)
return (x_train, y_train), (x_valid, y_valid), (x_test, y_test), num_words, num_labels, word_dict, label_dict
def _remove_long_seq(maxlen, seq):
"""Removes sequences that exceed the maximum length.
# Arguments
maxlen: Int, maximum length of the output sequences.
seq: List of lists, where each sublist is a sequence.
label: List where each element is an integer.
# Returns
new_seq, new_label: shortened lists for `seq` and `label`.
"""
new_seq, new_label = [], []
count = 0
for x in seq:
if len(x) < maxlen:
new_seq.append(x)
else:
new_seq.append(x[0:maxlen])
count += 1
print("Remove: ", count)
return new_seq # , new_value
def _pad_sequences(maxlen, seq, pad_x=0, pad_y=0, pad_v=0):
"""Removes sequences that exceed the maximum length.
# Arguments
maxlen: Int, maximum length of the output sequences.
seq: List of lists, where each sublist is a sequence.
label: List where each element is an integer.
# Returns
new_seq, new_label: shortened lists for `seq` and `label`.
"""
new_seq, new_label, new_ner = [], [], []
for x in seq:
x = list(x)
if len(x) < maxlen:
pads_x = x + [pad_x] * (maxlen - len(x))
new_seq.append(pads_x)
else:
new_seq.append(x[0:maxlen])
return new_seq
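# Added sanity check: sequences are padded with zeros or truncated to the target length.
assert _pad_sequences(4, [[1, 2], [5, 6, 7, 8, 9]]) == [[1, 2, 0, 0], [5, 6, 7, 8]]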
if __name__ == '__main__':
train_paths = [
"E:\\Research\\Medical\\Data\\virus_host_db.txt",
]
(x_train, y_train), (_, _), (_, _), num_words, num_labels, word_dict, label_dict = load_data(train_paths, None, None, max_seq_len=150, padding=True)
word_index_dict = {}
for d, v in word_dict.items():
word_index_dict[v] = d
label_index_dict = {}
for d, v in label_dict.items():
label_index_dict[v] = d
print("label_index_dict: ", label_index_dict)
x_sent = ""
y_sent = ""
x_raw = []
y_raw = []
example_index = 1
output_file = 'E:\\Research\\Medical\\Data\\virus_host_db_fasttext.txt'
with open(output_file, "w") as fh:
for ii in range(len(x_train)):
x_data = x_train[ii]
            # start a fresh line for each example (fastText expects one "__label__X tokens" line per record)
            x_sent = '__label__' + label_index_dict.get(y_train[ii], '[UNK]')
for jj in range(len(x_data)):
x_sent += ' ' + word_index_dict.get(x_data[jj], '[UNK]')
fh.write(x_sent + '\n')
|
the-stack_0_9175 | import streamlit as st
import pandas as pd
import plotly.express as px
import numpy as np
import time
from datetime import datetime
#Titles and Mode selections
st.sidebar.title("About Us")
st.sidebar.info("""
The aim of this project is to create an interactive Covid-19 Dashboard. This app is maintained by Team number 6 of the Open Source course for the second semester of Master 1 Digital Sciences.
Contributors of this project are Paul Montecot Grall, Marine Menardin and Elizabeth Afolabi.
"""
)
st.sidebar.title("Comments")
st.sidebar.info("Feel free to comment on our work or share your thoughts and suggestions about this project. The github link can be found "
"[here](https://github.com/marinemnrd/Covid_Dash_OpenSource) " 'The Datas come from [John Hopkins University](https://github.com/CSSEGISandData) and [Kaggle](https://www.kaggle.com/tanuprabhu/population-by-country-2020)')
selectbox = st.sidebar.selectbox('Choose the Type of Datas',('Deaths','Cases','Normalised Deaths', 'Normalised Cases'))
st.title("COVID DASHBOARD")
st.write("""
This web application will serve to analyze and visualize the spread of COVID-19 around the world.""")
st.image('Covid19.jpeg')
st.markdown("# A Summary of Covid-19")
st.write("""
COVID-19 is a disease caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2). The COVID-19 pandemic, also known as the coronavirus pandemic, is an ongoing global pandemic of coronavirus disease 2019 (COVID-19).
The virus was first identified in December 2019 in Wuhan, China. The World Health Organization declared a Public Health Emergency of International Concern regarding COVID-19 on 30 January 2020, and later declared a pandemic on 11 March 2020. As of 11 June 2021,
more than 174 million cases have been confirmed, with more than 3.77 million confirmed deaths attributed to COVID-19, making it one of the deadliest pandemics in history.""")
st.markdown("## Symptoms")
st.markdown(("* Fever or chills\n* Cough\n"
"* Shortness of breath or difficulty breathing\n"
"* Fatigue\n"
"* Muscle or body aches\n"
"* Headache\n"
"* Loss of taste or smell\n"
"* Sore throat\n"
"* Congestion or runny nose\n"
"* Nausea or vomiting\n"
"* Diarrhea\n"))
#dfpop = pd.read_csv(r'https://github.com/marinemnrd/Covid_Dash_OpenSource/blob/0818ef95771496134f063c2948354b7d20c81306/Datas/population_by_country_2020.csv')
#dfpop = dfpop.iloc[:, 0:2]
#Read Data
#dfdead = pd.read_csv(r'C:\Users\CRI User\Documents\GitHub\Covid_Dash_OpenSource\Datas\time_series_covid19_deaths_global.csv')
#Set up Dataset
#dfdead =dfdead.drop(['Lat', 'Long'],axis=1)
#dfdead =dfdead.drop(['Province/State'],axis=1)
#dfdead = dfdead.groupby(['Country/Region']).sum()
#Apply the operation for normalisation
#dfdead = dfdead.applymap(lambda x: x*100000)
#Merge Dataset with Pop Datas For Division
#dfmerged = pd.merge(dfdead, dfpop,how = 'inner', left_on = dfdead.index , right_on = dfpop['Country (or dependency)'] )
#dfmerged = dfmerged.drop(columns=dfmerged.columns[0],)
#dfmerged = dfmerged.set_index('Country (or dependency)')
#Divise All Columns to the Population Column for having data*100000/Pop
#dfnorm = dfmerged.div(dfmerged['Population (2020)'], axis='index')
#Formating for Plotting
#dfnorm = dfnorm.T.reset_index().reindex()
#dfnorm =dfnorm.rename(columns = {'index':'Date'})
#dfnorm = dfnorm[:-1]
#dfnorm['Date'] = pd.to_datetime(dfnorm['Date']).dt.date
#dfnorm = dfnorm.set_index(['Date'])
#dfnorm = dfnorm.astype(int)
#Upload the .csv
#df_normdeath = dfnorm
#dfnormdead.to_csv('Normaliseddead')
#Load Data
df_case = pd.read_csv(r'https://raw.githubusercontent.com/marinemnrd/Covid_Dash_OpenSource/main/Datas/Clean_Confirmed_Case.csv', parse_dates=['Date'])
df_case = df_case.set_index(['Date'])
df_Death = pd.read_csv(r'https://raw.githubusercontent.com/marinemnrd/Covid_Dash_OpenSource/main/Datas/Clean_Death.csv', parse_dates=['Date'])
df_Death = df_Death.set_index(['Date'])
#df_normcase = pd.read_csv(r"C:\Users\CRI User\Desktop\Normalisedcase.csv", parse_dates=['Date'])
#df_normcase = df_normcase.set_index(['Date'])
#df_normdeath = pd.read_csv(r"C:\Users\CRI User\Desktop\Normaliseddead.csv", parse_dates=['Date'])
#df_normdeath = df_normdeath.set_index(['Date'])
print (df_case)
#print (df_normcase)
print (df_case.columns)
#print (df_normcase.columns)
#Chart the data
#defaultcol = df_case['France']
if selectbox == 'Cases':
st.title("Cumulative number of cases")
st.text("")
min_ts = min(df_case.index).to_pydatetime()
max_ts = max(df_case.index).to_pydatetime()
# slider to chose date
st.sidebar.subheader("Date")
min_selection, max_selection = st.sidebar.slider("Timeline", min_value=min_ts, max_value=max_ts,
value=[min_ts, max_ts])
df_case = df_case[(df_case.index >= min_selection) & (df_case.index <= max_selection)]
case = st.multiselect('choose country', df_case.columns)
if case == []:
st.error("Please select at least one category.")
print(case)
#fig = px.line(df_case, x=df_case.index, y=case)
fig = px.line(df_case, x=df_case.index, y=case)
st.write(fig)
elif selectbox == 'Deaths':
st.title("Cumulative number of deaths")
st.text("")
min_ts = min(df_Death.index).to_pydatetime()
max_ts = max(df_Death.index).to_pydatetime()
# slider to chose date
st.sidebar.subheader("Date")
min_selection, max_selection = st.sidebar.slider("Timeline", min_value=min_ts, max_value=max_ts,
value=[min_ts, max_ts])
df_Death = df_Death[(df_Death.index >= min_selection) & (df_Death.index <= max_selection)]
death = st.multiselect('choose country', df_Death.columns)
if death == []:
st.error("Please select at least one category.")
print(death)
fig = px.line(df_Death, x=df_Death.index, y=death)
st.write(fig)
elif selectbox == 'Normalised Cases':
st.title("Cumulative number of Cases for 100 000 Hab")
st.text("")
min_ts = min(df_normcase.index).to_pydatetime()
max_ts = max(df_normcase.index).to_pydatetime()
# slider to chose date
st.sidebar.subheader("Date")
min_selection, max_selection = st.sidebar.slider("Timeline", min_value=min_ts, max_value=max_ts,
value=[min_ts, max_ts])
df_normcase = df_normcase[(df_normcase.index >= min_selection) & (df_normcase.index <= max_selection)]
normcase = st.multiselect('choose country', df_normcase.columns)
if normcase == []:
st.error("Please select at least one category.")
print(normcase)
    fig = px.line(df_normcase, x=df_normcase.index, y=normcase)
st.write(fig)
else:
st.title("Cumulative number of deaths for 100 000 Hab")
st.text("")
df_normdeath = pd.DataFrame(df_normdeath)
min_ts = min(df_normdeath.index)#.to_pydatetime()
max_ts = max(df_normdeath.index)#.to_pydatetime()
# slider to chose date
st.sidebar.subheader("Date")
min_selection, max_selection = st.sidebar.slider("Timeline", min_value=min_ts, max_value=max_ts,
value=[min_ts, max_ts])
df_normdeath = df_normdeath[(df_normdeath.index >= min_selection) & (df_normdeath.index <= max_selection)]
normdeath = st.multiselect('choose country', df_normdeath.columns)
if normdeath == []:
st.error("Please select at least one category.")
print(normdeath)
    fig = px.line(df_normdeath, x=df_normdeath.index, y=normdeath)
st.write(fig)
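
# --- Refactoring sketch (assumption, not wired into the app) -------------------
# The four branches above repeat the same slider / multiselect / px.line pattern.
# A single helper could replace them; it assumes the dataframe has a DatetimeIndex.
def plot_cumulative(df, title):
    st.title(title)
    st.text("")
    min_ts = min(df.index).to_pydatetime()
    max_ts = max(df.index).to_pydatetime()
    st.sidebar.subheader("Date")
    start, end = st.sidebar.slider("Timeline", min_value=min_ts, max_value=max_ts,
                                   value=[min_ts, max_ts])
    df = df[(df.index >= start) & (df.index <= end)]
    countries = st.multiselect('choose country', df.columns)
    if not countries:
        st.error("Please select at least one category.")
    st.write(px.line(df, x=df.index, y=countries))

# Example (hypothetical): plot_cumulative(df_case, "Cumulative number of cases")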
|
the-stack_0_9176 | #
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import division
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.compat
import copy
import sys
import unittest
import SCons.Taskmaster
import SCons.Errors
built_text = None
cache_text = []
visited_nodes = []
executed = None
scan_called = 0
class Node(object):
def __init__(self, name, kids = [], scans = []):
self.name = name
self.kids = kids
self.scans = scans
self.cached = 0
self.scanned = 0
self.scanner = None
self.targets = [self]
self.prerequisites = None
class Builder(object):
def targets(self, node):
return node.targets
self.builder = Builder()
self.bsig = None
self.csig = None
self.state = SCons.Node.no_state
self.prepared = None
self.ref_count = 0
self.waiting_parents = set()
self.waiting_s_e = set()
self.side_effect = 0
self.side_effects = []
self.alttargets = []
self.postprocessed = None
self._bsig_val = None
self._current_val = 0
self.always_build = None
def disambiguate(self):
return self
def push_to_cache(self):
pass
def retrieve_from_cache(self):
global cache_text
if self.cached:
cache_text.append(self.name + " retrieved")
return self.cached
def make_ready(self):
pass
def prepare(self):
self.prepared = 1
self.get_binfo()
def build(self):
global built_text
built_text = self.name + " built"
def remove(self):
pass
# The following four methods new_binfo(), del_binfo(),
# get_binfo(), clear() as well as its calls have been added
# to support the cached_execute() test (issue #2720).
# They are full copies (or snippets) of their actual
# counterparts in the Node class...
def new_binfo(self):
binfo = "binfo"
return binfo
def del_binfo(self):
"""Delete the build info from this node."""
try:
delattr(self, 'binfo')
except AttributeError:
pass
def get_binfo(self):
"""Fetch a node's build information."""
try:
return self.binfo
except AttributeError:
pass
binfo = self.new_binfo()
self.binfo = binfo
return binfo
def clear(self):
# The del_binfo() call here isn't necessary for normal execution,
# but is for interactive mode, where we might rebuild the same
# target and need to start from scratch.
self.del_binfo()
def built(self):
global built_text
if not self.cached:
built_text = built_text + " really"
# Clear the implicit dependency caches of any Nodes
# waiting for this Node to be built.
for parent in self.waiting_parents:
parent.implicit = None
self.clear()
def release_target_info(self):
pass
def has_builder(self):
return self.builder is not None
def is_derived(self):
        return self.has_builder() or self.side_effect
def alter_targets(self):
return self.alttargets, None
def visited(self):
global visited_nodes
visited_nodes.append(self.name)
def children(self):
if not self.scanned:
self.scan()
self.scanned = 1
return self.kids
def scan(self):
global scan_called
scan_called = scan_called + 1
self.kids = self.kids + self.scans
self.scans = []
def scanner_key(self):
return self.name
def add_to_waiting_parents(self, node):
wp = self.waiting_parents
if node in wp:
return 0
wp.add(node)
return 1
def get_state(self):
return self.state
def set_state(self, state):
self.state = state
def set_bsig(self, bsig):
self.bsig = bsig
def set_csig(self, csig):
self.csig = csig
def store_csig(self):
pass
def store_bsig(self):
pass
def is_pseudo_derived(self):
pass
def is_up_to_date(self):
return self._current_val
def depends_on(self, nodes):
for node in nodes:
if node in self.kids:
return 1
return 0
def __str__(self):
return self.name
def postprocess(self):
self.postprocessed = 1
self.waiting_parents = set()
def get_executor(self):
if not hasattr(self, 'executor'):
class Executor(object):
def prepare(self):
pass
def get_action_targets(self):
return self.targets
def get_all_targets(self):
return self.targets
def get_all_children(self):
result = []
for node in self.targets:
result.extend(node.children())
return result
def get_all_prerequisites(self):
return []
def get_action_side_effects(self):
return []
self.executor = Executor()
self.executor.targets = self.targets
return self.executor
class OtherError(Exception):
pass
class MyException(Exception):
pass
class TaskmasterTestCase(unittest.TestCase):
def test_next_task(self):
"""Test fetching the next task
"""
global built_text
n1 = Node("n1")
tm = SCons.Taskmaster.Taskmaster([n1, n1])
t = tm.next_task()
t.prepare()
t.execute()
t = tm.next_task()
assert t is None
n1 = Node("n1")
n2 = Node("n2")
n3 = Node("n3", [n1, n2])
tm = SCons.Taskmaster.Taskmaster([n3])
t = tm.next_task()
t.prepare()
t.execute()
assert built_text == "n1 built", built_text
t.executed()
t.postprocess()
t = tm.next_task()
t.prepare()
t.execute()
assert built_text == "n2 built", built_text
t.executed()
t.postprocess()
t = tm.next_task()
t.prepare()
t.execute()
assert built_text == "n3 built", built_text
t.executed()
t.postprocess()
assert tm.next_task() is None
built_text = "up to date: "
top_node = n3
class MyTask(SCons.Taskmaster.Task):
def execute(self):
global built_text
if self.targets[0].get_state() == SCons.Node.up_to_date:
if self.top:
built_text = self.targets[0].name + " up-to-date top"
else:
built_text = self.targets[0].name + " up-to-date"
else:
self.targets[0].build()
n1.set_state(SCons.Node.no_state)
n1._current_val = 1
n2.set_state(SCons.Node.no_state)
n2._current_val = 1
n3.set_state(SCons.Node.no_state)
n3._current_val = 1
tm = SCons.Taskmaster.Taskmaster(targets = [n3], tasker = MyTask)
t = tm.next_task()
t.prepare()
t.execute()
assert built_text == "n1 up-to-date", built_text
t.executed()
t.postprocess()
t = tm.next_task()
t.prepare()
t.execute()
assert built_text == "n2 up-to-date", built_text
t.executed()
t.postprocess()
t = tm.next_task()
t.prepare()
t.execute()
assert built_text == "n3 up-to-date top", built_text
t.executed()
t.postprocess()
assert tm.next_task() is None
n1 = Node("n1")
n2 = Node("n2")
n3 = Node("n3", [n1, n2])
n4 = Node("n4")
n5 = Node("n5", [n3, n4])
tm = SCons.Taskmaster.Taskmaster([n5])
t1 = tm.next_task()
assert t1.get_target() == n1
t2 = tm.next_task()
assert t2.get_target() == n2
t4 = tm.next_task()
assert t4.get_target() == n4
t4.executed()
t4.postprocess()
t1.executed()
t1.postprocess()
t2.executed()
t2.postprocess()
t3 = tm.next_task()
assert t3.get_target() == n3
t3.executed()
t3.postprocess()
t5 = tm.next_task()
assert t5.get_target() == n5, t5.get_target()
t5.executed()
t5.postprocess()
assert tm.next_task() is None
n4 = Node("n4")
n4.set_state(SCons.Node.executed)
tm = SCons.Taskmaster.Taskmaster([n4])
assert tm.next_task() is None
n1 = Node("n1")
n2 = Node("n2", [n1])
tm = SCons.Taskmaster.Taskmaster([n2,n2])
t = tm.next_task()
t.executed()
t.postprocess()
t = tm.next_task()
assert tm.next_task() is None
n1 = Node("n1")
n2 = Node("n2")
n3 = Node("n3", [n1], [n2])
tm = SCons.Taskmaster.Taskmaster([n3])
t = tm.next_task()
target = t.get_target()
assert target == n1, target
t.executed()
t.postprocess()
t = tm.next_task()
target = t.get_target()
assert target == n2, target
t.executed()
t.postprocess()
t = tm.next_task()
target = t.get_target()
assert target == n3, target
t.executed()
t.postprocess()
assert tm.next_task() is None
n1 = Node("n1")
n2 = Node("n2")
n3 = Node("n3", [n1, n2])
n4 = Node("n4", [n3])
n5 = Node("n5", [n3])
global scan_called
scan_called = 0
tm = SCons.Taskmaster.Taskmaster([n4])
t = tm.next_task()
assert t.get_target() == n1
t.executed()
t.postprocess()
t = tm.next_task()
assert t.get_target() == n2
t.executed()
t.postprocess()
t = tm.next_task()
assert t.get_target() == n3
t.executed()
t.postprocess()
t = tm.next_task()
assert t.get_target() == n4
t.executed()
t.postprocess()
assert tm.next_task() is None
assert scan_called == 4, scan_called
tm = SCons.Taskmaster.Taskmaster([n5])
t = tm.next_task()
assert t.get_target() == n5, t.get_target()
t.executed()
assert tm.next_task() is None
assert scan_called == 5, scan_called
n1 = Node("n1")
n2 = Node("n2")
n3 = Node("n3")
n4 = Node("n4", [n1,n2,n3])
n5 = Node("n5", [n4])
n3.side_effect = 1
n1.side_effects = n2.side_effects = n3.side_effects = [n4]
tm = SCons.Taskmaster.Taskmaster([n1,n2,n3,n4,n5])
t = tm.next_task()
assert t.get_target() == n1
assert n4.state == SCons.Node.executing, n4.state
t.executed()
t.postprocess()
t = tm.next_task()
assert t.get_target() == n2
t.executed()
t.postprocess()
t = tm.next_task()
assert t.get_target() == n3
t.executed()
t.postprocess()
t = tm.next_task()
assert t.get_target() == n4
t.executed()
t.postprocess()
t = tm.next_task()
assert t.get_target() == n5
assert not tm.next_task()
t.executed()
t.postprocess()
n1 = Node("n1")
n2 = Node("n2")
n3 = Node("n3")
n4 = Node("n4", [n1,n2,n3])
def reverse(dependencies):
dependencies.reverse()
return dependencies
tm = SCons.Taskmaster.Taskmaster([n4], order=reverse)
t = tm.next_task()
assert t.get_target() == n3, t.get_target()
t.executed()
t.postprocess()
t = tm.next_task()
assert t.get_target() == n2, t.get_target()
t.executed()
t.postprocess()
t = tm.next_task()
assert t.get_target() == n1, t.get_target()
t.executed()
t.postprocess()
t = tm.next_task()
assert t.get_target() == n4, t.get_target()
t.executed()
t.postprocess()
n5 = Node("n5")
n6 = Node("n6")
n7 = Node("n7")
n6.alttargets = [n7]
tm = SCons.Taskmaster.Taskmaster([n5])
t = tm.next_task()
assert t.get_target() == n5
t.executed()
t.postprocess()
tm = SCons.Taskmaster.Taskmaster([n6])
t = tm.next_task()
assert t.get_target() == n7
t.executed()
t.postprocess()
t = tm.next_task()
assert t.get_target() == n6
t.executed()
t.postprocess()
n1 = Node("n1")
n2 = Node("n2", [n1])
n1.set_state(SCons.Node.failed)
tm = SCons.Taskmaster.Taskmaster([n2])
assert tm.next_task() is None
n1 = Node("n1")
n2 = Node("n2")
n1.targets = [n1, n2]
n1._current_val = 1
tm = SCons.Taskmaster.Taskmaster([n1])
t = tm.next_task()
t.executed()
t.postprocess()
s = n1.get_state()
assert s == SCons.Node.executed, s
s = n2.get_state()
assert s == SCons.Node.executed, s
def test_make_ready_out_of_date(self):
"""Test the Task.make_ready() method's list of out-of-date Nodes
"""
ood = []
def TaskGen(tm, targets, top, node, ood=ood):
class MyTask(SCons.Taskmaster.Task):
def make_ready(self):
SCons.Taskmaster.Task.make_ready(self)
self.ood.extend(self.out_of_date)
t = MyTask(tm, targets, top, node)
t.ood = ood
return t
n1 = Node("n1")
c2 = Node("c2")
c2._current_val = 1
n3 = Node("n3")
c4 = Node("c4")
c4._current_val = 1
a5 = Node("a5")
a5._current_val = 1
a5.always_build = 1
tm = SCons.Taskmaster.Taskmaster(targets = [n1, c2, n3, c4, a5],
tasker = TaskGen)
del ood[:]
t = tm.next_task()
assert ood == [n1], ood
del ood[:]
t = tm.next_task()
assert ood == [], ood
del ood[:]
t = tm.next_task()
assert ood == [n3], ood
del ood[:]
t = tm.next_task()
assert ood == [], ood
del ood[:]
t = tm.next_task()
assert ood == [a5], ood
def test_make_ready_exception(self):
"""Test handling exceptions from Task.make_ready()
"""
class MyTask(SCons.Taskmaster.Task):
def make_ready(self):
raise MyException("from make_ready()")
n1 = Node("n1")
tm = SCons.Taskmaster.Taskmaster(targets = [n1], tasker = MyTask)
t = tm.next_task()
exc_type, exc_value, exc_tb = t.exception
assert exc_type == MyException, repr(exc_type)
assert str(exc_value) == "from make_ready()", exc_value
def test_make_ready_all(self):
"""Test the make_ready_all() method"""
class MyTask(SCons.Taskmaster.Task):
make_ready = SCons.Taskmaster.Task.make_ready_all
n1 = Node("n1")
c2 = Node("c2")
c2._current_val = 1
n3 = Node("n3")
c4 = Node("c4")
c4._current_val = 1
tm = SCons.Taskmaster.Taskmaster(targets = [n1, c2, n3, c4])
t = tm.next_task()
target = t.get_target()
assert target is n1, target
assert target.state == SCons.Node.executing, target.state
t = tm.next_task()
target = t.get_target()
assert target is c2, target
assert target.state == SCons.Node.up_to_date, target.state
t = tm.next_task()
target = t.get_target()
assert target is n3, target
assert target.state == SCons.Node.executing, target.state
t = tm.next_task()
target = t.get_target()
assert target is c4, target
assert target.state == SCons.Node.up_to_date, target.state
t = tm.next_task()
assert t is None
n1 = Node("n1")
c2 = Node("c2")
n3 = Node("n3")
c4 = Node("c4")
tm = SCons.Taskmaster.Taskmaster(targets = [n1, c2, n3, c4],
tasker = MyTask)
t = tm.next_task()
target = t.get_target()
assert target is n1, target
assert target.state == SCons.Node.executing, target.state
t = tm.next_task()
target = t.get_target()
assert target is c2, target
assert target.state == SCons.Node.executing, target.state
t = tm.next_task()
target = t.get_target()
assert target is n3, target
assert target.state == SCons.Node.executing, target.state
t = tm.next_task()
target = t.get_target()
assert target is c4, target
assert target.state == SCons.Node.executing, target.state
t = tm.next_task()
assert t is None
def test_children_errors(self):
"""Test errors when fetching the children of a node.
"""
class StopNode(Node):
def children(self):
raise SCons.Errors.StopError("stop!")
class ExitNode(Node):
def children(self):
sys.exit(77)
n1 = StopNode("n1")
tm = SCons.Taskmaster.Taskmaster([n1])
t = tm.next_task()
exc_type, exc_value, exc_tb = t.exception
assert exc_type == SCons.Errors.StopError, repr(exc_type)
assert str(exc_value) == "stop!", exc_value
n2 = ExitNode("n2")
tm = SCons.Taskmaster.Taskmaster([n2])
t = tm.next_task()
exc_type, exc_value = t.exception
assert exc_type == SCons.Errors.ExplicitExit, repr(exc_type)
assert exc_value.node == n2, exc_value.node
assert exc_value.status == 77, exc_value.status
def test_cycle_detection(self):
"""Test detecting dependency cycles
"""
n1 = Node("n1")
n2 = Node("n2", [n1])
n3 = Node("n3", [n2])
n1.kids = [n3]
tm = SCons.Taskmaster.Taskmaster([n3])
try:
t = tm.next_task()
except SCons.Errors.UserError as e:
assert str(e) == "Dependency cycle: n3 -> n1 -> n2 -> n3", str(e)
else:
            self.fail('Did not catch expected UserError')
def test_next_top_level_candidate(self):
"""Test the next_top_level_candidate() method
"""
n1 = Node("n1")
n2 = Node("n2", [n1])
n3 = Node("n3", [n2])
tm = SCons.Taskmaster.Taskmaster([n3])
t = tm.next_task()
assert t.targets == [n1], t.targets
t.fail_stop()
assert t.targets == [n3], list(map(str, t.targets))
assert t.top == 1, t.top
def test_stop(self):
"""Test the stop() method
Both default and overridden in a subclass.
"""
global built_text
n1 = Node("n1")
n2 = Node("n2")
n3 = Node("n3", [n1, n2])
tm = SCons.Taskmaster.Taskmaster([n3])
t = tm.next_task()
t.prepare()
t.execute()
assert built_text == "n1 built", built_text
t.executed()
t.postprocess()
assert built_text == "n1 built really", built_text
tm.stop()
assert tm.next_task() is None
class MyTM(SCons.Taskmaster.Taskmaster):
def stop(self):
global built_text
built_text = "MyTM.stop()"
SCons.Taskmaster.Taskmaster.stop(self)
n1 = Node("n1")
n2 = Node("n2")
n3 = Node("n3", [n1, n2])
built_text = None
tm = MyTM([n3])
tm.next_task().execute()
assert built_text == "n1 built"
tm.stop()
assert built_text == "MyTM.stop()"
assert tm.next_task() is None
def test_executed(self):
"""Test when a task has been executed
"""
global built_text
global visited_nodes
n1 = Node("n1")
tm = SCons.Taskmaster.Taskmaster([n1])
t = tm.next_task()
built_text = "xxx"
visited_nodes = []
n1.set_state(SCons.Node.executing)
t.executed()
s = n1.get_state()
assert s == SCons.Node.executed, s
assert built_text == "xxx really", built_text
assert visited_nodes == ['n1'], visited_nodes
n2 = Node("n2")
tm = SCons.Taskmaster.Taskmaster([n2])
t = tm.next_task()
built_text = "should_not_change"
visited_nodes = []
n2.set_state(None)
t.executed()
s = n2.get_state()
assert s is None, s
assert built_text == "should_not_change", built_text
assert visited_nodes == ['n2'], visited_nodes
n3 = Node("n3")
n4 = Node("n4")
n3.targets = [n3, n4]
tm = SCons.Taskmaster.Taskmaster([n3])
t = tm.next_task()
visited_nodes = []
n3.set_state(SCons.Node.up_to_date)
n4.set_state(SCons.Node.executing)
t.executed()
s = n3.get_state()
assert s == SCons.Node.up_to_date, s
s = n4.get_state()
assert s == SCons.Node.executed, s
assert visited_nodes == ['n3', 'n4'], visited_nodes
def test_prepare(self):
"""Test preparation of multiple Nodes for a task
"""
n1 = Node("n1")
n2 = Node("n2")
tm = SCons.Taskmaster.Taskmaster([n1, n2])
t = tm.next_task()
# This next line is moderately bogus. We're just reaching
# in and setting the targets for this task to an array. The
# "right" way to do this would be to have the next_task() call
# set it up by having something that approximates a real Builder
# return this list--but that's more work than is probably
# warranted right now.
n1.get_executor().targets = [n1, n2]
t.prepare()
assert n1.prepared
assert n2.prepared
n3 = Node("n3")
n4 = Node("n4")
tm = SCons.Taskmaster.Taskmaster([n3, n4])
t = tm.next_task()
# More bogus reaching in and setting the targets.
n3.set_state(SCons.Node.up_to_date)
n3.get_executor().targets = [n3, n4]
t.prepare()
assert n3.prepared
assert n4.prepared
# If the Node has had an exception recorded while it was getting
# prepared, then prepare() should raise that exception.
class MyException(Exception):
pass
built_text = None
n5 = Node("n5")
tm = SCons.Taskmaster.Taskmaster([n5])
t = tm.next_task()
t.exception_set((MyException, "exception value"))
exc_caught = None
exc_actually_caught = None
exc_value = None
try:
t.prepare()
except MyException as e:
exc_caught = 1
exc_value = e
except Exception as e:
exc_actually_caught = e
pass
assert exc_caught, "did not catch expected MyException: %s"%exc_actually_caught
assert str(exc_value) == "exception value", exc_value
assert built_text is None, built_text
# Regression test, make sure we prepare not only
# all targets, but their side effects as well.
n6 = Node("n6")
n7 = Node("n7")
n8 = Node("n8")
n9 = Node("n9")
n10 = Node("n10")
n6.side_effects = [ n8 ]
n7.side_effects = [ n9, n10 ]
tm = SCons.Taskmaster.Taskmaster([n6, n7])
t = tm.next_task()
# More bogus reaching in and setting the targets.
n6.get_executor().targets = [n6, n7]
t.prepare()
assert n6.prepared
assert n7.prepared
assert n8.prepared
assert n9.prepared
assert n10.prepared
# Make sure we call an Executor's prepare() method.
class ExceptionExecutor(object):
def prepare(self):
raise Exception("Executor.prepare() exception")
def get_all_targets(self):
return self.nodes
def get_all_children(self):
result = []
for node in self.nodes:
result.extend(node.children())
return result
def get_all_prerequisites(self):
return []
def get_action_side_effects(self):
return []
n11 = Node("n11")
n11.executor = ExceptionExecutor()
n11.executor.nodes = [n11]
tm = SCons.Taskmaster.Taskmaster([n11])
t = tm.next_task()
try:
t.prepare()
except Exception as e:
assert str(e) == "Executor.prepare() exception", e
else:
raise AssertionError("did not catch expected exception")
def test_execute(self):
"""Test executing a task
"""
global built_text
global cache_text
n1 = Node("n1")
tm = SCons.Taskmaster.Taskmaster([n1])
t = tm.next_task()
t.execute()
assert built_text == "n1 built", built_text
def raise_UserError():
raise SCons.Errors.UserError
n2 = Node("n2")
n2.build = raise_UserError
tm = SCons.Taskmaster.Taskmaster([n2])
t = tm.next_task()
try:
t.execute()
except SCons.Errors.UserError:
pass
else:
self.fail("did not catch expected UserError")
def raise_BuildError():
raise SCons.Errors.BuildError
n3 = Node("n3")
n3.build = raise_BuildError
tm = SCons.Taskmaster.Taskmaster([n3])
t = tm.next_task()
try:
t.execute()
except SCons.Errors.BuildError:
pass
else:
self.fail("did not catch expected BuildError")
# On a generic (non-BuildError) exception from a Builder,
# the target should throw a BuildError exception with the
# args set to the exception value, instance, and traceback.
def raise_OtherError():
raise OtherError
n4 = Node("n4")
n4.build = raise_OtherError
tm = SCons.Taskmaster.Taskmaster([n4])
t = tm.next_task()
try:
t.execute()
except SCons.Errors.BuildError as e:
assert e.node == n4, e.node
assert e.errstr == "OtherError : ", e.errstr
assert len(e.exc_info) == 3, e.exc_info
exc_traceback = sys.exc_info()[2]
assert isinstance(e.exc_info[2], type(exc_traceback)), e.exc_info[2]
else:
self.fail("did not catch expected BuildError")
built_text = None
cache_text = []
n5 = Node("n5")
n6 = Node("n6")
n6.cached = 1
tm = SCons.Taskmaster.Taskmaster([n5])
t = tm.next_task()
# This next line is moderately bogus. We're just reaching
# in and setting the targets for this task to an array. The
# "right" way to do this would be to have the next_task() call
# set it up by having something that approximates a real Builder
# return this list--but that's more work than is probably
# warranted right now.
t.targets = [n5, n6]
t.execute()
assert built_text == "n5 built", built_text
assert cache_text == [], cache_text
built_text = None
cache_text = []
n7 = Node("n7")
n8 = Node("n8")
n7.cached = 1
n8.cached = 1
tm = SCons.Taskmaster.Taskmaster([n7])
t = tm.next_task()
# This next line is moderately bogus. We're just reaching
# in and setting the targets for this task to an array. The
# "right" way to do this would be to have the next_task() call
# set it up by having something that approximates a real Builder
# return this list--but that's more work than is probably
# warranted right now.
t.targets = [n7, n8]
t.execute()
assert built_text is None, built_text
assert cache_text == ["n7 retrieved", "n8 retrieved"], cache_text
def test_cached_execute(self):
"""Test executing a task with cached targets
"""
# In issue #2720 Alexei Klimkin detected that the previous
# workflow for execute() led to problems in a multithreaded build.
# We have:
# task.prepare()
# task.execute()
# task.executed()
# -> node.visited()
# for the Serial flow, but
# - Parallel - - Worker -
# task.prepare()
# requestQueue.put(task)
# task = requestQueue.get()
# task.execute()
# resultQueue.put(task)
# task = resultQueue.get()
# task.executed()
# ->node.visited()
# in parallel. Since execute() used to call built() when a target
# was cached, it could unblock dependent nodes before the binfo got
# restored again in visited(). This resulted in spurious
# "file not found" build errors, because files fetched from cache would
# be seen as not up to date and wouldn't be scanned for implicit
# dependencies.
#
# The following test ensures that execute() only marks targets as cached,
# but the actual call to built() happens in executed() only.
# Like this, the binfo should still be intact after calling execute()...
global cache_text
n1 = Node("n1")
# Mark the node as being cached
n1.cached = 1
tm = SCons.Taskmaster.Taskmaster([n1])
t = tm.next_task()
t.prepare()
t.execute()
assert cache_text == ["n1 retrieved"], cache_text
# If no binfo exists anymore, something has gone wrong...
has_binfo = hasattr(n1, 'binfo')
assert has_binfo, has_binfo
def test_exception(self):
"""Test generic Taskmaster exception handling
"""
n1 = Node("n1")
tm = SCons.Taskmaster.Taskmaster([n1])
t = tm.next_task()
t.exception_set((1, 2))
exc_type, exc_value = t.exception
assert exc_type == 1, exc_type
assert exc_value == 2, exc_value
t.exception_set(3)
assert t.exception == 3
try: 1//0
except:
# Moved from below
t.exception_set(None)
#pass
# import pdb; pdb.set_trace()
# Having this here works for python 2.x,
# but it is a tuple (None, None, None) when called outside
# an except statement
# t.exception_set(None)
exc_type, exc_value, exc_tb = t.exception
        assert exc_type is ZeroDivisionError, "Expecting ZeroDivisionError got:%s" % exc_type
exception_values = [
"integer division or modulo",
"integer division or modulo by zero",
"integer division by zero", # PyPy2
]
assert str(exc_value) in exception_values, exc_value
class Exception1(Exception):
pass
# Previously value was None, but while PY2 None = "", in Py3 None != "", so set to ""
t.exception_set((Exception1, ""))
try:
t.exception_raise()
except:
exc_type, exc_value = sys.exc_info()[:2]
assert exc_type == Exception1, exc_type
assert str(exc_value) == '', "Expecting empty string got:%s (type %s)"%(exc_value,type(exc_value))
else:
assert 0, "did not catch expected exception"
class Exception2(Exception):
pass
t.exception_set((Exception2, "xyzzy"))
try:
t.exception_raise()
except:
exc_type, exc_value = sys.exc_info()[:2]
assert exc_type == Exception2, exc_type
assert str(exc_value) == "xyzzy", exc_value
else:
assert 0, "did not catch expected exception"
class Exception3(Exception):
pass
try:
1//0
except:
tb = sys.exc_info()[2]
t.exception_set((Exception3, "arg", tb))
try:
t.exception_raise()
except:
exc_type, exc_value, exc_tb = sys.exc_info()
assert exc_type == Exception3, exc_type
assert str(exc_value) == "arg", exc_value
import traceback
x = traceback.extract_tb(tb)[-1]
y = traceback.extract_tb(exc_tb)[-1]
assert x == y, "x = %s, y = %s" % (x, y)
else:
assert 0, "did not catch expected exception"
def test_postprocess(self):
"""Test postprocessing targets to give them a chance to clean up
"""
n1 = Node("n1")
tm = SCons.Taskmaster.Taskmaster([n1])
t = tm.next_task()
assert not n1.postprocessed
t.postprocess()
assert n1.postprocessed
n2 = Node("n2")
n3 = Node("n3")
tm = SCons.Taskmaster.Taskmaster([n2, n3])
assert not n2.postprocessed
assert not n3.postprocessed
t = tm.next_task()
t.postprocess()
assert n2.postprocessed
assert not n3.postprocessed
t = tm.next_task()
t.postprocess()
assert n2.postprocessed
assert n3.postprocessed
def test_trace(self):
"""Test Taskmaster tracing
"""
import io
trace = io.StringIO()
n1 = Node("n1")
n2 = Node("n2")
n3 = Node("n3", [n1, n2])
tm = SCons.Taskmaster.Taskmaster([n1, n1, n3], trace=trace)
t = tm.next_task()
t.prepare()
t.execute()
t.postprocess()
n1.set_state(SCons.Node.executed)
t = tm.next_task()
t.prepare()
t.execute()
t.postprocess()
n2.set_state(SCons.Node.executed)
t = tm.next_task()
t.prepare()
t.execute()
t.postprocess()
t = tm.next_task()
assert t is None
value = trace.getvalue()
expect = """\
Taskmaster: Looking for a node to evaluate
Taskmaster: Considering node <no_state 0 'n1'> and its children:
Taskmaster: Evaluating <pending 0 'n1'>
Task.make_ready_current(): node <pending 0 'n1'>
Task.prepare(): node <executing 0 'n1'>
Task.execute(): node <executing 0 'n1'>
Task.postprocess(): node <executing 0 'n1'>
Taskmaster: Looking for a node to evaluate
Taskmaster: Considering node <executed 0 'n1'> and its children:
Taskmaster: already handled (executed)
Taskmaster: Considering node <no_state 0 'n3'> and its children:
Taskmaster: <executed 0 'n1'>
Taskmaster: <no_state 0 'n2'>
Taskmaster: adjusted ref count: <pending 1 'n3'>, child 'n2'
Taskmaster: Considering node <no_state 0 'n2'> and its children:
Taskmaster: Evaluating <pending 0 'n2'>
Task.make_ready_current(): node <pending 0 'n2'>
Task.prepare(): node <executing 0 'n2'>
Task.execute(): node <executing 0 'n2'>
Task.postprocess(): node <executing 0 'n2'>
Task.postprocess(): removing <executing 0 'n2'>
Task.postprocess(): adjusted parent ref count <pending 0 'n3'>
Taskmaster: Looking for a node to evaluate
Taskmaster: Considering node <pending 0 'n3'> and its children:
Taskmaster: <executed 0 'n1'>
Taskmaster: <executed 0 'n2'>
Taskmaster: Evaluating <pending 0 'n3'>
Task.make_ready_current(): node <pending 0 'n3'>
Task.prepare(): node <executing 0 'n3'>
Task.execute(): node <executing 0 'n3'>
Task.postprocess(): node <executing 0 'n3'>
Taskmaster: Looking for a node to evaluate
Taskmaster: No candidate anymore.
"""
assert value == expect, value
if __name__ == "__main__":
unittest.main()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
the-stack_0_9178 | import hashlib
from rhc.database.dao import DAO
class nullcipher(object):
def encrypt(self, v):
return v
def decrypt(self, v):
return v
CRYPT = nullcipher()
class DAOE(DAO):
ENCRYPT_FIELDS = ()
@staticmethod
def makesha(value):
return hashlib.sha256(value).digest()
def on_load(self, kwargs):
self._sha = {}
self._crypt = {}
for n in self.ENCRYPT_FIELDS:
v = kwargs[n]
if v is not None:
self._crypt[n] = v
clr = CRYPT.decrypt(v)
self._sha[n] = self.makesha(clr)
kwargs[n] = clr
    def before_save(self):
        if '_sha' not in self.__dict__:
            self._sha = {}
            self._crypt = {}
        # the clear-text cache must be rebuilt on every save, not only the first one
        self.__crypt_cache = {}
        for n in self.ENCRYPT_FIELDS:
            clear = self.__crypt_cache[n] = getattr(self, n)
            if clear is not None:
                if self._sha.get(n) == self.makesha(clear):
                    v = self._crypt[n]
                else:
                    v = CRYPT.encrypt(clear)
                    self._crypt[n] = v
                    # hash the clear value so the comparison above matches on_load()
                    self._sha[n] = self.makesha(clear)
                setattr(self, n, v)
def after_save(self):
for n in self.ENCRYPT_FIELDS:
setattr(self, n, self.__crypt_cache[n])
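
# --- Usage sketch (hypothetical) -----------------------------------------------
# A DAO subclass opts in to field-level encryption simply by listing columns in
# ENCRYPT_FIELDS; the surrounding DAO mapping (table, fields, save/load hooks) is
# whatever the rhc DAO base class expects and is not spelled out here. CRYPT must
# also be swapped for a real cipher object exposing encrypt()/decrypt() -- the
# default nullcipher stores values unchanged.
#
# class Account(DAOE):
#     ...                      # normal rhc DAO table/field declarations
#     ENCRYPT_FIELDS = ('secret',)
#
# Saving an Account then encrypts 'secret' in before_save() and restores the
# clear value on the instance in after_save().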
|
the-stack_0_9179 | memo = [False] * 10000000
def sum_digit_groups(num):
    """Return every sum obtainable by splitting the decimal digits of num into
    contiguous groups (e.g. 8281 -> 8+2+8+1, 82+8+1, 82+81, 8+281, ..., 8281)."""
    if num >= 10 and num < len(memo):
        if memo[num]:
            return memo[num]
    result = []
    divisor = 10
    while divisor < num:
        # for each way of summing the trailing digits (num % divisor),
        # add the leading block num // divisor
        for m in sum_digit_groups(num % divisor):
            result.append(num // divisor + m)
        divisor *= 10
    result.append(num)
    if num >= 10 and num < len(memo):
        memo[num] = result
    return result

def is_s_number(num):
    """num is an S-number if num**2 can be split into digit groups summing to num."""
    sq = num * num
    sss = set(sum_digit_groups(sq))
    return num in sss
t = 0
for x in range(2, 1000000+1):
if is_s_number(x):
t += x*x
print(t)
# 100 => 41333 ok
# 1000000 => 128088830547982 ok
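
# Quick sanity check (illustrative): 91 is an S-number, since 91**2 == 8281 and
# the digit groups 82 + 8 + 1 sum back to 91.
assert 91 in sum_digit_groups(8281)
assert is_s_number(91)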
|
the-stack_0_9181 | # -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
from numpy import nan
from numpy.random import randn
import numpy as np
from pandas import DataFrame, Series, Index, Timestamp, DatetimeIndex
import pandas as pd
import pandas.tseries.offsets as offsets
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameTimeSeriesMethods(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_diff(self):
the_diff = self.tsframe.diff(1)
assert_series_equal(the_diff['A'],
self.tsframe['A'] - self.tsframe['A'].shift(1))
# int dtype
a = 10000000000000000
b = a + 1
s = Series([a, b])
rs = DataFrame({'s': s}).diff()
self.assertEqual(rs.s[1], 1)
# mixed numeric
tf = self.tsframe.astype('float32')
the_diff = tf.diff(1)
assert_series_equal(the_diff['A'],
tf['A'] - tf['A'].shift(1))
# issue 10907
df = pd.DataFrame({'y': pd.Series([2]), 'z': pd.Series([3])})
df.insert(0, 'x', 1)
result = df.diff(axis=1)
expected = pd.DataFrame({'x': np.nan, 'y': pd.Series(
1), 'z': pd.Series(1)}).astype('float64')
assert_frame_equal(result, expected)
def test_diff_timedelta(self):
# GH 4533
df = DataFrame(dict(time=[Timestamp('20130101 9:01'),
Timestamp('20130101 9:02')],
value=[1.0, 2.0]))
res = df.diff()
exp = DataFrame([[pd.NaT, np.nan],
[pd.Timedelta('00:01:00'), 1]],
columns=['time', 'value'])
assert_frame_equal(res, exp)
def test_diff_mixed_dtype(self):
df = DataFrame(np.random.randn(5, 3))
df['A'] = np.array([1, 2, 3, 4, 5], dtype=object)
result = df.diff()
self.assertEqual(result[0].dtype, np.float64)
def test_diff_neg_n(self):
rs = self.tsframe.diff(-1)
xp = self.tsframe - self.tsframe.shift(-1)
assert_frame_equal(rs, xp)
def test_diff_float_n(self):
rs = self.tsframe.diff(1.)
xp = self.tsframe.diff(1)
assert_frame_equal(rs, xp)
def test_diff_axis(self):
# GH 9727
df = DataFrame([[1., 2.], [3., 4.]])
assert_frame_equal(df.diff(axis=1), DataFrame(
[[np.nan, 1.], [np.nan, 1.]]))
assert_frame_equal(df.diff(axis=0), DataFrame(
[[np.nan, np.nan], [2., 2.]]))
def test_pct_change(self):
rs = self.tsframe.pct_change(fill_method=None)
assert_frame_equal(rs, self.tsframe / self.tsframe.shift(1) - 1)
rs = self.tsframe.pct_change(2)
filled = self.tsframe.fillna(method='pad')
assert_frame_equal(rs, filled / filled.shift(2) - 1)
rs = self.tsframe.pct_change(fill_method='bfill', limit=1)
filled = self.tsframe.fillna(method='bfill', limit=1)
assert_frame_equal(rs, filled / filled.shift(1) - 1)
rs = self.tsframe.pct_change(freq='5D')
filled = self.tsframe.fillna(method='pad')
assert_frame_equal(rs, filled / filled.shift(freq='5D') - 1)
def test_pct_change_shift_over_nas(self):
s = Series([1., 1.5, np.nan, 2.5, 3.])
df = DataFrame({'a': s, 'b': s})
chg = df.pct_change()
expected = Series([np.nan, 0.5, np.nan, 2.5 / 1.5 - 1, .2])
edf = DataFrame({'a': expected, 'b': expected})
assert_frame_equal(chg, edf)
def test_shift(self):
# naive shift
shiftedFrame = self.tsframe.shift(5)
self.assert_index_equal(shiftedFrame.index, self.tsframe.index)
shiftedSeries = self.tsframe['A'].shift(5)
assert_series_equal(shiftedFrame['A'], shiftedSeries)
shiftedFrame = self.tsframe.shift(-5)
self.assert_index_equal(shiftedFrame.index, self.tsframe.index)
shiftedSeries = self.tsframe['A'].shift(-5)
assert_series_equal(shiftedFrame['A'], shiftedSeries)
# shift by 0
unshifted = self.tsframe.shift(0)
assert_frame_equal(unshifted, self.tsframe)
# shift by DateOffset
shiftedFrame = self.tsframe.shift(5, freq=offsets.BDay())
self.assertEqual(len(shiftedFrame), len(self.tsframe))
shiftedFrame2 = self.tsframe.shift(5, freq='B')
assert_frame_equal(shiftedFrame, shiftedFrame2)
d = self.tsframe.index[0]
shifted_d = d + offsets.BDay(5)
assert_series_equal(self.tsframe.xs(d),
shiftedFrame.xs(shifted_d), check_names=False)
# shift int frame
int_shifted = self.intframe.shift(1) # noqa
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
self.assert_index_equal(shifted.index, ps.index)
self.assert_index_equal(unshifted.index, ps.index)
tm.assert_numpy_array_equal(unshifted.iloc[:, 0].valid().values,
ps.iloc[:-1, 0].values)
shifted2 = ps.shift(1, 'B')
shifted3 = ps.shift(1, offsets.BDay())
assert_frame_equal(shifted2, shifted3)
assert_frame_equal(ps, shifted2.shift(-1, 'B'))
assertRaisesRegexp(ValueError, 'does not match PeriodIndex freq',
ps.shift, freq='D')
# shift other axis
# GH 6371
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat([DataFrame(np.nan, index=df.index,
columns=[0]),
df.iloc[:, 0:-1]],
ignore_index=True, axis=1)
result = df.shift(1, axis=1)
assert_frame_equal(result, expected)
# shift named axis
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat([DataFrame(np.nan, index=df.index,
columns=[0]),
df.iloc[:, 0:-1]],
ignore_index=True, axis=1)
result = df.shift(1, axis='columns')
assert_frame_equal(result, expected)
def test_shift_bool(self):
df = DataFrame({'high': [True, False],
'low': [False, False]})
rs = df.shift(1)
xp = DataFrame(np.array([[np.nan, np.nan],
[True, False]], dtype=object),
columns=['high', 'low'])
assert_frame_equal(rs, xp)
def test_shift_categorical(self):
# GH 9416
s1 = pd.Series(['a', 'b', 'c'], dtype='category')
s2 = pd.Series(['A', 'B', 'C'], dtype='category')
df = DataFrame({'one': s1, 'two': s2})
rs = df.shift(1)
xp = DataFrame({'one': s1.shift(1), 'two': s2.shift(1)})
assert_frame_equal(rs, xp)
def test_shift_empty(self):
# Regression test for #8019
df = DataFrame({'foo': []})
rs = df.shift(-1)
assert_frame_equal(df, rs)
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_frame_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=offsets.BDay())
assert_frame_equal(shifted, shifted3)
assertRaisesRegexp(ValueError, 'does not match', ps.tshift, freq='M')
# DatetimeIndex
shifted = self.tsframe.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(self.tsframe, unshifted)
shifted2 = self.tsframe.tshift(freq=self.tsframe.index.freq)
assert_frame_equal(shifted, shifted2)
inferred_ts = DataFrame(self.tsframe.values,
Index(np.asarray(self.tsframe.index)),
columns=self.tsframe.columns)
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(shifted, self.tsframe.tshift(1))
assert_frame_equal(unshifted, inferred_ts)
no_freq = self.tsframe.iloc[[0, 5, 7], :]
self.assertRaises(ValueError, no_freq.tshift)
def test_truncate(self):
ts = self.tsframe[::3]
start, end = self.tsframe.index[3], self.tsframe.index[6]
start_missing = self.tsframe.index[2]
end_missing = self.tsframe.index[7]
# neither specified
truncated = ts.truncate()
assert_frame_equal(truncated, ts)
# both specified
expected = ts[1:3]
truncated = ts.truncate(start, end)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(start_missing, end_missing)
assert_frame_equal(truncated, expected)
# start specified
expected = ts[1:]
truncated = ts.truncate(before=start)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(before=start_missing)
assert_frame_equal(truncated, expected)
# end specified
expected = ts[:3]
truncated = ts.truncate(after=end)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(after=end_missing)
assert_frame_equal(truncated, expected)
self.assertRaises(ValueError, ts.truncate,
before=ts.index[-1] - 1,
after=ts.index[0] + 1)
def test_truncate_copy(self):
index = self.tsframe.index
truncated = self.tsframe.truncate(index[5], index[10])
truncated.values[:] = 5.
self.assertFalse((self.tsframe.values[5:11] == 5).any())
def test_asfreq(self):
offset_monthly = self.tsframe.asfreq(offsets.BMonthEnd())
rule_monthly = self.tsframe.asfreq('BM')
assert_almost_equal(offset_monthly['A'], rule_monthly['A'])
filled = rule_monthly.asfreq('B', method='pad') # noqa
# TODO: actually check that this worked.
# don't forget!
filled_dep = rule_monthly.asfreq('B', method='pad') # noqa
# test does not blow up on length-0 DataFrame
zero_length = self.tsframe.reindex([])
result = zero_length.asfreq('BM')
self.assertIsNot(result, zero_length)
def test_asfreq_datetimeindex(self):
df = DataFrame({'A': [1, 2, 3]},
index=[datetime(2011, 11, 1), datetime(2011, 11, 2),
datetime(2011, 11, 3)])
df = df.asfreq('B')
tm.assertIsInstance(df.index, DatetimeIndex)
ts = df['A'].asfreq('B')
tm.assertIsInstance(ts.index, DatetimeIndex)
def test_first_last_valid(self):
N = len(self.frame.index)
mat = randn(N)
mat[:5] = nan
mat[-5:] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
index = frame.first_valid_index()
self.assertEqual(index, frame.index[5])
index = frame.last_valid_index()
self.assertEqual(index, frame.index[-6])
# GH12800
empty = DataFrame()
self.assertIsNone(empty.last_valid_index())
self.assertIsNone(empty.first_valid_index())
def test_operation_on_NaT(self):
# Both NaT and Timestamp are in DataFrame.
df = pd.DataFrame({'foo': [pd.NaT, pd.NaT,
pd.Timestamp('2012-05-01')]})
res = df.min()
exp = pd.Series([pd.Timestamp('2012-05-01')], index=["foo"])
tm.assert_series_equal(res, exp)
res = df.max()
exp = pd.Series([pd.Timestamp('2012-05-01')], index=["foo"])
tm.assert_series_equal(res, exp)
# GH12941, only NaTs are in DataFrame.
df = pd.DataFrame({'foo': [pd.NaT, pd.NaT]})
res = df.min()
exp = pd.Series([pd.NaT], index=["foo"])
tm.assert_series_equal(res, exp)
res = df.max()
exp = pd.Series([pd.NaT], index=["foo"])
tm.assert_series_equal(res, exp)
def test_datetime_assignment_with_NaT_and_diff_time_units(self):
# GH 7492
data_ns = np.array([1, 'nat'], dtype='datetime64[ns]')
result = pd.Series(data_ns).to_frame()
result['new'] = data_ns
expected = pd.DataFrame({0: [1, None],
'new': [1, None]}, dtype='datetime64[ns]')
tm.assert_frame_equal(result, expected)
# OutOfBoundsDatetime error shouldn't occur
data_s = np.array([1, 'nat'], dtype='datetime64[s]')
result['new'] = data_s
expected = pd.DataFrame({0: [1, None],
'new': [1e9, None]}, dtype='datetime64[ns]')
tm.assert_frame_equal(result, expected)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
the-stack_0_9183 | ## Compiled from Members.ipynb on Tue Jun 7 15:10:16 2016
## In [1]:
import salib as sl
import numpy as np
from MemberLoads import EF
## In [2]:
class Member(object):
RELEASES = {'MZJ':2, 'MZK':5}
E = 200000.
G = 77000.
def __init__(self,ident,nodej,nodek):
self.id = ident
self.nodej = nodej
self.nodek = nodek
self.dcx,self.dcy,self.L = nodej.to(nodek)
self.KL = None # stiffness matrix, local coords
self.KG = None # stiffness matrix, global coords
self.releases = set()
self.Ix = None
self.A = None
self.Tm = None # transformation matrix, global to local
##self.fefsl = None # fixed end forces, local coordinates
##self.mefs = None # member end forces, local coordinates
def add_release(self,rel):
r = rel.upper()
if r not in self.RELEASES:
raise Exception('Invalid release name: {}'.format(rel))
self.releases.add(r)
def __repr__(self):
return '{}("{}","{}","{}")'.format(self.__class__.__name__,self.id,self.nodej,self.nodek)
def localK(self):
"""Return the member stiffness matrix in local coordinates"""
L = self.L
E = self.E
A = self.A
I = self.Ix
k0 = E*A/L
k12 = 12.*E*I/L**3
k6 = 6.*E*I/L**2
k4 = 4.*E*I/L
k2 = 2.*E*I/L
KL = np.mat([[ k0, 0, 0, -k0, 0, 0],
[ 0, k12, k6, 0, -k12, k6],
[ 0, k6, k4, 0, -k6, k2],
[-k0, 0, 0, k0, 0, 0],
[ 0, -k12, -k6, 0, k12, -k6],
[ 0, k6, k2, 0, -k6, k4]])
for r in self.releases:
KL = self.releaseK(KL,self.RELEASES[r])
        self.KL = KL   # cache the local stiffness matrix (attribute declared in __init__)
return KL
    def releaseK(self,Kl,rel):
        """Return a modified stiffness matrix to account for a moment release
        at one of the ends. Kl is the original local stiffness matrix and 'rel'
        is 2 or 5, the local dof # of the released end moment (MZJ or MZK)."""
L = self.L
if rel == 2:
if Kl[5,5] == 0.: # is other end also pinned?
em = np.mat([1.,0.]).T # corrective end moments, far end pinned
else:
em = np.mat([1.,0.5]).T # corrective end moments, far end fixed
elif rel == 5:
if Kl[2,2] == 0.:
em = np.mat([0.,1.]).T
else:
em = np.mat([0.5,1.]).T
else:
raise ValueError("Invalid release #: {}".format(rel))
Tf = np.mat([[0.,0.],[1./L,1./L],[1.,0.],[0.,0.],[-1./L,-1./L],[0.,1.]])
M = Tf*em
K = Kl.copy()
K[:,1] -= M*K[rel,1] # col 1 - forces for unit vertical displacment at j-end
K[:,2] -= M*K[rel,2] # col 2 - forces for unit rotation at j-end
K[:,4] -= M*K[rel,4] # col 4 - forces for unit vertical displacment at k-end
K[:,5] -= M*K[rel,5] # col 5 - forces for unit rotation at k-end
return K
def transform(self):
"""Return a transformation matrix to transform forces and displacements
in global coordinates to local coordinates for the 2-d frame member.
This is called the member transformation matrix, Tm"""
cx = self.dcx
cy = self.dcy
self.Tm = np.mat([[ cx, cy, 0, 0, 0, 0],
[-cy, cx, 0, 0, 0, 0],
[ 0, 0, 1, 0, 0, 0],
[ 0, 0, 0, cx, cy, 0],
[ 0, 0, 0, -cy, cx, 0],
[ 0, 0, 0, 0, 0, 1]])
return self.Tm
def fefs(self,loads_factors):
fef = sum([l.fefs()*f for l,f in loads_factors],EF())
if loads_factors:
for r in self.releases:
fef = self.releaseFEF(fef,self.RELEASES[r])
return fef
def vm(self,loads,mefs=None):
"""Return shear and moment 'diagrams'. Return (xv,v,xm,m) -
xv and xm are positions along span, and v and m are shears and
moments at those points. Use normal sign convention (not beam sign
convention) - on left FBD, moments +ive CCW, shear +ive upwards.
"""
def _getx(self,loads,attr):
degree = 0
pts = [0.,self.L]
for load in loads:
pt1,pt2,d = getattr(load,attr)
for p in pt1,pt2:
if p is not None:
pts.append(p)
if d > degree:
degree = d
ptsv = np.array(pts)
if degree > 1:
ptsv = np.concatenate((ptsv,np.linspace(0,self.L)))
ptsv.sort()
return np.unique(ptsv)
xv = _getx(self,loads,'vpts')
xm = _getx(self,loads,'mpts')
        # NOTE: only the x-positions are assembled so far; the shear and moment
        # ordinates are not computed yet and are returned as None placeholders.
        return xv,None,xm,None
def releaseFEF(self,mef,rel):
"""Return a modified fixed end force vector to account for a moment release
at one of the ends. mef is the original member end force, 'rel' is 2 or 5 to identify
the local dof # of the released moment."""
fef = mef.fefs
L = self.L
if rel == 2:
if fef[5,0] == 0.: # is other end also pinned?
em = np.mat([1.,0.]).T # corrective end moments, far end pinned
else:
em = np.mat([1.,0.5]).T # corrective end moments, far end fixed
elif rel == 5:
if fef[2,0] == 0.:
em = np.mat([0.,1.]).T
else:
em = np.mat([0.5,1.]).T
else:
raise ValueError("Invalid release #: {}".format(rel))
Tf = np.mat([[0.,0.],[1./L,1./L],[1.,0.],[0.,0.],[-1./L,-1./L],[0.,1.]])
M = Tf*em
return EF(fef - M*fef[rel])
## In [ ]:
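# Minimal usage sketch. _StubNode is a stand-in for the project's real node
# objects (it only provides the .to() interface Member relies on), and the
# section properties A and Ix below are arbitrary assumed values.
class _StubNode(object):

    def __init__(self, ident, x, y):
        self.id = ident
        self.x = x
        self.y = y

    def to(self, other):
        dx = other.x - self.x
        dy = other.y - self.y
        L = (dx**2 + dy**2)**0.5
        return dx/L, dy/L, L

    def __str__(self):
        return self.id

if __name__ == '__main__':
    j = _StubNode('A', 0., 0.)
    k = _StubNode('B', 4000., 3000.)   # a 5000-unit-long member
    m = Member('AB', j, k)
    m.A = 4000.       # cross-sectional area (assumed)
    m.Ix = 50E6       # moment of inertia (assumed)
    print(m.localK())      # 6x6 member stiffness matrix, local coordinates
    print(m.transform())   # global-to-local transformation matrix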
|
the-stack_0_9186 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('billing', '0006_auto_20150401_2006'),
]
operations = [
migrations.AlterField(
model_name='rfidcard',
name='identifier',
field=models.CharField(unique=True, max_length=50, verbose_name='identifier'),
preserve_default=True,
),
]
|
the-stack_0_9188 | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Jigar Tarpara and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.utils import get_datetime, time_diff_in_hours
from frappe.utils import cint, cstr, flt
class Coating(Document):
def validate(self):
self.manage_reel()
if self.end_dt and self.start_dt:
hours = time_diff_in_hours(self.end_dt, self.start_dt)
frappe.db.set(self, 'operation_hours', hours)
self.calculate_ldap()
def onload(self):
paper_blank_setting = frappe.get_doc("Paper Blank Settings","Paper Blank Settings")
self.set_onload("scrapitemgroup", paper_blank_setting.coating_scrap)
self.set_onload("coated_item_group", paper_blank_setting.coated_item_group)
def calculate_ldap(self):
ldap = 0
for data in self.coating_table:
if not data.half_reel:
ldap += float(data.weight) * 0.08
self.ldpe_bag = ldap
def manage_reel(self):
# setting = frappe.get_doc("PNI Settings","PNI Settings")
reel_outs = []
for data in self.coating_table:
reel_in = frappe.get_doc("Reel",data.reel_in)
if not data.reel_out:
doc = frappe.get_doc({
"doctype": "Reel",
"status": "Draft",
"process_prefix": "CO",
"posting_date": self.date
})
doc.insert()
data.reel_out = doc.name
else:
doc = frappe.get_doc("Reel",data.reel_out)
if data.reel_out not in reel_outs:
reel_outs.append(data.reel_out)
else:
frappe.throw("Duplicate Reel Out Id "+data.reel_out)
if not data.item_out:
frappe.throw("Out Reel Item Not Available {0}".format(data.reel_in))
doc.type = reel_in.type
doc.item = data.item_out
doc.brand = reel_in.brand
doc.supplier_reel_id = reel_in.supplier_reel_id
doc.warehouse = self.fg_warehouse if not data.half_reel else self.src_warehouse
doc.printed_item = reel_in.printed_item
doc.custom_id = data.custom_id
doc.blank_weight = reel_in.blank_weight
doc.coated_reel = True if not data.half_reel else False
doc.printed_reel = reel_in.printed_reel
doc.printed_weight = reel_in.printed_weight
doc.coated_weight = data.weight_out if not data.half_reel else ""
doc.weight = data.weight_out
if data.half_reel:
doc.warehouse = self.src_warehouse
doc.coated_reel = False
doc.coated_weight = ""
doc.save()
def manage_reel_tracking(self):
# setting = frappe.get_doc("PNI Settings","PNI Settings")
for data in self.coating_table:
doc = frappe.get_doc({
"doctype": "Reel Tracking",
"status": "Draft",
"reel": data.reel_in,
"reel_process": "Coating",
"date": frappe.utils.nowdate(),
"time": frappe.utils.nowtime(),
"out_reel": data.reel_out,
"status": "Coating Submit",
"process_reference": self.name,
"note": "" if not data.half_reel else "Half Process Uncoated"
})
doc.insert(ignore_permissions=True)
def cancel_reel_tracking(self):
# setting = frappe.get_doc("PNI Settings","PNI Settings")
for data in self.coating_table:
doc = frappe.get_doc({
"doctype": "Reel Tracking",
"status": "Draft",
"reel": data.reel_in,
"reel_process": "Coating",
"date": frappe.utils.nowdate(),
"time": frappe.utils.nowtime(),
"out_reel": data.reel_out,
"status": "Coating Cancel",
"process_reference": self.name,
"note": "" if not data.half_reel else "Half Process Uncoated"
})
doc.insert(ignore_permissions=True)
def on_submit(self):
# if (not self.end_dt) or (not self.end_dt):
# frappe.throw("Please Select Operation Start and End Time")
for item in self.coating_table:
if (not item.reel_in) or (not item.reel_out) :
frappe.throw("Reel is Compulsory")
if not self.ldpe_warehouse:
frappe.throw("LDPE Warehouse Mandatory")
for data in self.coating_table:
if not data.weight_out:
frappe.throw("Weight Can't be empty")
reel_in = frappe.get_doc("Reel",data.reel_in)
reel_in.status = "Consume"
reel_in.save()
reel_out = frappe.get_doc("Reel",data.reel_out)
reel_out.status = "In Stock"
reel_out.save()
reel_out.submit()
self.manage_reel_tracking()
frappe.db.set(self, 'status', 'Pending For Stock Entry')
def on_cancel(self):
stock_entry = frappe.db.sql("""select name from `tabStock Entry`
where pni_reference = %s and docstatus = 1""", self.name)
if stock_entry:
frappe.throw(_("Cannot cancel because submitted Stock Entry \
{0} exists").format(stock_entry[0][0]))
frappe.db.set(self, 'status', 'Cancelled')
for data in self.coating_table:
reel_in = frappe.get_doc("Reel",data.reel_in)
reel_in.status = "In Stock"
reel_in.save()
reel_out = frappe.get_doc("Reel",data.reel_out)
frappe.msgprint("Reel {0} Canceled.".format(data.reel_out))
if reel_out.docstatus == 1:
reel_out.cancel()
self.cancel_reel_tracking()
def manufacture_entry(self, status):
return self.make_stock_entry(status)
def make_stock_entry(self, status):
stock_entry = frappe.new_doc("Stock Entry")
stock_entry.pni_reference_type = "Coating"
stock_entry.pni_reference = self.name
stock_entry.pni_shift = self.shift_time
stock_entry.posting_date = self.date
stock_entry.set_posting_time = True
stock_entry.stock_entry_type = "Manufacture"
stock_entry = self.set_se_items_finish(stock_entry)
return stock_entry.as_dict()
def get_valuation_rate(self, item):
""" Get weighted average of valuation rate from all warehouses """
total_qty, total_value, valuation_rate = 0.0, 0.0, 0.0
for d in frappe.db.sql("""select actual_qty, stock_value from `tabBin`
where item_code=%s""", item, as_dict=1):
total_qty += flt(d.actual_qty)
total_value += flt(d.stock_value)
if total_qty:
valuation_rate = total_value / total_qty
if valuation_rate <= 0:
last_valuation_rate = frappe.db.sql("""select valuation_rate
from `tabStock Ledger Entry`
where item_code = %s and valuation_rate > 0
order by posting_date desc, posting_time desc, creation desc limit 1""", item)
valuation_rate = flt(last_valuation_rate[0][0]) if last_valuation_rate else 0
if not valuation_rate:
valuation_rate = frappe.db.get_value("Item", item, "valuation_rate")
return flt(valuation_rate)
def set_se_items_finish(self, se):
se.from_warehouse = self.src_warehouse
se.to_warehouse = self.fg_warehouse
raw_material_cost = 0
operating_cost = 0
reelin = []
for item in self.coating_table:
if item.reel_in not in reelin:
se = self.set_se_items(se, item, se.from_warehouse, None, False, reel_in= True)
reelin.append(item.reel_in)
raw_material_cost += self.get_valuation_rate(item.item) * float(item.weight)
paper_blank_setting = frappe.get_doc("Paper Blank Settings","Paper Blank Settings")
ldpe_item =paper_blank_setting.ldpe_bag
if not ldpe_item:
frappe.throw("Please Sett LDPE Item in Paper Blank Settings")
raw_material_cost += self.get_valuation_rate(ldpe_item) * float(self.ldpe_bag)
se = self.set_se_items(se, ldpe_item, self.ldpe_warehouse, None, False, ldpe = True)
production_cost = raw_material_cost + operating_cost
qty_of_total_production = 0
total_sale_value = 0
for item in self.coating_table:
if item.weight_out > 0 and not item.half_reel:
qty_of_total_production = float(qty_of_total_production) + float(item.weight_out)
for item in self.coating_table:
se = self.set_se_items(se, item, None, se.to_warehouse if not item.half_reel else se.from_warehouse, True,
qty_of_total_production, total_sale_value,
production_cost, reel_out = True)
for item in self.coating_scrap:
se = self.set_se_items(se, item, None, self.scrap_warehouse,
False, scrap_item = True)
return se
def set_se_items(self, se, item, s_wh, t_wh, calc_basic_rate=False,
qty_of_total_production=None, total_sale_value=None, production_cost=None,
reel_in = False, reel_out = False, scrap_item = False, ldpe = False):
item_from_reel = {}
class Empty:
pass
if reel_in:
item_from_reel = frappe.get_doc("Reel",item.reel_in)
if reel_out:
item_from_reel = frappe.get_doc("Reel",item.reel_out)
if scrap_item:
item_from_reel = Empty()
item_from_reel.item = item.item
item_from_reel.weight = item.qty
if ldpe:
item_from_reel = Empty()
item_from_reel.item = item
item_from_reel.weight = self.ldpe_bag
expense_account, cost_center = frappe.db.get_values("Company", self.company, \
["default_expense_account", "cost_center"])[0]
item_name, stock_uom, description = frappe.db.get_values("Item", item_from_reel.item, \
["item_name", "stock_uom", "description"])[0]
item_expense_account, item_cost_center = frappe.db.get_value("Item Default",
{
'parent': item_from_reel.item,
'company': self.company
},\
["expense_account", "buying_cost_center"])
if not expense_account and not item_expense_account:
frappe.throw(
_("Please update default Default Cost of Goods Sold Account for company {0}").format(self.company))
if not cost_center and not item_cost_center:
frappe.throw(_("Please update default Cost Center for company {0}").format(self.company))
se_item = se.append("items")
se_item.item_code = item_from_reel.item
se_item.qty = item_from_reel.weight
se_item.s_warehouse = s_wh
se_item.t_warehouse = t_wh
se_item.item_name = item_name
se_item.description = description
se_item.uom = stock_uom
se_item.stock_uom = stock_uom
se_item.expense_account = item_expense_account or expense_account
se_item.cost_center = item_cost_center or cost_center
se_item.transfer_qty = item_from_reel.weight
se_item.conversion_factor = 1.00
item_details = se.run_method( "get_item_details",args = (frappe._dict(
{
"item_code": item_from_reel.item,
"company": self.company,
"uom": stock_uom,
"s_warehouse": s_wh})), for_update=True)
for f in ("uom", "stock_uom", "description", "item_name", "expense_account",
"cost_center", "conversion_factor"):
se_item.set(f, item_details.get(f))
if calc_basic_rate:
se_item.basic_rate = production_cost/qty_of_total_production
if scrap_item:
se_item.basic_rate = self.get_valuation_rate(item_from_reel.item)
return se
|
the-stack_0_9189 | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.runner import auto_fp16, force_fp32
from torch.nn.modules.utils import _pair
from mmdet.core import build_bbox_coder, multi_apply, multiclass_nms
from mmdet.models.builder import HEADS, build_loss
from mmdet.models.losses import accuracy
@HEADS.register_module()
class BBoxHead(nn.Module):
"""Simplest RoI head, with only two fc layers for classification and
regression respectively."""
def __init__(self,
with_avg_pool=False,
with_cls=True,
with_reg=True,
roi_feat_size=7,
in_channels=256,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
clip_border=True,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
reg_decoded_bbox=False,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=1.0)):
super(BBoxHead, self).__init__()
assert with_cls or with_reg
self.with_avg_pool = with_avg_pool
self.with_cls = with_cls
self.with_reg = with_reg
self.roi_feat_size = _pair(roi_feat_size)
self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1]
self.in_channels = in_channels
self.num_classes = num_classes
self.reg_class_agnostic = reg_class_agnostic
self.reg_decoded_bbox = reg_decoded_bbox
self.fp16_enabled = False
self.bbox_coder = build_bbox_coder(bbox_coder)
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
in_channels = self.in_channels
if self.with_avg_pool:
self.avg_pool = nn.AvgPool2d(self.roi_feat_size)
else:
in_channels *= self.roi_feat_area
if self.with_cls:
# need to add background class
self.fc_cls = nn.Linear(in_channels, num_classes + 1)
if self.with_reg:
out_dim_reg = 4 if reg_class_agnostic else 4 * num_classes
self.fc_reg = nn.Linear(in_channels, out_dim_reg)
self.debug_imgs = None
def init_weights(self):
# conv layers are already initialized by ConvModule
if self.with_cls:
nn.init.normal_(self.fc_cls.weight, 0, 0.01)
nn.init.constant_(self.fc_cls.bias, 0)
if self.with_reg:
nn.init.normal_(self.fc_reg.weight, 0, 0.001)
nn.init.constant_(self.fc_reg.bias, 0)
@auto_fp16()
def forward(self, x):
if self.with_avg_pool:
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
cls_score = self.fc_cls(x) if self.with_cls else None
bbox_pred = self.fc_reg(x) if self.with_reg else None
return cls_score, bbox_pred
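    # Illustrative usage sketch (an added assumption, not taken from mmdetection
    # itself): with the default arguments the head maps pooled RoI features to
    # per-class scores and class-specific box deltas, e.g.
    #   >>> head = BBoxHead(in_channels=256, roi_feat_size=7, num_classes=80)
    #   >>> cls_score, bbox_pred = head(torch.rand(8, 256, 7, 7))
    #   >>> cls_score.shape   # torch.Size([8, 81])  -- num_classes + 1 background
    #   >>> bbox_pred.shape   # torch.Size([8, 320]) -- 4 * num_classes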
def _get_target_single(self, pos_bboxes, neg_bboxes, pos_gt_bboxes,
pos_gt_labels, cfg):
"""Calculate the ground truth for proposals in the single image
according to the sampling results.
Args:
pos_bboxes (Tensor): Contains all the positive boxes,
has shape (num_pos, 4), the last dimension 4
represents [tl_x, tl_y, br_x, br_y].
neg_bboxes (Tensor): Contains all the negative boxes,
has shape (num_neg, 4), the last dimension 4
represents [tl_x, tl_y, br_x, br_y].
pos_gt_bboxes (Tensor): Contains all the gt_boxes,
has shape (num_gt, 4), the last dimension 4
represents [tl_x, tl_y, br_x, br_y].
pos_gt_labels (Tensor): Contains all the gt_labels,
has shape (num_gt).
cfg (obj:`ConfigDict`): `train_cfg` of R-CNN.
Returns:
Tuple[Tensor]: Ground truth for proposals
in a single image. Containing the following Tensors:
- labels(Tensor): Gt_labels for all proposals, has
shape (num_proposals,).
- label_weights(Tensor): Labels_weights for all
proposals, has shape (num_proposals,).
- bbox_targets(Tensor):Regression target for all
proposals, has shape (num_proposals, 4), the
last dimension 4 represents [tl_x, tl_y, br_x, br_y].
- bbox_weights(Tensor):Regression weights for all
proposals, has shape (num_proposals, 4).
"""
num_pos = pos_bboxes.size(0)
num_neg = neg_bboxes.size(0)
num_samples = num_pos + num_neg
# original implementation uses new_zeros since BG are set to be 0
# now use empty & fill because BG cat_id = num_classes,
# FG cat_id = [0, num_classes-1]
labels = pos_bboxes.new_full((num_samples, ),
self.num_classes,
dtype=torch.long)
label_weights = pos_bboxes.new_zeros(num_samples)
bbox_targets = pos_bboxes.new_zeros(num_samples, 4)
bbox_weights = pos_bboxes.new_zeros(num_samples, 4)
if num_pos > 0:
labels[:num_pos] = pos_gt_labels
pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
label_weights[:num_pos] = pos_weight
if not self.reg_decoded_bbox:
pos_bbox_targets = self.bbox_coder.encode(
pos_bboxes, pos_gt_bboxes)
else:
# When the regression loss (e.g. `IouLoss`, `GIouLoss`)
# is applied directly on the decoded bounding boxes, both
# the predicted boxes and regression targets should be with
# absolute coordinate format.
pos_bbox_targets = pos_gt_bboxes
bbox_targets[:num_pos, :] = pos_bbox_targets
bbox_weights[:num_pos, :] = 1
if num_neg > 0:
label_weights[-num_neg:] = 1.0
return labels, label_weights, bbox_targets, bbox_weights
def get_targets(self,
sampling_results,
gt_bboxes,
gt_labels,
rcnn_train_cfg,
concat=True):
"""Calculate the ground truth for all samples in a batch according to
the sampling_results.
        The per-image targets are computed by `_get_target_single` and are
        optionally concatenated across the batch when `concat=True`.
Args:
sampling_results (List[obj:SamplingResults]): Assign results of
all images in a batch after sampling.
gt_bboxes (list[Tensor]): Gt_bboxes of all images in a batch,
each tensor has shape (num_gt, 4), the last dimension 4
represents [tl_x, tl_y, br_x, br_y].
gt_labels (list[Tensor]): Gt_labels of all images in a batch,
each tensor has shape (num_gt,).
rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
concat (bool): Whether to concatenate the results of all
the images in a single batch.
Returns:
Tuple[Tensor]: Ground truth for proposals in a single image.
Containing the following list of Tensors:
- labels (list[Tensor],Tensor): Gt_labels for all
proposals in a batch, each tensor in list has
shape (num_proposals,) when `concat=False`, otherwise
just a single tensor has shape (num_all_proposals,).
- label_weights (list[Tensor]): Labels_weights for
all proposals in a batch, each tensor in list has
shape (num_proposals,) when `concat=False`, otherwise
just a single tensor has shape (num_all_proposals,).
- bbox_targets (list[Tensor],Tensor): Regression target
for all proposals in a batch, each tensor in list
has shape (num_proposals, 4) when `concat=False`,
otherwise just a single tensor has shape
(num_all_proposals, 4), the last dimension 4 represents
[tl_x, tl_y, br_x, br_y].
- bbox_weights (list[tensor],Tensor): Regression weights for
all proposals in a batch, each tensor in list has shape
(num_proposals, 4) when `concat=False`, otherwise just a
single tensor has shape (num_all_proposals, 4).
"""
pos_bboxes_list = [res.pos_bboxes for res in sampling_results]
neg_bboxes_list = [res.neg_bboxes for res in sampling_results]
pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]
pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]
labels, label_weights, bbox_targets, bbox_weights = multi_apply(
self._get_target_single,
pos_bboxes_list,
neg_bboxes_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
cfg=rcnn_train_cfg)
if concat:
labels = torch.cat(labels, 0)
label_weights = torch.cat(label_weights, 0)
bbox_targets = torch.cat(bbox_targets, 0)
bbox_weights = torch.cat(bbox_weights, 0)
return labels, label_weights, bbox_targets, bbox_weights
@force_fp32(apply_to=('cls_score', 'bbox_pred'))
def loss(self,
cls_score,
bbox_pred,
rois,
labels,
label_weights,
bbox_targets,
bbox_weights,
reduction_override=None):
losses = dict()
if cls_score is not None:
avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
if cls_score.numel() > 0:
losses['loss_cls'] = self.loss_cls(
cls_score,
labels,
label_weights,
avg_factor=avg_factor,
reduction_override=reduction_override)
losses['acc'] = accuracy(cls_score, labels)
if bbox_pred is not None:
bg_class_ind = self.num_classes
# 0~self.num_classes-1 are FG, self.num_classes is BG
pos_inds = (labels >= 0) & (labels < bg_class_ind)
# do not perform bounding box regression for BG anymore.
if pos_inds.any():
if self.reg_decoded_bbox:
# When the regression loss (e.g. `IouLoss`,
# `GIouLoss`, `DIouLoss`) is applied directly on
# the decoded bounding boxes, it decodes the
# already encoded coordinates to absolute format.
bbox_pred = self.bbox_coder.decode(rois[:, 1:], bbox_pred)
if self.reg_class_agnostic:
pos_bbox_pred = bbox_pred.view(
bbox_pred.size(0), 4)[pos_inds.type(torch.bool)]
else:
pos_bbox_pred = bbox_pred.view(
bbox_pred.size(0), -1,
4)[pos_inds.type(torch.bool),
labels[pos_inds.type(torch.bool)]]
losses['loss_bbox'] = self.loss_bbox(
pos_bbox_pred,
bbox_targets[pos_inds.type(torch.bool)],
bbox_weights[pos_inds.type(torch.bool)],
avg_factor=bbox_targets.size(0),
reduction_override=reduction_override)
else:
losses['loss_bbox'] = bbox_pred[pos_inds].sum()
return losses
@force_fp32(apply_to=('cls_score', 'bbox_pred'))
def get_bboxes(self,
rois,
cls_score,
bbox_pred,
img_shape,
scale_factor,
rescale=False,
cfg=None):
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
scores = F.softmax(cls_score, dim=1) if cls_score is not None else None
if bbox_pred is not None:
bboxes = self.bbox_coder.decode(
rois[:, 1:], bbox_pred, max_shape=img_shape)
else:
bboxes = rois[:, 1:].clone()
if img_shape is not None:
bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1])
bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0])
if rescale and bboxes.size(0) > 0:
if isinstance(scale_factor, float):
bboxes /= scale_factor
else:
scale_factor = bboxes.new_tensor(scale_factor)
bboxes = (bboxes.view(bboxes.size(0), -1, 4) /
scale_factor).view(bboxes.size()[0], -1)
if cfg is None:
return bboxes, scores
else:
det_bboxes, det_labels = multiclass_nms(bboxes, scores,
cfg.score_thr, cfg.nms,
cfg.max_per_img)
return det_bboxes, det_labels
@force_fp32(apply_to=('bbox_preds', ))
def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):
"""Refine bboxes during training.
Args:
rois (Tensor): Shape (n*bs, 5), where n is image number per GPU,
and bs is the sampled RoIs per image. The first column is
the image id and the next 4 columns are x1, y1, x2, y2.
labels (Tensor): Shape (n*bs, ).
bbox_preds (Tensor): Shape (n*bs, 4) or (n*bs, 4*#class).
pos_is_gts (list[Tensor]): Flags indicating if each positive bbox
is a gt bbox.
img_metas (list[dict]): Meta info of each image.
Returns:
list[Tensor]: Refined bboxes of each image in a mini-batch.
Example:
>>> # xdoctest: +REQUIRES(module:kwarray)
>>> import kwarray
>>> import numpy as np
>>> from mmdet.core.bbox.demodata import random_boxes
>>> self = BBoxHead(reg_class_agnostic=True)
>>> n_roi = 2
>>> n_img = 4
>>> scale = 512
>>> rng = np.random.RandomState(0)
>>> img_metas = [{'img_shape': (scale, scale)}
... for _ in range(n_img)]
>>> # Create rois in the expected format
>>> roi_boxes = random_boxes(n_roi, scale=scale, rng=rng)
>>> img_ids = torch.randint(0, n_img, (n_roi,))
>>> img_ids = img_ids.float()
>>> rois = torch.cat([img_ids[:, None], roi_boxes], dim=1)
>>> # Create other args
>>> labels = torch.randint(0, 2, (n_roi,)).long()
>>> bbox_preds = random_boxes(n_roi, scale=scale, rng=rng)
>>> # For each image, pretend random positive boxes are gts
>>> is_label_pos = (labels.numpy() > 0).astype(np.int)
>>> lbl_per_img = kwarray.group_items(is_label_pos,
... img_ids.numpy())
>>> pos_per_img = [sum(lbl_per_img.get(gid, []))
... for gid in range(n_img)]
>>> pos_is_gts = [
>>> torch.randint(0, 2, (npos,)).byte().sort(
>>> descending=True)[0]
>>> for npos in pos_per_img
>>> ]
>>> bboxes_list = self.refine_bboxes(rois, labels, bbox_preds,
>>> pos_is_gts, img_metas)
>>> print(bboxes_list)
"""
img_ids = rois[:, 0].long().unique(sorted=True)
assert img_ids.numel() <= len(img_metas)
bboxes_list = []
for i in range(len(img_metas)):
inds = torch.nonzero(
rois[:, 0] == i, as_tuple=False).squeeze(dim=1)
num_rois = inds.numel()
bboxes_ = rois[inds, 1:]
label_ = labels[inds]
bbox_pred_ = bbox_preds[inds]
img_meta_ = img_metas[i]
pos_is_gts_ = pos_is_gts[i]
bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,
img_meta_)
# filter gt bboxes
pos_keep = 1 - pos_is_gts_
keep_inds = pos_is_gts_.new_ones(num_rois)
keep_inds[:len(pos_is_gts_)] = pos_keep
bboxes_list.append(bboxes[keep_inds.type(torch.bool)])
return bboxes_list
@force_fp32(apply_to=('bbox_pred', ))
def regress_by_class(self, rois, label, bbox_pred, img_meta):
"""Regress the bbox for the predicted class. Used in Cascade R-CNN.
Args:
rois (Tensor): shape (n, 4) or (n, 5)
label (Tensor): shape (n, )
bbox_pred (Tensor): shape (n, 4*(#class)) or (n, 4)
img_meta (dict): Image meta info.
Returns:
Tensor: Regressed bboxes, the same shape as input rois.
"""
assert rois.size(1) == 4 or rois.size(1) == 5, repr(rois.shape)
if not self.reg_class_agnostic:
label = label * 4
inds = torch.stack((label, label + 1, label + 2, label + 3), 1)
bbox_pred = torch.gather(bbox_pred, 1, inds)
assert bbox_pred.size(1) == 4
if rois.size(1) == 4:
new_rois = self.bbox_coder.decode(
rois, bbox_pred, max_shape=img_meta['img_shape'])
else:
bboxes = self.bbox_coder.decode(
rois[:, 1:], bbox_pred, max_shape=img_meta['img_shape'])
new_rois = torch.cat((rois[:, [0]], bboxes), dim=1)
return new_rois
|
the-stack_0_9191 | import copy
import itertools
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from torch.autograd import Variable
import random
from scipy.spatial.distance import cdist
from sklearn.preprocessing import normalize
from torch import nn, optim
from torch.utils.data import dataloader
from torchvision import transforms
from torchvision.models.resnet import Bottleneck, resnet50
from torchvision.transforms import functional
from .resnet import ResNet
import cv2
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
nn.init.constant_(m.bias, 0.0)
elif classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
elif classname.find('BatchNorm') != -1:
if m.affine:
nn.init.normal_(m.weight, 1.0, 0.02)
nn.init.constant_(m.bias, 0.0)
def weights_init_classifier(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.normal_(m.weight, std=0.001)
if m.bias:
nn.init.constant_(m.bias, 0.0)
class SELayer(nn.Module):
def __init__(self, channel, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y
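# Minimal usage sketch (illustrative assumption, not part of the original training code):
#   >>> se = SELayer(channel=2048, reduction=16)
#   >>> feat = torch.rand(4, 2048, 24, 8)
#   >>> se(feat).shape    # torch.Size([4, 2048, 24, 8]) -- channel-wise reweighting only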
class BatchCrop(nn.Module):
def __init__(self, ratio):
super(BatchCrop, self).__init__()
self.ratio = ratio
def forward(self, x):
if self.training:
h, w = x.size()[-2:]
rw = int(self.ratio * w)
start = random.randint(0, h - 1)
if start + rw > h:
select = list(range(0, start + rw - h)) + list(range(start, h))
else:
select = list(range(start, start + rw))
mask = x.new_zeros(x.size())
mask[:, :, select, :] = 1
x = x * mask
return x
class BatchDrop(nn.Module):
def __init__(self, h_ratio, w_ratio):
super(BatchDrop, self).__init__()
self.h_ratio = h_ratio
self.w_ratio = w_ratio
def forward(self, x):
if self.training:
h, w = x.size()[-2:]
rh = round(self.h_ratio * h)
rw = round(self.w_ratio * w)
sx = random.randint(0, h - rh)
sy = random.randint(0, w - rw)
mask = x.new_ones(x.size())
mask[:, :, sx:sx + rh, sy:sy + rw] = 0
x = x * mask
return x
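# Illustrative sketch (assumption): in training mode BatchDrop zeroes the same random
# rh x rw block of the feature map for every sample in the batch, e.g.
#   >>> drop = BatchDrop(h_ratio=0.3, w_ratio=1.0)
#   >>> drop.train()
#   >>> out = drop(torch.rand(8, 2048, 24, 8))   # same shape, one block zeroed out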
class ResNetBuilder(nn.Module):
in_planes = 2048
def __init__(self, num_classes=None, last_stride=1, pretrained=False):
super().__init__()
self.base = ResNet(last_stride)
if pretrained:
model_url = 'https://download.pytorch.org/models/resnet50-19c8e357.pth'
self.base.load_param(model_zoo.load_url(model_url))
self.num_classes = num_classes
if num_classes is not None:
self.bottleneck = nn.Sequential(
nn.Linear(self.in_planes, 512),
nn.BatchNorm1d(512),
nn.LeakyReLU(0.1),
nn.Dropout(p=0.5)
)
self.bottleneck.apply(weights_init_kaiming)
self.classifier = nn.Linear(512, self.num_classes)
self.classifier.apply(weights_init_classifier)
def forward(self, x):
global_feat = self.base(x)
global_feat = F.avg_pool2d(global_feat, global_feat.shape[2:]) # (b, 2048, 1, 1)
global_feat = global_feat.view(global_feat.shape[0], -1)
if self.training and self.num_classes is not None:
feat = self.bottleneck(global_feat)
cls_score = self.classifier(feat)
return [global_feat], [cls_score]
else:
return global_feat
def get_optim_policy(self):
base_param_group = self.base.parameters()
if self.num_classes is not None:
add_param_group = itertools.chain(self.bottleneck.parameters(), self.classifier.parameters())
return [
{'params': base_param_group},
{'params': add_param_group}
]
else:
return [
{'params': base_param_group}
]
def number_of_certain_probability(sequence, probability):
x = random.uniform(0, 1)
cumulative_probability = 0.0
for item, item_probability in zip(sequence, probability):
cumulative_probability += item_probability
if x < cumulative_probability:
break
return item
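# Illustrative sketch (assumption): draws one item according to the given discrete
# distribution, e.g.
#   >>> number_of_certain_probability(['easy', 'medium', 'hard'], [0.2, 0.3, 0.5])
#   'hard'   # returned roughly half of the time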
class CutMixBatchDrop(nn.Module):
def __init__(self, h_ratio, w_ratio):
super(CutMixBatchDrop, self).__init__()
self.h_ratio = h_ratio
self.w_ratio = w_ratio
def forward(self, x, cam, y, step, k, randy=None):
'''
:param x: feature map with shape[N,2048,24,8]
:param cam: grad cam map with shape[N,24,8]
:param y: gt of x, [N,1]
:return: x_new = mask*x1 + (1-mask)*x2
'''
if self.training:
bs, c, h, w = x.size()
mask = torch.ones(x.size()).cuda()
'''get mask'''
_, f_h, f_w = cam.size()
cam_patchs_row = torch.split(cam, step[0], 1)
patchs_row = torch.stack(cam_patchs_row, 1)
cam_patchs_col = torch.split(patchs_row, step[1], 3)
patchs_col = torch.cat(cam_patchs_col, 1)
patchs = patchs_col.sum(dim=[2, 3]) # N*12
patchs, patchs_idxs = patchs.sort(1, True)
# l = int(f_h / step[0])
for i in range(bs):
for idx in patchs_idxs[i, 0:k]:
if idx < 8:
mask[i, :, idx * step[0]:idx * step[0] + step[0], 0:2] = 0
elif idx < 16:
mask[i, :, (idx - 8) * step[0]:(idx - 8) * step[0] + step[0], 2:4] = 0
elif idx < 24:
mask[i, :, (idx - 16) * step[0]:(idx - 16) * step[0] + step[0], 4:6] = 0
else:
mask[i, :, (idx - 24) * step[0]:(idx - 24) * step[0] + step[0], 6:8] = 0
# print(mask[i,0,:,:])
'''CutMix'''
lamda = k * step[0] * step[1] / (h * w)
bs, c, h, w = x.size()
if randy is not None:
rand_idx = randy
y2 = y[randy]
# y_temp = y.reshape([-1, 4])
# rand_idx = randy
# y2 = y_temp[:, rand_idx].reshape([-1, 1])
# x2 = x.reshape([-1, 4, c, h, w])
# x2 = x2[:,rand_idx,:,:,:].reshape([bs,c,h,w])
else:
                # random mix: shuffle indices across the whole batch
# rand_idx = torch.randperm(bs)
                # same-class mix: permute within each group of 4 samples sharing an identity
'''
rand_idx = []
for i in range(bs // 4):
temp = torch.randperm(4) + i * 4
# print(temp)
rand_idx += temp.numpy().tolist()
'''
                # cross-class mix: pair each sample with an index from a different identity group
idx = list(range(bs))
rand_idx = []
for i in range(bs // 4):
idx_part = idx[i * 4:(i + 1) * 4]
temp = list(set(idx) - set(idx_part))
rand_idx += [random.choice(temp) for k in range(4)]
# y2 = y[rand_idx]
# y_new = lamda*y + (1-lamda)*y2
x2 = x[rand_idx, :, :, :]
x_new = (mask * x + (torch.ones(mask.size()).cuda() - mask) * x2)
return x_new, rand_idx, lamda
def getCAM(feature, weights, idxs):
bs, c, h, w = feature.size()
output_cam = []
cam_cv =[]
for i in range(bs):
cam = weights[idxs[i]].reshape(1, c).mm(feature[i].reshape((c, h * w)))
cam = cam.reshape(h, w)
output_cam.append(cam)
cam_img = cam.cpu().detach().numpy()
cam_img = cam_img - np.min(cam_img)
cam_img = cam_img / np.max(cam_img)
cam_img = np.uint8(255 * cam_img)
cam_cv.append(cam_img)
cam_cv = np.array(cam_cv)
output_cam = torch.stack(output_cam, dim=0)
return output_cam, cam_cv
class GetGrad():
def __init__(self):
pass
def get_grad(self, grad):
self.grad = grad
def __call__(self, x):
x.register_hook(self.get_grad)
def gradCAM(gradfeature, scores, idxs):
getGrad = GetGrad()
getGrad(gradfeature)
feature = gradfeature.detach()
bs, c, h, w = feature.size()
output_gradCam = []
if torch.cuda.is_available():
score = torch.tensor(0, dtype=torch.float).cuda()
else:
score = torch.tensor(0, dtype=torch.float)
for i in range(bs):
score += scores[i, idxs[i]]
# score = (scores.sum()).sum()
score.backward(retain_graph=True)
grad = getGrad.grad.detach()
# print('grad')
# print(grad.size())
# print(grad[0,0,:,:])
weight = grad.mean(2)
weight = weight.mean(2)
cam_cv = []
for i in range(bs):
grad_cam = weight[i].reshape(1, c).mm(feature[i].reshape((c, h * w)))
grad_cam = grad_cam.reshape(h, w)
grad_cam = F.relu(grad_cam)
output_gradCam.append(grad_cam)
cam_img = grad_cam.cpu().detach().numpy()
cam_img = cam_img - np.min(cam_img)
cam_img = cam_img / np.max(cam_img)
cam_img = np.uint8(255 * cam_img)
cam_cv.append(cam_img)
'''
for i in range(bs):
score = scores[i, idxs[i]]
print('score')
print(score)
score.backward()
grad = getGrad.grad.detach()
print('grad')
print(grad.size())
print(grad[0,0,:,:])
print(grad[2,0,:,:])
print(grad[2,2,:,:])
weight = grad[i].mean(1)
weight = weight.mean(1)
# print('weight')
# print(weight.size())
grad_cam = weight.reshape(1, c).mm(feature[i].reshape((c, h * w)))
grad_cam = grad_cam.reshape(h, w)
grad_cam = F.relu(grad_cam)
output_gradCam.append(grad_cam)
'''
cam_cv = np.array(cam_cv)
output_gradCam = torch.stack(output_gradCam, dim=0)
return output_gradCam, cam_cv
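# Illustrative usage sketch (assumption; the shapes follow the BFE model defined below,
# and `stage4_output`, `classifier_scores`, `labels` are placeholder names):
#   feature = stage4_output                  # (N, 2048, 24, 8), still requires grad
#   cam, cam_cv = gradCAM(feature, classifier_scores, labels)
#   # cam    : (N, 24, 8) float tensor of ReLU'd Grad-CAM maps
#   # cam_cv : (N, 24, 8) uint8 numpy array scaled to 0-255 for cv2.applyColorMap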
class BFE(nn.Module):
def __init__(self, num_classes, width_ratio=0.5, height_ratio=0.5):
super(BFE, self).__init__()
resnet = resnet50(pretrained=True)
self.backbone = nn.Sequential(
resnet.conv1,
resnet.bn1,
resnet.relu,
resnet.maxpool,
resnet.layer1,
resnet.layer2,
resnet.layer3,
)
# Resnet50 stage4
self.res_part = nn.Sequential(
Bottleneck(1024, 512, stride=1, downsample=nn.Sequential(
nn.Conv2d(1024, 2048, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(2048),
)),
Bottleneck(2048, 512),
Bottleneck(2048, 512),
)
self.res_part.load_state_dict(resnet.layer4.state_dict())
reduction = nn.Sequential(
            nn.Conv2d(2048, 512, 1),  # 1x1 convolution reducing 2048 -> 512 channels
nn.BatchNorm2d(512),
nn.ReLU()
)
# global branch
self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.global_softmax = nn.Linear(512, num_classes)
self.global_softmax.apply(weights_init_kaiming)
self.global_reduction = copy.deepcopy(reduction)
self.global_reduction.apply(weights_init_kaiming)
# part branch
# self.bottleneck1 = Bottleneck(2048, 512)
# self.bottleneck2 = Bottleneck(2048, 512)
self.res_part2 = Bottleneck(2048, 512)
self.part_maxpool = nn.AdaptiveMaxPool2d((1, 1))
# cutmix branch1
self.cutmix_batch_drop1 = CutMixBatchDrop(height_ratio, width_ratio)
self.reduction1 = nn.Sequential(
nn.Linear(2048, 1024, 1),
nn.BatchNorm1d(1024),
nn.ReLU()
)
self.reduction1.apply(weights_init_kaiming)
self.softmax1 = nn.Linear(1024, num_classes)
self.softmax1.apply(weights_init_kaiming)
# cutmix branch2
# self.bottleneck2 = Bottleneck(2048, 512)
self.cutmix_batch_drop2 = CutMixBatchDrop(height_ratio, width_ratio)
self.reduction2 = nn.Sequential(
nn.Linear(2048, 512, 1),
nn.BatchNorm1d(512),
nn.ReLU()
)
self.reduction2.apply(weights_init_kaiming)
self.softmax2 = nn.Linear(512, num_classes)
self.softmax2.apply(weights_init_kaiming)
def forward(self, x, y=None):
"""
:param x: input image tensor of (N, C, H, W)
:return: (prediction, triplet_losses, softmax_losses)
"""
x_img = x.cpu().detach().numpy()
N, _, camH, camW = x.size()
x = self.backbone(x) # N*1024*24*8
x = self.res_part(x) # N*2048*24*8
cam_global_feature = x.detach()
grad_cam = x
predict = []
triplet_features = []
softmax_features = []
# global branch
glob = self.global_avgpool(x) # GAP--N*2048*1
global_triplet_feature = self.global_reduction(glob).squeeze() # N*512
global_softmax_class = self.global_softmax(global_triplet_feature) # N*num_class
softmax_features.append(global_softmax_class)
triplet_features.append(global_triplet_feature)
predict.append(global_triplet_feature)
# part branch
x = self.res_part2(x) # N*2048*24*8
cam_cutmix_feature = x.detach()
# x_cutmix1 = self.bottleneck1(x)
# x_cutmix2 = self.bottleneck2(x)
x_cutmix1 = x
x_cutmix2 = x
'''get grad-cam map'''
if self.training:
# cutmix branch1
gradcam, cam_cv = gradCAM(grad_cam, global_softmax_class, y)
x_cutmix1, idx_cutmix1, lamda = self.cutmix_batch_drop1(x_cutmix1, gradcam, y, [3, 2], 6)
cutmix1_triplet_feature = self.part_maxpool(x_cutmix1).squeeze() # N*2048
cutmix1_feature = self.reduction1(cutmix1_triplet_feature) # N*1024
cutmix1_softmax_feature = self.softmax1(cutmix1_feature) # N*num_class/751
feature_img = x_cutmix1.cpu().detach().numpy()
for i in range(N):
heatmap = cv2.applyColorMap(cv2.resize(cam_cv[i], (camW, camH)), cv2.COLORMAP_JET)
img = feature_img[i, 0, :, :] - np.min(feature_img[i, 0, :, :])
# img = x_img[i] - np.min(x_img[i])
img = img / np.max(img)
img = np.uint8(255 * img).transpose(1, 2, 0)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (camW, camH))
result = heatmap * 0.5 + img * 0.5
cv2.imwrite('cam-img/feature-img/two_cutmix1/Heat{}.jpg'.format(i), heatmap)
cv2.imwrite('cam-img/feature-img/two_cutmix1/feature{}.jpg'.format(i), img)
cv2.imwrite('feature-img/two_cutmix_best/cutmix1/Heatimg{}.jpg'.format(i), result)
print('ok')
# cutmix branch2
grad_feature = x_cutmix1
gradcam_cutmix, cam_cv = gradCAM(grad_feature, cutmix1_softmax_feature, y)
x_cutmix2, idx_cutmix2, _ = self.cutmix_batch_drop2(x_cutmix2, gradcam_cutmix, y, [3, 2], 6)
# y_temp = y.reshape([-1, 4])
# rand_idx = idx_cutmix2
# y2 = y_temp[:, rand_idx].reshape([-1, 1])
feature_img = x_cutmix2.cpu().detach().numpy()
for i in range(N):
heatmap = cv2.applyColorMap(cv2.resize(cam_cv[i], (camW, camH)), cv2.COLORMAP_JET)
# img = x_img[i] - np.min(x_img[i])
img = feature_img[i, 0, :, :] - np.min(feature_img[i, 0, :, :])
img = img / np.max(img)
# img = np.uint8(255 * img).transpose(1, 2, 0)
img = cv2.resize(img, (camW, camH))
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# result = heatmap * 0.5 + img * 0.5
cv2.imwrite('cam-img/feature-img/two_cutmix2/Heat{}.jpg'.format(i), heatmap)
cv2.imwrite('cam-img/feature-img/two_cutmix2/feature{}.jpg'.format(i), img)
# cv2.imwrite('feature-img/two_cutmix_best/cutmix2/Heatimg{}.jpg'.format(i), result)
print('ok')
'''
feature_img = x_cutmix2.cpu().detach().numpy()
for i in range(N):
heatmap = cv2.applyColorMap(cv2.resize(cam_cv[i], (camW, camH)), cv2.COLORMAP_JET)
# img = feature_img[i,0,:,:] - np.min(feature_img[i,0,:,:])
img = x_img[i] - np.min(x_img[i])
img = img / np.max(img)
img = np.uint8(255 * img).transpose(1, 2, 0)
# img = cv2.resize(img, (camW,camH))
result = heatmap * 0.5 + img * 0.5
cv2.imwrite('cam-img/two_cutmix/cutmix2/CAM{}.jpg'.format(i), heatmap)
cv2.imwrite('cam-img/two_cutmix/cutmix2/img{}.jpg'.format(i), img)
cv2.imwrite('cam-img/two_cutmix/cutmix2/cam-img{}.jpg'.format(i), result)
print('ok')
'''
'''show img'''
# global
paras_re = list(self.global_reduction.parameters())
para_re1 = paras_re[0].detach().squeeze()
para_re2 = paras_re[2].detach().squeeze()
paras_soft = list(self.global_softmax.parameters())
para_soft = paras_soft[0].detach().squeeze()
weights_cam = para_soft.mm(para_re2.reshape(512, 1) * para_re1) # 751*2048
cam_global, cam_cv = getCAM(cam_global_feature, weights_cam, y) # N*24*8
# N, H, W = cam_global.size()
for i in range(N):
heatmap = cv2.applyColorMap(cv2.resize(cam_cv[i], (camW, camH)), cv2.COLORMAP_JET)
# img = feature_img[i,0,:,:] - np.min(feature_img[i,0,:,:])
img = x_img[i] - np.min(x_img[i])
img = img / np.max(img)
img = np.uint8(255 * img).transpose(1, 2, 0)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# img = cv2.resize(img, (camW,camH))
result = heatmap * 0.5 + img * 0.5
cv2.imwrite('cam-img/two_cutmix_global/CAM{}.jpg'.format(i), heatmap)
cv2.imwrite('cam-img/two_cutmix_global/img{}.jpg'.format(i), img)
cv2.imwrite('cam-img/two_cutmix_global/cam-img{}.jpg'.format(i), result)
print('ok')
# cutmix1
paras_re = list(self.reduction1.parameters())
para_re1 = paras_re[0].detach().squeeze()
para_re2 = paras_re[2].detach().squeeze()
paras_soft = list(self.softmax1.parameters())
para_soft = paras_soft[0].detach().squeeze()
weights_cam = para_soft.mm(para_re2.reshape(1024, 1) * para_re1) # 751*2048
cam_cutmix1, cam_cv = getCAM(cam_cutmix_feature, weights_cam, y) # N*24*8
for i in range(N):
heatmap = cv2.applyColorMap(cv2.resize(cam_cv[i], (camW, camH)), cv2.COLORMAP_JET)
# img = feature_img[i,0,:,:] - np.min(feature_img[i,0,:,:])
img = x_img[i] - np.min(x_img[i])
img = img / np.max(img)
img = np.uint8(255 * img).transpose(1, 2, 0)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
result = heatmap * 0.5 + img * 0.5
cv2.imwrite('cam-img/two_cutmix1/CAM{}.jpg'.format(i), heatmap)
cv2.imwrite('cam-img/two_cutmix1/img{}.jpg'.format(i), img)
cv2.imwrite('cam-img/two_cutmix1/cam-img{}.jpg'.format(i), result)
print('ok')
# cutmix2
paras_re = list(self.reduction2.parameters())
para_re1 = paras_re[0].detach().squeeze()
para_re2 = paras_re[2].detach().squeeze()
paras_soft = list(self.softmax2.parameters())
para_soft = paras_soft[0].detach().squeeze()
weights_cam = para_soft.mm(para_re2.reshape(512, 1) * para_re1) # 751*2048
cam_cutmix2, cam_cv = getCAM(cam_cutmix_feature, weights_cam, y) # N*24*8
for i in range(N):
heatmap = cv2.applyColorMap(cv2.resize(cam_cv[i], (camW, camH)), cv2.COLORMAP_JET)
img = x_img[i] - np.min(x_img[i])
img = img / np.max(img)
img = np.uint8(255 * img).transpose(1, 2, 0)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
result = heatmap * 0.5 + img * 0.5
cv2.imwrite('cam-img/two_cutmix2/CAM{}.jpg'.format(i), heatmap)
cv2.imwrite('cam-img/two_cutmix2/img{}.jpg'.format(i), img)
cv2.imwrite('cam-img/two_cutmix2/cam-img{}.jpg'.format(i), result)
print('ok')
# cutmix1 branch
cutmix1_triplet_feature = self.part_maxpool(x_cutmix1).squeeze() # N*2048
cutmix1_feature = self.reduction1(cutmix1_triplet_feature) # N*1024
cutmix1_softmax_feature = self.softmax1(cutmix1_feature) # N*num_class/751
triplet_features.append(cutmix1_feature)
softmax_features.append(cutmix1_softmax_feature)
predict.append(cutmix1_feature)
# cutmix2 branch
cutmix2_triplet_feature = self.part_maxpool(x_cutmix2).squeeze() # N*2048
cutmix2_feature = self.reduction2(cutmix2_triplet_feature) # N*512
cutmix2_softmax_feature = self.softmax2(cutmix2_feature) # N*num_class/751
triplet_features.append(cutmix2_feature)
softmax_features.append(cutmix2_softmax_feature)
predict.append(cutmix2_feature)
if self.training:
return triplet_features, softmax_features, y[idx_cutmix1]
else:
return torch.cat(predict, 1)
def get_optim_policy(self):
params = [
{'params': self.backbone.parameters()},
{'params': self.res_part.parameters()},
{'params': self.global_reduction.parameters()},
{'params': self.global_softmax.parameters()},
# {'params': self.bottleneck1.parameters()},
# {'params': self.bottleneck2.parameters()},
{'params': self.res_part2.parameters()},
{'params': self.reduction1.parameters()},
{'params': self.softmax1.parameters()},
{'params': self.reduction2.parameters()},
{'params': self.softmax2.parameters()},
]
return params
class Resnet(nn.Module):
def __init__(self, num_classes, resnet=None):
super(Resnet, self).__init__()
if not resnet:
resnet = resnet50(pretrained=True)
self.backbone = nn.Sequential(
resnet.conv1,
resnet.bn1,
resnet.relu,
resnet.maxpool,
resnet.layer1, # res_conv2
resnet.layer2, # res_conv3
resnet.layer3, # res_conv4
resnet.layer4
)
self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.softmax = nn.Linear(2048, num_classes)
def forward(self, x):
"""
:param x: input image tensor of (N, C, H, W)
:return: (prediction, triplet_losses, softmax_losses)
"""
x = self.backbone(x)
x = self.global_avgpool(x).squeeze()
feature = self.softmax(x)
if self.training:
return [], [feature]
else:
return feature
def get_optim_policy(self):
return self.parameters()
class IDE(nn.Module):
def __init__(self, num_classes, resnet=None):
super(IDE, self).__init__()
if not resnet:
resnet = resnet50(pretrained=True)
self.backbone = nn.Sequential(
resnet.conv1,
resnet.bn1,
resnet.relu,
resnet.maxpool,
resnet.layer1, # res_conv2
resnet.layer2, # res_conv3
resnet.layer3, # res_conv4
resnet.layer4
)
self.global_avgpool = nn.AvgPool2d(kernel_size=(12, 4))
def forward(self, x):
"""
:param x: input image tensor of (N, C, H, W)
:return: (prediction, triplet_losses, softmax_losses)
"""
x = self.backbone(x)
feature = self.global_avgpool(x).squeeze()
if self.training:
return [feature], []
else:
return feature
def get_optim_policy(self):
return self.parameters()
|
the-stack_0_9194 | """
Test the random numbers
"""
from __future__ import print_function, division, unicode_literals, absolute_import
import numpy as np
from numpy.random import RandomState
from smerfs.random import z_standard_normal
def test_zig():
""" Test the Ziggurat generator has approximately normal distribn """
from scipy.special import ndtri # inverse cumulative normal
    rand_size = 1000000
n_bin = 1000
bins = ndtri((np.arange(n_bin-1)+1)/float(n_bin))
random_state = RandomState(seed=123)
z = z_standard_normal(rand_size, random_state)
z_bin = np.bincount(np.digitize(z, bins), minlength=n_bin)
print('Mean', z.mean(), 'variance', z.var())
print('Bin counts in', z_bin.min(), z_bin.max())
bin_low, bin_high = np.argmin(z_bin), np.argmax(z_bin)
print('Lowest bin %d in i=%d, max %d in %d'%(z_bin[bin_low], bin_low, z_bin[bin_high], bin_high))
mean_bin = rand_size//n_bin
over = z_bin[bin_high]-mean_bin
under = mean_bin - z_bin[bin_low]
assert(over<200)
assert(under<200)
|
the-stack_0_9195 | import logging
from typing import Dict, List, Optional
from chia.consensus.block_record import BlockRecord
from chia.consensus.blockchain_interface import BlockchainInterface
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from chia.types.header_block import HeaderBlock
from chia.types.weight_proof import SubEpochChallengeSegment, SubEpochSegments
from chia.util.ints import uint32
class BlockCache(BlockchainInterface):
def __init__(
self,
blocks: Dict[bytes32, BlockRecord],
headers: Dict[bytes32, HeaderBlock] = None,
height_to_hash: Dict[uint32, bytes32] = None,
sub_epoch_summaries: Dict[uint32, SubEpochSummary] = None,
):
if sub_epoch_summaries is None:
sub_epoch_summaries = {}
if height_to_hash is None:
height_to_hash = {}
if headers is None:
headers = {}
self._block_records = blocks
self._headers = headers
self._height_to_hash = height_to_hash
self._sub_epoch_summaries = sub_epoch_summaries
self._sub_epoch_segments: Dict[uint32, SubEpochSegments] = {}
        self.log = logging.getLogger(__name__)
def block_record(self, header_hash: bytes32) -> BlockRecord:
return self._block_records[header_hash]
def height_to_block_record(self, height: uint32, check_db: bool = False) -> BlockRecord:
header_hash = self.height_to_hash(height)
return self.block_record(header_hash)
def get_ses_heights(self) -> List[uint32]:
return sorted(self._sub_epoch_summaries.keys())
def get_ses(self, height: uint32) -> SubEpochSummary:
return self._sub_epoch_summaries[height]
def height_to_hash(self, height: uint32) -> Optional[bytes32]:
if height not in self._height_to_hash:
self.log.warning(f"could not find height in cache {height}")
return None
return self._height_to_hash[height]
def contains_block(self, header_hash: bytes32) -> bool:
return header_hash in self._block_records
def contains_height(self, height: uint32) -> bool:
return height in self._height_to_hash
async def get_block_records_in_range(self, start: int, stop: int) -> Dict[bytes32, BlockRecord]:
return self._block_records
async def get_block_records_at(self, heights: List[uint32]) -> List[BlockRecord]:
block_records: List[BlockRecord] = []
for height in heights:
block_records.append(self.height_to_block_record(height))
return block_records
async def get_block_record_from_db(self, header_hash: bytes32) -> Optional[BlockRecord]:
return self._block_records[header_hash]
def remove_block_record(self, header_hash: bytes32):
del self._block_records[header_hash]
def add_block_record(self, block: BlockRecord):
self._block_records[block.header_hash] = block
async def get_header_blocks_in_range(
self, start: int, stop: int, tx_filter: bool = True
) -> Dict[bytes32, HeaderBlock]:
return self._headers
async def persist_sub_epoch_challenge_segments(
self, sub_epoch_summary_height: uint32, segments: List[SubEpochChallengeSegment]
):
self._sub_epoch_segments[sub_epoch_summary_height] = SubEpochSegments(segments)
async def get_sub_epoch_challenge_segments(
self,
sub_epoch_summary_height: uint32,
) -> Optional[List[SubEpochChallengeSegment]]:
segments = self._sub_epoch_segments.get(sub_epoch_summary_height)
if segments is None:
return None
return segments.challenge_segments
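# Minimal usage sketch (an assumption for illustration, not taken from the chia test
# suite): the cache can be seeded with plain dicts, e.g. an empty in-memory cache:
#   cache = BlockCache(blocks={}, height_to_hash={}, sub_epoch_summaries={})
#   cache.contains_height(uint32(0))    # False until a record is added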
|
the-stack_0_9198 | #! /usr/bin/doit -f
from pathlib import Path
DOIT_CONFIG = {'default_tasks': ['small_crush']}
BIG_CRUSH_RNG = """pcg32_random_r pcglite32_random
stdcpp_mt19937 stdcpp_mt19937_64
stdcpp_minstd_rand stdcpp_minstd_rand0
stdcpp_knuth_b stdcpp_random_device
murmur1_counter murmur2_counter murmur3_counter
siphash24_counter siphash24_key_counter siphash24_key_counter_64
siphash14_key_counter siphash12_key_counter
xxh32_key_counter xxh32_key_counter_64
xxh64_key_counter xxh64_key_counter_64
xxh64_key_only_counter_64 xxh64_key_only2_counter_64
""".split() # stdcpp_ranlux48 ranlux48 never seem to finish...
CRUSH_RNG = BIG_CRUSH_RNG + """
siphash11_key_counter
stdcpp_ranlux24
""".split()
SMALL_CRUSH_RNG = ["raw_counter"] + CRUSH_RNG
def do_crushes(rngs, kind):
for name in rngs:
target = Path('results/%s-%s.txt' % (kind, name))
if not target.exists():
yield {
'name': name,
'actions': ['build/bin/bbattery_runner %s %s > %s' % (name, kind, target)],
'targets': [str(target)],
'file_dep': []
}
def task_small_crush():
"""Runs small_crush tests (a few minutes)."""
yield from do_crushes(SMALL_CRUSH_RNG, 'small_crush')
def task_crush():
"""Runs crush tests (~30mn per rng)."""
yield from do_crushes(CRUSH_RNG, 'crush')
def task_big_crush():
"""Runs crush tests (~4h per rng)."""
yield from do_crushes(BIG_CRUSH_RNG, 'big_crush')
def task_summary():
return {
'actions': ['./gen_results_summary.py']
}
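# Example invocations (assuming the standard doit CLI):
#   doit              # runs the default task list: small_crush
#   doit crush        # ~30 minutes per generator
#   doit big_crush    # ~4 hours per generator
# Results are written to results/<kind>-<rng>.txt and only regenerated when missing.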
|
the-stack_0_9201 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import re
import os
from bomlib.columns import ColumnList
# Check python version to determine which version of ConfirParser to import
if sys.version_info.major >= 3:
import configparser as ConfigParser
else:
import ConfigParser
class BomPref:
SECTION_IGNORE = "IGNORE_COLUMNS"
SECTION_COLUMN_ORDER = "COLUMN_ORDER"
SECTION_GENERAL = "BOM_OPTIONS"
SECTION_ALIASES = "COMPONENT_ALIASES"
SECTION_GROUPING_FIELDS = "GROUP_FIELDS"
SECTION_REGEXCLUDES = "REGEX_EXCLUDE"
SECTION_REGINCLUDES = "REGEX_INCLUDE"
OPT_PCB_CONFIG = "pcb_configuration"
OPT_NUMBER_ROWS = "number_rows"
OPT_GROUP_CONN = "group_connectors"
OPT_USE_REGEX = "test_regex"
OPT_USE_ALT = "use_alt"
OPT_ALT_WRAP = "alt_wrap"
OPT_MERGE_BLANK = "merge_blank_fields"
OPT_IGNORE_DNF = "ignore_dnf"
OPT_BACKUP = "make_backup"
OPT_OUTPUT_FILE_NAME = "output_file_name"
OPT_VARIANT_FILE_NAME_FORMAT = "variant_file_name_format"
OPT_DEFAULT_BOARDS = "number_boards"
OPT_DEFAULT_PCBCONFIG = "board_variant"
OPT_CONFIG_FIELD = "fit_field"
OPT_HIDE_HEADERS = "hide_headers"
OPT_HIDE_PCB_INFO = "hide_pcb_info"
def __init__(self):
# List of headings to ignore in BoM generation
self.ignore = [
ColumnList.COL_PART_LIB,
ColumnList.COL_FP_LIB,
]
self.corder = ColumnList._COLUMNS_DEFAULT
self.useAlt = False # Use alternate reference representation
self.altWrap = None # Wrap to n items when using alt representation
self.ignoreDNF = True # Ignore rows for do-not-fit parts
self.numberRows = True # Add row-numbers to BoM output
self.groupConnectors = True # Group connectors and ignore component value
self.useRegex = True # Test various columns with regex
self.boards = 1 # Quantity of boards to be made
self.mergeBlankFields = True # Blanks fields will be merged when possible
self.hideHeaders = False
self.hidePcbInfo = False
self.verbose = False # By default, is not verbose
self.configField = "Config" # Default field used for part fitting config
self.pcbConfig = ["default"]
self.backup = "%O.tmp"
self.separatorCSV = None
self.outputFileName = "%O_bom_%v%V"
self.variantFileNameFormat = "_(%V)"
self.xlsxwriter_available = False
self.xlsxwriter2_available = False
# Default fields used to group components
self.groups = [
ColumnList.COL_PART,
ColumnList.COL_PART_LIB,
ColumnList.COL_VALUE,
ColumnList.COL_FP,
ColumnList.COL_FP_LIB,
# User can add custom grouping columns in bom.ini
]
self.regIncludes = [] # None by default
self.regExcludes = [
[ColumnList.COL_REFERENCE, '^TP[0-9]*'],
[ColumnList.COL_REFERENCE, '^FID'],
[ColumnList.COL_PART, 'mount.*hole'],
[ColumnList.COL_PART, 'solder.*bridge'],
[ColumnList.COL_PART, 'test.*point'],
[ColumnList.COL_FP, 'test.*point'],
[ColumnList.COL_FP, 'mount.*hole'],
[ColumnList.COL_FP, 'fiducial'],
]
# Default component groupings
self.aliases = [
["c", "c_small", "cap", "capacitor"],
["r", "r_small", "res", "resistor"],
["sw", "switch"],
["l", "l_small", "inductor"],
["zener", "zenersmall"],
["d", "diode", "d_small"]
]
# Check an option within the SECTION_GENERAL group
def checkOption(self, parser, opt, default=False):
if parser.has_option(self.SECTION_GENERAL, opt):
return parser.get(self.SECTION_GENERAL, opt).lower() in ["1", "true", "yes"]
else:
return default
def checkInt(self, parser, opt, default=False):
if parser.has_option(self.SECTION_GENERAL, opt):
return int(parser.get(self.SECTION_GENERAL, opt).lower())
else:
return default
# Read KiBOM preferences from file
def Read(self, file, verbose=False):
file = os.path.abspath(file)
if not os.path.exists(file) or not os.path.isfile(file):
print("{f} is not a valid file!".format(f=file))
return
cf = ConfigParser.RawConfigParser(allow_no_value=True)
cf.optionxform = str
cf.read(file)
# Read general options
if self.SECTION_GENERAL in cf.sections():
self.ignoreDNF = self.checkOption(cf, self.OPT_IGNORE_DNF, default=True)
self.useAlt = self.checkOption(cf, self.OPT_USE_ALT, default=False)
self.altWrap = self.checkInt(cf, self.OPT_ALT_WRAP, default=None)
self.numberRows = self.checkOption(cf, self.OPT_NUMBER_ROWS, default=True)
self.groupConnectors = self.checkOption(cf, self.OPT_GROUP_CONN, default=True)
self.useRegex = self.checkOption(cf, self.OPT_USE_REGEX, default=True)
self.mergeBlankFields = self.checkOption(cf, self.OPT_MERGE_BLANK, default=True)
self.outputFileName = cf.get(self.SECTION_GENERAL, self.OPT_OUTPUT_FILE_NAME)
self.variantFileNameFormat = cf.get(self.SECTION_GENERAL, self.OPT_VARIANT_FILE_NAME_FORMAT)
if cf.has_option(self.SECTION_GENERAL, self.OPT_CONFIG_FIELD):
self.configField = cf.get(self.SECTION_GENERAL, self.OPT_CONFIG_FIELD)
if cf.has_option(self.SECTION_GENERAL, self.OPT_DEFAULT_BOARDS):
self.boards = self.checkInt(cf, self.OPT_DEFAULT_BOARDS, default=None)
if cf.has_option(self.SECTION_GENERAL, self.OPT_DEFAULT_PCBCONFIG):
self.pcbConfig = cf.get(self.SECTION_GENERAL, self.OPT_DEFAULT_PCBCONFIG).strip().split(",")
if cf.has_option(self.SECTION_GENERAL, self.OPT_BACKUP):
self.backup = cf.get(self.SECTION_GENERAL, self.OPT_BACKUP)
else:
self.backup = False
if cf.has_option(self.SECTION_GENERAL, self.OPT_HIDE_HEADERS):
self.hideHeaders = cf.get(self.SECTION_GENERAL, self.OPT_HIDE_HEADERS) == '1'
if cf.has_option(self.SECTION_GENERAL, self.OPT_HIDE_PCB_INFO):
self.hidePcbInfo = cf.get(self.SECTION_GENERAL, self.OPT_HIDE_PCB_INFO) == '1'
        # Read out grouping columns
if self.SECTION_GROUPING_FIELDS in cf.sections():
self.groups = [i for i in cf.options(self.SECTION_GROUPING_FIELDS)]
# Read out ignored-rows
if self.SECTION_IGNORE in cf.sections():
self.ignore = [i for i in cf.options(self.SECTION_IGNORE)]
# Read out column order
if self.SECTION_COLUMN_ORDER in cf.sections():
self.corder = [i for i in cf.options(self.SECTION_COLUMN_ORDER)]
# Read out component aliases
if self.SECTION_ALIASES in cf.sections():
self.aliases = [re.split('[ \t]+', a) for a in cf.options(self.SECTION_ALIASES)]
if self.SECTION_REGEXCLUDES in cf.sections():
self.regExcludes = []
for pair in cf.options(self.SECTION_REGEXCLUDES):
if len(re.split('[ \t]+', pair)) == 2:
self.regExcludes.append(re.split('[ \t]+', pair))
if self.SECTION_REGINCLUDES in cf.sections():
self.regIncludes = []
for pair in cf.options(self.SECTION_REGINCLUDES):
if len(re.split('[ \t]+', pair)) == 2:
self.regIncludes.append(re.split('[ \t]+', pair))
    # Add an option to the SECTION_GENERAL group
def addOption(self, parser, opt, value, comment=None):
if comment:
if not comment.startswith(";"):
comment = "; " + comment
parser.set(self.SECTION_GENERAL, comment)
parser.set(self.SECTION_GENERAL, opt, "1" if value else "0")
# Write KiBOM preferences to file
def Write(self, file):
file = os.path.abspath(file)
cf = ConfigParser.RawConfigParser(allow_no_value=True)
cf.optionxform = str
cf.add_section(self.SECTION_GENERAL)
cf.set(self.SECTION_GENERAL, "; General BoM options here")
self.addOption(cf, self.OPT_IGNORE_DNF, self.ignoreDNF, comment="If '{opt}' option is set to 1, rows that are not to be fitted on the PCB will not be written to the BoM file".format(opt=self.OPT_IGNORE_DNF))
self.addOption(cf, self.OPT_USE_ALT, self.useAlt, comment="If '{opt}' option is set to 1, grouped references will be printed in the alternate compressed style eg: R1-R7,R18".format(opt=self.OPT_USE_ALT))
self.addOption(cf, self.OPT_ALT_WRAP, self.altWrap, comment="If '{opt}' option is set to and integer N, the references field will wrap after N entries are printed".format(opt=self.OPT_ALT_WRAP))
self.addOption(cf, self.OPT_NUMBER_ROWS, self.numberRows, comment="If '{opt}' option is set to 1, each row in the BoM will be prepended with an incrementing row number".format(opt=self.OPT_NUMBER_ROWS))
self.addOption(cf, self.OPT_GROUP_CONN, self.groupConnectors, comment="If '{opt}' option is set to 1, connectors with the same footprints will be grouped together, independent of the name of the connector".format(opt=self.OPT_GROUP_CONN))
self.addOption(cf, self.OPT_USE_REGEX, self.useRegex, comment="If '{opt}' option is set to 1, each component group will be tested against a number of regular-expressions (specified, per column, below). If any matches are found, the row is ignored in the output file".format(opt=self.OPT_USE_REGEX))
self.addOption(cf, self.OPT_MERGE_BLANK, self.mergeBlankFields, comment="If '{opt}' option is set to 1, component groups with blank fields will be merged into the most compatible group, where possible".format(opt=self.OPT_MERGE_BLANK))
cf.set(self.SECTION_GENERAL, "; Specify output file name format, %O is the defined output name, %v is the version, %V is the variant name which will be ammended according to 'variant_file_name_format'.")
cf.set(self.SECTION_GENERAL, self.OPT_OUTPUT_FILE_NAME, self.outputFileName)
cf.set(self.SECTION_GENERAL, "; Specify the variant file name format, this is a unique field as the variant is not always used/specified. When it is unused you will want to strip all of this.")
cf.set(self.SECTION_GENERAL, self.OPT_VARIANT_FILE_NAME_FORMAT, self.variantFileNameFormat)
cf.set(self.SECTION_GENERAL, '; Field name used to determine if a particular part is to be fitted')
cf.set(self.SECTION_GENERAL, self.OPT_CONFIG_FIELD, self.configField)
cf.set(self.SECTION_GENERAL, '; Make a backup of the bom before generating the new one, using the following template')
cf.set(self.SECTION_GENERAL, self.OPT_BACKUP, self.backup)
cf.set(self.SECTION_GENERAL, '; Default number of boards to produce if none given on CLI with -n')
        cf.set(self.SECTION_GENERAL, self.OPT_DEFAULT_BOARDS, str(self.boards))
        cf.set(self.SECTION_GENERAL, '; Default PCB variant if none given on CLI with -r')
        cf.set(self.SECTION_GENERAL, self.OPT_DEFAULT_PCBCONFIG, ",".join(self.pcbConfig))
        cf.set(self.SECTION_GENERAL, '; Whether to hide headers from output file')
        cf.set(self.SECTION_GENERAL, self.OPT_HIDE_HEADERS, "1" if self.hideHeaders else "0")
        cf.set(self.SECTION_GENERAL, '; Whether to hide PCB info from output file')
        cf.set(self.SECTION_GENERAL, self.OPT_HIDE_PCB_INFO, "1" if self.hidePcbInfo else "0")
cf.add_section(self.SECTION_IGNORE)
cf.set(self.SECTION_IGNORE, "; Any column heading that appears here will be excluded from the Generated BoM")
cf.set(self.SECTION_IGNORE, "; Titles are case-insensitive")
for i in self.ignore:
cf.set(self.SECTION_IGNORE, i)
cf.add_section(self.SECTION_COLUMN_ORDER)
cf.set(self.SECTION_COLUMN_ORDER, "; Columns will apear in the order they are listed here")
cf.set(self.SECTION_COLUMN_ORDER, "; Titles are case-insensitive")
for i in self.corder:
cf.set(self.SECTION_COLUMN_ORDER, i)
# Write the component grouping fields
cf.add_section(self.SECTION_GROUPING_FIELDS)
cf.set(self.SECTION_GROUPING_FIELDS, '; List of fields used for sorting individual components into groups')
cf.set(self.SECTION_GROUPING_FIELDS, '; Components which match (comparing *all* fields) will be grouped together')
cf.set(self.SECTION_GROUPING_FIELDS, '; Field names are case-insensitive')
for i in self.groups:
cf.set(self.SECTION_GROUPING_FIELDS, i)
cf.add_section(self.SECTION_ALIASES)
cf.set(self.SECTION_ALIASES, "; A series of values which are considered to be equivalent for the part name")
cf.set(self.SECTION_ALIASES, "; Each line represents a list of equivalent component name values separated by white space")
cf.set(self.SECTION_ALIASES, "; e.g. 'c c_small cap' will ensure the equivalent capacitor symbols can be grouped together")
cf.set(self.SECTION_ALIASES, '; Aliases are case-insensitive')
for a in self.aliases:
cf.set(self.SECTION_ALIASES, "\t".join(a))
cf.add_section(self.SECTION_REGINCLUDES)
cf.set(self.SECTION_REGINCLUDES, '; A series of regular expressions used to include parts in the BoM')
cf.set(self.SECTION_REGINCLUDES, '; If there are any regex defined here, only components that match against ANY of them will be included in the BOM')
cf.set(self.SECTION_REGINCLUDES, '; Column names are case-insensitive')
cf.set(self.SECTION_REGINCLUDES, '; Format is: "[ColumName] [Regex]" (white-space separated)')
for i in self.regIncludes:
if not len(i) == 2:
continue
cf.set(self.SECTION_REGINCLUDES, i[0] + "\t" + i[1])
cf.add_section(self.SECTION_REGEXCLUDES)
cf.set(self.SECTION_REGEXCLUDES, '; A series of regular expressions used to exclude parts from the BoM')
cf.set(self.SECTION_REGEXCLUDES, '; If a component matches ANY of these, it will be excluded from the BoM')
cf.set(self.SECTION_REGEXCLUDES, '; Column names are case-insensitive')
cf.set(self.SECTION_REGEXCLUDES, '; Format is: "[ColumName] [Regex]" (white-space separated)')
for i in self.regExcludes:
if not len(i) == 2:
continue
cf.set(self.SECTION_REGEXCLUDES, i[0] + "\t" + i[1])
        with open(file, 'w') as configfile:
cf.write(configfile)
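# Minimal usage sketch (assumption, mirroring how the KiBOM scripts drive this class):
#   pref = BomPref()
#   pref.Read("bom.ini", verbose=True)   # load preferences; a missing file only warns
#   pref.boards = 5                      # override the default board quantity afterwards
#   pref.ignoreDNF                       # True unless the ini file disabled it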
|
the-stack_0_9202 | import numpy as np
from collections import OrderedDict
from matplotlib import pyplot as plt
class GesturesVisualizer():
def __init__(self, gestures, deviceWidth=360, deviceHeight=640):
self.gestures = gestures
self.width = deviceWidth
self.height = deviceHeight
def plot_gestures(self):
fig = plt.figure(figsize=(3.75, 2.5 * (self.height / self.width)))
ax = fig.add_axes([0.15, 0.05, 0.55, 0.85])
labels = OrderedDict()
for i, _ind in enumerate(self.gestures.index):
labels["gesture_" + str(i)] = np.random.rand(1, 3)
x_data = []
y_data = []
if(len(self.gestures.iloc[i]["data"]) == 0):
continue
x_data.append(self.gestures.iloc[i]["data"][0]["x0"])
y_data.append(self.gestures.iloc[i]["data"][0]["y0"])
if(self.gestures.iloc[i]["type"] == "swipe"):
for d in self.gestures.iloc[i]["data"]:
x_data.append(d["moveX"])
y_data.append(d["moveY"])
keys = list(labels.keys())
if(self.gestures.iloc[i]["type"] == "tap"):
plt.scatter(x_data, y_data, label=keys[i], color = labels[keys[i]][0])
else:
plt.plot(x_data, y_data, label=keys[i], color = labels[keys[i]][0])
handles, labels = plt.gca().get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
plt.xlim(0, self.width)
plt.ylim(0, self.height)
plt.xlabel('X - Dimension')
plt.ylabel('Y - Dimension')
plt.gca().invert_yaxis()
plt.legend(by_label.values(), by_label.keys(), bbox_to_anchor=(1.01, 0.5), loc="center left")
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
plt.show() |
the-stack_0_9204 | # Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from delfin import db
from delfin.api import api_utils
from delfin.api.common import wsgi
from delfin.api.views import storage_host_initiators as \
storage_host_initiator_view
class StorageHostInitiatorController(wsgi.Controller):
def __init__(self):
super(StorageHostInitiatorController, self).__init__()
self.search_options = ['name', 'status', 'wwn', 'id', 'storage_id',
'native_storage_host_id',
'native_storage_host_initiator_id']
def _get_storage_host_initiator_search_options(self):
"""Return storage host initiator search options allowed ."""
return self.search_options
def show(self, req, id):
ctxt = req.environ['delfin.context']
query_params = {"storage_id": id}
query_params.update(req.GET)
# Update options other than filters
sort_keys, sort_dirs = api_utils.get_sort_params(query_params)
marker, limit, offset = api_utils.get_pagination_params(query_params)
# Strip out options except supported search options
api_utils.remove_invalid_options(
ctxt, query_params,
self._get_storage_host_initiator_search_options())
storage_host_initiators = db.storage_host_initiators_get_all(
ctxt, marker, limit, sort_keys, sort_dirs, query_params, offset)
return storage_host_initiator_view.build_storage_host_initiators(
storage_host_initiators)
def create_resource():
return wsgi.Resource(StorageHostInitiatorController())
|
the-stack_0_9205 | import soundfile as sf
import math
from uuid import uuid4
from typing import List
from .exceptions import ShellError
from pathlib import Path
def fftsanitise(fftsettings) -> List[int]:
return [
int(fftsettings[0]),
int(fftsettings[1]),
int(fftsettings[2])
]
def get_buffer(audio_file_path: str, output: str = "list"):
"""Returns an audio files fp32 values as a numpy array"""
data, _ = sf.read(audio_file_path)
data = data.transpose()
if output == "list":
return data.tolist()
if output == "numpy":
return data
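# Illustrative sketch (assumption; "drums.wav" is a hypothetical file path):
#   samples = get_buffer("drums.wav")              # list of floats (nested per channel if multichannel)
#   arr = get_buffer("drums.wav", output="numpy")  # numpy array, channels first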
def odd_snap(number: int) -> int:
"""snaps a number to the next odd number"""
if (number % 2) == 0:
return number + 1
else:
return number
def fftformat(fftsettings: List[int]) -> int:
"""Handles the FFT size so you can pass maxfftsize"""
fftsize = fftsettings[2]
if fftsize == -1:
fftsize = fftsettings[0]
return math.floor(2 ** math.ceil(math.log(fftsize)/math.log(2)))
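# Illustrative sketch (assumption): fftsettings is [windowsize, hopsize, fftsize]; a
# fftsize of -1 falls back to the window size, then rounds up to the next power of two:
#   >>> fftformat([1000, 256, -1])
#   1024
#   >>> fftformat([1024, 256, 3000])
#   4096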
def handle_ret(retval: int):
"""Handle return value and raise exceptions if necessary"""
if retval != 0:
raise ShellError(retval)
def make_temp() -> str:
"""Create temporary files in local hidden directory"""
tempfiles = Path.home() / ".python-flucoma"
if not tempfiles.exists():
tempfiles.mkdir()
uuid = str(uuid4().hex)
full_path = tempfiles / f"{uuid}.wav"
return str(full_path)
def cleanup():
tempfiles = Path.home() / ".python-flucoma"
if tempfiles.exists():
for x in tempfiles.iterdir():
x.unlink()
|
the-stack_0_9212 | import tensorflow as tf
from tensorflow.keras import backend
from tensorflow.keras import layers
from tensorflow.keras import models
from models.backbone.resnet import ResNet18, ResNet34, ResNet50, ResNet101, ResNet152
from models.backbone.resnext import ResNeXt50, ResNeXt101
from models.backbone.efficientnet import EfficientNetB0, EfficientNetB1, EfficientNetB2, EfficientNetB3, EfficientNetB4
from models.backbone.efficientnet import EfficientNetB5, EfficientNetB6, EfficientNetB7, EfficientNetL2
from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.applications import DenseNet121, DenseNet169, DenseNet201
from tensorflow.keras.applications import VGG16
from tensorflow.keras.applications import VGG19
from models.backbone.senet import SENet154, SEResNet50, SEResNet101, SEResNet152, SEResNeXt50, SEResNeXt101
DEFAULT_SKIP_CONNECTIONS = {
'vgg16': ('block5_conv3', 'block4_conv3', 'block3_conv3', 'block2_conv2'),
'vgg19': ('block5_conv4', 'block4_conv4', 'block3_conv4', 'block2_conv2'),
'resnet18': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
'resnet34': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
'resnet50': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
'resnet101': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
'resnet152': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
'resnext50': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
'resnext101': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
'inceptionv3': (228, 86, 16, 9),
'inceptionresnetv2': (594, 260, 16, 9),
'densenet121': (311, 139, 51, 4),
'densenet169': (367, 139, 51, 4),
'densenet201': (479, 139, 51, 4),
'efficientnetb0': ('block6a_expand_activation', 'block4a_expand_activation',
'block3a_expand_activation', 'block2a_expand_activation'),
'efficientnetb1': ('block6a_expand_activation', 'block4a_expand_activation',
'block3a_expand_activation', 'block2a_expand_activation'),
'efficientnetb2': ('block6a_expand_activation', 'block4a_expand_activation',
'block3a_expand_activation', 'block2a_expand_activation'),
'efficientnetb3': ('block6a_expand_activation', 'block4a_expand_activation',
'block3a_expand_activation', 'block2a_expand_activation'),
'efficientnetb4': ('block6a_expand_activation', 'block4a_expand_activation',
'block3a_expand_activation', 'block2a_expand_activation'),
'efficientnetb5': ('block6a_expand_activation', 'block4a_expand_activation',
'block3a_expand_activation', 'block2a_expand_activation'),
'efficientnetb6': ('block6a_expand_activation', 'block4a_expand_activation',
'block3a_expand_activation', 'block2a_expand_activation'),
'efficientnetb7': ('block6a_expand_activation', 'block4a_expand_activation',
'block3a_expand_activation', 'block2a_expand_activation'),
'efficientnetl2': ('block6a_expand_activation', 'block4a_expand_activation',
'block3a_expand_activation', 'block2a_expand_activation'),
'seresnext50': ('activation_65', 'activation_35', 'activation_15', 'activation'),
'seresnext101': ('activation_150', 'activation_35', 'activation_15', 'activation'),
'seresnet50': ('activation_65', 'activation_35', 'activation_15', 'activation'),
'seresnet101': ('activation_150', 'activation_35', 'activation_15', 'activation'),
'seresnet152': ('activation_235', 'activation_55', 'activation_15', 'activation'),
'senet154': ('activation_237', 'activation_55', 'activation_17', 'activation_2'),
}
# ---------------------------------------------------------------------
# PSP Model
# ---------------------------------------------------------------------
def PSPNet(
backbone_name='vgg16',
input_shape=(384, 384, 3),
classes=21,
activation='softmax',
weights=None,
encoder_weights='imagenet',
encoder_freeze=False,
downsample_factor=8,
psp_conv_filters=512,
psp_pooling_type='avg',
psp_use_batchnorm=True,
psp_dropout=None,
**kwargs
):
"""PSPNet_ is a fully convolution neural network for image semantic segmentation
Args:
backbone_name: name of classification model used as feature
extractor to build segmentation model.
input_shape: shape of input data/image ``(H, W, C)``.
``H`` and ``W`` should be divisible by ``6 * downsample_factor`` and **NOT** ``None``!
classes: a number of classes for output (output shape - ``(h, w, classes)``).
activation: name of one of ``keras.activations`` for last model layer
(e.g. ``sigmoid``, ``softmax``, ``linear``).
weights: optional, path to model weights.
encoder_weights: one of ``None`` (random initialization), ``imagenet`` (pre-training on ImageNet).
encoder_freeze: if ``True`` set all layers of encoder (backbone model) as non-trainable.
downsample_factor: one of 4, 8 and 16. Downsampling rate or in other words backbone depth
to construct PSP module on it.
psp_conv_filters: number of filters in ``Conv2D`` layer in each PSP block.
psp_pooling_type: one of 'avg', 'max'. PSP block pooling type (maximum or average).
psp_use_batchnorm: if ``True``, ``BatchNormalisation`` layer between ``Conv2D`` and ``Activation`` layers
is used.
psp_dropout: dropout rate between 0 and 1.
Returns:
``keras.models.Model``: **PSPNet**
.. _PSPNet:
https://arxiv.org/pdf/1612.01105.pdf
"""
# control image input shape
check_input_shape(input_shape, downsample_factor)
backbone = get_backbone(backbone_name,
input_shape=input_shape,
weights=encoder_weights,
include_top=False)
feature_layers = DEFAULT_SKIP_CONNECTIONS[backbone_name]
if downsample_factor == 16:
psp_layer_idx = feature_layers[0]
elif downsample_factor == 8:
psp_layer_idx = feature_layers[1]
elif downsample_factor == 4:
psp_layer_idx = feature_layers[2]
else:
raise ValueError('Unsupported factor - `{}`, Use 4, 8 or 16.'.format(downsample_factor))
model = build_psp(
backbone,
psp_layer_idx,
pooling_type=psp_pooling_type,
conv_filters=psp_conv_filters,
use_batchnorm=psp_use_batchnorm,
final_upsampling_factor=downsample_factor,
classes=classes,
activation=activation,
dropout=psp_dropout,
)
# lock encoder weights for fine-tuning
if encoder_freeze:
freeze_model(backbone, **kwargs)
# loading model weights
if weights is not None:
model.load_weights(weights)
return model
# ---------------------------------------------------------------------
# PSP Decoder
# ---------------------------------------------------------------------
def build_psp(
backbone,
psp_layer_idx,
pooling_type='avg',
conv_filters=512,
use_batchnorm=True,
final_upsampling_factor=8,
classes=21,
activation='softmax',
dropout=None,
):
input_ = backbone.input
x = (backbone.get_layer(name=psp_layer_idx).output if isinstance(psp_layer_idx, str)
else backbone.get_layer(index=psp_layer_idx).output)
# x = (get_layer_number(backbone, psp_layer_idx) if isinstance(psp_layer_idx, str) else psp_layer_idx)
# build spatial pyramid
x1 = SpatialContextBlock(1, conv_filters, pooling_type, use_batchnorm)(x)
x2 = SpatialContextBlock(2, conv_filters, pooling_type, use_batchnorm)(x)
x3 = SpatialContextBlock(3, conv_filters, pooling_type, use_batchnorm)(x)
x6 = SpatialContextBlock(6, conv_filters, pooling_type, use_batchnorm)(x)
# aggregate spatial pyramid
concat_axis = 3 if backend.image_data_format() == 'channels_last' else 1
x = layers.Concatenate(axis=concat_axis, name='psp_concat')([x, x1, x2, x3, x6])
x = Conv1x1BnReLU(conv_filters, use_batchnorm, name='aggregation')(x)
# model regularization
if dropout is not None:
x = layers.SpatialDropout2D(dropout, name='spatial_dropout')(x)
# model head
x = layers.Conv2D(
filters=classes,
kernel_size=(3, 3),
padding='same',
kernel_initializer='glorot_uniform',
name='final_conv',
)(x)
x = layers.UpSampling2D(final_upsampling_factor, name='final_upsampling', interpolation='bilinear')(x)
if activation in {'softmax', 'sigmoid'}:
x = layers.Activation(activation, name=activation)(x)
model = models.Model(input_, x)
return model
# ---------------------------------------------------------------------
# Utility functions
# ---------------------------------------------------------------------
def Conv2dBn(
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation=None,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_batchnorm=False,
**kwargs
):
"""Extension of Conv2D layer with batchnorm"""
conv_name, act_name, bn_name = None, None, None
block_name = kwargs.pop('name', None)
if block_name is not None:
conv_name = block_name + '_conv'
if block_name is not None and activation is not None:
act_str = activation.__name__ if callable(activation) else str(activation)
act_name = block_name + '_' + act_str
if block_name is not None and use_batchnorm:
bn_name = block_name + '_bn'
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
def wrapper(input_tensor):
x = layers.Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=None,
use_bias=not use_batchnorm,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
name=conv_name,
)(input_tensor)
if use_batchnorm:
x = layers.BatchNormalization(axis=bn_axis, name=bn_name)(x)
if activation:
x = layers.Activation(activation, name=act_name)(x)
return x
return wrapper
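# Illustrative (commented) usage of Conv2dBn; `input_tensor` is a hypothetical
# 4D Keras tensor and is not defined in this module:
# x = Conv2dBn(64, kernel_size=3, padding='same', activation='relu',
#              use_batchnorm=True, name='block1')(input_tensor)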
def check_input_shape(input_shape, factor):
if input_shape is None:
raise ValueError("Input shape should be a tuple of 3 integers, not None!")
h, w = input_shape[:2] if backend.image_data_format() == 'channels_last' else input_shape[1:]
min_size = factor * 6
is_wrong_shape = (
h % min_size != 0 or w % min_size != 0 or
h < min_size or w < min_size
)
if is_wrong_shape:
raise ValueError('Wrong shape {}, input H and W should '.format(input_shape) +
'be divisible by `{}`'.format(min_size))
# ---------------------------------------------------------------------
# Blocks
# ---------------------------------------------------------------------
def Conv1x1BnReLU(filters, use_batchnorm, name=None):
def wrapper(input_tensor):
return Conv2dBn(
filters,
kernel_size=1,
activation='relu',
kernel_initializer='he_uniform',
padding='same',
use_batchnorm=use_batchnorm,
name=name
)(input_tensor)
return wrapper
def SpatialContextBlock(
level,
conv_filters=512,
pooling_type='avg',
use_batchnorm=True,
):
if pooling_type not in ('max', 'avg'):
        raise ValueError('Unsupported pooling type - `{}`. '.format(pooling_type) +
'Use `avg` or `max`.')
Pooling2D = layers.MaxPool2D if pooling_type == 'max' else layers.AveragePooling2D
pooling_name = 'psp_level{}_pooling'.format(level)
conv_block_name = 'psp_level{}'.format(level)
upsampling_name = 'psp_level{}_upsampling'.format(level)
def wrapper(input_tensor):
# extract input feature maps size (h, and w dimensions)
input_shape = backend.int_shape(input_tensor)
spatial_size = input_shape[1:3] if backend.image_data_format() == 'channels_last' else input_shape[2:]
        # Choose pool size == stride so that pooling divides the spatial size by `level`,
        # giving pyramid feature maps of 1x1, 2x2, 3x3 and 6x6 for levels 1, 2, 3 and 6.
pool_size = up_size = [spatial_size[0] // level, spatial_size[1] // level]
x = Pooling2D(pool_size, strides=pool_size, padding='same', name=pooling_name)(input_tensor)
x = Conv1x1BnReLU(conv_filters, use_batchnorm, name=conv_block_name)(x)
x = layers.UpSampling2D(up_size, interpolation='bilinear', name=upsampling_name)(x)
return x
return wrapper
def freeze_model(model, **kwargs):
"""Set all layers non trainable, excluding BatchNormalization layers"""
for layer in model.layers:
if not isinstance(layer, layers.BatchNormalization):
layer.trainable = False
return
def filter_keras_submodules(kwargs):
"""Selects only arguments that define keras_application submodules. """
submodule_keys = kwargs.keys() & {'backend', 'layers', 'models', 'utils'}
return {key: kwargs[key] for key in submodule_keys}
def get_layer_number(model, layer_name):
"""
Help find layer in Keras model by name
Args:
model: Keras `Model`
layer_name: str, name of layer
Returns:
index of layer
Raises:
ValueError: if model does not contains layer with such name
"""
for i, l in enumerate(model.layers):
if l.name == layer_name:
return i
raise ValueError('No layer with name {} in model {}.'.format(layer_name, model.name))
backbones = {
"vgg16": VGG16,
"vgg19": VGG19,
"resnet18": ResNet18,
"resnet34": ResNet34,
"resnet50": ResNet50,
"resnet101": ResNet101,
"resnet152": ResNet152,
"resnext50": ResNeXt50,
"resnext101": ResNeXt101,
"inceptionresnetv2": InceptionResNetV2,
"inceptionv3": InceptionV3,
"densenet121": DenseNet121,
"densenet169": DenseNet169,
"densenet201": DenseNet201,
"efficientnetb0": EfficientNetB0,
"efficientnetb1": EfficientNetB1,
"efficientnetb2": EfficientNetB2,
"efficientnetb3": EfficientNetB3,
"efficientnetb4": EfficientNetB4,
"efficientnetb5": EfficientNetB5,
"efficientnetb6": EfficientNetB6,
"efficientnetb7": EfficientNetB7,
"efficientnetl2": EfficientNetL2,
"seresnext50": SEResNeXt50,
"seresnext101": SEResNeXt101,
"seresnet50": SEResNet50,
"seresnet101": SEResNet101,
"seresnet152": SEResNet152,
'senet154': SENet154
}
def get_backbone(name, *args, **kwargs):
return backbones[name](*args, **kwargs)
if __name__ == "__main__":
model1 = PSPNet('efficientnetb4', (1200, 1200, 3), encoder_weights='imagenet')
model1.summary() |
the-stack_0_9214 | # --Requires--:
# game.get_moves()
# game.execute_move()
# game.undo_move()
# game.is_final()
# game.get_score()
# game.get_states()
# get_board()
# get_turn()
# TODO: update find moves
import numpy as np
import time
class TicTacToe:
def __init__(self):
self.board = np.array([[[0, 0], [0, 0], [0, 0]],
[[0, 0], [0, 0], [0, 0]],
[[0, 0], [0, 0], [0, 0]]])
self.history = []
def get_moves(self):
return [x for x in range(9) if self.board[x // 3, x % 3, 0] == self.board[x // 3, x % 3, 1] == 0]
def get_legal_NN_output(self):
return [1 if self.board[x // 3, x % 3, 0] == self.board[x // 3, x % 3, 1] == 0 else 0 for x in range(9)]
# moves = []
# for x in range(9):
# if self.board[x // 3, x % 3, 0] == self.board[x // 3, x % 3, 1] == 0:
# moves.append(x)
# return moves
def execute_move(self, move):
self.board[move // 3, move % 3, len(self.history) % 2] = 1
self.history.append(move)
# poss_moves = self.get_moves()
# if move in poss_moves:
# self.board[move // 3, move % 3, len(self.history) % 2] = 1
# self.history.append(move)
# else:
# print('illegal move')
def undo_move(self):
if len(self.history) > 0:
move = self.history[-1]
self.board[move // 3, move % 3, (len(self.history) - 1) % 2] = 0
self.history.pop()
else:
print('could not undo move')
def _won(self):
player = 1 * (len(self.history) % 2 == 0)
for x in range(3):
# Horizontal
if self.board[x, 0, player] == self.board[x, 1, player] == self.board[x, 2, player] != 0:
return True
# Vertical
if self.board[0, x, player] == self.board[1, x, player] == self.board[2, x, player] != 0:
return True
# Diagonal
if self.board[0, 0, player] == self.board[1, 1, player] == self.board[2, 2, player] != 0:
return True
            if self.board[0, 2, player] == self.board[1, 1, player] == self.board[2, 0, player] != 0:
                return True
        return False
def is_final(self):
if self._won():
return True
if len(self.history) == 9:
return True
return False
def get_score(self):
if self.is_final():
if self._won():
return 2
else:
return 1
else:
print('not final')
def get_outcome(self):
if self.is_final():
if self._won():
return [1, -1] if len(self.history) % 2 == 1 else [-1, 1]
else:
return [0, 0]
else:
print("not finished")
def get_state(self):
# return [str(self.get_board())]
return str(self.history)
def get_turn(self):
return len(self.history) % 2 if not self.is_final() else None
def get_board(self):
return self.board if len(self.history) % 2 == 0 else np.flip(self.board, -1)
def print_board(self):
for x in range(3):
string = '|'
for y in range(3):
string += 'X' * int(self.board[x, y, 0] == 1)
string += 'O' * int(self.board[x, y, 1] == 1)
string += ' ' * int(self.board[x, y, 0] == self.board[x, y, 1] == 0)
string += '|'
print(string)
# game = TicTacToe()
# game.print_board()
# while True:
# inp = int(input("Number:"))
# game.execute_move(inp)
# game.print_board()
# game.undo_move()
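# A minimal random self-play sketch (illustrative; uses only the methods defined above).
if __name__ == "__main__":
    import random
    game = TicTacToe()
    while not game.is_final():
        game.execute_move(random.choice(game.get_moves()))
    game.print_board()
    print("outcome:", game.get_outcome())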
|
the-stack_0_9216 | from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import ExponentialLR, StepLR
import torch.nn.functional as F
from sklearn import metrics
from sklearn.model_selection import KFold, StratifiedKFold
from torch.autograd import Variable
import os
import warnings
import math
import numpy as np
from tqdm import tqdm, trange
import time
import random
import csv
from sklearn.ensemble import RandomForestRegressor as RFR
import rdkit
from rdkit import Chem, DataStructs
from rdkit.Chem import QED
from joblib import dump, load
import threading
from inference_schema.schema_decorators import input_schema, output_schema
from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType
import joblib  # sklearn.externals.joblib was removed in recent scikit-learn releases
import pickle
def normalize_desc(desc_array, desc_mean=None):
desc_array = np.array(desc_array).reshape(len(desc_array), -1)
ind = np.zeros(desc_array.shape)
for i in range(desc_array.shape[0]):
for j in range(desc_array.shape[1]):
try:
if np.isfinite(desc_array[i, j]):
ind[i, j] = 1
except:
pass
for i in range(desc_array.shape[0]):
for j in range(desc_array.shape[1]):
if ind[i, j] == 0:
desc_array[i, j] = 0
if desc_mean is None:
desc_mean = np.mean(desc_array, axis=0)
for i in range(desc_array.shape[0]):
for j in range(desc_array.shape[1]):
if ind[i, j] == 0:
desc_array[i, j] = desc_mean[j]
return desc_array, desc_mean
class Iterator(object):
"""Abstract base class for data iterators.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_generator = self._flow_index(n, batch_size, shuffle, seed)
if n < batch_size:
raise ValueError('Input data length is shorter than batch_size\nAdjust batch_size')
def reset(self):
self.batch_index = 0
def _flow_index(self, n, batch_size=32, shuffle=False, seed=None):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if seed is not None:
np.random.seed(seed + self.total_batches_seen)
if self.batch_index == 0:
index_array = np.arange(n)
if shuffle:
index_array = np.random.permutation(n)
current_index = (self.batch_index * batch_size) % n
if n > current_index + batch_size:
current_batch_size = batch_size
self.batch_index += 1
else:
current_batch_size = n - current_index
self.batch_index = 0
self.total_batches_seen += 1
yield (index_array[current_index: current_index + current_batch_size],
current_index, current_batch_size)
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
class SmilesIterator(Iterator):
"""Iterator yielding data from a SMILES array.
# Arguments
x: Numpy array of SMILES input data.
y: Numpy array of targets data.
smiles_data_generator: Instance of `SmilesEnumerator`
to use for random SMILES generation.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
dtype: dtype to use for returned batch. Set to keras.backend.floatx if using Keras
"""
def __init__(self, x, y, smiles_data_generator,
batch_size=32, shuffle=False, seed=None,
dtype=np.float32
):
if y is not None and len(x) != len(y):
raise ValueError('X (images tensor) and y (labels) '
'should have the same length. '
'Found: X.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
self.x = np.asarray(x)
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
self.smiles_data_generator = smiles_data_generator
self.dtype = dtype
super(SmilesIterator, self).__init__(x.shape[0], batch_size, shuffle, seed)
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array, current_index, current_batch_size = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
batch_x = np.zeros(
tuple([current_batch_size] + [self.smiles_data_generator.pad, self.smiles_data_generator._charlen]),
dtype=self.dtype)
for i, j in enumerate(index_array):
smiles = self.x[j:j + 1]
x = self.smiles_data_generator.transform(smiles)
batch_x[i] = x
if self.y is None:
return batch_x
batch_y = self.y[index_array]
return batch_x, batch_y
def get_desc(smiles, calc):
desc = []
processed_indices = []
invalid_indices = []
for i in range(len(smiles)):
sm = smiles[i]
try:
mol = Chem.MolFromSmiles(sm)
tmp = np.array(calc(mol))
desc.append(tmp)
processed_indices.append(i)
except:
invalid_indices.append(i)
desc_array = np.array(desc)
return desc_array, processed_indices, invalid_indices
def sanitize_smiles(smiles, canonical=True, throw_warning=False):
"""
Takes list of SMILES strings and returns list of their sanitized versions.
For definition of sanitized SMILES check
http://www.rdkit.org/docs/api/rdkit.Chem.rdmolops-module.html#SanitizeMol
Parameters
----------
smiles: list
list of SMILES strings
canonical: bool (default True)
parameter specifying whether SMILES will be converted to canonical
format
throw_warning: bool (default False)
parameter specifying whether warnings will be thrown if a SMILES is
invalid
Returns
-------
new_smiles: list
list of SMILES and NaNs if SMILES string is invalid or unsanitized.
If canonical is True, returns list of canonical SMILES.
When canonical is True this function is analogous to:
canonical_smiles(smiles, sanitize=True).
"""
new_smiles = []
for sm in smiles:
try:
if canonical:
new_smiles.append(Chem.MolToSmiles(Chem.MolFromSmiles(sm, sanitize=True)))
else:
new_smiles.append(sm)
except:
if throw_warning:
warnings.warn('Unsanitized SMILES string: ' + sm, UserWarning)
new_smiles.append('')
return new_smiles
def canonical_smiles(smiles, sanitize=True, throw_warning=False):
"""
Takes list of SMILES strings and returns list of their canonical SMILES.
Parameters
----------
smiles: list
list of SMILES strings to convert into canonical format
sanitize: bool (default True)
parameter specifying whether to sanitize SMILES or not.
For definition of sanitized SMILES check
http://www.rdkit.org/docs/api/rdkit.Chem.rdmolops-module.html#SanitizeMol
throw_warning: bool (default False)
parameter specifying whether warnings will be thrown if a SMILES is
invalid
Returns
-------
new_smiles: list
list of canonical SMILES and NaNs if SMILES string is invalid or
unsanitized (when sanitize is True)
When sanitize is True the function is analogous to:
sanitize_smiles(smiles, canonical=True).
"""
new_smiles = []
for sm in smiles:
try:
mol = Chem.MolFromSmiles(sm, sanitize=sanitize)
new_smiles.append(Chem.MolToSmiles(mol))
except:
if throw_warning:
warnings.warn(sm + ' can not be canonized: invalid '
'SMILES string!', UserWarning)
new_smiles.append('')
return new_smiles
def save_smi_to_file(filename, smiles, unique=True):
"""
Takes path to file and list of SMILES strings and writes SMILES to the specified file.
Args:
filename (str): path to the file
smiles (list): list of SMILES strings
unique (bool): parameter specifying whether to write only unique copies or not.
Output:
success (bool): defines whether operation was successfully completed or not.
"""
if unique:
smiles = list(set(smiles))
else:
smiles = list(smiles)
f = open(filename, 'w')
for mol in smiles:
f.writelines([mol, '\n'])
f.close()
return f.closed
def read_smi_file(filename, unique=True, add_start_end_tokens=False):
"""
Reads SMILES from file. File must contain one SMILES string per line
    with a newline character at the end of each line.
Args:
filename (str): path to the file
unique (bool): return only unique SMILES
Returns:
smiles (list): list of SMILES strings from specified file.
success (bool): defines whether operation was successfully completed or not.
If 'unique=True' this list contains only unique copies.
"""
f = open(filename, 'r')
molecules = []
for line in f:
if add_start_end_tokens:
molecules.append('<' + line[:-1] + '>')
else:
molecules.append(line[:-1])
if unique:
molecules = list(set(molecules))
else:
molecules = list(molecules)
f.close()
return molecules, f.closed
def tokenize(smiles, tokens=None):
"""
Returns list of unique tokens, token-2-index dictionary and number of
unique tokens from the list of SMILES
Parameters
----------
smiles: list
list of SMILES strings to tokenize.
tokens: list, str (default None)
list of unique tokens
Returns
-------
tokens: list
list of unique tokens/SMILES alphabet.
token2idx: dict
dictionary mapping token to its index.
num_tokens: int
number of unique tokens.
"""
if tokens is None:
tokens = list(set(''.join(smiles)))
tokens = list(np.sort(tokens))
tokens = ''.join(tokens)
token2idx = dict((token, i) for i, token in enumerate(tokens))
num_tokens = len(tokens)
return tokens, token2idx, num_tokens
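# Illustrative (commented) example of tokenize() on a tiny SMILES list:
# tokens, token2idx, n_tokens = tokenize(['<CCO>', '<c1ccccc1>'])
# `tokens` is the sorted alphabet string, e.g. containing '<', '>', '1', 'C', 'O', 'c'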
def time_since(since):
s = time.time() - since
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
class VanillaQSAR(object):
def __init__(self, model_instance=None, model_params=None,
model_type='classifier', ensemble_size=5, normalization=False):
super(VanillaQSAR, self).__init__()
self.model_instance = model_instance
self.model_params = model_params
self.ensemble_size = ensemble_size
self.model = []
self.normalization = normalization
        if model_type not in ['classifier', 'regressor']:
            raise ValueError("model type must be either "
                             "classifier or regressor")
self.model_type = model_type
if isinstance(self.model_instance, list):
assert(len(self.model_instance) == self.ensemble_size)
assert(isinstance(self.model_params, list))
assert(len(self.model_params) == self.ensemble_size)
for i in range(self.ensemble_size):
self.model.append(self.model_instance[i](**model_params[i]))
else:
for _ in range(self.ensemble_size):
self.model.append(self.model_instance(**model_params))
if self.normalization:
self.desc_mean = [0]*self.ensemble_size
self.metrics_type = None
def fit_model(self, data, cv_split='stratified'):
eval_metrics = []
x = data.x
if self.model_type == 'classifier' and data.binary_y is not None:
y = data.binary_y
else:
y = data.y
cross_val_data, cross_val_labels = cross_validation_split(x=x, y=y,
split=cv_split,
n_folds=self.ensemble_size)
for i in range(self.ensemble_size):
train_x = np.concatenate(cross_val_data[:i] +
cross_val_data[(i + 1):])
test_x = cross_val_data[i]
train_y = np.concatenate(cross_val_labels[:i] +
cross_val_labels[(i + 1):])
test_y = cross_val_labels[i]
if self.normalization:
train_x, desc_mean = normalize_desc(train_x)
self.desc_mean[i] = desc_mean
test_x, _ = normalize_desc(test_x, desc_mean)
self.model[i].fit(train_x, train_y.ravel())
predicted = self.model[i].predict(test_x)
if self.model_type == 'classifier':
eval_metrics.append(metrics.f1_score(test_y, predicted))
self.metrics_type = 'F1 score'
elif self.model_type == 'regressor':
r2 = metrics.r2_score(test_y, predicted)
eval_metrics.append(r2)
self.metrics_type = 'R^2 score'
else:
raise RuntimeError()
return eval_metrics, self.metrics_type
def load_model(self, path):
# TODO: add iterable path object instead of static path
self.model = joblib.load(path)
if self.normalization:
arr = np.load(path + 'desc_mean.npy')
self.desc_mean = arr
def save_model(self, path):
joblib.dump(self.model, path + '.joblib')
if self.normalization:
np.save(path + 'desc_mean.npy', self.desc_mean)
def predict(self, objects=None, average=True, get_features=None,
**kwargs):
objects = np.array(objects)
invalid_objects = []
processed_objects = []
if get_features is not None:
x, processed_indices, invalid_indices = get_features(objects,
**kwargs)
processed_objects = objects[processed_indices]
invalid_objects = objects[invalid_indices]
else:
x = objects
if len(x) == 0:
processed_objects = []
prediction = []
invalid_objects = objects
else:
prediction = []
for i in range(self.ensemble_size):
m = self.model[i]
if self.normalization:
x, _ = normalize_desc(x, self.desc_mean[i])
prediction.append(m.predict(x))
prediction = np.array(prediction)
if average:
prediction = prediction.mean(axis=0)
return processed_objects, prediction, invalid_objects
class StackAugmentedRNN(nn.Module):
def __init__(self, input_size, hidden_size, output_size, layer_type='GRU',
n_layers=1, is_bidirectional=False, has_stack=False,
stack_width=None, stack_depth=None, use_cuda=None,
optimizer_instance=torch.optim.Adadelta, lr=0.01):
"""
Constructor for the StackAugmentedRNN object.
Parameters
----------
input_size: int
number of characters in the alphabet
hidden_size: int
size of the RNN layer(s)
output_size: int
again number of characters in the alphabet
layer_type: str (default 'GRU')
type of the RNN layer to be used. Could be either 'LSTM' or 'GRU'.
n_layers: int (default 1)
number of RNN layers
is_bidirectional: bool (default False)
parameter specifying if RNN is bidirectional
has_stack: bool (default False)
parameter specifying if augmented memory stack is used
stack_width: int (default None)
if has_stack is True then this parameter defines width of the
augmented stack memory
stack_depth: int (default None)
if has_stack is True then this parameter define depth of the augmented
stack memory. Hint: no need fo stack depth to be larger than the
length of the longest sequence you plan to generate
use_cuda: bool (default None)
parameter specifying if GPU is used for computations. If left
unspecified, GPU will be used if available
optimizer_instance: torch.optim object (default torch.optim.Adadelta)
optimizer to be used for training
lr: float (default 0.01)
learning rate for the optimizer
"""
super(StackAugmentedRNN, self).__init__()
if layer_type not in ['GRU', 'LSTM']:
            raise ValueError('Layer type must be GRU or LSTM')
self.layer_type = layer_type
self.is_bidirectional = is_bidirectional
if self.is_bidirectional:
self.num_dir = 2
else:
self.num_dir = 1
if layer_type == 'LSTM':
self.has_cell = True
else:
self.has_cell = False
self.has_stack = has_stack
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
if self.has_stack:
self.stack_width = stack_width
self.stack_depth = stack_depth
self.use_cuda = use_cuda
if self.use_cuda is None:
self.use_cuda = torch.cuda.is_available()
self.n_layers = n_layers
if self.has_stack:
self.stack_controls_layer = nn.Linear(in_features=self.hidden_size *
self.num_dir,
out_features=3)
self.stack_input_layer = nn.Linear(in_features=self.hidden_size *
self.num_dir,
out_features=self.stack_width)
self.encoder = nn.Embedding(input_size, hidden_size)
if self.has_stack:
rnn_input_size = hidden_size + stack_width
else:
rnn_input_size = hidden_size
if self.layer_type == 'LSTM':
self.rnn = nn.LSTM(rnn_input_size, hidden_size, n_layers,
bidirectional=self.is_bidirectional)
self.decoder = nn.Linear(hidden_size * self.num_dir, output_size)
elif self.layer_type == 'GRU':
self.rnn = nn.GRU(rnn_input_size, hidden_size, n_layers,
bidirectional=self.is_bidirectional)
self.decoder = nn.Linear(hidden_size * self.num_dir, output_size)
self.log_softmax = torch.nn.LogSoftmax(dim=1)
if self.use_cuda:
self = self.cuda()
self.criterion = nn.CrossEntropyLoss()
self.lr = lr
self.optimizer_instance = optimizer_instance
self.optimizer = self.optimizer_instance(self.parameters(), lr=lr,
weight_decay=0.00001)
def load_model(self, path):
"""
Loads pretrained parameters from the checkpoint into the model.
Parameters
----------
path: str
path to the checkpoint file model will be loaded from.
"""
weights = torch.load(path, map_location=lambda storage, loc: storage)
self.load_state_dict(weights)
def save_model(self, path):
"""
Saves model parameters into the checkpoint file.
Parameters
----------
path: str
path to the checkpoint file model will be saved to.
"""
torch.save(self.state_dict(), path)
def change_lr(self, new_lr):
"""
Updates learning rate of the optimizer.
Parameters
----------
new_lr: float
new learning rate value
"""
self.optimizer = self.optimizer_instance(self.parameters(), lr=new_lr)
self.lr = new_lr
def forward(self, inp, hidden, stack):
"""
Forward step of the model. Generates probability of the next character
given the prefix.
Parameters
----------
inp: torch.tensor
input tensor that contains prefix string indices
hidden: torch.tensor or tuple(torch.tensor, torch.tensor)
previous hidden state of the model. If layer_type is 'LSTM',
then hidden is a tuple of hidden state and cell state, otherwise
hidden is torch.tensor
stack: torch.tensor
previous state of the augmented memory stack
Returns
-------
output: torch.tensor
tensor with non-normalized probabilities of the next character
next_hidden: torch.tensor or tuple(torch.tensor, torch.tensor)
next hidden state of the model. If layer_type is 'LSTM',
then next_hidden is a tuple of hidden state and cell state,
otherwise next_hidden is torch.tensor
next_stack: torch.tensor
next state of the augmented memory stack
"""
inp = self.encoder(inp.view(1, -1))
if self.has_stack:
if self.has_cell:
hidden_ = hidden[0]
else:
hidden_ = hidden
if self.is_bidirectional:
hidden_2_stack = torch.cat((hidden_[0], hidden_[1]), dim=1)
else:
hidden_2_stack = hidden_.squeeze(0)
stack_controls = self.stack_controls_layer(hidden_2_stack)
stack_controls = F.softmax(stack_controls, dim=1)
stack_input = self.stack_input_layer(hidden_2_stack.unsqueeze(0))
stack_input = torch.tanh(stack_input)
stack = self.stack_augmentation(stack_input.permute(1, 0, 2),
stack, stack_controls)
stack_top = stack[:, 0, :].unsqueeze(0)
inp = torch.cat((inp, stack_top), dim=2)
output, next_hidden = self.rnn(inp.view(1, 1, -1), hidden)
output = self.decoder(output.view(1, -1))
return output, next_hidden, stack
def stack_augmentation(self, input_val, prev_stack, controls):
"""
Augmentation of the tensor into the stack. For more details see
https://arxiv.org/abs/1503.01007
Parameters
----------
input_val: torch.tensor
tensor to be added to stack
prev_stack: torch.tensor
previous stack state
controls: torch.tensor
predicted probabilities for each operation in the stack, i.e
PUSH, POP and NO_OP. Again, see https://arxiv.org/abs/1503.01007
Returns
-------
new_stack: torch.tensor
new stack state
"""
batch_size = prev_stack.size(0)
controls = controls.view(-1, 3, 1, 1)
zeros_at_the_bottom = torch.zeros(batch_size, 1, self.stack_width)
if self.use_cuda:
zeros_at_the_bottom = Variable(zeros_at_the_bottom.cuda())
else:
zeros_at_the_bottom = Variable(zeros_at_the_bottom)
a_push, a_pop, a_no_op = controls[:, 0], controls[:, 1], controls[:, 2]
stack_down = torch.cat((prev_stack[:, 1:], zeros_at_the_bottom), dim=1)
stack_up = torch.cat((input_val, prev_stack[:, :-1]), dim=1)
new_stack = a_no_op * prev_stack + a_push * stack_up + a_pop * stack_down
return new_stack
def init_hidden(self):
"""
Initialization of the hidden state of RNN.
Returns
-------
hidden: torch.tensor
tensor filled with zeros of an appropriate size (taking into
account number of RNN layers and directions)
"""
if self.use_cuda:
return Variable(torch.zeros(self.n_layers * self.num_dir, 1,
self.hidden_size).cuda())
else:
return Variable(torch.zeros(self.n_layers * self.num_dir, 1,
self.hidden_size))
def init_cell(self):
"""
Initialization of the cell state of LSTM. Only used when layers_type is
'LSTM'
Returns
-------
cell: torch.tensor
tensor filled with zeros of an appropriate size (taking into
account number of RNN layers and directions)
"""
if self.use_cuda:
return Variable(torch.zeros(self.n_layers * self.num_dir, 1,
self.hidden_size).cuda())
else:
return Variable(torch.zeros(self.n_layers * self.num_dir, 1,
self.hidden_size))
def init_stack(self):
"""
Initialization of the stack state. Only used when has_stack is True
Returns
-------
stack: torch.tensor
tensor filled with zeros
"""
result = torch.zeros(1, self.stack_depth, self.stack_width)
if self.use_cuda:
return Variable(result.cuda())
else:
return Variable(result)
def train_step(self, inp, target):
"""
One train step, i.e. forward-backward and parameters update, for
a single training example.
Parameters
----------
inp: torch.tensor
tokenized training string from position 0 to position (seq_len - 1)
target:
tokenized training string from position 1 to position seq_len
Returns
-------
loss: float
mean value of the loss function (averaged through the sequence
length)
"""
hidden = self.init_hidden()
if self.has_cell:
cell = self.init_cell()
hidden = (hidden, cell)
if self.has_stack:
stack = self.init_stack()
else:
stack = None
self.optimizer.zero_grad()
loss = 0
for c in range(len(inp)):
output, hidden, stack = self(inp[c], hidden, stack)
loss += self.criterion(output, target[c].unsqueeze(0))
loss.backward()
self.optimizer.step()
return loss.item() / len(inp)
def evaluate(self, data, prime_str='<', end_token='>', predict_len=100):
"""
Generates new string from the model distribution.
Parameters
----------
data: object of type GeneratorData
stores information about the generator data format such alphabet, etc
prime_str: str (default '<')
            prime string that will be used as prefix. Default value is just the
START_TOKEN
end_token: str (default '>')
when end_token is sampled from the model distribution,
the generation of a new example is finished
predict_len: int (default 100)
maximum length of the string to be generated. If the end_token is
not sampled, the generation will be aborted when the length of the
generated sequence is equal to predict_len
Returns
-------
new_sample: str
Newly generated sample from the model distribution.
"""
hidden = self.init_hidden()
if self.has_cell:
cell = self.init_cell()
hidden = (hidden, cell)
if self.has_stack:
stack = self.init_stack()
else:
stack = None
prime_input = data.char_tensor(prime_str)
new_sample = prime_str
# Use priming string to "build up" hidden state
for p in range(len(prime_str)-1):
_, hidden, stack = self.forward(prime_input[p], hidden, stack)
inp = prime_input[-1]
for p in range(predict_len):
output, hidden, stack = self.forward(inp, hidden, stack)
# Sample from the network as a multinomial distribution
probs = torch.softmax(output, dim=1)
top_i = torch.multinomial(probs.view(-1), 1)[0].cpu().numpy()
# Add predicted character to string and use as next input
predicted_char = data.all_characters[top_i]
new_sample += predicted_char
inp = data.char_tensor(predicted_char)
if predicted_char == end_token:
break
return new_sample
def fit(self, data, n_iterations, all_losses=[], print_every=100,
plot_every=10, augment=False):
"""
        This method fits the parameters of the model. Training is performed to
minimize the cross-entropy loss when predicting the next character
given the prefix.
Parameters
----------
data: object of type GeneratorData
stores information about the generator data format such alphabet, etc
n_iterations: int
how many iterations of training will be performed
all_losses: list (default [])
list to store the values of the loss function
print_every: int (default 100)
feedback will be printed to std_out once every print_every
iterations of training
plot_every: int (default 10)
value of the loss function will be appended to all_losses once every
plot_every iterations of training
augment: bool (default False)
            parameter specifying if SMILES enumeration will be used. For more
details on SMILES enumeration see https://arxiv.org/abs/1703.07076
Returns
-------
all_losses: list
list that stores the values of the loss function (learning curve)
"""
start = time.time()
loss_avg = 0
if augment:
smiles_augmentation = SmilesEnumerator()
else:
smiles_augmentation = None
for epoch in trange(1, n_iterations + 1, desc='Training in progress...'):
inp, target = data.random_training_set(smiles_augmentation)
loss = self.train_step(inp, target)
loss_avg += loss
if epoch % print_every == 0:
print('[%s (%d %d%%) %.4f]' % (time_since(start), epoch,
epoch / n_iterations * 100, loss)
)
print(self.evaluate(data=data, prime_str = '<',
predict_len=100), '\n')
if epoch % plot_every == 0:
all_losses.append(loss_avg / plot_every)
loss_avg = 0
return all_losses
class SmilesEnumerator(object):
"""SMILES Enumerator, vectorizer and devectorizer
#Arguments
charset: string containing the characters for the vectorization
can also be generated via the .fit() method
pad: Length of the vectorization
leftpad: Add spaces to the left of the SMILES
isomericSmiles: Generate SMILES containing information about stereogenic centers
enum: Enumerate the SMILES during transform
canonical: use canonical SMILES during transform (overrides enum)
"""
def __init__(self, charset='@C)(=cOn1S2/H[N]\\', pad=120, leftpad=True, isomericSmiles=True, enum=True,
canonical=False):
self._charset = None
self.charset = charset
self.pad = pad
self.leftpad = leftpad
self.isomericSmiles = isomericSmiles
self.enumerate = enum
self.canonical = canonical
@property
def charset(self):
return self._charset
@charset.setter
def charset(self, charset):
self._charset = charset
self._charlen = len(charset)
self._char_to_int = dict((c, i) for i, c in enumerate(charset))
self._int_to_char = dict((i, c) for i, c in enumerate(charset))
def fit(self, smiles, extra_chars=[], extra_pad=5):
"""Performs extraction of the charset and length of a SMILES datasets and sets self.pad and self.charset
#Arguments
smiles: Numpy array or Pandas series containing smiles as strings
extra_chars: List of extra chars to add to the charset (e.g. "\\\\" when "/" is present)
extra_pad: Extra padding to add before or after the SMILES vectorization
"""
charset = set("".join(list(smiles)))
self.charset = "".join(charset.union(set(extra_chars)))
self.pad = max([len(smile) for smile in smiles]) + extra_pad
def randomize_smiles(self, smiles):
"""Perform a randomization of a SMILES string
must be RDKit sanitizable"""
m = Chem.MolFromSmiles(smiles)
ans = list(range(m.GetNumAtoms()))
np.random.shuffle(ans)
nm = Chem.RenumberAtoms(m, ans)
return Chem.MolToSmiles(nm, canonical=self.canonical, isomericSmiles=self.isomericSmiles)
def transform(self, smiles):
"""Perform an enumeration (randomization) and vectorization of a Numpy array of smiles strings
#Arguments
smiles: Numpy array or Pandas series containing smiles as strings
"""
one_hot = np.zeros((smiles.shape[0], self.pad, self._charlen), dtype=np.int8)
for i, ss in enumerate(smiles):
if self.enumerate: ss = self.randomize_smiles(ss)
for j, c in enumerate(ss):
one_hot[i, j, self._char_to_int[c]] = 1
return one_hot
def reverse_transform(self, vect):
""" Performs a conversion of a vectorized SMILES to a smiles strings
charset must be the same as used for vectorization.
#Arguments
vect: Numpy array of vectorized SMILES.
"""
smiles = []
for v in vect:
# mask v
v = v[v.sum(axis=1) == 1]
# Find one hot encoded index with argmax, translate to char and join to string
smile = "".join(self._int_to_char[i] for i in v.argmax(axis=1))
smiles.append(smile)
return np.array(smiles)
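# Illustrative (commented) usage of SmilesEnumerator; `train_smiles` is a
# hypothetical numpy array of SMILES strings:
# sme = SmilesEnumerator()
# sme.fit(train_smiles, extra_chars=['\\'])
# one_hot = sme.transform(train_smiles)       # shape: (n, sme.pad, len(sme.charset))
# restored = sme.reverse_transform(one_hot)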
def cross_validation_split(x, y, n_folds=5, split='random', folds=None):
assert(len(x) == len(y))
x = np.array(x)
y = np.array(y)
if split not in ['random', 'stratified', 'fixed']:
raise ValueError('Invalid value for argument \'split\': '
'must be either \'random\', \'stratified\' '
'or \'fixed\'')
if split == 'random':
cv_split = KFold(n_splits=n_folds, shuffle=True)
folds = list(cv_split.split(x, y))
elif split == 'stratified':
cv_split = StratifiedKFold(n_splits=n_folds, shuffle=True)
folds = list(cv_split.split(x, y))
elif split == 'fixed' and folds is None:
raise TypeError(
'Invalid type for argument \'folds\': found None, but must be list')
cross_val_data = []
cross_val_labels = []
if len(folds) == n_folds:
for fold in folds:
cross_val_data.append(x[fold[1]])
cross_val_labels.append(y[fold[1]])
elif len(folds) == len(x) and np.max(folds) == n_folds:
for f in range(n_folds):
left = np.where(folds == f)[0].min()
right = np.where(folds == f)[0].max()
cross_val_data.append(x[left:right + 1])
cross_val_labels.append(y[left:right + 1])
return cross_val_data, cross_val_labels
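# Illustrative (commented) usage of cross_validation_split; `x_desc` and `y_vals`
# are hypothetical feature and label arrays of equal length:
# data_folds, label_folds = cross_validation_split(x_desc, y_vals, n_folds=5,
#                                                  split='stratified')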
class PredictorData(object):
def __init__(self, path, delimiter=',', cols=[0, 1], get_features=None,
has_label=True, labels_start=1, **kwargs):
super(PredictorData, self).__init__()
data = read_object_property_file(path, delimiter, cols_to_read=cols)
if has_label:
self.objects = np.array(data[:labels_start]).reshape(-1)
self.y = np.array(data[labels_start:], dtype='float32')
self.y = self.y.reshape(-1, len(cols) - labels_start)
if self.y.shape[1] == 1:
self.y = self.y.reshape(-1)
else:
self.objects = np.array(data[:labels_start]).reshape(-1)
self.y = [None]*len(self.objects)
assert len(self.objects) == len(self.y)
if get_features is not None:
self.x, processed_indices, invalid_indices = \
get_features(self.objects, **kwargs)
self.invalid_objects = self.objects[invalid_indices]
self.objects = self.objects[processed_indices]
self.invalid_y = self.y[invalid_indices]
self.y = self.y[processed_indices]
else:
self.x = self.objects
self.invalid_objects = None
self.invalid_y = None
self.binary_y = None
def binarize(self, threshold):
self.binary_y = np.array(self.y >= threshold, dtype='int32')
class GeneratorData(object):
def __init__(self, training_data_path, tokens=None, start_token='<',
end_token='>', max_len=120, use_cuda=None, **kwargs):
super(GeneratorData, self).__init__()
if 'cols_to_read' not in kwargs:
kwargs['cols_to_read'] = []
data = read_object_property_file(training_data_path,
**kwargs)
self.start_token = start_token
self.end_token = end_token
self.file = []
for i in range(len(data)):
if len(data[i]) <= max_len:
self.file.append(self.start_token + data[i] + self.end_token)
self.file_len = len(self.file)
self.all_characters, self.char2idx, \
self.n_characters = tokenize(self.file, tokens)
self.use_cuda = use_cuda
if self.use_cuda is None:
self.use_cuda = torch.cuda.is_available()
def load_dictionary(self, tokens, char2idx):
self.all_characters = tokens
self.char2idx = char2idx
self.n_characters = len(tokens)
def random_chunk(self):
index = random.randint(0, self.file_len-1)
return self.file[index]
def char_tensor(self, string):
tensor = torch.zeros(len(string)).long()
for c in range(len(string)):
tensor[c] = self.all_characters.index(string[c])
        if self.use_cuda:
            return tensor.cuda()
        else:
            return tensor
def random_training_set(self, smiles_augmentation):
chunk = self.random_chunk()
if smiles_augmentation is not None:
chunk = '<' + smiles_augmentation.randomize_smiles(chunk[1:-1]) + '>'
inp = self.char_tensor(chunk[:-1])
target = self.char_tensor(chunk[1:])
return inp, target
def read_sdf_file(self, path, fields_to_read):
raise NotImplementedError
def update_data(self, path):
self.file, success = read_smi_file(path, unique=True)
self.file_len = len(self.file)
assert success
def read_object_property_file(path, delimiter=',', cols_to_read=[0, 1],
keep_header=False):
f = open(path, 'r')
reader = csv.reader(f, delimiter=delimiter)
data_full = np.array(list(reader))
if keep_header:
start_position = 0
else:
start_position = 1
assert len(data_full) > start_position
data = [[] for _ in range(len(cols_to_read))]
for i in range(len(cols_to_read)):
col = cols_to_read[i]
data[i] = data_full[start_position:, col]
f.close()
if len(cols_to_read) == 1:
data = data[0]
return data
def estimate_and_update(generator, predictor, n_to_generate, **kwargs):
generated = []
pbar = tqdm(range(n_to_generate))
for i in pbar:
pbar.set_description("Generating molecules...")
generated.append(generator.evaluate(gen_data, predict_len=120)[1:-1])
sanitized = canonical_smiles(generated, sanitize=False, throw_warning=False)[:-1]
unique_smiles = list(np.unique(sanitized))[1:]
smiles, prediction, nan_smiles = predictor.predict(unique_smiles, get_features=get_fp)
return smiles, prediction
def get_fp(smiles):
fp = []
processed_indices = []
invalid_indices = []
for i in range(len(smiles)):
mol = smiles[i]
tmp = np.array(mol2image(mol, n=2048))
if np.isnan(tmp[0]):
invalid_indices.append(i)
else:
fp.append(tmp)
processed_indices.append(i)
return np.array(fp), processed_indices, invalid_indices
def mol2image(x, n=2048):
try:
m = Chem.MolFromSmiles(x)
fp = Chem.RDKFingerprint(m, maxPath=4, fpSize=n)
res = np.zeros(len(fp))
DataStructs.ConvertToNumpyArray(fp, res)
return res
except:
return [np.nan]
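# Illustrative (commented) example: fingerprints for two valid SMILES (aspirin and
# benzene); the malformed string ends up in the invalid indices:
# fps, ok_idx, bad_idx = get_fp(['CC(=O)Oc1ccccc1C(=O)O', 'c1ccccc1', 'not_a_smiles'])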
def init():
global use_cuda
global my_generator
global gen_data
global my_predictor
hidden_size = 1500
stack_width = 1500
stack_depth = 200
layer_type = 'GRU'
n_characters = 45
lr = 0.001
optimizer_instance = torch.optim.Adadelta
# model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), './deploy_files')
use_cuda = False
# gen_data_path = model_path +'/6000smiles.csv'
gen_data_path = './deploy_files/6000smiles.csv'
tokens = ['<', '>', '#', '%', ')', '(', '+', '-', '/', '.', '1', '0', '3', '2', '5', '4', '7',
'6', '9', '8', '=', 'A', '@', 'C', 'B', 'F', 'I', 'H', 'O', 'N', 'P', 'S', '[', ']',
'\\', 'c', 'e', 'i', 'l', 'o', 'n', 'p', 's', 'r', '\n']
gen_data = GeneratorData(training_data_path=gen_data_path, delimiter='\t',
cols_to_read=[0], keep_header=True, tokens=tokens)
my_generator = StackAugmentedRNN(input_size=gen_data.n_characters, hidden_size=hidden_size,
output_size=gen_data.n_characters, layer_type=layer_type,
n_layers=1, is_bidirectional=False, has_stack=True,
stack_width=stack_width, stack_depth=stack_depth,
use_cuda=use_cuda,
optimizer_instance=optimizer_instance, lr=lr)
# gen_model_path = model_path +'/generative_model_max.pth'
gen_model_path = "./deploy_files/generative_model_max.pth"
my_generator.load_model(gen_model_path)
pred_data = PredictorData(path='./deploy_files/jak2_data.csv', get_features=get_fp)
#my_predict = predictor model
model_instance = RFR
model_params = {'n_estimators': 175, 'n_jobs': 10}
my_predictor = VanillaQSAR(model_instance=model_instance,
model_params=model_params,
model_type='regressor')
my_predictor.fit_model(pred_data, cv_split='random')
my_predictor.save_model('./deploy_files/vanillaqsar')
my_predictor.load_model('./deploy_files/vanillaqsar.joblib')
# @input_schema('n_to_generate', NumpyParameterType(input_sample))
# @output_schema(NumpyParameterType(output_sample))
def run(n_to_generate):
try:
smiles, pic50 = estimate_and_update(my_generator,my_predictor,n_to_generate=n_to_generate)
molecules = [Chem.MolFromSmiles(x) for x in smiles]
qed_list = []
for x in molecules:
try:
qed_list.append(QED.qed(x))
except Exception as e:
print("early error")
print(e)
# pass
# return smiles_biased_max.tolist()
return smiles.tolist(), pic50.tolist(), qed_list
except Exception as e:
print("oof error time")
error = str(e)
return error
if __name__ == "__main__":
init()
print(run(5)) |
the-stack_0_9222 | from app.announces.models import Announce
from tests.equipments.fakes import equipment1, equipment2, equipment3, get_equipment
from tests.shops.fakes import shop1, shop2, shop3, get_shop
shop1_equipment1_announce1 = Announce(1, shop1.id, shop1.name, equipment1.id, equipment1.name,
'New', 199.99)
shop1_equipment2_announce1 = Announce(2, shop1.id, shop1.name, equipment2.id, equipment2.name,
'Used', 149.99)
shop2_equipment2_announce1 = Announce(3, shop2.id, shop2.name, equipment2.id, equipment2.name,
'New', 400.00)
shop2_equipment2_announce2 = Announce(4, shop2.id, shop2.name, equipment2.id, equipment2.name,
'Needs repair',
300.00)
shop3_equipment1_announce1 = Announce(5, shop3.id, shop3.name, equipment1.id, equipment1.name,
'Used', 49.99)
shop3_equipment3_announce1 = Announce(6, shop3.id, shop3.name, equipment3.id, equipment3.name,
'Used', 99.99)
shop1.announces = [shop1_equipment1_announce1, shop1_equipment2_announce1]
shop2.announces = [shop2_equipment2_announce1, shop2_equipment2_announce2]
shop3.announces = [shop3_equipment1_announce1, shop3_equipment3_announce1]
equipment1.announces = [shop1_equipment1_announce1, shop3_equipment1_announce1]
equipment2.announces = [shop1_equipment2_announce1, shop2_equipment2_announce1,
shop2_equipment2_announce2]
equipment3.announces = [shop3_equipment3_announce1]
def get_announces_for_shop(shop_id):
return get_shop(shop_id).announces
def get_announces_for_equipment(equipment_id):
return get_equipment(equipment_id).announces
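# Illustrative (commented) usage in a test:
# assert get_announces_for_shop(shop1.id) == [shop1_equipment1_announce1,
#                                             shop1_equipment2_announce1]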
|
the-stack_0_9224 | import os
import secrets
from PIL import Image
from flask import render_template, url_for, flash, redirect, request, abort
from flaskblog import app, db, bcrypt
from flaskblog.forms import RegistrationForm, LoginForm, UpdateAccountForm, PostForm, CommentForm
from flaskblog.models import User, Post, Comment
from flask_login import login_user, current_user, logout_user, login_required
# aliased so it does not shadow flask's `request` imported above
from flaskblog import request as quote_request
@app.route("/")
@app.route("/home")
def home():
#page = request.args.get('page', 1, type=int)
    quote = quote_request.get_quote()
    posts = Post.query.order_by(Post.date_posted.desc()).paginate(per_page=5)
return render_template('home.html', posts=posts, quote=quote)
@app.route("/register", methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = RegistrationForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user = User(username=form.username.data, email=form.email.data, password=hashed_password)
db.session.add(user)
db.session.commit()
flash('Your account has been created! You are now able to log in', 'success')
return redirect(url_for('login'))
return render_template('register.html', title='Register', form=form)
@app.route("/login", methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
# next_page = request.args.get('next')
return redirect(url_for('home'))
else:
flash('Login Unsuccessful. Please check email and password', 'danger')
return render_template('login.html', title='Login', form=form)
@app.route("/logout")
def logout():
logout_user()
return redirect(url_for('home'))
def save_picture(form_picture):
random_hex = secrets.token_hex(8)
_, f_ext = os.path.splitext(form_picture.filename)
picture_fn = random_hex + f_ext
picture_path = os.path.join(app.root_path, 'static/dp', picture_fn)
output_size = (125, 125)
i = Image.open(form_picture)
i.thumbnail(output_size)
i.save(picture_path)
return picture_fn
@app.route("/account", methods=['GET', 'POST'])
@login_required
def account():
form = UpdateAccountForm()
if form.validate_on_submit():
if form.picture.data:
picture_file = save_picture(form.picture.data)
current_user.image_file = picture_file
current_user.username = form.username.data
current_user.email = form.email.data
db.session.commit()
flash('Your account has been updated!', 'success')
return redirect(url_for('account'))
elif request.method == 'GET':
form.username.data = current_user.username
form.email.data = current_user.email
image_file = url_for('static', filename='dp/' + current_user.image_file)
return render_template('account.html', title='Account',
image_file=image_file, form=form)
@app.route("/post/new", methods=['GET', 'POST'])
@login_required
def new_post():
form = PostForm()
if form.validate_on_submit():
post = Post(title=form.title.data, content=form.content.data, author=current_user)
db.session.add(post)
db.session.commit()
flash('Your post has been created!', 'success')
return redirect(url_for('home'))
return render_template('create_post.html', title='New Post',
form=form, legend='New Post')
@app.route("/post/<int:post_id>", methods=['GET', 'POST'])
def post(post_id):
post = Post.query.get_or_404(post_id)
comments = Comment.query.all()
form = CommentForm()
if form.validate_on_submit():
comment = Comment(content=form.content.data, author=current_user)
db.session.add(comment)
db.session.commit()
flash('Your comment has been created!', 'success')
return redirect(url_for('home'))
return render_template('post.html', post=post, form=form, comments=comments)
@app.route("/post/<int:post_id>/update", methods=['GET', 'POST'])
@login_required
def update_post(post_id):
post = Post.query.get_or_404(post_id)
if post.author != current_user:
abort(403)
form = PostForm()
if form.validate_on_submit():
post.title = form.title.data
post.content = form.content.data
db.session.commit()
flash('Your post has been updated!', 'success')
return redirect(url_for('post', post_id=post.id))
elif request.method == 'GET':
form.title.data = post.title
form.content.data = post.content
return render_template('create_post.html', title='Update Post',
form=form, legend='Update Post')
@app.route("/post/<int:post_id>/delete", methods=['POST'])
@login_required
def delete_post(post_id):
post = Post.query.get_or_404(post_id)
if post.author != current_user:
abort(403)
db.session.delete(post)
db.session.commit()
flash('Your post has been deleted!', 'success')
return redirect(url_for('home'))
@app.route("/user/<string:username>")
def user_posts(username):
page = request.args.get('page', 1, type=int)
user = User.query.filter_by(username=username).first_or_404()
posts = Post.query.filter_by(author=user)\
.order_by(Post.date_posted.desc())\
.paginate(page=page, per_page=5)
return render_template('user_posts.html', posts=posts, user=user)
|
the-stack_0_9225 | import discord
# Imports permissions from discord.commands
from discord.commands import permissions
bot = discord.Bot()
# Note: you can use commands.Bot instead of discord.Bot.
# Use discord.Bot if you don't want prefixed message commands.
# With discord.Bot you can use @bot.command as an alias
# of @bot.slash_command, but this is overridden by commands.Bot.
# By default, default_permission is set to True; you can use
# default_permission=False to disable the command for everyone.
# You can add up to 10 permissions per Command for a guild.
# You can either use the following decorators:
# --------------------------------------------
# @permissions.permission(role_id/user_id, permission)
# @permissions.has_role("ROLE_NAME") <-- can use either a name or id
# @permissions.has_any_role("ROLE_NAME", "ROLE_NAME_2") <-- can use either a name or id
# @permissions.is_user(USER_ID) <-- id only
# @permissions.is_owner()
# Note: you can supply "guild_id" to limit it to 1 guild.
# Ex: @permissions.has_role("Admin", guild_id=GUILD_ID)
# --------------------------------------------
# or supply permissions directly in @bot.slash_command
# @bot.slash_command(default_permission=False,
# permissions=[permissions.Permission(id=ID, type=TYPE, permission=True, guild_id=GUILD_ID)])
# Note: Please replace token, GUILD_ID, USER_ID and ROLE_NAME.
# Guild Slash Command Example with User Permissions
@bot.slash_command(guild_ids=[GUILD_ID], default_permission=False)
@permissions.is_user(USER_ID)
async def user(ctx):
"""Say hello to the author""" # the command description can be supplied as the docstring
await ctx.respond(f"Hello {ctx.author}!")
# Guild Slash Command Example with Owner Permissions
@bot.slash_command(guild_ids=[GUILD_ID], default_permission=False)
@permissions.is_owner()
async def owner(ctx):
"""Say hello to the author""" # the command description can be supplied as the docstring
await ctx.respond(f"Hello {ctx.author}!")
# Guild Slash Command Example with Role Permissions
@bot.slash_command(guild_ids=[GUILD_ID], default_permission=False)
@permissions.has_role("ROLE_NAME")
async def role(ctx):
"""Say hello to the author""" # the command description can be supplied as the docstring
await ctx.respond(f"Hello {ctx.author}!")
# Guild Slash Command Example with Any Specified Role Permissions
@bot.slash_command(guild_ids=[GUILD_ID], default_permission=False)
@permissions.has_any_role("ROLE_NAME", "ROLE_NAME2")
async def multirole(ctx):
"""Say hello to the author""" # the command description can be supplied as the docstring
await ctx.respond(f"Hello {ctx.author}!")
# Guild Slash Command Example with Permission Decorator
@bot.slash_command(guild_ids=[GUILD_ID], default_permission=False)
@permissions.permission(user_id=USER_ID, permission=True)
async def permission_decorator(ctx):
"""Say hello to the author""" # the command description can be supplied as the docstring
await ctx.respond(f"Hello {ctx.author}!")
# Guild Slash Command Example with Permissions Kwarg
@bot.slash_command(
guild_ids=[GUILD_ID],
default_permission=False,
permissions=[permissions.Permission(id=USER_ID, type=2, permission=True)],
)
async def permission_kwarg(ctx):
"""Say hello to the author""" # the command description can be supplied as the docstring
await ctx.respond(f"Hello {ctx.author}!")
# To learn how to add descriptions, choices to options check slash_options.py
bot.run("token")
|
the-stack_0_9226 | # Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
import pytest
from kubernetes import client
from kfserving import KFServingClient
from kfserving import constants
from kfserving import V1alpha2EndpointSpec
from kfserving import V1alpha2PredictorSpec
from kfserving import V1alpha2PyTorchSpec
from kfserving import V1alpha2InferenceServiceSpec
from kfserving import V1alpha2InferenceService
from kubernetes.client import V1ResourceRequirements
from ..common.utils import predict
from ..common.utils import KFSERVING_TEST_NAMESPACE
api_version = constants.KFSERVING_GROUP + '/' + constants.KFSERVING_VERSION
KFServing = KFServingClient(config_file=os.environ.get("KUBECONFIG", "~/.kube/config"))
def test_pytorch():
service_name = 'isvc-pytorch'
default_endpoint_spec = V1alpha2EndpointSpec(
predictor=V1alpha2PredictorSpec(
min_replicas=1,
parallelism=1,
pytorch=V1alpha2PyTorchSpec(
storage_uri='gs://kfserving-samples/models/pytorch/cifar10',
model_class_name="Net",
resources=V1ResourceRequirements(
requests={'cpu': '100m', 'memory': '2Gi'},
limits={'cpu': '100m', 'memory': '2Gi'}))))
isvc = V1alpha2InferenceService(api_version=api_version,
kind=constants.KFSERVING_KIND,
metadata=client.V1ObjectMeta(
name=service_name, namespace=KFSERVING_TEST_NAMESPACE),
spec=V1alpha2InferenceServiceSpec(default=default_endpoint_spec))
KFServing.create(isvc)
try:
KFServing.wait_isvc_ready(service_name, namespace=KFSERVING_TEST_NAMESPACE)
except RuntimeError as e:
print(KFServing.api_instance.get_namespaced_custom_object("serving.knative.dev", "v1", KFSERVING_TEST_NAMESPACE,
"services", service_name + "-predictor-default"))
pods = KFServing.core_api.list_namespaced_pod(KFSERVING_TEST_NAMESPACE,
label_selector='serving.kubeflow.org/inferenceservice={}'.
format(service_name))
for pod in pods.items:
print(pod)
raise e
res = predict(service_name, './data/cifar_input.json')
assert(np.argmax(res["predictions"]) == 3)
KFServing.delete(service_name, KFSERVING_TEST_NAMESPACE)
|
the-stack_0_9230 | import tensorflow as tf
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt
import numpy as np
from model.unet import UNet
# download the dataset and get info
dataset, info = tfds.load('oxford_iiit_pet:3.*.*', with_info=True)
# see the possible keys we can access in the dataset dict.
# this contains the test and train splits.
print(dataset.keys())
# see information about the dataset
print(info)
# Preprocessing Utilities
def random_flip(input_image, input_mask):
"""does a random flip of the image and mask"""
if tf.random.uniform(()) > 0.5:
input_image = tf.image.flip_left_right(input_image)
input_mask = tf.image.flip_left_right(input_mask)
return input_image, input_mask
def normalize(input_image, input_mask):
"""
normalizes the input image pixel values to be from [0,1].
subtracts 1 from the mask labels to have a range from [0,2]
"""
input_image = tf.cast(input_image, tf.float32) / 255.0
input_mask -= 1
return input_image, input_mask
@tf.function
def load_image_train(datapoint):
"""resizes, normalizes, and flips the training data"""
input_image = tf.image.resize(datapoint["image"], (128, 128), method="nearest")
input_mask = tf.image.resize(
datapoint["segmentation_mask"], (128, 128), method="nearest"
)
input_image, input_mask = random_flip(input_image, input_mask)
input_image, input_mask = normalize(input_image, input_mask)
return input_image, input_mask
def load_image_test(datapoint):
"""resizes and normalizes the test data"""
input_image = tf.image.resize(datapoint["image"], (128, 128), method="nearest")
input_mask = tf.image.resize(
datapoint["segmentation_mask"], (128, 128), method="nearest"
)
input_image, input_mask = normalize(input_image, input_mask)
return input_image, input_mask
# preprocess the train and test sets
train = dataset["train"].map(
load_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE
)
test = dataset["test"].map(load_image_test)
BATCH_SIZE = 64
BUFFER_SIZE = 1000
# shuffle and group the train set into batches
train_dataset = train.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()
# do a prefetch to optimize processing
train_dataset = train_dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
# group the test set into batches
test_dataset = test.batch(BATCH_SIZE)
# class list of the mask pixels
class_names = ["pet", "background", "outline"]
def display_with_metrics(display_list, iou_list, dice_score_list):
"""displays a list of images/masks and overlays a list of IOU and Dice Scores"""
metrics_by_id = [
(idx, iou, dice_score)
for idx, (iou, dice_score) in enumerate(zip(iou_list, dice_score_list))
if iou > 0.0
]
metrics_by_id.sort(key=lambda tup: tup[1], reverse=True) # sorts in place
display_string_list = [
"{}: IOU: {} Dice Score: {}".format(class_names[idx], iou, dice_score)
for idx, iou, dice_score in metrics_by_id
]
display_string = "\n\n".join(display_string_list)
display(
display_list,
["Image", "Predicted Mask", "True Mask"],
display_string=display_string,
)
def display(display_list, titles=[], display_string=None):
"""displays a list of images/masks"""
plt.figure(figsize=(15, 15))
for i in range(len(display_list)):
plt.subplot(1, len(display_list), i + 1)
plt.title(titles[i])
plt.xticks([])
plt.yticks([])
if display_string and i == 1:
plt.xlabel(display_string, fontsize=12)
img_arr = tf.keras.preprocessing.image.array_to_img(display_list[i])
plt.imshow(img_arr)
plt.show()
def show_image_from_dataset(dataset):
"""displays the first image and its mask from a dataset"""
for image, mask in dataset.take(1):
sample_image, sample_mask = image, mask
display([sample_image, sample_mask], titles=["Image", "True Mask"])
def plot_metrics(metric_name, title, ylim=5):
"""plots a given metric from the model history"""
plt.title(title)
plt.ylim(0, ylim)
plt.plot(model_history.history[metric_name], color="blue", label=metric_name)
plt.plot(
model_history.history["val_" + metric_name],
color="green",
label="val_" + metric_name,
)
input_shape = (None, 128, 128, 3)
model = UNet()
model.build(input_shape)
model.summary()
# configure the optimizer, loss and metrics for training
model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
# configure the training parameters and train the model
TRAIN_LENGTH = info.splits["train"].num_examples
EPOCHS = 10
VAL_SUBSPLITS = 5
STEPS_PER_EPOCH = TRAIN_LENGTH // BATCH_SIZE
VALIDATION_STEPS = info.splits["test"].num_examples // BATCH_SIZE // VAL_SUBSPLITS
# this will take around 20 minutes to run
model_history = model.fit(
train_dataset,
epochs=EPOCHS,
steps_per_epoch=STEPS_PER_EPOCH,
validation_steps=VALIDATION_STEPS,
validation_data=test_dataset,
)
# Prediction Utilities
def get_test_image_and_annotation_arrays():
"""
Unpacks the test dataset and returns the input images and segmentation masks
"""
ds = test_dataset.unbatch()
ds = ds.batch(info.splits["test"].num_examples)
images = []
y_true_segments = []
for image, annotation in ds.take(1):
y_true_segments = annotation.numpy()
images = image.numpy()
y_true_segments = y_true_segments[
: (
info.splits["test"].num_examples
- (info.splits["test"].num_examples % BATCH_SIZE)
)
]
return (
images[
: (
info.splits["test"].num_examples
- (info.splits["test"].num_examples % BATCH_SIZE)
)
],
y_true_segments,
)
def create_mask(pred_mask):
"""
Creates the segmentation mask by getting the channel with the highest probability. Remember that we
    have 3 channels in the output of the UNet. For each pixel, the prediction will be the channel with the
highest probability.
"""
pred_mask = tf.argmax(pred_mask, axis=-1)
pred_mask = pred_mask[..., tf.newaxis]
return pred_mask[0].numpy()
def make_predictions(image, mask, num=1):
"""
Feeds an image to a model and returns the predicted mask.
"""
image = np.reshape(image, (1, image.shape[0], image.shape[1], image.shape[2]))
pred_mask = model.predict(image)
pred_mask = create_mask(pred_mask)
return pred_mask
def class_wise_metrics(y_true, y_pred):
class_wise_iou = []
class_wise_dice_score = []
smoothening_factor = 0.00001
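    # the small smoothing factor keeps IoU and Dice defined (no division by zero)
    # when a class is absent from both the prediction and the ground truth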
for i in range(3):
intersection = np.sum((y_pred == i) * (y_true == i))
y_true_area = np.sum((y_true == i))
y_pred_area = np.sum((y_pred == i))
combined_area = y_true_area + y_pred_area
iou = (intersection + smoothening_factor) / (
combined_area - intersection + smoothening_factor
)
class_wise_iou.append(iou)
dice_score = 2 * (
(intersection + smoothening_factor) / (combined_area + smoothening_factor)
)
class_wise_dice_score.append(dice_score)
return class_wise_iou, class_wise_dice_score
# Setup the ground truth and predictions.
# get the ground truth from the test set
y_true_images, y_true_segments = get_test_image_and_annotation_arrays()
# feed the test set to the model to get the predicted masks
results = model.predict(
test_dataset, steps=info.splits["test"].num_examples // BATCH_SIZE
)
results = np.argmax(results, axis=3)
results = results[..., tf.newaxis]
# compute the class wise metrics
cls_wise_iou, cls_wise_dice_score = class_wise_metrics(y_true_segments, results)
# show the IOU for each class
for idx, iou in enumerate(cls_wise_iou):
spaces = " " * (10 - len(class_names[idx]) + 2)
print("{}{}{} ".format(class_names[idx], spaces, iou))
# show the Dice Score for each class
for idx, dice_score in enumerate(cls_wise_dice_score):
spaces = " " * (10 - len(class_names[idx]) + 2)
print("{}{}{} ".format(class_names[idx], spaces, dice_score))
|
the-stack_0_9231 | """
This problem was asked by LinkedIn.
Given a list of points, a central point, and an integer k, find the nearest k points from the central point.
For example, given the list of points [(0, 0), (5, 4), (3, 1)], the central point (1, 2), and k = 2,
return [(0, 0), (3, 1)].
"""
def k_nearest(points, central, k):
    def squared_l2_norm(p1, p2):
        return (p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2
    distances = {p: squared_l2_norm(p, central) for p in points}
    # return the k nearest points after sorting them by distance to the central point
    return [p for p, _ in sorted(distances.items(), key=lambda item: item[1])][:k]
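# A hedged alternative for large inputs: heapq.nsmallest keeps only the k best
# candidates (O(n log k)) instead of sorting every distance. The helper name is
# an assumption, not part of the original problem statement.
#   import heapq
#   def k_nearest_heap(points, central, k):
#       dist = lambda p: (p[0] - central[0]) ** 2 + (p[1] - central[1]) ** 2
#       return heapq.nsmallest(k, points, key=dist)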
if __name__ == '__main__':
print(k_nearest([(0, 0), (5, 4), (3, 1)], (1, 2), 2)) |
the-stack_0_9232 | # This is a polyfill for dataclasses
# https://docs.python.org/3/library/dataclasses.html
# Original PEP proposal: PEP 557
# https://www.python.org/dev/peps/pep-0557/
import re
import sys
import copy
import types
import inspect
import keyword
__all__ = [
"dataclass",
"field",
"Field",
"FrozenInstanceError",
"InitVar",
"MISSING",
# Helper functions.
"fields",
"asdict",
"astuple",
"make_dataclass",
"replace",
"is_dataclass",
]
# Conditions for adding methods. The boxes indicate what action the
# dataclass decorator takes. For all of these tables, when I talk
# about init=, repr=, eq=, order=, unsafe_hash=, or frozen=, I'm
# referring to the arguments to the @dataclass decorator. When
# checking if a dunder method already exists, I mean check for an
# entry in the class's __dict__. I never check to see if an attribute
# is defined in a base class.
# Key:
# +=========+=========================================+
# + Value | Meaning |
# +=========+=========================================+
# | <blank> | No action: no method is added. |
# +---------+-----------------------------------------+
# | add | Generated method is added. |
# +---------+-----------------------------------------+
# | raise | TypeError is raised. |
# +---------+-----------------------------------------+
# | None | Attribute is set to None. |
# +=========+=========================================+
# __init__
#
# +--- init= parameter
# |
# v | | |
# | no | yes | <--- class has __init__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __repr__
#
# +--- repr= parameter
# |
# v | | |
# | no | yes | <--- class has __repr__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __setattr__
# __delattr__
#
# +--- frozen= parameter
# |
# v | | |
# | no | yes | <--- class has __setattr__ or __delattr__ in __dict__?
# +=======+=======+=======+
# | False | | | <- the default
# +-------+-------+-------+
# | True | add | raise |
# +=======+=======+=======+
# Raise because not adding these methods would break the "frozen-ness"
# of the class.
# __eq__
#
# +--- eq= parameter
# |
# v | | |
# | no | yes | <--- class has __eq__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __lt__
# __le__
# __gt__
# __ge__
#
# +--- order= parameter
# |
# v | | |
# | no | yes | <--- class has any comparison method in __dict__?
# +=======+=======+=======+
# | False | | | <- the default
# +-------+-------+-------+
# | True | add | raise |
# +=======+=======+=======+
# Raise because to allow this case would interfere with using
# functools.total_ordering.
# __hash__
# +------------------- unsafe_hash= parameter
# | +----------- eq= parameter
# | | +--- frozen= parameter
# | | |
# v v v | | |
# | no | yes | <--- class has explicitly defined __hash__
# +=======+=======+=======+========+========+
# | False | False | False | | | No __eq__, use the base class __hash__
# +-------+-------+-------+--------+--------+
# | False | False | True | | | No __eq__, use the base class __hash__
# +-------+-------+-------+--------+--------+
# | False | True | False | None | | <-- the default, not hashable
# +-------+-------+-------+--------+--------+
# | False | True | True | add | | Frozen, so hashable, allows override
# +-------+-------+-------+--------+--------+
# | True | False | False | add | raise | Has no __eq__, but hashable
# +-------+-------+-------+--------+--------+
# | True | False | True | add | raise | Has no __eq__, but hashable
# +-------+-------+-------+--------+--------+
# | True | True | False | add | raise | Not frozen, but hashable
# +-------+-------+-------+--------+--------+
# | True | True | True | add | raise | Frozen, so hashable
# +=======+=======+=======+========+========+
# For boxes that are blank, __hash__ is untouched and therefore
# inherited from the base class. If the base is object, then
# id-based hashing is used.
#
# Note that a class may already have __hash__=None if it specified an
# __eq__ method in the class body (not one that was created by
# @dataclass).
#
# See _hash_action (below) for a coded version of this table.
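# A hedged illustration of the eq/frozen rows above (class names are hypothetical):
#   @dataclass                  # eq=True, frozen=False -> __hash__ is set to None
#   class Point:
#       x: int
#   @dataclass(frozen=True)     # eq=True, frozen=True -> __hash__ is generated
#   class FrozenPoint:
#       x: int
#   hash(FrozenPoint(1))        # works
#   hash(Point(1))              # raises TypeError: unhashable type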
# Raised when an attempt is made to modify a frozen class.
class FrozenInstanceError(AttributeError):
pass
# A sentinel object for default values to signal that a default
# factory will be used. This is given a nice repr() which will appear
# in the function signature of dataclasses' constructors.
class _HAS_DEFAULT_FACTORY_CLASS:
def __repr__(self):
return "<factory>"
_HAS_DEFAULT_FACTORY = _HAS_DEFAULT_FACTORY_CLASS()
# A sentinel object to detect if a parameter is supplied or not. Use
# a class to give it a better repr.
class _MISSING_TYPE:
pass
MISSING = _MISSING_TYPE()
# Since most per-field metadata will be unused, create an empty
# read-only proxy that can be shared among all fields.
_EMPTY_METADATA = types.MappingProxyType({})
# Markers for the various kinds of fields and pseudo-fields.
class _FIELD_BASE:
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
_FIELD = _FIELD_BASE("_FIELD")
_FIELD_CLASSVAR = _FIELD_BASE("_FIELD_CLASSVAR")
_FIELD_INITVAR = _FIELD_BASE("_FIELD_INITVAR")
# The name of an attribute on the class where we store the Field
# objects. Also used to check if a class is a Data Class.
_FIELDS = "__dataclass_fields__"
# The name of an attribute on the class that stores the parameters to
# @dataclass.
_PARAMS = "__dataclass_params__"
# The name of the function, that if it exists, is called at the end of
# __init__.
_POST_INIT_NAME = "__post_init__"
# String regex that string annotations for ClassVar or InitVar must match.
# Allows "identifier.identifier[" or "identifier[".
# https://bugs.python.org/issue33453 for details.
_MODULE_IDENTIFIER_RE = re.compile(r"^(?:\s*(\w+)\s*\.)?\s*(\w+)")
class _InitVarMeta(type):
def __getitem__(self, params):
return self
class InitVar(metaclass=_InitVarMeta):
pass
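# Hedged usage sketch (hypothetical class): an InitVar is accepted by __init__ and
# forwarded to __post_init__, but it is not stored as a field on the instance.
#   @dataclass
#   class C:
#       x: int
#       scale: InitVar[int] = 1
#       def __post_init__(self, scale):
#           self.x *= scale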
# Instances of Field are only ever created from within this module,
# and only from the field() function, although Field instances are
# exposed externally as (conceptually) read-only objects.
#
# name and type are filled in after the fact, not in __init__.
# They're not known at the time this class is instantiated, but it's
# convenient if they're available later.
#
# When cls._FIELDS is filled in with a list of Field objects, the name
# and type fields will have been populated.
class Field:
__slots__ = (
"name",
"type",
"default",
"default_factory",
"repr",
"hash",
"init",
"compare",
"metadata",
"_field_type", # Private: not to be used by user code.
)
def __init__(self, default, default_factory, init, repr, hash, compare, metadata):
self.name = None
self.type = None
self.default = default
self.default_factory = default_factory
self.init = init
self.repr = repr
self.hash = hash
self.compare = compare
self.metadata = (
_EMPTY_METADATA
if metadata is None or len(metadata) == 0
else types.MappingProxyType(metadata)
)
self._field_type = None
def __repr__(self):
return (
"Field("
f"name={self.name!r},"
f"type={self.type!r},"
f"default={self.default!r},"
f"default_factory={self.default_factory!r},"
f"init={self.init!r},"
f"repr={self.repr!r},"
f"hash={self.hash!r},"
f"compare={self.compare!r},"
f"metadata={self.metadata!r},"
f"_field_type={self._field_type}"
")"
)
# This is used to support the PEP 487 __set_name__ protocol in the
# case where we're using a field that contains a descriptor as a
    # default value. For details on __set_name__, see
# https://www.python.org/dev/peps/pep-0487/#implementation-details.
#
# Note that in _process_class, this Field object is overwritten
# with the default value, so the end result is a descriptor that
# had __set_name__ called on it at the right time.
def __set_name__(self, owner, name):
func = getattr(type(self.default), "__set_name__", None)
if func:
# There is a __set_name__ method on the descriptor, call
# it.
func(self.default, owner, name)
class _DataclassParams:
__slots__ = ("init", "repr", "eq", "order", "unsafe_hash", "frozen")
def __init__(self, init, repr, eq, order, unsafe_hash, frozen):
self.init = init
self.repr = repr
self.eq = eq
self.order = order
self.unsafe_hash = unsafe_hash
self.frozen = frozen
def __repr__(self):
return (
"_DataclassParams("
f"init={self.init!r},"
f"repr={self.repr!r},"
f"eq={self.eq!r},"
f"order={self.order!r},"
f"unsafe_hash={self.unsafe_hash!r},"
f"frozen={self.frozen!r}"
")"
)
# This function is used instead of exposing Field creation directly,
# so that a type checker can be told (via overloads) that this is a
# function whose type depends on its parameters.
def field(
*,
default=MISSING,
default_factory=MISSING,
init=True,
repr=True,
hash=None,
compare=True,
metadata=None,
):
"""Return an object to identify dataclass fields.
default is the default value of the field. default_factory is a
0-argument function called to initialize a field's value. If init
is True, the field will be a parameter to the class's __init__()
function. If repr is True, the field will be included in the
object's repr(). If hash is True, the field will be included in
the object's hash(). If compare is True, the field will be used
in comparison functions. metadata, if specified, must be a
mapping which is stored but not otherwise examined by dataclass.
It is an error to specify both default and default_factory.
"""
if default is not MISSING and default_factory is not MISSING:
raise ValueError("cannot specify both default and default_factory")
return Field(default, default_factory, init, repr, hash, compare, metadata)
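# Hedged usage sketch (hypothetical class): default_factory gives each instance
# its own mutable default instead of sharing one list across all instances.
#   @dataclass
#   class Bag:
#       items: list = field(default_factory=list)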
def _tuple_str(obj_name, fields):
# Return a string representing each field of obj_name as a tuple
# member. So, if fields is ['x', 'y'] and obj_name is "self",
# return "(self.x,self.y)".
# Special case for the 0-tuple.
if not fields:
return "()"
# Note the trailing comma, needed if this turns out to be a 1-tuple.
return f'({",".join([f"{obj_name}.{f.name}" for f in fields])},)'
def _create_fn(name, args, body, *, globals=None, locals=None, return_type=MISSING):
# Note that we mutate locals when exec() is called. Caller
# beware! The only callers are internal to this module, so no
# worries about external callers.
if locals is None:
locals = {}
return_annotation = ""
if return_type is not MISSING:
locals["_return_type"] = return_type
return_annotation = "->_return_type"
args = ",".join(args)
body = "\n".join(f" {b}" for b in body)
# Compute the text of the entire function.
txt = f"def {name}({args}){return_annotation}:\n{body}"
exec(txt, globals, locals)
return locals[name]
def _field_assign(frozen, name, value, self_name):
# If we're a frozen class, then assign to our fields in __init__
# via object.__setattr__. Otherwise, just use a simple
# assignment.
#
# self_name is what "self" is called in this function: don't
# hard-code "self", since that might be a field name.
if frozen:
return f"object.__setattr__({self_name},{name!r},{value})"
return f"{self_name}.{name}={value}"
def _field_init(f, frozen, globals, self_name):
# Return the text of the line in the body of __init__ that will
# initialize this field.
default_name = f"_dflt_{f.name}"
if f.default_factory is not MISSING:
if f.init:
# This field has a default factory. If a parameter is
# given, use it. If not, call the factory.
globals[default_name] = f.default_factory
value = (
f"{default_name}() "
f"if {f.name} is _HAS_DEFAULT_FACTORY "
f"else {f.name}"
)
else:
# This is a field that's not in the __init__ params, but
# has a default factory function. It needs to be
# initialized here by calling the factory function,
# because there's no other way to initialize it.
# For a field initialized with a default=defaultvalue, the
# class dict just has the default value
# (cls.fieldname=defaultvalue). But that won't work for a
# default factory, the factory must be called in __init__
# and we must assign that to self.fieldname. We can't
# fall back to the class dict's value, both because it's
# not set, and because it might be different per-class
# (which, after all, is why we have a factory function!).
globals[default_name] = f.default_factory
value = f"{default_name}()"
else:
# No default factory.
if f.init:
if f.default is MISSING:
# There's no default, just do an assignment.
value = f.name
elif f.default is not MISSING:
globals[default_name] = f.default
value = f.name
else:
# This field does not need initialization. Signify that
# to the caller by returning None.
return None
# Only test this now, so that we can create variables for the
# default. However, return None to signify that we're not going
# to actually do the assignment statement for InitVars.
if f._field_type == _FIELD_INITVAR:
return None
# Now, actually generate the field assignment.
return _field_assign(frozen, f.name, value, self_name)
def _init_param(f):
# Return the __init__ parameter string for this field. For
# example, the equivalent of 'x:int=3' (except instead of 'int',
# reference a variable set to int, and instead of '3', reference a
# variable set to 3).
if f.default is MISSING and f.default_factory is MISSING:
# There's no default, and no default_factory, just output the
# variable name and type.
default = ""
elif f.default is not MISSING:
# There's a default, this will be the name that's used to look
# it up.
default = f"=_dflt_{f.name}"
elif f.default_factory is not MISSING:
# There's a factory function. Set a marker.
default = "=_HAS_DEFAULT_FACTORY"
return f"{f.name}:_type_{f.name}{default}"
def _init_fn(fields, frozen, has_post_init, self_name):
# fields contains both real fields and InitVar pseudo-fields.
# Make sure we don't have fields without defaults following fields
# with defaults. This actually would be caught when exec-ing the
# function source code, but catching it here gives a better error
# message, and future-proofs us in case we build up the function
# using ast.
seen_default = False
for f in fields:
# Only consider fields in the __init__ call.
if f.init:
if not (f.default is MISSING and f.default_factory is MISSING):
seen_default = True
elif seen_default:
raise TypeError(
f"non-default argument {f.name!r} " "follows default argument"
)
globals = {"MISSING": MISSING, "_HAS_DEFAULT_FACTORY": _HAS_DEFAULT_FACTORY}
body_lines = []
for f in fields:
line = _field_init(f, frozen, globals, self_name)
# line is None means that this field doesn't require
# initialization (it's a pseudo-field). Just skip it.
if line:
body_lines.append(line)
# Does this class have a post-init function?
if has_post_init:
params_str = ",".join(f.name for f in fields if f._field_type is _FIELD_INITVAR)
body_lines.append(f"{self_name}.{_POST_INIT_NAME}({params_str})")
# If no body lines, use 'pass'.
if not body_lines:
body_lines = ["pass"]
locals = {f"_type_{f.name}": f.type for f in fields}
return _create_fn(
"__init__",
[self_name] + [_init_param(f) for f in fields if f.init],
body_lines,
locals=locals,
globals=globals,
return_type=None,
)
def _repr_fn(fields):
return _create_fn(
"__repr__",
("self",),
[
'return self.__class__.__qualname__ + f"('
+ ", ".join([f"{f.name}={{self.{f.name}!r}}" for f in fields])
+ ')"'
],
)
def _frozen_get_del_attr(cls, fields):
# XXX: globals is modified on the first call to _create_fn, then
# the modified version is used in the second call. Is this okay?
globals = {"cls": cls, "FrozenInstanceError": FrozenInstanceError}
if fields:
fields_str = "(" + ",".join(repr(f.name) for f in fields) + ",)"
else:
# Special case for the zero-length tuple.
fields_str = "()"
return (
_create_fn(
"__setattr__",
("self", "name", "value"),
(
f"if type(self) is cls or name in {fields_str}:",
' raise FrozenInstanceError(f"cannot assign to field {name!r}")',
f"super(cls, self).__setattr__(name, value)",
),
globals=globals,
),
_create_fn(
"__delattr__",
("self", "name"),
(
f"if type(self) is cls or name in {fields_str}:",
' raise FrozenInstanceError(f"cannot delete field {name!r}")',
f"super(cls, self).__delattr__(name)",
),
globals=globals,
),
)
def _cmp_fn(name, op, self_tuple, other_tuple):
# Create a comparison function. If the fields in the object are
# named 'x' and 'y', then self_tuple is the string
# '(self.x,self.y)' and other_tuple is the string
# '(other.x,other.y)'.
return _create_fn(
name,
("self", "other"),
[
"if other.__class__ is self.__class__:",
f" return {self_tuple}{op}{other_tuple}",
"return NotImplemented",
],
)
def _hash_fn(fields):
self_tuple = _tuple_str("self", fields)
return _create_fn("__hash__", ("self",), [f"return hash({self_tuple})"])
def _is_classvar(a_type, typing):
# This test uses a typing internal class, but it's the best way to
# test if this is a ClassVar.
return type(a_type) is typing._ClassVar
def _is_initvar(a_type, dataclasses):
# The module we're checking against is the module we're
# currently in (dataclasses.py).
return a_type is dataclasses.InitVar
def _is_type(annotation, cls, a_module, a_type, is_type_predicate):
# Given a type annotation string, does it refer to a_type in
# a_module? For example, when checking that annotation denotes a
# ClassVar, then a_module is typing, and a_type is
# typing.ClassVar.
# It's possible to look up a_module given a_type, but it involves
# looking in sys.modules (again!), and seems like a waste since
# the caller already knows a_module.
# - annotation is a string type annotation
# - cls is the class that this annotation was found in
# - a_module is the module we want to match
# - a_type is the type in that module we want to match
# - is_type_predicate is a function called with (obj, a_module)
# that determines if obj is of the desired type.
# Since this test does not do a local namespace lookup (and
# instead only a module (global) lookup), there are some things it
# gets wrong.
# With string annotations, cv0 will be detected as a ClassVar:
# CV = ClassVar
# @dataclass
# class C0:
# cv0: CV
# But in this example cv1 will not be detected as a ClassVar:
# @dataclass
# class C1:
# CV = ClassVar
# cv1: CV
# In C1, the code in this function (_is_type) will look up "CV" in
# the module and not find it, so it will not consider cv1 as a
# ClassVar. This is a fairly obscure corner case, and the best
# way to fix it would be to eval() the string "CV" with the
# correct global and local namespaces. However that would involve
# a eval() penalty for every single field of every dataclass
# that's defined. It was judged not worth it.
match = _MODULE_IDENTIFIER_RE.match(annotation)
if match:
ns = None
module_name = match.group(1)
if not module_name:
# No module name, assume the class's module did
# "from dataclasses import InitVar".
ns = sys.modules.get(cls.__module__).__dict__
else:
# Look up module_name in the class's module.
module = sys.modules.get(cls.__module__)
if module and module.__dict__.get(module_name) is a_module:
ns = sys.modules.get(a_type.__module__).__dict__
if ns and is_type_predicate(ns.get(match.group(2)), a_module):
return True
return False
def _get_field(cls, a_name, a_type):
# Return a Field object for this field name and type. ClassVars
# and InitVars are also returned, but marked as such (see
# f._field_type).
# If the default value isn't derived from Field, then it's only a
# normal default value. Convert it to a Field().
default = getattr(cls, a_name, MISSING)
if isinstance(default, Field):
f = default
else:
if isinstance(default, types.MemberDescriptorType):
# This is a field in __slots__, so it has no default value.
default = MISSING
f = field(default=default)
# Only at this point do we know the name and the type. Set them.
f.name = a_name
f.type = a_type
# Assume it's a normal field until proven otherwise. We're next
# going to decide if it's a ClassVar or InitVar, everything else
# is just a normal field.
f._field_type = _FIELD
# In addition to checking for actual types here, also check for
# string annotations. get_type_hints() won't always work for us
# (see https://github.com/python/typing/issues/508 for example),
    # plus it's expensive and would require an eval for every string
# annotation. So, make a best effort to see if this is a ClassVar
# or InitVar using regex's and checking that the thing referenced
# is actually of the correct type.
# For the complete discussion, see https://bugs.python.org/issue33453
# If typing has not been imported, then it's impossible for any
# annotation to be a ClassVar. So, only look for ClassVar if
# typing has been imported by any module (not necessarily cls's
# module).
typing = sys.modules.get("typing")
if typing:
if _is_classvar(a_type, typing) or (
isinstance(f.type, str)
and _is_type(f.type, cls, typing, typing.ClassVar, _is_classvar)
):
f._field_type = _FIELD_CLASSVAR
# If the type is InitVar, or if it's a matching string annotation,
# then it's an InitVar.
if f._field_type is _FIELD:
# The module we're checking against is the module we're
# currently in (dataclasses.py).
dataclasses = sys.modules[__name__]
if _is_initvar(a_type, dataclasses) or (
isinstance(f.type, str)
and _is_type(f.type, cls, dataclasses, dataclasses.InitVar, _is_initvar)
):
f._field_type = _FIELD_INITVAR
# Validations for individual fields. This is delayed until now,
# instead of in the Field() constructor, since only here do we
# know the field name, which allows for better error reporting.
# Special restrictions for ClassVar and InitVar.
if f._field_type in (_FIELD_CLASSVAR, _FIELD_INITVAR):
if f.default_factory is not MISSING:
raise TypeError(f"field {f.name} cannot have a " "default factory")
# Should I check for other field settings? default_factory
# seems the most serious to check for. Maybe add others. For
# example, how about init=False (or really,
# init=<not-the-default-init-value>)? It makes no sense for
# ClassVar and InitVar to specify init=<anything>.
# For real fields, disallow mutable defaults for known types.
if f._field_type is _FIELD and isinstance(f.default, (list, dict, set)):
raise ValueError(
f"mutable default {type(f.default)} for field "
f"{f.name} is not allowed: use default_factory"
)
return f
def _set_new_attribute(cls, name, value):
# Never overwrites an existing attribute. Returns True if the
# attribute already exists.
if name in cls.__dict__:
return True
setattr(cls, name, value)
return False
# Decide if/how we're going to create a hash function. Key is
# (unsafe_hash, eq, frozen, does-hash-exist). Value is the action to
# take. The common case is to do nothing, so instead of providing a
# function that is a no-op, use None to signify that.
def _hash_set_none(cls, fields):
return None
def _hash_add(cls, fields):
flds = [f for f in fields if (f.compare if f.hash is None else f.hash)]
return _hash_fn(flds)
def _hash_exception(cls, fields):
# Raise an exception.
raise TypeError(f"Cannot overwrite attribute __hash__ " f"in class {cls.__name__}")
#
# +-------------------------------------- unsafe_hash?
# | +------------------------------- eq?
# | | +------------------------ frozen?
# | | | +---------------- has-explicit-hash?
# | | | |
# | | | | +------- action
# | | | | |
# v v v v v
_hash_action = {
(False, False, False, False): None,
(False, False, False, True): None,
(False, False, True, False): None,
(False, False, True, True): None,
(False, True, False, False): _hash_set_none,
(False, True, False, True): None,
(False, True, True, False): _hash_add,
(False, True, True, True): None,
(True, False, False, False): _hash_add,
(True, False, False, True): _hash_exception,
(True, False, True, False): _hash_add,
(True, False, True, True): _hash_exception,
(True, True, False, False): _hash_add,
(True, True, False, True): _hash_exception,
(True, True, True, False): _hash_add,
(True, True, True, True): _hash_exception,
}
# See https://bugs.python.org/issue32929#msg312829 for an if-statement
# version of this table.
def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen):
# Now that dicts retain insertion order, there's no reason to use
# an ordered dict. I am leveraging that ordering here, because
# derived class fields overwrite base class fields, but the order
# is defined by the base class, which is found first.
fields = {}
setattr(cls, _PARAMS, _DataclassParams(init, repr, eq, order, unsafe_hash, frozen))
# Find our base classes in reverse MRO order, and exclude
# ourselves. In reversed order so that more derived classes
# override earlier field definitions in base classes. As long as
# we're iterating over them, see if any are frozen.
any_frozen_base = False
has_dataclass_bases = False
for b in cls.__mro__[-1:0:-1]:
# Only process classes that have been processed by our
# decorator. That is, they have a _FIELDS attribute.
base_fields = getattr(b, _FIELDS, None)
if base_fields:
has_dataclass_bases = True
for f in base_fields.values():
fields[f.name] = f
if getattr(b, _PARAMS).frozen:
any_frozen_base = True
# Annotations that are defined in this class (not in base
# classes). If __annotations__ isn't present, then this class
# adds no new annotations. We use this to compute fields that are
# added by this class.
#
# Fields are found from cls_annotations, which is guaranteed to be
# ordered. Default values are from class attributes, if a field
# has a default. If the default value is a Field(), then it
# contains additional info beyond (and possibly including) the
# actual default value. Pseudo-fields ClassVars and InitVars are
# included, despite the fact that they're not real fields. That's
# dealt with later.
cls_annotations = cls.__dict__.get("__annotations__", {})
# Now find fields in our class. While doing so, validate some
# things, and set the default values (as class attributes) where
# we can.
cls_fields = [
_get_field(cls, name, type_) for name, type_ in cls_annotations.items()
]
for f in cls_fields:
fields[f.name] = f
# If the class attribute (which is the default value for this
# field) exists and is of type 'Field', replace it with the
# real default. This is so that normal class introspection
# sees a real default value, not a Field.
if isinstance(getattr(cls, f.name, None), Field):
if f.default is MISSING:
# If there's no default, delete the class attribute.
# This happens if we specify field(repr=False), for
# example (that is, we specified a field object, but
# no default value). Also if we're using a default
# factory. The class attribute should not be set at
# all in the post-processed class.
delattr(cls, f.name)
else:
setattr(cls, f.name, f.default)
# Do we have any Field members that don't also have annotations?
for name, value in cls.__dict__.items():
        if isinstance(value, Field) and name not in cls_annotations:
raise TypeError(f"{name!r} is a field but has no type annotation")
# Check rules that apply if we are derived from any dataclasses.
if has_dataclass_bases:
# Raise an exception if any of our bases are frozen, but we're not.
if any_frozen_base and not frozen:
raise TypeError("cannot inherit non-frozen dataclass from a " "frozen one")
# Raise an exception if we're frozen, but none of our bases are.
if not any_frozen_base and frozen:
raise TypeError("cannot inherit frozen dataclass from a " "non-frozen one")
# Remember all of the fields on our class (including bases). This
# also marks this class as being a dataclass.
setattr(cls, _FIELDS, fields)
# Was this class defined with an explicit __hash__? Note that if
# __eq__ is defined in this class, then python will automatically
# set __hash__ to None. This is a heuristic, as it's possible
    # that such a __hash__ == None was not auto-generated, but it's
    # close enough.
class_hash = cls.__dict__.get("__hash__", MISSING)
has_explicit_hash = not (
class_hash is MISSING or (class_hash is None and "__eq__" in cls.__dict__)
)
# If we're generating ordering methods, we must be generating the
# eq methods.
if order and not eq:
raise ValueError("eq must be true if order is true")
if init:
# Does this class have a post-init function?
has_post_init = hasattr(cls, _POST_INIT_NAME)
# Include InitVars and regular fields (so, not ClassVars).
flds = [f for f in fields.values() if f._field_type in (_FIELD, _FIELD_INITVAR)]
_set_new_attribute(
cls,
"__init__",
_init_fn(
flds,
frozen,
has_post_init,
# The name to use for the "self"
# param in __init__. Use "self"
# if possible.
"__dataclass_self__" if "self" in fields else "self",
),
)
# Get the fields as a list, and include only real fields. This is
# used in all of the following methods.
field_list = [f for f in fields.values() if f._field_type is _FIELD]
if repr:
flds = [f for f in field_list if f.repr]
_set_new_attribute(cls, "__repr__", _repr_fn(flds))
if eq:
        # Create __eq__ method. There's no need for a __ne__ method,
# since python will call __eq__ and negate it.
flds = [f for f in field_list if f.compare]
self_tuple = _tuple_str("self", flds)
other_tuple = _tuple_str("other", flds)
_set_new_attribute(
cls, "__eq__", _cmp_fn("__eq__", "==", self_tuple, other_tuple)
)
if order:
# Create and set the ordering methods.
flds = [f for f in field_list if f.compare]
self_tuple = _tuple_str("self", flds)
other_tuple = _tuple_str("other", flds)
for name, op in [
("__lt__", "<"),
("__le__", "<="),
("__gt__", ">"),
("__ge__", ">="),
]:
if _set_new_attribute(
cls, name, _cmp_fn(name, op, self_tuple, other_tuple)
):
raise TypeError(
f"Cannot overwrite attribute {name} "
f"in class {cls.__name__}. Consider using "
"functools.total_ordering"
)
if frozen:
for fn in _frozen_get_del_attr(cls, field_list):
if _set_new_attribute(cls, fn.__name__, fn):
raise TypeError(
f"Cannot overwrite attribute {fn.__name__} "
f"in class {cls.__name__}"
)
# Decide if/how we're going to create a hash function.
hash_action = _hash_action[
bool(unsafe_hash), bool(eq), bool(frozen), has_explicit_hash
]
if hash_action:
# No need to call _set_new_attribute here, since by the time
# we're here the overwriting is unconditional.
cls.__hash__ = hash_action(cls, field_list)
if not getattr(cls, "__doc__"):
# Create a class doc-string.
cls.__doc__ = cls.__name__ + str(inspect.signature(cls)).replace(" -> None", "")
return cls
# _cls should never be specified by keyword, so start it with an
# underscore. The presence of _cls is used to detect if this
# decorator is being called with parameters or not.
def dataclass(
_cls=None,
*,
init=True,
repr=True,
eq=True,
order=False,
unsafe_hash=False,
frozen=False,
):
"""Returns the same class as was passed in, with dunder methods
added based on the fields defined in the class.
Examines PEP 526 __annotations__ to determine fields.
If init is true, an __init__() method is added to the class. If
repr is true, a __repr__() method is added. If order is true, rich
comparison dunder methods are added. If unsafe_hash is true, a
__hash__() method function is added. If frozen is true, fields may
not be assigned to after instance creation.
"""
def wrap(cls):
return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen)
# See if we're being called as @dataclass or @dataclass().
if _cls is None:
# We're called with parens.
return wrap
# We're called as @dataclass without parens.
return wrap(_cls)
def fields(class_or_instance):
"""Return a tuple describing the fields of this dataclass.
Accepts a dataclass or an instance of one. Tuple elements are of
type Field.
"""
# Might it be worth caching this, per class?
try:
fields = getattr(class_or_instance, _FIELDS)
except AttributeError:
raise TypeError("must be called with a dataclass type or instance")
# Exclude pseudo-fields. Note that fields is sorted by insertion
# order, so the order of the tuple is as the fields were defined.
return tuple(f for f in fields.values() if f._field_type is _FIELD)
def _is_dataclass_instance(obj):
"""Returns True if obj is an instance of a dataclass."""
return not isinstance(obj, type) and hasattr(obj, _FIELDS)
def is_dataclass(obj):
"""Returns True if obj is a dataclass or an instance of a
dataclass."""
return hasattr(obj, _FIELDS)
def asdict(obj, *, dict_factory=dict):
"""Return the fields of a dataclass instance as a new dictionary mapping
field names to field values.
Example usage:
@dataclass
class C:
x: int
y: int
c = C(1, 2)
assert asdict(c) == {'x': 1, 'y': 2}
If given, 'dict_factory' will be used instead of built-in dict.
The function applies recursively to field values that are
dataclass instances. This will also look into built-in containers:
tuples, lists, and dicts.
"""
if not _is_dataclass_instance(obj):
raise TypeError("asdict() should be called on dataclass instances")
return _asdict_inner(obj, dict_factory)
def _asdict_inner(obj, dict_factory):
if _is_dataclass_instance(obj):
result = []
for f in fields(obj):
value = _asdict_inner(getattr(obj, f.name), dict_factory)
result.append((f.name, value))
return dict_factory(result)
elif isinstance(obj, (list, tuple)):
return type(obj)(_asdict_inner(v, dict_factory) for v in obj)
elif isinstance(obj, dict):
return type(obj)(
(_asdict_inner(k, dict_factory), _asdict_inner(v, dict_factory))
for k, v in obj.items()
)
else:
return copy.deepcopy(obj)
def astuple(obj, *, tuple_factory=tuple):
"""Return the fields of a dataclass instance as a new tuple of field values.
Example usage::
@dataclass
class C:
x: int
y: int
c = C(1, 2)
assert astuple(c) == (1, 2)
If given, 'tuple_factory' will be used instead of built-in tuple.
The function applies recursively to field values that are
dataclass instances. This will also look into built-in containers:
tuples, lists, and dicts.
"""
if not _is_dataclass_instance(obj):
raise TypeError("astuple() should be called on dataclass instances")
return _astuple_inner(obj, tuple_factory)
def _astuple_inner(obj, tuple_factory):
if _is_dataclass_instance(obj):
result = []
for f in fields(obj):
value = _astuple_inner(getattr(obj, f.name), tuple_factory)
result.append(value)
return tuple_factory(result)
elif isinstance(obj, (list, tuple)):
return type(obj)(_astuple_inner(v, tuple_factory) for v in obj)
elif isinstance(obj, dict):
return type(obj)(
(_astuple_inner(k, tuple_factory), _astuple_inner(v, tuple_factory))
for k, v in obj.items()
)
else:
return copy.deepcopy(obj)
def make_dataclass(
cls_name,
fields,
*,
bases=(),
namespace=None,
init=True,
repr=True,
eq=True,
order=False,
unsafe_hash=False,
frozen=False,
):
"""Return a new dynamically created dataclass.
The dataclass name will be 'cls_name'. 'fields' is an iterable
of either (name), (name, type) or (name, type, Field) objects. If type is
omitted, use the string 'typing.Any'. Field objects are created by
the equivalent of calling 'field(name, type [, Field-info])'.
C = make_dataclass('C', ['x', ('y', int), ('z', int, field(init=False))], bases=(Base,))
is equivalent to:
@dataclass
class C(Base):
x: 'typing.Any'
y: int
z: int = field(init=False)
For the bases and namespace parameters, see the builtin type() function.
The parameters init, repr, eq, order, unsafe_hash, and frozen are passed to
dataclass().
"""
if namespace is None:
namespace = {}
else:
# Copy namespace since we're going to mutate it.
namespace = namespace.copy()
# While we're looking through the field names, validate that they
# are identifiers, are not keywords, and not duplicates.
seen = set()
anns = {}
for item in fields:
if isinstance(item, str):
name = item
tp = "typing.Any"
elif len(item) == 2:
(name, tp) = item
elif len(item) == 3:
name, tp, spec = item
namespace[name] = spec
else:
raise TypeError(f"Invalid field: {item!r}")
if not isinstance(name, str) or not name.isidentifier():
raise TypeError(f"Field names must be valid identifers: {name!r}")
if keyword.iskeyword(name):
raise TypeError(f"Field names must not be keywords: {name!r}")
if name in seen:
raise TypeError(f"Field name duplicated: {name!r}")
seen.add(name)
anns[name] = tp
namespace["__annotations__"] = anns
# We use `types.new_class()` instead of simply `type()` to allow dynamic creation
    # of generic dataclasses.
cls = types.new_class(cls_name, bases, {}, lambda ns: ns.update(namespace))
return dataclass(
cls,
init=init,
repr=repr,
eq=eq,
order=order,
unsafe_hash=unsafe_hash,
frozen=frozen,
)
def replace(obj, **changes):
"""Return a new object replacing specified fields with new values.
This is especially useful for frozen classes. Example usage:
@dataclass(frozen=True)
class C:
x: int
y: int
c = C(1, 2)
c1 = replace(c, x=3)
assert c1.x == 3 and c1.y == 2
"""
# We're going to mutate 'changes', but that's okay because it's a
# new dict, even if called with 'replace(obj, **my_changes)'.
if not _is_dataclass_instance(obj):
raise TypeError("replace() should be called on dataclass instances")
# It's an error to have init=False fields in 'changes'.
# If a field is not in 'changes', read its value from the provided obj.
for f in getattr(obj, _FIELDS).values():
if not f.init:
# Error if this field is specified in changes.
if f.name in changes:
raise ValueError(
f"field {f.name} is declared with "
"init=False, it cannot be specified with "
"replace()"
)
continue
if f.name not in changes:
changes[f.name] = getattr(obj, f.name)
# Create the new object, which calls __init__() and
# __post_init__() (if defined), using all of the init fields we've
# added and/or left in 'changes'. If there are values supplied in
# changes that aren't fields, this will correctly raise a
# TypeError.
return obj.__class__(**changes)
|
the-stack_0_9233 | import numpy as np
import matplotlibex as plx
import ml.gptheano.kernels as krn
import ml.gptheano.gplvmfullfit as gplvm
if __name__ == "__main__":
t = np.linspace(0.0, 3*2*np.pi, num=300)
y = np.vstack((3*np.sin(1*t+0.0), 3*np.sin(2*t+1.5),
1*np.sin(1*t+0.4), 1*np.sin(3*t+1.8),
1*np.sin(1*t+0.8), 1*np.sin(4*t+2.0),
1*np.sin(1*t+1.0), 1*np.sin(5*t+2.2))).T
y = y + 0.1*np.reshape(np.random.normal(size=y.size), y.shape)
XVar = krn.MatrixVariable("X", np.identity(3))
krbfnoise = krn.SumKernel([krn.RBFKernel(XVar, XVar), krn.NoiseKernel(XVar, XVar)])
gp = gplvm.GPLVM(y, 2, krbfnoise)
print("##")
plx.plot_sequence_variance_2d(gp.XVar.val, gp.predict)
|
the-stack_0_9234 | import scipy.sparse as sps
from . import register_class
from ..container import Container
from ..utils import docval, getargs, call_docval_func, to_uint_array, get_data_shape
@register_class('CSRMatrix')
class CSRMatrix(Container):
@docval({'name': 'data', 'type': (sps.csr_matrix, 'array_data'),
'doc': 'the data to use for this CSRMatrix or CSR data array.'
'If passing CSR data array, *indices*, *indptr*, and *shape* must also be provided'},
{'name': 'indices', 'type': 'array_data', 'doc': 'CSR index array', 'default': None},
{'name': 'indptr', 'type': 'array_data', 'doc': 'CSR index pointer array', 'default': None},
{'name': 'shape', 'type': 'array_data', 'doc': 'the shape of the matrix', 'default': None},
{'name': 'name', 'type': str, 'doc': 'the name to use for this when storing', 'default': 'csr_matrix'})
def __init__(self, **kwargs):
call_docval_func(super().__init__, kwargs)
data = getargs('data', kwargs)
if not isinstance(data, sps.csr_matrix):
temp_shape = get_data_shape(data)
temp_ndim = len(temp_shape)
if temp_ndim == 2:
data = sps.csr_matrix(data)
elif temp_ndim == 1:
indptr, indices, shape = getargs('indptr', 'indices', 'shape', kwargs)
if any(_ is None for _ in (indptr, indices, shape)):
raise ValueError("Must specify 'indptr', 'indices', and 'shape' arguments when passing data array.")
indptr = self.__check_arr(indptr, 'indptr')
indices = self.__check_arr(indices, 'indices')
shape = self.__check_arr(shape, 'shape')
if len(shape) != 2:
raise ValueError("'shape' argument must specify two and only two dimensions.")
data = sps.csr_matrix((data, indices, indptr), shape=shape)
else:
raise ValueError("'data' argument cannot be ndarray of dimensionality > 2.")
self.__data = data
@staticmethod
def __check_arr(ar, arg):
try:
ar = to_uint_array(ar)
except ValueError as ve:
raise ValueError("Cannot convert '%s' to an array of unsigned integers." % arg) from ve
if ar.ndim != 1:
raise ValueError("'%s' must be a 1D array of unsigned integers." % arg)
return ar
def __getattr__(self, val):
# NOTE: this provides access to self.data, self.indices, self.indptr, self.shape
attr = getattr(self.__data, val)
if val in ('indices', 'indptr', 'shape'): # needed because sps.csr_matrix may contain int arrays for these
attr = to_uint_array(attr)
return attr
def to_spmat(self):
return self.__data
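# Hedged usage sketch (illustrative values; assumes numpy and scipy are available):
#   import numpy as np
#   mat = sps.csr_matrix(np.array([[0, 1], [2, 0]]))
#   csr = CSRMatrix(data=mat, name='example_csr')
#   csr.to_spmat()    # returns the underlying scipy.sparse.csr_matrix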
|
the-stack_0_9236 | from typing import List
from td.session import TdAmeritradeSession
class Quotes():
"""
## Overview
----
Allows the user to query real-time quotes from the TD
    API if they have an authorization token; otherwise quotes
    are delayed by 5 minutes.
"""
def __init__(self, session: TdAmeritradeSession) -> None:
"""Initializes the `Quotes` services.
### Parameters
----
session : TdAmeritradeSession
            An authenticated `TdAmeritradeSession`
            object.
"""
self.session = session
    def get_quote(self, instrument: str) -> dict:
"""Grabs real-time quotes for an instrument.
### Overview
----
Serves as the mechanism to make a request to the Get
Quote and Get Quotes Endpoint. If one item is provided
a Get Quote request will be made and if more than one
item is provided then a Get Quotes request will be made.
### Documentation
----
https://developer.tdameritrade.com/quotes/apis
### Parameters
----
instruments: str
A list of different financial instruments.
### Usage
----
>>> quote_service = td_client.quotes()
>>> quote_service.get_quote(instrument='AAPL')
"""
params = {
'symbol': instrument
}
content = self.session.make_request(
method='get',
endpoint='marketdata/quotes',
params=params
)
return content
    def get_quotes(self, instruments: List[str]) -> dict:
"""Grabs real-time quotes for multiple instruments.
### Overview
----
Serves as the mechanism to make a request to the Get
Quote and Get Quotes Endpoint. If one item is provided
a Get Quote request will be made and if more than one
item is provided then a Get Quotes request will be made.
Only 500 symbols can be sent at a single time.
### Documentation
----
https://developer.tdameritrade.com/quotes/apis
### Parameters
----
instruments: str
A list of different financial instruments.
### Usage
----
>>> quote_service = td_client.quotes()
>>> quote_service.get_quotes(instruments=['AAPL','SQ'])
"""
params = {
'symbol': ','.join(instruments)
}
content = self.session.make_request(
method='get',
endpoint='marketdata/quotes',
params=params
)
return content
|
the-stack_0_9240 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'The StudiMY Project'
copyright = '2019, MARIMORE ENGINEERING SDN. BHD. (925539-H)'
author = 'Chee Yim, Goh and Iqbal Abdullah'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '1.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = '101Readmedoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, '101Readme.tex', 'Introduction to the StudiMY Project',
'Chee Yim, Goh \\and Iqbal Abdullah', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, '101readme', 'Introduction to the StudiMY Project',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, '101Readme', 'Introduction to the StudiMY Project',
author, '101Readme', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
|
the-stack_0_9241 | import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from synchrophasor.frame import *
from synchrophasor.pmu import Pmu
from synchrophasor.pmuGen import *
from time import sleep
import threading
SLEEP_TIME = 1.0/100
def test_client_single_pmu():
pmu = create_pmu(9006)
pmu.ieee_data_sample.set_freq(1)
cnt = 0
while True:
sleep(SLEEP_TIME)
if pmu.clients:
pmu.send(pmu.ieee_data_sample)
pmu.join()
def test_client_2_pmus():
pmus = [create_pmu(port) for port in [9007, 9008]]
for i, pmu in enumerate(pmus):
pmu.ieee_data_sample.set_freq(i+1)
cnt = 0
while True:
sleep(SLEEP_TIME)
for pmu in pmus:
pmu.send(pmu.ieee_data_sample)
for pmu in pmus:
pmu.join()
def test_client_10_pmus():
nSources = 4
pmus = [create_pmu(port, log_level='DEBUG') for port in range(9009, 9009+nSources)]
# pmus = [create_pmu(port) for port in range(9009, 9009+nSources)]
for i, pmu in enumerate(pmus):
pmu.ieee_data_sample.set_freq(i+1)
cnt = 0
while True:
# sleep(SLEEP_TIME)
for pmu in pmus:
pmu.send(pmu.ieee_data_sample)
for pmu in pmus:
pmu.join()
if __name__ == "__main__":
test_list = [
# test_client_single_pmu,
# test_client_2_pmus,
test_client_10_pmus
]
threads = list()
for test in test_list:
x = threading.Thread(target=test)
threads.append(x)
x.start()
for index, thread in enumerate(threads):
thread.join()
|
the-stack_0_9243 | ################################################################################
# Example : perform live fire detection in video using FireNet CNN
# Copyright (c) 2017/18 - Andrew Dunnings / Toby Breckon, Durham University, UK
# License : https://github.com/tobybreckon/fire-detection-cnn/blob/master/LICENSE
################################################################################
import cv2
import os
import sys
import math
import requests
################################################################################
import tflearn
from tflearn.layers.core import *
from tflearn.layers.conv import *
from tflearn.layers.normalization import *
from tflearn.layers.estimator import regression
################################################################################
def construct_firenet (x,y):
# Build network as per architecture in [Dunnings/Breckon, 2018]
network = tflearn.input_data(shape=[None, y, x, 3], dtype=tf.float32)
network = conv_2d(network, 64, 5, strides=4, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 128, 4, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 256, 1, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 2, activation='softmax')
network = regression(network, optimizer='momentum',
loss='categorical_crossentropy',
learning_rate=0.001)
model = tflearn.DNN(network, checkpoint_path='firenet',
max_checkpoints=1, tensorboard_verbose=2)
return model
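################################################################################
# Added illustrative sketch (not part of the original script): running FireNet
# on a single still image rather than a video stream. 'image_path' is a
# placeholder; the main loop below treats output[0][0] >= 0.5 as "fire".
def example_predict_single_image(model, image_path):
    frame = cv2.imread(image_path)
    small_frame = cv2.resize(frame, (224, 224), interpolation=cv2.INTER_AREA)
    output = model.predict([small_frame])
    return round(output[0][0]) == 1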
################################################################################
# construct and display model
model = construct_firenet (224, 224)
print("Constructed FireNet ...")
model.load(os.path.join("models/FireNet", "firenet"),weights_only=True)
print("Loaded CNN network weights ...")
################################################################################
# network input sizes
rows = 224
cols = 224
# display and loop settings
windowName = "Live Fire Detection - FireNet CNN";
keepProcessing = True;
################################################################################
if len(sys.argv) == 2:
# load video file from first command line argument
video = cv2.VideoCapture(sys.argv[1])
print("Loaded video ...")
# create window
cv2.namedWindow(windowName, cv2.WINDOW_NORMAL);
# get video properties
width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH));
height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = video.get(cv2.CAP_PROP_FPS)
    frame_time = round(1000/fps)  # ms per frame, e.g. 1000 ms / 25 fps = 40 ms
while (keepProcessing):
# start a timer (to see how long processing and display takes)
start_t = cv2.getTickCount();
# get video frame from file, handle end of file
ret, frame = video.read()
if not ret:
print("... end of video file reached");
break;
# re-size image to network input size and perform prediction
        small_frame = cv2.resize(frame, (rows, cols), interpolation=cv2.INTER_AREA)
output = model.predict([small_frame])
# label image based on prediction
myFile = open('append.txt', 'a')
if round(output[0][0]) == 1:
print("FIRE")
myFile.write('fire')
r = requests.post('http://linksmartsensing.us-east-2.elasticbeanstalk.com/data/fire', params = {'id':"1",'fire':"true"})
print(r.text)
cv2.rectangle(frame, (0,0), (width,height), (0,0,255), 50)
cv2.putText(frame,'FIRE',(int(width/16),int(height/4)),
cv2.FONT_HERSHEY_SIMPLEX, 4,(255,255,255),10,cv2.LINE_AA);
else:
print("CLEAR")
myFile.write('clear')
r = requests.post('http://linksmartsensing.us-east-2.elasticbeanstalk.com/data/fire', params = {'id':"1",'fire':"false"})
print(r.text)
cv2.rectangle(frame, (0,0), (width,height), (0,255,0), 50)
cv2.putText(frame,'CLEAR',(int(width/16),int(height/4)),
cv2.FONT_HERSHEY_SIMPLEX, 4,(255,255,255),10,cv2.LINE_AA);
# stop the timer and convert to ms. (to see how long processing and display takes)
stop_t = ((cv2.getTickCount() - start_t)/cv2.getTickFrequency()) * 1000;
# image display and key handling
cv2.imshow(windowName, frame);
# wait fps time or less depending on processing time taken (e.g. 1000ms / 25 fps = 40 ms)
key = cv2.waitKey(max(2, frame_time - int(math.ceil(stop_t)))) & 0xFF;
if (key == ord('x')):
keepProcessing = False;
elif (key == ord('f')):
cv2.setWindowProperty(windowName, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN);
else:
print("usage: python firenet.py videofile.ext");
################################################################################
|
the-stack_0_9244 | """
Remove the docs in training set that overlap with test sets or are duplicate
As it loads all data into memory, it requires a large memory machine to run
If you are processing MAG, run pykp.data.mag.post_clearn.py to remove noisy items (abstract contains "Full textFull text is available as a scanned copy of the original print version.") (around 132561 out of 3114539) and remove duplicates by title
"""
import argparse
import json
import os
import string
import nltk
import tqdm
from joblib import Parallel, delayed
from multiprocessing import Pool
import time
from pykp.data.remove_duplicates import init_args, example_iterator_from_json, text2tokens, set_similarity_match
stopwords = nltk.corpus.stopwords.words('english')
stopwords.extend(string.punctuation)
stopwords.extend(string.digits)
stopwords.append('')
def detect_duplicate_job(train_example):
global testsets_dict, title_pool
train_id = train_example['id']
title_tokens = text2tokens(train_example['title'])
text_tokens = text2tokens(train_example['abstract'])
# check if title is duplicate in train data (have been processed before)
title_str = ' '.join(title_tokens)
if title_str in title_pool:
return ('train_log', '%s|%s|%s\n' % (train_id, title_pool[title_str], title_str))
else:
title_pool[title_str] = train_id
# check if title/content is duplicate in valid/test data
title_set = set(title_tokens)
content_set = title_set | set(text_tokens)
for test_dataset_subname, testset in testsets_dict.items():
for test_id, test_example in testset.items():
title_flag, title_sim = set_similarity_match(title_set, test_example['title_set'], 0.7)
content_flag, content_sim = set_similarity_match(content_set, test_example['content_set'], 0.7)
if title_flag or content_flag:
return (test_dataset_subname,
'%s|%s|%s|%s|%f|%f\n' % (test_example['id'], train_example['id'], test_example['title'], train_example['title'], title_sim, content_sim))
# write non-duplicates to disk
return ('train_output', json.dumps(train_example) + '\n')
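# --- Added illustrative sketch (not used by this script) ----------------------
# detect_duplicate_job() above flags a pair as duplicate when
# set_similarity_match() reports >= 0.7 overlap on the title or title+abstract
# token sets. A plain token-overlap version of that idea looks roughly like
# this; the exact metric inside pykp.data.remove_duplicates may differ.
def _example_overlap_check(tokens_a, tokens_b, threshold=0.7):
    set_a, set_b = set(tokens_a), set(tokens_b)
    if not set_a or not set_b:
        return False, 0.0
    overlap = len(set_a & set_b) / min(len(set_a), len(set_b))
    return overlap >= threshold, overlap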
def run_normal_parallel(n_jobs, examples_iter):
start_time = time.time()
pool = Pool(processes=n_jobs)
# results = pool.map(detect_duplicate_job, examples_iter)
results = []
for r in tqdm.tqdm(pool.imap(detect_duplicate_job, examples_iter), total=len(examples_iter)):
results.append(r)
# result = list(itertools.chain(*result))
print("Job finished, taking time %.2f s" % (time.time()-start_time))
return results
def main():
opt = init_args()
# specify for which dataset (for valid/test) we need to remove duplicate data samples from training data
if opt.datatype == 'paper':
total_num = 20000 #530631
train_dataset_name = 'kp20k_training'
test_dataset_names = ['kp20k', 'inspec', 'nus', 'semeval', 'krapivin']
id_field = None
title_field = 'title'
text_field ='abstract'
keyword_field = 'keywords'
trg_delimiter = ';'
elif opt.datatype == 'qa':
total_num = 298965
train_dataset_name = 'stackexchange_training'
test_dataset_names = ['stackexchange']
id_field = None
title_field = 'title'
text_field ='question'
keyword_field = 'tags'
trg_delimiter = ';'
elif opt.datatype == 'mag':
total_num = 5108427
train_dataset_name = 'mag'
test_dataset_names = ['kp20k', 'inspec', 'nus', 'semeval', 'krapivin']
id_field = 'id'
title_field = 'title'
text_field ='abstract'
keyword_field = 'keywords'
trg_delimiter = None
print("Loading training data...")
train_examples_iter = example_iterator_from_json(path=opt.train_file,
dataset_name=train_dataset_name,
id_field=id_field,
title_field=title_field,
text_field=text_field,
keyword_field=keyword_field,
trg_delimiter=trg_delimiter)
train_examples_iter = list(train_examples_iter)
global pbar, output_cache, testsets_dict, title_pool
testsets_dict = {}
output_dir = opt.test_dataset_dir + '/%s_output/' % opt.datatype
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print("Loading validation/test data...")
for test_dataset_name in test_dataset_names:
for type in ['validation', 'testing']:
test_dataset_subname = '%s_%s' % (test_dataset_name, type)
source_test_file = os.path.join(opt.test_dataset_dir, test_dataset_name, test_dataset_subname+'.json')
test_examples = list(example_iterator_from_json(path=source_test_file,
dataset_name=test_dataset_subname,
id_field=id_field,
title_field=title_field,
text_field=text_field,
keyword_field=keyword_field,
trg_delimiter = ';'))
testset = {}
for test_num, test_example in enumerate(test_examples):
test_id = test_example['id']
title_tokens = text2tokens(test_example['title'])
text_tokens = text2tokens(test_example['abstract'])
# concatenate title and put it into hashtable
title_set = set(title_tokens)
text_set = set(text_tokens)
content_set = title_set | text_set
test_example['title_set'] = title_set
test_example['content_set'] = content_set
test_example['dup_train_ids'] = []
test_example['dup_train_titles'] = []
testset[test_id] = test_example
testsets_dict[test_dataset_subname] = testset
print("\tsize(%s) = %d" % (test_dataset_subname, len(testset)))
"""
1. clean text, remove stopwords/punctuations
2. Treat as overlaps if title & text match>=70%
3. Build a title hashset to remove training duplicates
"""
print("Cleaning duplicate data...")
global file_writers
file_writers = {}
for test_dataset_name in test_dataset_names:
for type in ['validation', 'testing']:
test_dataset_subname = '%s_%s' % (test_dataset_name, type)
file_writers[test_dataset_subname] = open('%s/%s__dup__%s.log'
% (output_dir, test_dataset_subname, train_dataset_name), 'w')
print("Initializing file writer for %s: %s" % (test_dataset_subname, os.path.abspath('%s/%s__dup__%s.log' % (output_dir, test_dataset_subname, train_dataset_name))))
output_cache = []
file_writers['train_output'] = open('%s/%s_nodup.json' % (output_dir, train_dataset_name), 'w')
file_writers['train_log'] = open('%s/%s__dup.log' % (output_dir, train_dataset_name), 'w')
title_pool = {}
print("Total number of examples = %d" % len(train_examples_iter))
print("Total number of jobs = %d" % opt.n_jobs)
# dataset_line_tuples = Parallel(n_jobs=opt.n_jobs, verbose=len(train_examples_iter))(delayed(detect_duplicate_job)(ex) for ex in train_examples_iter)
dataset_line_tuples = run_normal_parallel(opt.n_jobs, train_examples_iter)
print("Process ends. Got %d data examples" % len(dataset_line_tuples))
for dataset_subname, line in dataset_line_tuples:
writer = file_writers[dataset_subname]
writer.write(line)
for d_name, d_writer in file_writers.items():
print("Closing %s" % d_name)
d_writer.close()
if __name__ == "__main__":
main()
|
the-stack_0_9245 | # -*- coding: utf-8 -*-
import io
import pandas as pd
import scrapy
from scrapy import Request
from scrapy import signals
from fooltrader.api.quote import get_security_list
from fooltrader.contract.files_contract import get_finance_path
from fooltrader.utils.utils import index_df_with_time
class AmericaStockFinanceSpider(scrapy.Spider):
name = "america_stock_finance"
custom_settings = {
# 'DOWNLOAD_DELAY': 2,
# 'CONCURRENT_REQUESTS_PER_DOMAIN': 8,
'SPIDER_MIDDLEWARES': {
'fooltrader.middlewares.FoolErrorMiddleware': 1000,
}
}
def start_requests(self):
security_item = self.settings.get("security_item")
if security_item is not None:
item = security_item
data_url = self.get_finance_url(item['code'])
data_path = get_finance_path(item)
yield Request(url=data_url,
meta={'path': data_path,
'item': item},
callback=self.download_finance_csv)
else:
for _, item in get_security_list(exchanges=['nasdaq']).iterrows():
data_url = self.get_finance_url(item['code'])
data_path = get_finance_path(item)
yield Request(url=data_url,
meta={'path': data_path,
'item': item},
callback=self.download_finance_csv)
def download_finance_csv(self, response):
content_type_header = response.headers.get('content-type', None)
        if content_type_header.decode("utf-8") == 'text/csv':
path = response.meta['path']
security_item = response.meta['item']
df = pd.read_csv(io.BytesIO(response.body), na_values='None')
df.columns = [
"reportDate",
"shares",
"sharesAdjusted",
"factor",
"totalAssets",
"totalCurrentAssets",
"totalLiabilities",
"totalCurrentLiabilities",
"bookValue",
"minorityBookValue",
"preferredEquity",
"goodwill",
"longTermBorrowing",
"operatingRevenue",
"netProfit",
"netProfitAttributedToParentCompanyOwner",
"EPS",
"dilutedEPS",
"DPS",
"netCashFlowsFromOperatingActivities",
"netCashFlowsFromInvesting",
"netCashFlowsFromFinancingActivities",
"cashChange",
"cashAtTheEndOfPeriod",
"capitalExpenditures",
"price",
"priceHigh",
"priceLow",
"ROE",
"ROA",
"BVPS",
"PB",
"PE",
"cumulativeDividendsPerShare",
"dividendPayoutRatio",
"longTermDebtToEquityRatio",
"equityToAssetsRatio",
"netMargin",
"assetTurnover",
"freeCashFlowPerShare",
"currentRatio"]
df['code'] = security_item['code']
df['securityId'] = security_item['id']
df['id'] = df[['securityId', 'reportDate']].apply(lambda x: '_'.join(x.astype(str)), axis=1)
df = index_df_with_time(df, index='reportDate')
df.fillna(0, inplace=True)
df.to_csv(path, index=False)
else:
self.logger.exception(
"get finance csv error:url={} content type={} body={}".format(response.url, content_type_header,
response.body))
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = super(AmericaStockFinanceSpider, cls).from_crawler(crawler, *args, **kwargs)
crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
return spider
def spider_closed(self, spider, reason):
spider.logger.info('Spider closed: %s,%s\n', spider.name, reason)
def get_finance_url(self, code):
return 'http://www.stockpup.com/data/{}_quarterly_financial_data.csv'.format(code)
|
the-stack_0_9246 | #
# Copyright (C) 2006-2017 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" Import all RDKit chemistry modules
"""
import sys
import warnings
from collections import namedtuple
import numpy
from rdkit import DataStructs
from rdkit import ForceField
from rdkit import RDConfig
from rdkit import rdBase
from rdkit.Chem import *
from rdkit.Chem.ChemicalFeatures import *
from rdkit.Chem.rdChemReactions import *
from rdkit.Chem.rdDepictor import *
from rdkit.Chem.rdDistGeom import *
from rdkit.Chem.rdForceFieldHelpers import *
from rdkit.Chem.rdMolAlign import *
from rdkit.Chem.rdMolDescriptors import *
from rdkit.Chem.rdMolTransforms import *
from rdkit.Chem.rdPartialCharges import *
from rdkit.Chem.rdReducedGraphs import *
from rdkit.Chem.rdShapeHelpers import *
from rdkit.Chem.rdqueries import *
from rdkit.Chem.rdMolEnumerator import *
from rdkit.Geometry import rdGeometry
from rdkit.RDLogger import logger
from rdkit.Chem.EnumerateStereoisomers import StereoEnumerationOptions, EnumerateStereoisomers
try:
from rdkit.Chem.rdSLNParse import *
except ImportError:
pass
Mol.Compute2DCoords = Compute2DCoords
Mol.ComputeGasteigerCharges = ComputeGasteigerCharges
logger = logger()
def TransformMol(mol, tform, confId=-1, keepConfs=False):
""" Applies the transformation (usually a 4x4 double matrix) to a molecule
if keepConfs is False then all but that conformer are removed
"""
refConf = mol.GetConformer(confId)
TransformConformer(refConf, tform)
if not keepConfs:
if confId == -1:
confId = 0
allConfIds = [c.GetId() for c in mol.GetConformers()]
for cid in allConfIds:
if not cid == confId:
mol.RemoveConformer(cid)
# reset the conf Id to zero since there is only one conformer left
mol.GetConformer(confId).SetId(0)
def ComputeMolShape(mol, confId=-1, boxDim=(20, 20, 20), spacing=0.5, **kwargs):
""" returns a grid representation of the molecule's shape
"""
res = rdGeometry.UniformGrid3D(boxDim[0], boxDim[1], boxDim[2], spacing=spacing)
EncodeShape(mol, res, confId, **kwargs)
return res
def ComputeMolVolume(mol, confId=-1, gridSpacing=0.2, boxMargin=2.0):
""" Calculates the volume of a particular conformer of a molecule
based on a grid-encoding of the molecular shape.
A bit of demo as well as a test of github #1883:
>>> from rdkit import Chem
>>> from rdkit.Chem import AllChem
>>> mol = Chem.AddHs(Chem.MolFromSmiles('C'))
>>> AllChem.EmbedMolecule(mol)
0
>>> ComputeMolVolume(mol)
28...
>>> mol = Chem.AddHs(Chem.MolFromSmiles('O'))
>>> AllChem.EmbedMolecule(mol)
0
>>> ComputeMolVolume(mol)
20...
"""
mol = rdchem.Mol(mol)
conf = mol.GetConformer(confId)
CanonicalizeConformer(conf, ignoreHs=False)
box = ComputeConfBox(conf)
sideLen = (box[1].x - box[0].x + 2 * boxMargin, box[1].y - box[0].y + 2 * boxMargin,
box[1].z - box[0].z + 2 * boxMargin)
shape = rdGeometry.UniformGrid3D(sideLen[0], sideLen[1], sideLen[2], spacing=gridSpacing)
EncodeShape(mol, shape, confId, ignoreHs=False, vdwScale=1.0)
voxelVol = gridSpacing**3
occVect = shape.GetOccupancyVect()
voxels = [1 for x in occVect if x == 3]
vol = voxelVol * len(voxels)
return vol
def GetConformerRMS(mol, confId1, confId2, atomIds=None, prealigned=False):
""" Returns the RMS between two conformations.
By default, the conformers will be aligned to the first conformer
before the RMS calculation and, as a side-effect, the second will be left
in the aligned state.
Arguments:
- mol: the molecule
- confId1: the id of the first conformer
- confId2: the id of the second conformer
      - atomIds: (optional) list of atom ids to use as points for
        alignment - defaults to all atoms
      - prealigned: (optional) by default the conformers are assumed
        to be unaligned and the second conformer will be aligned
        to the first
"""
# align the conformers if necessary
# Note: the reference conformer is always the first one
if not prealigned:
if atomIds:
AlignMolConformers(mol, confIds=[confId1, confId2], atomIds=atomIds)
else:
AlignMolConformers(mol, confIds=[confId1, confId2])
# calculate the RMS between the two conformations
conf1 = mol.GetConformer(id=confId1)
conf2 = mol.GetConformer(id=confId2)
ssr = 0
for i in range(mol.GetNumAtoms()):
d = conf1.GetAtomPosition(i).Distance(conf2.GetAtomPosition(i))
ssr += d * d
ssr /= mol.GetNumAtoms()
return numpy.sqrt(ssr)
def GetConformerRMSMatrix(mol, atomIds=None, prealigned=False):
""" Returns the RMS matrix of the conformers of a molecule.
As a side-effect, the conformers will be aligned to the first
conformer (i.e. the reference) and will left in the aligned state.
Arguments:
- mol: the molecule
      - atomIds: (optional) list of atom ids to use as points for
        alignment - defaults to all atoms
      - prealigned: (optional) by default the conformers are assumed
        to be unaligned and will therefore be aligned to the
        first conformer
Note that the returned RMS matrix is symmetrical, i.e. it is the
lower half of the matrix, e.g. for 5 conformers::
rmsmatrix = [ a,
b, c,
d, e, f,
g, h, i, j]
where a is the RMS between conformers 0 and 1, b is the RMS between
conformers 0 and 2, etc.
This way it can be directly used as distance matrix in e.g. Butina
clustering.
"""
# if necessary, align the conformers
# Note: the reference conformer is always the first one
rmsvals = []
confIds = [conf.GetId() for conf in mol.GetConformers()]
if not prealigned:
if atomIds:
AlignMolConformers(mol, atomIds=atomIds, RMSlist=rmsvals)
else:
AlignMolConformers(mol, RMSlist=rmsvals)
else: # already prealigned
for i in range(1, len(confIds)):
rmsvals.append(
GetConformerRMS(mol, confIds[0], confIds[i], atomIds=atomIds, prealigned=prealigned))
# loop over the conformations (except the reference one)
cmat = []
for i in range(1, len(confIds)):
cmat.append(rmsvals[i - 1])
for j in range(1, i):
cmat.append(GetConformerRMS(mol, confIds[i], confIds[j], atomIds=atomIds, prealigned=True))
return cmat
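# Added illustrative sketch (not part of the original module): the docstring
# above notes that the flattened lower-triangle RMS matrix can feed Butina
# clustering directly. The conformer count and distance threshold below are
# arbitrary example values.
def _ExampleClusterConformers(mol, numConfs=10, distThresh=0.5):
    from rdkit.ML.Cluster import Butina
    cids = EmbedMultipleConfs(mol, numConfs=numConfs)
    rmsmat = GetConformerRMSMatrix(mol, prealigned=False)
    return Butina.ClusterData(rmsmat, len(cids), distThresh, isDistData=True)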
def EnumerateLibraryFromReaction(reaction, sidechainSets, returnReactants=False):
""" Returns a generator for the virtual library defined by
a reaction and a sequence of sidechain sets
>>> from rdkit import Chem
>>> from rdkit.Chem import AllChem
>>> s1=[Chem.MolFromSmiles(x) for x in ('NC','NCC')]
>>> s2=[Chem.MolFromSmiles(x) for x in ('OC=O','OC(=O)C')]
>>> rxn = AllChem.ReactionFromSmarts('[O:2]=[C:1][OH].[N:3]>>[O:2]=[C:1][N:3]')
>>> r = AllChem.EnumerateLibraryFromReaction(rxn,[s2,s1])
>>> [Chem.MolToSmiles(x[0]) for x in list(r)]
['CNC=O', 'CCNC=O', 'CNC(C)=O', 'CCNC(C)=O']
Note that this is all done in a lazy manner, so "infinitely" large libraries can
be done without worrying about running out of memory. Your patience will run out first:
Define a set of 10000 amines:
>>> amines = (Chem.MolFromSmiles('N'+'C'*x) for x in range(10000))
... a set of 10000 acids
>>> acids = (Chem.MolFromSmiles('OC(=O)'+'C'*x) for x in range(10000))
... now the virtual library (1e8 compounds in principle):
>>> r = AllChem.EnumerateLibraryFromReaction(rxn,[acids,amines])
... look at the first 4 compounds:
>>> [Chem.MolToSmiles(next(r)[0]) for x in range(4)]
['NC=O', 'CNC=O', 'CCNC=O', 'CCCNC=O']
"""
if len(sidechainSets) != reaction.GetNumReactantTemplates():
raise ValueError('%d sidechains provided, %d required' %
(len(sidechainSets), reaction.GetNumReactantTemplates()))
def _combiEnumerator(items, depth=0):
for item in items[depth]:
if depth + 1 < len(items):
v = _combiEnumerator(items, depth + 1)
for entry in v:
l = [item]
l.extend(entry)
yield l
else:
yield [item]
ProductReactants = namedtuple('ProductReactants', 'products,reactants')
for chains in _combiEnumerator(sidechainSets):
prodSets = reaction.RunReactants(chains)
for prods in prodSets:
if returnReactants:
yield ProductReactants(prods, chains)
else:
yield prods
def ConstrainedEmbed(mol, core, useTethers=True, coreConfId=-1, randomseed=2342,
getForceField=UFFGetMoleculeForceField, **kwargs):
""" generates an embedding of a molecule where part of the molecule
is constrained to have particular coordinates
Arguments
- mol: the molecule to embed
- core: the molecule to use as a source of constraints
- useTethers: (optional) if True, the final conformation will be
optimized subject to a series of extra forces that pull the
matching atoms to the positions of the core atoms. Otherwise
simple distance constraints based on the core atoms will be
used in the optimization.
- coreConfId: (optional) id of the core conformation to use
- randomSeed: (optional) seed for the random number generator
An example, start by generating a template with a 3D structure:
>>> from rdkit.Chem import AllChem
>>> template = AllChem.MolFromSmiles("c1nn(Cc2ccccc2)cc1")
>>> AllChem.EmbedMolecule(template)
0
>>> AllChem.UFFOptimizeMolecule(template)
0
Here's a molecule:
>>> mol = AllChem.MolFromSmiles("c1nn(Cc2ccccc2)cc1-c3ccccc3")
Now do the constrained embedding
>>> mol = AllChem.ConstrainedEmbed(mol, template)
Demonstrate that the positions are nearly the same with template:
>>> import math
>>> molp = mol.GetConformer().GetAtomPosition(0)
>>> templatep = template.GetConformer().GetAtomPosition(0)
>>> all(math.isclose(v, 0.0, abs_tol=0.01) for v in molp-templatep)
True
>>> molp = mol.GetConformer().GetAtomPosition(1)
>>> templatep = template.GetConformer().GetAtomPosition(1)
>>> all(math.isclose(v, 0.0, abs_tol=0.01) for v in molp-templatep)
True
"""
match = mol.GetSubstructMatch(core)
if not match:
raise ValueError("molecule doesn't match the core")
coordMap = {}
coreConf = core.GetConformer(coreConfId)
for i, idxI in enumerate(match):
corePtI = coreConf.GetAtomPosition(i)
coordMap[idxI] = corePtI
ci = EmbedMolecule(mol, coordMap=coordMap, randomSeed=randomseed, **kwargs)
if ci < 0:
raise ValueError('Could not embed molecule.')
algMap = [(j, i) for i, j in enumerate(match)]
if not useTethers:
# clean up the conformation
ff = getForceField(mol, confId=0)
for i, idxI in enumerate(match):
for j in range(i + 1, len(match)):
idxJ = match[j]
d = coordMap[idxI].Distance(coordMap[idxJ])
ff.AddDistanceConstraint(idxI, idxJ, d, d, 100.)
ff.Initialize()
n = 4
more = ff.Minimize()
while more and n:
more = ff.Minimize()
n -= 1
# rotate the embedded conformation onto the core:
rms = AlignMol(mol, core, atomMap=algMap)
else:
# rotate the embedded conformation onto the core:
rms = AlignMol(mol, core, atomMap=algMap)
ff = getForceField(mol, confId=0)
conf = core.GetConformer()
for i in range(core.GetNumAtoms()):
p = conf.GetAtomPosition(i)
pIdx = ff.AddExtraPoint(p.x, p.y, p.z, fixed=True) - 1
ff.AddDistanceConstraint(pIdx, match[i], 0, 0, 100.)
ff.Initialize()
n = 4
more = ff.Minimize(energyTol=1e-4, forceTol=1e-3)
while more and n:
more = ff.Minimize(energyTol=1e-4, forceTol=1e-3)
n -= 1
# realign
rms = AlignMol(mol, core, atomMap=algMap)
mol.SetProp('EmbedRMS', str(rms))
return mol
def AssignBondOrdersFromTemplate(refmol, mol):
""" assigns bond orders to a molecule based on the
bond orders in a template molecule
Arguments
- refmol: the template molecule
- mol: the molecule to assign bond orders to
An example, start by generating a template from a SMILES
and read in the PDB structure of the molecule
>>> import os
>>> from rdkit.Chem import AllChem
>>> template = AllChem.MolFromSmiles("CN1C(=NC(C1=O)(c2ccccc2)c3ccccc3)N")
>>> mol = AllChem.MolFromPDBFile(os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', '4DJU_lig.pdb'))
>>> len([1 for b in template.GetBonds() if b.GetBondTypeAsDouble() == 1.0])
8
>>> len([1 for b in mol.GetBonds() if b.GetBondTypeAsDouble() == 1.0])
22
Now assign the bond orders based on the template molecule
>>> newMol = AllChem.AssignBondOrdersFromTemplate(template, mol)
>>> len([1 for b in newMol.GetBonds() if b.GetBondTypeAsDouble() == 1.0])
8
Note that the template molecule should have no explicit hydrogens
else the algorithm will fail.
It also works if there are different formal charges (this was github issue 235):
>>> template=AllChem.MolFromSmiles('CN(C)C(=O)Cc1ccc2c(c1)NC(=O)c3ccc(cc3N2)c4ccc(c(c4)OC)[N+](=O)[O-]')
>>> mol = AllChem.MolFromMolFile(os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', '4FTR_lig.mol'))
>>> AllChem.MolToSmiles(mol)
'COC1CC(C2CCC3C(O)NC4CC(CC(O)N(C)C)CCC4NC3C2)CCC1N(O)O'
>>> newMol = AllChem.AssignBondOrdersFromTemplate(template, mol)
>>> AllChem.MolToSmiles(newMol)
'COc1cc(-c2ccc3c(c2)Nc2ccc(CC(=O)N(C)C)cc2NC3=O)ccc1[N+](=O)[O-]'
"""
refmol2 = rdchem.Mol(refmol)
mol2 = rdchem.Mol(mol)
# do the molecules match already?
matching = mol2.GetSubstructMatch(refmol2)
if not matching: # no, they don't match
# check if bonds of mol are SINGLE
for b in mol2.GetBonds():
if b.GetBondType() != BondType.SINGLE:
b.SetBondType(BondType.SINGLE)
b.SetIsAromatic(False)
# set the bonds of mol to SINGLE
for b in refmol2.GetBonds():
b.SetBondType(BondType.SINGLE)
b.SetIsAromatic(False)
# set atom charges to zero;
for a in refmol2.GetAtoms():
a.SetFormalCharge(0)
for a in mol2.GetAtoms():
a.SetFormalCharge(0)
matching = mol2.GetSubstructMatches(refmol2, uniquify=False)
# do the molecules match now?
if matching:
if len(matching) > 1:
logger.warning("More than one matching pattern found - picking one")
matching = matching[0]
# apply matching: set bond properties
for b in refmol.GetBonds():
atom1 = matching[b.GetBeginAtomIdx()]
atom2 = matching[b.GetEndAtomIdx()]
b2 = mol2.GetBondBetweenAtoms(atom1, atom2)
b2.SetBondType(b.GetBondType())
b2.SetIsAromatic(b.GetIsAromatic())
# apply matching: set atom properties
for a in refmol.GetAtoms():
a2 = mol2.GetAtomWithIdx(matching[a.GetIdx()])
a2.SetHybridization(a.GetHybridization())
a2.SetIsAromatic(a.GetIsAromatic())
a2.SetNumExplicitHs(a.GetNumExplicitHs())
a2.SetFormalCharge(a.GetFormalCharge())
SanitizeMol(mol2)
if hasattr(mol2, '__sssAtoms'):
mol2.__sssAtoms = None # we don't want all bonds highlighted
else:
raise ValueError("No matching found")
return mol2
# ------------------------------------
#
# doctest boilerplate
#
def _runDoctests(verbose=None): # pragma: nocover
import sys
import doctest
failed, _ = doctest.testmod(optionflags=doctest.ELLIPSIS, verbose=verbose)
sys.exit(failed)
if __name__ == '__main__': # pragma: nocover
_runDoctests()
|
the-stack_0_9249 | from glumpy import app, gloo, gl
from contextlib import contextmanager
import numpy as np
try:
import pycuda.driver
from pycuda.gl import graphics_map_flags, BufferObject
_PYCUDA = True
except ImportError as err:
print('pycuda import error:', err)
_PYCUDA = False
import torch
class OffscreenRender:
def __init__(self, viewport_size, out_buffer_location='opengl', clear_color=None):
self._init_buffers(viewport_size, out_buffer_location)
self.clear_color = clear_color if clear_color is not None else (0., 0., 0., 1.)
def _init_buffers(self, viewport_size, out_buffer_location):
assert out_buffer_location in ['torch', 'opengl', 'numpy']
if out_buffer_location == 'torch':
assert _PYCUDA, 'pycuda is not available'
try:
import pycuda.gl.autoinit # this may fails in headless mode
except:
raise RuntimeError('PyCUDA init failed, cannot use torch buffer')
_ = torch.cuda.FloatTensor(1, 3, 512,512) # needs init here, otherwise does not work
color_np = np.zeros((viewport_size[1], viewport_size[0], 4), np.float32)
self.color_buf, self.color_buf_cuda = create_shared_texture(color_np)
self.out_buf = torch.zeros((viewport_size[1], viewport_size[0], 4), dtype=torch.float32).cuda()
elif out_buffer_location == 'opengl':
self.color_buf = np.zeros((viewport_size[1], viewport_size[0], 4), dtype=np.float32).view(gloo.TextureFloat2D)
self.out_buf = self.color_buf
elif out_buffer_location == 'numpy':
self.color_buf = np.zeros((viewport_size[1], viewport_size[0], 4), dtype=np.float32).view(gloo.TextureFloat2D)
self.out_buf = np.zeros((viewport_size[1], viewport_size[0], 3), dtype=np.float32)
self.viewport_size = viewport_size
self.out_buffer_location = out_buffer_location
self.depth_buf = gloo.DepthBuffer(viewport_size[0], viewport_size[1], gl.GL_DEPTH_COMPONENT32)
self.fbo = gloo.FrameBuffer(color=self.color_buf, depth=self.depth_buf)
def render(self, scene, cull_face=True):
self.fbo.activate()
gl.glEnable(gl.GL_PROGRAM_POINT_SIZE)
gl.glEnable(gl.GL_DEPTH_TEST)
gl.glShadeModel(gl.GL_FLAT)
if cull_face:
gl.glEnable(gl.GL_CULL_FACE)
gl.glCullFace(gl.GL_BACK)
else:
gl.glDisable(gl.GL_CULL_FACE)
gl.glClearColor(*self.clear_color)
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
gl.glViewport(0, 0, self.viewport_size[0], self.viewport_size[1])
if scene.draw_points:
scene.program.draw(gl.GL_POINTS)
else:
assert scene.index_buffer is not None
scene.program.draw(gl.GL_TRIANGLES, scene.index_buffer)
if self.out_buffer_location == 'torch':
frame = cpy_texture_to_tensor(self.color_buf_cuda, self.out_buf).clone()
elif self.out_buffer_location == 'opengl':
frame = self.out_buf
else:
gl.glReadPixels(0, 0, self.viewport_size[0], self.viewport_size[1], gl.GL_RGB, gl.GL_FLOAT, self.out_buf)
frame = self.out_buf.copy()
self.fbo.deactivate()
return frame
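# Added usage sketch (not part of the original module): 'scene' is assumed to
# expose .program, .draw_points and .index_buffer, as required by
# OffscreenRender.render() above; everything else here is a placeholder.
def _example_render_to_numpy(scene, viewport_size=(640, 480)):
    renderer = OffscreenRender(viewport_size, out_buffer_location='numpy',
                               clear_color=(0.0, 0.0, 0.0, 1.0))
    frame = renderer.render(scene, cull_face=False)  # float32 array, shape (H, W, 3)
    return frame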
@contextmanager
def cuda_activate_array(img):
"""Context manager simplifying use of pycuda.gl.RegisteredImage"""
mapping = img.map()
yield mapping.array(0,0)
mapping.unmap()
@contextmanager
def cuda_activate_buffer(buf):
mapping = buf.map()
yield mapping.device_ptr()
mapping.unmap()
def create_shared_texture(arr, map_flags=None):
"""Create and return a Texture2D with gloo and pycuda views."""
if map_flags is None:
map_flags = graphics_map_flags.WRITE_DISCARD
gl_view = arr.view(gloo.TextureFloat2D)
gl_view.activate() # force gloo to create on GPU
gl_view.deactivate()
cuda_view = pycuda.gl.RegisteredImage(
int(gl_view.handle), gl_view.target, map_flags)
return gl_view, cuda_view
def create_shared_buffer(arr):
"""Create and return a BufferObject with gloo and pycuda views."""
gl_view = arr.view(gloo.VertexBuffer)
gl_view.activate() # force gloo to create on GPU
gl_view.deactivate()
cuda_view = BufferObject(np.long(gl_view.handle))
return gl_view, cuda_view
def cpy_texture_to_tensor(texture, tensor):
"""Copy GL texture (cuda view) to pytorch tensor"""
with cuda_activate_array(texture) as src:
cpy = pycuda.driver.Memcpy2D()
cpy.set_src_array(src)
cpy.set_dst_device(tensor.data_ptr())
cpy.width_in_bytes = cpy.src_pitch = cpy.dst_pitch = tensor.shape[1] * 4 * 4
cpy.height = tensor.shape[0]
cpy(aligned=False)
torch.cuda.synchronize()
return tensor
def cpy_tensor_to_texture(tensor, texture):
"""Copy pytorch tensor to GL texture (cuda view)"""
with cuda_activate_array(texture) as ary:
cpy = pycuda.driver.Memcpy2D()
cpy.set_src_device(tensor.data_ptr())
cpy.set_dst_array(ary)
cpy.width_in_bytes = cpy.src_pitch = cpy.dst_pitch = tensor.shape[1] * 4 * 4
cpy.height = tensor.shape[0]
cpy(aligned=False)
torch.cuda.synchronize()
return tensor
def cpy_buffer_to_tensor(buffer, tensor):
"""Copy GL buffer (cuda view) to pytorch tensor"""
n = tensor.numel()*tensor.element_size()
with cuda_activate_buffer(buffer) as buf_ptr:
pycuda.driver.memcpy_dtod(tensor.data_ptr(), buf_ptr, n)
def cpy_tensor_to_buffer(tensor, buffer):
"""Copy pytorch tensor to GL buffer (cuda view)"""
n = tensor.numel()*tensor.element_size()
with cuda_activate_buffer(buffer) as buf_ptr:
pycuda.driver.memcpy_dtod(buf_ptr, tensor.data_ptr(), n)
|
the-stack_0_9250 | """Provides functionality to interact with image processing services."""
import asyncio
from datetime import timedelta
import logging
from typing import final
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NAME,
CONF_ENTITY_ID,
CONF_NAME,
CONF_SOURCE,
)
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import make_entity_service_schema
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.util.async_ import run_callback_threadsafe
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
DOMAIN = "image_processing"
SCAN_INTERVAL = timedelta(seconds=10)
DEVICE_CLASSES = [
"alpr", # Automatic license plate recognition
"face", # Face
"ocr", # OCR
]
SERVICE_SCAN = "scan"
SERVICE_ENABLE = "enable_detection"
SERVICE_DISABLE = "disable_detection"
EVENT_DETECT_FACE = "image_processing.detect_face"
ATTR_AGE = "age"
ATTR_CONFIDENCE = "confidence"
ATTR_FACES = "faces"
ATTR_GENDER = "gender"
ATTR_GLASSES = "glasses"
ATTR_MOTION = "motion"
ATTR_TOTAL_FACES = "total_faces"
CONF_CONFIDENCE = "confidence"
DEFAULT_TIMEOUT = 10
DEFAULT_CONFIDENCE = 80
SOURCE_SCHEMA = vol.Schema(
{
vol.Required(CONF_ENTITY_ID): cv.entity_domain("camera"),
vol.Optional(CONF_NAME): cv.string,
}
)
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_SOURCE): vol.All(cv.ensure_list, [SOURCE_SCHEMA]),
vol.Optional(CONF_CONFIDENCE, default=DEFAULT_CONFIDENCE): vol.All(
vol.Coerce(float), vol.Range(min=0, max=100)
),
}
)
PLATFORM_SCHEMA_BASE = cv.PLATFORM_SCHEMA_BASE.extend(PLATFORM_SCHEMA.schema)
async def async_setup(hass, config):
"""Set up the image processing."""
component = EntityComponent(_LOGGER, DOMAIN, hass, SCAN_INTERVAL)
await component.async_setup(config)
async def async_scan_service(service):
"""Service handler for scan."""
image_entities = await component.async_extract_from_service(service)
update_tasks = []
for entity in image_entities:
entity.async_set_context(service.context)
update_tasks.append(asyncio.create_task(entity.async_update_ha_state(True)))
if update_tasks:
await asyncio.wait(update_tasks)
hass.services.async_register(
DOMAIN, SERVICE_SCAN, async_scan_service, schema=make_entity_service_schema({})
)
component.async_register_entity_service(
SERVICE_ENABLE,
schema=make_entity_service_schema({}),
func="async_enable_detection",
)
component.async_register_entity_service(
SERVICE_DISABLE,
schema=make_entity_service_schema({}),
func="async_disable_detection",
)
return True
class ImageProcessingEntity(Entity):
"""Base entity class for image processing."""
timeout = DEFAULT_TIMEOUT
det = "on"
def enable_detection(self):
"""Enable motion detection in the camera."""
self.det = "on"
raise NotImplementedError()
async def async_enable_detection(self):
"""Call the job and enable motion detection."""
await self.hass.async_add_executor_job(self.enable_detection)
def disable_detection(self):
"""Disable motion detection in camera."""
self.det = "off"
raise NotImplementedError()
async def async_disable_detection(self):
"""Call the job and disable motion detection."""
await self.hass.async_add_executor_job(self.disable_detection)
@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
return None
@property
def confidence(self):
"""Return minimum confidence for do some things."""
return None
@property
def state_attributes(self):
"""Return device specific state attributes."""
return {ATTR_MOTION: self.det}
def process_image(self, image):
"""Process image."""
raise NotImplementedError()
async def async_process_image(self, image):
"""Process image."""
return await self.hass.async_add_executor_job(self.process_image, image)
async def async_update(self):
"""Update image and process it.
This method is a coroutine.
"""
camera = self.hass.components.camera
image = None
try:
image = await camera.async_get_raw_image(
self.camera_entity, timeout=self.timeout
)
except AttributeError:
try:
image = await camera.async_get_image(
self.camera_entity, timeout=self.timeout
)
except HomeAssistantError as err:
_LOGGER.error("Error on receive image from entity: %s", err)
return
# process image data
await self.async_process_image(image.content)
class ImageProcessingFaceEntity(ImageProcessingEntity):
"""Base entity class for face image processing."""
def __init__(self):
"""Initialize base face identify/verify entity."""
self.faces = []
self.total_faces = 0
self.det = "off"
def enable_detection(self):
"""Enable motion detection in the camera."""
self.det = "on"
raise NotImplementedError()
async def async_enable_detection(self):
"""Call the job and enable motion detection."""
await self.hass.async_add_executor_job(self.enable_detection)
def disable_detection(self):
"""Disable motion detection in camera."""
self.det = "off"
raise NotImplementedError()
async def async_disable_detection(self):
"""Call the job and disable motion detection."""
await self.hass.async_add_executor_job(self.disable_detection)
@property
def state(self):
"""Return the state of the entity."""
confidence = 0
state = None
# No confidence support
if not self.confidence:
return self.total_faces
# Search high confidence
for face in self.faces:
if ATTR_CONFIDENCE not in face:
continue
f_co = face[ATTR_CONFIDENCE]
if f_co > confidence:
confidence = f_co
for attr in [ATTR_NAME, ATTR_MOTION]:
if attr in face:
state = face[attr]
break
return state
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return "face"
@final
@property
def state_attributes(self):
"""Return device specific state attributes."""
return {
ATTR_FACES: self.faces,
ATTR_TOTAL_FACES: self.total_faces,
ATTR_MOTION: self.det,
}
def process_faces(self, faces, total):
"""Send event with detected faces and store data."""
run_callback_threadsafe(
self.hass.loop, self.async_process_faces, faces, total
).result()
@callback
def async_process_faces(self, faces, total):
"""Send event with detected faces and store data.
        faces is a list of dicts in the following format:
[
{
ATTR_CONFIDENCE: 80,
ATTR_NAME: 'Name',
ATTR_AGE: 12.0,
ATTR_GENDER: 'man',
ATTR_MOTION: 'smile',
ATTR_GLASSES: 'sunglasses'
},
]
This method must be run in the event loop.
"""
# Send events
for face in faces:
if (
ATTR_CONFIDENCE in face
and self.confidence
and face[ATTR_CONFIDENCE] < self.confidence
):
continue
face.update({ATTR_ENTITY_ID: self.entity_id})
self.hass.async_add_job(self.hass.bus.async_fire, EVENT_DETECT_FACE, face)
# Update entity store
self.faces = faces
self.total_faces = total
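# Added illustrative sketch (not part of Home Assistant itself): a minimal
# platform entity built on ImageProcessingFaceEntity. A real platform would run
# an actual detector inside process_image(); the face dict below is a
# placeholder and the class name is an example only.
class _ExampleFaceEntity(ImageProcessingFaceEntity):
    def __init__(self, camera_entity_id, name):
        super().__init__()
        self._camera_entity_id = camera_entity_id
        self._name = name
    @property
    def camera_entity(self):
        return self._camera_entity_id
    @property
    def name(self):
        return self._name
    def process_image(self, image):
        # `image` holds the raw camera image bytes; results are reported via
        # process_faces(), which hands them to async_process_faces() on the loop.
        faces = [{ATTR_CONFIDENCE: 95.0, ATTR_NAME: 'example'}]
        self.process_faces(faces, len(faces))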
|
the-stack_0_9251 | # coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for core.domain.base_model_validators."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
from core import jobs_registry
from core.domain import base_model_validators
from core.domain import prod_validation_jobs_one_off
from core.platform import models
from core.tests import test_utils
import feconf
import utils
(base_models, user_models) = models.Registry.import_models(
[models.NAMES.base_model, models.NAMES.user])
class MockModel(base_models.BaseModel):
pass
class MockSnapshotModel(base_models.BaseModel):
commit_type = 'edit'
commit_cmds = []
class MockBaseModelValidator(base_model_validators.BaseModelValidator):
pass
class MockModelValidatorWithInvalidValidationType(
base_model_validators.BaseModelValidator):
@classmethod
def _get_external_id_relationships(cls, item):
return []
@classmethod
def _get_model_domain_object_instance(cls, unused_item):
return MockModel()
@classmethod
def _get_domain_object_validation_type(cls, unused_item):
return 'Invalid'
class MockSummaryModelValidator(
base_model_validators.BaseSummaryModelValidator):
@classmethod
def _get_external_id_relationships(cls, item):
return []
class MockSnapshotContentModelValidator(
base_model_validators.BaseSnapshotContentModelValidator):
@classmethod
def _get_external_id_relationships(cls, item):
return []
class MockSnapshotMetadataModelValidator(
base_model_validators.BaseSnapshotMetadataModelValidator):
EXTERNAL_MODEL_NAME = 'external model'
@classmethod
def _get_external_id_relationships(cls, item):
return [
base_model_validators.ExternalModelFetcherDetails(
'external_model_ids', MockModel, [])]
class MockBaseUserModelValidator(
base_model_validators.BaseUserModelValidator):
@classmethod
def _get_external_id_relationships(cls, item):
return []
@classmethod
def _get_custom_validation_functions(cls):
return [cls._validate_common_properties_do_not_match]
@classmethod
def _get_external_instance_custom_validation_functions(cls):
return [
cls._validate_explorations_are_public,
cls._validate_collections_are_public
]
class MockCommitLogEntryModel(base_models.BaseCommitLogEntryModel):
pass
class MockCommitLogEntryModelValidator(
base_model_validators.BaseCommitLogEntryModelValidator):
EXTERNAL_MODEL_NAME = 'mockmodel'
@classmethod
def _get_change_domain_class(cls, item):
if item.id.startswith('mock'):
return MockCommitLogEntryModel
else:
cls._add_error(
'model %s' % base_model_validators.ERROR_CATEGORY_ID_CHECK,
'Entity id %s: Entity id does not match regex pattern' % (
item.id))
return None
@classmethod
def _get_external_id_relationships(cls, item):
return [
base_model_validators.UserSettingsModelFetcherDetails(
'user_id', [item.user_id],
may_contain_system_ids=False,
may_contain_pseudonymous_ids=False
)]
class BaseValidatorTests(test_utils.AuditJobsTestBase):
def setUp(self):
super(BaseValidatorTests, self).setUp()
self.invalid_model = MockModel(id='mockmodel')
self.invalid_model.update_timestamps()
self.invalid_model.put()
def test_error_is_raised_if_fetch_external_properties_is_undefined(self):
with self.assertRaisesRegexp(
NotImplementedError,
r'The _get_external_id_relationships\(\) method is missing from the'
' derived class. It should be implemented in the derived class.'):
MockBaseModelValidator().validate(self.invalid_model)
def test_error_is_get_external_model_properties_is_undefined(self):
with self.assertRaisesRegexp(
NotImplementedError,
r'The _get_external_model_properties\(\) method is missing from the'
' derived class. It should be implemented in the derived class.'):
MockSummaryModelValidator().validate(self.invalid_model)
def test_error_is_raised_if_external_model_name_is_undefined(self):
with self.assertRaisesRegexp(
Exception, 'External model name should be specified'):
MockSnapshotContentModelValidator().validate(self.invalid_model)
def test_error_is_raised_if_get_change_domain_class_is_undefined(self):
with self.assertRaisesRegexp(
NotImplementedError,
r'The _get_change_domain_class\(\) method is missing from the '
'derived class. It should be implemented in the derived class.'):
snapshot_model = MockSnapshotModel(id='mockmodel')
snapshot_model.update_timestamps()
snapshot_model.put()
MockSnapshotMetadataModelValidator().validate(snapshot_model)
def test_error_is_raised_if_entity_classes_to_map_over_is_undefined(self):
job_class = (
prod_validation_jobs_one_off.ProdValidationAuditOneOffJob)
with self.assertRaisesRegexp(
NotImplementedError,
r'The entity_classes_to_map_over\(\) method is missing from the '
'derived class. It should be implemented in the derived class.'):
with self.swap(jobs_registry, 'ONE_OFF_JOB_MANAGERS', [job_class]):
job_id = job_class.create_new()
job_class.enqueue(job_id)
def test_error_is_raised_with_invalid_validation_type_for_domain_objects(
self):
MockModelValidatorWithInvalidValidationType.validate(self.invalid_model)
expected_errors = {
'domain object check': [
'Entity id mockmodel: Entity fails domain validation with '
'the error Invalid validation type for domain object: Invalid']}
self.assertEqual(
MockModelValidatorWithInvalidValidationType.errors, expected_errors)
def test_no_error_is_raised_for_base_user_model(self):
user = MockModel(id='12345')
user.update_timestamps()
user.put()
MockBaseUserModelValidator().validate(user)
def test_validate_deleted_reports_error_for_old_deleted_model(self):
year_ago = datetime.datetime.utcnow() - datetime.timedelta(weeks=52)
model = MockModel(
id='123',
deleted=True,
last_updated=year_ago
)
model.update_timestamps(update_last_updated_time=False)
model.put()
validator = MockBaseUserModelValidator()
validator.validate_deleted(model)
self.assertEqual(
validator.errors,
{
'entity stale check': [
'Entity id 123: model marked as '
'deleted is older than 8 weeks'
]
}
)
def test_external_model_fetcher_with_user_settings_raise_error(self):
with self.assertRaisesRegexp(
Exception,
'When fetching instances of UserSettingsModel, please use ' +
'UserSettingsModelFetcherDetails instead of ' +
'ExternalModelFetcherDetails'):
base_model_validators.ExternalModelFetcherDetails(
'committer_ids', user_models.UserSettingsModel,
[
feconf.MIGRATION_BOT_USER_ID, 'User-1',
self.PSEUDONYMOUS_ID
]
)
def test_may_contain_system_users_filters_system_ids(self):
user_settings_model = (
base_model_validators.UserSettingsModelFetcherDetails(
'committer_ids',
[feconf.MIGRATION_BOT_USER_ID, 'User-1'],
may_contain_system_ids=True,
may_contain_pseudonymous_ids=False
))
self.assertItemsEqual(
user_settings_model.model_ids, ['User-1'])
def test_error_raised_if_model_ids_contain_system_ids(self):
with self.assertRaisesRegexp(
utils.ValidationError,
'The field \'committer_ids\' should not contain system IDs'):
base_model_validators.UserSettingsModelFetcherDetails(
'committer_ids', [feconf.MIGRATION_BOT_USER_ID, 'User-1'],
may_contain_system_ids=False,
may_contain_pseudonymous_ids=False
)
def test_may_contain_pseudonymous_users_filters_pseudonymous_users(self):
user_settings_model = (
base_model_validators.UserSettingsModelFetcherDetails(
'committer_ids', ['User-1', self.PSEUDONYMOUS_ID],
may_contain_system_ids=False,
may_contain_pseudonymous_ids=True
))
self.assertItemsEqual(
user_settings_model.model_ids, ['User-1'])
def test_error_raised_if_model_ids_contain_pseudonymous_ids(self):
with self.assertRaisesRegexp(
utils.ValidationError,
'The field \'committer_ids\' should not contain pseudonymous IDs'):
base_model_validators.UserSettingsModelFetcherDetails(
'committer_ids', [self.PSEUDONYMOUS_ID, 'User-1'],
may_contain_system_ids=False,
may_contain_pseudonymous_ids=False
)
def test_error_raised_when_fetching_external_model_with_system_ids(self):
model = MockCommitLogEntryModel(
id='mock-12345',
user_id=feconf.MIGRATION_BOT_USER_ID,
commit_cmds=[])
model.update_timestamps()
mock_validator = MockCommitLogEntryModelValidator()
mock_validator.errors.clear()
mock_validator.validate(model)
self.assertDictContainsSubset(
{
'invalid user setting ids': [
'Entity id mock-12345: '
'The field \'user_id\' should not contain system IDs'
]
},
mock_validator.errors
)
def test_error_raised_when_fetching_external_model_with_pseudo_ids(self):
model = MockCommitLogEntryModel(
id='mock-12345',
user_id=self.PSEUDONYMOUS_ID,
commit_cmds=[])
model.update_timestamps()
mock_validator = MockCommitLogEntryModelValidator()
mock_validator.errors.clear()
mock_validator.validate(model)
self.assertDictContainsSubset(
{
'invalid user setting ids': [
'Entity id mock-12345: '
'The field \'user_id\' should not contain pseudonymous IDs'
]
},
mock_validator.errors
)
|
the-stack_0_9255 | load("@bazel_tools//tools/cpp:lib_cc_configure.bzl", "get_cpu_value")
def execute_or_fail_loudly(
repository_ctx,
arguments,
environment = {},
working_directory = ""):
"""Execute the given command
Fails if the command does not exit with exit-code 0.
Args:
arguments: List, the command line to execute.
Returns:
exec_result: The output of the command.
"""
exec_result = repository_ctx.execute(
arguments,
environment = environment,
quiet = True,
working_directory = working_directory,
)
if exec_result.return_code != 0:
arguments = [_as_string(x) for x in arguments]
fail("\n".join(["Command failed: " + " ".join(arguments), exec_result.stderr]))
return exec_result
def _as_string(v):
if type(v) == "string":
return v
else:
return repr(v)
def find_python(repository_ctx):
python = repository_ctx.which("python3")
if not python:
python = repository_ctx.which("python")
if not python:
fail("There is no Python in PATH. Please install Python >= 3.3.")
result = repository_ctx.execute([python, "--version"])
if not result.stdout.startswith("Python 3"):
fail("rules_haskell requires Python >= 3.3.")
return python
def resolve_labels(repository_ctx, labels):
"""
Avoid rule restart by resolving these labels early. See
https://github.com/bazelbuild/bazel/blob/master/tools/cpp/lib_cc_configure.bzl#L17.
Args:
repository_ctx: The context with which to resolve the labels.
labels: Labels to be resolved expressed as a list of strings.
Returns:
A dictionary with the labels as keys and their paths as values.
"""
return dict([(label, repository_ctx.path(Label(label))) for label in labels])
def define_rule(rule_type, name, **kwargs):
"""Generate a string representing a rule definition.
Take care to escape string values using repr().
### Examples
```bzl
define_rule("myrule",
name = "foo",
myattr1 = repr("bar"),
myattr2 = ["baz"],
)
```
"""
attrs = ["{} = {},".format(k, v) for k, v in kwargs.items() if v != None]
skeleton = """\
{rule_type}(
name = {name},
{attrs}
)
"""
return skeleton.format(
rule_type = rule_type,
name = repr(name),
attrs = "\n ".join(attrs),
)
|
the-stack_0_9261 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests fuzzers.mutator_plugin."""
from pyfakefs import fake_filesystem_unittest
import os
import shutil
import unittest
from bot.fuzzers import mutator_plugin
from tests.test_libs import helpers
from tests.test_libs import test_utils
class FindMutatorPluginTest(fake_filesystem_unittest.TestCase):
"""Tests find_mutator_plugin."""
def setUp(self):
helpers.patch_environ(self)
test_utils.set_up_pyfakefs(self)
self.plugins_root_dir = '/plugins'
os.environ['MUTATOR_PLUGINS_DIR'] = self.plugins_root_dir
def test_find_mutator_plugin_with_usable(self):
"""Tests that the right path is returned by find_mutator_plugin when there
is a usable mutator plugin available."""
usable_plugin_path = os.path.join(
self.plugins_root_dir, 'plugins',
mutator_plugin.MUTATOR_SHARED_OBJECT_FILENAME)
self.fs.create_file(usable_plugin_path)
self.assertEqual(usable_plugin_path, mutator_plugin.find_mutator_plugin())
def test_set_mutator_plugin_without_usable(self):
"""Tests that None is returned by find_mutator_plugin when there isn't a
usable mutator plugin available."""
self.assertIsNone(mutator_plugin.find_mutator_plugin())
# pylint: disable=protected-access
class GetDirectoryFunctionsTest(unittest.TestCase):
"""Tests functions for get plugin directories."""
def setUp(self):
helpers.patch_environ(self)
self.plugins_root_dir = '/plugins'
os.environ['MUTATOR_PLUGINS_DIR'] = self.plugins_root_dir
def test_get_mutator_plugins_subdir(self):
"""Tests that _get_mutator_plugins_subdir returns the path to the correct
subdirectory."""
subdir = 'x'
self.assertEqual(
os.path.join(self.plugins_root_dir, subdir),
mutator_plugin._get_mutator_plugins_subdir(subdir))
def test_get_mutator_plugins_archives_dir(self):
"""Tests that _get_mutator_plugins_archives_dir returns the path to the
mutator plugin archives directory."""
self.assertEqual(
os.path.join(self.plugins_root_dir,
mutator_plugin.ARCHIVES_SUBDIR_NAME),
mutator_plugin._get_mutator_plugins_archives_dir())
def test_get_mutator_plugins_unpacked_dir(self):
"""Tests that _get_mutator_plugins_unpacked_dir returns the path to the
unpacked mutator plugin directory."""
self.assertEqual(
os.path.join(self.plugins_root_dir, mutator_plugin.PLUGINS_SUBDIR_NAME),
mutator_plugin._get_mutator_plugins_unpacked_dir())
# pylint: disable=protected-access
class PluginGetterTest(fake_filesystem_unittest.TestCase):
"""Tests PluginGetter."""
def setUp(self):
"""Setup for plugin getter test."""
helpers.patch_environ(self)
test_utils.set_up_pyfakefs(self)
os.environ['JOB_NAME'] = 'libfuzzer_asan_test'
self.fuzzer_binary_name = 'test_fuzzer'
self.name = 'myplugin'
self.plugins_root_dir = '/plugins'
os.environ['MUTATOR_PLUGINS_DIR'] = self.plugins_root_dir
self.fs.create_dir(self.plugins_root_dir)
self.plugin_getter = mutator_plugin.PluginGetter(self.fuzzer_binary_name)
self.plugins_archives_dir = os.path.join(self.plugins_root_dir, 'archives')
self.plugin_archive_filename = '%s-%s-%s.zip' % (
self.name, os.environ['JOB_NAME'], self.fuzzer_binary_name)
self.plugin_archive_path = os.path.join(self.plugins_archives_dir,
self.plugin_archive_filename)
self.plugins_dir = os.path.join(self.plugins_root_dir, 'plugins')
helpers.patch(self, [
'google_cloud_utils.storage.copy_file_from',
'bot.fuzzers.mutator_plugin._get_mutator_plugins_from_bucket',
])
def mocked_copy_file_from(gcs_url, file_path):
expected_url = '%s/%s' % (mutator_plugin._get_mutator_plugins_bucket_url(
), self.plugin_archive_filename)
self.assertEqual(expected_url, gcs_url)
self.assertEqual(file_path, self.plugin_archive_path)
return file_path
self.mock.copy_file_from.side_effect = mocked_copy_file_from
def test_create_directories(self):
"""Tests that create_directories creates the right directories."""
shutil.rmtree(self.plugins_root_dir)
self.fs.create_dir(self.plugins_root_dir)
self.plugin_getter.create_directories()
directories = [
os.path.join(self.plugins_root_dir, 'plugins'),
os.path.join(self.plugins_root_dir, 'archives')
]
self.assertTrue(all(os.path.isdir(directory) for directory in directories))
def test_recognizes_usable(self):
"""Tests that _is_plugin_usable recognizes a usable plugin archive."""
self.assertTrue(
self.plugin_getter._is_plugin_usable(self.plugin_archive_filename))
def test_recognizes_unusable(self):
"""Tests that _is_plugin_usable recognizes an unusable plugin archive."""
unusable_plugin_archive_filename = self.plugin_archive_filename.replace(
self.fuzzer_binary_name, 'other_binary')
self.assertFalse(
self.plugin_getter._is_plugin_usable(unusable_plugin_archive_filename))
def test_download_mutator_plugin_archive(self):
"""Tests that _download_mutator_plugin_archive downloads an archive to the
correct location."""
self.assertEqual(
self.plugin_archive_path,
mutator_plugin._download_mutator_plugin_archive(
self.plugin_archive_filename))
class ExtractNameFromArchiveTest(unittest.TestCase):
"""Tests for _extract_name_from_archive."""
def test_extract_name_from_archive(self):
"""Tests that _extract_name_from_archive extracts the name from the
archive."""
name = 'myplugin'
fuzzer_binary_name = 'test_fuzzer'
job_name = 'libfuzzer_asan_test'
plugin_archive_filename = '%s-%s-%s.zip' % (name, job_name,
fuzzer_binary_name)
extracted_name, job_and_fuzz_target = (
mutator_plugin._extract_name_from_archive(plugin_archive_filename))
self.assertEqual(name, extracted_name)
expected_job_and_fuzz_target = '%s-%s' % (job_name, fuzzer_binary_name)
self.assertEqual(expected_job_and_fuzz_target, job_and_fuzz_target)
|
the-stack_0_9262 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import os
from six import iteritems
import logging
from werkzeug.wrappers import Request
from werkzeug.local import LocalManager
from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.middleware.profiler import ProfilerMiddleware
from werkzeug.middleware.shared_data import SharedDataMiddleware
import frappe
import frappe.handler
import frappe.auth
import frappe.api
import frappe.utils.response
import frappe.website.render
from frappe.utils import get_site_name, sanitize_html
from frappe.middlewares import StaticDataMiddleware
from frappe.utils.error import make_error_snapshot
from frappe.core.doctype.comment.comment import update_comments_in_parent_after_request
from frappe import _
import frappe.recorder
import frappe.monitor
import frappe.rate_limiter
local_manager = LocalManager([frappe.local])
_site = None
_sites_path = os.environ.get("SITES_PATH", ".")
class RequestContext(object):
def __init__(self, environ):
self.request = Request(environ)
def __enter__(self):
init_request(self.request)
def __exit__(self, type, value, traceback):
frappe.destroy()
@Request.application
def application(request):
response = None
try:
rollback = True
init_request(request)
frappe.recorder.record()
frappe.monitor.start()
frappe.rate_limiter.apply()
if frappe.local.form_dict.cmd:
response = frappe.handler.handle()
elif frappe.request.path.startswith("/api/"):
response = frappe.api.handle()
elif frappe.request.path.startswith('/backups'):
response = frappe.utils.response.download_backup(request.path)
elif frappe.request.path.startswith('/private/files/'):
response = frappe.utils.response.download_private_file(request.path)
elif frappe.local.request.method in ('GET', 'HEAD', 'POST'):
response = frappe.website.render.render()
else:
raise NotFound
except HTTPException as e:
return e
except frappe.SessionStopped as e:
response = frappe.utils.response.handle_session_stopped()
except Exception as e:
response = handle_exception(e)
else:
rollback = after_request(rollback)
finally:
if frappe.local.request.method in ("POST", "PUT") and frappe.db and rollback:
frappe.db.rollback()
# set cookies
if response and hasattr(frappe.local, 'cookie_manager'):
frappe.local.cookie_manager.flush_cookies(response=response)
frappe.rate_limiter.update()
frappe.monitor.stop(response)
frappe.recorder.dump()
if response and hasattr(frappe.local, 'rate_limiter'):
response.headers.extend(frappe.local.rate_limiter.headers())
frappe.destroy()
return response
def init_request(request):
frappe.local.request = request
frappe.local.is_ajax = frappe.get_request_header("X-Requested-With")=="XMLHttpRequest"
site = _site or request.headers.get('X-Frappe-Site-Name') or get_site_name(request.host)
frappe.init(site=site, sites_path=_sites_path)
if not (frappe.local.conf and frappe.local.conf.db_name):
# site does not exist
raise NotFound
if frappe.local.conf.get('maintenance_mode'):
frappe.connect()
raise frappe.SessionStopped('Session Stopped')
make_form_dict(request)
frappe.local.http_request = frappe.auth.HTTPRequest()
def make_form_dict(request):
import json
request_data = request.get_data(as_text=True)
if 'application/json' in (request.content_type or '') and request_data:
args = json.loads(request_data)
else:
args = request.form or request.args
try:
frappe.local.form_dict = frappe._dict({ k:v[0] if isinstance(v, (list, tuple)) else v \
for k, v in iteritems(args) })
except IndexError:
frappe.local.form_dict = frappe._dict(args)
if "_" in frappe.local.form_dict:
# _ is passed by $.ajax so that the request is not cached by the browser. So, remove _ from form_dict
frappe.local.form_dict.pop("_")
def handle_exception(e):
response = None
http_status_code = getattr(e, "http_status_code", 500)
return_as_message = False
if frappe.get_request_header('Accept') and (frappe.local.is_ajax or 'application/json' in frappe.get_request_header('Accept')):
# handle ajax responses first
# if the request is ajax, send back the trace or error message
response = frappe.utils.response.report_error(http_status_code)
elif (http_status_code==500
and (frappe.db and isinstance(e, frappe.db.InternalError))
and (frappe.db and (frappe.db.is_deadlocked(e) or frappe.db.is_timedout(e)))):
http_status_code = 508
elif http_status_code==401:
frappe.respond_as_web_page(_("Session Expired"),
_("Your session has expired, please login again to continue."),
http_status_code=http_status_code, indicator_color='red')
return_as_message = True
elif http_status_code==403:
frappe.respond_as_web_page(_("Not Permitted"),
_("You do not have enough permissions to complete the action"),
http_status_code=http_status_code, indicator_color='red')
return_as_message = True
elif http_status_code==404:
frappe.respond_as_web_page(_("Not Found"),
_("The resource you are looking for is not available"),
http_status_code=http_status_code, indicator_color='red')
return_as_message = True
elif http_status_code == 429:
response = frappe.rate_limiter.respond()
else:
traceback = "<pre>" + sanitize_html(frappe.get_traceback()) + "</pre>"
if frappe.local.flags.disable_traceback:
traceback = ""
frappe.respond_as_web_page("Server Error",
traceback, http_status_code=http_status_code,
indicator_color='red', width=640)
return_as_message = True
if e.__class__ == frappe.AuthenticationError:
if hasattr(frappe.local, "login_manager"):
frappe.local.login_manager.clear_cookies()
if http_status_code >= 500:
frappe.logger().error('Request Error', exc_info=True)
make_error_snapshot(e)
if return_as_message:
response = frappe.website.render.render("message",
http_status_code=http_status_code)
return response
def after_request(rollback):
if (frappe.local.request.method in ("POST", "PUT") or frappe.local.flags.commit) and frappe.db:
if frappe.db.transaction_writes:
frappe.db.commit()
rollback = False
# update session
if getattr(frappe.local, "session_obj", None):
updated_in_db = frappe.local.session_obj.update()
if updated_in_db:
frappe.db.commit()
rollback = False
update_comments_in_parent_after_request()
return rollback
application = local_manager.make_middleware(application)
def serve(port=8000, profile=False, no_reload=False, no_threading=False, site=None, sites_path='.'):
global application, _site, _sites_path
_site = site
_sites_path = sites_path
from werkzeug.serving import run_simple
patch_werkzeug_reloader()
if profile:
application = ProfilerMiddleware(application, sort_by=('cumtime', 'calls'))
if not os.environ.get('NO_STATICS'):
application = SharedDataMiddleware(application, {
str('/assets'): str(os.path.join(sites_path, 'assets'))
})
application = StaticDataMiddleware(application, {
str('/files'): str(os.path.abspath(sites_path))
})
application.debug = True
application.config = {
'SERVER_NAME': 'localhost:8000'
}
in_test_env = os.environ.get('CI')
if in_test_env:
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
run_simple('0.0.0.0', int(port), application,
use_reloader=False if in_test_env else not no_reload,
use_debugger=not in_test_env,
use_evalex=not in_test_env,
threaded=not no_threading)
def patch_werkzeug_reloader():
"""
This function monkey patches Werkzeug reloader to ignore reloading files in
the __pycache__ directory.
To be deprecated when upgrading to Werkzeug 2.
"""
from werkzeug._reloader import WatchdogReloaderLoop
trigger_reload = WatchdogReloaderLoop.trigger_reload
def custom_trigger_reload(self, filename):
if os.path.basename(os.path.dirname(filename)) == "__pycache__":
return
return trigger_reload(self, filename)
WatchdogReloaderLoop.trigger_reload = custom_trigger_reload |
the-stack_0_9264 | import argparse
import json
import os
import numpy as np
import tensorflow.compat.v1 as tf
import time
class AccumulatingOptimizer(object):
    """Accumulates gradients (and losses) over several compute_gradients() calls
    so that a single apply_gradients() step applies the summed gradients and
    returns the mean accumulated loss."""
def __init__(self, opt, var_list):
self.opt = opt
self.var_list = var_list
self.accum_vars = {tv : tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False)
for tv in var_list}
self.total_loss = tf.Variable(tf.zeros(shape=[], dtype=tf.float32))
self.count_loss = tf.Variable(tf.zeros(shape=[], dtype=tf.float32))
def reset(self):
updates = [tv.assign(tf.zeros_like(tv)) for tv in self.accum_vars.values()]
updates.append(self.total_loss.assign(tf.zeros(shape=[], dtype=tf.float32)))
updates.append(self.count_loss.assign(tf.zeros(shape=[], dtype=tf.float32)))
with tf.control_dependencies(updates):
return tf.no_op()
def compute_gradients(self, loss):
grads = self.opt.compute_gradients(loss, self.var_list)
updates = [self.accum_vars[v].assign_add(g) for (g,v) in grads]
updates.append(self.total_loss.assign_add(loss))
updates.append(self.count_loss.assign_add(1.0))
with tf.control_dependencies(updates):
return tf.no_op()
def apply_gradients(self):
grads = [(g,v) for (v,g) in self.accum_vars.items()]
with tf.control_dependencies([self.opt.apply_gradients(grads)]):
return self.total_loss / self.count_loss
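# --- Hedged usage sketch (not part of the original file) ---
# Minimal illustration of how AccumulatingOptimizer is typically driven: gradients
# are accumulated over several micro-batches, then applied in one optimizer step.
# The variable/placeholder names and the toy quadratic loss below are assumptions
# made for this sketch only.
if __name__ == '__main__':
    tf.disable_eager_execution()
    v = tf.get_variable('v', shape=[2], initializer=tf.zeros_initializer())
    target = tf.placeholder(tf.float32, shape=[2])
    loss = tf.reduce_sum(tf.square(v - target))
    accum = AccumulatingOptimizer(tf.train.AdamOptimizer(1e-2), [v])
    reset_op = accum.reset()
    accum_op = accum.compute_gradients(loss)
    apply_op = accum.apply_gradients()          # returns the mean accumulated loss
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(reset_op)
        for batch in ([1.0, 2.0], [3.0, 4.0]):  # two micro-batches
            sess.run(accum_op, feed_dict={target: batch})
        mean_loss = sess.run(apply_op)          # one parameter update
        print('mean accumulated loss:', mean_loss)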
|
the-stack_0_9265 | import os
import time
from gym_idsgame.config.runner_mode import RunnerMode
from gym_idsgame.simulation.dao.simulation_config import SimulationConfig
from gym_idsgame.agents.dao.agent_type import AgentType
from gym_idsgame.config.client_config import ClientConfig
from gym_idsgame.runnner import Runner
from gym_idsgame.agents.training_agents.q_learning.q_agent_config import QAgentConfig
from gym_idsgame.experiments.util import plotting_util, util
import argparse
def default_output_dir() -> str:
"""
:return: the default output dir
"""
script_dir = os.path.dirname(__file__)
return script_dir
def define_args():
parser = argparse.ArgumentParser()
parser.add_argument('--attacker_path', type=str,default='')
parser.add_argument('--defender_path', type=str,default='')
parser.add_argument('--num_episodes', type=int, default = 100)
parser.add_argument('--attacker_bot', action='store_true')
parser.add_argument('--defender_bot', action='store_true')
args = parser.parse_args()
return args
def default_config(args) -> ClientConfig:
"""
:return: Default configuration for the experiment
"""
simulation_config = SimulationConfig(render=True, sleep=0.8, video=False, log_frequency=1,
video_fps=5, video_dir=default_output_dir() + "/videos", num_episodes=args.num_episodes,
gifs=False, gif_dir=default_output_dir() + "/gifs", video_frequency = 1)
q_agent_config = QAgentConfig(attacker_load_path=args.attacker_path,defender_load_path=args.defender_path)
env_name = "idsgame-cyber-v0"
attacker_type = AgentType.TABULAR_Q_AGENT.value
defender_type = AgentType.TABULAR_Q_AGENT.value
if(args.attacker_bot):
attacker_type = AgentType.ATTACK_MAXIMAL_VALUE.value
if(args.defender_bot):
defender_type = AgentType.DEFEND_MINIMAL_VALUE.value
client_config = ClientConfig(env_name=env_name, attacker_type=attacker_type,
defender_type=defender_type, mode=RunnerMode.SIMULATE.value,
simulation_config=simulation_config, output_dir=default_output_dir(),
title="Simulation",
q_agent_config=q_agent_config)
return client_config
# Program entrypoint
if __name__ == '__main__':
args = define_args()
config = default_config(args)
result = Runner.run(config)
print(f'Number of attack victory in {args.num_episodes} episodes: {result.attacker_wins[-1]}')
|
the-stack_0_9266 |
def read_verilog(args):
    """Parse a Verilog netlist and return the fully inlined boolean expression for 'valid'.

    Every 'assign' statement except the last is collected; nets defined by earlier
    assignments are substituted (inlined) into the right-hand sides of later ones.
    """
    assert(len(args) == 1)
    filename = args[0]
    with open(filename, 'r') as verilogfile:
        # Keep only the assign statements; the final assign line is dropped.
        content = [line for line in verilogfile if 'assign' in line][:-1]
boolDict = dict()
for line in content:
left, right = line.split('=')
name = left.split()[1]
for k, v in boolDict.items():
right = right.replace(k, '({})'.format(v))
boolDict[name] = right.replace(';', '').replace('\n', '')
#print(boolDict['valid'])
return boolDict['valid']
|
the-stack_0_9267 | from quanser_robots import GentlyTerminating
import threading
import gym
import torch
import numpy as np
from abstract_rl.src.data_structures.temporal_difference_data.trajectory_builder import TrajectoryBuilder
from abstract_rl.src.data_structures.temporal_difference_data.trajectory_collection import TrajectoryCollection
from abstract_rl.src.misc.cli_printer import CliPrinter
from abstract_rl.src.misc.running_centered_filter import RunningCenteredFilter
class MCMCEnvWrapper:
"""
Environment wrapper for gym environments. Adds support for executing a whole trajectory based on a policy,
instead of only giving a step based interface to the outside.
"""
def namespace(self):
return self._name_sp
def __init__(self, mc):
"""
Initialize a new environment.
:param mc: The model configuration with everything important.
"""
conf = mc['conf']
self.mc = mc
self.num_threads = conf['num_threads']
self.render = conf['render']
self._name_sp = conf.get_namespace()
# save config
self.conf = conf
self.num_epochs = conf.get_root('num_epochs')
self.env_name = conf['name']
self.env = GentlyTerminating(gym.make(self.env_name))
self._normalize = conf['normalize']
self._discount = conf['discount']
self.epoch = 0
# set best measured reward to lowest possible reward
self.best_reward = np.finfo(np.float64).min
self.last_reward = None
self.max_traj_len = 0
self.min_reward = None
self.cli = CliPrinter().instance
self.created_trajectories = 0
self.obs_sp = self.env.observation_space
self.act_sp = self.env.action_space
self.thread_lock = threading.Lock()
self.state_filter = RunningCenteredFilter('states', self.observation_dim)
self.reward_filter = RunningCenteredFilter('rewards', 1)
def last_ucb_reward(self):
assert self.last_reward is not None
return self.last_reward
def discount(self):
"""
Discount factor of the environment.
:return: The discount factor used.
"""
return self._discount
def reset(self):
"""
Resets the environment.
:return: The state after the reset.
"""
cs = self.env.reset()
return cs
def execute_policy(self, policy, max_steps, batch_size, exploration=True, render=False, rew_field_name=None):
"""
        Executes a policy repeatedly until roughly batch_size environment steps have been collected,
        splitting the work into trajectories of at most max_steps steps each. This work can also be
        split onto multiple threads.
        :param policy: The policy to evaluate.
        :param max_steps: The maximum number of steps per trajectory.
        :param batch_size: The total number of environment steps to collect.
        :param exploration: If True, sample actions from the policy; otherwise act deterministically.
        :param render: If True, render the first trajectory of the batch.
        :param rew_field_name: Optional log field for the mean/std of the discounted rewards.
        :return: A TrajectoryCollection containing the collected trajectories.
"""
with self.conf.ns('policy'):
t = 0
k = 0
trajectories = []
while t < batch_size:
tr = self.execute_policy_once(np.minimum(batch_size - t, max_steps), policy, render and k == 0, opt=not exploration)
trajectories.append(tr)
t += len(tr)
k += 1
if rew_field_name is not None:
disc_rewards = [traj.discounted_reward(self.discount()) for traj in trajectories]
self.mc['log'].log({rew_field_name: [np.mean(disc_rewards), np.std(disc_rewards)]})
self.epoch += 1
tj = TrajectoryCollection(self, sum([len(tra) for tra in trajectories]))
tj.extend(trajectories)
tj.print()
return tj
def execute_policy_once(self, max_steps, policy, render=False, opt=False):
"""
        Execute a policy once, for at most the maximum number of steps or until the environment signals done.
        :param max_steps: The maximum number of steps, if done is not signalled earlier.
        :param policy: The policy to use.
        :param render: Render the environment.
        :param opt: If True, take the deterministic policy mode instead of sampling actions.
:return: The finalized and built trajectory.
"""
# reset environment and create empty trajectory
env = GentlyTerminating(gym.make(self.env_name)) if not self.render else self.env
cs = env.reset()
if self._normalize: cs /= env.observation_space.high
self.state_filter.register(cs)
# create new trajectory builder
with self.thread_lock:
new_id = self.created_trajectories
self.created_trajectories += 1
traj_builder = TrajectoryBuilder(new_id, self, cs)
t = 0
# repeat for the number of steps
while max_steps is None or t < max_steps:
# sample distribution based on state
tcs = torch.Tensor(cs)
tcs = tcs.view([1, -1])
# sample action and calc log likelihood
suff_stats = policy.forward(tcs)
a = policy.mode(suff_stats=suff_stats) \
if opt else policy.sample_actions(suff_stats=suff_stats)
ll = policy.log_prob(a, suff_stats=suff_stats)
# prepare for usage
ll = ll.detach().numpy()[0]
a = a.detach().numpy()[0]
cs, r, done, info = env.step(a)
self.state_filter.register(cs)
            self.reward_filter.register(r)
# bug fix for quanser
cs /= env.observation_space.high
t += 1
# only punish if episode aborted
traj_builder.observe(a, r, cs, ll, int(done))
# render if needed
if render: env.render()
# break if necessary
if done: break
# compile using the discount factor
traj = traj_builder.finalize()
self.max_traj_len = max(self.max_traj_len, t)
env.close()
return traj
@property
def observation_space(self):
"""
Bounds for the observation space.
:return: Bound of the observation space.
"""
return self.obs_sp
@property
def action_space(self):
"""
Bounds for the action space.
:return: Bound of the action space.
"""
return self.act_sp
@property
def observation_dim(self):
"""
Dimension of observation space.
:return: Dimension of the observation space.
"""
return int(np.prod(self.observation_space.high.shape))
@property
def action_dim(self):
"""
Dimension of action space.
:return: Dimension of the action space.
"""
return int(np.prod(self.action_space.high.shape))
|
the-stack_0_9268 | # Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import TwilioTaskRouterClient
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
workspace_sid = "WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
workflow_sid = "WWXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
client = TwilioTaskRouterClient(account_sid, auth_token)
workflow = client.workflows(workspace_sid).update(
    workflow_sid, task_reservation_timeout='20'
)
print(workflow.task_reservation_timeout)
# alternatively
workflow = client.workflows(workspace_sid).get(workflow_sid)
workflow = workflow.update(task_reservation_timeout='20')
print(workflow.task_reservation_timeout)
|
the-stack_0_9269 | import torch
from torch import nn
import numpy as np
import os
from .utils.detect_face import detect_face, extract_face
class PNet(nn.Module):
"""MTCNN PNet.
Keyword Arguments:
pretrained {bool} -- Whether or not to load saved pretrained weights (default: {True})
"""
def __init__(self, pretrained=True):
super().__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=3)
self.prelu1 = nn.PReLU(10)
self.pool1 = nn.MaxPool2d(2, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(10, 16, kernel_size=3)
self.prelu2 = nn.PReLU(16)
self.conv3 = nn.Conv2d(16, 32, kernel_size=3)
self.prelu3 = nn.PReLU(32)
self.conv4_1 = nn.Conv2d(32, 2, kernel_size=1)
self.softmax4_1 = nn.Softmax(dim=1)
self.conv4_2 = nn.Conv2d(32, 4, kernel_size=1)
self.training = False
if pretrained:
state_dict_path = os.path.join(os.path.dirname(__file__), '../data/pnet.pt')
state_dict = torch.load(state_dict_path)
self.load_state_dict(state_dict)
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.conv3(x)
x = self.prelu3(x)
a = self.conv4_1(x)
a = self.softmax4_1(a)
b = self.conv4_2(x)
return b, a
class RNet(nn.Module):
"""MTCNN RNet.
Keyword Arguments:
pretrained {bool} -- Whether or not to load saved pretrained weights (default: {True})
"""
def __init__(self, pretrained=True):
super().__init__()
self.conv1 = nn.Conv2d(3, 28, kernel_size=3)
self.prelu1 = nn.PReLU(28)
self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(28, 48, kernel_size=3)
self.prelu2 = nn.PReLU(48)
self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv3 = nn.Conv2d(48, 64, kernel_size=2)
self.prelu3 = nn.PReLU(64)
self.dense4 = nn.Linear(576, 128)
self.prelu4 = nn.PReLU(128)
self.dense5_1 = nn.Linear(128, 2)
self.softmax5_1 = nn.Softmax(dim=1)
self.dense5_2 = nn.Linear(128, 4)
self.training = False
if pretrained:
state_dict_path = os.path.join(os.path.dirname(__file__), '../data/rnet.pt')
state_dict = torch.load(state_dict_path)
self.load_state_dict(state_dict)
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.prelu3(x)
x = x.permute(0, 3, 2, 1).contiguous()
x = self.dense4(x.view(x.shape[0], -1))
x = self.prelu4(x)
a = self.dense5_1(x)
a = self.softmax5_1(a)
b = self.dense5_2(x)
return b, a
class ONet(nn.Module):
"""MTCNN ONet.
Keyword Arguments:
pretrained {bool} -- Whether or not to load saved pretrained weights (default: {True})
"""
def __init__(self, pretrained=True):
super().__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3)
self.prelu1 = nn.PReLU(32)
self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
self.prelu2 = nn.PReLU(64)
self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3)
self.prelu3 = nn.PReLU(64)
self.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True)
self.conv4 = nn.Conv2d(64, 128, kernel_size=2)
self.prelu4 = nn.PReLU(128)
self.dense5 = nn.Linear(1152, 256)
self.prelu5 = nn.PReLU(256)
self.dense6_1 = nn.Linear(256, 2)
self.softmax6_1 = nn.Softmax(dim=1)
self.dense6_2 = nn.Linear(256, 4)
self.dense6_3 = nn.Linear(256, 10)
self.training = False
if pretrained:
state_dict_path = os.path.join(os.path.dirname(__file__), '../data/onet.pt')
state_dict = torch.load(state_dict_path)
self.load_state_dict(state_dict)
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.prelu3(x)
x = self.pool3(x)
x = self.conv4(x)
x = self.prelu4(x)
x = x.permute(0, 3, 2, 1).contiguous()
x = self.dense5(x.view(x.shape[0], -1))
x = self.prelu5(x)
a = self.dense6_1(x)
a = self.softmax6_1(a)
b = self.dense6_2(x)
c = self.dense6_3(x)
return b, c, a
class MTCNN(nn.Module):
"""MTCNN face detection module.
This class loads pretrained P-, R-, and O-nets and returns images cropped to include the face
only, given raw input images of one of the following types:
- PIL image or list of PIL images
- numpy.ndarray (uint8) representing either a single image (3D) or a batch of images (4D).
Cropped faces can optionally be saved to file
also.
Keyword Arguments:
image_size {int} -- Output image size in pixels. The image will be square. (default: {160})
margin {int} -- Margin to add to bounding box, in terms of pixels in the final image.
Note that the application of the margin differs slightly from the davidsandberg/facenet
repo, which applies the margin to the original image before resizing, making the margin
dependent on the original image size (this is a bug in davidsandberg/facenet).
(default: {0})
min_face_size {int} -- Minimum face size to search for. (default: {20})
thresholds {list} -- MTCNN face detection thresholds (default: {[0.6, 0.7, 0.7]})
factor {float} -- Factor used to create a scaling pyramid of face sizes. (default: {0.709})
post_process {bool} -- Whether or not to post process images tensors before returning.
(default: {True})
select_largest {bool} -- If True, if multiple faces are detected, the largest is returned.
If False, the face with the highest detection probability is returned.
(default: {True})
selection_method {string} -- Which heuristic to use for selection. Default None. If
specified, will override select_largest:
"probability": highest probability selected
"largest": largest box selected
"largest_over_threshold": largest box over a certain probability selected
"center_weighted_size": box size minus weighted squared offset from image center
(default: {None})
keep_all {bool} -- If True, all detected faces are returned, in the order dictated by the
select_largest parameter. If a save_path is specified, the first face is saved to that
path and the remaining faces are saved to <save_path>1, <save_path>2 etc.
(default: {False})
device {torch.device} -- The device on which to run neural net passes. Image tensors and
models are copied to this device before running forward passes. (default: {None})
"""
def __init__(
self, image_size=160, margin=0, min_face_size=20,
thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=True,
select_largest=True, selection_method=None, keep_all=False, device=None
):
super().__init__()
self.image_size = image_size
self.margin = margin
self.min_face_size = min_face_size
self.thresholds = thresholds
self.factor = factor
self.post_process = post_process
self.select_largest = select_largest
self.keep_all = keep_all
self.selection_method = selection_method
self.pnet = PNet()
self.rnet = RNet()
self.onet = ONet()
self.device = torch.device('cpu')
if device is not None:
self.device = device
self.to(device)
if not self.selection_method:
self.selection_method = 'largest' if self.select_largest else 'probability'
def forward(self, img, save_path=None, return_prob=False):
"""Run MTCNN face detection on a PIL image or numpy array. This method performs both
detection and extraction of faces, returning tensors representing detected faces rather
than the bounding boxes. To access bounding boxes, see the MTCNN.detect() method below.
Arguments:
img {PIL.Image, np.ndarray, or list} -- A PIL image, np.ndarray, torch.Tensor, or list.
Keyword Arguments:
save_path {str} -- An optional save path for the cropped image. Note that when
self.post_process=True, although the returned tensor is post processed, the saved
face image is not, so it is a true representation of the face in the input image.
If `img` is a list of images, `save_path` should be a list of equal length.
(default: {None})
return_prob {bool} -- Whether or not to return the detection probability.
(default: {False})
Returns:
            tuple(torch.Tensor, np.ndarray) -- The cropped face tensor(s) together with the detected
            bounding boxes. If detected, each cropped face has dimensions 3 x image_size x image_size;
            when return_prob is True, the detection probabilities are returned as a third element.
            If self.keep_all is True, n detected faces are returned in an n x 3 x image_size x
            image_size tensor. If `img` is a list of images, the item(s) returned have an extra
            dimension (batch) as the first dimension.
Example:
>>> from facenet_pytorch import MTCNN
>>> mtcnn = MTCNN()
            >>> face_tensor, boxes, prob = mtcnn(img, save_path='face.png', return_prob=True)
"""
# Detect faces
batch_boxes, batch_probs, batch_points = self.detect(img, landmarks=True)
# Select faces
if not self.keep_all:
batch_boxes, batch_probs, batch_points = self.select_boxes(
batch_boxes, batch_probs, batch_points, img, method=self.selection_method
)
# Extract faces
faces = self.extract(img, batch_boxes, save_path)
if return_prob:
return faces, batch_boxes, batch_probs
else:
return faces, batch_boxes
def detect(self, img, landmarks=False):
"""Detect all faces in PIL image and return bounding boxes and optional facial landmarks.
This method is used by the forward method and is also useful for face detection tasks
that require lower-level handling of bounding boxes and facial landmarks (e.g., face
tracking). The functionality of the forward function can be emulated by using this method
followed by the extract_face() function.
Arguments:
img {PIL.Image, np.ndarray, or list} -- A PIL image, np.ndarray, torch.Tensor, or list.
Keyword Arguments:
landmarks {bool} -- Whether to return facial landmarks in addition to bounding boxes.
(default: {False})
Returns:
tuple(numpy.ndarray, list) -- For N detected faces, a tuple containing an
Nx4 array of bounding boxes and a length N list of detection probabilities.
Returned boxes will be sorted in descending order by detection probability if
self.select_largest=False, otherwise the largest face will be returned first.
If `img` is a list of images, the items returned have an extra dimension
(batch) as the first dimension. Optionally, a third item, the facial landmarks,
are returned if `landmarks=True`.
Example:
>>> from PIL import Image, ImageDraw
>>> from facenet_pytorch import MTCNN, extract_face
>>> mtcnn = MTCNN(keep_all=True)
>>> boxes, probs, points = mtcnn.detect(img, landmarks=True)
>>> # Draw boxes and save faces
>>> img_draw = img.copy()
>>> draw = ImageDraw.Draw(img_draw)
>>> for i, (box, point) in enumerate(zip(boxes, points)):
... draw.rectangle(box.tolist(), width=5)
... for p in point:
... draw.rectangle((p - 10).tolist() + (p + 10).tolist(), width=10)
... extract_face(img, box, save_path='detected_face_{}.png'.format(i))
>>> img_draw.save('annotated_faces.png')
"""
with torch.no_grad():
batch_boxes, batch_points = detect_face(
img, self.min_face_size,
self.pnet, self.rnet, self.onet,
self.thresholds, self.factor,
self.device
)
boxes, probs, points = [], [], []
for box, point in zip(batch_boxes, batch_points):
box = np.array(box)
point = np.array(point)
if len(box) == 0:
boxes.append(None)
probs.append([None])
points.append(None)
elif self.select_largest:
box_order = np.argsort((box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1]))[::-1]
box = box[box_order]
point = point[box_order]
boxes.append(box[:, :4])
probs.append(box[:, 4])
points.append(point)
else:
boxes.append(box[:, :4])
probs.append(box[:, 4])
points.append(point)
boxes = np.array(boxes)
probs = np.array(probs)
points = np.array(points)
if (
not isinstance(img, (list, tuple)) and
not (isinstance(img, np.ndarray) and len(img.shape) == 4) and
not (isinstance(img, torch.Tensor) and len(img.shape) == 4)
):
boxes = boxes[0]
probs = probs[0]
points = points[0]
if landmarks:
return boxes, probs, points
return boxes, probs
def select_boxes(
self, all_boxes, all_probs, all_points, imgs, method='probability', threshold=0.9,
center_weight=2.0
):
"""Selects a single box from multiple for a given image using one of multiple heuristics.
Arguments:
all_boxes {np.ndarray} -- Ix0 ndarray where each element is a Nx4 ndarry of
bounding boxes for N detected faces in I images (output from self.detect).
all_probs {np.ndarray} -- Ix0 ndarray where each element is a Nx0 ndarry of
probabilities for N detected faces in I images (output from self.detect).
all_points {np.ndarray} -- Ix0 ndarray where each element is a Nx5x2 array of
points for N detected faces. (output from self.detect).
imgs {PIL.Image, np.ndarray, or list} -- A PIL image, np.ndarray, torch.Tensor, or list.
Keyword Arguments:
method {str} -- Which heuristic to use for selection:
"probability": highest probability selected
"largest": largest box selected
"largest_over_theshold": largest box over a certain probability selected
"center_weighted_size": box size minus weighted squared offset from image center
(default: {'probability'})
            threshold {float} -- threshold for "largest_over_threshold" method. (default: {0.9})
center_weight {float} -- weight for squared offset in center weighted size method.
(default: {2.0})
Returns:
tuple(numpy.ndarray, numpy.ndarray, numpy.ndarray) -- nx4 ndarray of bounding boxes
for n images. Ix0 array of probabilities for each box, array of landmark points.
"""
#copying batch detection from extract, but would be easier to ensure detect creates consistent output.
batch_mode = True
if (
not isinstance(imgs, (list, tuple)) and
not (isinstance(imgs, np.ndarray) and len(imgs.shape) == 4) and
not (isinstance(imgs, torch.Tensor) and len(imgs.shape) == 4)
):
imgs = [imgs]
all_boxes = [all_boxes]
all_probs = [all_probs]
all_points = [all_points]
batch_mode = False
selected_boxes, selected_probs, selected_points = [], [], []
for boxes, points, probs, img in zip(all_boxes, all_points, all_probs, imgs):
if boxes is None:
selected_boxes.append(None)
selected_probs.append([None])
selected_points.append(None)
continue
# If at least 1 box found
boxes = np.array(boxes)
probs = np.array(probs)
points = np.array(points)
if method == 'largest':
box_order = np.argsort((boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]))[::-1]
elif method == 'probability':
box_order = np.argsort(probs)[::-1]
elif method == 'center_weighted_size':
box_sizes = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
img_center = (img.width / 2, img.height/2)
box_centers = np.array(list(zip((boxes[:, 0] + boxes[:, 2]) / 2, (boxes[:, 1] + boxes[:, 3]) / 2)))
offsets = box_centers - img_center
offset_dist_squared = np.sum(np.power(offsets, 2.0), 1)
box_order = np.argsort(box_sizes - offset_dist_squared * center_weight)[::-1]
elif method == 'largest_over_threshold':
box_mask = probs > threshold
boxes = boxes[box_mask]
box_order = np.argsort((boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]))[::-1]
if sum(box_mask) == 0:
selected_boxes.append(None)
selected_probs.append([None])
selected_points.append(None)
continue
box = boxes[box_order][[0]]
prob = probs[box_order][[0]]
point = points[box_order][[0]]
selected_boxes.append(box)
selected_probs.append(prob)
selected_points.append(point)
if batch_mode:
selected_boxes = np.array(selected_boxes)
selected_probs = np.array(selected_probs)
selected_points = np.array(selected_points)
else:
selected_boxes = selected_boxes[0]
selected_probs = selected_probs[0][0]
selected_points = selected_points[0]
return selected_boxes, selected_probs, selected_points
def extract(self, img, batch_boxes, save_path):
# Determine if a batch or single image was passed
batch_mode = True
if (
not isinstance(img, (list, tuple)) and
not (isinstance(img, np.ndarray) and len(img.shape) == 4) and
not (isinstance(img, torch.Tensor) and len(img.shape) == 4)
):
img = [img]
batch_boxes = [batch_boxes]
batch_mode = False
# Parse save path(s)
if save_path is not None:
if isinstance(save_path, str):
save_path = [save_path]
else:
save_path = [None for _ in range(len(img))]
# Process all bounding boxes
faces = []
for im, box_im, path_im in zip(img, batch_boxes, save_path):
if box_im is None:
faces.append(None)
continue
if not self.keep_all:
box_im = box_im[[0]]
faces_im = []
for i, box in enumerate(box_im):
face_path = path_im
if path_im is not None and i > 0:
save_name, ext = os.path.splitext(path_im)
face_path = save_name + '_' + str(i + 1) + ext
face = extract_face(im, box, self.image_size, self.margin, face_path)
if self.post_process:
face = fixed_image_standardization(face)
faces_im.append(face)
if self.keep_all:
faces_im = torch.stack(faces_im)
else:
faces_im = faces_im[0]
faces.append(faces_im)
if not batch_mode:
faces = faces[0]
return faces
def fixed_image_standardization(image_tensor):
processed_tensor = (image_tensor - 127.5) / 128.0
return processed_tensor
def prewhiten(x):
mean = x.mean()
std = x.std()
std_adj = std.clamp(min=1.0/(float(x.numel())**0.5))
y = (x - mean) / std_adj
return y
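# --- Hedged usage sketch (not part of the original file) ---
# Quick smoke test of the detector, assuming the bundled pretrained weights
# (../data/pnet.pt, rnet.pt, onet.pt) are available. With random pixels no face
# is expected, so `boxes` is normally None.
if __name__ == '__main__':
    from PIL import Image
    mtcnn = MTCNN(keep_all=True)
    img = Image.fromarray(np.uint8(np.random.rand(480, 640, 3) * 255))
    boxes, probs = mtcnn.detect(img)
    print('boxes:', boxes, 'probs:', probs)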
|
the-stack_0_9270 | # coding=utf-8
# Copyright (c) 2017,2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import pytest
import requests
from f5_openstack_agent.lbaasv2.drivers.bigip.resource_helper import \
ResourceType
from ..testlib.resource_validator import ResourceValidator
from ..testlib.service_reader import LoadbalancerReader
requests.packages.urllib3.disable_warnings()
LOG = logging.getLogger(__name__)
@pytest.fixture(scope="module")
def services():
neutron_services_filename = (
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'../../testdata/service_requests/pool_multiple_members.json')
)
return (json.load(open(neutron_services_filename)))
def test_pool_lb_change_ratio(track_bigip_cfg, bigip, services, icd_config,
icontrol_driver):
env_prefix = icd_config['environment_prefix']
service_iter = iter(services)
validator = ResourceValidator(bigip, env_prefix)
# create lb
service = service_iter.next()
lb_reader = LoadbalancerReader(service)
folder = '{0}_{1}'.format(env_prefix, lb_reader.tenant_id())
icontrol_driver._common_service_handler(service)
assert bigip.folder_exists(folder)
# create listener
service = service_iter.next()
icontrol_driver._common_service_handler(service)
# create pool with round-robin, no members
service = service_iter.next()
icontrol_driver._common_service_handler(service)
pool_srvc = service['pools'][0]
pool_name = '{0}_{1}'.format(env_prefix, pool_srvc['id'])
validator.assert_pool_valid(pool_srvc, folder)
pool = bigip.get_resource(ResourceType.pool, pool_name, partition=folder)
assert pool.loadBalancingMode == 'round-robin'
# create member with weight = 1
service = service_iter.next()
member = service['members'][0]
icontrol_driver._common_service_handler(service)
validator.assert_member_valid(pool_srvc, member, folder)
pool.refresh()
assert pool.loadBalancingMode == 'round-robin'
# create member with weight > 1
service = service_iter.next()
member = service['members'][1]
icontrol_driver._common_service_handler(service)
validator.assert_member_valid(pool_srvc, member, folder)
pool.refresh()
assert pool.loadBalancingMode == 'ratio-member'
# create member with weight = 1
service = service_iter.next()
member = service['members'][2]
icontrol_driver._common_service_handler(service)
validator.assert_member_valid(pool_srvc, member, folder)
pool.refresh()
assert pool.loadBalancingMode == 'ratio-member'
# delete pool member with weight > 1
service = service_iter.next()
icontrol_driver._common_service_handler(service)
validator.assert_pool_valid(pool_srvc, folder)
pool.refresh()
assert pool.loadBalancingMode == 'round-robin'
# update pool to have lb method least connections
service = service_iter.next()
icontrol_driver._common_service_handler(service)
validator.assert_pool_valid(pool_srvc, folder)
pool.refresh()
assert pool.loadBalancingMode == 'least-connections-member'
# create member with weight > 1
service = service_iter.next()
member = service['members'][2]
icontrol_driver._common_service_handler(service)
validator.assert_pool_valid(pool_srvc, folder)
validator.assert_member_valid(pool_srvc, member, folder)
pool.refresh()
assert pool.loadBalancingMode == 'ratio-least-connections-member'
# delete member with weight > 1
service = service_iter.next()
icontrol_driver._common_service_handler(service)
validator.assert_pool_valid(pool_srvc, folder)
pool.refresh()
assert pool.loadBalancingMode == 'least-connections-member'
# delete second member
service = service_iter.next()
icontrol_driver._common_service_handler(service)
validator.assert_pool_valid(pool_srvc, folder)
pool.refresh()
assert pool.loadBalancingMode == 'least-connections-member'
# set lb method to SOURCE_IP for pool
service = service_iter.next()
icontrol_driver._common_service_handler(service)
validator.assert_pool_valid(pool_srvc, folder)
pool.refresh()
assert pool.loadBalancingMode == 'least-connections-node'
# update member to have weight > 1
service = service_iter.next()
member = service['members'][0]
icontrol_driver._common_service_handler(service)
validator.assert_pool_valid(pool_srvc, folder)
validator.assert_member_valid(pool_srvc, member, folder)
pool.refresh()
assert pool.loadBalancingMode == 'least-connections-node'
# delete remaining member
service = service_iter.next()
icontrol_driver._common_service_handler(service)
validator.assert_pool_valid(pool_srvc, folder)
pool.refresh()
assert pool.loadBalancingMode == 'least-connections-node'
# delete pool
service = service_iter.next()
icontrol_driver._common_service_handler(service)
assert not bigip.resource_exists(
ResourceType.pool, pool_name, partition=folder)
# delete listener
service = service_iter.next()
icontrol_driver._common_service_handler(service)
# delete lb
service = service_iter.next()
icontrol_driver._common_service_handler(service)
|
the-stack_0_9272 | import gym
from gym import error, spaces, utils
from gym.utils import seeding
from math import gcd
import pygame
import numpy as np
class MARLEnv(gym.Env):
WINDOW_HEIGHT = 360
WINDOW_WIDTH = 640
CELL_LENGTH = gcd(WINDOW_HEIGHT, WINDOW_WIDTH)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
YELLOW = (247, 240, 48)
RED = (201, 16, 41)
BLUE = (0, 0, 255)
PADDING = 5
MAX_NUMBER_OF_AGENTS = 5
MIN_BALLS_COUNT = 10
MAX_BALLS_COUNT = 20
MIN_PITS_COUNT = 3
MAX_PITS_COUNT = 10
ROBOT_PLAYER = "../assets/robot-pack/PNG/Top view/robot_yellow.png"
ROBOT_LOADED = "../assets/robot-pack/PNG/Top view/robot_green.png"
ROBOT_UN_LOADED = "../assets/robot-pack/PNG/Top view/robot_red.png"
TARGET_FLAG = "../assets/kenney_sportspack/PNG/Equipment/flag_checkered.png"
BALL = "../assets/kenney_sportspack/PNG/Equipment/ball_soccer1.png"
def __init__(self):
pygame.init()
self.game_window = pygame.display.set_mode((MARLEnv.WINDOW_WIDTH, MARLEnv.WINDOW_HEIGHT), 0, 32)
self.grid = None
self.agents = None
self.source_balls = None
self.target_balls = None
self.pits_pos = None
# Initialize the agents number.
self.reset()
def render(self, mode='human', close=False):
# Fill window.
self.game_window.fill(MARLEnv.WHITE)
############################
# Draw the grid.
############################
h, w = self.grid.shape
for i in range(0, w, MARLEnv.CELL_LENGTH):
pygame.draw.line(self.game_window, MARLEnv.BLACK, (i, 0),
(i, MARLEnv.WINDOW_HEIGHT - 1))
for j in range(0, h, MARLEnv.CELL_LENGTH):
pygame.draw.line(self.game_window, MARLEnv.BLACK, (0, j), (MARLEnv.WINDOW_WIDTH - 1, j))
############################
# Draw the pits.
############################
for pit_pos in self.pits_pos:
pygame.draw.rect(self.game_window, MARLEnv.RED,
(pit_pos[0] * MARLEnv.CELL_LENGTH, pit_pos[1] * MARLEnv.CELL_LENGTH, MARLEnv.CELL_LENGTH,
MARLEnv.CELL_LENGTH))
############################
# Draw the source and the dest boxes.
############################
pygame.draw.rect(self.game_window, MARLEnv.BLUE,
(0, 0, MARLEnv.CELL_LENGTH, MARLEnv.CELL_LENGTH))
i, j = (
MARLEnv.WINDOW_HEIGHT - MARLEnv.CELL_LENGTH + 1,
MARLEnv.WINDOW_WIDTH - MARLEnv.CELL_LENGTH + 1
)
pygame.draw.rect(self.game_window, MARLEnv.YELLOW,
(j, i, i + MARLEnv.CELL_LENGTH, j + MARLEnv.CELL_LENGTH))
############################
# Draw the agents.
############################
i = 0
for agent in self.agents:
if i == 0:
robot_img = pygame.image.load(MARLEnv.ROBOT_PLAYER).convert_alpha()
elif agent['loaded']:
robot_img = pygame.image.load(MARLEnv.ROBOT_LOADED).convert_alpha()
else:
robot_img = pygame.image.load(MARLEnv.ROBOT_UN_LOADED).convert_alpha()
robot_img = pygame.transform.scale(robot_img,
(MARLEnv.CELL_LENGTH - 2 * MARLEnv.PADDING,
MARLEnv.CELL_LENGTH - 2 * MARLEnv.PADDING))
robot_img_rect = (
agent['pos'][0] * MARLEnv.CELL_LENGTH + MARLEnv.PADDING,
agent['pos'][1] * MARLEnv.CELL_LENGTH + MARLEnv.PADDING)
self.game_window.blit(robot_img, robot_img_rect)
i += 1
############################
# Draw the target flag.
############################
flag = pygame.image.load(MARLEnv.TARGET_FLAG).convert_alpha()
flag = pygame.transform.scale(flag, (30, 30))
flag_rect = (
MARLEnv.WINDOW_WIDTH - MARLEnv.CELL_LENGTH,
MARLEnv.WINDOW_HEIGHT - MARLEnv.CELL_LENGTH - MARLEnv.PADDING
)
self.game_window.blit(flag, flag_rect)
############################
# Draw the items (balls).
############################
for ball in self.source_balls:
ball_img = pygame.image.load(MARLEnv.BALL).convert_alpha()
ball_rect = (ball['pos'][0] - MARLEnv.PADDING, ball['pos'][1] - MARLEnv.PADDING)
self.game_window.blit(ball_img, ball_rect)
for ball in self.target_balls:
ball_img = pygame.image.load(MARLEnv.BALL).convert_alpha()
ball_rect = (ball['pos'][0] + MARLEnv.PADDING, ball['pos'][1] + MARLEnv.PADDING)
self.game_window.blit(ball_img, ball_rect)
############################
# Update pygame display(required).
############################
pygame.display.update()
return
def step(self, action):
"""
Parameters
----------
        action : int
            One of 0 (LEFT), 1 (RIGHT), 2 (UP), 3 (DOWN), 4 (PICK_UP), 5 (DROP).
        Returns
        -------
        reward, episode_over, observation : tuple
            reward (float) :
                amount of reward achieved by the previous action. The scale
                varies between environments, but the goal is always to increase
                your total reward.
            episode_over (bool) :
                whether it's time to reset the environment again. Most (but not
                all) tasks are divided up into well-defined episodes, and done
                being True indicates the episode has terminated. (For example,
                perhaps the pole tipped too far, or you lost your last life.)
            observation (np.chararray) :
                a character grid describing the current state of the environment
                (player, other agents, pits and ball racks), as produced by
                get_observation().
"""
x, y = self.agents[0]['pos']
pickup = False
drop = False
false_pickup = False
false_drop = False
collision = False
reward = 0
episode_over = False
print(action)
if action == 0: # 'LEFT':
x -= 1
elif action == 1: # 'RIGHT':
x += 1
elif action == 2: # 'UP':
y -= 1
elif action == 3: # 'DOWN':
y += 1
elif action == 4: # 'PICK_UP':
# check if he picked up correctly in the right place and there exists at least one ball in the source.
if not ((y, x) in [(0, 1), (1, 0), (1, 1)] and len(self.source_balls) > 0 and (
not self.agents[0]['loaded'])):
false_pickup = True
else:
pickup = True
self.agents[0]['loaded'] = True
ball = self.source_balls.pop(len(self.source_balls) - 1)
self.agents[0]['balls'].append(ball)
self.agents[0]['steps'] = -1
elif action == 5:
drop = True
last_rack_idx_x = MARLEnv.WINDOW_WIDTH // MARLEnv.CELL_LENGTH - 1
last_rack_idx_y = MARLEnv.WINDOW_HEIGHT // MARLEnv.CELL_LENGTH - 1
if (self.agents[0]['loaded'] and
(y, x) in [(last_rack_idx_y, last_rack_idx_x - 1), (last_rack_idx_y - 1, last_rack_idx_x),
(last_rack_idx_y - 1, last_rack_idx_x - 1)] and
len(self.source_balls) > 0):
ball = self.agents[0]['balls'].pop()
ball['pos'] = (
np.random.randint(MARLEnv.WINDOW_WIDTH - MARLEnv.CELL_LENGTH,
MARLEnv.WINDOW_WIDTH - MARLEnv.PADDING),
np.random.randint(MARLEnv.WINDOW_HEIGHT - MARLEnv.CELL_LENGTH,
MARLEnv.WINDOW_HEIGHT - MARLEnv.PADDING)
)
self.target_balls.append(ball)
self.agents[0]['loaded'] = len(self.agents[0]['balls']) > 0
self.agents[0]['steps'] = -1
elif (self.agents[0]['loaded'] and
(y, x) in [(last_rack_idx_y, last_rack_idx_x - 1), (last_rack_idx_y - 1, last_rack_idx_x),
(last_rack_idx_y - 1, last_rack_idx_x - 1)]):
false_drop = True
episode_over = True
else:
false_drop = True
if (x, y) in self.pits_pos or (x, y) in [self.agents[i]['pos'] for i in range(1, len(self.agents))]:
collision = True
episode_over = True
self.agents[0]['steps'] += 1
self.agents[0]['pos'] = (x, y)
        # TODO add missed pickups
reward = -collision * 100 - \
false_drop * 80 - \
false_pickup * 70 - \
self.agents[0]['steps'] + \
90 * drop * (not false_drop) + \
90 * pickup * (not false_pickup)
observation = self.get_observation()
print(reward, x, y)
return reward, episode_over, observation
def reset(self):
# Add pits.
self.pits_pos = []
for i in range(np.random.randint(MARLEnv.MIN_PITS_COUNT, MARLEnv.MAX_PITS_COUNT)):
self.pits_pos.append(
(
np.random.randint(3, MARLEnv.WINDOW_WIDTH // MARLEnv.CELL_LENGTH - 2),
np.random.randint(3, MARLEnv.WINDOW_HEIGHT // MARLEnv.CELL_LENGTH - 2)
)
)
# Initialize the agents number.
self.agents = []
for i in range(np.random.randint(2, MARLEnv.MAX_NUMBER_OF_AGENTS)):
x, y = (np.random.randint(0, MARLEnv.WINDOW_WIDTH // MARLEnv.CELL_LENGTH),
np.random.randint(0, MARLEnv.WINDOW_HEIGHT // MARLEnv.CELL_LENGTH))
while (x, y) in self.pits_pos:
x, y = (np.random.randint(0, MARLEnv.WINDOW_WIDTH // MARLEnv.CELL_LENGTH),
np.random.randint(0, MARLEnv.WINDOW_HEIGHT // MARLEnv.CELL_LENGTH))
self.agents.append(
{
'pos': (x, y),
'steps': 0,
'loaded': False,
'balls': []
}
)
# Initialize the grid.
self.grid = np.zeros((MARLEnv.WINDOW_HEIGHT, MARLEnv.WINDOW_WIDTH))
# Initialize the items(balls) parameters.
self.source_balls = []
for i in range(np.random.randint(MARLEnv.MIN_BALLS_COUNT, MARLEnv.MAX_BALLS_COUNT)):
self.source_balls.append(
{
'pos': (np.random.randint(0, MARLEnv.CELL_LENGTH // 1.5),
                            np.random.randint(0, MARLEnv.CELL_LENGTH // 1.5))
}
)
self.target_balls = []
def get_observation(self):
        ob = np.chararray(
(MARLEnv.WINDOW_HEIGHT // MARLEnv.CELL_LENGTH, MARLEnv.WINDOW_WIDTH // MARLEnv.CELL_LENGTH))
ob[:] = '.'
# set the source balls.
if len(self.source_balls) > 0:
ob[0][0] = 'X'
else:
ob[0][0] = 'E'
# set the player.
x, y = self.agents[0]['pos']
ob[y][x] = 'P'
# set other agents
for i in range(1, len(self.agents)):
agent = self.agents[i]
x, y = agent['pos']
ob[y][x] = '*' # TODO @Samir, try to make it different.
# set pits
for pit_pos in self.pits_pos:
x, y = pit_pos
ob[y][x] = '*'
# set target balls/
if len(self.target_balls) > 0:
ob[-1][-1] = 'X'
else:
ob[-1][-1] = 'E'
return ob
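# --- Hedged usage sketch (not part of the original environment) ---
# A minimal random-action driver for MARLEnv, assuming the asset paths used by
# render() exist on disk. Action ids follow step(): 0-3 move, 4 pick up, 5 drop.
if __name__ == '__main__':
    env = MARLEnv()
    env.reset()
    for _ in range(200):                      # cap the demo length
        env.render()
        action = np.random.randint(0, 6)      # random policy, for illustration only
        reward, episode_over, observation = env.step(action)
        if episode_over:
            env.reset()
    pygame.quit()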
|
the-stack_0_9273 | from typing import List, NamedTuple
import libkol
from ..Error import (
InvalidLocationError,
NotEnoughMeatError,
UnknownError,
WrongKindOfItemError,
)
from ..util import parsing
from .request import Request
from ..Store import Store
class Response(NamedTuple):
items: List["libkol.types.ItemQuantity"]
meat_gained: int
class npc_buy(Request):
"""
Purchases items from an NPC store.
:param session: Active session
:param store: NPC store to buy from
:param item: Item to buy
:param quantity: Quantity of said item to buy
"""
def __init__(
self,
session: "libkol.Session",
store: Store,
item: "libkol.Item",
quantity: int = 1,
) -> None:
if item.store_id != store.id:
raise WrongKindOfItemError("This item cannot be purchased in that store")
# Gift shop is handled differently
if store.slug == "town_giftshop.php":
params = {"action": "buy", "howmany": quantity, "whichitem": item.id}
self.request = session.request("town_giftshop.php", pwd=True, params=params)
return
params = {"whichshop": store.slug, "action": "buyitem", "quantity": quantity}
if item.store_row:
params["whichrow"] = item.store_row
else:
params["whichitem"] = item.id
self.request = session.request("shop.php", pwd=True, params=params)
@staticmethod
async def parser(content: str, **kwargs) -> Response:
if len(content) == 0:
raise InvalidLocationError("You cannot visit that store yet.")
if "You've been sent back here by some kind of bug" in content:
raise InvalidLocationError("The store you tried to visit doesn't exist.")
if (
"This store doesn't sell that item" in content
or "Invalid item selected" in content
or "<td>That isn't a thing that's sold here.</td>" in content
):
raise WrongKindOfItemError("This store doesn't carry that item.")
if "You can't afford " in content:
raise NotEnoughMeatError(
"You do not have enough meat to purchase the item(s)."
)
items = await parsing.item(content)
if len(items) == 0:
raise UnknownError("Unknown error. No items received.")
meat = parsing.meat(content)
return Response(items, meat)
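# --- Added, hedged usage note (not part of the original module): only the
# constructor defined above is relied upon; the Session/Store/Item objects and
# how a Request is awaited come from the surrounding libkol package and are
# assumptions here.
#
# request = npc_buy(session, store, item, quantity=3)
# # `request.request` holds the pending shop.php / town_giftshop.php call made
# # through the session; its HTML response is what `parser` above expects.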
|
the-stack_0_9276 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains functions for the anime faces dataset where each image has a list labels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
from six.moves import urllib
import tensorflow as tf
from datasets import dataset_utils
import util_misc
import tensorflow.contrib.slim as slim
_FILE_PATTERN = '%s-*'
_ITEMS_TO_DESCRIPTIONS = {
'source': 'A color image of varying height and width.',
'label_text': 'The text of the label.',
'conditional_labels': 'one hot encoded labels extracted from `label_text`',
'filename': 'Name of the image file.',
}
FLAGS = tf.flags.FLAGS
DEFAULT_NUM_CLASSES = 51
TAG_TEXT_DELIMITER = ', '
_DEFAULT_TRAIN_SIZE = 27247
_DEFAULT_VALIDATION_SIZE = 641
_PRELOGITS_SIZE = 2048
def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
"""Gets a dataset tuple with instructions for reading ImageNet.
Args:
split_name: A train/test split name.
dataset_dir: The base directory of the dataset sources.
file_pattern: The file pattern to use when matching the dataset sources.
It is assumed that the pattern contains a '%s' string so that the split
name can be inserted.
reader: The TensorFlow reader type.
Returns:
A `Dataset` namedtuple.
Raises:
ValueError: if `split_name` is not a valid train/test split.
"""
assert FLAGS.num_classes == 0 or FLAGS.num_classes == DEFAULT_NUM_CLASSES
num_classes = FLAGS.num_classes or DEFAULT_NUM_CLASSES
_SPLITS_TO_SIZES = {
'train': FLAGS.train_size or _DEFAULT_TRAIN_SIZE,
'validation': FLAGS.validation_size or _DEFAULT_VALIDATION_SIZE,
}
if split_name not in _SPLITS_TO_SIZES:
raise ValueError('split name %s was not recognized.' % split_name)
if not file_pattern:
file_pattern = _FILE_PATTERN
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
# Allowing None in the signature so that dataset_factory can use the default.
if reader is None:
reader = tf.TFRecordReader
keys_to_features = {
'image/encoded': tf.FixedLenFeature(
(), tf.string, default_value=''),
'image/format': tf.FixedLenFeature(
(), tf.string, default_value='jpeg'),
'image/class/label': tf.VarLenFeature(
dtype=tf.int64),
'image/class/text': tf.FixedLenFeature(
[], dtype=tf.string, default_value=''),
'image/filename': tf.FixedLenFeature(
[], dtype=tf.string, default_value=''),
}
output_name = 'target' if FLAGS.dataset_use_target else 'source'
items_to_handlers = {
output_name: slim.tfexample_decoder.Image('image/encoded', 'image/format'),
'conditional_labels': dataset_utils.OneHotLabelTensor('image/class/text',
tags_id_lookup_file=FLAGS.tags_id_lookup_file,
num_classes=num_classes,
tags_key_column_index=FLAGS.tags_key_column_index,
tags_value_column_index=FLAGS.tags_value_column_index),
'label_text': slim.tfexample_decoder.Tensor('image/class/text'),
'filename': slim.tfexample_decoder.Tensor('image/filename'),
}
items_used = [output_name, 'conditional_labels', 'filename', 'label_text']
items_need_preprocessing = [output_name, 'conditional_labels',]
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
return slim.dataset.Dataset(
data_sources=file_pattern,
reader=reader,
decoder=decoder,
num_samples=_SPLITS_TO_SIZES[split_name],
items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
items_used=items_used,
items_need_preprocessing=items_need_preprocessing,
num_classes=num_classes,
has_source=True)
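# --- Added, hedged usage sketch (not part of the original module): reading one
# preprocessed example from the split defined above. The tfrecord directory is a
# placeholder and the FLAGS defaults are assumed to be configured elsewhere.
#
# dataset = get_split('train', '/path/to/tfrecords')
# provider = slim.dataset_data_provider.DatasetDataProvider(
#     dataset, num_readers=4, shuffle=True)
# image, labels = provider.get(['source', 'conditional_labels'])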
|
the-stack_0_9280 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.patches import PathPatch
import seaborn as sns
from ipywidgets import *
from IPython.display import display, HTML
def prodmix_graph(zoom):
# create the plot object
fig, ax = plt.subplots(figsize=(8, 8))
s = np.linspace(0, 3000)
plt.plot(s, 10000/6 - 5*s/6, lw=3, label='$5x_1 + 6x_2 \leq 10000$')
plt.fill_between(s, 0, 10000/6 - 5*s/6, alpha=0.1)
plt.plot(s, 1500 - s/2, lw=3, label='$x_1 + 2x_2 \leq 3000$')
plt.fill_between(s, 0, 1500 - s/2, alpha=0.1)
plt.plot(600 * np.ones_like(s), s, lw=3, label='$x_1 \leq 600$')
plt.fill_betweenx(s, 0, 600, alpha=0.1)
plt.plot(s, 1200 * np.ones_like(s), lw=3, label='$x_2 \leq 1200$')
    plt.fill_between(s, 0, 1200, alpha=0.1)
# add non-negativity constraints
plt.plot(s, np.zeros_like(s), lw=3, label='$x_1$ non-negative')
plt.plot(np.zeros_like(s), s, lw=3, label='$x_2$ non-negative')
# highlight the feasible region
path = Path([
(0., 0.),
(0., 1200.),
(560, 1200.),
(600., 7000/6),
(600., 0.),
(0., 0.),
])
patch = PathPatch(path, label='feasible region', alpha=0.5)
ax.add_patch(patch)
# labels and stuff
plt.xlabel('$x_1$ (Basic)', fontsize=16)
plt.ylabel('$x_2$ (XP)', fontsize=16)
if zoom:
plt.xlim(400, 800)
plt.ylim(1000, 1400)
else:
plt.xlim(-0.5, 1500)
plt.ylim(-0.5, 1500)
plt.legend(fontsize=11)
ax.legend(loc='upper right', bbox_to_anchor=(1.4, 1))
plt.show()
def prodmix_obj(zoom, margin1, margin2):
fig, ax = plt.subplots(figsize=(9, 8))
s = np.linspace(0, 1500)
plt.plot(s, 10000/6 - 5*s/6, lw=3, label='$5x_1 + 6x_2 \leq 10000$')
plt.plot(s, 1500 - s/2, lw=3, label='$x_1 + 2x_2 \leq 3000$')
plt.plot(600 * np.ones_like(s), s, lw=3, label='$x_1 \leq 600$')
plt.plot(s, 1200 * np.ones_like(s), lw=3, label='$x_2 \leq 1200$')
plt.plot(s, np.zeros_like(s), lw=3, label='$x_1$ non-negative')
plt.plot(np.zeros_like(s), s, lw=3, label='$x_2$ non-negative')
# plot the possible (x1, x2) pairs
pairs = [(x1, x2) for x1 in np.arange(start=0, stop=600, step=25)
for x2 in np.arange(start=0, stop=1200, step=30)
if (5*x1 + 6*x2) <= 10000
and (x1 + 2*x2) <= 3000
and x1<=600 and x2<=1200]
# split these into our variables
x1, x2 = np.hsplit(np.array(pairs), 2)
# caculate the objective function at each pair
z = margin1*x1 + margin2*x2 # the objective function
# plot the results
plt.scatter(x1, x2, c=z, cmap='jet',
label='Profit={} $x_1$ + {} $x_2$'.format(margin1, margin2), zorder=3)
# labels and stuff
cb = plt.colorbar()
cb.set_label('profit', fontsize=14)
plt.xlabel('$x_1$ (Basic)', fontsize=16)
plt.ylabel('$x_2$ (XP)', fontsize=16)
if zoom:
plt.xlim(400, 800)
plt.ylim(1000, 1400)
else:
plt.xlim(-0.5, 1500)
plt.ylim(-0.5, 1500)
plt.legend(fontsize=18)
ax.legend(loc='upper right', bbox_to_anchor=(1.8, 1))
plt.show()
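# --- Added cross-check (not part of the original notebook): the same product-mix
# LP solved exactly with scipy, using the constraints drawn above. `margin1` and
# `margin2` play the same role as in prodmix_obj.
def solve_prodmix_lp(margin1, margin2):
    from scipy.optimize import linprog
    # linprog minimizes, so negate the profit coefficients.
    c = [-margin1, -margin2]
    A_ub = [[5, 6],   # 5*x1 + 6*x2 <= 10000
            [1, 2]]   # x1 + 2*x2 <= 3000
    b_ub = [10000, 3000]
    bounds = [(0, 600), (0, 1200)]  # x1 <= 600, x2 <= 1200, both non-negative
    res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bounds)
    return res.x, -res.fun  # optimal (x1, x2) and the corresponding profit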
def show_integer_feasregion():
fig, ax = plt.subplots(figsize=(9, 8))
s = np.linspace(0, 50)
plt.plot(s, 45 - 5*s/7, lw=3, label='$7x_1 + 5x_2 \leq 45$')
plt.plot(s, -25 + 1.9*s, lw=3, label='$1.9x_1 - x_2 \geq 25$')
plt.plot(s, 15.5 + 5*s/9, lw=3, label='$-5x_1 + 9x_2 \leq 15.5$')
plt.plot(16 * np.ones_like(s), s, lw=3, label='$x_1 \geq 16$')
plt.plot(s, 18 * np.ones_like(s), lw=3, label='$x_2 \geq 18$')
# plot the possible (x1, x2) pairs
pairs = [(x1, x2) for x1 in np.arange(start=15, stop=31, step=1)
for x2 in np.arange(start=15, stop=31, step=1)
if (5*x1 + 6*x2) <= 10000
and (x1 + 2*x2) <= 3000
and x1<=600 and x2<=1200]
# split these into our variables
x1, x2 = np.hsplit(np.array(pairs), 2)
# plot the results
plt.scatter(x1, x2, c=0*x1 + 0*x2, cmap='jet', zorder=3)
plt.xlim(15-0.5, 30)
plt.ylim(15-0.5, 30)
plt.xlabel('$x_1$', fontsize=16)
plt.ylabel('$x_2$', fontsize=16)
lppath = Path([
(16., 18.),
(16., 24.4),
(23.3, 28.4),
(26.8, 25.8),
(22.6, 18.),
(16., 18.),
])
lppatch = PathPatch(lppath, label='LP feasible region', alpha=0.3)
ax.add_patch(lppatch)
mippath = Path([
(16., 18.),
(16., 24),
(19, 26),
(23, 28),
(25, 27),
(26, 26),
(26, 25),
(23, 19),
(22, 18.),
(16., 18.),
])
mippatch = PathPatch(mippath, label='Integer feasible region', alpha=0.5)
ax.add_patch(mippatch)
plt.legend(fontsize=18)
ax.legend(loc='upper right', bbox_to_anchor=(1.4, 1))
plt.show()
def draw_local_global_opt():
function = lambda x: (x-1)*(x-2)*(x-3)*(x-4)*(x-5)*(x-6)*(x-7)
x = np.linspace(1,7,500)
plt.figure(figsize=(12,7))
plt.plot(x, function(x), label='$f(x)$')
globalx = 1.32
localx = 3.45
plt.scatter(globalx, function(globalx), s=30, c='r', label='global opt')
plt.scatter(localx, function(localx), s=30, c='orange', label='local opt')
plt.axhline(linewidth=2, color='black')
plt.legend()
plt.show()
def showconvex(values):
plt.subplots(2, 2, figsize=(17,10))
function = lambda x: (x-3)**2
x = np.linspace(0.8,4.2,500)
plt.subplot(2,2,1)
plt.plot(x, function(x), label='$f(x)$')
line = np.array(values)
plt.plot(line, function(line), 'o-')
    plt.title('Convex: Line joining any two points is above the curve')
function = lambda x: np.log(x) - (x-2)**2
x = np.linspace(0.8,4.2,500)
plt.subplot(2,2,2)
plt.plot(x, function(x), label='$f(x)$')
line = np.array(values)
plt.plot(line, function(line), 'o-')
    plt.title('Concave: Line joining any two points is below the curve')
function = lambda x: np.log(x) - 2*x*(x-4)**2
x = np.linspace(0.8,4.2,500)
plt.subplot(2,2,3)
plt.plot(x, function(x), label='$f(x)$')
line = np.array(values)
plt.plot(line, function(line), 'o-')
    plt.title('Neither convex nor concave')
function = lambda x: np.cos(x*2)*x
x = np.linspace(0.8,4.2,500)
plt.subplot(2,2,4)
plt.plot(x, function(x), label='$f(x)$')
line = np.array(values)
plt.plot(line, function(line), 'o-')
    plt.title('Neither convex nor concave')
plt.legend()
plt.show()
def deriv(x):
x_deriv = (x-2)*(x-3)*(x-4)*(x-5)+(x-1)*(x-3)*(x-4)*(x-5)+(x-1)*(x-2)*(x-4)*(x-5)+(x-1)*(x-2)*(x-3)*(x-5)\
+(x-1)*(x-2)*(x-3)*(x-4)
return x_deriv
def step(x_new, x_prev, precision, l_r):
function = lambda x: (x-1)*(x-2)*(x-3)*(x-4)*(x-5)
x = np.linspace(1,5,500)
x_list, y_list = [x_new], [function(x_new)]
while abs(x_new - x_prev) > precision:
x_prev = x_new
d_x = - deriv(x_prev)
x_new = x_prev + (l_r * d_x)
x_list.append(x_new)
y_list.append(function(x_new))
print("Local minimum occurs at: "+ str(x_new))
print("Number of steps: " + str(len(x_list)))
plt.subplots(1, 2, figsize=(17,7))
plt.subplot(1,2,1)
plt.scatter(x_list,y_list,c="g")
plt.plot(x_list,y_list,c="g")
plt.plot(x,function(x), c="r")
plt.title("Gradient descent")
plt.subplot(1,2,2)
plt.scatter(x_list,y_list,c="g")
plt.plot(x_list,y_list,c="g")
plt.plot(x,function(x), c="r")
plt.xlim([x_list[0]-.2,x_list[-1]+.2])
plt.title("Zoomed in Gradient descent to Key Area")
plt.show()
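# --- Added, hedged usage example: run the visualised gradient descent above on
# f(x) = (x-1)(x-2)(x-3)(x-4)(x-5). The starting point, tolerance and learning
# rate below are only illustrative values.
# step(x_new=4.5, x_prev=0.0, precision=1e-4, l_r=0.005)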
def montyhall(sample_size):
np.random.seed(1234)
prizes = [np.append(np.random.permutation(prizes),[1,1])\
for prizes in np.tile(['goat', 'goat', 'car'], (sample_size,1))]
prizes = [np.append(r,np.where(r=='car')[0]+1) for r in prizes]
prizes = [np.append(r,np.random.choice(list(set(np.where(r=='goat')[0]+1)-{1}))) for r in prizes]
prizes = [np.append(r,list({'2','3'}-{r[-1]})[0]) for r in prizes]
df = pd.DataFrame(prizes, columns=['door1','door2','door3','select', 'keep', 'prize', 'open','switch'])
df['win'] = 'NA'
df.win[df.prize==df.keep] = 'keep'
df.win[df.prize==df.switch] = 'switch'
fig, axes = plt.subplots(1, 1, figsize = (12,6))
ax = sns.countplot(x='win', data=df, order=df['win'].value_counts().sort_values().index, ax=axes)
total = len(df.win)
nbars = len(ax.patches)
for p in ax.patches:
percentage = '{:.1f}%'.format(100 * p.get_height()/total)
x = p.get_x() + p.get_width()/2 -.05
y = p.get_y() + p.get_height() + total/100
ax.annotate(percentage, (x, y))
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.show()
display(df.head(10))
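# --- Added analytic note: switching wins exactly when the first pick was a goat
# (probability 2/3), while keeping wins only when the first pick was the car (1/3).
# The simulated percentages above should approach these values as sample_size grows.
def montyhall_expected():
    return {'keep': 1 / 3, 'switch': 2 / 3}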
def successful_rate(num_candidates,
num_reject,
num_sim = 5000,
printtable = False):
np.random.seed(1234)
candidates = [np.random.choice(range(100), num_candidates, replace=False) for i in range(num_sim)]
df = pd.DataFrame(candidates, columns=['person'+str(i+1) for i in range(num_candidates)])
df['best_score'] = df[df.columns[:num_candidates]].max(axis=1)
df['best'] = df[df.columns[:num_candidates]].idxmax(axis=1)
rate = dict.fromkeys(num_reject)
for r in num_reject:
df['best_at_stop'] = df[df.columns[:r]].max(axis=1)
df_rest = df[df.columns[r:num_candidates]]
df['hired_score'] = np.where(df_rest.gt(df['best_at_stop'], axis=0).any(axis=1),
df_rest[df_rest.gt(df['best_at_stop'],
axis=0)].stack(dropna=False).groupby(level=0).first(),
df[df.columns[num_candidates-1]]).astype('int64')
df['hired'] = np.where(df_rest.gt(df['best_at_stop'], axis=0).any(axis=1),
df_rest.gt(df['best_at_stop'], axis=0).idxmax(axis=1),
'person'+str(num_candidates))
rate[r] = np.sum(df.best==df.hired)/num_sim*100
if printtable == True:
print('The best candidate is hired {} times in {} trials with {} rejection'.format(np.sum(df.best==df.hired),
num_sim,
r))
display(df.head(10))
return rate
def secretary(n):
rate = successful_rate(n, range(1,n))
lists = sorted(rate.items())
x, y = zip(*lists)
plt.plot(x, y)
plt.show()
print('optimal rejection is {} with {}% chance to hire the best candidate'.\
          format(max(rate, key=rate.get), round(max(rate.values()), 2))) |
the-stack_0_9282 | import gym
import numpy as np
import tensorflow as tf
from gym.wrappers import TimeLimit
def ortho_init(scale=1.0):
"""
Orthogonal initialization for the policy weights
:param scale: (float) Scaling factor for the weights.
:return: (function) an initialization function for the weights
"""
# _ortho_init(shape, dtype, partition_info=None)
def _ortho_init(shape, *_, **_kwargs):
"""Intialize weights as Orthogonal matrix.
Orthogonal matrix initialization [1]_. For n-dimensional shapes where
n > 2, the n-1 trailing axes are flattened. For convolutional layers, this
corresponds to the fan-in, so this makes the initialization usable for
both dense and convolutional layers.
References
----------
.. [1] Saxe, Andrew M., James L. McClelland, and Surya Ganguli.
"Exact solutions to the nonlinear dynamics of learning in deep
               linear neural networks." arXiv preprint arXiv:1312.6120 (2013).
"""
# lasagne ortho init for tf
shape = tuple(shape)
if len(shape) == 2:
flat_shape = shape
elif len(shape) == 4: # assumes NHWC
flat_shape = (np.prod(shape[:-1]), shape[-1])
else:
raise NotImplementedError
gaussian_noise = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(gaussian_noise, full_matrices=False)
weights = u if u.shape == flat_shape else v # pick the one with the correct shape
weights = weights.reshape(shape)
return (scale * weights[:shape[0], :shape[1]]).astype(np.float32)
return _ortho_init
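# --- Added self-check (not part of the original module): for a square 2-D shape
# the initializer above should return weights W with W^T W close to scale**2 * I.
# Uses the module-level numpy import.
def _check_ortho_init(n=8, scale=1.0):
    w = ortho_init(scale)([n, n], np.float32)
    return np.allclose(w.T @ w, (scale ** 2) * np.eye(n), atol=1e-4)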
def conv(input_tensor, scope, *, n_filters, filter_size, stride,
pad='VALID', init_scale=1.0, data_format='NHWC', one_dim_bias=False):
"""
Creates a 2d convolutional layer for TensorFlow
:param input_tensor: (TensorFlow Tensor) The input tensor for the convolution
:param scope: (str) The TensorFlow variable scope
:param n_filters: (int) The number of filters
:param filter_size: (Union[int, [int], tuple<int, int>]) The filter size for the squared kernel matrix,
or the height and width of kernel filter if the input is a list or tuple
:param stride: (int) The stride of the convolution
:param pad: (str) The padding type ('VALID' or 'SAME')
:param init_scale: (int) The initialization scale
:param data_format: (str) The data format for the convolution weights
    :param one_dim_bias: (bool) If the bias should be one dimensional or not
:return: (TensorFlow Tensor) 2d convolutional layer
"""
if isinstance(filter_size, list) or isinstance(filter_size, tuple):
assert len(filter_size) == 2, \
"Filter size must have 2 elements (height, width), {} were given".format(len(filter_size))
filter_height = filter_size[0]
filter_width = filter_size[1]
else:
filter_height = filter_size
filter_width = filter_size
if data_format == 'NHWC':
channel_ax = 3
strides = [1, stride, stride, 1]
bshape = [1, 1, 1, n_filters]
elif data_format == 'NCHW':
channel_ax = 1
strides = [1, 1, stride, stride]
bshape = [1, n_filters, 1, 1]
else:
raise NotImplementedError
bias_var_shape = [n_filters] if one_dim_bias else [1, n_filters, 1, 1]
n_input = input_tensor.get_shape()[channel_ax].value
wshape = [filter_height, filter_width, n_input, n_filters]
with tf.compat.v1.variable_scope(scope):
weight = tf.get_variable("w", wshape, initializer=ortho_init(init_scale))
bias = tf.get_variable("b", bias_var_shape, initializer=tf.constant_initializer(0.0))
if not one_dim_bias and data_format == 'NHWC':
bias = tf.reshape(bias, bshape)
return bias + tf.nn.conv2d(input_tensor, weight, strides=strides, padding=pad, data_format=data_format)
def linear(input_tensor, scope, n_hidden, *, init_scale=1.0, init_bias=0.0):
"""
Creates a fully connected layer for TensorFlow
:param input_tensor: (TensorFlow Tensor) The input tensor for the fully connected layer
:param scope: (str) The TensorFlow variable scope
:param n_hidden: (int) The number of hidden neurons
:param init_scale: (int) The initialization scale
:param init_bias: (int) The initialization offset bias
:return: (TensorFlow Tensor) fully connected layer
"""
with tf.compat.v1.variable_scope(scope):
n_input = input_tensor.get_shape()[1]
weight = tf.compat.v1.get_variable("w", [n_input, n_hidden], initializer=ortho_init(init_scale))
bias = tf.compat.v1.get_variable("b", [n_hidden], initializer=tf.constant_initializer(init_bias))
return tf.matmul(input_tensor, weight) + bias
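# --- Added, hedged usage sketch: a tiny two-layer head built from the helper
# above. This module assumes TF1-style graph semantics, so the snippet is left
# as a comment; shapes and scope names are illustrative only.
#
# inputs = tf.compat.v1.placeholder(tf.float32, [None, 64])
# hidden = tf.nn.relu(linear(inputs, "fc1", n_hidden=32, init_scale=np.sqrt(2)))
# logits = linear(hidden, "fc2", n_hidden=4)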
def batch_to_seq(tensor_batch, n_batch, n_steps, flat=False):
"""
Transform a batch of Tensors, into a sequence of Tensors for recurrent policies
:param tensor_batch: (TensorFlow Tensor) The input tensor to unroll
:param n_batch: (int) The number of batch to run (n_envs * n_steps)
:param n_steps: (int) The number of steps to run for each environment
:param flat: (bool) If the input Tensor is flat
:return: (TensorFlow Tensor) sequence of Tensors for recurrent policies
"""
if flat:
tensor_batch = tf.reshape(tensor_batch, [n_batch, n_steps])
else:
tensor_batch = tf.reshape(tensor_batch, [n_batch, n_steps, -1])
return [tf.squeeze(v, [1]) for v in tf.split(axis=1, num_or_size_splits=n_steps, value=tensor_batch)]
def seq_to_batch(tensor_sequence, flat=False):
"""
Transform a sequence of Tensors, into a batch of Tensors for recurrent policies
:param tensor_sequence: (TensorFlow Tensor) The input tensor to batch
:param flat: (bool) If the input Tensor is flat
:return: (TensorFlow Tensor) batch of Tensors for recurrent policies
"""
shape = tensor_sequence[0].get_shape().as_list()
if not flat:
assert len(shape) > 1
n_hidden = tensor_sequence[0].get_shape()[-1].value
return tf.reshape(tf.concat(axis=1, values=tensor_sequence), [-1, n_hidden])
else:
return tf.reshape(tf.stack(values=tensor_sequence, axis=1), [-1])
def lstm(input_tensor, mask_tensor, cell_state_hidden, scope, n_hidden, init_scale=1.0, layer_norm=False):
"""
Creates an Long Short Term Memory (LSTM) cell for TensorFlow
:param input_tensor: (TensorFlow Tensor) The input tensor for the LSTM cell
:param mask_tensor: (TensorFlow Tensor) The mask tensor for the LSTM cell
:param cell_state_hidden: (TensorFlow Tensor) The state tensor for the LSTM cell
:param scope: (str) The TensorFlow variable scope
:param n_hidden: (int) The number of hidden neurons
:param init_scale: (int) The initialization scale
:param layer_norm: (bool) Whether to apply Layer Normalization or not
:return: (TensorFlow Tensor) LSTM cell
"""
_, n_input = [v.value for v in input_tensor[0].get_shape()]
with tf.variable_scope(scope):
weight_x = tf.get_variable("wx", [n_input, n_hidden * 4], initializer=ortho_init(init_scale))
weight_h = tf.get_variable("wh", [n_hidden, n_hidden * 4], initializer=ortho_init(init_scale))
bias = tf.get_variable("b", [n_hidden * 4], initializer=tf.constant_initializer(0.0))
if layer_norm:
# Gain and bias of layer norm
gain_x = tf.get_variable("gx", [n_hidden * 4], initializer=tf.constant_initializer(1.0))
bias_x = tf.get_variable("bx", [n_hidden * 4], initializer=tf.constant_initializer(0.0))
gain_h = tf.get_variable("gh", [n_hidden * 4], initializer=tf.constant_initializer(1.0))
bias_h = tf.get_variable("bh", [n_hidden * 4], initializer=tf.constant_initializer(0.0))
gain_c = tf.get_variable("gc", [n_hidden], initializer=tf.constant_initializer(1.0))
bias_c = tf.get_variable("bc", [n_hidden], initializer=tf.constant_initializer(0.0))
cell_state, hidden = tf.split(axis=1, num_or_size_splits=2, value=cell_state_hidden)
for idx, (_input, mask) in enumerate(zip(input_tensor, mask_tensor)):
cell_state = cell_state * (1 - mask)
hidden = hidden * (1 - mask)
if layer_norm:
gates = _ln(tf.matmul(_input, weight_x), gain_x, bias_x) \
+ _ln(tf.matmul(hidden, weight_h), gain_h, bias_h) + bias
else:
gates = tf.matmul(_input, weight_x) + tf.matmul(hidden, weight_h) + bias
in_gate, forget_gate, out_gate, cell_candidate = tf.split(axis=1, num_or_size_splits=4, value=gates)
in_gate = tf.nn.sigmoid(in_gate)
forget_gate = tf.nn.sigmoid(forget_gate)
out_gate = tf.nn.sigmoid(out_gate)
cell_candidate = tf.tanh(cell_candidate)
cell_state = forget_gate * cell_state + in_gate * cell_candidate
if layer_norm:
hidden = out_gate * tf.tanh(_ln(cell_state, gain_c, bias_c))
else:
hidden = out_gate * tf.tanh(cell_state)
input_tensor[idx] = hidden
cell_state_hidden = tf.concat(axis=1, values=[cell_state, hidden])
return input_tensor, cell_state_hidden
def _ln(input_tensor, gain, bias, epsilon=1e-5, axes=None):
"""
Apply layer normalisation.
:param input_tensor: (TensorFlow Tensor) The input tensor for the Layer normalization
:param gain: (TensorFlow Tensor) The scale tensor for the Layer normalization
:param bias: (TensorFlow Tensor) The bias tensor for the Layer normalization
:param epsilon: (float) The epsilon value for floating point calculations
:param axes: (tuple, list or int) The axes to apply the mean and variance calculation
:return: (TensorFlow Tensor) a normalizing layer
"""
if axes is None:
axes = [1]
mean, variance = tf.nn.moments(input_tensor, axes=axes, keep_dims=True)
input_tensor = (input_tensor - mean) / tf.sqrt(variance + epsilon)
input_tensor = input_tensor * gain + bias
return input_tensor
def conv_to_fc(input_tensor):
"""
Reshapes a Tensor from a convolutional network to a Tensor for a fully connected network
:param input_tensor: (TensorFlow Tensor) The convolutional input tensor
:return: (TensorFlow Tensor) The fully connected output tensor
"""
n_hidden = np.prod([v.value for v in input_tensor.get_shape()[1:]])
input_tensor = tf.reshape(input_tensor, [-1, n_hidden])
return input_tensor
class DoneOnSuccessWrapper(gym.Wrapper):
"""
Reset on success and offsets the reward.
Useful for GoalEnv.
"""
def __init__(self, env, reward_offset=1.0):
super(DoneOnSuccessWrapper, self).__init__(env)
self.reward_offset = reward_offset
def step(self, action):
obs, reward, done, info = self.env.step(action)
done = done or info.get('is_success', False)
reward += self.reward_offset
return obs, reward, done, info
def compute_reward(self, achieved_goal, desired_goal, info):
reward = self.env.compute_reward(achieved_goal, desired_goal, info)
return reward + self.reward_offset
class TimeFeatureWrapper(gym.Wrapper):
"""
Add remaining time to observation space for fixed length episodes.
See https://arxiv.org/abs/1712.00378 and https://github.com/aravindr93/mjrl/issues/13.
:param env: (gym.Env)
:param max_steps: (int) Max number of steps of an episode
if it is not wrapped in a TimeLimit object.
:param test_mode: (bool) In test mode, the time feature is constant,
equal to zero. This allow to check that the agent did not overfit this feature,
learning a deterministic pre-defined sequence of actions.
"""
def __init__(self, env, max_steps=1000, test_mode=False):
assert isinstance(env.observation_space, gym.spaces.Box)
# Add a time feature to the observation
low, high = env.observation_space.low, env.observation_space.high
        low, high = np.concatenate((low, [0])), np.concatenate((high, [1.]))
env.observation_space = gym.spaces.Box(low=low, high=high, dtype=np.float32)
super(TimeFeatureWrapper, self).__init__(env)
if isinstance(env, TimeLimit):
self._max_steps = env._max_episode_steps
else:
self._max_steps = max_steps
self._current_step = 0
self._test_mode = test_mode
def reset(self):
self._current_step = 0
return self._get_obs(self.env.reset())
def step(self, action):
self._current_step += 1
obs, reward, done, info = self.env.step(action)
return self._get_obs(obs), reward, done, info
def _get_obs(self, obs):
"""
Concatenate the time feature to the current observation.
:param obs: (np.ndarray)
:return: (np.ndarray)
"""
# Remaining time is more general
time_feature = 1 - (self._current_step / self._max_steps)
if self._test_mode:
time_feature = 1.0
        # Optionally: concatenate [time_feature, time_feature ** 2]
return np.concatenate((obs, [time_feature]))
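# --- Added, hedged usage sketch: wrapping a Box-observation gym environment so
# the agent also observes the remaining episode time. The environment id and
# max_steps value are illustrative only.
#
# env = TimeFeatureWrapper(gym.make("Pendulum-v0"), max_steps=200)
# obs = env.reset()                     # last entry of obs is the time feature
# obs, reward, done, info = env.step(env.action_space.sample())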
def total_episode_reward_logger(rew_acc, rewards, masks, writer, steps):
"""
calculates the cumulated episode reward, and prints to tensorflow log the output
:param rew_acc: (np.array float) the total running reward
:param rewards: (np.array float) the rewards
:param masks: (np.array bool) the end of episodes
:param writer: (TensorFlow Session.writer) the writer to log to
:param steps: (int) the current timestep
:return: (np.array float) the updated total running reward
"""
with tf.compat.v1.variable_scope("environment_info", reuse=True):
for env_idx in range(rewards.shape[0]):
dones_idx = np.sort(np.argwhere(masks[env_idx]))
if len(dones_idx) == 0:
rew_acc[env_idx] += sum(rewards[env_idx])
else:
rew_acc[env_idx] += sum(rewards[env_idx, :dones_idx[0, 0]])
summary = tf.compat.v1.Summary(value=[tf.compat.v1.Summary.Value(tag="episode_reward", simple_value=rew_acc[env_idx])])
writer.add_summary(summary, steps + dones_idx[0, 0])
for k in range(1, len(dones_idx[:, 0])):
rew_acc[env_idx] = sum(rewards[env_idx, dones_idx[k-1, 0]:dones_idx[k, 0]])
summary = tf.compat.v1.Summary(value=[tf.compat.v1.Summary.Value(tag="episode_reward", simple_value=rew_acc[env_idx])])
writer.add_summary(summary, steps + dones_idx[k, 0])
rew_acc[env_idx] = sum(rewards[env_idx, dones_idx[-1, 0]:])
return rew_acc |
the-stack_0_9283 | # Loads a target data then defines tables for it
spark.read \
.option("header", True) \
.csv("./testdata/adult.csv") \
.write \
.saveAsTable("adult")
delphi.misc \
.options({"db_name": "default", "table_name": "adult", "row_id": "tid"}) \
.flatten() \
.write \
.saveAsTable("adult_flatten")
spark.table("adult").show(1)
spark.table("adult_flatten").show(1)
# Loads a ground truth data then defines tables for it
spark.read \
.option("header", True) \
.csv("./testdata/adult_clean.csv") \
.write \
.saveAsTable("adult_clean")
spark.table("adult_flatten") \
.join(spark.table("adult_clean"), ["tid", "attribute"], "inner") \
.where("not(value <=> correct_val)") \
.write \
.saveAsTable("error_cells_ground_truth")
spark.table("adult_clean").show(1)
spark.table("error_cells_ground_truth").show(1)
# Detects error cells then repairs them
from repair.errors import NullErrorDetector, ConstraintErrorDetector
error_detectors = [
ConstraintErrorDetector(constraint_path="./testdata/adult_constraints.txt"),
NullErrorDetector()
]
repaired_df = delphi.repair \
.setDbName("default") \
.setTableName("adult") \
.setRowId("tid") \
.setErrorDetectors(error_detectors) \
.run()
# Computes performance numbers (precision & recall)
# - Precision: the fraction of correct repairs, i.e., repairs that match
# the ground truth, over the total number of repairs performed
# - Recall: correct repairs over the total number of errors
pdf = repaired_df.join(spark.table("adult_clean"), ["tid", "attribute"], "inner")
rdf = repaired_df.join(spark.table("error_cells_ground_truth"), ["tid", "attribute"], "right_outer")
# Compares predicted values with the correct ones
pdf.orderBy("attribute").show()
precision = pdf.where("repaired <=> correct_val").count() / pdf.count()
recall = rdf.where("repaired <=> correct_val").count() / rdf.count()
f1 = (2.0 * precision * recall) / (precision + recall)
print(f"Precision={precision} Recall={recall} F1={f1}")
|
the-stack_0_9284 | # Copyright 2019 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for PSD kernel linop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util as tfp_test_util
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
tfpk = tfp.math.psd_kernels
def skip_if_no_xla(skip_test_fn):
try:
tf.function(lambda: tf.constant(0), experimental_compile=True)()
except tf.errors.UnimplementedError as e:
if 'Could not find compiler' in str(e):
skip_test_fn('XLA not available')
@test_util.run_all_in_graph_and_eager_modes
class LinearOperatorPSDKernelTest(tfp_test_util.TestCase):
"""Tests for tfp.experimental.linalg.LinearOperatorPSDKernel."""
def test_shape(self):
kernel = tfpk.ExponentiatedQuadratic(
amplitude=tf.random.uniform([17, 1, 1]),
feature_ndims=2)
x1 = tf.random.normal([1, 11, 5, 2, 13])
x2 = tf.random.normal([7, 1, 3, 2, 13])
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1, x2)
self.assertAllEqual((17, 7, 11, 5, 3), linop.shape)
self.assertAllEqual((17, 7, 11), linop.batch_shape)
def test_diag_part(self):
kernel = tfpk.ExponentiatedQuadratic()
x1 = tf.random.normal([7, 3, 5, 2]) # square matrix 5x5
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1)
expected, actual = self.evaluate([
tf.linalg.diag_part(kernel.matrix(x1, x1)),
linop.diag_part()
])
self.assertAllClose(expected, actual)
x2 = tf.random.normal([3, 11, 2]) # wide matrix 5x11
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1, x2)
expected, actual = self.evaluate([
tf.linalg.diag_part(kernel.matrix(x1, x2)),
linop.diag_part()
])
self.assertAllClose(expected, actual)
x2 = tf.random.normal([2, 2]) # tall matrix 5x2
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1, x2)
expected, actual = self.evaluate([
tf.linalg.diag_part(kernel.matrix(x1, x2)),
linop.diag_part()
])
self.assertAllClose(expected, actual)
def test_diag_part_xla(self):
skip_if_no_xla(self.skipTest)
if not tf.executing_eagerly(): return # experimental_compile is eager-only.
kernel = tfpk.ExponentiatedQuadratic()
x1 = tf.random.normal([7, 3, 5, 2]) # square matrix 5x5
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1)
expected, actual = self.evaluate([
tf.linalg.diag_part(kernel.matrix(x1, x1)),
tf.function(linop.diag_part, experimental_compile=True)()
])
self.assertAllClose(expected, actual)
x2 = tf.random.normal([3, 11, 2]) # wide matrix 5x11
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1, x2)
expected, actual = self.evaluate([
tf.linalg.diag_part(kernel.matrix(x1, x2)),
tf.function(linop.diag_part, experimental_compile=True)()
])
self.assertAllClose(expected, actual)
x2 = tf.random.normal([2, 2]) # tall matrix 5x2
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1, x2)
expected, actual = self.evaluate([
tf.linalg.diag_part(kernel.matrix(x1, x2)),
tf.function(linop.diag_part, experimental_compile=True)()
])
self.assertAllClose(expected, actual)
def test_row_scalar(self):
kernel = tfpk.ExponentiatedQuadratic()
x1 = tf.random.normal([5, 2])
x2 = tf.random.normal([7, 3, 5, 2])
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1, x2)
i = np.random.randint(0, 5)
expected, actual = self.evaluate(
[kernel.matrix(x1, x2)[..., i, :], linop.row(i)])
self.assertAllClose(expected, actual)
def test_row_batch(self):
kernel = tfpk.ExponentiatedQuadratic()
x1 = tf.random.normal([7, 1, 5, 2])
x2 = tf.random.normal([1, 3, 4, 2])
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1, x2)
i = np.random.randint(0, 5, size=(7, 3))
cov = kernel.matrix(x1, x2)
expected, actual = self.evaluate([
tf.gather(cov, i[..., tf.newaxis], batch_dims=2)[..., 0, :],
linop.row(i)
])
self.assertAllClose(expected, actual)
def test_col_scalar(self):
kernel = tfpk.ExponentiatedQuadratic()
x1 = tf.random.normal([5, 2])
x2 = tf.random.normal([7, 3, 5, 2])
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1, x2)
j = np.random.randint(0, 5)
expected, actual = self.evaluate(
[kernel.matrix(x1, x2)[..., j], linop.col(j)])
self.assertAllClose(expected, actual)
def test_col_batch(self):
kernel = tfpk.ExponentiatedQuadratic()
x1 = tf.random.normal([3, 5, 2])
x2 = tf.random.normal([7, 1, 4, 2])
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1, x2)
j = np.random.randint(0, 4, size=(7, 3))
cov = kernel.matrix(x1, x2)
transpose = tf.linalg.matrix_transpose
# Gather with batch_dims wants all the batch dims adjacent and leading, so
# transpose-gather-transpose is easier to write than injecting a
# range(nrows) column into the gather indices.
expected, actual = self.evaluate([
transpose(tf.gather(transpose(cov), j[..., tf.newaxis], batch_dims=2)
)[..., 0],
linop.col(j)
])
self.assertAllClose(expected, actual)
def test_matmul(self):
kernel = tfpk.ExponentiatedQuadratic()
x1 = tf.random.normal([3, 2, 11])
x2 = tf.random.normal([5, 1, 4, 11])
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1, x2)
cov = kernel.matrix(x1, x2)
x = tf.random.normal([4, 3])
expected, actual = self.evaluate([tf.matmul(cov, x), linop.matmul(x)])
self.assertAllClose(expected, actual)
def test_matmul_chunked(self):
kernel = tfpk.ExponentiatedQuadratic()
x1 = tf.random.normal([3, 2, 11])
x2 = tf.random.normal([5, 1, 14, 11])
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1, x2,
num_matmul_parts=7)
cov = kernel.matrix(x1, x2)
x = tf.random.normal([14, 3])
expected, actual = self.evaluate([tf.matmul(cov, x), linop.matmul(x)])
self.assertAllClose(expected, actual)
@parameterized.named_parameters(
(dict(testcase_name='_{}chunk'.format(n), nchunks=n) for n in (2, 5)))
def test_matmul_chunked_with_remainder(self, nchunks):
kernel = tfpk.ExponentiatedQuadratic()
x1 = tf.random.normal([3, 2, 11])
x2 = tf.random.normal([5, 1, 17, 11])
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(
kernel, x1, x2, num_matmul_parts=nchunks)
cov = kernel.matrix(x1, x2)
x = tf.random.normal([17, 3])
expected, actual = self.evaluate([tf.matmul(cov, x), linop.matmul(x)])
self.assertAllClose(expected, actual)
def test_matmul_chunked_grad(self):
kernel = tfpk.ExponentiatedQuadratic()
x1 = tf.random.normal([5, 3])
x2 = tf.random.normal([7, 3])
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1, x2,
num_matmul_parts=3)
x = tf.random.normal([7, 2])
with tf.GradientTape() as tape:
tape.watch((x1, x2, x))
y = linop.matmul(x)
out_grad = tf.random.normal(tf.shape(y))
actuals = tape.gradient(y, (x1, x2, x), output_gradients=out_grad)
with tf.GradientTape() as tape:
tape.watch((x1, x2, x))
y = tf.matmul(kernel.matrix(x1, x2), x)
expecteds = tape.gradient(y, (x1, x2, x), output_gradients=out_grad)
expecteds, actuals = self.evaluate([expecteds, actuals])
self.assertEqual(len(expecteds), len(actuals))
for expected, actual in zip(expecteds, actuals):
self.assertAllClose(expected, actual)
def test_matmul_xla(self):
skip_if_no_xla(self.skipTest)
if not tf.executing_eagerly(): return # experimental_compile is eager-only.
kernel = tfpk.ExponentiatedQuadratic()
x1 = tf.random.normal([5, 3])
x2 = tf.random.normal([7, 3])
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(
kernel, x1, x2, num_matmul_parts=3)
x = tf.random.normal([7, 2])
@tf.function(experimental_compile=True)
def f():
return linop.matmul(x)
actual = f()
expected = tf.matmul(kernel.matrix(x1, x2), x)
expected, actual = self.evaluate([expected, actual])
self.assertAllClose(expected, actual)
def test_matmul_grad_xla(self):
skip_if_no_xla(self.skipTest)
if not tf.executing_eagerly(): return # experimental_compile is eager-only.
kernel = tfpk.ExponentiatedQuadratic()
x1 = tf.random.normal([5, 3])
x2 = tf.random.normal([7, 3])
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(
kernel, x1, x2, num_matmul_parts=3)
x = tf.random.normal([7, 2])
@tf.function(experimental_compile=True)
def f():
with tf.GradientTape() as tape:
tape.watch((x1, x2, x))
y = linop.matmul(x)
out_grad = tf.random.normal(tf.shape(y))
actuals = tape.gradient(y, (x1, x2, x), output_gradients=out_grad)
return y, actuals, out_grad
y, actuals, out_grad = f()
with tf.GradientTape() as tape:
tape.watch((x1, x2, x))
y = tf.matmul(kernel.matrix(x1, x2), x)
expecteds = tape.gradient(y, (x1, x2, x), output_gradients=out_grad)
expecteds, actuals = self.evaluate([expecteds, actuals])
self.assertEqual(len(expecteds), len(actuals))
for expected, actual in zip(expecteds, actuals):
self.assertAllClose(expected, actual)
def test_matmul_grad_xla_kernelparams(self):
skip_if_no_xla(self.skipTest)
if not tf.executing_eagerly(): return # experimental_compile is eager-only.
feature_dim = 3
def kernel_fn(eq_params, poly_params):
return (tfpk.ExponentiatedQuadratic(**eq_params) *
tfpk.Polynomial(**poly_params))
kernel_args = (
dict(length_scale=tf.random.uniform([], .5, 1.5, dtype=tf.float64),
amplitude=tf.random.uniform([], 1.5, 2.5, dtype=tf.float64)),
dict(bias_variance=tf.random.uniform([feature_dim], .5, 1.5,
dtype=tf.float64),
shift=tf.random.normal([feature_dim], dtype=tf.float64)))
x1 = tf.random.normal([5, feature_dim], dtype=tf.float64)
x2 = tf.random.normal([7, feature_dim], dtype=tf.float64)
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(
kernel_fn, x1, x2, kernel_args=kernel_args, num_matmul_parts=3)
x = tf.random.normal([7, 2], dtype=tf.float64)
@tf.function(experimental_compile=True)
def f():
with tf.GradientTape() as tape:
tape.watch((x1, x2, x, kernel_args))
y = linop.matmul(x)
out_grad = tf.random.normal(tf.shape(y), dtype=tf.float64)
actuals = tape.gradient(y, (x1, x2, x, kernel_args),
output_gradients=out_grad)
return y, actuals, out_grad
y, actuals, out_grad = f()
with tf.GradientTape() as tape:
tape.watch((x1, x2, x, kernel_args))
y = tf.matmul(kernel_fn(*kernel_args).matrix(x1, x2), x)
expecteds = tape.gradient(y, (x1, x2, x, kernel_args),
output_gradients=out_grad)
expecteds, actuals = self.evaluate([expecteds, actuals])
tf.nest.assert_same_structure(expecteds, actuals)
for expected, actual in zip(tf.nest.flatten(expecteds),
tf.nest.flatten(actuals)):
self.assertAllClose(expected, actual)
if __name__ == '__main__':
tf.enable_v2_behavior()
tf.test.main()
|
the-stack_0_9285 | import os
from .Backend import Backend
class TextFile(Backend):
def __init__(self, filename):
self.filename = filename
i = 1
while os.path.exists(self.filename):
i += 1
self.filename = "%s_%d" % (filename, i)
        self.f = open(self.filename, 'w')
self.last_route = ""
def write(self, route, attribute, value):
if route != self.last_route:
self.f.write(str(route) + "\n")
self.last_route = route
try:
self.f.write("\t%s : %s\n" % (str(attribute), str(value.__dict__)))
except Exception:
self.f.write("\t%s : %s\n" % (str(attribute), str(value)))
def __del__(self):
self.f.close()
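# --- Added, hedged usage sketch (not part of the original module): the Backend
# base class comes from this package; only the interface shown above is used.
#
# backend = TextFile("routes.log")
# backend.write("route_a", "latency_ms", 12.7)
# backend.write("route_a", "status", "ok")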
|
the-stack_0_9286 | # Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import copy
import io
import multiprocessing
import os
import re
import stat
import subprocess
import sys
try:
from catkin_pkg.cmake import configure_file, get_metapackage_cmake_template_path
from catkin_pkg.packages import find_packages
from catkin_pkg.topological_order import topological_order_packages
except ImportError as e:
sys.exit(
'ImportError: "from catkin_pkg.topological_order import '
'topological_order" failed: %s\nMake sure that you have installed '
'"catkin_pkg", it is up to date and on the PYTHONPATH.' % e
)
from catkin.cmake import get_cmake_path
from catkin.terminal_color import ansi, disable_ANSI_colors, fmt, sanitize
def split_arguments(args, splitter_name, default=None):
if splitter_name not in args:
return args, default
index = args.index(splitter_name)
return args[0:index], args[index + 1:]
def extract_cmake_and_make_arguments(args):
args, cmake_args, make_args, _ = _extract_cmake_and_make_arguments(args, extract_catkin_make=False)
return args, cmake_args, make_args
def extract_cmake_and_make_and_catkin_make_arguments(args):
return _extract_cmake_and_make_arguments(args, extract_catkin_make=True)
def _extract_cmake_and_make_arguments(args, extract_catkin_make):
cmake_args = []
make_args = []
catkin_make_args = []
arg_types = {
'--cmake-args': cmake_args,
'--make-args': make_args
}
if extract_catkin_make:
arg_types['--catkin-make-args'] = catkin_make_args
arg_indexes = {}
for k in arg_types.keys():
if k in args:
arg_indexes[args.index(k)] = k
for index in reversed(sorted(arg_indexes.keys())):
arg_type = arg_indexes[index]
args, specific_args = split_arguments(args, arg_type)
arg_types[arg_type].extend(specific_args)
# classify -D* and -G* arguments as cmake specific arguments
implicit_cmake_args = [a for a in args if a.startswith('-D') or a.startswith('-G')]
args = [a for a in args if a not in implicit_cmake_args]
return args, implicit_cmake_args + cmake_args, make_args, catkin_make_args
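# --- Added illustrative example (not part of the original module): how the
# splitter above partitions a typical catkin_make_isolated command line.
#
# args = ['--force-cmake', '-DCMAKE_BUILD_TYPE=Release', '--make-args', '-j4', 'tests']
# rest, cmake_args, make_args = extract_cmake_and_make_arguments(args)
# # rest       -> ['--force-cmake']
# # cmake_args -> ['-DCMAKE_BUILD_TYPE=Release']
# # make_args  -> ['-j4', 'tests']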
def cprint(msg, end=None):
print(fmt(msg), end=end)
def colorize_line(line):
cline = sanitize(line)
cline = cline.replace(
'-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~',
'-- @{pf}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~@|'
)
if line.startswith('-- ~~'):
# -- ~~ -
cline = cline.replace('~~ ', '@{pf}~~ @|')
cline = cline.replace(' - ', ' - @!@{bf}')
cline = cline.replace('(', '@|(')
cline = cline.replace('(plain cmake)', '@|(@{rf}plain cmake@|)')
cline = cline.replace('(unknown)', '@|(@{yf}unknown@|)')
if line.startswith('-- +++'):
# -- +++ add_subdirectory(package)
cline = cline.replace('+++', '@!@{gf}+++@|')
cline = cline.replace('kin package: \'', 'kin package: \'@!@{bf}')
cline = cline.replace(')', '@|)')
cline = cline.replace('\'\n', '@|\'\n')
cline = cline.replace('cmake package: \'', 'cmake package: \'@!@{bf}')
cline = cline.replace('\'\n', '@|\'\n')
if line.startswith('-- ==>'):
cline = cline.replace('-- ==>', '-- @!@{bf}==>@|')
if line.lower().startswith('warning'):
# WARNING
cline = ansi('yf') + cline
if line.startswith('CMake Warning'):
# CMake Warning...
cline = cline.replace('CMake Warning', '@{yf}@!CMake Warning@|')
if line.startswith('ERROR:'):
# ERROR:
cline = cline.replace('ERROR:', '@!@{rf}ERROR:@|')
if line.startswith('CMake Error'):
# CMake Error...
cline = cline.replace('CMake Error', '@{rf}@!CMake Error@|')
if line.startswith('Call Stack (most recent call first):'):
# CMake Call Stack
cline = cline.replace('Call Stack (most recent call first):',
'@{cf}@_Call Stack (most recent call first):@|')
return fmt(cline)
def print_command_banner(cmd, cwd, color):
if color:
# Prepare for printing
cmd_str = sanitize(' '.join(cmd))
cwd_str = sanitize(cwd)
# Print command notice
cprint('@{bf}####')
cprint('@{bf}#### Running command: @!"%s"@|@{bf} in @!"%s"' % (cmd_str, cwd_str))
cprint('@{bf}####')
else:
print('####')
print('#### Running command: "%s" in "%s"' % (' '.join(cmd), cwd))
print('####')
def run_command_colorized(cmd, cwd, quiet=False):
run_command(cmd, cwd, quiet=quiet, colorize=True)
def run_command(cmd, cwd, quiet=False, colorize=False):
capture = (quiet or colorize)
stdout_pipe = subprocess.PIPE if capture else None
stderr_pipe = subprocess.STDOUT if capture else None
try:
proc = subprocess.Popen(
cmd, cwd=cwd, shell=False,
stdout=stdout_pipe, stderr=stderr_pipe
)
except OSError as e:
raise OSError("Failed command '%s': %s" % (cmd, e))
out = io.StringIO() if quiet else sys.stdout
if capture:
while True:
line = unicode(proc.stdout.readline())
if proc.returncode is not None or not line:
break
try:
line = colorize_line(line) if colorize else line
except Exception as e:
import traceback
traceback.print_exc()
                print('<catkin_make> color formatting problem: ' + str(e),
file=sys.stderr)
out.write(line)
proc.wait()
if proc.returncode:
if quiet:
print(out.getvalue())
raise subprocess.CalledProcessError(proc.returncode, ' '.join(cmd))
return out.getvalue() if quiet else ''
blue_arrow = '@!@{bf}==>@|@!'
def _check_build_dir(name, workspace, buildspace):
package_build_dir = os.path.join(buildspace, name)
if not os.path.exists(package_build_dir):
cprint(
blue_arrow + ' Creating build directory: \'' +
os.path.relpath(package_build_dir, workspace) + '\'@|'
)
os.mkdir(package_build_dir)
return package_build_dir
def isolation_print_command(cmd, path=None):
cprint(
blue_arrow + " " + sanitize(cmd) + "@|" +
(" @!@{kf}in@| '@!" + sanitize(path) + "@|'" if path else '')
)
def get_python_install_dir():
# this function returns the same value as the CMake variable PYTHON_INSTALL_DIR from catkin/cmake/python.cmake
python_install_dir = 'lib'
if os.name != 'nt':
python_version_xdoty = str(sys.version_info[0]) + '.' + str(sys.version_info[1])
python_install_dir = os.path.join(python_install_dir, 'python' + python_version_xdoty)
python_use_debian_layout = os.path.exists('/etc/debian_version')
python_packages_dir = 'dist-packages' if python_use_debian_layout else 'site-packages'
python_install_dir = os.path.join(python_install_dir, python_packages_dir)
return python_install_dir
def handle_make_arguments(input_make_args, force_single_threaded_when_running_tests=False):
make_args = list(input_make_args)
if force_single_threaded_when_running_tests:
# force single threaded execution when running test since rostest does not support multiple parallel runs
run_tests = [a for a in make_args if a.startswith('run_tests')]
if run_tests:
print('Forcing "-j1" for running unit tests.')
make_args.append('-j1')
# If no -j/--jobs/-l/--load-average flags are in make_args
if not extract_jobs_flags(' '.join(make_args)):
# If -j/--jobs/-l/--load-average are in MAKEFLAGS
if 'MAKEFLAGS' in os.environ and extract_jobs_flags(os.environ['MAKEFLAGS']):
# Do not extend make arguments, let MAKEFLAGS set things
pass
else:
# Else extend the make_arguments to include some jobs flags
# If ROS_PARALLEL_JOBS is set use those flags
if 'ROS_PARALLEL_JOBS' in os.environ:
# ROS_PARALLEL_JOBS is a set of make variables, not just a number
ros_parallel_jobs = os.environ['ROS_PARALLEL_JOBS']
make_args.extend(ros_parallel_jobs.split())
else:
# Else Use the number of CPU cores
try:
jobs = multiprocessing.cpu_count()
make_args.append('-j{0}'.format(jobs))
make_args.append('-l{0}'.format(jobs))
except NotImplementedError:
# If the number of cores cannot be determined, do not extend args
pass
return make_args
def extract_jobs_flags(mflags):
regex = r'(?:^|\s)(-?(?:j|l)(?:\s*[0-9]+|\s|$))' + \
r'|' + \
r'(?:^|\s)((?:--)?(?:jobs|load-average)(?:(?:=|\s+)[0-9]+|(?:\s|$)))'
matches = re.findall(regex, mflags) or []
matches = [m[0] or m[1] for m in matches]
return ' '.join([m.strip() for m in matches]) if matches else None
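# --- Added illustrative example: the regex above extracts -j/-l style job flags
# from a MAKEFLAGS-like string and ignores unrelated words.
#
# extract_jobs_flags('-j4 -l4 --silent')   -> '-j4 -l4'
# extract_jobs_flags('--jobs=2 VERBOSE=1') -> '--jobs=2'
# extract_jobs_flags('VERBOSE=1')          -> None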
def build_catkin_package(
path, package,
workspace, buildspace, develspace, installspace,
install, force_cmake, quiet, last_env, cmake_args, make_args
):
cprint(
"Processing @{cf}catkin@| package: '@!@{bf}" +
package.name + "@|'"
)
# Make the build dir
build_dir = _check_build_dir(package.name, workspace, buildspace)
# Check last_env
if last_env is not None:
cprint(
blue_arrow + " Building with env: " +
"'{0}'".format(last_env)
)
# Check for Makefile and maybe call cmake
makefile = os.path.join(build_dir, 'Makefile')
if not os.path.exists(makefile) or force_cmake:
package_dir = os.path.dirname(package.filename)
if not os.path.exists(os.path.join(package_dir, 'CMakeLists.txt')):
export_tags = [e.tagname for e in package.exports]
if 'metapackage' not in export_tags:
print(colorize_line('Error: Package "%s" does not have a CMakeLists.txt file' % package.name))
sys.exit('Can not build catkin package without CMakeLists.txt file')
# generate CMakeLists.txt for metpackages without one
print(colorize_line('Warning: metapackage "%s" should have a CMakeLists.txt file' % package.name))
cmake_code = configure_file(
get_metapackage_cmake_template_path(),
{'name': package.name, 'metapackage_arguments': 'DIRECTORY "%s"' % package_dir})
cmakelists_txt = os.path.join(build_dir, 'CMakeLists.txt')
with open(cmakelists_txt, 'w') as f:
f.write(cmake_code)
package_dir = build_dir
# Run cmake
cmake_cmd = [
'cmake',
package_dir,
'-DCATKIN_DEVEL_PREFIX=' + develspace,
'-DCMAKE_INSTALL_PREFIX=' + installspace
]
cmake_cmd.extend(cmake_args)
isolation_print_command(' '.join(cmake_cmd))
if last_env is not None:
cmake_cmd = [last_env] + cmake_cmd
try:
run_command_colorized(cmake_cmd, build_dir, quiet)
except subprocess.CalledProcessError as e:
if os.path.exists(makefile):
# remove Makefile to force CMake invocation next time
os.remove(makefile)
raise
else:
print('Makefile exists, skipping explicit cmake invocation...')
# Check to see if cmake needs to be run via make
make_check_cmake_cmd = ['make', 'cmake_check_build_system']
isolation_print_command(' '.join(make_check_cmake_cmd), build_dir)
if last_env is not None:
make_check_cmake_cmd = [last_env] + make_check_cmake_cmd
run_command_colorized(
make_check_cmake_cmd, build_dir, quiet
)
# Run make
make_cmd = ['make']
make_cmd.extend(handle_make_arguments(make_args, force_single_threaded_when_running_tests=True))
isolation_print_command(' '.join(make_cmd), build_dir)
if last_env is not None:
make_cmd = [last_env] + make_cmd
run_command(make_cmd, build_dir, quiet)
# Make install
if install:
make_install_cmd = ['make', 'install']
isolation_print_command(' '.join(make_install_cmd), build_dir)
if last_env is not None:
make_install_cmd = [last_env] + make_install_cmd
run_command(make_install_cmd, build_dir, quiet)
def build_cmake_package(
path, package,
workspace, buildspace, develspace, installspace,
install, force_cmake, quiet, last_env, cmake_args, make_args
):
# Notify the user that we are processing a plain cmake package
cprint(
"Processing @{cf}plain cmake@| package: '@!@{bf}" + package.name +
"@|'"
)
# Make the build dir
build_dir = _check_build_dir(package.name, workspace, buildspace)
# Check last_env
if last_env is not None:
cprint(blue_arrow + " Building with env: " +
"'{0}'".format(last_env))
# Check for Makefile and maybe call cmake
makefile = os.path.join(build_dir, 'Makefile')
install_target = installspace if install else develspace
if not os.path.exists(makefile) or force_cmake:
# Call cmake
cmake_cmd = [
'cmake',
os.path.dirname(package.filename),
'-DCMAKE_INSTALL_PREFIX=' + install_target
]
cmake_cmd.extend(cmake_args)
isolation_print_command(' '.join(cmake_cmd))
if last_env is not None:
cmake_cmd = [last_env] + cmake_cmd
run_command_colorized(cmake_cmd, build_dir, quiet)
else:
print('Makefile exists, skipping explicit cmake invocation...')
# Check to see if cmake needs to be run via make
make_check_cmake_cmd = ['make', 'cmake_check_build_system']
isolation_print_command(' '.join(make_check_cmake_cmd), build_dir)
if last_env is not None:
make_check_cmake_cmd = [last_env] + make_check_cmake_cmd
run_command_colorized(
make_check_cmake_cmd, build_dir, quiet
)
# Run make
make_cmd = ['make']
make_cmd.extend(handle_make_arguments(make_args))
isolation_print_command(' '.join(make_cmd), build_dir)
if last_env is not None:
make_cmd = [last_env] + make_cmd
run_command(make_cmd, build_dir, quiet)
# Make install
make_install_cmd = ['make', 'install']
isolation_print_command(' '.join(make_install_cmd), build_dir)
if last_env is not None:
make_install_cmd = [last_env] + make_install_cmd
run_command(make_install_cmd, build_dir, quiet)
# If we are installing, and a env.sh exists, don't overwrite it
if install and os.path.exists(os.path.join(installspace, 'env.sh')):
return
cprint(blue_arrow + " Generating an env.sh")
# Generate env.sh for chaining to catkin packages
new_env_path = os.path.join(install_target, 'env.sh')
variables = {
'SETUP_DIR': install_target,
'SETUP_FILENAME': 'setup'
}
with open(os.path.join(new_env_path), 'w') as f:
f.write(configure_file(os.path.join(get_cmake_path(), 'templates', 'env.sh.in'), variables))
os.chmod(new_env_path, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
# Generate setup.sh for chaining to catkin packages
new_setup_path = os.path.join(install_target, 'setup.sh')
subs = {}
subs['cmake_prefix_path'] = install_target + ":"
subs['ld_path'] = os.path.join(install_target, 'lib') + ":"
pythonpath = os.path.join(install_target, get_python_install_dir())
subs['pythonpath'] = pythonpath + ':'
subs['pkgcfg_path'] = os.path.join(install_target, 'lib', 'pkgconfig')
subs['pkgcfg_path'] += ":"
subs['path'] = os.path.join(install_target, 'bin') + ":"
if not os.path.exists(install_target):
os.mkdir(install_target)
with open(new_setup_path, 'w+') as file_handle:
file_handle.write("""\
#!/usr/bin/env sh
# generated from catkin.builder module
""")
if last_env is not None:
last_setup_env = os.path.join(os.path.dirname(last_env), 'setup.sh')
file_handle.write('. %s\n\n' % last_setup_env)
file_handle.write("""\
# detect if running on Darwin platform
UNAME=`which uname`
UNAME=`$UNAME`
IS_DARWIN=0
if [ "$UNAME" = "Darwin" ]; then
IS_DARWIN=1
fi
# Prepend to the environment
export CMAKE_PREFIX_PATH="{cmake_prefix_path}$CMAKE_PREFIX_PATH"
if [ $IS_DARWIN -eq 0 ]; then
export LD_LIBRARY_PATH="{ld_path}$LD_LIBRARY_PATH"
else
export DYLD_LIBRARY_PATH="{ld_path}$DYLD_LIBRARY_PATH"
fi
export PATH="{path}$PATH"
export PKG_CONFIG_PATH="{pkgcfg_path}$PKG_CONFIG_PATH"
export PYTHONPATH="{pythonpath}$PYTHONPATH"
""".format(**subs))
def build_package(
path, package,
workspace, buildspace, develspace, installspace,
install, force_cmake, quiet, last_env, cmake_args, make_args, catkin_make_args,
number=None, of=None
):
cprint('@!@{gf}==>@| ', end='')
new_last_env = get_new_env(package, develspace, installspace, install, last_env)
build_type = _get_build_type(package)
if build_type == 'catkin':
build_catkin_package(
path, package,
workspace, buildspace, develspace, installspace,
install, force_cmake, quiet, last_env, cmake_args, make_args + catkin_make_args
)
if not os.path.exists(new_last_env):
raise RuntimeError(
"No env.sh file generated at: '" + new_last_env +
"'\n This sometimes occurs when a non-catkin package is "
"interpreted as a catkin package.\n This can also occur "
"when the cmake cache is stale, try --force-cmake."
)
elif build_type == 'cmake':
build_cmake_package(
path, package,
workspace, buildspace, develspace, installspace,
install, force_cmake, quiet, last_env, cmake_args, make_args
)
else:
sys.exit('Can not build package with unknown build_type')
if number is not None and of is not None:
msg = ' [@{gf}@!' + str(number) + '@| of @!@{gf}' + str(of) + '@|]'
else:
msg = ''
cprint('@{gf}<==@| Finished processing package' + msg + ': \'@{bf}@!' +
package.name + '@|\'')
return new_last_env
def get_new_env(package, develspace, installspace, install, last_env):
new_env = None
build_type = _get_build_type(package)
if build_type in ['catkin', 'cmake']:
new_env = os.path.join(
installspace if install else develspace,
'env.sh'
)
return new_env
def _get_build_type(package):
build_type = 'catkin'
if 'build_type' in [e.tagname for e in package.exports]:
build_type = [e.content for e in package.exports if e.tagname == 'build_type'][0]
return build_type
def build_workspace_isolated(
workspace='.',
sourcespace=None,
buildspace=None,
develspace=None,
installspace=None,
merge=False,
install=False,
force_cmake=False,
colorize=True,
build_packages=None,
quiet=False,
cmake_args=None,
make_args=None,
catkin_make_args=None
):
'''
Runs ``cmake``, ``make`` and optionally ``make install`` for all
catkin packages in sourcespace_dir. It creates several folders
in the current working directory. For non-catkin packages it runs
``cmake``, ``make`` and ``make install`` for each, installing it to
the devel space or install space if the ``install`` option is specified.
:param workspace: path to the current workspace, ``str``
:param sourcespace: workspace folder containing catkin packages, ``str``
:param buildspace: path to build space location, ``str``
:param develspace: path to devel space location, ``str``
:param installspace: path to install space (CMAKE_INSTALL_PREFIX), ``str``
:param merge: if True, build each catkin package into the same
devel space. does not work with non-catkin packages, ``bool``
:param install: if True, install all packages to the install space,
``bool``
:param force_cmake: (optional), if True calls cmake explicitly for each
package, ``bool``
:param colorize: if True, colorize cmake output and other messages,
``bool``
:param build_packages: specific packages to build (all parent packages
in the topological order must have been built before), ``str``
:param quiet: if True, hides some build output, ``bool``
:param cmake_args: additional arguments for cmake, ``[str]``
:param make_args: additional arguments for make, ``[str]``
:param catkin_make_args: additional arguments for make but only for catkin
packages, ``[str]``
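    Example (illustrative only; assumes this module is importable as
    ``catkin.builder``; the workspace path and cmake argument are placeholder
    values)::
        from catkin.builder import build_workspace_isolated
        build_workspace_isolated(
            workspace='/tmp/example_ws',
            install=False,
            cmake_args=['-DCMAKE_BUILD_TYPE=Release']
        )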
'''
if not colorize:
disable_ANSI_colors()
    # Check workspace existence
if not os.path.exists(workspace):
sys.exit("Workspace path '{0}' does not exist.".format(workspace))
workspace = os.path.abspath(workspace)
    # Check source space existence
if sourcespace is None:
ws_sourcespace = os.path.join(workspace, 'src')
if not os.path.exists(ws_sourcespace):
sys.exit("Could not find source space: {0}".format(sourcespace))
sourcespace = ws_sourcespace
sourcespace = os.path.abspath(sourcespace)
print('Base path: ' + str(workspace))
print('Source space: ' + str(sourcespace))
# Check build space
if buildspace is None:
buildspace = os.path.join(workspace, 'build_isolated')
buildspace = os.path.abspath(buildspace)
if not os.path.exists(buildspace):
os.mkdir(buildspace)
print('Build space: ' + str(buildspace))
# Check devel space
if develspace is None:
develspace = os.path.join(workspace, 'devel_isolated')
develspace = os.path.abspath(develspace)
print('Devel space: ' + str(develspace))
# Check install space
if installspace is None:
installspace = os.path.join(workspace, 'install_isolated')
installspace = os.path.abspath(installspace)
print('Install space: ' + str(installspace))
if cmake_args:
print("Additional CMake Arguments: " + " ".join(cmake_args))
else:
cmake_args = []
if make_args:
print("Additional make Arguments: " + " ".join(make_args))
else:
make_args = []
if catkin_make_args:
print("Additional make Arguments for catkin packages: " + " ".join(catkin_make_args))
else:
catkin_make_args = []
# Find packages
packages = find_packages(sourcespace, exclude_subspaces=True)
if not packages:
print(fmt("@{yf}No packages found in source space: %s@|" % sourcespace))
    # Verify that the specified packages exist in the workspace
if build_packages:
        packages_by_name = {p.name: path for path, p in packages.items()}
unknown_packages = [p for p in build_packages if p not in packages_by_name]
if unknown_packages:
sys.exit('Packages not found in the workspace: %s' % ', '.join(unknown_packages))
# Report topological ordering
ordered_packages = topological_order_packages(packages)
unknown_build_types = []
msg = []
msg.append('@{pf}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' + ('~' * len(str(len(ordered_packages)))))
msg.append('@{pf}~~@| traversing %d packages in topological order:' % len(ordered_packages))
for path, package in ordered_packages:
export_tags = [e.tagname for e in package.exports]
if 'build_type' in export_tags:
build_type_tag = [e.content for e in package.exports if e.tagname == 'build_type'][0]
else:
build_type_tag = 'catkin'
if build_type_tag == 'catkin':
msg.append('@{pf}~~@| - @!@{bf}' + package.name + '@|')
elif build_type_tag == 'cmake':
msg.append(
'@{pf}~~@| - @!@{bf}' + package.name + '@|' +
' (@!@{cf}plain cmake@|)'
)
else:
msg.append(
'@{pf}~~@| - @!@{bf}' + package.name + '@|' +
' (@{rf}unknown@|)'
)
unknown_build_types.append(package)
msg.append('@{pf}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' + ('~' * len(str(len(ordered_packages)))))
for index in range(len(msg)):
msg[index] = fmt(msg[index])
print('\n'.join(msg))
# Error if there are packages with unknown build_types
if unknown_build_types:
print(colorize_line('Error: Packages with unknown build types exist'))
sys.exit('Can not build workspace with packages of unknown build_type')
# Check to see if the workspace has changed
if not force_cmake:
force_cmake, install_toggled = cmake_input_changed(
packages,
buildspace,
install=install,
cmake_args=cmake_args,
filename='catkin_make_isolated'
)
if force_cmake:
print('The packages or cmake arguments have changed, forcing cmake invocation')
elif install_toggled:
print('The install argument has been toggled, forcing cmake invocation on plain cmake package')
# Build packages
pkg_develspace = None
last_env = None
for index, path_package in enumerate(ordered_packages):
path, package = path_package
if merge:
pkg_develspace = develspace
else:
pkg_develspace = os.path.join(develspace, package.name)
if not build_packages or package.name in build_packages:
try:
export_tags = [e.tagname for e in package.exports]
is_cmake_package = 'cmake' in [e.content for e in package.exports if e.tagname == 'build_type']
last_env = build_package(
path, package,
workspace, buildspace, pkg_develspace, installspace,
install, force_cmake or (install_toggled and is_cmake_package),
quiet, last_env, cmake_args, make_args, catkin_make_args,
number=index + 1, of=len(ordered_packages)
)
except Exception as e:
import traceback
traceback.print_exc()
cprint(
'@{rf}@!<==@| ' +
'Failed to process package \'@!@{bf}' +
package.name + '@|\': \n ' +
('KeyboardInterrupt' if isinstance(e, KeyboardInterrupt)
else str(e))
)
if isinstance(e, subprocess.CalledProcessError):
cmd = ' '.join(e.cmd) if isinstance(e.cmd, list) else e.cmd
print(fmt("\n@{rf}Reproduce this error by running:"))
print(fmt("@{gf}@!==> @|") + cmd + "\n")
sys.exit('Command failed, exiting.')
else:
cprint("Skipping package: '@!@{bf}" + package.name + "@|'")
last_env = get_new_env(package, pkg_develspace, installspace, install, last_env)
# Provide a top level devel space environment setup script
if not os.path.exists(develspace):
os.makedirs(develspace)
if not build_packages:
generated_env_sh = os.path.join(develspace, 'env.sh')
generated_setup_sh = os.path.join(develspace, 'setup.sh')
generated_setup_util_py = os.path.join(develspace, '_setup_util.py')
if not merge and pkg_develspace:
# generate env.sh and setup.sh which relay to last devel space
with open(generated_env_sh, 'w') as f:
f.write("""\
#!/usr/bin/env sh
# generated from catkin.builder module
{0} "$@"
""".format(os.path.join(pkg_develspace, 'env.sh')))
os.chmod(generated_env_sh, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
with open(generated_setup_sh, 'w') as f:
f.write("""\
#!/usr/bin/env sh
# generated from catkin.builder module
. "{0}/setup.sh"
""".format(pkg_develspace))
elif not pkg_develspace:
# generate env.sh and setup.sh for an empty devel space
if 'CMAKE_PREFIX_PATH' in os.environ.keys():
variables = {
'CATKIN_GLOBAL_BIN_DESTINATION': 'bin',
'CATKIN_GLOBAL_LIB_DESTINATION': 'lib',
'CMAKE_PREFIX_PATH_AS_IS': ';'.join(os.environ['CMAKE_PREFIX_PATH'].split(os.pathsep)),
'PYTHON_INSTALL_DIR': get_python_install_dir(),
'SETUP_DIR': '',
}
with open(generated_setup_util_py, 'w') as f:
f.write(configure_file(os.path.join(get_cmake_path(), 'templates', '_setup_util.py.in'), variables))
os.chmod(generated_setup_util_py, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
else:
sys.exit("Unable to process CMAKE_PREFIX_PATH from environment. Cannot generate environment files.")
variables = {
'SETUP_DIR': develspace,
'SETUP_FILENAME': 'setup'
}
with open(generated_env_sh, 'w') as f:
f.write(configure_file(os.path.join(get_cmake_path(), 'templates', 'env.sh.in'), variables))
os.chmod(generated_env_sh, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
variables = {'SETUP_DIR': develspace}
with open(generated_setup_sh, 'w') as f:
f.write(configure_file(os.path.join(get_cmake_path(), 'templates', 'setup.sh.in'), variables))
if not merge and pkg_develspace:
            # remove _setup_util.py file which might have been generated for an empty devel space
if os.path.exists(generated_setup_util_py):
os.remove(generated_setup_util_py)
if not merge or not pkg_develspace:
# generate setup.bash and setup.zsh for convenience
variables = {'SETUP_DIR': develspace}
with open(os.path.join(develspace, 'setup.bash'), 'w') as f:
f.write(configure_file(os.path.join(get_cmake_path(), 'templates', 'setup.bash.in'), variables))
with open(os.path.join(develspace, 'setup.zsh'), 'w') as f:
f.write(configure_file(os.path.join(get_cmake_path(), 'templates', 'setup.zsh.in'), variables))
def cmake_input_changed(packages, build_path, install=None, cmake_args=None, filename='catkin_make'):
# get current input
package_paths = os.pathsep.join(sorted(packages.keys()))
cmake_args = ' '.join(cmake_args) if cmake_args else ''
# file to store current input
changed = False
install_toggled = False
input_filename = os.path.join(build_path, '%s.cache' % filename)
if not os.path.exists(input_filename):
changed = True
else:
# compare with previously stored input
with open(input_filename, 'r') as f:
previous_package_paths = f.readline().rstrip()
previous_cmake_args = f.readline().rstrip()
previous_install = f.readline().rstrip() == str(True)
if package_paths != previous_package_paths:
changed = True
if cmake_args != previous_cmake_args:
changed = True
if install is not None and install != previous_install:
install_toggled = True
# store current input for next invocation
with open(input_filename, 'w') as f:
f.write('%s\n%s\n%s' % (package_paths, cmake_args, install))
return changed, install_toggled
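# Note: the cache file written above uses a simple three-line layout: the sorted,
# os.pathsep-joined package paths, the cmake arguments joined by spaces, and the
# str() of the install flag. A hypothetical 'catkin_make_isolated.cache' (paths
# shortened for illustration) might contain:
#   /ws/src/pkg_a:/ws/src/pkg_b
#   -DCMAKE_BUILD_TYPE=Release
#   False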
|
the-stack_0_9287 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Unit tests for gluon.contenttype
"""
import unittest
from .fix_path import fix_sys_path
fix_sys_path(__file__)
from gluon.contenttype import contenttype
from gluon._compat import iteritems
class TestContentType(unittest.TestCase):
def testTypeRecognition(self):
rtn = contenttype('.png')
self.assertEqual(rtn, 'image/png')
rtn = contenttype('.gif')
self.assertEqual(rtn, 'image/gif')
rtn = contenttype('.tar.bz2')
self.assertEqual(rtn, 'application/x-bzip-compressed-tar')
# test overrides and additions
mapping = {
'.load': 'text/html; charset=utf-8',
'.json': 'application/json',
'.jsonp': 'application/jsonp',
'.pickle': 'application/python-pickle',
'.w2p': 'application/w2p',
'.md': 'text/x-markdown; charset=utf-8'
}
for k, v in iteritems(mapping):
self.assertEqual(contenttype(k), v)
# test without dot extension
rtn = contenttype('png')
self.assertEqual(rtn, 'text/plain; charset=utf-8')
if __name__ == '__main__':
unittest.main()
|
the-stack_0_9288 | # %%
from numpy import array, matrix, zeros, empty, delete, insert, matmul, divide, add, subtract
from numpy import nanmax, seterr, shape
from numpy.linalg import solve
from scipy.sparse.linalg import spsolve
from scipy.sparse import csc_matrix
from math import isclose
from PyNite.Node3D import Node3D
from PyNite.Spring3D import Spring3D
from PyNite.Member3D import Member3D
from PyNite.Quad3D import Quad3D
from PyNite.Plate3D import Plate3D
from PyNite.LoadCombo import LoadCombo
# %%
class FEModel3D():
'''
A class representing a 3D finite element model.
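    Example (an illustrative cantilever sketch; the node names, coordinates,
    loads, and section/material values are arbitrary placeholders)::
        model = FEModel3D()
        model.AddNode('N1', 0, 0, 0)
        model.AddNode('N2', 10, 0, 0)
        model.AddMember('M1', 'N1', 'N2', E=29000, G=11200,
                        Iy=100, Iz=150, J=250, A=20)
        model.DefineSupport('N1', True, True, True, True, True, True)
        model.AddNodeLoad('N2', 'FY', -10)
        model.Analyze()
        print(model.GetNode('N2').DY['Combo 1'])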
'''
#%%
def __init__(self):
'''
Initializes a new 3D finite element model.
'''
self.Nodes = [] # A list of the structure's nodes
self.auxNodes = [] # A list of the structure's auxiliary nodes
self.Springs = [] # A list of the structure's springs
self.Members = [] # A list of the structure's members
        self.Quads = []      # A list of the structure's quadrilaterals
self.Plates = [] # A list of the structure's rectangular plates
self.__D = {} # A dictionary of the structure's nodal displacements by load combination
self.LoadCombos = {} # A dictionary of the structure's load combinations
#%%
def AddNode(self, Name, X, Y, Z):
'''
Adds a new node to the model.
Parameters
----------
Name : string
A unique user-defined name for the node.
X : number
The global X-coordinate of the node.
Y : number
The global Y-coordinate of the node.
Z : number
The global Z-coordinate of the node.
'''
# Create a new node
newNode = Node3D(Name, X, Y, Z)
# Add the new node to the list
self.Nodes.append(newNode)
#%%
def AddAuxNode(self, Name, X, Y, Z):
'''
Adds a new auxiliary node to the model.
Parameters
----------
Name : string
A unique user-defined name for the node.
X : number
The global X-coordinate of the node.
Y : number
The global Y-coordinate of the node.
Z : number
The global Z-coordinate of the node.
'''
# Create a new node
newNode = Node3D(Name, X, Y, Z)
# Add the new node to the list
self.auxNodes.append(newNode)
#%%
def AddSpring(self, Name, iNode, jNode, ks, tension_only=False, comp_only=False):
'''
Adds a new spring to the model.
Parameters
----------
Name : string
A unique user-defined name for the member.
iNode : string
The name of the i-node (start node).
jNode : string
The name of the j-node (end node).
ks : number
The spring constant (force/displacement).
tension_only : bool, optional
Indicates if the member is tension-only. Default is False.
comp_only : bool, optional
Indicates if the member is compression-only. Default is False.
'''
# Create a new spring
newSpring = Spring3D(Name, self.GetNode(iNode), self.GetNode(jNode), ks,
self.LoadCombos, tension_only=tension_only, comp_only=comp_only)
# Add the new member to the list
self.Springs.append(newSpring)
#%%
def AddMember(self, Name, iNode, jNode, E, G, Iy, Iz, J, A, auxNode=None,
tension_only=False, comp_only=False):
'''
Adds a new member to the model.
Parameters
----------
Name : string
A unique user-defined name for the member.
iNode : string
The name of the i-node (start node).
jNode : string
The name of the j-node (end node).
E : number
The modulus of elasticity of the member.
G : number
The shear modulus of the member.
Iy : number
The moment of inertia of the member about its local y-axis.
Iz : number
The moment of inertia of the member about its local z-axis.
J : number
The polar moment of inertia of the member.
A : number
The cross-sectional area of the member.
auxNode : string, optional
            The name of the auxiliary node used to define the local z-axis.
The default is for the program to define the axis instead of
using an auxiliary node.
tension_only : bool, optional
Indicates if the member is tension-only. Default is False.
comp_only : bool, optional
Indicates if the member is compression-only. Default is False.
'''
# Create a new member
if auxNode == None:
newMember = Member3D(Name, self.GetNode(iNode),
self.GetNode(jNode), E, G, Iy, Iz, J, A,
LoadCombos=self.LoadCombos, tension_only=tension_only, comp_only=comp_only)
else:
newMember = Member3D(Name, self.GetNode(iNode),
self.GetNode(jNode), E, G, Iy, Iz, J, A, self.GetAuxNode(auxNode),
self.LoadCombos, tension_only=tension_only, comp_only=comp_only)
# Add the new member to the list
self.Members.append(newMember)
#%%
def AddPlate(self, Name, iNode, jNode, mNode, nNode, t, E, nu):
'''
Adds a new plate to the model.
        Plates will be deprecated in a future version. Quadrilaterals are more
        versatile and will replace them.
Parameters
----------
Name : string
A unique user-defined name for the plate.
iNode : string
            The name of the i-node (1st node defined in clockwise order).
jNode : string
The name of the j-node (2nd node defined in clockwise order).
mNode : string
The name of the m-node (3rd node defined in clockwise order).
nNode : string
The name of the n-node (4th node defined in clockwise order).
t : number
The thickness of the plate.
E : number
The modulus of elasticity of the plate.
        nu : number
            Poisson's ratio for the plate.
'''
        # Create a new plate
newPlate = Plate3D(Name, self.GetNode(iNode), self.GetNode(jNode), self.GetNode(mNode), self.GetNode(nNode), t, E, nu)
        # Add the new plate to the list
self.Plates.append(newPlate)
#%%
def AddQuad(self, Name, iNode, jNode, mNode, nNode, t, E, nu):
'''
Adds a new quadrilateral to the model.
Quadrilaterals are similar to plates, except they do not have to be
rectangular. Plates will be dapricated in a future version. Note that
quadrilateral nodes are defined in counter-clockwise order instead of
the clockwise order that plates have used up to this point.
Parameters
----------
Name : string
A unique user-defined name for the quadrilateral.
iNode : string
            The name of the i-node (1st node defined in counter-clockwise order).
jNode : string
The name of the j-node (2nd node defined in counter-clockwise order).
mNode : string
The name of the m-node (3rd node defined in counter-clockwise order).
nNode : string
The name of the n-node (4th node defined in counter-clockwise order).
t : number
The thickness of the quadrilateral.
E : number
The modulus of elasticity of the quadrilateral.
        nu : number
            Poisson's ratio for the quadrilateral.
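        Example (hypothetical node names; the thickness and material values are
        arbitrary)::
            model.AddQuad('Q1', 'N1', 'N2', 'N3', 'N4', t=0.25, E=29000, nu=0.3)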
'''
        # Create a new quadrilateral
newQuad = Quad3D(Name, self.GetNode(iNode), self.GetNode(jNode), self.GetNode(mNode), self.GetNode(nNode), t, E, nu)
        # Add the new quadrilateral to the list
self.Quads.append(newQuad)
#%%
def RemoveNode(self, Node):
'''
Removes a node from the model. All nodal loads associated with the
node and members attached to the node will also be removed.
Parameters
----------
Node : string
The name of the node to be removed.
'''
# Remove the node. Nodal loads are stored within the node, so they
# will be deleted automatically when the node is deleted.
self.Nodes.remove(self.GetNode(Node))
# Find any members attached to the node and remove them
self.Members = [member for member in self.Members if member.iNode.Name != Node and member.jNode.Name != Node]
#%%
def RemoveSpring(self, Spring):
'''
Removes a spring from the model.
Parameters
----------
Spring : string
The name of the spring to be removed.
'''
# Remove the spring.
self.Springs.remove(self.GetSpring(Spring))
#%%
def RemoveMember(self, Member):
'''
Removes a member from the model. All member loads associated with the
member will also be removed.
Parameters
----------
Member : string
The name of the member to be removed.
'''
# Remove the member. Member loads are stored within the member, so they
# will be deleted automatically when the member is deleted.
self.Members.remove(self.GetMember(Member))
#%%
def DefineSupport(self, Node, SupportDX=False, SupportDY=False, SupportDZ=False, SupportRX=False, SupportRY=False, SupportRZ=False):
'''
Defines the support conditions at a node.
Nodes will default to fully unsupported unless specified otherwise.
Parameters
----------
Node : string
The name of the node where the support is being defined
SupportDX : number
Indicates whether the node is supported against translation in the global X-direction.
SupportDY : number
Indicates whether the node is supported against translation in the global Y-direction.
SupportDZ : number
Indicates whether the node is supported against translation in the global Z-direction.
SupportRX : number
Indicates whether the node is supported against rotation about the global X-axis.
SupportRY : number
Indicates whether the node is supported against rotation about the global Y-axis.
SupportRZ : number
Indicates whether the node is supported against rotation about the global Z-axis.
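        Example (a fixed support at a hypothetical node 'N1' and a pinned support
        at 'N2')::
            model.DefineSupport('N1', True, True, True, True, True, True)
            model.DefineSupport('N2', SupportDX=True, SupportDY=True, SupportDZ=True)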
'''
# Get the node to be supported
node = self.GetNode(Node)
# Set the node's support conditions
node.SupportDX = SupportDX
node.SupportDY = SupportDY
node.SupportDZ = SupportDZ
node.SupportRX = SupportRX
node.SupportRY = SupportRY
node.SupportRZ = SupportRZ
#%%
def AddNodeDisplacement (self, Node, Direction, Magnitude):
'''
Defines a nodal displacement at a node.
Node : string
The name of the node where the nodal displacement is being applied.
Direction : {'DX', 'DY', 'DZ', 'RX', 'RY', 'RZ'}
The global direction the nodal displacement is being applied in. Displacements are 'DX', 'DY', and 'DZ'. Rotations are 'RX', 'RY', and 'RZ'.
Sign convention follows the model's global coordinate system.
Magnitude : number
The magnitude of the displacement.
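        Example (imposing a hypothetical 0.25-unit settlement in the global Y
        direction at node 'N1')::
            model.AddNodeDisplacement('N1', 'DY', -0.25)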
'''
# Validate the value of Direction
if Direction not in ('DX', 'DY', 'DZ', 'RX', 'RY', 'RZ'):
raise ValueError(f"Direction must be 'DX', 'DY', 'DZ', 'RX', 'RY', or 'RZ'. {Direction} was given.")
# Get the node
node = self.GetNode(Node)
if Direction == 'DX':
node.EnforcedDX = Magnitude
if Direction == 'DY':
node.EnforcedDY = Magnitude
if Direction == 'DZ':
node.EnforcedDZ = Magnitude
if Direction == 'RX':
node.EnforcedRX = Magnitude
if Direction == 'RY':
node.EnforcedRY = Magnitude
if Direction == 'RZ':
node.EnforcedRZ = Magnitude
#%%
def DefineReleases(self, Member, Dxi=False, Dyi=False, Dzi=False, Rxi=False, Ryi=False, Rzi=False, Dxj=False, Dyj=False, Dzj=False, Rxj=False, Ryj=False, Rzj=False):
'''
Defines member end releases.
All member end releases will default to unreleased unless specified otherwise.
Parameters
----------
Member : string
The name of the member to have its releases modified.
Dxi : boolean
Indicates whether the member is released axially at its start.
Dyi : boolean
Indicates whether the member is released for shear in the local y-axis at its start.
Dzi : boolean
Indicates whether the member is released for shear in the local z-axis at its start.
Rxi : boolean
Indicates whether the member is released for torsion at its start.
Ryi : boolean
Indicates whether the member is released for moment about the local y-axis at its start.
Rzi : boolean
Indicates whether the member is released for moment about the local z-axis at its start.
Dxj : boolean
Indicates whether the member is released axially at its end.
Dyj : boolean
Indicates whether the member is released for shear in the local y-axis at its end.
Dzj : boolean
Indicates whether the member is released for shear in the local z-axis.
Rxj : boolean
Indicates whether the member is released for torsion at its end.
Ryj : boolean
Indicates whether the member is released for moment about the local y-axis at its end.
Rzj : boolean
Indicates whether the member is released for moment about the local z-axis at its end.
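        Example (releasing moments about the local y- and z-axes at both ends of a
        hypothetical member 'M1', i.e. modeling it as pin-ended)::
            model.DefineReleases('M1', Ryi=True, Rzi=True, Ryj=True, Rzj=True)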
'''
# Apply the end releases to the member
self.GetMember(Member).Releases = [Dxi, Dyi, Dzi, Rxi, Ryi, Rzi, Dxj, Dyj, Dzj, Rxj, Ryj, Rzj]
#%%
def AddLoadCombo(self, name, factors, combo_type='strength'):
'''
Adds a load combination to the model
Parameters
----------
name : string
A unique name for the load combination (e.g. '1.2D+1.6L+0.5S' or 'Gravity Combo').
factors : dictionary
A dictionary containing load cases and their corresponding factors (e.g. {'D':1.2, 'L':1.6, 'S':0.5}).
combo_type : string
A description of the type of load combination (e.g. 'strength', 'service'). Currently
this does nothing in the program, and is a placeholder for future features.
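        Example (the load cases and factors shown are arbitrary placeholders)::
            model.AddLoadCombo('1.2D+1.6L', factors={'D': 1.2, 'L': 1.6})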
'''
# Create a new load combination object
new_combo = LoadCombo(name, combo_type, factors)
# Add the load combination to the dictionary of load combinations
self.LoadCombos[name] = new_combo
#%%
def AddNodeLoad(self, Node, Direction, P, case='Case 1'):
'''
Adds a nodal load to the model.
Parameters
----------
Node : string
The name of the node where the load is being applied.
Direction : {'FX', 'FY', 'FZ', 'MX', 'MY', 'MZ'}
The global direction the load is being applied in. Forces are 'FX', 'FY', and 'FZ'. Moments are 'MX', 'MY', and 'MZ'.
P : number
The numeric value (magnitude) of the load.
case : string
The name of the load case the load belongs to.
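        Example (hypothetical node name and magnitude, assigned to load case 'D')::
            model.AddNodeLoad('N2', 'FZ', -5, case='D')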
'''
# Validate the value of Direction
if Direction not in ('FX', 'FY', 'FZ', 'MX', 'MY', 'MZ'):
raise ValueError(f"Direction must be 'FX', 'FY', 'FZ', 'MX', 'MY', or 'MZ'. {Direction} was given.")
# Add the node load to the model
self.GetNode(Node).NodeLoads.append((Direction, P, case))
#%%
def AddMemberPtLoad(self, Member, Direction, P, x, case='Case 1'):
'''
Adds a member point load to the model.
Parameters
----------
Member : string
The name of the member the load is being applied to.
Direction : {'Fx', 'Fy', 'Fz', 'Mx', 'My', 'Mz'}
The direction in which the force is to be applied. Note that
typical beam sign convention is used. Transverse forces acting
toward the beam are positive. Moments are positive if they act
counter-clockwise relative to the beam's local coordinate system.
Torsional point loads follow the right hand rule for sign convention.
P : number
The numeric value (magnitude) of the load.
x : number
The load's location along the member's local x-axis.
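        case : string
            The name of the load case the load belongs to.
        Example (hypothetical member name and load values)::
            model.AddMemberPtLoad('M1', 'Fy', -10, x=5)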
'''
# Validate the value of Direction
if Direction not in ('Fx', 'Fy', 'Fz', 'Mx', 'My', 'Mz'):
raise ValueError(f"Direction must be 'Fx', 'Fy', 'Fz', 'Mx', 'My', or 'Mz'. {Direction} was given.")
# Add the point load to the member
self.GetMember(Member).PtLoads.append((Direction, P, x, case))
#%%
def AddMemberDistLoad(self, Member, Direction, w1, w2, x1=None, x2=None, case='Case 1'):
'''
Adds a member distributed load to the model.
Parameters
----------
Member : string
            The name of the member the load is being applied to.
Direction : {'Fx', 'Fy', 'Fz'}
The direction in which the load is to be applied. Note that
typical beam sign convention is used. Forces acting toward the beam
are positive.
w1 : number
The starting value (magnitude) of the load.
w2 : number
The ending value (magnitude) of the load.
x1 : number
The load's start location along the member's local x-axis. If this argument
is not specified, the start of the member will be used.
x2 : number
The load's end location along the member's local x-axis. If this argument
is not specified, the end of the member will be used.
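        case : string
            The name of the load case the load belongs to.
        Example (a uniform load over the full length of a hypothetical member 'M1';
        the magnitude is an arbitrary value)::
            model.AddMemberDistLoad('M1', 'Fy', w1=-0.5, w2=-0.5)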
'''
# Validate the value of Direction
if Direction not in ('Fx', 'Fy', 'Fz'):
raise ValueError(f"Direction must be 'Fx', 'Fy', 'Fz'. {Direction} was given.")
# Determine if a starting and ending points for the load have been specified.
# If not, use the member start and end as defaults
if x1 == None:
start = 0
else:
start = x1
if x2 == None:
end = self.GetMember(Member).L()
else:
end = x2
# Add the distributed load to the member
self.GetMember(Member).DistLoads.append((Direction, w1, w2, start, end, case))
#%%
def AddPlateSurfacePressure(self, plate_ID, pressure, case='Case 1'):
'''
Adds a surface pressure to the rectangular plate element.
'''
# Add the surface pressure to the rectangle
self.GetPlate(plate_ID).pressures.append([pressure, case])
#%%
def AddQuadSurfacePressure(self, quad_ID, pressure, case='Case 1'):
'''
Adds a surface pressure to the quadrilateral element.
'''
# Add the surface pressure to the quadrilateral
self.GetQuad(quad_ID).pressures.append([pressure, case])
#%%
def ClearLoads(self):
'''
Clears all loads from the model along with any results based on the loads.
'''
# Clear out the member loads and the calculated internal forces
for member in self.Members:
member.DistLoads = []
member.PtLoads = []
member.SegmentsZ = []
member.SegmentsY = []
member.SegmentsX = []
# Clear out the nodal loads, calculated displacements, and calculated reactions
for node in self.Nodes:
node.NodeLoads = []
node.DX = {}
node.DY = {}
node.DZ = {}
node.RX = {}
node.RY = {}
node.RZ = {}
node.RxnFX = {}
node.RxnFY = {}
node.RxnFZ = {}
node.RxnMX = {}
node.RxnMY = {}
node.RxnMZ = {}
#%%
def GetNode(self, Name):
'''
Returns the node with the given name.
Parameters
----------
Name : string
The name of the node to be returned.
'''
# Step through each node in the 'Nodes' list
for node in self.Nodes:
# Check the name of the node
if node.Name == Name:
# Return the node of interest
return node
# if the node name is not found and loop finishes
raise ValueError(f"Node '{Name}' was not found in the model")
def GetAuxNode(self, Name):
'''
Returns the auxiliary node with the given name.
Parameters
----------
Name : string
The name of the auxiliary node to be returned.
'''
# Step through each node in the 'Nodes' list
for node in self.auxNodes:
# Check the name of the node
if node.Name == Name:
# Return the node of interest
return node
# If the node name is not found and loop finishes
raise ValueError(f"AuxNode '{Name}' was not found in the model")
#%%
def GetSpring(self, Name):
'''
Returns the spring with the given name.
Parameters
----------
Name : string
The name of the spring to be returned.
'''
# Step through each spring in the 'Springs' list
for spring in self.Springs:
# Check the name of the member
if spring.Name == Name:
# Return the spring of interest
return spring
# If the spring name is not found and loop finishes
raise ValueError(f"Spring '{Name}' was not found in the model")
#%%
def GetMember(self, Name):
'''
Returns the member with the given name.
Parameters
----------
Name : string
The name of the member to be returned.
'''
# Step through each member in the 'Members' list
for member in self.Members:
# Check the name of the member
if member.Name == Name:
# Return the member of interest
return member
# If the member name is not found and loop finishes
raise ValueError(f"Member '{Name}' was not found in the model")
#%%
def GetPlate(self, Name):
'''
Returns the plate with the given name.
Parameters
----------
Name : string
The name of the plate to be returned.
'''
# Step through each plate in the 'Plates' list
for plate in self.Plates:
# Check the name of the plate
if plate.Name == Name:
# Return the plate of interest
return plate
# Raise an exception if the plate name is not found and loop finishes
raise ValueError(f"Plate '{Name}' was not found in the model")
#%%
def GetQuad(self, Name):
'''
Returns the quadrilateral with the given name.
Parameters
----------
Name : string
The name of the quadrilateral to be returned.
'''
# Step through each quadrilateral in the 'Quads' list
for quad in self.Quads:
# Check the name of the quadrilateral
if quad.Name == Name:
# Return the quadrilateral of interest
return quad
        # Raise an exception if the quadrilateral name is not found and loop
# finishes
raise ValueError(f"Quadrilateral '{Name}' was not found in the model")
#%%
def __Renumber(self):
'''
Assigns node, spring, member, and plate member ID numbers to be used internally by the
program. Numbers are assigned according to the order nodes, springs, members, and plates
were added to the model.
'''
# Number each node in the model
i = 0
for node in self.Nodes:
node.ID = i
i += 1
# Number each spring in the model
i = 0
for spring in self.Springs:
spring.ID = i
i += 1
# Number each member in the model
i = 0
for member in self.Members:
member.ID = i
i += 1
# Number each plate in the model
i = 0
for plate in self.Plates:
plate.ID = i
i += 1
# Number each quadrilateral in the model
i = 0
for quad in self.Quads:
quad.ID = i
i += 1
#%%
def __AuxList(self):
'''
Builds a list with known nodal displacements and with the positions in global stiffness matrix of known
and unknown nodal displacements
Returns
-------
        D1_indices : list
            A list of the global matrix indices for the unknown nodal displacements
        D2_indices : list
            A list of the global matrix indices for the known nodal displacements
        D2 : list
            A list of the known nodal displacements
'''
D1_indices = [] # A list of the indices for the unknown nodal displacements
D2_indices = [] # A list of the indices for the known nodal displacements
D2 = [] # A list of the values of the known nodal displacements (D != None)
# Create the auxiliary table
for node in self.Nodes:
# Unknown displacement DX
if node.SupportDX == False and node.EnforcedDX == None:
D1_indices.append((node.ID*6) + 0)
# Known displacement DX
elif node.EnforcedDX != None:
D2_indices.append((node.ID*6) + 0)
D2.append(node.EnforcedDX)
# Support at DX
else:
D2_indices.append((node.ID*6) + 0)
D2.append(0.0)
# Unknown displacement DY
if node.SupportDY == False and node.EnforcedDY == None:
D1_indices.append((node.ID*6) + 1)
# Known displacement DY
elif node.EnforcedDY != None:
D2_indices.append((node.ID*6) + 1)
D2.append(node.EnforcedDY)
# Support at DY
else:
D2_indices.append((node.ID*6) + 1)
D2.append(0.0)
# Unknown displacement DZ
if node.SupportDZ == False and node.EnforcedDZ == None:
D1_indices.append((node.ID*6) + 2)
# Known displacement DZ
elif node.EnforcedDZ != None:
D2_indices.append((node.ID*6) + 2)
D2.append(node.EnforcedDZ)
# Support at DZ
else:
D2_indices.append((node.ID*6) + 2)
D2.append(0.0)
# Unknown displacement RX
if node.SupportRX == False and node.EnforcedRX == None:
D1_indices.append((node.ID*6) + 3)
# Known displacement RX
elif node.EnforcedRX != None:
D2_indices.append((node.ID*6) + 3)
D2.append(node.EnforcedRX)
# Support at RX
else:
D2_indices.append((node.ID*6) + 3)
D2.append(0.0)
# Unknown displacement RY
if node.SupportRY == False and node.EnforcedRY == None:
D1_indices.append((node.ID*6) + 4)
# Known displacement RY
elif node.EnforcedRY != None:
D2_indices.append((node.ID*6) + 4)
D2.append(node.EnforcedRY)
# Support at RY
else:
D2_indices.append((node.ID*6) + 4)
D2.append(0.0)
# Unknown displacement RZ
if node.SupportRZ == False and node.EnforcedRZ == None:
D1_indices.append((node.ID*6) + 5)
# Known displacement RZ
elif node.EnforcedRZ != None:
D2_indices.append((node.ID*6) + 5)
D2.append(node.EnforcedRZ)
# Support at RZ
else:
D2_indices.append((node.ID*6) + 5)
D2.append(0.0)
# Return the indices and the known displacements
return D1_indices, D2_indices, D2
#%%
    def K(self, combo_name='Combo 1'):
'''
Assembles and returns the global stiffness matrix.
'''
# Initialize a zero matrix to hold all the stiffness terms
K = zeros((len(self.Nodes)*6, len(self.Nodes)*6))
# Add stiffness terms for each spring in the model
print('...Adding spring stiffness terms to global stiffness matrix')
for spring in self.Springs:
if spring.active[combo_name] == True:
# Get the spring's global stiffness matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
spring_K = spring.K()
# Step through each term in the spring's stiffness matrix
# 'a' & 'b' below are row/column indices in the spring's stiffness matrix
# 'm' & 'n' are corresponding row/column indices in the global stiffness matrix
for a in range(12):
# Determine if index 'a' is related to the i-node or j-node
if a < 6:
# Find the corresponding index 'm' in the global stiffness matrix
m = spring.iNode.ID*6 + a
else:
# Find the corresponding index 'm' in the global stiffness matrix
m = spring.jNode.ID*6 + (a-6)
for b in range(12):
# Determine if index 'b' is related to the i-node or j-node
if b < 6:
# Find the corresponding index 'n' in the global stiffness matrix
n = spring.iNode.ID*6 + b
else:
# Find the corresponding index 'n' in the global stiffness matrix
n = spring.jNode.ID*6 + (b-6)
# Now that 'm' and 'n' are known, place the term in the global stiffness matrix
K.itemset((m, n), K.item((m, n)) + spring_K.item((a, b)))
# Add stiffness terms for each member in the model
print('...Adding member stiffness terms to global stiffness matrix')
for member in self.Members:
if member.active[combo_name] == True:
# Get the member's global stiffness matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
member_K = member.K()
# Step through each term in the member's stiffness matrix
# 'a' & 'b' below are row/column indices in the member's stiffness matrix
# 'm' & 'n' are corresponding row/column indices in the global stiffness matrix
for a in range(12):
# Determine if index 'a' is related to the i-node or j-node
if a < 6:
# Find the corresponding index 'm' in the global stiffness matrix
m = member.iNode.ID*6 + a
else:
# Find the corresponding index 'm' in the global stiffness matrix
m = member.jNode.ID*6 + (a-6)
for b in range(12):
# Determine if index 'b' is related to the i-node or j-node
if b < 6:
# Find the corresponding index 'n' in the global stiffness matrix
n = member.iNode.ID*6 + b
else:
# Find the corresponding index 'n' in the global stiffness matrix
n = member.jNode.ID*6 + (b-6)
# Now that 'm' and 'n' are known, place the term in the global stiffness matrix
K.itemset((m, n), K.item((m, n)) + member_K.item((a, b)))
# Add stiffness terms for each quadrilateral in the model
print('...Adding quadrilateral stiffness terms to global stiffness matrix')
for quad in self.Quads:
# Get the quadrilateral's global stiffness matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
quad_K = quad.K()
# Step through each term in the quadrilateral's stiffness matrix
# 'a' & 'b' below are row/column indices in the quadrilateral's stiffness matrix
# 'm' & 'n' are corresponding row/column indices in the global stiffness matrix
for a in range(24):
# Determine which node the index 'a' is related to
if a < 6:
# Find the corresponding index 'm' in the global stiffness matrix
m = quad.mNode.ID*6 + a
elif a < 12:
# Find the corresponding index 'm' in the global stiffness matrix
m = quad.nNode.ID*6 + (a-6)
elif a < 18:
# Find the corresponding index 'm' in the global stiffness matrix
m = quad.iNode.ID*6 + (a-12)
else:
# Find the corresponding index 'm' in the global stiffness matrix
m = quad.jNode.ID*6 + (a-18)
for b in range(24):
# Determine which node the index 'b' is related to
if b < 6:
# Find the corresponding index 'n' in the global stiffness matrix
n = quad.mNode.ID*6 + b
elif b < 12:
# Find the corresponding index 'n' in the global stiffness matrix
n = quad.nNode.ID*6 + (b-6)
elif b < 18:
# Find the corresponding index 'n' in the global stiffness matrix
n = quad.iNode.ID*6 + (b-12)
else:
# Find the corresponding index 'n' in the global stiffness matrix
n = quad.jNode.ID*6 + (b-18)
# Now that 'm' and 'n' are known, place the term in the global stiffness matrix
K.itemset((m, n), K.item((m, n)) + quad_K.item((a, b)))
# Add stiffness terms for each plate in the model
print('...Adding plate stiffness terms to global stiffness matrix')
for plate in self.Plates:
# Get the plate's global stiffness matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
plate_K = plate.K()
# Step through each term in the plate's stiffness matrix
# 'a' & 'b' below are row/column indices in the plate's stiffness matrix
# 'm' & 'n' are corresponding row/column indices in the global stiffness matrix
for a in range(24):
# Determine which node the index 'a' is related to
if a < 6:
# Find the corresponding index 'm' in the global stiffness matrix
m = plate.iNode.ID*6 + a
elif a < 12:
# Find the corresponding index 'm' in the global stiffness matrix
m = plate.nNode.ID*6 + (a-6)
elif a < 18:
# Find the corresponding index 'm' in the global stiffness matrix
m = plate.mNode.ID*6 + (a-12)
else:
# Find the corresponding index 'm' in the global stiffness matrix
m = plate.jNode.ID*6 + (a-18)
for b in range(24):
# Determine which node the index 'b' is related to
if b < 6:
# Find the corresponding index 'n' in the global stiffness matrix
n = plate.iNode.ID*6 + b
elif b < 12:
# Find the corresponding index 'n' in the global stiffness matrix
n = plate.nNode.ID*6 + (b-6)
elif b < 18:
# Find the corresponding index 'n' in the global stiffness matrix
n = plate.mNode.ID*6 + (b-12)
else:
# Find the corresponding index 'n' in the global stiffness matrix
n = plate.jNode.ID*6 + (b-18)
# Now that 'm' and 'n' are known, place the term in the global stiffness matrix
K.itemset((m, n), K.item((m, n)) + plate_K.item((a, b)))
# Return the global stiffness matrix
return K
#%%
def Kg(self, combo_name='Combo 1'):
'''
Assembles and returns the global geometric stiffness matrix.
The model must have a static solution prior to obtaining the geometric stiffness matrix.
Stiffness of plates is not included.
Parameters
----------
combo_name : string
The name of the load combination to derive the matrix for (not the load combination itself).
'''
# Initialize a zero matrix to hold all the stiffness terms
Kg = zeros((len(self.Nodes)*6, len(self.Nodes)*6))
# Add stiffness terms for each member in the model
print('...Adding member geometric stiffness terms to global geometric stiffness matrix')
for member in self.Members:
if member.active[combo_name] == True:
# Calculate the axial force in the member
E = member.E
A = member.A
L = member.L()
d = member.d(combo_name)
P = E*A/L*(d[6, 0] - d[0, 0])
# Get the member's global stiffness matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
member_Kg = member.Kg(P)
# Step through each term in the member's stiffness matrix
# 'a' & 'b' below are row/column indices in the member's stiffness matrix
# 'm' & 'n' are corresponding row/column indices in the global stiffness matrix
for a in range(12):
# Determine if index 'a' is related to the i-node or j-node
if a < 6:
# Find the corresponding index 'm' in the global stiffness matrix
m = member.iNode.ID*6 + a
else:
# Find the corresponding index 'm' in the global stiffness matrix
m = member.jNode.ID*6 + (a-6)
for b in range(12):
# Determine if index 'b' is related to the i-node or j-node
if b < 6:
# Find the corresponding index 'n' in the global stiffness matrix
n = member.iNode.ID*6 + b
else:
# Find the corresponding index 'n' in the global stiffness matrix
n = member.jNode.ID*6 + (b-6)
# Now that 'm' and 'n' are known, place the term in the global stiffness matrix
Kg.itemset((m, n), Kg.item((m, n)) + member_Kg.item((a, b)))
# Return the global geometric stiffness matrix
return Kg
#%%
def FER(self, combo_name='Combo 1'):
'''
Assembles and returns the global fixed end reaction vector.
Parameters
----------
combo_name : string
The name of the load combination to get the fixed end reaction vector for (not the load combination itself).
'''
# Initialize a zero vector to hold all the terms
FER = zeros((len(self.Nodes) * 6, 1))
# Add terms for each member in the model
for member in self.Members:
# Get the member's global fixed end reaction vector
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
member_FER = member.FER(combo_name)
# Step through each term in the member's fixed end reaction vector
# 'a' below is the row index in the member's fixed end reaction vector
# 'm' below is the corresponding row index in the global fixed end reaction vector
for a in range(12):
# Determine if index 'a' is related to the i-node or j-node
if a < 6:
# Find the corresponding index 'm' in the global fixed end reaction vector
m = member.iNode.ID * 6 + a
else:
# Find the corresponding index 'm' in the global fixed end reaction vector
m = member.jNode.ID * 6 + (a - 6)
# Now that 'm' is known, place the term in the global fixed end reaction vector
FER.itemset((m, 0), FER[m, 0] + member_FER[a, 0])
# Add terms for each rectangle in the model
for plate in self.Plates:
# Get the quadrilateral's global fixed end reaction vector
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
plate_FER = plate.FER(combo_name)
# Step through each term in the quadrilateral's fixed end reaction vector
# 'a' below is the row index in the quadrilateral's fixed end reaction vector
# 'm' below is the corresponding row index in the global fixed end reaction vector
for a in range(24):
# Determine if index 'a' is related to the i-node, j-node, m-node, or n-node
if a < 6:
# Find the corresponding index 'm' in the global fixed end reaction vector
m = plate.iNode.ID*6 + a
elif a < 12:
# Find the corresponding index 'm' in the global fixed end reaction vector
m = plate.nNode.ID*6 + (a - 6)
elif a < 18:
# Find the corresponding index 'm' in the global fixed end reaction vector
m = plate.mNode.ID*6 + (a - 12)
else:
# Find the corresponding index 'm' in the global fixed end reaction vector
m = plate.jNode.ID*6 + (a - 18)
# Now that 'm' is known, place the term in the global fixed end reaction vector
FER.itemset((m, 0), FER[m, 0] + plate_FER[a, 0])
# Add terms for each quadrilateral in the model
for quad in self.Quads:
# Get the quadrilateral's global fixed end reaction vector
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
quad_FER = quad.FER(combo_name)
# Step through each term in the quadrilateral's fixed end reaction vector
# 'a' below is the row index in the quadrilateral's fixed end reaction vector
# 'm' below is the corresponding row index in the global fixed end reaction vector
for a in range(24):
# Determine if index 'a' is related to the i-node, j-node, m-node, or n-node
if a < 6:
# Find the corresponding index 'm' in the global fixed end reaction vector
m = quad.mNode.ID*6 + a
elif a < 12:
# Find the corresponding index 'm' in the global fixed end reaction vector
m = quad.nNode.ID*6 + (a - 6)
elif a < 18:
# Find the corresponding index 'm' in the global fixed end reaction vector
m = quad.iNode.ID*6 + (a - 12)
else:
# Find the corresponding index 'm' in the global fixed end reaction vector
m = quad.jNode.ID*6 + (a - 18)
# Now that 'm' is known, place the term in the global fixed end reaction vector
FER.itemset((m, 0), FER[m, 0] + quad_FER[a, 0])
# Return the global fixed end reaction vector
return FER
#%%
def P(self, combo_name='Combo 1'):
'''
Assembles and returns the global nodal force vector.
Parameters
----------
combo_name : string
The name of the load combination to get the force vector for (not the load combination itself).
'''
# Initialize a zero vector to hold all the terms
P = zeros((len(self.Nodes)*6, 1))
# Add terms for each node in the model
for node in self.Nodes:
# Get the node's ID
ID = node.ID
# Get the load combination for the given 'combo_name'
combo = self.LoadCombos[combo_name]
# Step through each load factor in the load combination
for case, factor in combo.factors.items():
# Add the node's loads to the global nodal load vector
for load in node.NodeLoads:
if load[2] == case:
if load[0] == 'FX':
P.itemset((ID*6 + 0, 0), P[ID*6 + 0, 0] + factor*load[1])
elif load[0] == 'FY':
P.itemset((ID*6 + 1, 0), P[ID*6 + 1, 0] + factor*load[1])
elif load[0] == 'FZ':
P.itemset((ID*6 + 2, 0), P[ID*6 + 2, 0] + factor*load[1])
elif load[0] == 'MX':
P.itemset((ID*6 + 3, 0), P[ID*6 + 3, 0] + factor*load[1])
elif load[0] == 'MY':
P.itemset((ID*6 + 4, 0), P[ID*6 + 4, 0] + factor*load[1])
elif load[0] == 'MZ':
P.itemset((ID*6 + 5, 0), P[ID*6 + 5, 0] + factor*load[1])
# Return the global nodal force vector
return P
#%%
def D(self, combo_name='Combo 1'):
'''
Returns the global displacement vector for the model.
Parameters
----------
combo_name : string
The name of the load combination to get the displacements for (not the load combination itself).
'''
# Return the global displacement vector
return self.__D[combo_name]
#%%
def __Partition(self, unp_matrix, D1_indices, D2_indices):
'''
Partitions a matrix into submatrices based on degree of freedom boundary conditions
Parameters
----------
unp_matrix : matrix
The unpartitioned matrix to be partitioned.
'''
if unp_matrix.shape[1] == 1:
m1 = unp_matrix[D1_indices, :]
m2 = unp_matrix[D2_indices, :]
return m1, m2
else:
m11 = unp_matrix[D1_indices, :][:, D1_indices]
m12 = unp_matrix[D1_indices, :][:, D2_indices]
m21 = unp_matrix[D2_indices, :][:, D1_indices]
m22 = unp_matrix[D2_indices, :][:, D2_indices]
return m11, m12, m21, m22
#%%
def Analyze(self, check_statics=False, max_iter=30, sparse=True):
'''
Performs first-order static analysis.
Iterations are performed if tension-only members or
compression-only members are present.
Parameters
----------
check_statics : bool, optional
When set to True, causes a statics check to be performed
max_iter : number, optional
The maximum number of iterations to try to get convergence
for tension/compression-only analysis.
sparse : bool, optional
Indicates whether the sparse matrix solver should be used. A matrix can be considered
            sparse or dense depending on how many zero terms there are. Structural stiffness
matrices often contain many zero terms. The sparse solver can offer faster solutions
for such matrices. Using the sparse solver on dense matrices may lead to slower
solution times.
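        Example (assuming 'model' is a fully defined FEModel3D instance)::
            model.Analyze(check_statics=True, sparse=False)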
'''
print('+-----------+')
print('| Analyzing |')
print('+-----------+')
# Assign an ID to all nodes and elements in the model
self.__Renumber()
# Ensure there is at least 1 load combination to solve if the user didn't define any
if self.LoadCombos == {}:
# Create and add a default load combination to the dictionary of load combinations
self.LoadCombos['Combo 1'] = LoadCombo('Combo 1', factors={'Case 1':1.0})
# Activate all springs and members for all load combinations
for spring in self.Springs:
for combo_name in self.LoadCombos.keys():
spring.active[combo_name] = True
for member in self.Members:
for combo_name in self.LoadCombos.keys():
member.active[combo_name] = True
# Get the auxiliary list used to determine how the matrices will be partitioned
D1_indices, D2_indices, D2 = self.__AuxList()
# Convert D2 from a list to a matrix
D2 = matrix(D2).T
# Step through each load combination
for combo in self.LoadCombos.values():
print('')
print('...Analyzing load combination ' + combo.name)
# Keep track of the number of iterations
iter_count = 1
convergence = False
divergence = False
# Iterate until convergence or divergence occurs
while convergence == False and divergence == False:
# Get the partitioned global stiffness matrix K11, K12, K21, K22
K11, K12, K21, K22 = self.__Partition(self.K(combo.name), D1_indices, D2_indices)
# Get the partitioned global fixed end reaction vector
FER1, FER2 = self.__Partition(self.FER(combo.name), D1_indices, D2_indices)
# Get the partitioned global nodal force vector
P1, P2 = self.__Partition(self.P(combo.name), D1_indices, D2_indices)
# Calculate the global displacement vector
print('...Calculating global displacement vector for load combination', combo.name)
if K11.shape == (0, 0):
# All displacements are known, so D1 is an empty vector
D1 = []
else:
try:
# Calculate the unknown displacements D1
if sparse == True:
D1 = spsolve(csc_matrix(K11), subtract(subtract(P1, FER1), matmul(K12, D2)))
D1 = D1.reshape(len(D1), 1)
else:
D1 = solve(K11, subtract(subtract(P1, FER1), matmul(K12, D2)))
except:
                        # Raise an exception if 'K' is singular and provide an error message
raise Exception('The stiffness matrix is singular, which implies rigid body motion. The structure is unstable. Aborting analysis.')
# Form the global displacement vector, D, from D1 and D2
D = zeros((len(self.Nodes)*6, 1))
for node in self.Nodes:
if D2_indices.count(node.ID*6 + 0) == 1:
D.itemset((node.ID*6 + 0, 0), D2[D2_indices.index(node.ID*6 + 0), 0])
else:
D.itemset((node.ID*6 + 0, 0), D1[D1_indices.index(node.ID*6 + 0), 0])
if D2_indices.count(node.ID*6 + 1) == 1:
D.itemset((node.ID*6 + 1, 0), D2[D2_indices.index(node.ID*6 + 1), 0])
else:
D.itemset((node.ID*6 + 1, 0), D1[D1_indices.index(node.ID*6 + 1), 0])
if D2_indices.count(node.ID*6 + 2) == 1:
D.itemset((node.ID*6 + 2, 0), D2[D2_indices.index(node.ID*6 + 2), 0])
else:
D.itemset((node.ID*6 + 2, 0), D1[D1_indices.index(node.ID*6 + 2), 0])
if D2_indices.count(node.ID*6 + 3) == 1:
D.itemset((node.ID*6 + 3, 0), D2[D2_indices.index(node.ID*6 + 3), 0])
else:
D.itemset((node.ID*6 + 3, 0), D1[D1_indices.index(node.ID*6 + 3), 0])
if D2_indices.count(node.ID*6 + 4) == 1:
D.itemset((node.ID*6 + 4, 0), D2[D2_indices.index(node.ID*6 + 4), 0])
else:
D.itemset((node.ID*6 + 4, 0), D1[D1_indices.index(node.ID*6 + 4), 0])
if D2_indices.count(node.ID*6 + 5) == 1:
D.itemset((node.ID*6 + 5, 0), D2[D2_indices.index(node.ID*6 + 5), 0])
else:
D.itemset((node.ID*6 + 5, 0), D1[D1_indices.index(node.ID*6 + 5), 0])
# Save the global displacement vector
self.__D[combo.name] = D
# Store the calculated global nodal displacements into each node
for node in self.Nodes:
node.DX[combo.name] = D[node.ID*6 + 0, 0]
node.DY[combo.name] = D[node.ID*6 + 1, 0]
node.DZ[combo.name] = D[node.ID*6 + 2, 0]
node.RX[combo.name] = D[node.ID*6 + 3, 0]
node.RY[combo.name] = D[node.ID*6 + 4, 0]
node.RZ[combo.name] = D[node.ID*6 + 5, 0]
# Check for divergence
if iter_count > max_iter:
divergence = True
raise Exception('...Model diverged during tension/compression-only analysis')
# Assume the model has converged (to be checked below)
convergence = True
# Check tension-only and compression-only springs
print('...Checking for tension/compression-only spring convergence')
for spring in self.Springs:
if spring.active[combo.name] == True:
# Check if tension-only conditions exist
if spring.tension_only == True and spring.Axial(combo.name) > 0:
spring.active[combo.name] = False
convergence = False
# Check if compression-only conditions exist
elif spring.comp_only == True and spring.Axial(combo.name) < 0:
spring.active[combo.name] = False
convergence = False
# Check tension-only and compression-only members
print('...Checking for tension/compression-only member convergence')
for member in self.Members:
# Only run the tension/compression only check if the member is still active
if member.active[combo.name] == True:
# Check if tension-only conditions exist
if member.tension_only == True and member.MaxAxial(combo.name) > 0:
member.active[combo.name] = False
convergence = False
# Check if compression-only conditions exist
elif member.comp_only == True and member.MinAxial(combo.name) < 0:
member.active[combo.name] = False
convergence = False
if convergence == False:
print('...Tension/compression-only analysis did not converge. Adjusting stiffness matrix and reanalyzing.')
else:
print('...Tension/compression-only analysis converged after ' + str(iter_count) + ' iteration(s)')
# Keep track of the number of tension/compression only iterations
iter_count += 1
# Calculate reactions
self.__CalcReactions()
print('...Analysis complete')
print('')
# Check statics if requested
if check_statics == True:
self.__CheckStatics()
#%%
def Analyze_PDelta(self, max_iter=30, tol=0.01, sparse=True):
'''
Performs second order (P-Delta) analysis.
Parameters
----------
max_iter : number
The maximum number of iterations permitted. If this value is exceeded the program will
report divergence.
tol : number
The deflection tolerance (as a percentage) between iterations that will be used to
define whether the model has converged (e.g. 0.01 = deflections must converge within 1%
between iterations).
sparse : bool, optional
Indicates whether the sparse matrix solver should be used. A matrix can be considered
            sparse or dense depending on how many zero terms there are. Structural stiffness
matrices often contain many zero terms. The sparse solver can offer faster solutions
for such matrices. Using the sparse solver on dense matrices may lead to slower
solution times.
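        Example (assuming 'model' is a fully defined FEModel3D instance; the
        tolerance shown is an arbitrary choice)::
            model.Analyze_PDelta(max_iter=30, tol=0.001)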
'''
print('+--------------------+')
print('| Analyzing: P-Delta |')
print('+--------------------+')
# Assign an ID to all nodes and elements in the model
self.__Renumber()
# Ensure there is at least 1 load combination to solve if the user didn't define any
if self.LoadCombos == {}:
# Create and add a default load combination to the dictionary of load combinations
self.LoadCombos['Combo 1'] = LoadCombo('Combo 1', factors={'Case 1':1.0})
# Activate all springs and members for all load combinations. They can be turned inactive
# during the course of the tension/compression-only analysis
for spring in self.Springs:
for combo_name in self.LoadCombos.keys():
spring.active[combo_name] = True
for member in self.Members:
for combo_name in self.LoadCombos.keys():
member.active[combo_name] = True
# Get the auxiliary list used to determine how the matrices will be partitioned
D1_indices, D2_indices, D2 = self.__AuxList()
# Convert D2 from a list to a matrix
D2 = array(D2, ndmin=2).T
# Step through each load combination
for combo in self.LoadCombos.values():
print('')
print('...Analyzing load combination ' + combo.name)
iter_count_TC = 1 # Tracks tension/compression-only iterations
iter_count_PD = 1 # Tracks P-Delta iterations
convergence_TC = False # Tracks tension/compression-only convergence
convergence_PD = False # Tracks P-Delta convergence
divergence_TC = False # Tracks tension/compression-only divergence
divergence_PD = False # Tracks P-Delta divergence
# Iterate until convergence or divergence occurs
while ((convergence_TC == False or convergence_PD == False)
and (divergence_TC == False and divergence_PD == False)):
# Inform the user which iteration we're on
print('...Beginning tension/compression-only iteration #' + str(iter_count_TC))
print('...Beginning P-Delta iteration #' + str(iter_count_PD))
# Get the partitioned global matrices
if iter_count_PD == 1:
K11, K12, K21, K22 = self.__Partition(self.K(combo.name), D1_indices, D2_indices) # Initial stiffness matrix
FER1, FER2 = self.__Partition(self.FER(combo.name), D1_indices, D2_indices) # Fixed end reactions
P1, P2 = self.__Partition(self.P(combo.name), D1_indices, D2_indices) # Nodal forces
else:
# Calculate the global stiffness matrices (partitioned)
K11, K12, K21, K22 = self.__Partition(self.K(combo.name), D1_indices, D2_indices) # Initial stiffness matrix
Kg11, Kg12, Kg21, Kg22 = self.__Partition(self.Kg(combo.name), D1_indices, D2_indices) # Geometric stiffness matrix
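                    # The geometric stiffness Kg, assembled from the axial forces found in the
                    # previous iteration, adds the P-Delta (second order) effect to the elastic stiffness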
# Combine the stiffness matrices
K11 = add(K11, Kg11)
K12 = add(K12, Kg12)
K21 = add(K21, Kg21)
K22 = add(K22, Kg22)
# Calculate the global displacement vector
print('...Calculating the global displacement vector')
if K11.shape == (0, 0):
# All displacements are known, so D1 is an empty vector
D1 = []
else:
try:
# Calculate the global displacement vector
if sparse == True:
D1 = spsolve(csc_matrix(K11), subtract(subtract(P1, FER1), matmul(K12, D2)))
D1 = D1.reshape(len(D1), 1)
else:
D1 = solve(K11, subtract(subtract(P1, FER1), matmul(K12, D2)))
                    except Exception:
                        # Stop the analysis if 'K' is singular and provide an error message
                        raise ValueError('The stiffness matrix is singular, which implies rigid body motion. The structure is unstable. Aborting analysis.')
                D = zeros((len(self.Nodes)*6, 1))
                for node in self.Nodes:
                    # Assemble the full displacement vector, pulling each of the node's six degrees
                    # of freedom from the known displacements (D2) or the solved displacements (D1)
                    for dof in range(6):
                        if D2_indices.count(node.ID*6 + dof) == 1:
                            D.itemset((node.ID*6 + dof, 0), D2[D2_indices.index(node.ID*6 + dof), 0])
                        else:
                            D.itemset((node.ID*6 + dof, 0), D1[D1_indices.index(node.ID*6 + dof), 0])
# Save the global displacement vector
self.__D[combo.name] = D
# Store the calculated global nodal displacements into each node
for node in self.Nodes:
node.DX[combo.name] = D[node.ID*6 + 0, 0]
node.DY[combo.name] = D[node.ID*6 + 1, 0]
node.DZ[combo.name] = D[node.ID*6 + 2, 0]
node.RX[combo.name] = D[node.ID*6 + 3, 0]
node.RY[combo.name] = D[node.ID*6 + 4, 0]
node.RZ[combo.name] = D[node.ID*6 + 5, 0]
# Assume the model has converged (to be checked below)
convergence_TC = True
# Check for tension/compression-only springs that need to be deactivated
print('...Checking for tension/compression-only spring convergence')
for spring in self.Springs:
# Only run the tension/compression only check if the spring is still active
if spring.active[combo.name] == True:
# Check if tension-only conditions exist
if spring.tension_only == True and spring.Axial(combo.name) > 0:
spring.active[combo.name] = False
convergence_TC = False
# Reset the P-Delta analysis for the new geometry
iter_count_PD = 0
convergence_PD = False
# Check if compression-only conditions exist
elif spring.comp_only == True and spring.Axial(combo.name) < 0:
spring.active[combo.name] = False
convergence_TC = False
# Reset the P-Delta analysis for the new geometry
iter_count_PD = 0
convergence_PD = False
# Check for tension/compression-only members that need to be deactivated
print('...Checking for tension/compression-only member convergence')
for member in self.Members:
# Only run the tension/compression only check if the member is still active
if member.active[combo.name] == True:
# Check if tension-only conditions exist
if member.tension_only == True and member.MaxAxial(combo.name) > 0:
member.active[combo.name] = False
convergence_TC = False
# Reset the P-Delta analysis for the new geometry
iter_count_PD = 0
convergence_PD = False
# Check if compression-only conditions exist
elif member.comp_only == True and member.MinAxial(combo.name) < 0:
member.active[combo.name] = False
convergence_TC = False
# Reset the P-Delta analysis for the new geometry
iter_count_PD = 0
convergence_PD = False
# Report on convergence of tension/compression only analysis
if convergence_TC == False:
print('...Tension/compression-only analysis did not converge on this iteration')
print('...Stiffness matrix will be adjusted for newly deactivated elements')
print('...P-Delta analysis will be restarted')
# Increment the tension/compression-only iteration count
iter_count_TC += 1
else:
print('...Tension/compression-only analysis converged after ' + str(iter_count_TC) + ' iteration(s)')
# Check for divergence in the tension/compression-only analysis
if iter_count_TC > max_iter:
divergence_TC = True
raise Exception('...Model diverged during tension/compression-only analysis')
# Check for P-Delta convergence
if iter_count_PD > 1:
# Print a status update for the user
print('...Checking for convergence')
# Temporarily disable error messages for invalid values.
# We'll be dealing with some 'nan' values due to division by zero at supports with zero deflection.
seterr(invalid='ignore')
# Check for convergence
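                    # Convergence is measured on the ratio of each displacement to its value from the
                    # previous iteration; the worst-case ratio must fall within 'tol' of 1.0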
if abs(1 - nanmax(divide(prev_results, D1))) <= tol:
convergence_PD = True
print('...P-Delta analysis converged after ' + str(iter_count_PD) + ' iteration(s)')
# Check for divergence
elif iter_count_PD > max_iter:
divergence_PD = True
print('...P-Delta analysis failed to converge after ' + str(max_iter) + ' iteration(s)')
# Turn invalid value warnings back on
seterr(invalid='warn')
# Save the results for the next iteration
prev_results = D1
# Increment the P-Delta iteration count
iter_count_PD += 1
# Calculate reactions
self.__CalcReactions()
print('...Analysis complete')
print('')
#%%
def __CalcReactions(self):
'''
Calculates reactions once the model is solved.
'''
# Print a status update to the console
print('...Calculating reactions')
# Calculate the reactions, node by node
for node in self.Nodes:
# Step through each load combination
for combo in self.LoadCombos.values():
# Initialize reactions for this node and load combination
node.RxnFX[combo.name] = 0.0
node.RxnFY[combo.name] = 0.0
node.RxnFZ[combo.name] = 0.0
node.RxnMX[combo.name] = 0.0
node.RxnMY[combo.name] = 0.0
node.RxnMZ[combo.name] = 0.0
# Determine if the node has any supports
if (node.SupportDX == True) \
or (node.SupportDY == True) \
or (node.SupportDZ == True) \
or (node.SupportRX == True) \
or (node.SupportRY == True) \
or (node.SupportRZ == True):
# Sum the spring end forces at the node
for spring in self.Springs:
if spring.iNode == node and spring.active[combo.name] == True:
# Get the spring's global force matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
spring_F = spring.F(combo.name)
node.RxnFX[combo.name] += spring_F[0, 0]
node.RxnFY[combo.name] += spring_F[1, 0]
node.RxnFZ[combo.name] += spring_F[2, 0]
node.RxnMX[combo.name] += spring_F[3, 0]
node.RxnMY[combo.name] += spring_F[4, 0]
node.RxnMZ[combo.name] += spring_F[5, 0]
elif spring.jNode == node and spring.active[combo.name] == True:
# Get the spring's global force matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
spring_F = spring.F(combo.name)
node.RxnFX[combo.name] += spring_F[6, 0]
node.RxnFY[combo.name] += spring_F[7, 0]
node.RxnFZ[combo.name] += spring_F[8, 0]
node.RxnMX[combo.name] += spring_F[9, 0]
node.RxnMY[combo.name] += spring_F[10, 0]
node.RxnMZ[combo.name] += spring_F[11, 0]
# Sum the member end forces at the node
for member in self.Members:
if member.iNode == node and member.active[combo.name] == True:
# Get the member's global force matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
member_F = member.F(combo.name)
node.RxnFX[combo.name] += member_F[0, 0]
node.RxnFY[combo.name] += member_F[1, 0]
node.RxnFZ[combo.name] += member_F[2, 0]
node.RxnMX[combo.name] += member_F[3, 0]
node.RxnMY[combo.name] += member_F[4, 0]
node.RxnMZ[combo.name] += member_F[5, 0]
elif member.jNode == node and member.active[combo.name] == True:
# Get the member's global force matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
member_F = member.F(combo.name)
node.RxnFX[combo.name] += member_F[6, 0]
node.RxnFY[combo.name] += member_F[7, 0]
node.RxnFZ[combo.name] += member_F[8, 0]
node.RxnMX[combo.name] += member_F[9, 0]
node.RxnMY[combo.name] += member_F[10, 0]
node.RxnMZ[combo.name] += member_F[11, 0]
# Sum the plate forces at the node
for plate in self.Plates:
if plate.iNode == node:
# Get the plate's global force matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
plate_F = plate.F(combo.name)
node.RxnFX[combo.name] += plate_F[0, 0]
node.RxnFY[combo.name] += plate_F[1, 0]
node.RxnFZ[combo.name] += plate_F[2, 0]
node.RxnMX[combo.name] += plate_F[3, 0]
node.RxnMY[combo.name] += plate_F[4, 0]
node.RxnMZ[combo.name] += plate_F[5, 0]
elif plate.jNode == node:
# Get the plate's global force matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
plate_F = plate.F(combo.name)
node.RxnFX[combo.name] += plate_F[18, 0]
node.RxnFY[combo.name] += plate_F[19, 0]
node.RxnFZ[combo.name] += plate_F[20, 0]
node.RxnMX[combo.name] += plate_F[21, 0]
node.RxnMY[combo.name] += plate_F[22, 0]
node.RxnMZ[combo.name] += plate_F[23, 0]
elif plate.mNode == node:
# Get the plate's global force matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
plate_F = plate.F(combo.name)
node.RxnFX[combo.name] += plate_F[12, 0]
node.RxnFY[combo.name] += plate_F[13, 0]
node.RxnFZ[combo.name] += plate_F[14, 0]
node.RxnMX[combo.name] += plate_F[15, 0]
node.RxnMY[combo.name] += plate_F[16, 0]
node.RxnMZ[combo.name] += plate_F[17, 0]
elif plate.nNode == node:
# Get the plate's global force matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
plate_F = plate.F(combo.name)
node.RxnFX[combo.name] += plate_F[6, 0]
node.RxnFY[combo.name] += plate_F[7, 0]
node.RxnFZ[combo.name] += plate_F[8, 0]
node.RxnMX[combo.name] += plate_F[9, 0]
node.RxnMY[combo.name] += plate_F[10, 0]
node.RxnMZ[combo.name] += plate_F[11, 0]
# Sum the quad forces at the node
for quad in self.Quads:
if quad.iNode == node:
# Get the quad's global force matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
quad_F = quad.F(combo.name)
node.RxnFX[combo.name] += quad_F[12, 0]
node.RxnFY[combo.name] += quad_F[13, 0]
node.RxnFZ[combo.name] += quad_F[14, 0]
node.RxnMX[combo.name] += quad_F[15, 0]
node.RxnMY[combo.name] += quad_F[16, 0]
node.RxnMZ[combo.name] += quad_F[17, 0]
elif quad.jNode == node:
# Get the quad's global force matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
quad_F = quad.F(combo.name)
node.RxnFX[combo.name] += quad_F[18, 0]
node.RxnFY[combo.name] += quad_F[19, 0]
node.RxnFZ[combo.name] += quad_F[20, 0]
node.RxnMX[combo.name] += quad_F[21, 0]
node.RxnMY[combo.name] += quad_F[22, 0]
node.RxnMZ[combo.name] += quad_F[23, 0]
elif quad.mNode == node:
# Get the quad's global force matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
quad_F = quad.F(combo.name)
node.RxnFX[combo.name] += quad_F[0, 0]
node.RxnFY[combo.name] += quad_F[1, 0]
node.RxnFZ[combo.name] += quad_F[2, 0]
node.RxnMX[combo.name] += quad_F[3, 0]
node.RxnMY[combo.name] += quad_F[4, 0]
node.RxnMZ[combo.name] += quad_F[5, 0]
elif quad.nNode == node:
# Get the quad's global force matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
quad_F = quad.F(combo.name)
node.RxnFX[combo.name] += quad_F[6, 0]
node.RxnFY[combo.name] += quad_F[7, 0]
node.RxnFZ[combo.name] += quad_F[8, 0]
node.RxnMX[combo.name] += quad_F[9, 0]
node.RxnMY[combo.name] += quad_F[10, 0]
node.RxnMZ[combo.name] += quad_F[11, 0]
# Sum the joint forces at the node
for load in node.NodeLoads:
if load[0] == 'FX':
node.RxnFX[combo.name] -= load[1]
elif load[0] == 'FY':
node.RxnFY[combo.name] -= load[1]
elif load[0] == 'FZ':
node.RxnFZ[combo.name] -= load[1]
elif load[0] == 'MX':
node.RxnMX[combo.name] -= load[1]
elif load[0] == 'MY':
node.RxnMY[combo.name] -= load[1]
elif load[0] == 'MZ':
node.RxnMZ[combo.name] -= load[1]
#%%
def __CheckStatics(self):
'''
Checks static equilibrium and prints results to the console.
        '''
print('+----------------+')
print('| Statics Check: |')
print('+----------------+')
print('')
from prettytable import PrettyTable
# Start a blank table and create a header row
statics_table = PrettyTable()
statics_table.field_names = ['Load Combination', 'Sum FX', 'Sum RX', 'Sum FY', 'Sum RY', 'Sum FZ', 'Sum RZ', 'Sum MX', 'Sum RMX', 'Sum MY', 'Sum RMY', 'Sum MZ', 'Sum RMZ']
# Step through each load combination
for combo in self.LoadCombos.values():
# Initialize force and moment summations to zero
SumFX, SumFY, SumFZ = 0.0, 0.0, 0.0
SumMX, SumMY, SumMZ = 0.0, 0.0, 0.0
SumRFX, SumRFY, SumRFZ = 0.0, 0.0, 0.0
SumRMX, SumRMY, SumRMZ = 0.0, 0.0, 0.0
# Get the global force vector and the global fixed end reaction vector
P = self.P(combo.name)
FER = self.FER(combo.name)
# Step through each node and sum its forces
for node in self.Nodes:
# Get the node's coordinates
X = node.X
Y = node.Y
Z = node.Z
# Get the nodal forces
FX = P[node.ID*6+0][0] - FER[node.ID*6+0][0]
FY = P[node.ID*6+1][0] - FER[node.ID*6+1][0]
FZ = P[node.ID*6+2][0] - FER[node.ID*6+2][0]
MX = P[node.ID*6+3][0] - FER[node.ID*6+3][0]
MY = P[node.ID*6+4][0] - FER[node.ID*6+4][0]
MZ = P[node.ID*6+5][0] - FER[node.ID*6+5][0]
# Get the nodal reactions
RFX = node.RxnFX[combo.name]
RFY = node.RxnFY[combo.name]
RFZ = node.RxnFZ[combo.name]
RMX = node.RxnMX[combo.name]
RMY = node.RxnMY[combo.name]
RMZ = node.RxnMZ[combo.name]
# Sum the global forces
SumFX += FX
SumFY += FY
SumFZ += FZ
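                # Sum the global moments about the origin: each nodal moment plus the moment r x F
                # of the nodal force, with r = (X, Y, Z)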
SumMX += MX - FY*Z + FZ*Y
SumMY += MY + FX*Z - FZ*X
SumMZ += MZ - FX*Y + FY*X
# Sum the global reactions
SumRFX += RFX
SumRFY += RFY
SumRFZ += RFZ
SumRMX += RMX - RFY*Z + RFZ*Y
SumRMY += RMY + RFX*Z - RFZ*X
SumRMZ += RMZ - RFX*Y + RFY*X
# Add the results to the table
statics_table.add_row([combo.name, '{:.3g}'.format(SumFX), '{:.3g}'.format(SumRFX),
'{:.3g}'.format(SumFY), '{:.3g}'.format(SumRFY),
'{:.3g}'.format(SumFZ), '{:.3g}'.format(SumRFZ),
'{:.3g}'.format(SumMX), '{:.3g}'.format(SumRMX),
'{:.3g}'.format(SumMY), '{:.3g}'.format(SumRMY),
'{:.3g}'.format(SumMZ), '{:.3g}'.format(SumRMZ)])
# Print the static check table
print(statics_table)
print('')
|
the-stack_0_9289 | import pytest
from bispy.utilities.graph_entities import (
_QBlock,
_Vertex,
_Edge,
)
from typing import Set, Tuple, List
import networkx as nx
from bispy.saha.ranked_pta import ranked_split
from bispy.paige_tarjan.paige_tarjan import paige_tarjan
from bispy.saha.saha import add_edge
from bispy.utilities.graph_decorator import decorate_nx_graph
def partition_to_integer(partition: List[_QBlock]) -> Set[Set[int]]:
return set(
frozenset(vertex.label for vertex in block.vertexes)
for block in filter(lambda b: b.vertexes.size > 0, partition)
)
def integer_to_partition(
partition: List[Tuple], vertexes: List[_Vertex]
) -> List[_QBlock]:
qblocks = []
for block in partition:
qblocks.append(_QBlock([vertexes[i] for i in block], None))
return qblocks
def test_resets_aux_count():
g = nx.DiGraph()
g.add_nodes_from(range(5))
g.add_edges_from([(0, 1), (0, 2), (3, 1), (3, 2), (4, 1), (4, 2), (4, 3)])
vertexes, _ = decorate_nx_graph(g)
integer_partition = paige_tarjan(g)
q_partition = integer_to_partition(integer_partition, vertexes)
# now we modify the graph
add_edge(vertexes[3], vertexes[0])
# find [v]
modified_destination_block = None
for block in q_partition:
for vertex in block.vertexes:
if vertex.label == 0:
modified_destination_block = block
break
ranked_split(q_partition, modified_destination_block, 2)
for vx in vertexes:
assert not hasattr(vx, "aux_count") or vx.aux_count is None
def test_ranked_split():
g = nx.DiGraph()
g.add_nodes_from(range(5))
g.add_edges_from([(0, 1), (0, 2), (3, 1), (3, 2), (4, 1), (4, 2), (4, 3)])
vertexes, _ = decorate_nx_graph(g)
integer_partition = paige_tarjan(g)
q_partition = integer_to_partition(integer_partition, vertexes)
# now we modify the graph
add_edge(vertexes[3], vertexes[0])
# find [v]
modified_destination_block = None
for block in q_partition:
for vertex in block.vertexes:
if vertex.label == 0:
modified_destination_block = block
break
ranked_split(q_partition, modified_destination_block, 2)
final_integer_partition = partition_to_integer(q_partition)
assert final_integer_partition == set(
[frozenset([0]), frozenset([1, 2]), frozenset([3]), frozenset([4])]
)
|
the-stack_0_9290 | # -*- coding: utf-8 -*-
"""
Fast Kalman Filter attitude estimation
======================================
References
----------
.. [Guo] Siwen Guo, Jin Wu, Zuocai Wang, and Jide Qian, "Novel MARG-Sensor
Orientation Estimation Algorithm Using Fast Kalman Filter." Journal of
Sensors, vol. 2017, Article ID 8542153, 12 pages.
https://doi.org/10.1155/2017/8542153 and https://github.com/zarathustr/FKF
"""
import numpy as np
from ahrs.common.orientation import *
from ahrs.common import DEG2RAD
class FKF:
"""
Class of Fast Kalman Filter algorithm
Parameters
----------
acc : array
Sample of tri-axial Accelerometer.
mag : array
Sample of tri-axial Magnetometer.
"""
def __init__(self, acc: np.ndarray = None, mag: np.ndarray = None, **kwargs):
self.q = np.array([1.0, 0.0, 0.0, 0.0])
self.Ar = np.array([0.0, 0.0, 1.0])
self.Mr = np.array([0.0, 0.0, 1.0])
def update(self, acc, mag):
"""
FKF algorithm with a 6-axis Accelerometer-Magnetometer architecture.
Parameters
----------
acc : array
Sample of tri-axial Accelerometer.
mag : array
Sample of tri-axial Magnetometer.
Returns
-------
q : array
Estimated quaternion.
"""
Ab = acc.copy()
Mb = mag.copy()
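        # NOTE: the FKF measurement update itself is not implemented in this snippet;
        # Ab and Mb are prepared above, but the stored quaternion is returned unchanged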
return self.q
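
# Minimal usage sketch (the sensor values below are illustrative assumptions, not part of
# the original module):
if __name__ == '__main__':
    acc = np.array([0.0, 0.0, 9.81])   # gravity measured along +z
    mag = np.array([0.2, 0.0, 0.4])    # arbitrary magnetometer sample
    fkf = FKF()
    print(fkf.update(acc, mag))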
|
the-stack_0_9291 | import cv2
import numpy as np
def detect_face(net, frame, conf_threshold=0.7):
    # Prepare the input image
h, w, c = frame.shape
blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300), [104, 117, 123], False, False)
# Feedforward
    # The SSD prediction produces an output of shape (1, 1, N, 7)
    # where the 7 values are: image_id, label, conf, x_min, y_min, x_max, y_max
net.setInput(blob)
detections = net.forward()
    # Filter out low-confidence predictions
bbox = []
for _, _, conf, x1, y1, x2, y2 in detections[0, 0]:
if conf > conf_threshold:
box = np.array([x1, y1, x2, y2]) * [w, h, w, h]
bbox.append(box.astype(int))
return bbox
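
# Usage sketch (the model files and image path below are assumptions, not shipped with
# this module):
#     net = cv2.dnn.readNetFromCaffe('deploy.prototxt', 'res10_300x300_ssd.caffemodel')
#     frame = cv2.imread('photo.jpg')
#     for x1, y1, x2, y2 in detect_face(net, frame):
#         face = frame[y1:y2, x1:x2]
#         print(calculate_skin_percent(face))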
def normalize_image(img):
mean = img.reshape(-1, 3).mean(0).reshape(1, 1, -1)
std = img.reshape(-1, 3).std(0).reshape(1, 1, -1)
img = (img - mean) / std
img = (np.clip(img, [-4, -4, -4], [4, 4, 4]) + 4) / 8
img = (img*255).astype(np.uint8)
return img
def calculate_skin_percent(face, min_val=(90, 100, 110), max_val=(150, 150, 150)):
face = normalize_image(face)
min_val = np.array(min_val, dtype=np.uint8)
max_val = np.array(max_val, dtype=np.uint8)
skin = ((face >= min_val) & (face <= max_val)).all(2)
skin_percent = skin.mean()
return skin_percent |
the-stack_0_9293 | # Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import os
import sys
import unittest
import fundamental_tester_base
from pyplusplus import code_creators
class tester_t(fundamental_tester_base.fundamental_tester_base_t):
EXTENSION_NAME = 'duplicate_aliases'
def __init__( self, *args ):
fundamental_tester_base.fundamental_tester_base_t.__init__(
self
, tester_t.EXTENSION_NAME
, *args )
def customize(self, mb):
classes = mb.classes( lambda decl: 'duplicate_aliases' in decl.name )
classes.alias = 'duplicate_aliases'
classes.wrapper_alias = 'wrapper_duplicate_aliases'
def run_tests( self, module):
#check compilation
pass
def create_suite():
suite = unittest.TestSuite()
suite.addTest( unittest.makeSuite(tester_t))
return suite
def run_suite():
unittest.TextTestRunner(verbosity=2).run( create_suite() )
if __name__ == "__main__":
run_suite()
|
the-stack_0_9294 | from easydict import EasyDict
cartpole_iqn_config = dict(
env=dict(
collector_env_num=8,
evaluator_env_num=5,
n_evaluator_episode=5,
stop_value=195,
),
policy=dict(
cuda=False,
on_policy=False,
priority=True,
model=dict(
obs_shape=4,
action_shape=2,
encoder_hidden_size_list=[128, 128, 64],
num_quantiles=32,
),
discount_factor=0.97,
nstep=3,
learn=dict(
update_per_collect=3,
batch_size=64,
learning_rate=0.001,
target_update_freq=100,
kappa=1.0,
),
collect=dict(
n_sample=80,
unroll_len=1,
),
other=dict(
eps=dict(
type='exp',
start=0.95,
end=0.1,
decay=10000,
), replay_buffer=dict(replay_buffer_size=20000, )
),
),
)
cartpole_iqn_config = EasyDict(cartpole_iqn_config)
main_config = cartpole_iqn_config
cartpole_iqn_create_config = dict(
env=dict(
type='cartpole',
import_names=['dizoo.classic_control.cartpole.envs.cartpole_env'],
),
env_manager=dict(type='base'),
policy=dict(type='iqn'),
)
cartpole_iqn_create_config = EasyDict(cartpole_iqn_create_config)
create_config = cartpole_iqn_create_config
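
# A typical launch uses DI-engine's serial pipeline (the entry point name is an assumption
# and may differ between DI-engine versions):
#     from ding.entry import serial_pipeline
#     serial_pipeline((main_config, create_config), seed=0)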
|
the-stack_0_9295 | #!/usr/bin/env python
#
# This example can be used to demonstrate pvaPy server/client channel
# monitoring
#
# Run server.py in one window, and client.py in another one.
#
import sys
import time
from pvaccess import Channel
from collections import OrderedDict
class ClientMonitor:
def __init__(self, name):
self.name = name
self.value = 0
self.nReceived = 0
self.nMissed = 0
self.percentageMissed = 0
self.startTime = 0
        self.receiveRateKHz = 0
def toString(self):
return '%6s: Received: %7d (%6.2f [kHz]); Missed: %7d (%6.2f%%)' % (self.name, self.nReceived, self.receiveRateKHz, self.nMissed, self.percentageMissed)
def monitor(self, pv):
oldValue = self.value
self.value = pv['c']
self.nReceived += 1
diff = self.value - oldValue
if oldValue > 0:
self.nMissed += diff-1
else:
self.startTime = time.time()
if self.nReceived % 10000 == 0:
currentTime = time.time()
deltaT = currentTime - self.startTime
self.receiveRateKHz = self.nReceived/deltaT/1000.0
self.percentageMissed = (self.nMissed*100.0)/(self.nReceived+self.nMissed)
if self.nReceived % 100000 == 0:
print(self.toString())
if __name__ == '__main__':
runtime = 60
if len(sys.argv) > 1:
runtime = float(sys.argv[1])
channelName = 'counter'
c = Channel(channelName)
#print('CONNECT TO %s:\n%s\n' % (channelName, c.get()))
m = ClientMonitor(channelName)
t0 = time.time()
print('STARTING MONITOR for %s at %s\n' % (channelName, t0))
#c.monitor(m.monitor)
c.monitor(m.monitor, 'field(c)')
time.sleep(runtime)
c.stopMonitor()
t1 = time.time()
deltaT = t1-t0
print('STOP MONITOR at %s\n' % t1)
print('FINAL STATS:')
print(m.toString())
print('')
print('RUNTIME: %.2f [s]' % (deltaT))
print('\nDONE')
|
the-stack_0_9296 | # -*- coding: utf-8 -*-
"""
drftoolbox.views
~~~~~~~~~~~~~~~~
This module defines view classes used by the API
:copyright: (c) 2018 by Medical Decisions LLC
"""
import functools
import json
import logging
import re
from django.contrib.auth import get_user_model
from rest_framework import generics
from jose import jwt as jose_jwt, exceptions as jose_exceptions
from drftoolbox.serializers import UserKMSKeySerializer
LOGGER = logging.getLogger(__name__)
class BaseUserKMSKeyView(generics.RetrieveAPIView):
queryset = get_user_model().objects.filter(is_active=True)
serializer_class = UserKMSKeySerializer
def http_sign_class(self):
raise NotImplementedError
class RequestLoggingViewMixin(object):
REQUEST_LOGGING_LOGGER = LOGGER
REQUEST_LOGGING_LEVEL = logging.INFO
REQUEST_LOGGING_OBFUSCATE_PATTERN = re.compile(r'.*(authorization|cookie)$', re.I)
@classmethod
def obfuscate(cls, value):
result = []
for section in str(value).split('; '):
# try handling the value as a cookie, and if so see if we can
# only obfuscate the value parts of that cookie, however if not
# a cookie just fall back to obfuscating everything after the
# first 6 chars
parts = section.split('=', 1)
k = parts[0] if len(parts) > 1 else ''
v = parts[-1]
result.append(f'{k} {v[:6]}...'.strip())
return ' '.join(result)
@classmethod
def request_logging(cls, request):
"""
utility method to log the details of a request
"""
log = functools.partial(cls.REQUEST_LOGGING_LOGGER.log, cls.REQUEST_LOGGING_LEVEL)
pattern = cls.REQUEST_LOGGING_OBFUSCATE_PATTERN
data, headers = {}, {}
for k, v in request.data.items():
if pattern.match(k):
v = cls.obfuscate(v)
data[k] = v
for k, v in request._request.headers.items(): # pylint: disable=protected-access
if pattern.match(k):
try:
token = v.split()[-1]
v = {
'jwt_headers': jose_jwt.get_unverified_header(token),
'jwt_claims': jose_jwt.get_unverified_claims(token),
}
except (jose_exceptions.JOSEError, IndexError):
v = cls.obfuscate(v)
headers[k] = v
msg = {
'path': request._request.path, # pylint: disable=protected-access
'query params': dict(request.query_params),
'data': data,
'headers': headers,
}
log(f'REQUEST => {json.dumps(msg, indent=2)}')
def initialize_request(self, *args, **kwargs):
request = super().initialize_request(*args, **kwargs)
self.request_logging(request)
return request
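
# Usage sketch (the view and signature class names are assumptions, not part of this module):
#     class ExampleUserKeyView(RequestLoggingViewMixin, BaseUserKMSKeyView):
#         def http_sign_class(self):
#             return SomeHttpSignatureAuthClass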
|
the-stack_0_9298 | from django.db.models import CharField, Expression
from psycopg2.sql import Identifier, Literal, SQL
from usaspending_api.common.helpers.sql_helpers import convert_composable_query_to_string
from usaspending_api.recipient.models import RecipientLookup, RecipientProfile
from usaspending_api.recipient.v2.lookups import SPECIAL_CASES
def obtain_recipient_uri(recipient_name, recipient_unique_id, parent_recipient_unique_id, is_parent_recipient=False):
""" Return a valid string to be used for api/v2/recipient/duns/<recipient-hash>/ (or None)
Keyword Arguments:
recipient_name -- Legal Entity Name from the record
recipient_unique_id -- DUNS from the record
parent_recipient_unique_id -- parent DUNS from the record
is_parent_recipient -- boolean flag to force the recipient level to be "P" (default False)
By the nature of transaction records, the listed recipient can only be "R" or "C"
This flag is for the parent recipient link (as appropriate)
Return example string: 11fcdf15-3490-cdad-3df4-3b410f3d9b20-C
"""
if (is_parent_recipient and not recipient_unique_id) or not (recipient_unique_id or recipient_name):
return None
if recipient_unique_id:
recipient_hash = fetch_recipient_hash_using_duns(recipient_unique_id)
else:
recipient_hash = None
if recipient_hash is None:
recipient_hash = generate_missing_recipient_hash(recipient_unique_id, recipient_name)
recipient_level = obtain_recipient_level(
{
"duns": recipient_unique_id,
"parent_duns": parent_recipient_unique_id,
"is_parent_recipient": is_parent_recipient,
}
)
# Confirm that a recipient profile exists for the recipient information we have collected/generated.
if RecipientProfile.objects.filter(recipient_hash=recipient_hash, recipient_level=recipient_level).exists():
return combine_recipient_hash_and_level(recipient_hash, recipient_level)
return None
def generate_missing_recipient_hash(recipient_unique_id, recipient_name):
# SQL: MD5(UPPER(
# CASE
# WHEN awardee_or_recipient_uniqu IS NOT NULL THEN CONCAT('duns-', awardee_or_recipient_uniqu)
# ELSE CONCAT('name-', awardee_or_recipient_legal) END
# ))::uuid AS recipient_hash,
import hashlib
import uuid
if recipient_unique_id is None:
prefix = "name"
value = recipient_name
else:
prefix = "duns"
value = recipient_unique_id
return str(uuid.UUID(hashlib.md5(f"{prefix}-{value}".upper().encode("utf-8")).hexdigest()))
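
# For example, generate_missing_recipient_hash('123456789', None) feeds the string
# 'DUNS-123456789' through MD5 and formats the digest as a UUID string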
def fetch_recipient_hash_using_duns(recipient_unique_id):
recipient = RecipientLookup.objects.filter(duns=recipient_unique_id).values("recipient_hash").first()
return recipient["recipient_hash"] if recipient else None
def obtain_recipient_level(recipient_record: dict) -> str:
level = None
if recipient_is_parent(recipient_record):
level = "P"
elif recipient_is_standalone(recipient_record):
level = "R"
elif recipient_is_child(recipient_record):
level = "C"
return level
def recipient_is_parent(recipient_record: dict) -> bool:
return recipient_record["is_parent_recipient"]
def recipient_is_standalone(recipient_record: dict) -> bool:
return recipient_record["parent_duns"] is None
def recipient_is_child(recipient_record: dict) -> bool:
return recipient_record["parent_duns"] is not None
def combine_recipient_hash_and_level(recipient_hash, recipient_level):
return f"{recipient_hash}-{recipient_level.upper()}"
def _annotate_recipient_id(field_name, queryset, annotation_sql):
"""
Add recipient id (recipient hash + recipient level) to a queryset. The assumption here is that
the queryset is based on a data source that contains recipient_unique_id and
parent_recipient_unique_id which, currently, all of our advanced search materialized views do.
"""
class RecipientId(Expression):
"""
Used to graft a subquery into a queryset that can build recipient ids.
This is a bit less than ideal, but I just couldn't construct an ORM query to mimic this
logic. There are several issues including but not limited to:
- There are currently no relations between these tables in the Django ORM which makes
joining them... challenging.
- Adding relations to the ORM changes how the fields behave making this a much bigger
enhancement than originally planned.
- When I did add relations to the ORM, I couldn't figure out how to make the Django
OuterRef expression check for nulls since the subquery needs to check to see if the
parent_recipient_unique_id in the outer query is null.
Anyhow, this works and is encapsulated so if someone smart figures out how to use pure ORM,
it should be easy to patch in.
"""
def __init__(self):
super(RecipientId, self).__init__(CharField())
def as_sql(self, compiler, connection):
return (
convert_composable_query_to_string(
SQL(annotation_sql).format(
outer_table=Identifier(compiler.query.model._meta.db_table),
special_cases=Literal(tuple(sc for sc in SPECIAL_CASES)),
)
),
[],
)
return queryset.annotate(**{field_name: RecipientId()})
def annotate_recipient_id(field_name, queryset):
return _annotate_recipient_id(
field_name,
queryset,
"""(
select
rp.recipient_hash || '-' || rp.recipient_level
from
recipient_profile rp
inner join recipient_lookup rl on rl.recipient_hash = rp.recipient_hash
where
(
(
{outer_table}.recipient_unique_id is null
and rl.duns is null
and {outer_table}.recipient_name = rl.legal_business_name
) or (
{outer_table}.recipient_unique_id is not null
and rl.duns is not null
and rl.duns = {outer_table}.recipient_unique_id
)
)
and rp.recipient_level = case
when {outer_table}.parent_recipient_unique_id is null then 'R'
else 'C' end
and rp.recipient_name not in {special_cases}
)""",
)
def annotate_prime_award_recipient_id(field_name, queryset):
return _annotate_recipient_id(
field_name,
queryset,
"""(
select
rp.recipient_hash || '-' || rp.recipient_level
from
broker_subaward bs
inner join recipient_lookup rl on rl.duns = bs.awardee_or_recipient_uniqu
inner join recipient_profile rp on rp.recipient_hash = rl.recipient_hash
where
bs.id = {outer_table}.subaward_id and
rp.recipient_level = case
when bs.ultimate_parent_unique_ide is null or bs.ultimate_parent_unique_ide = '' then 'R'
else 'C'
end and
rp.recipient_name not in {special_cases}
)""",
)
|
the-stack_0_9301 | # Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for kfp.v2.google.client.client_utils."""
import json
import unittest
from unittest import mock
from google.cloud import storage
from kfp.v2.google.client import client_utils
class ClientUtilsTest(unittest.TestCase):
@mock.patch.object(storage, 'Client', autospec=True)
@mock.patch.object(storage.Blob, 'download_as_bytes', autospec=True)
def test_load_json_from_gs_uri(self, mock_download_as_bytes,
unused_storage_client):
mock_download_as_bytes.return_value = b'{"key":"value"}'
self.assertEqual({'key': 'value'},
client_utils.load_json('gs://bucket/path/to/blob'))
@mock.patch('builtins.open', mock.mock_open(read_data='{"key":"value"}'))
def test_load_json_from_local_file(self):
self.assertEqual({'key': 'value'},
client_utils.load_json('/path/to/file'))
@mock.patch.object(storage, 'Client', autospec=True)
def test_load_json_from_gs_uri_with_non_gs_uri_should_fail(
self, unused_storage_client):
with self.assertRaisesRegex(ValueError, 'URI scheme must be gs'):
client_utils._load_json_from_gs_uri(
'https://storage.google.com/bucket/blob')
@mock.patch.object(storage, 'Client', autospec=True)
@mock.patch.object(storage.Blob, 'download_as_bytes', autospec=True)
def test_load_json_from_gs_uri_with_invalid_json_should_fail(
self, mock_download_as_bytes, unused_storage_client):
mock_download_as_bytes.return_value = b'invalid-json'
with self.assertRaises(json.decoder.JSONDecodeError):
client_utils._load_json_from_gs_uri('gs://bucket/path/to/blob')
@mock.patch('builtins.open', mock.mock_open(read_data='invalid-json'))
def test_load_json_from_local_file_with_invalid_json_should_fail(self):
with self.assertRaises(json.decoder.JSONDecodeError):
client_utils._load_json_from_local_file('/path/to/file')
if __name__ == '__main__':
unittest.main()
|
the-stack_0_9304 | import numpy as np
def kalman_xy(x, P, measurement, R,
motion = np.matrix('0. 0. 0. 0.').T,
Q = np.matrix(np.eye(4))):
"""
Parameters:
x: initial state 4-tuple of location and velocity: (x0, x1, x0_dot, x1_dot)
P: initial uncertainty convariance matrix
measurement: observed position
R: measurement noise
motion: external motion added to state vector x
Q: motion noise (same shape as P)
"""
return kalman(x, P, measurement, R, motion, Q,
F = np.matrix('''
1. 0. 1. 0.;
0. 1. 0. 1.;
0. 0. 1. 0.;
0. 0. 0. 1.
'''),
H = np.matrix('''
1. 0. 0. 0.;
0. 1. 0. 0.'''))
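
# Quick demonstration (the measurement list and noise value below are illustrative):
#     x = np.matrix('0. 0. 0. 0.').T
#     P = np.matrix(np.eye(4)) * 1000.   # large initial uncertainty
#     for meas in [(0., 0.), (1., 1.1), (2.1, 1.9), (3., 3.2)]:
#         x, P = kalman_xy(x, P, meas, R=0.01**2)
#     # x[:2] then approximates the latest position, x[2:] the estimated velocity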
def kalman(x, P, measurement, R, motion, Q, F, H):
'''
Parameters:
x: initial state
P: initial uncertainty convariance matrix
measurement: observed position (same shape as H*x)
R: measurement noise (same shape as H)
motion: external motion added to state vector x
Q: motion noise (same shape as P)
F: next state function: x_prime = F*x
H: measurement function: position = H*x
Return: the updated and predicted new values for (x, P)
See also http://en.wikipedia.org/wiki/Kalman_filter
This version of kalman can be applied to many different situations by
appropriately defining F and H
'''
# UPDATE x, P based on measurement m
# distance between measured and current position-belief
y = np.matrix(measurement).T - H * x
S = H * P * H.T + R # residual convariance
K = P * H.T * S.I # Kalman gain
x = x + K*y
I = np.matrix(np.eye(F.shape[0])) # identity matrix
P = (I - K*H)*P
# PREDICT x, P based on motion
x = F*x + motion
P = F*P*F.T + Q
return x, P |
the-stack_0_9305 | from __future__ import division
import dolfin as df
import numpy as np
import logging
import os
import scipy.sparse.linalg
from time import time
from finmag.util import helpers
from finmag.util.meshes import embed3d
from itertools import izip
from math import pi
from finmag.field import Field
logger = logging.getLogger('finmag')
# Matrix-vector or Matrix-matrix product
def _mult_one(a, b):
# a and b are ?x?xn arrays where ? = 1..3
assert len(a.shape) == 3
assert len(b.shape) == 3
assert a.shape[2] == b.shape[2]
assert a.shape[1] == b.shape[0]
assert a.shape[0] <= 3 and a.shape[1] <= 3
assert b.shape[0] <= 3 and b.shape[1] <= 3
# One of the arrays might be complex, so we determine the type
# of the resulting array by adding two elements of the argument arrays
res = np.zeros(
(a.shape[0], b.shape[1], a.shape[2]), dtype=type(a[0, 0, 0] + b[0, 0, 0]))
for i in xrange(res.shape[0]):
for j in xrange(res.shape[1]):
for k in xrange(a.shape[1]):
res[i, j, :] += a[i, k, :] * b[k, j, :]
return res
# Returns the componentwise matrix product of the supplied matrix fields
def mf_mult(*args):
if len(args) < 2:
raise Exception("mult requires at least 2 arguments")
res = args[0]
for i in xrange(1, len(args)):
res = _mult_one(res, args[i])
return res
# Transposes the mxk matrix to a kxm matrix
def mf_transpose(a):
return np.transpose(a, [1, 0, 2])
# Computes the componentwise cross product of a vector field a
# and a vector or vector field b
def mf_cross(a, b):
assert a.shape == (3, 1, a.shape[2])
res = np.empty(a.shape, dtype=a.dtype)
res[0] = a[1] * b[2] - a[2] * b[1]
res[1] = a[2] * b[0] - a[0] * b[2]
res[2] = a[0] * b[1] - a[1] * b[0]
return res
# Normalises the 3d vector m
def mf_normalise(m):
assert m.shape == (3, 1, m.shape[2])
return m / np.sqrt(m[0] * m[0] + m[1] * m[1] + m[2] * m[2])
# Set up the basis for the tangential space and the corresponding
# projection operator
def compute_tangential_space_basis(m0):
assert m0.ndim == 3
n = m0.shape[2]
assert m0.shape == (3, 1, n)
# Set up a field of vectors m_perp that are perpendicular to m0
# Start with e_z and compute e_z x m
m_perp = mf_cross(m0, [0., 0., -1.])
# In case m || e_z, add a tiny component in e_y
m_perp[1] += 1e-100
# Normalise and compute the cross product with m0 again
m_perp = mf_cross(mf_normalise(m_perp), m0)
m_perp = mf_normalise(m_perp)
# The basis in the 3d space is ((m_perp x m0) x m0, m_perp x m0, m0)
R = np.zeros((3, 3, n))
R[:, 2, :] = m0[:, 0, :]
R[:, 1, :] = m_perp[:, 0, :]
R[:, 0, :] = mf_cross(m_perp, m0)[:, 0, :]
# Matrix for the injection from 2n to 3n (3x2)
S = np.zeros((3, 2, n))
S[0, 0, :] = 1.
S[1, 1, :] = 1.
# Matrix for the projection from 3n to 2n is transpose(S)
# Matrix for the cross product m0 x in the 2n space
Mcross = np.zeros((2, 2, n))
Mcross[0, 1, :] = -1
Mcross[1, 0, :] = 1
# The relationship between the 3d tangential vector v
# and the 2d vector w is
# v = (R S) w
# w = (R S)^t v
Q = mf_mult(R, S)
return Q, R, S, Mcross
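
# For a mesh with n vertices the returned arrays have shapes Q: (3, 2, n), R: (3, 3, n),
# S: (3, 2, n) and Mcross: (2, 2, n); mf_mult(Q, w) maps a (2, 1, n) tangent-space field w
# to the corresponding (3, 1, n) field in 3d, and mf_mult(mf_transpose(Q), v) projects back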
def differentiate_fd4(f, x, dx):
"""
Compute and return a fourth-order approximation to the directional
derivative of `f` at the point `x` in the direction of `dx`.
"""
x_sq = np.dot(x, x)
dx_sq = np.dot(dx, dx)
h = 0.001 * np.sqrt(x_sq + dx_sq) / np.sqrt(dx_sq + 1e-50)
# weights: 1. / 12., -2. / 3., 2. / 3., -1. / 12.
# coefficients: -2., -1., 1., 2.
res = (1. / 12. / h) * f(x - 2 * h * dx)
res += (-2. / 3. / h) * f(x - h * dx)
res += (2. / 3. / h) * f(x + h * dx)
res += (-1. / 12. / h) * f(x + 2 * h * dx)
return res
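
# Sanity check (illustrative): for f(x) = x.x the directional derivative at x along dx is
# 2*(x.dx), so differentiate_fd4(lambda v: np.dot(v, v), np.array([1., 2.]), np.array([0., 1.]))
# returns approximately 4.0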
def compute_eigenproblem_matrix(sim, frequency_unit=1e9, filename=None, differentiate_H_numerically=True, dtype=complex):
"""
Compute and return the square matrix `D` defining the eigenproblem which
has the normal mode frequencies and oscillation patterns as its solution.
Note that `sim` needs to be in a relaxed state, otherwise the results will
be wrong.
"""
# Create the helper simulation which we use to compute
# the effective field for various values of m.
#Ms = sim.Ms
#A = sim.get_interaction('Exchange').A
#unit_length = sim.unit_length
# try:
# sim.get_interaction('Demag')
# demag_solver = 'FK'
# except ValueError:
# demag_solver = None
#sim_aux = sim_with(sim.mesh, Ms=Ms, m_init=[1, 0, 0], A=A, unit_length=unit_length, demag_solver=demag_solver)
# In order to compute the derivative of the effective field, the magnetisation needs to be set
# to many different values. Thus we store a backup so that we can restore
# it later.
m_orig = sim.m
def effective_field_for_m(m, normalise=True):
if np.iscomplexobj(m):
raise NotImplementedError(
"XXX TODO: Implement the version for complex arrays!")
sim.set_m(m, normalise=normalise, debug=False)
return sim.effective_field()
# N is the number of degrees of freedom of the magnetisation vector.
# It may be smaller than the number of mesh nodes if we are using
# periodic boundary conditions.
N = sim.llg.S3.dim()
n = N // 3
assert (N == 3 * n)
m0_array = sim.m.copy()
# this corresponds to the vector 'm0_flat' in Simlib
m0_3xn = m0_array.reshape(3, n)
m0_column_vector = m0_array.reshape(3, 1, n)
H0_array = effective_field_for_m(m0_array)
H0_3xn = H0_array.reshape(3, n)
h0 = H0_3xn[0] * m0_3xn[0] + H0_3xn[1] * m0_3xn[1] + H0_3xn[2] * m0_3xn[2]
logger.debug(
"Computing basis of the tangent space and transition matrices.")
Q, R, S, Mcross = compute_tangential_space_basis(m0_column_vector)
Qt = mf_transpose(Q).copy()
# Returns the product of the linearised llg times vector
def linearised_llg_times_vector(v):
assert v.shape == (3, 1, n)
# The linearised equation is
# dv/dt = - gamma m0 x (H' v - h_0 v)
v_array = v.view()
v_array.shape = (-1,)
# Compute H'(m_0)*v, i.e. the "directional derivative" of H at
# m_0 in the direction of v. Since H is linear in m (at least
# theoretically, although this is not quite true in the case
# of our demag computation), this is the same as H(v)!
if differentiate_H_numerically:
res = differentiate_fd4(effective_field_for_m, m0_array, v_array)
else:
res = effective_field_for_m(v_array, normalise=False)
res.shape = (3, -1)
# Subtract h0 v
res[0] -= h0 * v[0, 0]
res[1] -= h0 * v[1, 0]
res[2] -= h0 * v[2, 0]
# Multiply by -gamma m0x
res *= sim.gamma
res.shape = (3, 1, -1)
# Put res on the left in case v is complex
res = mf_cross(res, m0_column_vector)
return res
# The linearised equation in the tangential basis
def linearised_llg_times_tangential_vector(w):
w = w.view()
w.shape = (2, 1, n)
# Go to the 3d space
v = mf_mult(Q, w)
# Compute the linearised llg
L = linearised_llg_times_vector(v)
# Go back to 2d space
res = np.empty(w.shape, dtype=dtype)
res[:] = mf_mult(Qt, L)
if dtype == complex:
# Multiply by -i/(2*pi*U) so that we get frequencies as the real
# part of eigenvalues
res *= -1j / (2 * pi * frequency_unit)
else:
# This will yield imaginary eigenvalues, but we divide by 1j in the
# calling routine.
res *= 1. / (2 * pi * frequency_unit)
res.shape = (-1,)
return res
df.tic()
logger.info("Assembling eigenproblem matrix.")
D = np.zeros((2 * n, 2 * n), dtype=dtype)
logger.debug("Eigenproblem matrix D will occupy {:.2f} MB of memory.".format(
D.nbytes / 1024. ** 2))
for i, w in enumerate(np.eye(2 * n)):
if i % 50 == 0:
t_cur = df.toc()
completion_info = '' if (i == 0) else ', estimated remaining time: {}'.format(
helpers.format_time(t_cur * (2 * n / i - 1)))
logger.debug("Processing row {}/{} (time elapsed: {}{})".format(i,
2 * n, helpers.format_time(t_cur), completion_info))
D[:, i] = linearised_llg_times_tangential_vector(w)
logger.debug("Eigenproblem matrix D occupies {:.2f} MB of memory.".format(
D.nbytes / 1024. ** 2))
logger.info("Finished assembling eigenproblem matrix.")
if filename != None:
logger.info("Saving eigenproblem matrix to file '{}'".format(filename))
np.save(filename, D)
# Restore the original magnetisation.
# XXX TODO: Is this method safe, or does it leave any trace of the
# temporary changes we did above?
sim.set_m(m_orig)
return D
# We use the following class (which behaves like a function due to its
# __call__ method) instead of a simple lambda expression because it is
# pickleable, which is needed if we want to cache computation results.
#
# XXX TODO: lambda expresions can be pickled with the 'dill' module,
# so we should probably get rid of this.
class M_times_w(object):
def __init__(self, Mcross, n, alpha=0.):
self.Mcross = Mcross
self.n = n
self.alpha = alpha
def __call__(self, w):
w = w.view()
w.shape = (2, 1, self.n)
res = -1j * mf_mult(self.Mcross, w)
if self.alpha != 0.:
res += -1j * self.alpha * w
res.shape = (-1,)
return res
class NotImplementedOp(object):
def __call__(self, w):
raise NotImplementedError("rmatvec is not implemented")
def is_hermitian(A, atol=1e-8, rtol=1e-12):
"""
Returns True if the matrix `A` is Hermitian (up to the given
tolerance) and False otherwise.
The arguments `atol` and `rtol` have the same meaning as in
`numpy.allclose`.
"""
if isinstance(A, np.ndarray):
# Note: just using an absolute tolerance and checking for
# the maximum difference is about twice as efficient, so
# maybe we should avoid the relative tolerance in the future.
return np.allclose(A, np.conj(A.T), atol=atol, rtol=rtol)
elif isinstance(A, scipy.sparse.linalg.LinearOperator):
raise NotImplementedError
else:
raise NotImplementedError
def check_is_hermitian(A, matrix_name, atol=1e-8, rtol=1e-12):
"""
Check if `A` is hermitian and print a warning if this is not the case.
The argument `matrix_name` is only used for printing the warning.
"""
if not is_hermitian(A):
mat_diff = np.absolute(A - np.conj(A.T))
logger.critical("Matrix {} is not Hermitian. Maximum difference "
"between A and conj(A^tr): {}, median difference: {}, "
"mean difference: {} (maximum entry of A: {}, "
"median entry: {}, mean entry: {})".format(
matrix_name, mat_diff.max(), np.median(
mat_diff), np.mean(mat_diff),
np.max(np.absolute(A)), np.median(np.absolute(A)), np.mean(np.absolute(A))))
def compute_generalised_eigenproblem_matrices(sim, alpha=0.0, frequency_unit=1e9,
filename_mat_A=None, filename_mat_M=None,
check_hermitian=False, differentiate_H_numerically=True):
"""
XXX TODO: write me
"""
m_orig = sim.m
def effective_field_for_m(m, normalise=True):
if np.iscomplexobj(m):
raise NotImplementedError(
"XXX TODO: Implement the version for complex arrays!")
sim.set_m(m, normalise=normalise)
return sim.effective_field()
n = sim.mesh.num_vertices()
N = 3 * n # number of degrees of freedom
m0_array = sim.m.copy()
# this corresponds to the vector 'm0_flat' in Simlib
m0_3xn = m0_array.reshape(3, n)
m0_column_vector = m0_array.reshape(3, 1, n)
H0_array = effective_field_for_m(m0_array)
H0_3xn = H0_array.reshape(3, n)
h0 = H0_3xn[0] * m0_3xn[0] + H0_3xn[1] * m0_3xn[1] + H0_3xn[2] * m0_3xn[2]
logger.debug(
"Computing basis of the tangent space and transition matrices.")
Q, R, S, Mcross = compute_tangential_space_basis(m0_column_vector)
Qt = mf_transpose(Q).copy()
logger.debug("Q.shape: {} ({} MB)".format(Q.shape, Q.nbytes / 1024. ** 2))
def A_times_vector(v):
# A = H' v - h_0 v
assert v.shape == (3, 1, n)
v_array = v.view()
v_array.shape = (-1,)
# Compute H'(m_0)*v, i.e. the "directional derivative" of H at
# m_0 in the direction of v. Since H is linear in m (at least
# theoretically, although this is not quite true in the case
# of our demag computation), this is the same as H(v)!
if differentiate_H_numerically:
res = differentiate_fd4(effective_field_for_m, m0_array, v_array)
else:
res = effective_field_for_m(v_array, normalise=False)
res.shape = (3, n)
# Subtract h0 v
res[0] -= h0 * v[0, 0]
res[1] -= h0 * v[1, 0]
res[2] -= h0 * v[2, 0]
res.shape = (3, 1, n)
return res
df.tic()
logger.info("Assembling eigenproblem matrix.")
A = np.zeros((2 * n, 2 * n), dtype=complex)
logger.debug("Eigenproblem matrix A occupies {:.2f} MB of memory.".format(
A.nbytes / 1024. ** 2))
# Compute A
w = np.zeros(2 * n)
for i in xrange(2 * n):
if i % 50 == 0:
logger.debug(
"Processing row {}/{} (time taken so far: {:.2f} seconds)".format(i, 2 * n, df.toc()))
# Ensure that w is the i-th standard basis vector
w.shape = (2 * n,)
w[i - 1] = 0.0 # this will do no harm if i==0
w[i] = 1.0
w.shape = (2, 1, n)
Av = A_times_vector(mf_mult(Q, w))
A[:, i] = mf_mult(Qt, Av).reshape(-1)
# Multiply by (-gamma)/(2 pi U)
A[:, i] *= -sim.gamma / (2 * pi * frequency_unit)
# Compute B, which is -i Mcross 2 pi U / gamma
# B = np.zeros((2, n, 2, n), dtype=complex)
# for i in xrange(n):
# B[:, i, :, i] = Mcross[:, :, i]
# B[:, i, :, i] *= -1j
# B.shape = (2*n, 2*n)
M = scipy.sparse.linalg.LinearOperator(
(2 * n, 2 * n), M_times_w(Mcross, n, alpha), NotImplementedOp(), NotImplementedOp(), dtype=complex)
if check_hermitian:
# Sanity check: A and M should be Hermitian matrices
check_is_hermitian(A, "A")
#check_is_hermitian(M, "M")
if filename_mat_A != None:
dirname_mat_A = os.path.dirname(os.path.abspath(filename_mat_A))
if not os.path.exists(dirname_mat_A):
logger.debug(
"Creating directory '{}' as it does not exist.".format(dirname_mat_A))
os.makedirs(dirname_mat_A)
logger.info(
"Saving generalised eigenproblem matrix 'A' to file '{}'".format(filename_mat_A))
np.save(filename_mat_A, A)
if filename_mat_M != None:
dirname_mat_M = os.path.dirname(os.path.abspath(filename_mat_M))
if not os.path.exists(dirname_mat_M):
logger.debug(
"Creating directory '{}' as it does not exist.".format(dirname_mat_M))
os.makedirs(dirname_mat_M)
logger.info(
"Saving generalised eigenproblem matrix 'M' to file '{}'".format(filename_mat_M))
np.save(filename_mat_M, M)
# Restore the original magnetisation.
# XXX TODO: Is this method safe, or does it leave any trace of the
# temporary changes we did above?
sim.set_m(m_orig)
return A, M, Q, Qt
def compute_normal_modes(D, n_values=10, sigma=0., tol=1e-8, which='LM'):
logger.debug("Solving eigenproblem. This may take a while...")
df.tic()
omega, w = scipy.sparse.linalg.eigs(
D, n_values, which=which, sigma=0., tol=tol, return_eigenvectors=True)
logger.debug(
"Computing the eigenvalues and eigenvectors took {:.2f} seconds".format(df.toc()))
return omega, w
def compute_normal_modes_generalised(A, M, n_values=10, tol=1e-8, discard_negative_frequencies=False, sigma=None, which='LM',
v0=None, ncv=None, maxiter=None, Minv=None, OPinv=None, mode='normal'):
logger.debug("Solving eigenproblem. This may take a while...")
df.tic()
if discard_negative_frequencies:
n_values *= 2
# XXX TODO: The following call seems to increase memory consumption quite a bit. Why?!?
#
# We have to swap M and A when passing them to eigsh since the M matrix
# has to be positive definite for eigsh!
omega_inv, w = scipy.sparse.linalg.eigsh(M, k=n_values, M=A, which=which, tol=tol, return_eigenvectors=True, sigma=sigma,
v0=v0, ncv=ncv, maxiter=maxiter, Minv=Minv, OPinv=OPinv, mode=mode)
logger.debug(
"Computing the eigenvalues and eigenvectors took {:.2f} seconds".format(df.toc()))
# The true eigenfrequencies are given by 1/omega_inv because we swapped M
# and A above and thus computed the inverse eigenvalues.
omega = 1. / omega_inv
# Sanity check: the eigenfrequencies should occur in +/- pairs.
TOL = 1e-3
positive_freqs = filter(lambda x: x > 0, omega)
negative_freqs = filter(lambda x: x < 0, omega)
freq_pairs = izip(positive_freqs, negative_freqs)
if (n_values % 2 == 0 and len(positive_freqs) != len(negative_freqs)) or \
        (n_values % 2 == 1 and len(positive_freqs) - len(negative_freqs) not in [0, 1]) or \
any([abs(x + y) > TOL for (x, y) in freq_pairs]):
logger.warning("The eigenfrequencies should occur in +/- pairs, but this "
"does not seem to be the case (with TOL={})! Please "
"double-check that the results make sense!".format(TOL))
# Find the indices that sort the frequency by absolute value,
# with the positive frequencies occurring before the negative ones (where.
sorted_indices = sorted(np.arange(len(omega)),
key=lambda i: (np.round(abs(omega[i]), decimals=4), -np.sign(omega[i]), abs(omega[i])))
if discard_negative_frequencies:
# Discard indices corresponding to negative frequencies
sorted_indices = filter(lambda i: omega[i] >= 0.0, sorted_indices)
omega = omega[sorted_indices]
# XXX TODO: can we somehow avoid copying the columns to save memory?!?
w = w[:, sorted_indices]
return omega, w
def export_normal_mode_animation(mesh, m0, freq, w, filename, num_cycles=1, num_snapshots_per_cycle=20, scaling=0.2, dm_only=False, save_h5=False):
"""
Save a number of vtk files of different snapshots of a given normal mode.
These can be imported and animated in Paraview.
*Arguments*
mesh : dolfin.Mesh
The mesh on which the magnetisation is defined.
m0 : numpy.array
The ground state of the magnetisation for which the normal mode was computed.
The size must be so that the array can be reshaped to size 3xN.
freq : float
The frequency of the normal mode.
w : numpy.array
The eigenvector representing the normal mode (as returned by `compute_eigenv`
or `compute_eigenv_generalised`).
filename : string
The filename of the exported animation files. Each individual frame will
have the same basename but will be given a suffix indicating the frame
number, too.
num_cycles : int
The number of cycles to be animated.
num_snapshots_per_cycle : int
The number of snapshot per cycle to be exported. Thus the total number of
exported frames is num_cycles * num_snapshots_per_cycle.
scaling : float
If `dm_only` is False, this determines the maximum size of the
oscillation (relative to the magnetisation vector) in the
visualisation. If `dm_only` is True, this has no effect.
dm_only : bool (optional)
If False (the default), plots `m0 + scaling*dm(t)`, where m0 is the
average magnetisation and dm(t) the (spatially varying)
oscillation corresponding to the frequency of the normal mode.
If True, only `dm(t)` is plotted.
"""
if freq.imag != 0 and abs(freq.imag) > 5e-3:
logger.warning("Frequency expected to be a real number. "
"Got: {}. This may lead to unexpected behaviour".format(freq))
freq = freq.real
#basename = os.path.basename(re.sub('\.vtk$', '', filename))
#dirname = os.path.dirname(filename)
# if not os.path.exists(dirname):
# print "Creating directory '{}' as it doesn't exist.".format(dirname)
# os.makedirs(dirname)
#mesh = comp.mesh
#mesh_shape = mesh.mesh_size
m0_array = m0.copy() # we assume that sim is relaxed!!
Q, R, S, Mcross = compute_tangential_space_basis(
m0_array.reshape(3, 1, -1))
Qt = mf_transpose(Q).copy()
n = mesh.num_vertices()
V = df.VectorFunctionSpace(mesh, 'CG', 1, dim=3)
func = df.Function(V)
func.rename('m', 'magnetisation')
w_3d = mf_mult(Q, w.reshape((2, 1, n)))
w_flat = w_3d.reshape(-1)
phi = np.angle(w_flat) # relative phases of the oscillations
a = np.absolute(w_flat)
a = a / a.max() # normalised amplitudes of the oscillations
t_end = num_cycles * 2 * pi / freq
timesteps = np.linspace(
0, t_end, num_cycles * num_snapshots_per_cycle, endpoint=False)
m_osc = np.zeros(3 * n)
t0 = time()
f = df.File(filename, 'compressed')
field = Field(V, name='m')
for (i, t) in enumerate(timesteps):
logger.debug("Saving animation snapshot for timestep {} ({}/{})".format(t,
i, num_cycles * num_snapshots_per_cycle))
if dm_only is False:
m_osc = (
m0_array + scaling * a * np.cos(t * freq + phi)).reshape(-1)
else:
m_osc = (scaling * a * np.cos(t * freq + phi)).reshape(-1)
#save_vector_field(m_osc, os.path.join(dirname, basename + '_{:04d}.vtk'.format(i)))
func.vector().set_local(m_osc)
f << func
if save_h5:
field.set(func)
field.save_hdf5(filename[0:-4], i)
field.close_hdf5()
t1 = time()
logger.debug(
"Saving the data to file '{}' took {} seconds".format(filename, t1 - t0))
def get_colormap_from_name(cmap_name):
from matplotlib import cm
import custom_colormaps
colormaps = {'coolwarm': cm.coolwarm,
'cool': cm.cool,
'hot': cm.hot,
'afmhot': cm.afmhot,
'rainbow': cm.jet,
'hsv': cm.hsv,
'circular1': custom_colormaps.circular1,
'circular2': custom_colormaps.circular2,
'circular3': custom_colormaps.circular3,
'circular4': custom_colormaps.circular4,
'husl_99_75': custom_colormaps.husl_99_75,
'husl_99_70': custom_colormaps.husl_99_70,
'husl_99_65': custom_colormaps.husl_99_65,
}
try:
if cmap_name == 'rainbow':
logger.warning('The rainbow colormap is strongly discouraged for scientific visualizations, it is '
'highly recommended to choose a different colormap. See for example '
'http://medvis.org/2012/08/21/rainbow-colormaps-what-are-they-good-for-absolutely-nothing/ '
'for more information.')
return colormaps[cmap_name]
except KeyError:
raise ValueError("Unknown colormap name: '{}'. Allowed values: {}".format(
cmap_name, colormaps.keys()))
def extract_mesh_slice(mesh, slice_z):
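    """
    Extract a horizontal 2d slice of `mesh` at height z = `slice_z`.

    Returns the pair `(slice_mesh, restrict_to_slice_mesh)`: `slice_mesh` is a
    rectangular dolfin mesh spanning the x/y extent of `mesh`, embedded in 3d
    at the given z-coordinate via `embed3d`; `restrict_to_slice_mesh(a)`
    interpolates an array `a` of CG1 nodal values defined on `mesh` onto the
    nodes of `slice_mesh` using dolfin's LagrangeInterpolator.
    """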
coords = mesh.coordinates()
xmin = min(coords[:, 0])
xmax = max(coords[:, 0])
ymin = min(coords[:, 1])
ymax = max(coords[:, 1])
nx = int(1 * (xmax - xmin))
ny = int(1 * (ymax - ymin))
slice_mesh = embed3d(
df.RectangleMesh(df.Point(xmin, ymin), df.Point(xmax, ymax), nx, ny), z_embed=slice_z)
V = df.FunctionSpace(mesh, 'CG', 1)
f = df.Function(V)
V_slice = df.FunctionSpace(slice_mesh, 'CG', 1)
f_slice = df.Function(V_slice)
lg = df.LagrangeInterpolator()
def restrict_to_slice_mesh(a):
f.vector().set_local(a)
lg.interpolate(f_slice, f)
return f_slice.vector().array()
return slice_mesh, restrict_to_slice_mesh
def get_phaseplot_ticks_and_labels(num_ticks):
"""
Helper function to define nice ticks for phase plots which are
multiples of pi/2. Currently `num_ticks` must be either 3 or 5.
"""
if num_ticks == 3:
ticks = [-pi, 0, pi]
ticklabels = [u'-\u03C0', u'0', u'\u03C0']
elif num_ticks == 5:
ticks = [-pi, -pi / 2, 0, pi / 2, pi]
ticklabels = [u'-\u03C0', u'-\u03C0/2', u'0', u'\u03C0/2', u'\u03C0']
else:
raise ValueError(
"Number of phase plot ticks must be either 3 or 5. Got: {}".format(num_ticks))
return ticks, ticklabels
def plot_spatially_resolved_normal_mode(
mesh, m0, w, slice_z='z_max', components='xyz', label_components=True,
figure_title=None, yshift_title=0.0, plot_powers=True, plot_phases=True,
label_power='Power', label_phase='Phase', xticks=None, yticks=None,
num_power_colorbar_ticks=5, num_phase_colorbar_ticks=5,
colorbar_fmt='%.2e', cmap_powers='coolwarm', cmap_phases='circular4',
vmin_powers=0.0, show_axis_labels=True, show_axis_frames=True,
show_colorbars=True, figsize=None, outfilename=None, dpi=None):
"""
Plot the normal mode profile across a slice of the sample.
Remark: Due to a bug in matplotlib (see [1]), when saving the
`matplotlib.Figure` object returned from this function the title
and left annotations will likely be cut off. Therefore it is
recommended to save the plot by specifying the argument
`outfilename`.
[1] http://stackoverflow.com/questions/10101700/moving-matplotlib-legend-outside-of-the-axis-makes-it-cutoff-by-the-figure-box
*Arguments*
mesh:
The mesh of the simulation object for which the eigenmode was computed.
m0 : numpy.array
The ground state of the magnetisation for which the normal mode was computed.
The size must be so that the array can be reshaped to size 3xN.
w:
The eigenvector representing the normal mode (for example,
one of the columns of the second return value of
`compute_normal_modes_generalised`).
slice_z:
The z-value of the mesh slice which will be plotted. This can be either
'z_min' or 'z_max' (which correspond to the bottom/top layer of the mesh)
or a numerical value. Note that the mesh must have a layer of nodes with
this z-coordinate, otherwise the plotting routine will fail.
num_power_colorbar_ticks:
The number of tick labels for the power colorbars. Currently
this must be either 3 or 5 (default: 5).
num_phase_colorbar_ticks:
The number of tick labels for the phase colorbars. Currently
this must be either 3 or 5 (default: 5).
outfilename:
If given, the plot will be saved to a file with this name. Any
missing directory components will be created first. Default: None.
dpi:
The resolution of the saved plot (ignored if `outfilename` is None).
*Returns*
The `matplotlib.Figure` containing the plot.
"""
import matplotlib.pyplot as plt
import matplotlib.tri as tri
from matplotlib.ticker import FormatStrFormatter
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
coords = mesh.coordinates()
if slice_z == 'z_min':
slice_z = min(coords[:, 2])
elif slice_z == 'z_max':
slice_z = max(coords[:, 2])
slice_mesh, restrict_to_submesh = extract_mesh_slice(mesh, slice_z)
m0_array = m0.copy()
Q, R, S, Mcross = compute_tangential_space_basis(
m0_array.reshape(3, 1, -1))
Qt = mf_transpose(Q).copy()
n = mesh.num_vertices()
w_3d = mf_mult(Q, w.reshape((2, 1, n)))
w_x = w_3d[0, 0, :]
w_y = w_3d[1, 0, :]
w_z = w_3d[2, 0, :]
######################################################################
slice_coords = slice_mesh.coordinates()
xvals = slice_coords[:, 0]
yvals = slice_coords[:, 1]
# We use the mesh triangulation provided by dolfin in case the
# mesh has multiple disconnected regions (in which case matplotlib
# would connect them).
mesh_triang = tri.Triangulation(xvals, yvals, slice_mesh.cells())
# Determine the number of rows (<=2) and columns (<=3) in the plot
num_rows = 0
if plot_powers:
num_rows += 1
if plot_phases:
num_rows += 1
if num_rows == 0:
raise ValueError(
"At least one of the arguments `plot_powers`, `plot_phases` must be True.")
num_columns = len(components)
def plot_mode_profile(ax, a, title=None, vmin=None, vmax=None, cmap=None, cticks=None, cticklabels=None):
ax.set_aspect('equal')
vals = restrict_to_submesh(a)
trimesh = ax.tripcolor(mesh_triang, vals, shading='gouraud', cmap=cmap)
ax.set_title(title)
if show_colorbars:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", "5%", pad="3%")
if vmin is None:
vmin = min(vals)
if vmax is None:
vmax = max(vals)
trimesh.set_clim(vmin=vmin, vmax=vmax)
cbar = plt.colorbar(trimesh, cax=cax, format=FormatStrFormatter(colorbar_fmt),
ticks=cticks)
            if cticklabels is not None:
                cbar.ax.set_yticklabels(cticklabels)
if not show_axis_labels:
ax.set_xticks([])
ax.set_yticks([])
if not show_axis_frames:
ax.axis('off')
fig = plt.figure(figsize=figsize)
if isinstance(cmap_powers, str):
cmap_powers = get_colormap_from_name(cmap_powers)
if isinstance(cmap_phases, str):
cmap_phases = get_colormap_from_name(cmap_phases)
powers = {'x': np.absolute(w_x) ** 2,
'y': np.absolute(w_y) ** 2,
'z': np.absolute(w_z) ** 2}
phases = {'x': np.angle(w_x),
'y': np.angle(w_y),
'z': np.angle(w_z)}
    def set_xyticks(ax):
        if xticks is not None:
            ax.set_xticks(xticks)
        if yticks is not None:
            ax.set_yticks(yticks)
cnt = 1
if plot_powers:
cticklabels = None
for comp in components:
ax = fig.add_subplot(num_rows, num_columns, cnt)
            if num_power_colorbar_ticks is not None:
                if vmin_powers is not None:
minval = vmin_powers
else:
minval = powers[comp].min()
cticks = np.linspace(
minval, powers[comp].max(), num_power_colorbar_ticks)
else:
cticks = None
comp_title = 'm_{}'.format(comp) if label_components else ''
plot_mode_profile(ax, powers[comp], title=comp_title,
cticks=cticks, cticklabels=cticklabels,
vmin=vmin_powers, cmap=cmap_powers)
set_xyticks(ax)
cnt += 1
if plot_phases:
cticks, cticklabels = get_phaseplot_ticks_and_labels(
num_phase_colorbar_ticks)
for comp in components:
ax = fig.add_subplot(num_rows, num_columns, cnt)
if label_components and not plot_powers:
comp_title = 'm_{}'.format(comp)
else:
comp_title = ''
plot_mode_profile(ax, phases[comp], title=comp_title,
cticks=cticks, cticklabels=cticklabels,
vmin=-pi, vmax=+pi,
cmap=cmap_phases)
set_xyticks(ax)
cnt += 1
bbox_extra_artists = []
    if figure_title is not None:
txt = plt.text(0.5, 1.0 + yshift_title, figure_title,
horizontalalignment='center',
fontsize=20,
transform=fig.transFigure)
bbox_extra_artists.append(txt)
num_axes = len(fig.axes)
ax_annotate_powers = fig.axes[0]
ax_annotate_phases = fig.axes[(num_axes // 2) if plot_powers else 0]
if plot_powers:
txt_power = plt.text(-0.2, 0.5, label_power,
fontsize=16,
horizontalalignment='right',
verticalalignment='center',
rotation='vertical',
# transform=fig.transFigure)
transform=ax_annotate_powers.transAxes)
bbox_extra_artists.append(txt_power)
#
#ax_topleft.text(0, 0, label_power, ha='left', va='center', rotation=90)
#
#from matplotlib.offsetbox import AnchoredOffsetbox, TextArea
#box = TextArea(label_power, textprops=dict(color="k", fontsize=20))
# anchored_box = AnchoredOffsetbox(loc=3,
# child=box, pad=0.,
# frameon=False,
# bbox_to_anchor=(-0.1, 0.5),
# bbox_transform=ax.transAxes,
# borderpad=0.,
# )
# ax_topleft.add_artist(anchored_box)
# bbox_extra_artists.append(anchored_box)
if plot_phases:
txt_phase = plt.text(-0.2, 0.5, label_phase,
fontsize=16,
horizontalalignment='right',
verticalalignment='center',
rotation='vertical',
# transform=fig.transFigure)
transform=ax_annotate_phases.transAxes)
bbox_extra_artists.append(txt_phase)
    if outfilename is not None:
helpers.create_missing_directory_components(outfilename)
fig.savefig(
outfilename, bbox_extra_artists=bbox_extra_artists, bbox_inches='tight', dpi=dpi)
return fig
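# Minimal usage sketch (not part of the original code; `mesh`, `m0` and
# `eigenvecs` are assumed to come from a preceding normal mode computation,
# e.g. the eigensolvers mentioned in the docstrings above):
#
#     fig = plot_spatially_resolved_normal_mode(
#         mesh, m0, eigenvecs[:, 0], slice_z='z_max', components='xyz',
#         figure_title='Mode 0', outfilename='mode_000.png', dpi=200)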
|
the-stack_0_9307 | import torch
import torchani
import unittest
import os
import pickle
path = os.path.dirname(os.path.realpath(__file__))
class TestGrad(unittest.TestCase):
# torch.autograd.gradcheck and torch.autograd.gradgradcheck verify that
# the numerical and analytical gradient and hessian of a function
# matches to within a given tolerance.
#
# The forward call of the function is wrapped with a lambda so that
# gradcheck gets a function with only one tensor input and tensor output.
# nondet_tol is necessarily greater than zero since some operations are
# nondeterministic which makes two equal inputs have different outputs
def setUp(self):
self.device = torch.device(
'cuda' if torch.cuda.is_available() else 'cpu')
self.model = torchani.models.ANI1x(model_index=0).to(device=self.device,
dtype=torch.double)
datafile = os.path.join(path, 'test_data/NIST/all')
# Some small molecules are selected to make the tests faster
self.data = pickle.load(open(datafile, 'rb'))[1243:1250]
def testGradCheck(self):
for coordinates, species, _, _, _, _ in self.data:
coordinates = torch.from_numpy(coordinates).to(device=self.device,
dtype=torch.float64)
coordinates.requires_grad_(True)
species = torch.from_numpy(species).to(self.device)
torch.autograd.gradcheck(lambda x: self.model((species, x)).energies,
coordinates,
nondet_tol=1e-13)
def testGradGradCheck(self):
for coordinates, species, _, _, _, _ in self.data:
coordinates = torch.from_numpy(coordinates).to(device=self.device,
dtype=torch.float64)
coordinates.requires_grad_(True)
species = torch.from_numpy(species).to(self.device)
torch.autograd.gradgradcheck(lambda x: self.model((species, x)).energies,
coordinates,
nondet_tol=1e-13)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_9308 | #! /usr/bin/env python
import sys
import yt ; yt.funcs.mylog.setLevel(0)
import numpy as np
from scipy import signal
# Build Jx without filter (from other simulation)
my_F_nofilter = np.zeros([16,16])
my_F_nofilter[8,8] = -1.601068065642412e-11
my_F_nofilter[8,7] = -1.601068065642412e-11
# Build 2D filter
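# Note (added for clarity): [.25, .5, .25] is the order-1 binomial smoothing
# stencil; repeatedly convolving it with itself below raises the filter order,
# and the outer product my_filterx[:, None] * my_filtery forms the separable
# 2D filter of order [1, 5] used as the theoretical reference for the PIC
# output compared against at the end of this script.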
filter0 = np.array([.25,.5,.25])
my_order = [1,5]
my_filterx = filter0
my_filtery = filter0
while my_order[0]>1:
my_filterx = np.convolve(my_filterx,filter0)
my_order[0] -= 1
while my_order[1]>1:
my_filtery = np.convolve(my_filtery,filter0)
my_order[1] -= 1
my_filter = my_filterx[:,None]*my_filtery
# Apply filter. my_F_filtered is the theoretical value for the filtered field
my_F_filtered = signal.convolve2d(my_F_nofilter, my_filter, boundary='symm', mode='same')
# Get simulation result for F_filtered
filename = sys.argv[1]
ds = yt.load( filename )
sl = yt.SlicePlot(ds, 2, 'jx', aspect=1)
all_data_level_0 = ds.covering_grid(level=0,left_edge=ds.domain_left_edge, dims=ds.domain_dimensions)
F_filtered = all_data_level_0['boxlib', 'jx'].v.squeeze()
# Compare theory and PIC for filtered value
error = np.sum( np.abs(F_filtered - my_F_filtered) ) / np.sum( np.abs(my_F_filtered) )
assert( error < 1.e-14 )
|
the-stack_0_9310 | # -*- coding: utf-8 -*-
"""Main tasks.py file for current application module."""
import time
import os
import json
import shutil
from datetime import datetime as dtime
from celery import group
from celery import shared_task
from flask import current_app
from libs import helpers
from exts.sqlalchemy import db
from mods.api.models import Eval
from mods.api.models import File
@shared_task(bind=True, ignore_result=True)
def client_eval(self, files, client_id=None):
""" Client evaluation task."""
with current_app.app_context():
idle_time = 0.1
new_eval = Eval.query.filter(Eval.uuid_f == self.request.id).first()
if len(files) > 0:
self.update_state(state='PROGRESS')
file_tasks = []
for file in files:
for k, v in file.items():
file_tasks.append(eval_file.s(v, k, client_id))
group(*file_tasks)()
if helpers.eval_running(new_eval) is True:
while helpers.eval_running(new_eval) is True:
self.update_state(state='PROGRESS')
time.sleep(idle_time)
db.session.refresh(new_eval)
new_eval.status_f, new_eval.score = helpers.eval_status(new_eval)
#new_eval.date_b = str(dtime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3])
#new_eval.date_b = dtime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
new_eval.date_b = dtime.now()
db.session.commit()
fpath = "/tmp/uploads/files/{}".format(new_eval.uuid_f)
shutil.rmtree(str(fpath), ignore_errors=True, onerror=None)
return self.update_state(state='SUCCESS')
@shared_task(bind=True, ignore_result=True)
def eval_file(self, fullpath, file_hash, client_id=None):
""" Single file submission to backend task."""
import requests
requests.packages.urllib3.disable_warnings()
with current_app.app_context():
self.update_state(state='PROGRESS')
fc = helpers.file_config(fullpath, file_hash, client_id)
fd = open(fc["fullpath"], "rb")
file = fd.read()
fd.close()
os.remove(fc["fullpath"])
ma_files = {
fc["filename"]: (fc["filename"], file, 'application/octet-stream')
}
r = requests.post(
fc["scan_url"], files=ma_files, verify=False, headers=fc["headers"])
if not r.ok:
return self.update_state(state='FAILURE')
return self.update_state(state='SUCCESS')
@shared_task(bind=True, ignore_result=True)
def eval_result(self, jdata):
""" Single file result received from wsclient service processing task."""
with current_app.app_context():
out_msg = helpers.file_result(jdata)
jdata['status_f'] = "Complete"
if jdata['status'] == 2 or jdata['status'] == 3:
jdata['status_f'] = "Error"
db.session.query(File).filter(File.sha1 == jdata["sha1"]).update({
File.status_f: jdata['status_f'],
File.score: jdata['score'],
File.exec_time: jdata['exec_time'],
#File.date_b: jdata['server_time'],
#File.date_b: dtime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3].datetime(),
File.date_b: dtime.now(),
File.message: out_msg,
File.results: json.dumps(jdata)
#File.results: jdata
#File.results: {}
})
db.session.commit()
return self.update_state(state='SUCCESS')
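# Minimal dispatch sketch (not part of the original code): `files` is inferred
# from client_eval above to be a list of single-entry dicts mapping a file
# hash to its path on disk, and the Celery task_id must equal the Eval row's
# uuid_f so the task can look it up. The hash/path values are illustrative.
#
#     client_eval.apply_async(
#         args=([{'3f786850e387...': '/tmp/uploads/files/<eval-uuid>/sample.bin'}],),
#         kwargs={'client_id': 42},
#         task_id=str(new_eval.uuid_f))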
|
the-stack_0_9312 | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""train DCGAN on ModelArts, get checkpoint files and air/onnx models."""
import argparse
import os
import datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import mindspore.common.dtype as mstype
from mindspore import context
from mindspore import nn, Tensor, export
from mindspore.train.callback import CheckpointConfig, _InternalCallbackParam, ModelCheckpoint, RunContext
from mindspore.context import ParallelMode
from mindspore.communication.management import init, get_rank
import moxing as mox
from src.dataset import create_dataset_imagenet
from src.config import dcgan_imagenet_cfg as cfg
from src.generator import Generator
from src.discriminator import Discriminator
from src.cell import WithLossCellD, WithLossCellG
from src.dcgan import DCGAN
NORMALIZE_MEAN = 127.5
NORMALIZE_STD = 127.5
def save_imgs(gen_imgs, idx):
"""
    Save images in a 4 * 4 grid when training on ModelArts.
Inputs:
- **gen_imgs** (array) - Images generated by the generator.
- **idx** (int) - Training epoch.
"""
matplotlib.use('Agg')
for index in range(gen_imgs.shape[0]):
plt.subplot(4, 4, index + 1)
gen_imgs[index] = gen_imgs[index] * NORMALIZE_STD + NORMALIZE_MEAN
perm = (1, 2, 0)
show_imgs = np.transpose(gen_imgs[index], perm)
sdf = show_imgs.astype(int)
plt.imshow(sdf)
plt.axis("off")
plt.savefig("/cache/images/{}.png".format(idx))
def save_losses(G_losses_list, D_losses_list, idx):
"""
    Save loss visualization images when training on ModelArts.
Inputs:
- **G_losses_list** (list) - Generator loss list.
- **D_losses_list** (list) - Discriminator loss list.
- **idx** (int) - Training epoch.
"""
plt.figure(figsize=(10, 5))
plt.title("Generator and Discriminator Loss During Training")
plt.plot(G_losses_list, label="G")
plt.plot(D_losses_list, label="D")
plt.xlabel("iterations")
plt.ylabel("Loss")
plt.legend()
plt.savefig("/cache/losses/{}.png".format(idx))
parser = argparse.ArgumentParser(description='MindSpore dcgan training')
parser.add_argument('--data_url', default=None,
help='Directory contains ImageNet-1k dataset.')
parser.add_argument('--train_url', default=None,
help='Directory of training output.')
parser.add_argument('--images_url', default=None,
help='Location of images outputs.')
parser.add_argument('--losses_url', default=None,
help='Location of losses outputs.')
parser.add_argument("--file_format", type=str,
default="AIR", help="Format of export file.")
parser.add_argument("--file_name", type=str,
default="dcgan", help="Output file name.")
parser.add_argument('--epoch_size', type=int,
default=cfg.epoch_size, help='Epoch size of training.')
args = parser.parse_args()
device_id = int(os.getenv('DEVICE_ID'))
device_num = int(os.getenv('RANK_SIZE'))
local_input_url = '/cache/data' + str(device_id)
local_output_url = '/cache/ckpt' + str(device_id)
local_images_url = '/cache/images'
local_losses_url = '/cache/losses'
context.set_context(mode=context.GRAPH_MODE,
device_target="Ascend", save_graphs=False)
context.set_context(device_id=device_id)
if device_num > 1:
init()
context.set_auto_parallel_context(device_num=device_num,
global_rank=device_id,
parallel_mode=ParallelMode.DATA_PARALLEL,
gradients_mean=True)
rank = get_rank()
else:
rank = 0
mox.file.copy_parallel(src_url=args.data_url, dst_url=local_input_url)
mox.file.copy_parallel(src_url=args.images_url, dst_url=local_images_url)
mox.file.copy_parallel(src_url=args.losses_url, dst_url=local_losses_url)
if __name__ == '__main__':
# Load Dataset
ds = create_dataset_imagenet(os.path.join(
local_input_url), num_parallel_workers=2)
steps_per_epoch = ds.get_dataset_size()
# Define Network
netD = Discriminator()
netG = Generator()
criterion = nn.BCELoss(reduction='mean')
netD_with_criterion = WithLossCellD(netD, netG, criterion)
netG_with_criterion = WithLossCellG(netD, netG, criterion)
optimizerD = nn.Adam(netD.trainable_params(),
learning_rate=cfg.learning_rate, beta1=cfg.beta1)
optimizerG = nn.Adam(netG.trainable_params(),
learning_rate=cfg.learning_rate, beta1=cfg.beta1)
myTrainOneStepCellForD = nn.TrainOneStepCell(
netD_with_criterion, optimizerD)
myTrainOneStepCellForG = nn.TrainOneStepCell(
netG_with_criterion, optimizerG)
dcgan = DCGAN(myTrainOneStepCellForD, myTrainOneStepCellForG)
dcgan.set_train()
# checkpoint save
ckpt_config = CheckpointConfig(save_checkpoint_steps=steps_per_epoch,
keep_checkpoint_max=args.epoch_size)
ckpt_cb = ModelCheckpoint(
config=ckpt_config, directory=local_output_url, prefix='dcgan')
cb_params = _InternalCallbackParam()
cb_params.train_network = netG
cb_params.batch_num = steps_per_epoch
cb_params.epoch_num = args.epoch_size
# For each epoch
cb_params.cur_epoch_num = 0
cb_params.cur_step_num = 0
run_context = RunContext(cb_params)
ckpt_cb.begin(run_context)
np.random.seed(1)
fixed_noise = Tensor(np.random.normal(
size=(16, cfg.latent_size, 1, 1)).astype("float32"))
data_loader = ds.create_dict_iterator(
output_numpy=True, num_epochs=args.epoch_size)
G_losses = []
D_losses = []
# Start Training Loop
print("Starting Training Loop...")
for epoch in range(args.epoch_size):
# For each batch in the dataloader
for i, data in enumerate(data_loader):
real_data = Tensor(data['image'])
latent_code = Tensor(data["latent_code"])
netD_loss, netG_loss = dcgan(real_data, latent_code)
if i % 50 == 0:
print("Date time: ", datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), "\tepoch: ", epoch, "/",
args.epoch_size, "\tstep: ", i, "/", steps_per_epoch, "\tDloss: ", netD_loss, "\tGloss: ",
netG_loss)
D_losses.append(netD_loss.asnumpy())
G_losses.append(netG_loss.asnumpy())
cb_params.cur_step_num = cb_params.cur_step_num + 1
cb_params.cur_epoch_num = cb_params.cur_epoch_num + 1
print("================saving model===================")
if device_id == 0:
ckpt_cb.step_end(run_context)
fake = netG(fixed_noise)
print("================saving images===================")
save_imgs(fake.asnumpy(), epoch + 1)
print("================saving losses===================")
save_losses(G_losses, D_losses, epoch + 1)
mox.file.copy_parallel(
src_url=local_images_url, dst_url=args.images_url)
mox.file.copy_parallel(
src_url=local_losses_url, dst_url=args.losses_url)
mox.file.copy_parallel(
src_url=local_output_url, dst_url=args.train_url)
print("================success================")
# export checkpoint file into air, onnx, mindir models
inputs = Tensor(np.random.rand(16, 100, 1, 1), mstype.float32)
export(netG, inputs, file_name=args.file_name,
file_format=args.file_format)
file_name = args.file_name + "." + args.file_format.lower()
mox.file.copy_parallel(
src_url=file_name, dst_url=os.path.join(args.train_url, file_name))
|
the-stack_0_9314 | import pygame as py
import variables as v
class Button(py.sprite.Sprite):
def __init__(self, text, pos, size, normalcolour, hovercolour, font, ID, centred = False, bsize=(0,0)):
"""
Create a simple button.
Arguments:
text <str> -- the button's text
pos (x, y) -- the position of the button
size <int> -- the font size of the text
normalcolour (r, g, b) -- the colour of the button
hovercolour (r, g, b) -- the colour of the button when it is hovered
font <str> -- the font file to use (use None for default font)
ID <str|int> -- a unique identifier for this button
centred <bool> -- whether the origin of the button is its topleft corner or centre (default=False)
bsize (w, h) -- a set size for the button (default=(0, 0) - automatic)
"""
super().__init__()
self.ID = ID
self.hovered = False
self.text = text
self.pos = pos
self.hcolour = hovercolour
self.ncolour = normalcolour
self.font = font
self.font = py.font.Font(font, int(size)) #Creates a new font object using font file and font size
self.centred = centred
self.size = bsize
self.rend = self.font.render(self.text, True, (0,0,0)) #Creates a new surface with the text on
self.set_rect()
def update(self):
if self.hovered: #Changes the button colour if it is being hovered
colour = self.hcolour
else:
colour = self.ncolour
py.draw.rect(v.screen, colour, self.rect) #Draws a rectangle
v.screen.blit(self.rend, self.rect) #Blits the text onto the screen
if self.rect.collidepoint(py.mouse.get_pos()): #Detects if the mouse is over the button
self.hovered = True
else:
self.hovered = False
def set_rect(self): #Calculates the size and position of the button
self.rect = self.rend.get_rect()
if not self.centred:
self.rect.topleft = self.pos
if self.centred:
self.rect.center = self.pos
if not self.size[0] == 0:
self.rect.width = self.size[0]
if not self.size[1] == 0:
self.rect.height = self.size[1]
def pressed(self): #Detects if the button is pressed
for event in v.events:
if self.hovered:
if event.type == py.MOUSEBUTTONDOWN:
return True
return False
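# Minimal usage sketch (not part of the original code): it assumes the
# `variables` module exposes `v.screen` (the display surface) and `v.events`
# (the per-frame event list), which the classes in this file already rely on.
# `fill_gradient` and `textLabel` defined below can be drawn in the same loop.
#
#     py.init()
#     v.screen = py.display.set_mode((640, 480))
#     play = Button("Play", (320, 240), 40, (200, 200, 200), (255, 255, 0),
#                   None, ID="play", centred=True)
#     while True:
#         v.events = py.event.get()
#         v.screen.fill((30, 30, 60))
#         play.update()
#         if play.pressed():
#             print("play clicked")
#         py.display.flip()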
def fill_gradient(surface, color, gradient, rect=None, vertical=True, forward=True):
"""fill a surface with a gradient pattern
Parameters:
color (r, g, b) -- starting color
gradient (r, g, b) -- final color
rect <pygame.Rect> -- area to fill (default=Surface's rect)
vertical <bool> -- True=vertical, False=horizontal (default=True)
forward <bool> -> True=forward, False=reverse (default=True)
Pygame recipe: http://www.pygame.org/wiki/GradientCode
"""
if rect is None: rect = surface.get_rect()
x1,x2 = rect.left, rect.right
y1,y2 = rect.top, rect.bottom
if vertical: h = y2-y1
else: h = x2-x1
if forward: a, b = color, gradient
else: b, a = color, gradient
rate = (
float(b[0]-a[0])/h,
float(b[1]-a[1])/h,
float(b[2]-a[2])/h
)
fn_line = py.draw.line
if vertical:
for line in range(y1,y2):
color = (
min(max(a[0]+(rate[0]*(line-y1)),0),255),
min(max(a[1]+(rate[1]*(line-y1)),0),255),
min(max(a[2]+(rate[2]*(line-y1)),0),255)
)
fn_line(surface, color, (x1,line), (x2,line))
else:
for col in range(x1,x2):
color = (
min(max(a[0]+(rate[0]*(col-x1)),0),255),
min(max(a[1]+(rate[1]*(col-x1)),0),255),
min(max(a[2]+(rate[2]*(col-x1)),0),255)
)
fn_line(surface, color, (col,y1), (col,y2))
class textLabel(py.sprite.Sprite):
def __init__(self, text, pos, colour, font, size, centred=False):
"""
Create a simple text label.
Arguments:
text <str> -- the label's text
pos (x, y) -- the position of the text
size <int> -- the font size of the text
colour (r, g, b) -- the colour of the text
font <str> -- the font file to use (use None for default font)
centred <bool> -- whether the origin of the text is its topleft corner or centre (default=False)
"""
super().__init__()
self.text = text
self.pos = pos
self.colour = colour
self.font = font
self.size = size
self.centred = centred
def update(self):
pos = self.pos
font = py.font.Font(self.font, self.size) #Creates a new font with given file and size
label = font.render(self.text, 1, self.colour) #Renders given text with font
if self.centred:
#Centres text
pos = list(self.pos)
pos[0] -= font.size(self.text)[0] / 2
pos[1] -= font.size(self.text)[1] / 2
pos = tuple(pos)
v.screen.blit(label, pos) #Blits label to screen |
the-stack_0_9316 | import datetime
from io import BytesIO
import os
import shutil
import numpy as np
import pytest
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.testing import _has_tex_package, _check_for_pgf
from matplotlib.testing.compare import compare_images, ImageComparisonFailure
from matplotlib.backends.backend_pgf import PdfPages, common_texification
from matplotlib.testing.decorators import (_image_directories,
check_figures_equal,
image_comparison)
baseline_dir, result_dir = _image_directories(lambda: 'dummy func')
needs_xelatex = pytest.mark.skipif(not _check_for_pgf('xelatex'),
reason='xelatex + pgf is required')
needs_pdflatex = pytest.mark.skipif(not _check_for_pgf('pdflatex'),
reason='pdflatex + pgf is required')
needs_lualatex = pytest.mark.skipif(not _check_for_pgf('lualatex'),
reason='lualatex + pgf is required')
needs_ghostscript = pytest.mark.skipif(
"eps" not in mpl.testing.compare.converter,
reason="This test needs a ghostscript installation")
def compare_figure(fname, savefig_kwargs={}, tol=0):
actual = os.path.join(result_dir, fname)
plt.savefig(actual, **savefig_kwargs)
expected = os.path.join(result_dir, "expected_%s" % fname)
shutil.copyfile(os.path.join(baseline_dir, fname), expected)
err = compare_images(expected, actual, tol=tol)
if err:
raise ImageComparisonFailure(err)
def create_figure():
plt.figure()
x = np.linspace(0, 1, 15)
# line plot
plt.plot(x, x ** 2, "b-")
# marker
plt.plot(x, 1 - x**2, "g>")
# filled paths and patterns
plt.fill_between([0., .4], [.4, 0.], hatch='//', facecolor="lightgray",
edgecolor="red")
plt.fill([3, 3, .8, .8, 3], [2, -2, -2, 0, 2], "b")
# text and typesetting
plt.plot([0.9], [0.5], "ro", markersize=3)
plt.text(0.9, 0.5, 'unicode (ü, °, µ) and math ($\\mu_i = x_i^2$)',
ha='right', fontsize=20)
plt.ylabel('sans-serif, blue, $\\frac{\\sqrt{x}}{y^2}$..',
family='sans-serif', color='blue')
plt.xlim(0, 1)
plt.ylim(0, 1)
@pytest.mark.parametrize('plain_text, escaped_text', [
(r'quad_sum: $\sum x_i^2$', r'quad\_sum: \(\displaystyle \sum x_i^2\)'),
(r'no \$splits \$ here', r'no \$splits \$ here'),
('with_underscores', r'with\_underscores'),
('% not a comment', r'\% not a comment'),
('^not', r'\^not'),
])
def test_common_texification(plain_text, escaped_text):
assert common_texification(plain_text) == escaped_text
# test compiling a figure to pdf with xelatex
@needs_xelatex
@pytest.mark.backend('pgf')
@image_comparison(['pgf_xelatex.pdf'], style='default')
def test_xelatex():
rc_xelatex = {'font.family': 'serif',
'pgf.rcfonts': False}
mpl.rcParams.update(rc_xelatex)
create_figure()
# test compiling a figure to pdf with pdflatex
@needs_pdflatex
@pytest.mark.skipif(not _has_tex_package('ucs'), reason='needs ucs.sty')
@pytest.mark.backend('pgf')
@image_comparison(['pgf_pdflatex.pdf'], style='default')
def test_pdflatex():
if os.environ.get('APPVEYOR'):
pytest.xfail("pdflatex test does not work on appveyor due to missing "
"LaTeX fonts")
rc_pdflatex = {'font.family': 'serif',
'pgf.rcfonts': False,
'pgf.texsystem': 'pdflatex',
'pgf.preamble': ('\\usepackage[utf8x]{inputenc}'
'\\usepackage[T1]{fontenc}')}
mpl.rcParams.update(rc_pdflatex)
create_figure()
# test updating the rc parameters for each figure
@needs_xelatex
@needs_pdflatex
@pytest.mark.style('default')
@pytest.mark.backend('pgf')
def test_rcupdate():
rc_sets = [{'font.family': 'sans-serif',
'font.size': 30,
'figure.subplot.left': .2,
'lines.markersize': 10,
'pgf.rcfonts': False,
'pgf.texsystem': 'xelatex'},
{'font.family': 'monospace',
'font.size': 10,
'figure.subplot.left': .1,
'lines.markersize': 20,
'pgf.rcfonts': False,
'pgf.texsystem': 'pdflatex',
'pgf.preamble': ('\\usepackage[utf8x]{inputenc}'
'\\usepackage[T1]{fontenc}'
'\\usepackage{sfmath}')}]
tol = [6, 0]
for i, rc_set in enumerate(rc_sets):
with mpl.rc_context(rc_set):
for substring, pkg in [('sfmath', 'sfmath'), ('utf8x', 'ucs')]:
if (substring in mpl.rcParams['pgf.preamble']
and not _has_tex_package(pkg)):
pytest.skip(f'needs {pkg}.sty')
create_figure()
compare_figure('pgf_rcupdate%d.pdf' % (i + 1), tol=tol[i])
# test backend-side clipping, since large numbers are not supported by TeX
@needs_xelatex
@pytest.mark.style('default')
@pytest.mark.backend('pgf')
def test_pathclip():
mpl.rcParams.update({'font.family': 'serif', 'pgf.rcfonts': False})
plt.plot([0., 1e100], [0., 1e100])
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.savefig(BytesIO(), format="pdf") # No image comparison.
# test mixed mode rendering
@needs_xelatex
@pytest.mark.backend('pgf')
@image_comparison(['pgf_mixedmode.pdf'], style='default')
def test_mixedmode():
mpl.rcParams.update({'font.family': 'serif', 'pgf.rcfonts': False})
Y, X = np.ogrid[-1:1:40j, -1:1:40j]
plt.pcolor(X**2 + Y**2).set_rasterized(True)
# test bbox_inches clipping
@needs_xelatex
@pytest.mark.style('default')
@pytest.mark.backend('pgf')
def test_bbox_inches():
mpl.rcParams.update({'font.family': 'serif', 'pgf.rcfonts': False})
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.plot(range(5))
ax2.plot(range(5))
plt.tight_layout()
bbox = ax1.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
compare_figure('pgf_bbox_inches.pdf', savefig_kwargs={'bbox_inches': bbox},
tol=0)
@pytest.mark.style('default')
@pytest.mark.backend('pgf')
@pytest.mark.parametrize('system', [
pytest.param('lualatex', marks=[needs_lualatex]),
pytest.param('pdflatex', marks=[needs_pdflatex]),
pytest.param('xelatex', marks=[needs_xelatex]),
])
def test_pdf_pages(system):
rc_pdflatex = {
'font.family': 'serif',
'pgf.rcfonts': False,
'pgf.texsystem': system,
}
mpl.rcParams.update(rc_pdflatex)
fig1, ax1 = plt.subplots()
ax1.plot(range(5))
fig1.tight_layout()
fig2, ax2 = plt.subplots(figsize=(3, 2))
ax2.plot(range(5))
fig2.tight_layout()
path = os.path.join(result_dir, f'pdfpages_{system}.pdf')
md = {
'Author': 'me',
'Title': 'Multipage PDF with pgf',
'Subject': 'Test page',
'Keywords': 'test,pdf,multipage',
'ModDate': datetime.datetime(
1968, 8, 1, tzinfo=datetime.timezone(datetime.timedelta(0))),
'Trapped': 'Unknown'
}
with PdfPages(path, metadata=md) as pdf:
pdf.savefig(fig1)
pdf.savefig(fig2)
pdf.savefig(fig1)
assert pdf.get_pagecount() == 3
@pytest.mark.style('default')
@pytest.mark.backend('pgf')
@pytest.mark.parametrize('system', [
pytest.param('lualatex', marks=[needs_lualatex]),
pytest.param('pdflatex', marks=[needs_pdflatex]),
pytest.param('xelatex', marks=[needs_xelatex]),
])
def test_pdf_pages_metadata_check(monkeypatch, system):
# Basically the same as test_pdf_pages, but we keep it separate to leave
# pikepdf as an optional dependency.
pikepdf = pytest.importorskip('pikepdf')
monkeypatch.setenv('SOURCE_DATE_EPOCH', '0')
mpl.rcParams.update({'pgf.texsystem': system})
fig, ax = plt.subplots()
ax.plot(range(5))
md = {
'Author': 'me',
'Title': 'Multipage PDF with pgf',
'Subject': 'Test page',
'Keywords': 'test,pdf,multipage',
'ModDate': datetime.datetime(
1968, 8, 1, tzinfo=datetime.timezone(datetime.timedelta(0))),
'Trapped': 'True'
}
path = os.path.join(result_dir, f'pdfpages_meta_check_{system}.pdf')
with PdfPages(path, metadata=md) as pdf:
pdf.savefig(fig)
with pikepdf.Pdf.open(path) as pdf:
info = {k: str(v) for k, v in pdf.docinfo.items()}
# Not set by us, so don't bother checking.
if '/PTEX.FullBanner' in info:
del info['/PTEX.FullBanner']
if '/PTEX.Fullbanner' in info:
del info['/PTEX.Fullbanner']
assert info == {
'/Author': 'me',
'/CreationDate': 'D:19700101000000Z',
'/Creator': f'Matplotlib v{mpl.__version__}, https://matplotlib.org',
'/Keywords': 'test,pdf,multipage',
'/ModDate': 'D:19680801000000Z',
'/Producer': f'Matplotlib pgf backend v{mpl.__version__}',
'/Subject': 'Test page',
'/Title': 'Multipage PDF with pgf',
'/Trapped': '/True',
}
@needs_xelatex
def test_tex_restart_after_error():
fig = plt.figure()
fig.suptitle(r"\oops")
with pytest.raises(ValueError):
fig.savefig(BytesIO(), format="pgf")
fig = plt.figure() # start from scratch
fig.suptitle(r"this is ok")
fig.savefig(BytesIO(), format="pgf")
@needs_xelatex
def test_bbox_inches_tight():
fig, ax = plt.subplots()
ax.imshow([[0, 1], [2, 3]])
fig.savefig(BytesIO(), format="pdf", backend="pgf", bbox_inches="tight")
@needs_xelatex
@needs_ghostscript
def test_png():
# Just a smoketest.
fig, ax = plt.subplots()
fig.savefig(BytesIO(), format="png", backend="pgf")
@needs_xelatex
def test_unknown_font(caplog):
with caplog.at_level("WARNING"):
mpl.rcParams["font.family"] = "this-font-does-not-exist"
plt.figtext(.5, .5, "hello, world")
plt.savefig(BytesIO(), format="pgf")
assert "Ignoring unknown font: this-font-does-not-exist" in [
r.getMessage() for r in caplog.records]
@check_figures_equal(extensions=["pdf"])
@pytest.mark.parametrize("texsystem", ("pdflatex", "xelatex", "lualatex"))
@pytest.mark.backend("pgf")
def test_minus_signs_with_tex(fig_test, fig_ref, texsystem):
if not _check_for_pgf(texsystem):
pytest.skip(texsystem + ' + pgf is required')
mpl.rcParams["pgf.texsystem"] = texsystem
fig_test.text(.5, .5, "$-1$")
fig_ref.text(.5, .5, "$\N{MINUS SIGN}1$")
|
the-stack_0_9317 | #!/usr/bin/env python
"""Test the grr aff4 objects."""
import hashlib
import io
import time
from builtins import range # pylint: disable=redefined-builtin
import mock
from grr_response_core.lib import flags
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import client_network as rdf_client_network
from grr_response_core.lib.rdfvalues import cloud as rdf_cloud
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server import aff4
from grr_response_server import data_store
from grr_response_server import events
from grr_response_server.aff4_objects import aff4_grr
from grr_response_server.flows.general import transfer
from grr.test_lib import action_mocks
from grr.test_lib import aff4_test_lib
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
class MockChangeEvent(events.EventListener):
EVENTS = ["MockChangeEvent"]
CHANGED_URNS = []
def ProcessMessages(self, msgs=None, token=None):
MockChangeEvent.CHANGED_URNS.extend(msgs)
class AFF4GRRTest(aff4_test_lib.AFF4ObjectTest):
"""Test the client aff4 implementation."""
def setUp(self):
super(AFF4GRRTest, self).setUp()
MockChangeEvent.CHANGED_URNS = []
def testAFF4Path(self):
"""Test the pathspec to URN conversion function."""
pathspec = rdf_paths.PathSpec(
path="\\\\.\\Volume{1234}\\",
pathtype=rdf_paths.PathSpec.PathType.OS,
mount_point="/c:/").Append(
path="/windows", pathtype=rdf_paths.PathSpec.PathType.TSK)
urn = pathspec.AFF4Path(rdf_client.ClientURN("C.1234567812345678"))
self.assertEqual(
urn,
rdfvalue.RDFURN(
r"aff4:/C.1234567812345678/fs/tsk/\\.\Volume{1234}\/windows"))
# Test an ADS
pathspec = rdf_paths.PathSpec(
path="\\\\.\\Volume{1234}\\",
pathtype=rdf_paths.PathSpec.PathType.OS,
mount_point="/c:/").Append(
pathtype=rdf_paths.PathSpec.PathType.TSK,
path="/Test Directory/notes.txt:ads",
inode=66,
ntfs_type=128,
ntfs_id=2)
urn = pathspec.AFF4Path(rdf_client.ClientURN("C.1234567812345678"))
self.assertEqual(
urn,
rdfvalue.RDFURN(r"aff4:/C.1234567812345678/fs/tsk/\\.\Volume{1234}\/"
"Test Directory/notes.txt:ads"))
def testClientSubfieldGet(self):
"""Test we can get subfields of the client."""
fd = aff4.FACTORY.Create(
"C.0000000000000000", aff4_grr.VFSGRRClient, token=self.token)
kb = fd.Schema.KNOWLEDGE_BASE()
for i in range(5):
kb.users.Append(rdf_client.User(username="user%s" % i))
fd.Set(kb)
fd.Close()
fd = aff4.FACTORY.Open(
"C.0000000000000000", aff4_grr.VFSGRRClient, token=self.token)
for i, user in enumerate(fd.Get(fd.Schema.KNOWLEDGE_BASE).users):
self.assertEqual(user.username, "user%s" % i)
def testVFSFileContentLastNotUpdated(self):
"""Make sure CONTENT_LAST does not update when only STAT is written.."""
path = "/C.12345/contentlastchecker"
timestamp = 1
with utils.Stubber(time, "time", lambda: timestamp):
fd = aff4.FACTORY.Create(
path, aff4_grr.VFSFile, mode="w", token=self.token)
timestamp += 1
fd.SetChunksize(10)
# Make lots of small writes - The length of this string and the chunk size
      # are relatively prime, which exercises the worst case.
for i in range(100):
fd.Write("%s%08X\n" % ("Test", i))
# Flush after every write.
fd.Flush()
# And advance the time.
timestamp += 1
fd.Set(fd.Schema.STAT, rdf_client_fs.StatEntry())
fd.Close()
fd = aff4.FACTORY.Open(path, mode="rw", token=self.token)
# Make sure the attribute was written when the write occured.
self.assertEqual(int(fd.GetContentAge()), 101000000)
# Write the stat (to be the same as before, but this still counts
# as a write).
fd.Set(fd.Schema.STAT, fd.Get(fd.Schema.STAT))
fd.Flush()
fd = aff4.FACTORY.Open(path, token=self.token)
# The age of the content should still be the same.
self.assertEqual(int(fd.GetContentAge()), 101000000)
def testVFSFileStartsOnlyOneMultiGetFileFlowOnUpdate(self):
"""File updates should only start one MultiGetFile at any point in time."""
client_id = self.SetupClient(0)
# We need to create a file path having a pathspec.
path = "fs/os/c/bin/bash"
with aff4.FACTORY.Create(
client_id.Add(path),
aff4_type=aff4_grr.VFSFile,
mode="rw",
token=self.token) as file_fd:
file_fd.Set(
file_fd.Schema.STAT,
rdf_client_fs.StatEntry(
pathspec=rdf_paths.PathSpec(path="/bin/bash", pathtype="OS")))
# Starts a MultiGetFile flow.
file_fd.Update()
# Check that there is exactly one flow on the client.
flows_fd = aff4.FACTORY.Open(client_id.Add("flows"), token=self.token)
flows = list(flows_fd.ListChildren())
self.assertEqual(len(flows), 1)
# The flow is the MultiGetFile flow holding the lock on the file.
flow_obj = aff4.FACTORY.Open(flows[0], token=self.token)
self.assertEqual(
flow_obj.Get(flow_obj.Schema.TYPE), transfer.MultiGetFile.__name__)
self.assertEqual(flow_obj.urn, file_fd.Get(file_fd.Schema.CONTENT_LOCK))
# Since there is already a running flow having the lock on the file,
# this call shouldn't do anything.
file_fd.Update()
# There should still be only one flow on the client.
flows_fd = aff4.FACTORY.Open(client_id.Add("flows"), token=self.token)
flows = list(flows_fd.ListChildren())
self.assertEqual(len(flows), 1)
def testVFSFileStartsNewMultiGetFileWhenLockingFlowHasFinished(self):
"""A new MultiFileGet can be started when the locking flow has finished."""
client_id = self.SetupClient(0)
path = "fs/os/c/bin/bash"
with aff4.FACTORY.Create(
client_id.Add(path),
aff4_type=aff4_grr.VFSFile,
mode="rw",
token=self.token) as file_fd:
file_fd.Set(
file_fd.Schema.STAT,
rdf_client_fs.StatEntry(
pathspec=rdf_paths.PathSpec(path="/bin/bash", pathtype="OS")))
# Starts a MultiGetFile flow.
first_update_flow_urn = file_fd.Update()
# Check that there is exactly one flow on the client.
flows_fd = aff4.FACTORY.Open(client_id.Add("flows"), token=self.token)
flows = list(flows_fd.ListChildren())
self.assertEqual(len(flows), 1)
# Finish the flow holding the lock.
client_mock = action_mocks.ActionMock()
flow_test_lib.TestFlowHelper(
flows[0], client_mock, client_id=client_id, token=self.token)
# The flow holding the lock has finished, so Update() should start a new
# flow.
second_update_flow_urn = file_fd.Update()
# There should be two flows now.
flows_fd = aff4.FACTORY.Open(client_id.Add("flows"), token=self.token)
flows = list(flows_fd.ListChildren())
self.assertEqual(len(flows), 2)
# Make sure that each Update() started a new flow and that the second flow
# is holding the lock.
self.assertNotEqual(first_update_flow_urn, second_update_flow_urn)
self.assertEqual(second_update_flow_urn,
file_fd.Get(file_fd.Schema.CONTENT_LOCK))
def testGetClientSummary(self):
hostname = "test"
system = "Linux"
os_release = "12.02"
kernel = "3.15-rc2"
fqdn = "test.test.com"
arch = "amd64"
install_time = rdfvalue.RDFDatetime.Now()
user = "testuser"
userobj = rdf_client.User(username=user)
interface = rdf_client_network.Interface(ifname="eth0")
google_cloud_instance = rdf_cloud.GoogleCloudInstance(
instance_id="1771384456894610289",
zone="projects/123456789733/zones/us-central1-a",
project_id="myproject",
unique_id="us-central1-a/myproject/1771384456894610289")
cloud_instance = rdf_cloud.CloudInstance(
cloud_type="GOOGLE", google=google_cloud_instance)
serial_number = "DSD33679FZ"
system_manufacturer = "Foobar Inc."
system_uuid = "C31292AD-6Z4F-55D8-28AC-EC1100E42222"
hwinfo = rdf_client.HardwareInfo(
serial_number=serial_number,
system_manufacturer=system_manufacturer,
system_uuid=system_uuid)
timestamp = 1
with utils.Stubber(time, "time", lambda: timestamp):
with aff4.FACTORY.Create(
"C.0000000000000000",
aff4_grr.VFSGRRClient,
mode="rw",
token=self.token) as fd:
kb = rdf_client.KnowledgeBase()
kb.users.Append(userobj)
empty_summary = fd.GetSummary()
self.assertEqual(empty_summary.client_id, "C.0000000000000000")
self.assertFalse(empty_summary.system_info.version)
self.assertEqual(empty_summary.timestamp.AsSecondsSinceEpoch(), 1)
# This will cause TYPE to be written with current time = 101 when the
# object is closed
timestamp += 100
fd.Set(fd.Schema.HOSTNAME(hostname))
fd.Set(fd.Schema.SYSTEM(system))
fd.Set(fd.Schema.OS_RELEASE(os_release))
fd.Set(fd.Schema.KERNEL(kernel))
fd.Set(fd.Schema.FQDN(fqdn))
fd.Set(fd.Schema.ARCH(arch))
fd.Set(fd.Schema.INSTALL_DATE(install_time))
fd.Set(fd.Schema.KNOWLEDGE_BASE(kb))
fd.Set(fd.Schema.USERNAMES(user))
fd.Set(fd.Schema.HARDWARE_INFO(hwinfo))
fd.Set(fd.Schema.INTERFACES([interface]))
fd.Set(fd.Schema.CLOUD_INSTANCE(cloud_instance))
with aff4.FACTORY.Open(
"C.0000000000000000",
aff4_grr.VFSGRRClient,
mode="rw",
token=self.token) as fd:
summary = fd.GetSummary()
self.assertEqual(summary.system_info.system, system)
self.assertEqual(summary.system_info.release, os_release)
self.assertEqual(summary.system_info.kernel, kernel)
self.assertEqual(summary.system_info.fqdn, fqdn)
self.assertEqual(summary.system_info.machine, arch)
self.assertEqual(summary.system_info.install_date, install_time)
self.assertItemsEqual(summary.users, [userobj])
self.assertItemsEqual(summary.interfaces, [interface])
self.assertFalse(summary.client_info)
self.assertEqual(summary.timestamp.AsSecondsSinceEpoch(), 101)
self.assertEqual(summary.cloud_type, "GOOGLE")
self.assertEqual(summary.cloud_instance_id,
"us-central1-a/myproject/1771384456894610289")
self.assertEqual(summary.serial_number, serial_number)
self.assertEqual(summary.system_manufacturer, system_manufacturer)
self.assertEqual(summary.system_uuid, system_uuid)
def StoreBlobStub(blob, token=None):
del token # Unused.
return hashlib.sha256(blob).hexdigest()
class BlobImageTest(aff4_test_lib.AFF4ObjectTest):
"""Tests for cron functionality."""
def testAppendContentError(self):
src_content = b"ABCD" * 10
src_fd = io.BytesIO(src_content)
dest_fd = aff4.FACTORY.Create(
aff4.ROOT_URN.Add("temp"),
aff4_grr.VFSBlobImage,
token=self.token,
mode="rw")
dest_fd.SetChunksize(7)
dest_fd.AppendContent(src_fd)
dest_fd.Seek(0)
self.assertEqual(dest_fd.Read(5000), src_content)
src_fd.seek(0)
self.assertRaises(IOError, dest_fd.AppendContent, src_fd)
def testAppendContent(self):
"""Test writing content where content length % chunksize == 0."""
src_content = b"ABCDEFG" * 10 # 10 chunksize blobs
src_fd = io.BytesIO(src_content)
dest_fd = aff4.FACTORY.Create(
aff4.ROOT_URN.Add("temp"),
aff4_grr.VFSBlobImage,
token=self.token,
mode="rw")
self.assertEqual(dest_fd.Get(dest_fd.Schema.HASHES), None)
dest_fd.SetChunksize(7)
dest_fd.AppendContent(src_fd)
self.assertEqual(int(dest_fd.Get(dest_fd.Schema.SIZE)), len(src_content))
self.assertTrue(dest_fd.Get(dest_fd.Schema.HASHES))
dest_fd.Seek(0)
self.assertEqual(dest_fd.Read(5000), src_content)
src_fd.seek(0)
dest_fd.AppendContent(src_fd)
self.assertEqual(dest_fd.size, 2 * len(src_content))
self.assertEqual(
int(dest_fd.Get(dest_fd.Schema.SIZE)), 2 * len(src_content))
dest_fd.Seek(0)
self.assertEqual(dest_fd.Read(5000), src_content + src_content)
def testMultiStreamStreamsSingleFileWithSingleChunk(self):
with aff4.FACTORY.Create(
"aff4:/foo", aff4_type=aff4_grr.VFSBlobImage, token=self.token) as fd:
fd.SetChunksize(10)
fd.AppendContent(io.BytesIO(b"123456789"))
fd = aff4.FACTORY.Open("aff4:/foo", token=self.token)
chunks_fds = list(aff4.AFF4Stream.MultiStream([fd]))
self.assertEqual(len(chunks_fds), 1)
self.assertEqual(chunks_fds[0][1], b"123456789")
self.assertIs(chunks_fds[0][0], fd)
  def testMultiStreamStreamsSingleFileWithTwoChunks(self):
with aff4.FACTORY.Create(
"aff4:/foo", aff4_type=aff4_grr.VFSBlobImage, token=self.token) as fd:
fd.SetChunksize(10)
fd.AppendContent(io.BytesIO(b"123456789"))
with aff4.FACTORY.Create(
"aff4:/bar", aff4_type=aff4_grr.VFSBlobImage, token=self.token) as fd:
fd.SetChunksize(10)
fd.AppendContent(io.BytesIO(b"abcd"))
fd1 = aff4.FACTORY.Open("aff4:/foo", token=self.token)
fd2 = aff4.FACTORY.Open("aff4:/bar", token=self.token)
chunks_fds = list(aff4.AFF4Stream.MultiStream([fd1, fd2]))
self.assertEqual(len(chunks_fds), 2)
self.assertEqual(chunks_fds[0][1], b"123456789")
self.assertIs(chunks_fds[0][0], fd1)
self.assertEqual(chunks_fds[1][1], b"abcd")
self.assertIs(chunks_fds[1][0], fd2)
def testMultiStreamStreamsTwoFilesWithTwoChunksInEach(self):
with aff4.FACTORY.Create(
"aff4:/foo", aff4_type=aff4_grr.VFSBlobImage, token=self.token) as fd:
fd.SetChunksize(10)
fd.AppendContent(io.BytesIO(b"*" * 10 + b"123456789"))
with aff4.FACTORY.Create(
"aff4:/bar", aff4_type=aff4_grr.VFSBlobImage, token=self.token) as fd:
fd.SetChunksize(10)
fd.AppendContent(io.BytesIO(b"*" * 10 + b"abcd"))
fd1 = aff4.FACTORY.Open("aff4:/foo", token=self.token)
fd2 = aff4.FACTORY.Open("aff4:/bar", token=self.token)
chunks_fds = list(aff4.AFF4Stream.MultiStream([fd1, fd2]))
self.assertEqual(len(chunks_fds), 4)
self.assertEqual(chunks_fds[0][1], b"*" * 10)
self.assertIs(chunks_fds[0][0], fd1)
self.assertEqual(chunks_fds[1][1], b"123456789")
self.assertIs(chunks_fds[1][0], fd1)
self.assertEqual(chunks_fds[2][1], b"*" * 10)
self.assertIs(chunks_fds[2][0], fd2)
self.assertEqual(chunks_fds[3][1], b"abcd")
self.assertIs(chunks_fds[3][0], fd2)
def testMultiStreamReturnsExceptionIfChunkIsMissing(self):
with aff4.FACTORY.Create(
"aff4:/foo", aff4_type=aff4_grr.VFSBlobImage, token=self.token) as fd:
fd.SetChunksize(10)
# Patching StoreBlob prevents the blobs from actually being written.
with mock.patch.object(
data_store.DB, "StoreBlob", side_effect=StoreBlobStub):
fd.AppendContent(io.BytesIO(b"123456789"))
fd.index.seek(0)
blob_id = fd.index.read(fd._HASH_SIZE).encode("hex")
fd = aff4.FACTORY.Open("aff4:/foo", token=self.token)
returned_fd, _, e = list(aff4.AFF4Stream.MultiStream([fd]))[0]
self.assertNotEqual(e, None)
self.assertEqual(returned_fd, fd)
self.assertEqual(e.missing_chunks, [blob_id])
def testMultiStreamIgnoresTheFileIfAnyChunkIsMissingInReadAheadChunks(self):
with aff4.FACTORY.Create(
"aff4:/foo", aff4_type=aff4_grr.VFSBlobImage, token=self.token) as fd:
fd.SetChunksize(10)
fd.AppendContent(io.BytesIO(b"*" * 10))
# Patching StoreBlob prevents the blobs from actually being written.
with mock.patch.object(
data_store.DB, "StoreBlob", side_effect=StoreBlobStub):
fd.AppendContent(io.BytesIO(b"123456789"))
fd = aff4.FACTORY.Open("aff4:/foo", token=self.token)
count = 0
for _, _, e in aff4.AFF4Stream.MultiStream([fd]):
if not e:
count += 1
self.assertEqual(count, 0)
@mock.patch.object(aff4_grr.VFSBlobImage, "MULTI_STREAM_CHUNKS_READ_AHEAD", 1)
def testMultiStreamTruncatesBigFileIfLastChunkIsMissing(self):
# If the file is split between 2 batches of chunks, and the missing
# chunk is in the second batch, the first batch will be succesfully
# yielded.
with aff4.FACTORY.Create(
"aff4:/foo", aff4_type=aff4_grr.VFSBlobImage, token=self.token) as fd:
fd.SetChunksize(10)
fd.AppendContent(io.BytesIO(b"*" * 10))
# Patching StoreBlob prevents the blobs from actually being written.
with mock.patch.object(
data_store.DB, "StoreBlob", side_effect=StoreBlobStub):
fd.AppendContent(io.BytesIO(b"123456789"))
fd = aff4.FACTORY.Open("aff4:/foo", token=self.token)
content = []
error_detected = False
for fd, chunk, e in aff4.AFF4Stream.MultiStream([fd]):
if not e:
content.append(chunk)
else:
error_detected = True
self.assertEqual(content, [b"*" * 10])
self.assertTrue(error_detected)
@mock.patch.object(aff4_grr.VFSBlobImage, "MULTI_STREAM_CHUNKS_READ_AHEAD", 1)
def testMultiStreamSkipsBigFileIfFirstChunkIsMissing(self):
# If the file is split between 2 batches of chunks, and the missing
# chunk is in the first batch, the file will be skipped entirely.
with aff4.FACTORY.Create(
"aff4:/foo", aff4_type=aff4_grr.VFSBlobImage, token=self.token) as fd:
fd.SetChunksize(10)
# Patching StoreBlob prevents the blobs from actually being written.
with mock.patch.object(
data_store.DB, "StoreBlob", side_effect=StoreBlobStub):
fd.AppendContent(io.BytesIO(b"*" * 10))
fd.AppendContent(io.BytesIO(b"123456789"))
fd = aff4.FACTORY.Open("aff4:/foo", token=self.token)
count = 0
for _, _, e in aff4.AFF4Stream.MultiStream([fd]):
if not e:
count += 1
self.assertEqual(count, 0)
def main(argv):
# Run the full test suite
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
|
the-stack_0_9319 | import setuptools
from src.ptth import __version__ as version
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="post-tonal-theory-helper-mbmasuda",
version=version,
author="Mari Masuda",
author_email="[email protected]",
description="Post-tonal music theory analysis functions",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/mbmasuda/post-tonal-theory-helper",
packages=setuptools.find_packages('src'),
package_dir={'':'src'},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
the-stack_0_9320 | """ AIPS STar table
Due to the funky nature of the AIPS STar table it cannot be made in the usual
Obit fashion. This class allows doing this from python.
Symbol type codes
1: Plus sign (default) 12: Five pointed star
2: Cross (X) 13: Star of David
3: Circle 14: Seven-pointed star
4: Box 15: Eight-pointed star
5: Triangle 16: Nine-pointed star
6: Diamond 17: Ten-pointed star
7: Pentagon 18: 11-pointed star
8: Hexagon 19: 12-pointed star
9: Septagon 20: 13-pointed star
10: Octagon 21: 14-pointed star
11: Nine-gon 22: Plus with gap
23: Vertical line
24: Cross (X) with gap
"""
# $Id$
#-----------------------------------------------------------------------
# Copyright (C) 2007,2019
# Associated Universities, Inc. Washington DC, USA.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 675 Massachusetts Ave, Cambridge,
# MA 02139, USA.
#
# Correspondence concerning this software should be addressed as follows:
# Internet email: [email protected].
# Postal address: William Cotton
# National Radio Astronomy Observatory
# 520 Edgemont Road
# Charlottesville, VA 22903-2475 USA
#-----------------------------------------------------------------------
from __future__ import absolute_import
import Obit, Table, TableDesc, OErr, Image, ImageDesc
class TableSTar(Table.Table):
pass
# end class TableSTar
# Data type codes
OBIT_double = 10
OBIT_float = 9
OBIT_string = 13
OBIT_int = 2
# Non class functions
def PCreate(im, err, ver=0):
"""
New AIPS STars table
Create a ST table on input image im
im = Obit Image on which to attach ST Table
err = Python Obit Error/message stack
ver = version, 0=> new
"""
################################################################
# Check
if not im.ImageIsA():
raise TypeError('im MUST be a Python Obit Image')
if not OErr.OErrIsA(err):
raise TypeError("err MUST be an OErr")
if err.isErr: # existing error?
return None
# Get image descriptor
id = im.Desc.Dict
# Set descriptor dict
dd = {"FieldName":[id["ctype"][0].strip(), id["ctype"][1].strip(), "MAJOR AX", "MINOR AX", \
'POSANG', 'STARTYPE', 'LABEL', \
"_status"], \
"FieldUnit":["DEGREES", "DEGREES", "DEGREES", "DEGREES", \
"DEGREES", "INDEX ", " ", " "], \
"repeat":[1,1,1,1,1,1,24,1], \
"dim0":[1,1,1,1,1,1,24,1], \
"dim1":[1,1,1,1,1,1,1,1], \
"dim2":[1,1,1,1,1,1,1,1], \
"type":[OBIT_double,OBIT_double,OBIT_float,OBIT_float,OBIT_float,OBIT_float,\
OBIT_string,OBIT_int], \
"sortOrder1":0, "sortOrder2":0, "Table name":"AIPS ST", "version":1 \
}
# Table descriptor
tabDesc = TableDesc.PDef(dd)
# Table
st = im.NewTable(Table.WRITEONLY,"AIPS ST",ver,err)
Obit.TableSetDesc(st.me, tabDesc.me)
# Instantiate
Table.PFullInstantiate(st, Table.WRITEONLY, err)
return st
# end PCreate
def newRow (im):
""" Create new row structure for writing ST Table
im = Obit Image on which to attach ST Table
returns row:
    Position columns have labels of the first two axes of the image
(e.g. 'RA---SIN', 'DEC--SIN')
'MAJOR AX' major axis of symbol
'MINOR AX Minor axis of symbol (deg)
'POSANG' Position angle in deg
'STARTYPE' symbol code
1: Plus sign (default) 12: Five pointed star
2: Cross (X) 13: Star of David
3: Circle 14: Seven-pointed star
4: Box 15: Eight-pointed star
5: Triangle 16: Nine-pointed star
6: Diamond 17: Ten-pointed star
7: Pentagon 18: 11-pointed star
8: Hexagon 19: 12-pointed star
9: Septagon 20: 13-pointed star
10: Octagon 21: 14-pointed star
11: Nine-gon 22: Plus with gap
23: Vertical line
24: Cross (X) with gap
'LABEL' Label string for symbol, up to 24 char.
"""
# Get image descriptor
id = im.Desc.Dict
out = {id["ctype"][0].strip():[0.0], id["ctype"][1].strip():[0.0], \
'MINOR AX': [0.0], 'MAJOR AX': [0.0], 'POSANG': [0.0], 'STARTYPE':[3.0], \
'LABEL': [' '], \
'NumFields': 8, 'Table name': 'AIPS ST', '_status': [0]}
return out
# end newRow
def PWriteCirc (sttab, im, center, radius, err):
""" Write an entry for drawing a circle
sttab = Python Table object, must be open with write enabled
im = Obit Image on which to attach ST Table
center = [x,y] pixels
radius = radius in pixels
err = Python Obit Error/message stack
"""
################################################################
# Check
if not OErr.OErrIsA(err):
raise TypeError("err MUST be an OErr")
if err.isErr: # existing error?
return None
# Get image descriptor
id = im.Desc.Dict
# Get row
row = newRow(im)
# Convert pixels to positions
pos = ImageDesc.PGetPos(im.Desc, center, err)
if err.isErr:
printErrMsg(err, "Error converting pixel location to position")
row[id["ctype"][0].strip()] = [pos[0]]
row[id["ctype"][1].strip()] = [pos[1]]
row['MAJOR AX'] = [radius * abs(id["cdelt"][0])]
row['MINOR AX'] = row['MAJOR AX']
row['POSANG'] = [0.0]
row['STARTYPE'] = [3.0]
row['LABEL'] = [" "]
# Write
sttab.WriteRow(-1,row, err)
if err.isErr:
        OErr.printErrMsg(err, "Error Writing ST table")
# end PWriteCirc
def PWriteEllipse (sttab, im, center, major, minor, PA, err):
""" Write an entry for drawing a circle
sttab = Python Table object, must be open with write enabled
im = Obit Image on which to attach ST Table
center = [x,y] pixels
major = major axis size in pixels
minor = minor axis size in pixels
PA = position angle (from N thru E in deg)
err = Python Obit Error/message stack
"""
################################################################
# Check
if not OErr.OErrIsA(err):
raise TypeError("err MUST be an OErr")
if err.isErr: # existing error?
return None
# Get image descriptor
id = im.Desc.Dict
# Get row
row = newRow(im)
# Convert pixels to positions
pos = ImageDesc.PGetPos(im.Desc, center, err)
if err.isErr:
        OErr.printErrMsg(err, "Error converting pixel location to position")
row[id["ctype"][0].strip()] = [pos[0]]
row[id["ctype"][1].strip()] = [pos[1]]
row['MAJOR AX'] = [major * abs(id["cdelt"][0])]
row['MINOR AX'] = [minor * abs(id["cdelt"][0])]
row['POSANG'] = [PA]
row['STARTYPE'] = [3.0]
row['LABEL'] = [" "]
# Write
sttab.WriteRow(-1,row, err)
if err.isErr:
        OErr.printErrMsg(err, "Error Writing ST table")
# end PWriteEllipse
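# ---------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): it shows how the
# functions above fit together.  The FITS file name, disk number and the
# Open/Close calls on the returned table are assumptions about the usual
# Obit conventions -- check the Obit Image/Table documentation for the
# exact signatures in your installation.
if __name__ == "__main__":
    err = OErr.OErr()                                  # error/message stack
    img = Image.newPFImage("in", "myImage.fits", 0, True, err)  # assumed input file
    st = PCreate(img, err)                             # attach a new AIPS ST table
    st.Open(Table.READWRITE, err)                      # assumed Obit Table method
    PWriteCirc(st, img, [256.0, 256.0], 10.0, err)     # circle of 10 pixel radius
    PWriteEllipse(st, img, [128.0, 128.0], 20.0, 10.0, 45.0, err)  # tilted ellipse
    st.Close(err)                                      # assumed Obit Table method
    OErr.printErrMsg(err, "Error writing ST table entries")
# end usage sketch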
|
the-stack_0_9321 | from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Example:
# (r'^cms/', include('cms.foo.urls')),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Admin URLs
(r'^admin/filebrowser/', include('filebrowser.urls')),
(r'^grappelli/', include('grappelli.urls')),
#(r'^tinymce/', include('tinymce.urls')),
(r'^admin/(.*)', admin.site.root),
# cms URLs
(r'^/?$', 'django.views.generic.simple.redirect_to', { 'url': 'weblog/' } ),
(r'^search/$', 'cms.search.views.search'),
# snakelog URLs
(r'^weblog/categories/', include('snakelog.urls.categories')),
(r'^weblog/links/', include('snakelog.urls.links')),
(r'^weblog/tags/', include('snakelog.urls.tags')),
(r'^weblog/', include('snakelog.urls.entries')),
# Comment URLS
(r'^comments/', include('django.contrib.comments.urls')),
# Last catch all for flatpages
(r'', include('django.contrib.flatpages.urls')),
)
|
the-stack_0_9324 | import numpy as np
def value_iteration(env, gamma, theta, max_iterations, value=None):
if value is None:
value = np.zeros(env.n_states)
else:
        value = np.array(value, dtype=float)
for _ in range(max_iterations):
delta = 0.
for s in range(env.n_states):
v = value[s]
value[s] = max([sum([env.p(next_s, s, a) * (env.r(next_s, s, a) + gamma * value[next_s]) for next_s in range(env.n_states)]) for a in range(env.n_actions)])
delta = max(delta, np.abs(v - value[s]))
if delta < theta:
break
policy = np.zeros(env.n_states, dtype=int)
for s in range(env.n_states):
policy[s] = np.argmax([sum([env.p(next_s, s, a) * (env.r(next_s, s, a) + gamma * value[next_s]) for next_s in range(env.n_states)]) for a in range(env.n_actions)])
return policy, value
def policy_evaluation(env, policy, gamma, theta, max_iterations):
    value = np.zeros(env.n_states, dtype=float)
for _ in range(max_iterations):
delta = 0
for s in range(env.n_states):
v = value[s]
value[s] = sum([env.p(next_s, s, policy[s]) * (env.r(next_s, s, policy[s]) + gamma * value[next_s]) for next_s in range(env.n_states)])
delta = max(delta, abs(v - value[s]))
if delta < theta:
break
return value
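# Hedged usage sketch (not part of the original module): a minimal, fully
# deterministic 3-state chain environment exercising value_iteration and
# policy_evaluation.  The environment class is hypothetical; it only
# implements the n_states/n_actions/p/r interface assumed by the functions above.
class _ChainEnv:
    """States 0 -> 1 -> 2 (absorbing). Action 0 moves right, action 1 stays."""
    n_states = 3
    n_actions = 2

    def p(self, next_s, s, a):
        # Transition probability P(next_s | s, a); deterministic dynamics.
        if s == 2:                       # state 2 is absorbing
            return 1.0 if next_s == 2 else 0.0
        target = s + 1 if a == 0 else s
        return 1.0 if next_s == target else 0.0

    def r(self, next_s, s, a):
        # Reward of 1 for the transition that enters the terminal state.
        return 1.0 if (s != 2 and next_s == 2) else 0.0


if __name__ == "__main__":
    env = _ChainEnv()
    policy, value = value_iteration(env, gamma=0.9, theta=1e-8, max_iterations=100)
    print("greedy policy :", policy)   # expected: action 0 (move right) in states 0 and 1
    print("state values  :", value)
    print("policy value  :", policy_evaluation(env, policy, gamma=0.9,
                                               theta=1e-8, max_iterations=100))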
|
the-stack_0_9325 | import csv
from django.http import HttpResponse
class ExportCsvMixin:
def export_as_csv(self, request, queryset):
meta = self.model._meta
field_names = [field.name for field in meta.fields]
response = HttpResponse(content_type="text/csv")
response["Content-Disposition"] = "attachment; filename={}.csv".format(meta)
writer = csv.writer(response)
writer.writerow(field_names)
        for obj in queryset:
            writer.writerow([getattr(obj, field) for field in field_names])
return response
export_as_csv.short_description = "Export to csv"
def all_complete(self, request, queryset):
self.model.objects.all().update(completed=True)
self.message_user(request, "All task are set as completed now")
def all_not_complete(self, request, queryset):
self.model.objects.all().update(completed=False)
self.message_user(request, "All task are set as uncompleted now")
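# Hedged usage sketch (not part of the original module): how the mixin is
# typically combined with a Django ModelAdmin and exposed through admin
# actions.  The "Task" model is hypothetical, so the registration below is
# illustrative rather than importable as-is; any model with a "completed"
# BooleanField fits the all_complete/all_not_complete actions.
#
# from django.contrib import admin
# from .models import Task
#
# @admin.register(Task)
# class TaskAdmin(ExportCsvMixin, admin.ModelAdmin):
#     list_display = ("id", "title", "completed")
#     actions = ("export_as_csv", "all_complete", "all_not_complete")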
|
the-stack_0_9326 | # -*- coding: utf-8 -*-
'''
The music21 Framework is Copyright © 2006-2015 Michael Scott Cuthbert
and the music21 Project
(Michael Scott Cuthbert, principal investigator; [email protected])
Some Rights Reserved
Released under the Lesser GNU Public License (LGPL) or the BSD (3-clause) license.
See license.txt file for the full license which represents your legal
obligations in using, modifying, or distributing music21.
Roughly speaking, this means that anyone can use this software for
free, they can distribute it to anyone, so long as this acknowledgment
of copyright and ownership remain publicly accessible. You may also
modify this software or use it in your own programs so long as you
make your product available under the same license. You may also link
to this code as a library
from your sold, proprietary commercial product so long as this code
remains open and accessible, this license is made accessible,
and the developers are credited.
The development of music21 was supported by grants
from the Seaver Institute and the NEH/Digging into Data Challenge,
with the support of the MIT
Music and Theater Arts section and the School of Humanities, Arts,
and Social Sciences. Portions of music21 were originally part of
the PMusic (Perl) library, developed by Cuthbert prior to arriving at MIT.
music21 outputs a subset of XML data defined by the MusicXML 2.0
standard, Copyright © Recordare LLC; License available at
http://www.recordare.com/dtds/license.html, now transferred to MakeMusic
music21 incorporates Microsoft Excel reading via the included
xlrd library:
Portions copyright (c) 2005-2006, Stephen John Machin, Lingfo Pty Ltd
All rights reserved.
see ext/xlrd/licenses.py for the complete disclaimer and conditions
Files in the ext/ folder are not copyright music21 Project but whose distribution
is compatible with music21. The corpus files have copyrights retained by their
owners who have allowed them to be included with music21.
'''
# this defines what is loaded when importing __all__
# put these in alphabetical order FIRST dirs then modules
# but: base must come first; in some cases other modules depend on
# definitions in base
__all__ = [
'base',
'sites', # important
# sub folders
'abcFormat',
'analysis',
'audioSearch',
'braille',
'capella',
'composition',
'counterpoint',
'corpus',
'demos',
'features',
'figuredBass',
'humdrum',
'ipython21',
'languageExcerpts',
'lily',
'mei',
'midi',
'musedata',
'musicxml',
'noteworthy',
'omr',
'romanText',
'scala',
'search',
'test',
'theoryAnalysis',
'timespans',
'trecento',
'vexflow',
'webapps',
# individual modules
# KEEP ALPHABETICAL unless necessary for load reasons, if so
# put a note. Keep one letter per line.
'articulations',
'bar',
# base listed above
'beam',
'chant',
'chord',
'chordTables',
'clef',
'common',
'configure',
'contour',
'converter',
'defaults',
'derivation',
'duration',
'dynamics',
'editorial',
'environment',
'exceptions21',
'expressions',
'freezeThaw',
'graph',
'harmony',
'instrument',
'interval',
'intervalNetwork',
'key',
'layout',
'medren',
'metadata',
'meter',
'note',
'pitch',
'repeat',
'roman',
'scale',
'serial',
'sieve',
'spanner',
'stream',
'tempo',
'text',
'tie',
'tinyNotation',
'variant',
'voiceLeading',
'volume',
'xmlnode',
]
#__all__.reverse()
#print __all__
# skipped purposely, "base", "xmlnode"
#-------------------------------------------------------------------------------
# for sub packages, need to manually add the modules in these subpackages
#from music21.analysis import *
#import sys
#x = sys.stdout
#-------------------------------------------------------------------------------
# base Music21Object -- all objects should inherit from this!
from music21 import base
from music21.base import VERSION
from music21.base import VERSION_STR
from music21.base import VERSION_STR as __version__
from music21.base import Music21Exception
from music21.base import SitesException
from music21.base import Music21ObjectException
from music21.base import ElementException
from music21.base import Groups
from music21.base import SiteRef
from music21.base import Sites
from music21.base import Music21Object
from music21.base import ElementWrapper
from music21.base import mainTest
from music21.base import *
#del(types)
#del(sys)
#del(imp)
#del(doctest)
#del(copy)
#del(codecs)
#del(unittest)
#-------------------------------------------------------------------------------
# place the parse function directly in the music21 namespace
# this cannot go in music21/base.py
#import converter
#parse = converter.parse
#------------------------------------------------------------------------------
# this bring all of the __all__ names into the music21 package namespace
from music21 import * # @UnresolvedImport
#------------------------------------------------------------------------------
# eof
|
the-stack_0_9327 | import warnings
import pytest
import flask
from flask.sessions import SecureCookieSessionInterface
from flask.sessions import SessionInterface
try:
from greenlet import greenlet
except ImportError:
greenlet = None
def test_teardown_on_pop(app):
buffer = []
@app.teardown_request
def end_of_request(exception):
buffer.append(exception)
ctx = app.test_request_context()
ctx.push()
assert buffer == []
ctx.pop()
assert buffer == [None]
def test_teardown_with_previous_exception(app):
buffer = []
@app.teardown_request
def end_of_request(exception):
buffer.append(exception)
try:
raise Exception("dummy")
except Exception:
pass
with app.test_request_context():
assert buffer == []
assert buffer == [None]
def test_teardown_with_handled_exception(app):
buffer = []
@app.teardown_request
def end_of_request(exception):
buffer.append(exception)
with app.test_request_context():
assert buffer == []
try:
raise Exception("dummy")
except Exception:
pass
assert buffer == [None]
def test_proper_test_request_context(app):
app.config.update(SERVER_NAME="localhost.localdomain:5000")
@app.route("/")
def index():
return None
@app.route("/", subdomain="foo")
def sub():
return None
with app.test_request_context("/"):
assert (
flask.url_for("index", _external=True)
== "http://localhost.localdomain:5000/"
)
with app.test_request_context("/"):
assert (
flask.url_for("sub", _external=True)
== "http://foo.localhost.localdomain:5000/"
)
# suppress Werkzeug 0.15 warning about name mismatch
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", "Current server name", UserWarning, "flask.app"
)
with app.test_request_context(
"/", environ_overrides={"HTTP_HOST": "localhost"}
):
pass
app.config.update(SERVER_NAME="localhost")
with app.test_request_context("/", environ_overrides={"SERVER_NAME": "localhost"}):
pass
app.config.update(SERVER_NAME="localhost:80")
with app.test_request_context(
"/", environ_overrides={"SERVER_NAME": "localhost:80"}
):
pass
def test_context_binding(app):
@app.route("/")
def index():
return f"Hello {flask.request.args['name']}!"
@app.route("/meh")
def meh():
return flask.request.url
with app.test_request_context("/?name=World"):
assert index() == "Hello World!"
with app.test_request_context("/meh"):
assert meh() == "http://localhost/meh"
assert flask._request_ctx_stack.top is None
def test_context_test(app):
assert not flask.request
assert not flask.has_request_context()
ctx = app.test_request_context()
ctx.push()
try:
assert flask.request
assert flask.has_request_context()
finally:
ctx.pop()
def test_manual_context_binding(app):
@app.route("/")
def index():
return f"Hello {flask.request.args['name']}!"
ctx = app.test_request_context("/?name=World")
ctx.push()
assert index() == "Hello World!"
ctx.pop()
with pytest.raises(RuntimeError):
index()
@pytest.mark.skipif(greenlet is None, reason="greenlet not installed")
class TestGreenletContextCopying:
def test_greenlet_context_copying(self, app, client):
greenlets = []
@app.route("/")
def index():
flask.session["fizz"] = "buzz"
reqctx = flask._request_ctx_stack.top.copy()
def g():
assert not flask.request
assert not flask.current_app
with reqctx:
assert flask.request
assert flask.current_app == app
assert flask.request.path == "/"
assert flask.request.args["foo"] == "bar"
assert flask.session.get("fizz") == "buzz"
assert not flask.request
return 42
greenlets.append(greenlet(g))
return "Hello World!"
rv = client.get("/?foo=bar")
assert rv.data == b"Hello World!"
result = greenlets[0].run()
assert result == 42
def test_greenlet_context_copying_api(self, app, client):
greenlets = []
@app.route("/")
def index():
flask.session["fizz"] = "buzz"
@flask.copy_current_request_context
def g():
assert flask.request
assert flask.current_app == app
assert flask.request.path == "/"
assert flask.request.args["foo"] == "bar"
assert flask.session.get("fizz") == "buzz"
return 42
greenlets.append(greenlet(g))
return "Hello World!"
rv = client.get("/?foo=bar")
assert rv.data == b"Hello World!"
result = greenlets[0].run()
assert result == 42
def test_session_error_pops_context():
class SessionError(Exception):
pass
class FailingSessionInterface(SessionInterface):
def open_session(self, app, request):
raise SessionError()
class CustomFlask(flask.Flask):
session_interface = FailingSessionInterface()
app = CustomFlask(__name__)
@app.route("/")
def index():
# shouldn't get here
        raise AssertionError("should not get here")
response = app.test_client().get("/")
assert response.status_code == 500
assert not flask.request
assert not flask.current_app
def test_session_dynamic_cookie_name():
# This session interface will use a cookie with a different name if the
# requested url ends with the string "dynamic_cookie"
class PathAwareSessionInterface(SecureCookieSessionInterface):
def get_cookie_name(self, app):
if flask.request.url.endswith("dynamic_cookie"):
return "dynamic_cookie_name"
else:
return super().get_cookie_name(app)
class CustomFlask(flask.Flask):
session_interface = PathAwareSessionInterface()
app = CustomFlask(__name__)
app.secret_key = "secret_key"
@app.route("/set", methods=["POST"])
def set():
flask.session["value"] = flask.request.form["value"]
return "value set"
@app.route("/get")
def get():
v = flask.session.get("value", "None")
return v
@app.route("/set_dynamic_cookie", methods=["POST"])
def set_dynamic_cookie():
flask.session["value"] = flask.request.form["value"]
return "value set"
@app.route("/get_dynamic_cookie")
def get_dynamic_cookie():
v = flask.session.get("value", "None")
return v
test_client = app.test_client()
# first set the cookie in both /set urls but each with a different value
assert test_client.post("/set", data={"value": "42"}).data == b"value set"
assert (
test_client.post("/set_dynamic_cookie", data={"value": "616"}).data
== b"value set"
)
# now check that the relevant values come back - meaning that different
# cookies are being used for the urls that end with "dynamic cookie"
assert test_client.get("/get").data == b"42"
assert test_client.get("/get_dynamic_cookie").data == b"616"
def test_bad_environ_raises_bad_request():
app = flask.Flask(__name__)
from flask.testing import EnvironBuilder
builder = EnvironBuilder(app)
environ = builder.get_environ()
# use a non-printable character in the Host - this is key to this test
environ["HTTP_HOST"] = "\x8a"
with app.request_context(environ):
response = app.full_dispatch_request()
assert response.status_code == 400
def test_environ_for_valid_idna_completes():
app = flask.Flask(__name__)
@app.route("/")
def index():
return "Hello World!"
from flask.testing import EnvironBuilder
builder = EnvironBuilder(app)
environ = builder.get_environ()
# these characters are all IDNA-compatible
environ["HTTP_HOST"] = "ąśźäüжŠßя.com"
with app.request_context(environ):
response = app.full_dispatch_request()
assert response.status_code == 200
def test_normal_environ_completes():
app = flask.Flask(__name__)
@app.route("/")
def index():
return "Hello World!"
response = app.test_client().get("/", headers={"host": "xn--on-0ia.com"})
assert response.status_code == 200
|
the-stack_0_9331 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import re
import subprocess
import time
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.kubernetes import kube_client
from airflow.utils.log.logging_mixin import LoggingMixin
class SparkSubmitHook(BaseHook, LoggingMixin):
"""
This hook is a wrapper around the spark-submit binary to kick off a spark-submit job.
It requires that the "spark-submit" binary is in the PATH or the spark_home to be
supplied.
:param conf: Arbitrary Spark configuration properties
:type conf: dict
:param conn_id: The connection id as configured in Airflow administration. When an
invalid connection_id is supplied, it will default to yarn.
:type conn_id: str
:param files: Upload additional files to the executor running the job, separated by a
comma. Files will be placed in the working directory of each executor.
For example, serialized objects.
:type files: str
:param py_files: Additional python files used by the job, can be .zip, .egg or .py.
:type py_files: str
    :param archives: Archives that spark should unzip (and possibly tag with #ALIAS) into
        the application working directory.
    :type archives: str
:param driver_class_path: Additional, driver-specific, classpath settings.
:type driver_class_path: str
:param jars: Submit additional jars to upload and place them in executor classpath.
:type jars: str
:param java_class: the main class of the Java application
:type java_class: str
:param packages: Comma-separated list of maven coordinates of jars to include on the
driver and executor classpaths
:type packages: str
:param exclude_packages: Comma-separated list of maven coordinates of jars to exclude
while resolving the dependencies provided in 'packages'
:type exclude_packages: str
:param repositories: Comma-separated list of additional remote repositories to search
for the maven coordinates given with 'packages'
:type repositories: str
:param total_executor_cores: (Standalone & Mesos only) Total cores for all executors
(Default: all the available cores on the worker)
:type total_executor_cores: int
:param executor_cores: (Standalone, YARN and Kubernetes only) Number of cores per
executor (Default: 2)
:type executor_cores: int
:param executor_memory: Memory per executor (e.g. 1000M, 2G) (Default: 1G)
:type executor_memory: str
:param driver_memory: Memory allocated to the driver (e.g. 1000M, 2G) (Default: 1G)
:type driver_memory: str
:param keytab: Full path to the file that contains the keytab
:type keytab: str
:param principal: The name of the kerberos principal used for keytab
:type principal: str
:param proxy_user: User to impersonate when submitting the application
:type proxy_user: str
:param name: Name of the job (default airflow-spark)
:type name: str
:param num_executors: Number of executors to launch
:type num_executors: int
:param application_args: Arguments for the application being submitted
:type application_args: list
:param env_vars: Environment variables for spark-submit. It
supports yarn and k8s mode too.
:type env_vars: dict
:param verbose: Whether to pass the verbose flag to spark-submit process for debugging
:type verbose: bool
:param spark_binary: The command to use for spark submit.
Some distros may use spark2-submit.
:type spark_binary: str
"""
def __init__(self,
conf=None,
conn_id='spark_default',
files=None,
py_files=None,
archives=None,
driver_class_path=None,
jars=None,
java_class=None,
packages=None,
exclude_packages=None,
repositories=None,
total_executor_cores=None,
executor_cores=None,
executor_memory=None,
driver_memory=None,
keytab=None,
principal=None,
proxy_user=None,
name='default-name',
num_executors=None,
application_args=None,
env_vars=None,
verbose=False,
spark_binary=None):
self._conf = conf
self._conn_id = conn_id
self._files = files
self._py_files = py_files
self._archives = archives
self._driver_class_path = driver_class_path
self._jars = jars
self._java_class = java_class
self._packages = packages
self._exclude_packages = exclude_packages
self._repositories = repositories
self._total_executor_cores = total_executor_cores
self._executor_cores = executor_cores
self._executor_memory = executor_memory
self._driver_memory = driver_memory
self._keytab = keytab
self._principal = principal
self._proxy_user = proxy_user
self._name = name
self._num_executors = num_executors
self._application_args = application_args
self._env_vars = env_vars
self._verbose = verbose
self._submit_sp = None
self._yarn_application_id = None
self._kubernetes_driver_pod = None
self._spark_binary = spark_binary
self._connection = self._resolve_connection()
self._is_yarn = 'yarn' in self._connection['master']
self._is_kubernetes = 'k8s' in self._connection['master']
if self._is_kubernetes and kube_client is None:
raise RuntimeError(
"{} specified by kubernetes dependencies are not installed!".format(
self._connection['master']))
self._should_track_driver_status = self._resolve_should_track_driver_status()
self._driver_id = None
self._driver_status = None
self._spark_exit_code = None
def _resolve_should_track_driver_status(self):
"""
Determines whether or not this hook should poll the spark driver status through
subsequent spark-submit status requests after the initial spark-submit request
:return: if the driver status should be tracked
"""
return ('spark://' in self._connection['master'] and
self._connection['deploy_mode'] == 'cluster')
def _resolve_connection(self):
# Build from connection master or default to yarn if not available
conn_data = {'master': 'yarn',
'queue': None,
'deploy_mode': None,
'spark_home': None,
'spark_binary': self._spark_binary or "spark-submit",
'namespace': None}
try:
# Master can be local, yarn, spark://HOST:PORT, mesos://HOST:PORT and
# k8s://https://<HOST>:<PORT>
conn = self.get_connection(self._conn_id)
if conn.port:
conn_data['master'] = "{}:{}".format(conn.host, conn.port)
else:
conn_data['master'] = conn.host
# Determine optional yarn queue from the extra field
extra = conn.extra_dejson
conn_data['queue'] = extra.get('queue', None)
conn_data['deploy_mode'] = extra.get('deploy-mode', None)
conn_data['spark_home'] = extra.get('spark-home', None)
conn_data['spark_binary'] = self._spark_binary or \
extra.get('spark-binary', "spark-submit")
conn_data['namespace'] = extra.get('namespace')
except AirflowException:
self.log.info(
"Could not load connection string %s, defaulting to %s",
self._conn_id, conn_data['master']
)
return conn_data
def get_conn(self):
pass
def _get_spark_binary_path(self):
# If the spark_home is passed then build the spark-submit executable path using
# the spark_home; otherwise assume that spark-submit is present in the path to
# the executing user
if self._connection['spark_home']:
connection_cmd = [os.path.join(self._connection['spark_home'], 'bin',
self._connection['spark_binary'])]
else:
connection_cmd = [self._connection['spark_binary']]
return connection_cmd
def _build_spark_submit_command(self, application):
"""
Construct the spark-submit command to execute.
:param application: command to append to the spark-submit command
:type application: str
:return: full command to be executed
"""
connection_cmd = self._get_spark_binary_path()
# The url of the spark master
connection_cmd += ["--master", self._connection['master']]
if self._conf:
for key in self._conf:
connection_cmd += ["--conf", "{}={}".format(key, str(self._conf[key]))]
if self._env_vars and (self._is_kubernetes or self._is_yarn):
if self._is_yarn:
tmpl = "spark.yarn.appMasterEnv.{}={}"
# Allow dynamic setting of hadoop/yarn configuration environments
self._env = self._env_vars
else:
tmpl = "spark.kubernetes.driverEnv.{}={}"
for key in self._env_vars:
connection_cmd += [
"--conf",
tmpl.format(key, str(self._env_vars[key]))]
elif self._env_vars and self._connection['deploy_mode'] != "cluster":
self._env = self._env_vars # Do it on Popen of the process
elif self._env_vars and self._connection['deploy_mode'] == "cluster":
raise AirflowException(
"SparkSubmitHook env_vars is not supported in standalone-cluster mode.")
if self._is_kubernetes and self._connection['namespace']:
connection_cmd += ["--conf", "spark.kubernetes.namespace={}".format(
self._connection['namespace'])]
if self._files:
connection_cmd += ["--files", self._files]
if self._py_files:
connection_cmd += ["--py-files", self._py_files]
if self._archives:
connection_cmd += ["--archives", self._archives]
if self._driver_class_path:
connection_cmd += ["--driver-class-path", self._driver_class_path]
if self._jars:
connection_cmd += ["--jars", self._jars]
if self._packages:
connection_cmd += ["--packages", self._packages]
if self._exclude_packages:
connection_cmd += ["--exclude-packages", self._exclude_packages]
if self._repositories:
connection_cmd += ["--repositories", self._repositories]
if self._num_executors:
connection_cmd += ["--num-executors", str(self._num_executors)]
if self._total_executor_cores:
connection_cmd += ["--total-executor-cores", str(self._total_executor_cores)]
if self._executor_cores:
connection_cmd += ["--executor-cores", str(self._executor_cores)]
if self._executor_memory:
connection_cmd += ["--executor-memory", self._executor_memory]
if self._driver_memory:
connection_cmd += ["--driver-memory", self._driver_memory]
if self._keytab:
connection_cmd += ["--keytab", self._keytab]
if self._principal:
connection_cmd += ["--principal", self._principal]
if self._proxy_user:
connection_cmd += ["--proxy-user", self._proxy_user]
if self._name:
connection_cmd += ["--name", self._name]
if self._java_class:
connection_cmd += ["--class", self._java_class]
if self._verbose:
connection_cmd += ["--verbose"]
if self._connection['queue']:
connection_cmd += ["--queue", self._connection['queue']]
if self._connection['deploy_mode']:
connection_cmd += ["--deploy-mode", self._connection['deploy_mode']]
# The actual script to execute
connection_cmd += [application]
# Append any application arguments
if self._application_args:
connection_cmd += self._application_args
self.log.info("Spark-Submit cmd: %s", connection_cmd)
return connection_cmd
def _build_track_driver_status_command(self):
"""
Construct the command to poll the driver status.
:return: full command to be executed
"""
connection_cmd = self._get_spark_binary_path()
        # The url of the spark master
connection_cmd += ["--master", self._connection['master']]
# The driver id so we can poll for its status
if self._driver_id:
connection_cmd += ["--status", self._driver_id]
else:
raise AirflowException(
"Invalid status: attempted to poll driver " +
"status but no driver id is known. Giving up.")
self.log.debug("Poll driver status cmd: %s", connection_cmd)
return connection_cmd
def submit(self, application="", **kwargs):
"""
Remote Popen to execute the spark-submit job
:param application: Submitted application, jar or py file
:type application: str
:param kwargs: extra arguments to Popen (see subprocess.Popen)
"""
spark_submit_cmd = self._build_spark_submit_command(application)
if hasattr(self, '_env'):
env = os.environ.copy()
env.update(self._env)
kwargs["env"] = env
self._submit_sp = subprocess.Popen(spark_submit_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=-1,
universal_newlines=True,
**kwargs)
self._process_spark_submit_log(iter(self._submit_sp.stdout))
returncode = self._submit_sp.wait()
# Check spark-submit return code. In Kubernetes mode, also check the value
# of exit code in the log, as it may differ.
if returncode or (self._is_kubernetes and self._spark_exit_code != 0):
raise AirflowException(
"Cannot execute: {}. Error code is: {}.".format(
spark_submit_cmd, returncode
)
)
self.log.debug("Should track driver: {}".format(self._should_track_driver_status))
# We want the Airflow job to wait until the Spark driver is finished
if self._should_track_driver_status:
if self._driver_id is None:
raise AirflowException(
"No driver id is known: something went wrong when executing " +
"the spark submit command"
)
# We start with the SUBMITTED status as initial status
self._driver_status = "SUBMITTED"
# Start tracking the driver status (blocking function)
self._start_driver_status_tracking()
if self._driver_status != "FINISHED":
raise AirflowException(
"ERROR : Driver {} badly exited with status {}"
.format(self._driver_id, self._driver_status)
)
def _process_spark_submit_log(self, itr):
"""
Processes the log files and extracts useful information out of it.
If the deploy-mode is 'client', log the output of the submit command as those
are the output logs of the Spark worker directly.
Remark: If the driver needs to be tracked for its status, the log-level of the
spark deploy needs to be at least INFO (log4j.logger.org.apache.spark.deploy=INFO)
:param itr: An iterator which iterates over the input of the subprocess
"""
# Consume the iterator
for line in itr:
line = line.strip()
# If we run yarn cluster mode, we want to extract the application id from
# the logs so we can kill the application when we stop it unexpectedly
if self._is_yarn and self._connection['deploy_mode'] == 'cluster':
match = re.search('(application[0-9_]+)', line)
if match:
self._yarn_application_id = match.groups()[0]
self.log.info("Identified spark driver id: %s",
self._yarn_application_id)
# If we run Kubernetes cluster mode, we want to extract the driver pod id
# from the logs so we can kill the application when we stop it unexpectedly
elif self._is_kubernetes:
match = re.search(r'\s*pod name: ((.+?)-([a-z0-9]+)-driver)', line)
if match:
self._kubernetes_driver_pod = match.groups()[0]
self.log.info("Identified spark driver pod: %s",
self._kubernetes_driver_pod)
# Store the Spark Exit code
match_exit_code = re.search(r'\s*Exit code: (\d+)', line)
if match_exit_code:
self._spark_exit_code = int(match_exit_code.groups()[0])
# if we run in standalone cluster mode and we want to track the driver status
# we need to extract the driver id from the logs. This allows us to poll for
# the status using the driver id. Also, we can kill the driver when needed.
elif self._should_track_driver_status and not self._driver_id:
match_driver_id = re.search(r'(driver-[0-9\-]+)', line)
if match_driver_id:
self._driver_id = match_driver_id.groups()[0]
self.log.info("identified spark driver id: {}"
.format(self._driver_id))
self.log.info(line)
def _process_spark_status_log(self, itr):
"""
parses the logs of the spark driver status query process
:param itr: An iterator which iterates over the input of the subprocess
"""
# Consume the iterator
for line in itr:
line = line.strip()
# Check if the log line is about the driver status and extract the status.
if "driverState" in line:
self._driver_status = line.split(' : ')[1] \
.replace(',', '').replace('\"', '').strip()
self.log.debug("spark driver status log: {}".format(line))
def _start_driver_status_tracking(self):
"""
Polls the driver based on self._driver_id to get the status.
Finish successfully when the status is FINISHED.
Finish failed when the status is ERROR/UNKNOWN/KILLED/FAILED.
Possible status:
SUBMITTED
Submitted but not yet scheduled on a worker
RUNNING
Has been allocated to a worker to run
FINISHED
Previously ran and exited cleanly
RELAUNCHING
Exited non-zero or due to worker failure, but has not yet
started running again
UNKNOWN
The status of the driver is temporarily not known due to
master failure recovery
KILLED
A user manually killed this driver
FAILED
The driver exited non-zero and was not supervised
ERROR
Unable to run or restart due to an unrecoverable error
(e.g. missing jar file)
"""
# When your Spark Standalone cluster is not performing well
# due to misconfiguration or heavy loads.
# it is possible that the polling request will timeout.
# Therefore we use a simple retry mechanism.
missed_job_status_reports = 0
max_missed_job_status_reports = 10
# Keep polling as long as the driver is processing
while self._driver_status not in ["FINISHED", "UNKNOWN",
"KILLED", "FAILED", "ERROR"]:
# Sleep for 1 second as we do not want to spam the cluster
time.sleep(1)
self.log.debug("polling status of spark driver with id {}"
.format(self._driver_id))
poll_drive_status_cmd = self._build_track_driver_status_command()
status_process = subprocess.Popen(poll_drive_status_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=-1,
universal_newlines=True)
self._process_spark_status_log(iter(status_process.stdout))
returncode = status_process.wait()
if returncode:
if missed_job_status_reports < max_missed_job_status_reports:
missed_job_status_reports = missed_job_status_reports + 1
else:
raise AirflowException(
"Failed to poll for the driver status {} times: returncode = {}"
.format(max_missed_job_status_reports, returncode)
)
def _build_spark_driver_kill_command(self):
"""
Construct the spark-submit command to kill a driver.
:return: full command to kill a driver
"""
# If the spark_home is passed then build the spark-submit executable path using
# the spark_home; otherwise assume that spark-submit is present in the path to
# the executing user
if self._connection['spark_home']:
connection_cmd = [os.path.join(self._connection['spark_home'],
'bin',
self._connection['spark_binary'])]
else:
connection_cmd = [self._connection['spark_binary']]
        # The url of the spark master
connection_cmd += ["--master", self._connection['master']]
# The actual kill command
connection_cmd += ["--kill", self._driver_id]
self.log.debug("Spark-Kill cmd: %s", connection_cmd)
return connection_cmd
def on_kill(self):
self.log.debug("Kill Command is being called")
if self._should_track_driver_status:
if self._driver_id:
self.log.info('Killing driver {} on cluster'
.format(self._driver_id))
kill_cmd = self._build_spark_driver_kill_command()
driver_kill = subprocess.Popen(kill_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.log.info("Spark driver {} killed with return code: {}"
.format(self._driver_id, driver_kill.wait()))
if self._submit_sp and self._submit_sp.poll() is None:
self.log.info('Sending kill signal to %s', self._connection['spark_binary'])
self._submit_sp.kill()
if self._yarn_application_id:
self.log.info('Killing application {} on YARN'
.format(self._yarn_application_id))
kill_cmd = "yarn application -kill {}" \
.format(self._yarn_application_id).split()
yarn_kill = subprocess.Popen(kill_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.log.info("YARN killed with return code: %s", yarn_kill.wait())
if self._kubernetes_driver_pod:
self.log.info('Killing pod %s on Kubernetes', self._kubernetes_driver_pod)
# Currently only instantiate Kubernetes client for killing a spark pod.
try:
import kubernetes
client = kube_client.get_kube_client()
api_response = client.delete_namespaced_pod(
self._kubernetes_driver_pod,
self._connection['namespace'],
body=kubernetes.client.V1DeleteOptions(),
pretty=True)
self.log.info("Spark on K8s killed with response: %s", api_response)
except kube_client.ApiException as e:
self.log.info("Exception when attempting to kill Spark on K8s:")
self.log.exception(e)
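# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): submitting a PySpark
# application through the hook outside of an operator.  The connection id
# "spark_default" must already exist in the Airflow metadata DB, and the
# application path below is a placeholder, not a value taken from this module.
if __name__ == "__main__":
    hook = SparkSubmitHook(
        conn_id="spark_default",
        name="example-spark-job",
        executor_cores=2,
        executor_memory="2g",
        num_executors=2,
        application_args=["--input", "/tmp/input.csv"],  # forwarded to the app
        verbose=True,
    )
    # Blocks until spark-submit (and, in cluster mode, the tracked driver) finishes.
    hook.submit(application="/path/to/app.py")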
|
the-stack_0_9332 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerProbesOperations:
"""LoadBalancerProbesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs: Any
) -> AsyncIterable["_models.LoadBalancerProbeListResult"]:
"""Gets all the load balancer probes.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerProbeListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_11_01.models.LoadBalancerProbeListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerProbeListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-11-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerProbeListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes'} # type: ignore
async def get(
self,
resource_group_name: str,
load_balancer_name: str,
probe_name: str,
**kwargs: Any
) -> "_models.Probe":
"""Gets load balancer probe.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param probe_name: The name of the probe.
:type probe_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Probe, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_11_01.models.Probe
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Probe"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-11-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'probeName': self._serialize.url("probe_name", probe_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Probe', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes/{probeName}'} # type: ignore
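# Hedged usage sketch (not part of the generated code): listing the probes of a
# load balancer through the async management client.  The subscription id,
# resource group and load balancer names are placeholders, and the import path
# assumes the multi-api azure-mgmt-network package that ships this
# 2017-11-01 profile.
if __name__ == "__main__":
    import asyncio
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.v2017_11_01.aio import NetworkManagementClient

    async def _list_probes():
        async with DefaultAzureCredential() as credential:
            async with NetworkManagementClient(credential, "<subscription-id>") as client:
                async for probe in client.load_balancer_probes.list("my-rg", "my-lb"):
                    print(probe.name, probe.protocol, probe.port)

    asyncio.run(_list_probes())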
|
the-stack_0_9334 | # -*- coding: utf-8 -*-
"""Public section, including homepage and signup."""
from flask import (
Blueprint,
current_app,
flash,
redirect,
render_template,
request,
url_for,
)
from flask_login import login_required, login_user, logout_user
from flask_blog_api.extensions import login_manager
from flask_blog_api.public.forms import LoginForm
from flask_blog_api.user.forms import RegisterForm
from flask_blog_api.user.models import User
from flask_blog_api.utils import flash_errors
blueprint = Blueprint("public", __name__, static_folder="../static")
@login_manager.user_loader
def load_user(user_id):
"""Load user by ID."""
return User.get_by_id(int(user_id))
@blueprint.route("/", methods=["GET", "POST"])
def home():
"""Home page."""
form = LoginForm(request.form)
current_app.logger.info("Hello from the home page!")
# Handle logging in
if request.method == "POST":
if form.validate_on_submit():
login_user(form.user)
flash("You are logged in.", "success")
redirect_url = request.args.get("next") or url_for("user.members")
return redirect(redirect_url)
else:
flash_errors(form)
return render_template("public/home.html", form=form)
@blueprint.route("/logout/")
@login_required
def logout():
"""Logout."""
logout_user()
flash("You are logged out.", "info")
return redirect(url_for("public.home"))
@blueprint.route("/register/", methods=["GET", "POST"])
def register():
"""Register new user."""
form = RegisterForm(request.form)
if form.validate_on_submit():
User.create(
username=form.username.data,
email=form.email.data,
first_name=form.first_name.data,
last_name=form.last_name.data,
password=form.password.data,
active=True,
)
flash("Thank you for registering. You can now log in.", "success")
return redirect(url_for("public.home"))
else:
flash_errors(form)
return render_template("public/register.html", form=form)
@blueprint.route("/about/")
def about():
"""About page."""
form = LoginForm(request.form)
return render_template("public/about.html", form=form)
|
the-stack_0_9335 | # -*- coding: utf-8 -*-
#
# sphinx-nbexamples documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 20 18:01:33 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import os.path as osp
import re
import six
import sphinx_nbexamples
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(osp.dirname(__file__)))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'autodocsumm',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.extlinks',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx_nbexamples',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from
# docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
napoleon_use_admonition_for_examples = True
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = '.rst'
not_document_data = 'sphinx_nbexamples.gallery_config'
example_gallery_config = dict(
dont_preprocess=['../examples/Subgallery/example_bokeh.ipynb'],
insert_bokeh='0.12.1',
urls='https://github.com/Chilipp/sphinx-nbexamples/blob/master/examples',
binder_url='https://mybinder.org/v2/gh/Chilipp/sphinx-nbexamples/master?filepath=examples',
)
process_examples = not osp.exists(osp.join(osp.dirname(__file__), 'examples'))
if on_rtd:
import subprocess as spr
spr.call([sys.executable] +
('-m ipykernel install --user --name python3 '
'--display-name python3').split())
spr.call([sys.executable, '-m', 'bash_kernel.install'])
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
autodoc_default_flags = ['show_inheritance', 'autosummary']
autoclass_content = 'both'
autodata_content = 'call'
add_module_names = False
# General information about the project.
project = u'sphinx-nbexamples'
copyright = u'2016, Philipp Sommer'
author = u'Philipp Sommer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = re.match(r'\d+\.\d+\.\d+', sphinx_nbexamples.__version__).group()
# The full version, including alpha/beta/rc tags.
release = sphinx_nbexamples.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# otherwise, readthedocs.org uses their theme by default, so no need to specify
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'sphinx-nbexamplesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
    'preamble': r'\setcounter{tocdepth}{10}'
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'sphinx-nbexamples.tex', u'sphinx-nbexamples Documentation',
u'Philipp Sommer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sphinx-nbexamples', u'sphinx-nbexamples Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'sphinx-nbexamples', u'sphinx-nbexamples Documentation',
author, 'sphinx-nbexamples', 'Extending your autodoc API docs with a summary',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'sphinx': ('http://www.sphinx-doc.org/en/stable/', None),
'sphinx_nbexamples_doc': (
'http://sphinx-nbexamples.readthedocs.io/en/latest/', None),
'psyplot': ('http://psyplot.readthedocs.io/en/latest/', None),
'nbconvert': ('https://nbconvert.readthedocs.io/en/latest/', None),
}
if six.PY3:
intersphinx_mapping['python'] = ('https://docs.python.org/3.4/', None)
else:
intersphinx_mapping['python'] = ('https://docs.python.org/2.7/', None)
extlinks = {'dudir': ('http://docutils.sourceforge.net/docs/ref/rst/'
'directives.html#%s', '')}
# -- Extension interface ------------------------------------------------------
# taken from sphinx conf.py
from sphinx import addnodes # noqa
event_sig_re = re.compile(r'([a-zA-Z-]+)\s*\((.*)\)')
def parse_event(env, sig, signode):
m = event_sig_re.match(sig)
if not m:
signode += addnodes.desc_name(sig, sig)
return sig
name, args = m.groups()
signode += addnodes.desc_name(name, name)
plist = addnodes.desc_parameterlist()
for arg in args.split(','):
arg = arg.strip()
plist += addnodes.desc_parameter(arg, arg)
signode += plist
return name
def setup(app):
from sphinx.util.docfields import GroupedField
app.add_object_type('confval', 'confval',
objname='configuration value',
indextemplate='pair: %s; configuration value')
fdesc = GroupedField('parameter', label='Parameters',
names=['param'], can_collapse=True)
app.add_object_type('event', 'event', 'pair: %s; event', parse_event,
doc_field_types=[fdesc])
|