the-stack_0_18734
from functions import defun, defun_wrapped
@defun_wrapped
def _erf_complex(ctx, z):
z2 = ctx.square_exp_arg(z, -1)
#z2 = -z**2
v = (2/ctx.sqrt(ctx.pi))*z * ctx.hyp1f1((1,2),(3,2), z2)
if not ctx._re(z):
v = ctx._im(v)*ctx.j
return v
@defun_wrapped
def _erfc_complex(ctx, z):
if ctx.re(z) > 2:
z2 = ctx.square_exp_arg(z)
nz2 = ctx.fneg(z2, exact=True)
v = ctx.exp(nz2)/ctx.sqrt(ctx.pi) * ctx.hyperu((1,2),(1,2), z2)
else:
v = 1 - ctx._erf_complex(z)
if not ctx._re(z):
v = 1+ctx._im(v)*ctx.j
return v
@defun
def erf(ctx, z):
z = ctx.convert(z)
if ctx._is_real_type(z):
try:
return ctx._erf(z)
except NotImplementedError:
pass
if ctx._is_complex_type(z) and not z.imag:
try:
return type(z)(ctx._erf(z.real))
except NotImplementedError:
pass
return ctx._erf_complex(z)
@defun
def erfc(ctx, z):
z = ctx.convert(z)
if ctx._is_real_type(z):
try:
return ctx._erfc(z)
except NotImplementedError:
pass
if ctx._is_complex_type(z) and not z.imag:
try:
return type(z)(ctx._erfc(z.real))
except NotImplementedError:
pass
return ctx._erfc_complex(z)
@defun
def square_exp_arg(ctx, z, mult=1, reciprocal=False):
prec = ctx.prec*4+20
if reciprocal:
z2 = ctx.fmul(z, z, prec=prec)
z2 = ctx.fdiv(ctx.one, z2, prec=prec)
else:
z2 = ctx.fmul(z, z, prec=prec)
if mult != 1:
z2 = ctx.fmul(z2, mult, exact=True)
return z2
@defun_wrapped
def erfi(ctx, z):
if not z:
return z
z2 = ctx.square_exp_arg(z)
v = (2/ctx.sqrt(ctx.pi)*z) * ctx.hyp1f1((1,2), (3,2), z2)
if not ctx._re(z):
v = ctx._im(v)*ctx.j
return v
@defun_wrapped
def erfinv(ctx, x):
xre = ctx._re(x)
if (xre != x) or (xre < -1) or (xre > 1):
return ctx.bad_domain("erfinv(x) is defined only for -1 <= x <= 1")
x = xre
#if ctx.isnan(x): return x
if not x: return x
if x == 1: return ctx.inf
if x == -1: return ctx.ninf
if abs(x) < 0.9:
a = 0.53728*x**3 + 0.813198*x
else:
# An asymptotic formula
u = ctx.ln(2/ctx.pi/(abs(x)-1)**2)
a = ctx.sign(x) * ctx.sqrt(u - ctx.ln(u))/ctx.sqrt(2)
ctx.prec += 10
return ctx.findroot(lambda t: ctx.erf(t)-x, a)
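# Illustrative sketch (not part of the original module; standard public mpmath
# API assumed): erfinv builds a rough initial guess (a cubic fit for |x| < 0.9,
# the asymptotic formula otherwise) and polishes it with findroot on erf(t)-x,
# so the round trip erf(erfinv(x)) == x holds to working precision.
def _example_erfinv_roundtrip():
    import mpmath
    mpmath.mp.dps = 30
    x = mpmath.mpf('0.75')
    t = mpmath.erfinv(x)
    assert mpmath.almosteq(mpmath.erf(t), x)
    return t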
@defun_wrapped
def npdf(ctx, x, mu=0, sigma=1):
sigma = ctx.convert(sigma)
return ctx.exp(-(x-mu)**2/(2*sigma**2)) / (sigma*ctx.sqrt(2*ctx.pi))
@defun_wrapped
def ncdf(ctx, x, mu=0, sigma=1):
a = (x-mu)/(sigma*ctx.sqrt(2))
if a < 0:
return ctx.erfc(-a)/2
else:
return (1+ctx.erf(a))/2
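# Illustrative sketch (public mpmath API assumed): for large negative x the
# normal CDF is tiny, and evaluating it as (1+erf(a))/2 would lose all digits
# to cancellation; the erfc(-a)/2 branch above keeps full relative accuracy.
def _example_ncdf_tail():
    import mpmath
    mpmath.mp.dps = 25
    return mpmath.ncdf(-10)   # about 7.6e-24, computed with ~25 good digits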
@defun_wrapped
def betainc(ctx, a, b, x1=0, x2=1, regularized=False):
if x1 == x2:
v = 0
elif not x1:
if x1 == 0 and x2 == 1:
v = ctx.beta(a, b)
else:
v = x2**a * ctx.hyp2f1(a, 1-b, a+1, x2) / a
else:
m, d = ctx.nint_distance(a)
if m <= 0:
if d < -ctx.prec:
h = +ctx.eps
ctx.prec *= 2
a += h
elif d < -4:
ctx.prec -= d
s1 = x2**a * ctx.hyp2f1(a,1-b,a+1,x2)
s2 = x1**a * ctx.hyp2f1(a,1-b,a+1,x1)
v = (s1 - s2) / a
if regularized:
v /= ctx.beta(a,b)
return v
@defun
def gammainc(ctx, z, a=0, b=None, regularized=False):
regularized = bool(regularized)
z = ctx.convert(z)
if a is None:
a = ctx.zero
lower_modified = False
else:
a = ctx.convert(a)
lower_modified = a != ctx.zero
if b is None:
b = ctx.inf
upper_modified = False
else:
b = ctx.convert(b)
upper_modified = b != ctx.inf
# Complete gamma function
if not (upper_modified or lower_modified):
if regularized:
if ctx.re(z) < 0:
return ctx.inf
elif ctx.re(z) > 0:
return ctx.one
else:
return ctx.nan
return ctx.gamma(z)
if a == b:
return ctx.zero
# Standardize
if ctx.re(a) > ctx.re(b):
return -ctx.gammainc(z, b, a, regularized)
# Generalized gamma
if upper_modified and lower_modified:
return +ctx._gamma3(z, a, b, regularized)
# Upper gamma
elif lower_modified:
return ctx._upper_gamma(z, a, regularized)
# Lower gamma
elif upper_modified:
return ctx._lower_gamma(z, b, regularized)
@defun
def _lower_gamma(ctx, z, b, regularized=False):
# Pole
if ctx.isnpint(z):
return type(z)(ctx.inf)
G = [z] * regularized
negb = ctx.fneg(b, exact=True)
def h(z):
T1 = [ctx.exp(negb), b, z], [1, z, -1], [], G, [1], [1+z], b
return (T1,)
return ctx.hypercomb(h, [z])
@defun
def _upper_gamma(ctx, z, a, regularized=False):
# Fast integer case, when available
if ctx.isint(z):
try:
if regularized:
# Gamma pole
if ctx.isnpint(z):
return type(z)(ctx.zero)
orig = ctx.prec
try:
ctx.prec += 10
return ctx._gamma_upper_int(z, a) / ctx.gamma(z)
finally:
ctx.prec = orig
else:
return ctx._gamma_upper_int(z, a)
except NotImplementedError:
pass
nega = ctx.fneg(a, exact=True)
G = [z] * regularized
# Use 2F0 series when possible; fall back to lower gamma representation
try:
def h(z):
r = z-1
return [([ctx.exp(nega), a], [1, r], [], G, [1, -r], [], 1/nega)]
return ctx.hypercomb(h, [z], force_series=True)
except ctx.NoConvergence:
def h(z):
T1 = [], [1, z-1], [z], G, [], [], 0
T2 = [-ctx.exp(nega), a, z], [1, z, -1], [], G, [1], [1+z], a
return T1, T2
return ctx.hypercomb(h, [z])
@defun
def _gamma3(ctx, z, a, b, regularized=False):
pole = ctx.isnpint(z)
if regularized and pole:
return ctx.zero
try:
ctx.prec += 15
# We don't know in advance whether it's better to write as a difference
# of lower or upper gamma functions, so try both
T1 = ctx.gammainc(z, a, regularized=regularized)
T2 = ctx.gammainc(z, b, regularized=regularized)
R = T1 - T2
if ctx.mag(R) - max(ctx.mag(T1), ctx.mag(T2)) > -10:
return R
if not pole:
T1 = ctx.gammainc(z, 0, b, regularized=regularized)
T2 = ctx.gammainc(z, 0, a, regularized=regularized)
R = T1 - T2
# May be ok, but should probably at least print a warning
# about possible cancellation
if 1: #ctx.mag(R) - max(ctx.mag(T1), ctx.mag(T2)) > -10:
return R
finally:
ctx.prec -= 15
raise NotImplementedError
@defun_wrapped
def expint(ctx, n, z):
if ctx.isint(n) and ctx._is_real_type(z):
try:
return ctx._expint_int(n, z)
except NotImplementedError:
pass
if ctx.isnan(n) or ctx.isnan(z):
return z*n
if z == ctx.inf:
return 1/z
if z == 0:
# integral from 1 to infinity of t^(-n)
if ctx.re(n) <= 1:
# TODO: reasonable sign of infinity
return type(z)(ctx.inf)
else:
return ctx.one/(n-1)
if n == 0:
return ctx.exp(-z)/z
if n == -1:
return ctx.exp(-z)*(z+1)/z**2
return z**(n-1) * ctx.gammainc(1-n, z)
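# Illustrative sketch (public mpmath API assumed): the general branch above
# relies on the identity E_n(z) = z**(n-1) * Gamma(1-n, z), which can be
# verified numerically.
def _example_expint_identity():
    import mpmath
    n, z = 3, mpmath.mpf('2.5')
    lhs = mpmath.expint(n, z)
    rhs = z**(n-1) * mpmath.gammainc(1-n, z)
    assert mpmath.almosteq(lhs, rhs)
    return lhs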
@defun_wrapped
def li(ctx, z, offset=False):
if offset:
if z == 2:
return ctx.zero
return ctx.ei(ctx.ln(z)) - ctx.ei(ctx.ln2)
if not z:
return z
if z == 1:
return ctx.ninf
return ctx.ei(ctx.ln(z))
@defun
def ei(ctx, z):
try:
return ctx._ei(z)
except NotImplementedError:
return ctx._ei_generic(z)
@defun_wrapped
def _ei_generic(ctx, z):
# Note: the following is currently untested because mp and fp
# both use special-case ei code
if z == ctx.inf:
return z
if z == ctx.ninf:
return ctx.zero
if ctx.mag(z) > 1:
try:
r = ctx.one/z
v = ctx.exp(z)*ctx.hyper([1,1],[],r,
maxterms=ctx.prec, force_series=True)/z
im = ctx._im(z)
if im > 0:
v += ctx.pi*ctx.j
if im < 0:
v -= ctx.pi*ctx.j
return v
except ctx.NoConvergence:
pass
v = z*ctx.hyp2f2(1,1,2,2,z) + ctx.euler
if ctx._im(z):
v += 0.5*(ctx.log(z) - ctx.log(ctx.one/z))
else:
v += ctx.log(abs(z))
return v
@defun
def e1(ctx, z):
try:
return ctx._e1(z)
except NotImplementedError:
return ctx.expint(1, z)
@defun
def ci(ctx, z):
try:
return ctx._ci(z)
except NotImplementedError:
return ctx._ci_generic(z)
@defun_wrapped
def _ci_generic(ctx, z):
if ctx.isinf(z):
if z == ctx.inf: return ctx.zero
if z == ctx.ninf: return ctx.pi*1j
jz = ctx.fmul(ctx.j,z,exact=True)
njz = ctx.fneg(jz,exact=True)
v = 0.5*(ctx.ei(jz) + ctx.ei(njz))
zreal = ctx._re(z)
zimag = ctx._im(z)
if zreal == 0:
if zimag > 0: v += ctx.pi*0.5j
if zimag < 0: v -= ctx.pi*0.5j
if zreal < 0:
if zimag >= 0: v += ctx.pi*1j
if zimag < 0: v -= ctx.pi*1j
if ctx._is_real_type(z) and zreal > 0:
v = ctx._re(v)
return v
@defun
def si(ctx, z):
try:
return ctx._si(z)
except NotImplementedError:
return ctx._si_generic(z)
@defun_wrapped
def _si_generic(ctx, z):
if ctx.isinf(z):
if z == ctx.inf: return 0.5*ctx.pi
if z == ctx.ninf: return -0.5*ctx.pi
# Suffers from cancellation near 0
if ctx.mag(z) >= -1:
jz = ctx.fmul(ctx.j,z,exact=True)
njz = ctx.fneg(jz,exact=True)
v = (-0.5j)*(ctx.ei(jz) - ctx.ei(njz))
zreal = ctx._re(z)
if zreal > 0:
v -= 0.5*ctx.pi
if zreal < 0:
v += 0.5*ctx.pi
if ctx._is_real_type(z):
v = ctx._re(v)
return v
else:
return z*ctx.hyp1f2((1,2),(3,2),(3,2),-0.25*z*z)
@defun_wrapped
def chi(ctx, z):
nz = ctx.fneg(z, exact=True)
v = 0.5*(ctx.ei(z) + ctx.ei(nz))
zreal = ctx._re(z)
zimag = ctx._im(z)
if zimag > 0:
v += ctx.pi*0.5j
elif zimag < 0:
v -= ctx.pi*0.5j
elif zreal < 0:
v += ctx.pi*1j
return v
@defun_wrapped
def shi(ctx, z):
# Suffers from cancellation near 0
if ctx.mag(z) >= -1:
nz = ctx.fneg(z, exact=True)
v = 0.5*(ctx.ei(z) - ctx.ei(nz))
zimag = ctx._im(z)
if zimag > 0: v -= 0.5j*ctx.pi
if zimag < 0: v += 0.5j*ctx.pi
return v
else:
return z * ctx.hyp1f2((1,2),(3,2),(3,2),0.25*z*z)
@defun_wrapped
def fresnels(ctx, z):
if z == ctx.inf:
return ctx.mpf(0.5)
if z == ctx.ninf:
return ctx.mpf(-0.5)
return ctx.pi*z**3/6*ctx.hyp1f2((3,4),(3,2),(7,4),-ctx.pi**2*z**4/16)
@defun_wrapped
def fresnelc(ctx, z):
if z == ctx.inf:
return ctx.mpf(0.5)
if z == ctx.ninf:
return ctx.mpf(-0.5)
return z*ctx.hyp1f2((1,4),(1,2),(5,4),-ctx.pi**2*z**4/16)
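# Illustrative sketch (public mpmath API assumed): the hypergeometric forms
# above agree with the defining integral S(z) = int_0^z sin(pi*t**2/2) dt
# (and similarly for C(z)).
def _example_fresnels_vs_quadrature():
    import mpmath
    z = mpmath.mpf('1.3')
    s_int = mpmath.quad(lambda t: mpmath.sin(mpmath.pi*t**2/2), [0, z])
    assert mpmath.almosteq(mpmath.fresnels(z), s_int)
    return s_int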
the-stack_0_18735
#!/usr/bin/env python
# normalDate.py - version 1.0 - 20000717
#hacked by Robin Becker 10/Apr/2001
#major changes include
# using Types instead of type(0) etc
# BusinessDate class
# __radd__, __rsub__ methods
# formatMS stuff
# derived from an original version created
# by Jeff Bauer of Rubicon Research and used
# with his kind permission
__version__=''' $Id$ '''
__doc__="Jeff Bauer's lightweight date class, extended by us. Predates Python's datetime module."
_bigBangScalar = -4345732 # based on (-9999, 1, 1) BC/BCE minimum
_bigCrunchScalar = 2958463 # based on (9999,12,31) AD/CE maximum
_daysInMonthNormal = [31,28,31,30,31,30,31,31,30,31,30,31]
_daysInMonthLeapYear = [31,29,31,30,31,30,31,31,30,31,30,31]
_dayOfWeekName = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
'Friday', 'Saturday', 'Sunday']
_monthName = ['January', 'February', 'March', 'April', 'May', 'June',
'July','August','September','October','November','December']
import string, re, time, datetime
if hasattr(time,'struct_time'):
_DateSeqTypes = (list,tuple,time.struct_time)
else:
_DateSeqTypes = (list,tuple)
_fmtPat = re.compile('\\{(m{1,5}|yyyy|yy|d{1,4})\\}',re.MULTILINE|re.IGNORECASE)
_iso_re = re.compile(r'(\d\d\d\d|\d\d)-(\d\d)-(\d\d)')
def getStdMonthNames():
return list(map(str.lower, _monthName))
def getStdShortMonthNames():
return [x[:3] for x in getStdMonthNames()]
def getStdDayNames():
return list(map(str.lower, _dayOfWeekName))
def getStdShortDayNames():
return [x[:3] for x in getStdDayNames()]
def isLeapYear(year):
"""determine if specified year is leap year, returns Python boolean"""
if year < 1600:
if year % 4:
return 0
else:
return 1
elif year % 4 != 0:
return 0
elif year % 100 != 0:
return 1
elif year % 400 != 0:
return 0
else:
return 1
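# Illustrative sketch (not part of the original module): a few expected values
# of isLeapYear, including the pre-1600 fallback to the plain Julian rule.
def _exampleLeapYears():
    assert isLeapYear(2000) == 1   # divisible by 400 -> leap
    assert isLeapYear(1900) == 0   # divisible by 100 but not 400 -> not leap
    assert isLeapYear(2024) == 1   # ordinary divisible-by-4 case
    assert isLeapYear(1500) == 1   # before 1600 only the divisible-by-4 test applies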
class NormalDateException(Exception):
"""Exception class for NormalDate"""
pass
class NormalDate:
"""
NormalDate is a specialized class to handle dates without
all the excess baggage (time zones, daylight savings, leap
seconds, etc.) of other date structures. The minimalist
strategy greatly simplifies its implementation and use.
Internally, NormalDate is stored as an integer with values
in a discontinuous range of -99990101 to 99991231. The
integer value is used principally for storage and to simplify
the user interface. Internal calculations are performed by
a scalar based on Jan 1, 1900.
Valid NormalDate ranges include (-9999,1,1) B.C.E. through
(9999,12,31) C.E./A.D.
1.0
No changes, except the version number. After 3 years of use by
various parties I think we can consider it stable.
0.8
Added Prof. Stephen Walton's suggestion for a range method
- module author resisted the temptation to use lambda <0.5 wink>
0.7
Added Dan Winkler's suggestions for __add__, __sub__ methods
0.6
Modifications suggested by Kevin Digweed to fix:
- dayOfWeek, dayOfWeekAbbrev, clone methods
- Permit NormalDate to be a better behaved superclass
0.5
Minor tweaking
0.4
- Added methods __cmp__, __hash__
- Added Epoch variable, scoped to the module
- Added setDay, setMonth, setYear methods
0.3
Minor touch-ups
0.2
- Fixed bug for certain B.C.E leap years
- Added Jim Fulton's suggestions for short alias class name =ND
and __getstate__, __setstate__ methods
Special thanks: Roedy Green
"""
def __init__(self, normalDate=None):
"""
Accept 1 of 4 values to initialize a NormalDate:
1. None - creates a NormalDate for the current day
2. integer in yyyymmdd format
3. string in yyyymmdd format
4. tuple in (yyyy, mm, dd) - localtime/gmtime can also be used
"""
if normalDate is None:
self.setNormalDate(time.localtime(time.time()))
else:
self.setNormalDate(normalDate)
def add(self, days):
"""add days to date; use negative integers to subtract"""
if not isinstance(days,int):
raise NormalDateException( \
'add method parameter must be integer type')
self.normalize(self.scalar() + days)
def __add__(self, days):
"""add integer to normalDate and return a new, calculated value"""
if not isinstance(days,int):
raise NormalDateException( \
'__add__ parameter must be integer type')
cloned = self.clone()
cloned.add(days)
return cloned
def __radd__(self,days):
'''for completeness'''
return self.__add__(days)
def clone(self):
"""return a cloned instance of this normalDate"""
return self.__class__(self.normalDate)
def __cmp__(self, target):
if target is None:
return 1
elif not hasattr(target, 'normalDate'):
return 1
else:
return cmp(self.normalDate, target.normalDate)
def day(self):
"""return the day as integer 1-31"""
return int(repr(self.normalDate)[-2:])
def dayOfWeek(self):
"""return integer representing day of week, Mon=0, Tue=1, etc."""
return dayOfWeek(*self.toTuple())
def dayOfWeekAbbrev(self):
"""return day of week abbreviation for current date: Mon, Tue, etc."""
return _dayOfWeekName[self.dayOfWeek()][:3]
def dayOfWeekName(self):
"""return day of week name for current date: Monday, Tuesday, etc."""
return _dayOfWeekName[self.dayOfWeek()]
def dayOfYear(self):
"""day of year"""
if self.isLeapYear():
daysByMonth = _daysInMonthLeapYear
else:
daysByMonth = _daysInMonthNormal
priorMonthDays = 0
for m in range(self.month() - 1):
priorMonthDays = priorMonthDays + daysByMonth[m]
return self.day() + priorMonthDays
def daysBetweenDates(self, normalDate):
"""
return value may be negative, since calculation is
self.scalar() - arg
"""
if isinstance(normalDate,NormalDate):
return self.scalar() - normalDate.scalar()
else:
return self.scalar() - NormalDate(normalDate).scalar()
def equals(self, target):
if isinstance(target,NormalDate):
if target is None:
return self.normalDate is None
else:
return self.normalDate == target.normalDate
else:
return 0
def endOfMonth(self):
"""returns (cloned) last day of month"""
return self.__class__(self.__repr__()[-8:-2]+str(self.lastDayOfMonth()))
def firstDayOfMonth(self):
"""returns (cloned) first day of month"""
return self.__class__(self.__repr__()[-8:-2]+"01")
def formatUS(self):
"""return date as string in common US format: MM/DD/YY"""
d = self.__repr__()
return "%s/%s/%s" % (d[-4:-2], d[-2:], d[-6:-4])
def formatUSCentury(self):
"""return date as string in 4-digit year US format: MM/DD/YYYY"""
d = self.__repr__()
return "%s/%s/%s" % (d[-4:-2], d[-2:], d[-8:-4])
def _fmtM(self):
return str(self.month())
def _fmtMM(self):
return '%02d' % self.month()
def _fmtMMM(self):
return self.monthAbbrev()
def _fmtMMMM(self):
return self.monthName()
def _fmtMMMMM(self):
return self.monthName()[0]
def _fmtD(self):
return str(self.day())
def _fmtDD(self):
return '%02d' % self.day()
def _fmtDDD(self):
return self.dayOfWeekAbbrev()
def _fmtDDDD(self):
return self.dayOfWeekName()
def _fmtYY(self):
return '%02d' % (self.year()%100)
def _fmtYYYY(self):
return str(self.year())
def formatMS(self,fmt):
'''format like MS date using the notation
{YY} --> 2 digit year
{YYYY} --> 4 digit year
{M} --> month as digit
{MM} --> 2 digit month
{MMM} --> abbreviated month name
{MMMM} --> month name
{MMMMM} --> first character of month name
{D} --> day of month as digit
{DD} --> 2 digit day of month
{DDD} --> abbreviated weekday name
{DDDD} --> weekday name
'''
r = fmt[:]
f = 0
while 1:
m = _fmtPat.search(r,f)
if m:
y = getattr(self, '_fmt' + m.group()[1:-1].upper())()
i, j = m.span()
r = (r[0:i] + y) + r[j:]
f = i + len(y)
else:
return r
def __getstate__(self):
"""minimize persistent storage requirements"""
return self.normalDate
def __hash__(self):
return hash(self.normalDate)
def __int__(self):
return self.normalDate
def isLeapYear(self):
"""
determine if specified year is leap year, returning true (1) or
false (0)
"""
return isLeapYear(self.year())
def _isValidNormalDate(self, normalDate):
"""checks for date validity in [-]yyyymmdd format"""
if not isinstance(normalDate,int):
return 0
if len(repr(normalDate)) > 9:
return 0
if normalDate < 0:
dateStr = "%09d" % normalDate
else:
dateStr = "%08d" % normalDate
if len(dateStr) < 8:
return 0
elif len(dateStr) == 9:
if (dateStr[0] != '-' and dateStr[0] != '+'):
return 0
year = int(dateStr[:-4])
if year < -9999 or year > 9999 or year == 0:
return 0 # note: zero (0) is not a valid year
month = int(dateStr[-4:-2])
if month < 1 or month > 12:
return 0
if isLeapYear(year):
maxDay = _daysInMonthLeapYear[month - 1]
else:
maxDay = _daysInMonthNormal[month - 1]
day = int(dateStr[-2:])
if day < 1 or day > maxDay:
return 0
if year == 1582 and month == 10 and day > 4 and day < 15:
return 0 # special case of 10 days dropped: Oct 5-14, 1582
return 1
def lastDayOfMonth(self):
"""returns last day of the month as integer 28-31"""
if self.isLeapYear():
return _daysInMonthLeapYear[self.month() - 1]
else:
return _daysInMonthNormal[self.month() - 1]
def localeFormat(self):
"""override this method to use your preferred locale format"""
return self.formatUS()
def month(self):
"""returns month as integer 1-12"""
return int(repr(self.normalDate)[-4:-2])
def monthAbbrev(self):
"""returns month as a 3-character abbreviation, i.e. Jan, Feb, etc."""
return _monthName[self.month() - 1][:3]
def monthName(self):
"""returns month name, i.e. January, February, etc."""
return _monthName[self.month() - 1]
def normalize(self, scalar):
"""convert scalar to normalDate"""
if scalar < _bigBangScalar:
msg = "normalize(%d): scalar below minimum" % \
_bigBangScalar
raise NormalDateException(msg)
if scalar > _bigCrunchScalar:
msg = "normalize(%d): scalar exceeds maximum" % \
_bigCrunchScalar
raise NormalDateException(msg)
from math import floor
if scalar >= -115860:
year = 1600 + int(floor((scalar + 109573) / 365.2425))
elif scalar >= -693597:
year = 4 + int(floor((scalar + 692502) / 365.2425))
else:
year = -4 + int(floor((scalar + 695058) / 365.2425))
days = scalar - firstDayOfYear(year) + 1
if days <= 0:
year = year - 1
days = scalar - firstDayOfYear(year) + 1
daysInYear = 365
if isLeapYear(year):
daysInYear = daysInYear + 1
if days > daysInYear:
year = year + 1
days = scalar - firstDayOfYear(year) + 1
# add 10 days if between Oct 15, 1582 and Dec 31, 1582
if (scalar >= -115860 and scalar <= -115783):
days = days + 10
if isLeapYear(year):
daysByMonth = _daysInMonthLeapYear
else:
daysByMonth = _daysInMonthNormal
dc = 0; month = 12
for m in range(len(daysByMonth)):
dc = dc + daysByMonth[m]
if dc >= days:
month = m + 1
break
# add up the days in prior months
priorMonthDays = 0
for m in range(month - 1):
priorMonthDays = priorMonthDays + daysByMonth[m]
day = days - priorMonthDays
self.setNormalDate((year, month, day))
def range(self, days):
"""Return a range of normalDates as a list. Parameter
may be an int or normalDate."""
if not isinstance(days,int):
days = days - self # if not int, assume arg is normalDate type
r = []
for i in range(days):
r.append(self + i)
return r
def __repr__(self):
"""print format: [-]yyyymmdd"""
# Note: When disassembling a NormalDate string, be sure to
# count from the right, i.e. epochMonth = int(repr(Epoch)[-4:-2]),
# or the slice won't work for dates B.C.
if self.normalDate < 0:
return "%09d" % self.normalDate
else:
return "%08d" % self.normalDate
def scalar(self):
"""days since baseline date: Jan 1, 1900"""
(year, month, day) = self.toTuple()
days = firstDayOfYear(year) + day - 1
if self.isLeapYear():
for m in range(month - 1):
days = days + _daysInMonthLeapYear[m]
else:
for m in range(month - 1):
days = days + _daysInMonthNormal[m]
if year == 1582:
if month > 10 or (month == 10 and day > 4):
days = days - 10
return days
def setDay(self, day):
"""set the day of the month"""
maxDay = self.lastDayOfMonth()
if day < 1 or day > maxDay:
msg = "day is outside of range 1 to %d" % maxDay
raise NormalDateException(msg)
(y, m, d) = self.toTuple()
self.setNormalDate((y, m, day))
def setMonth(self, month):
"""set the month [1-12]"""
if month < 1 or month > 12:
raise NormalDateException('month is outside range 1 to 12')
(y, m, d) = self.toTuple()
self.setNormalDate((y, month, d))
def setNormalDate(self, normalDate):
"""
accepts date as scalar string/integer (yyyymmdd) or tuple
(year, month, day, ...)"""
if isinstance(normalDate,int):
self.normalDate = normalDate
elif isinstance(normalDate,str):
try:
self.normalDate = int(normalDate)
except:
m = _iso_re.match(normalDate)
if m:
self.setNormalDate(m.group(1)+m.group(2)+m.group(3))
else:
raise NormalDateException("unable to setNormalDate(%s)" % repr(normalDate))
elif isinstance(normalDate,_DateSeqTypes):
self.normalDate = int("%04d%02d%02d" % normalDate[:3])
elif isinstance(normalDate,NormalDate):
self.normalDate = normalDate.normalDate
elif isinstance(normalDate,(datetime.datetime,datetime.date)):
self.normalDate = (normalDate.year*100+normalDate.month)*100+normalDate.day
if not self._isValidNormalDate(self.normalDate):
raise NormalDateException("unable to setNormalDate(%s)" % repr(normalDate))
def setYear(self, year):
if year == 0:
raise NormalDateException('cannot set year to zero')
elif year < -9999:
raise NormalDateException('year cannot be less than -9999')
elif year > 9999:
raise NormalDateException('year cannot be greater than 9999')
(y, m, d) = self.toTuple()
self.setNormalDate((year, m, d))
__setstate__ = setNormalDate
def __sub__(self, v):
if isinstance(v,int):
return self.__add__(-v)
return self.scalar() - v.scalar()
def __rsub__(self,v):
if isinstance(v,int):
return NormalDate(v) - self
else:
return v.scalar() - self.scalar()
def toTuple(self):
"""return date as (year, month, day) tuple"""
return (self.year(), self.month(), self.day())
def year(self):
"""return year in yyyy format, negative values indicate B.C."""
return int(repr(self.normalDate)[:-4])
################# Utility functions #################
def bigBang():
"""return lower boundary as a NormalDate"""
return NormalDate((-9999, 1, 1))
def bigCrunch():
"""return upper boundary as a NormalDate"""
return NormalDate((9999, 12, 31))
def dayOfWeek(y, m, d):
"""return integer representing day of week, Mon=0, Tue=1, etc."""
if m == 1 or m == 2:
m = m + 12
y = y - 1
return (d + 2*m + 3*(m+1)//5 + y + y//4 - y//100 + y//400) % 7
def firstDayOfYear(year):
"""number of days to the first of the year, relative to Jan 1, 1900"""
if not isinstance(year,int):
msg = "firstDayOfYear() expected integer, got %s" % type(year)
raise NormalDateException(msg)
if year == 0:
raise NormalDateException('first day of year cannot be zero (0)')
elif year < 0: # BCE calculation
firstDay = (year * 365) + int((year - 1) / 4) - 693596
else: # CE calculation
leapAdjust = int((year + 3) / 4)
if year > 1600:
leapAdjust = leapAdjust - int((year + 99 - 1600) / 100) + \
int((year + 399 - 1600) / 400)
firstDay = year * 365 + leapAdjust - 693963
if year > 1582:
firstDay = firstDay - 10
return firstDay
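# Illustrative sketch (not part of the original module): the scalar baseline is
# Jan 1, 1900, and the 1582 Gregorian reform is handled by dropping ten days.
def _exampleScalarBaseline():
    assert firstDayOfYear(1900) == 0                     # scalar zero point
    assert int(NormalDate(15821004) + 1) == 15821015     # Oct 5-14, 1582 never existed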
def FND(d):
'''convert to ND if required'''
return isinstance(d,NormalDate) and d or ND(d)
Epoch=bigBang()
ND=NormalDate
BDEpoch=ND(15821018)
BDEpochScalar = -115857
class BusinessDate(NormalDate):
"""
Specialised NormalDate
"""
def add(self, days):
"""add days to date; use negative integers to subtract"""
if not isinstance(days,int):
raise NormalDateException('add method parameter must be integer')
self.normalize(self.scalar() + days)
def __add__(self, days):
"""add integer to BusinessDate and return a new, calculated value"""
if not isinstance(days,int):
raise NormalDateException('__add__ parameter must be integer')
cloned = self.clone()
cloned.add(days)
return cloned
def __sub__(self, v):
return isinstance(v,int) and self.__add__(-v) or self.scalar() - v.scalar()
def asNormalDate(self):
return ND(self.normalDate)
def daysBetweenDates(self, normalDate):
return self.asNormalDate.daysBetweenDates(normalDate)
def _checkDOW(self):
if self.dayOfWeek()>4: raise NormalDateException("%r isn't a business day" % self.normalDate)
def normalize(self, i):
i = int(i)
NormalDate.normalize(self, (i//5)*7 + i%5 + BDEpochScalar)
def scalar(self):
d = self.asNormalDate()
i = d - BDEpoch #luckily BDEpoch is a Monday so we don't have a problem
#concerning the relative weekday
return 5*(i//7) + i%7
def setNormalDate(self, normalDate):
NormalDate.setNormalDate(self,normalDate)
self._checkDOW()
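# Illustrative sketch (not part of the original module): BusinessDate counts
# only weekdays, so adding one business day to a Friday lands on Monday.
def _exampleBusinessDayArithmetic():
    friday = BusinessDate('20010119')        # Fri 19 Jan 2001
    assert int(friday + 1) == 20010122       # Mon 22 Jan 2001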
if __name__ == '__main__':
today = NormalDate()
print("NormalDate test:")
print(" Today (%s) is: %s %s" % (today, today.dayOfWeekAbbrev(), today.localeFormat()))
yesterday = today - 1
print(" Yesterday was: %s %s" % (yesterday.dayOfWeekAbbrev(), yesterday.localeFormat()))
tomorrow = today + 1
print(" Tomorrow will be: %s %s" % (tomorrow.dayOfWeekAbbrev(), tomorrow.localeFormat()))
print(" Days between tomorrow and yesterday: %d" % (tomorrow - yesterday))
print(today.formatMS('{d}/{m}/{yy}'))
print(today.formatMS('{dd}/{m}/{yy}'))
print(today.formatMS('{ddd} {d}/{m}/{yy}'))
print(today.formatMS('{dddd} {d}/{m}/{yy}'))
print(today.formatMS('{d}/{mm}/{yy}'))
print(today.formatMS('{d}/{mmm}/{yy}'))
print(today.formatMS('{d}/{mmmm}/{yy}'))
print(today.formatMS('{d}/{m}/{yyyy}'))
b = BusinessDate('20010116')
print('b=',b,'b.scalar()', b.scalar())
the-stack_0_18736
class Categories:
"""
A class used to represent a category of a feature. A category represents a
choice given in a question.
Attributes
----------
name : str
the name of the category
index : int
the index is used to identify the categories. It preserves the order of the
categories in case of ordinal features.
prob : float
the occurrence probability of a category of a feature in a particular population
entropy : float
the entropy of a category of a feature in a particular population given by
entropy = -p * log(p) where p is the occurrence probability
"""
def __init__(self, name, index):
"""
Parameters
----------
name : str
the name of the category
index : int
the index is used to identify the categories. It preserves the order of the
categories in case of ordinal features.
"""
self.name = name
self.index = index
self.prob = 0
self.entropy = 0
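# Illustrative sketch (not part of the original class; the helper below is
# hypothetical): occurrence probabilities and the per-category entropy
# -p * log(p) could be filled in from observed counts like this.
def category_stats(names, counts):
    """Hypothetical helper: build Categories objects from raw counts."""
    from math import log
    total = sum(counts)
    categories = []
    for index, (name, count) in enumerate(zip(names, counts)):
        cat = Categories(name, index)
        cat.prob = count / total
        cat.entropy = -cat.prob * log(cat.prob) if cat.prob > 0 else 0.0
        categories.append(cat)
    return categories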
the-stack_0_18737
# -*- coding: utf-8 -*-
"""
pygments.lexers.fortran
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Fortran languages.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, include, words, using, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
__all__ = ['FortranLexer', 'FortranFixedLexer']
class FortranLexer(RegexLexer):
"""
Lexer for FORTRAN 90 code.
.. versionadded:: 0.10
"""
name = 'Fortran'
aliases = ['fortran']
filenames = ['*.f03', '*.f90', '*.F03', '*.F90']
mimetypes = ['text/x-fortran']
flags = re.IGNORECASE | re.MULTILINE
# Data Types: INTEGER, REAL, COMPLEX, LOGICAL, CHARACTER and DOUBLE PRECISION
# Operators: **, *, +, -, /, <, >, <=, >=, ==, /=
# Logical (?): NOT, AND, OR, EQV, NEQV
# Builtins:
# http://gcc.gnu.org/onlinedocs/gcc-3.4.6/g77/Table-of-Intrinsic-Functions.html
tokens = {
'root': [
(r'^#.*\n', Comment.Preproc),
(r'!.*\n', Comment),
include('strings'),
include('core'),
(r'[a-z][\w$]*', Name),
include('nums'),
(r'[\s]+', Text),
],
'core': [
# Statements
(words((
'ABSTRACT', 'ACCEPT', 'ALL', 'ALLSTOP', 'ALLOCATABLE', 'ALLOCATE',
'ARRAY', 'ASSIGN', 'ASSOCIATE', 'ASYNCHRONOUS', 'BACKSPACE', 'BIND',
'BLOCK', 'BLOCKDATA', 'BYTE', 'CALL', 'CASE', 'CLASS', 'CLOSE',
'CODIMENSION', 'COMMON', 'CONCURRRENT', 'CONTIGUOUS', 'CONTAINS',
'CONTINUE', 'CRITICAL', 'CYCLE', 'DATA', 'DEALLOCATE', 'DECODE',
'DEFERRED', 'DIMENSION', 'DO', 'ELEMENTAL', 'ELSE', 'ENCODE', 'END',
'ENTRY', 'ENUM', 'ENUMERATOR', 'EQUIVALENCE', 'EXIT', 'EXTENDS',
'EXTERNAL', 'EXTRINSIC', 'FILE', 'FINAL', 'FORALL', 'FORMAT',
'FUNCTION', 'GENERIC', 'GOTO', 'IF', 'IMAGES', 'IMPLICIT',
'IMPORT', 'IMPURE', 'INCLUDE', 'INQUIRE', 'INTENT', 'INTERFACE',
'INTRINSIC', 'IS', 'LOCK', 'MEMORY', 'MODULE', 'NAMELIST', 'NULLIFY',
'NONE', 'NON_INTRINSIC', 'NON_OVERRIDABLE', 'NOPASS', 'OPEN', 'OPTIONAL',
'OPTIONS', 'PARAMETER', 'PASS', 'PAUSE', 'POINTER', 'PRINT', 'PRIVATE',
'PROGRAM', 'PROCEDURE', 'PROTECTED', 'PUBLIC', 'PURE', 'READ',
'RECURSIVE', 'RESULT', 'RETURN', 'REWIND', 'SAVE', 'SELECT', 'SEQUENCE',
'STOP', 'SUBMODULE', 'SUBROUTINE', 'SYNC', 'SYNCALL', 'SYNCIMAGES',
'SYNCMEMORY', 'TARGET', 'THEN', 'TYPE', 'UNLOCK', 'USE', 'VALUE',
'VOLATILE', 'WHERE', 'WRITE', 'WHILE'), prefix=r'\b', suffix=r'\s*\b'),
Keyword),
# Data Types
(words((
'CHARACTER', 'COMPLEX', 'DOUBLE PRECISION', 'DOUBLE COMPLEX', 'INTEGER',
'LOGICAL', 'REAL', 'C_INT', 'C_SHORT', 'C_LONG', 'C_LONG_LONG',
'C_SIGNED_CHAR', 'C_SIZE_T', 'C_INT8_T', 'C_INT16_T', 'C_INT32_T',
'C_INT64_T', 'C_INT_LEAST8_T', 'C_INT_LEAST16_T', 'C_INT_LEAST32_T',
'C_INT_LEAST64_T', 'C_INT_FAST8_T', 'C_INT_FAST16_T', 'C_INT_FAST32_T',
'C_INT_FAST64_T', 'C_INTMAX_T', 'C_INTPTR_T', 'C_FLOAT', 'C_DOUBLE',
'C_LONG_DOUBLE', 'C_FLOAT_COMPLEX', 'C_DOUBLE_COMPLEX',
'C_LONG_DOUBLE_COMPLEX', 'C_BOOL', 'C_CHAR', 'C_PTR', 'C_FUNPTR'),
prefix=r'\b', suffix=r'\s*\b'),
Keyword.Type),
# Operators
(r'(\*\*|\*|\+|-|\/|<|>|<=|>=|==|\/=|=)', Operator),
(r'(::)', Keyword.Declaration),
(r'[()\[\],:&%;.]', Punctuation),
# Intrinsics
(words((
'Abort', 'Abs', 'Access', 'AChar', 'ACos', 'ACosH', 'AdjustL',
'AdjustR', 'AImag', 'AInt', 'Alarm', 'All', 'Allocated', 'ALog',
'AMax', 'AMin', 'AMod', 'And', 'ANInt', 'Any', 'ASin', 'ASinH',
'Associated', 'ATan', 'ATanH', 'Atomic_Define', 'Atomic_Ref',
'BesJ', 'BesJN', 'Bessel_J0', 'Bessel_J1', 'Bessel_JN', 'Bessel_Y0',
'Bessel_Y1', 'Bessel_YN', 'BesY', 'BesYN', 'BGE', 'BGT', 'BLE',
'BLT', 'Bit_Size', 'BTest', 'CAbs', 'CCos', 'Ceiling', 'CExp',
'Char', 'ChDir', 'ChMod', 'CLog', 'Cmplx', 'Command_Argument_Count',
'Complex', 'Conjg', 'Cos', 'CosH', 'Count', 'CPU_Time', 'CShift',
'CSin', 'CSqRt', 'CTime', 'C_Loc', 'C_Associated',
'C_Null_Ptr', 'C_Null_Funptr', 'C_F_Pointer', 'C_F_ProcPointer',
'C_Null_Char', 'C_Alert', 'C_Backspace', 'C_Form_Feed', 'C_FunLoc',
'C_Sizeof', 'C_New_Line', 'C_Carriage_Return',
'C_Horizontal_Tab', 'C_Vertical_Tab', 'DAbs', 'DACos', 'DASin',
'DATan', 'Date_and_Time', 'DbesJ', 'DbesJN', 'DbesY',
'DbesYN', 'Dble', 'DCos', 'DCosH', 'DDiM', 'DErF',
'DErFC', 'DExp', 'Digits', 'DiM', 'DInt', 'DLog', 'DMax',
'DMin', 'DMod', 'DNInt', 'Dot_Product', 'DProd', 'DSign', 'DSinH',
'DShiftL', 'DShiftR', 'DSin', 'DSqRt', 'DTanH', 'DTan', 'DTime',
'EOShift', 'Epsilon', 'ErF', 'ErFC', 'ErFC_Scaled', 'ETime',
'Execute_Command_Line', 'Exit', 'Exp', 'Exponent', 'Extends_Type_Of',
'FDate', 'FGet', 'FGetC', 'FindLoc', 'Float', 'Floor', 'Flush',
'FNum', 'FPutC', 'FPut', 'Fraction', 'FSeek', 'FStat', 'FTell',
'Gamma', 'GError', 'GetArg', 'Get_Command', 'Get_Command_Argument',
'Get_Environment_Variable', 'GetCWD', 'GetEnv', 'GetGId', 'GetLog',
'GetPId', 'GetUId', 'GMTime', 'HostNm', 'Huge', 'Hypot', 'IAbs',
'IAChar', 'IAll', 'IAnd', 'IAny', 'IArgC', 'IBClr', 'IBits',
'IBSet', 'IChar', 'IDate', 'IDiM', 'IDInt', 'IDNInt', 'IEOr',
'IErrNo', 'IFix', 'Imag', 'ImagPart', 'Image_Index', 'Index',
'Int', 'IOr', 'IParity', 'IRand', 'IsaTty', 'IShft', 'IShftC',
'ISign', 'Iso_C_Binding', 'Is_Contiguous', 'Is_Iostat_End',
'Is_Iostat_Eor', 'ITime', 'Kill', 'Kind', 'LBound', 'LCoBound',
'Len', 'Len_Trim', 'LGe', 'LGt', 'Link', 'LLe', 'LLt', 'LnBlnk',
'Loc', 'Log', 'Log_Gamma', 'Logical', 'Long', 'LShift', 'LStat',
'LTime', 'MaskL', 'MaskR', 'MatMul', 'Max', 'MaxExponent',
'MaxLoc', 'MaxVal', 'MClock', 'Merge', 'Merge_Bits', 'Move_Alloc',
'Min', 'MinExponent', 'MinLoc', 'MinVal', 'Mod', 'Modulo', 'MvBits',
'Nearest', 'New_Line', 'NInt', 'Norm2', 'Not', 'Null', 'Num_Images',
'Or', 'Pack', 'Parity', 'PError', 'Precision', 'Present', 'Product',
'Radix', 'Rand', 'Random_Number', 'Random_Seed', 'Range', 'Real',
'RealPart', 'Rename', 'Repeat', 'Reshape', 'RRSpacing', 'RShift',
'Same_Type_As', 'Scale', 'Scan', 'Second', 'Selected_Char_Kind',
'Selected_Int_Kind', 'Selected_Real_Kind', 'Set_Exponent', 'Shape',
'ShiftA', 'ShiftL', 'ShiftR', 'Short', 'Sign', 'Signal', 'SinH',
'Sin', 'Sleep', 'Sngl', 'Spacing', 'Spread', 'SqRt', 'SRand',
'Stat', 'Storage_Size', 'Sum', 'SymLnk', 'System', 'System_Clock',
'Tan', 'TanH', 'Time', 'This_Image', 'Tiny', 'TrailZ', 'Transfer',
'Transpose', 'Trim', 'TtyNam', 'UBound', 'UCoBound', 'UMask',
'Unlink', 'Unpack', 'Verify', 'XOr', 'ZAbs', 'ZCos', 'ZExp',
'ZLog', 'ZSin', 'ZSqRt'), prefix=r'\b', suffix=r'\s*\b'),
Name.Builtin),
# Booleans
(r'\.(true|false)\.', Name.Builtin),
# Comparing Operators
(r'\.(eq|ne|lt|le|gt|ge|not|and|or|eqv|neqv)\.', Operator.Word),
],
'strings': [
(r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
],
'nums': [
(r'\d+(?![.e])(_[a-z]\w+)?', Number.Integer),
(r'[+-]?\d*\.\d+([ed][-+]?\d+)?(_[a-z]\w+)?', Number.Float),
(r'[+-]?\d+\.\d*([ed][-+]?\d+)?(_[a-z]\w+)?', Number.Float),
],
}
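# Illustrative usage sketch (not part of the original module; standard pygments
# lexer API assumed): dump the (token, value) pairs produced for a small
# free-form snippet.
def _example_token_stream(code="program hello\n  print *, 'hi'\nend program hello\n"):
    return list(FortranLexer().get_tokens(code))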
class FortranFixedLexer(RegexLexer):
"""
Lexer for fixed format Fortran.
.. versionadded:: 2.1
"""
name = 'FortranFixed'
aliases = ['fortranfixed']
filenames = ['*.f', '*.F']
flags = re.IGNORECASE
def _lex_fortran(self, match, ctx=None):
"""Lex a line just as free form fortran without line break."""
lexer = FortranLexer()
text = match.group(0) + "\n"
for index, token, value in lexer.get_tokens_unprocessed(text):
value = value.replace('\n', '')
if value != '':
yield index, token, value
tokens = {
'root': [
(r'[C*].*\n', Comment),
(r'#.*\n', Comment.Preproc),
(r' {0,4}!.*\n', Comment),
(r'(.{5})', Name.Label, 'cont-char'),
(r'.*\n', using(FortranLexer)),
],
'cont-char': [
(' ', Text, 'code'),
('0', Comment, 'code'),
('.', Generic.Strong, 'code'),
],
'code': [
(r'(.{66})(.*)(\n)',
bygroups(_lex_fortran, Comment, Text), 'root'),
(r'(.*)(\n)', bygroups(_lex_fortran, Text), 'root'),
default('root'),
]
}
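# Illustrative usage sketch (not part of the original module): fixed-form input
# is split column-wise -- columns 1-5 are the label field, column 6 the
# continuation marker, columns 7-72 are lexed as free-form code, and anything
# past column 72 is treated as a comment.
def _example_fixed_form_tokens(line="      PRINT *, 'HELLO'\n"):
    return list(FortranFixedLexer().get_tokens(line))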
the-stack_0_18739
import copy
from cereal import car
from opendbc.can.can_define import CANDefine
from selfdrive.config import Conversions as CV
from selfdrive.car.interfaces import CarStateBase
from opendbc.can.parser import CANParser
from selfdrive.car.subaru.values import DBC, STEER_THRESHOLD, CAR, PREGLOBAL_CARS
class CarState(CarStateBase):
def __init__(self, CP):
super().__init__(CP)
can_define = CANDefine(DBC[CP.carFingerprint]["pt"])
self.shifter_values = can_define.dv["Transmission"]["Gear"]
def update(self, cp, cp_cam):
ret = car.CarState.new_message()
ret.gas = cp.vl["Throttle"]["Throttle_Pedal"] / 255.
ret.gasPressed = ret.gas > 1e-5
if self.car_fingerprint in PREGLOBAL_CARS:
ret.brakePressed = cp.vl["Brake_Pedal"]["Brake_Pedal"] > 2
else:
ret.brakePressed = cp.vl["Brake_Status"]["Brake"] == 1
ret.wheelSpeeds.fl = cp.vl["Wheel_Speeds"]["FL"] * CV.KPH_TO_MS
ret.wheelSpeeds.fr = cp.vl["Wheel_Speeds"]["FR"] * CV.KPH_TO_MS
ret.wheelSpeeds.rl = cp.vl["Wheel_Speeds"]["RL"] * CV.KPH_TO_MS
ret.wheelSpeeds.rr = cp.vl["Wheel_Speeds"]["RR"] * CV.KPH_TO_MS
ret.vEgoRaw = (ret.wheelSpeeds.fl + ret.wheelSpeeds.fr + ret.wheelSpeeds.rl + ret.wheelSpeeds.rr) / 4.
# Kalman filter, even though Subaru raw wheel speed is heavily filtered by default
ret.vEgo, ret.aEgo = self.update_speed_kf(ret.vEgoRaw)
ret.standstill = ret.vEgoRaw < 0.01
# continuous blinker signals for assisted lane change
ret.leftBlinker, ret.rightBlinker = self.update_blinker_from_lamp(
50, cp.vl["Dashlights"]["LEFT_BLINKER"], cp.vl["Dashlights"]["RIGHT_BLINKER"])
if self.CP.enableBsm:
ret.leftBlindspot = (cp.vl["BSD_RCTA"]["L_ADJACENT"] == 1) or (cp.vl["BSD_RCTA"]["L_APPROACHING"] == 1)
ret.rightBlindspot = (cp.vl["BSD_RCTA"]["R_ADJACENT"] == 1) or (cp.vl["BSD_RCTA"]["R_APPROACHING"] == 1)
can_gear = int(cp.vl["Transmission"]["Gear"])
ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(can_gear, None))
ret.steeringAngleDeg = cp.vl["Steering_Torque"]["Steering_Angle"]
ret.steeringTorque = cp.vl["Steering_Torque"]["Steer_Torque_Sensor"]
ret.steeringPressed = abs(ret.steeringTorque) > STEER_THRESHOLD[self.car_fingerprint]
ret.cruiseState.enabled = cp.vl["CruiseControl"]["Cruise_Activated"] != 0
ret.cruiseState.available = cp.vl["CruiseControl"]["Cruise_On"] != 0
ret.cruiseState.speed = cp_cam.vl["ES_DashStatus"]["Cruise_Set_Speed"] * CV.KPH_TO_MS
if (self.car_fingerprint in PREGLOBAL_CARS and cp.vl["Dash_State2"]["UNITS"] == 1) or \
(self.car_fingerprint not in PREGLOBAL_CARS and cp.vl["Dashlights"]["UNITS"] == 1):
ret.cruiseState.speed *= CV.MPH_TO_KPH
ret.seatbeltUnlatched = cp.vl["Dashlights"]["SEATBELT_FL"] == 1
ret.doorOpen = any([cp.vl["BodyInfo"]["DOOR_OPEN_RR"],
cp.vl["BodyInfo"]["DOOR_OPEN_RL"],
cp.vl["BodyInfo"]["DOOR_OPEN_FR"],
cp.vl["BodyInfo"]["DOOR_OPEN_FL"]])
ret.steerError = cp.vl["Steering_Torque"]["Steer_Error_1"] == 1
if self.car_fingerprint in PREGLOBAL_CARS:
self.cruise_button = cp_cam.vl["ES_CruiseThrottle"]["Cruise_Button"]
self.ready = not cp_cam.vl["ES_DashStatus"]["Not_Ready_Startup"]
self.es_accel_msg = copy.copy(cp_cam.vl["ES_CruiseThrottle"])
else:
ret.steerWarning = cp.vl["Steering_Torque"]["Steer_Warning"] == 1
ret.cruiseState.nonAdaptive = cp_cam.vl["ES_DashStatus"]["Conventional_Cruise"] == 1
self.es_distance_msg = copy.copy(cp_cam.vl["ES_Distance"])
self.es_lkas_msg = copy.copy(cp_cam.vl["ES_LKAS_State"])
return ret
@staticmethod
def get_can_parser(CP):
# this function generates lists of signals, messages and initial values
signals = [
# sig_name, sig_address, default
("Steer_Torque_Sensor", "Steering_Torque", 0),
("Steering_Angle", "Steering_Torque", 0),
("Steer_Error_1", "Steering_Torque", 0),
("Cruise_On", "CruiseControl", 0),
("Cruise_Activated", "CruiseControl", 0),
("Brake_Pedal", "Brake_Pedal", 0),
("Throttle_Pedal", "Throttle", 0),
("LEFT_BLINKER", "Dashlights", 0),
("RIGHT_BLINKER", "Dashlights", 0),
("SEATBELT_FL", "Dashlights", 0),
("FL", "Wheel_Speeds", 0),
("FR", "Wheel_Speeds", 0),
("RL", "Wheel_Speeds", 0),
("RR", "Wheel_Speeds", 0),
("DOOR_OPEN_FR", "BodyInfo", 1),
("DOOR_OPEN_FL", "BodyInfo", 1),
("DOOR_OPEN_RR", "BodyInfo", 1),
("DOOR_OPEN_RL", "BodyInfo", 1),
("Gear", "Transmission", 0),
]
checks = [
# sig_address, frequency
("Throttle", 100),
("Brake_Pedal", 50),
("Wheel_Speeds", 50),
("Transmission", 100),
("Steering_Torque", 50),
("BodyInfo", 1),
]
if CP.enableBsm:
signals += [
("L_ADJACENT", "BSD_RCTA", 0),
("R_ADJACENT", "BSD_RCTA", 0),
("L_APPROACHING", "BSD_RCTA", 0),
("R_APPROACHING", "BSD_RCTA", 0),
]
checks += [
("BSD_RCTA", 17),
]
if CP.carFingerprint in PREGLOBAL_CARS:
checks += [
("BodyInfo", 1),
("CruiseControl", 50),
("Dash_State2", 1),
]
if CP.carFingerprint in [CAR.FORESTER_PREGLOBAL, CAR.LEVORG_PREGLOBAL, CAR.WRX_PREGLOBAL]:
checks += [
("Dashlights", 20),
]
elif CP.carFingerprint in [CAR.LEGACY_PREGLOBAL, CAR.LEGACY_PREGLOBAL_2018, CAR.OUTBACK_PREGLOBAL, CAR.OUTBACK_PREGLOBAL_2018]:
checks += [
("Dashlights", 10),
]
signals += [
("UNITS", "Dash_State2", 0),
]
else:
signals += [
("Steer_Warning", "Steering_Torque", 0),
("Brake", "Brake_Status", 0),
("UNITS", "Dashlights", 0),
]
checks += [
("Dashlights", 10),
("BodyInfo", 10),
("CruiseControl", 20),
("Brake_Status", 50),
]
return CANParser(DBC[CP.carFingerprint]["pt"], signals, checks, 0)
@staticmethod
def get_cam_can_parser(CP):
if CP.carFingerprint in PREGLOBAL_CARS:
signals = [
("Cruise_Set_Speed", "ES_DashStatus", 0),
("Not_Ready_Startup", "ES_DashStatus", 0),
("Throttle_Cruise", "ES_CruiseThrottle", 0),
("Signal1", "ES_CruiseThrottle", 0),
("Cruise_Activated", "ES_CruiseThrottle", 0),
("Signal2", "ES_CruiseThrottle", 0),
("Brake_On", "ES_CruiseThrottle", 0),
("Distance_Swap", "ES_CruiseThrottle", 0),
("Standstill", "ES_CruiseThrottle", 0),
("Signal3", "ES_CruiseThrottle", 0),
("Close_Distance", "ES_CruiseThrottle", 0),
("Signal4", "ES_CruiseThrottle", 0),
("Standstill_2", "ES_CruiseThrottle", 0),
("Cruise_Fault", "ES_CruiseThrottle", 0),
("Signal5", "ES_CruiseThrottle", 0),
("Counter", "ES_CruiseThrottle", 0),
("Signal6", "ES_CruiseThrottle", 0),
("Cruise_Button", "ES_CruiseThrottle", 0),
("Signal7", "ES_CruiseThrottle", 0),
]
checks = [
("ES_DashStatus", 20),
("ES_CruiseThrottle", 20),
]
else:
signals = [
("Cruise_Set_Speed", "ES_DashStatus", 0),
("Conventional_Cruise", "ES_DashStatus", 0),
("Counter", "ES_Distance", 0),
("Signal1", "ES_Distance", 0),
("Cruise_Fault", "ES_Distance", 0),
("Cruise_Throttle", "ES_Distance", 0),
("Signal2", "ES_Distance", 0),
("Car_Follow", "ES_Distance", 0),
("Signal3", "ES_Distance", 0),
("Cruise_Brake_Active", "ES_Distance", 0),
("Distance_Swap", "ES_Distance", 0),
("Cruise_EPB", "ES_Distance", 0),
("Signal4", "ES_Distance", 0),
("Close_Distance", "ES_Distance", 0),
("Signal5", "ES_Distance", 0),
("Cruise_Cancel", "ES_Distance", 0),
("Cruise_Set", "ES_Distance", 0),
("Cruise_Resume", "ES_Distance", 0),
("Signal6", "ES_Distance", 0),
("Counter", "ES_LKAS_State", 0),
("LKAS_Alert_Msg", "ES_LKAS_State", 0),
("Signal1", "ES_LKAS_State", 0),
("LKAS_ACTIVE", "ES_LKAS_State", 0),
("LKAS_Dash_State", "ES_LKAS_State", 0),
("Signal2", "ES_LKAS_State", 0),
("Backward_Speed_Limit_Menu", "ES_LKAS_State", 0),
("LKAS_Left_Line_Enable", "ES_LKAS_State", 0),
("LKAS_Left_Line_Light_Blink", "ES_LKAS_State", 0),
("LKAS_Right_Line_Enable", "ES_LKAS_State", 0),
("LKAS_Right_Line_Light_Blink", "ES_LKAS_State", 0),
("LKAS_Left_Line_Visible", "ES_LKAS_State", 0),
("LKAS_Right_Line_Visible", "ES_LKAS_State", 0),
("LKAS_Alert", "ES_LKAS_State", 0),
("Signal3", "ES_LKAS_State", 0),
]
checks = [
("ES_DashStatus", 10),
("ES_Distance", 20),
("ES_LKAS_State", 10),
]
return CANParser(DBC[CP.carFingerprint]["pt"], signals, checks, 2)
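# Illustrative sketch (not part of the original file; the usage below reflects
# the usual openpilot pattern but should be treated as an assumption): the car
# interface builds both parsers above, updates them from the CAN bus, and
# feeds them to CarState.update, roughly:
#
#     cp = CarState.get_can_parser(CP)
#     cp_cam = CarState.get_cam_can_parser(CP)
#     CS = CarState(CP)
#     ret = CS.update(cp, cp_cam)   # returns a car.CarState capnp message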
the-stack_0_18741
#!/usr/bin/python3
import requests
import os
import argparse
from tqdm import tqdm
def pdfdownloader(url, endereco):  # downloads the PDF files
resp = requests.get(url)
if resp.status_code == requests.codes.OK: #pylint: disable=E1101
with open(endereco, 'wb') as new_file:
new_file.write(resp.content)
resp.raise_for_status()
else:
print('Not found...')
def pdfcount(link):
print('Checking how much material is available...')
link_num = link[-13:-12]
disponivel = []
quantidade = 0
if link_num in ('I', 'II'):
numero = ['I','II','III','IV','V','VI','VII']
else:
numero = ['1', '2', '3', '4', '5', '6', '7']
for i in tqdm(numero):
url = link[:-13] + i + link[-12:]
resp = requests.get(url)
if resp.status_code == requests.codes.OK: #pylint: disable=E1101
disponivel.append(i)
quantidade = quantidade + 1
print(f'Found {quantidade} books, preparing download...')
return disponivel
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='')
parser.add_argument('link1', type=str, help='first part of the link')
parser.add_argument('nome', type=str, help='subject name')
args = parser.parse_args()
print("""
""")
print(f"-------- {args.nome} ---------")
url = args.link1
numero = pdfcount(url)
materia = args.nome
os.mkdir(f'output/{materia}')
for i in tqdm(numero):
file_name = os.path.join(f'output/{materia}', materia+f' - Unidade {i}.pdf')
url_download = url[:-13] + i + url[-12:]
pdfdownloader(url_download, file_name)
print(f'Download finished successfully! Check the output/{materia} folder')
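# Usage sketch (the script file name is a placeholder and an existing output/
# directory is assumed; the unit numeral must sit 13 characters from the end
# of the URL, since the script swaps that single character for I..VII):
#
#     python3 downloader.py "<base PDF link containing the unit numeral>" "Calculus"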
the-stack_0_18742
import pytest
from rasa.nlu import registry
from rasa.nlu.components import find_unavailable_packages
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.model import Metadata
from tests.nlu import utilities
@pytest.mark.parametrize("component_class", registry.component_classes)
def test_no_components_with_same_name(component_class):
"""The name of the components need to be unique as they will
be referenced by name when defining processing pipelines."""
names = [cls.name for cls in registry.component_classes]
assert (
names.count(component_class.name) == 1
), "There is more than one component named {}".format(component_class.name)
@pytest.mark.parametrize("pipeline_template", registry.registered_pipeline_templates)
def test_all_components_in_model_templates_exist(pipeline_template):
"""We provide a couple of ready to use pipelines, this test ensures
all components referenced by name in the
pipeline definitions are available."""
components = registry.registered_pipeline_templates[pipeline_template]
for component in components:
assert (
component in registry.registered_components
), "Model template contains unknown component."
@pytest.mark.parametrize("component_class", registry.component_classes)
def test_all_arguments_can_be_satisfied(component_class):
"""Check that `train` method parameters can be filled
filled from the context. Similar to `pipeline_init` test."""
# All available context arguments that will ever be generated during train
# it might still happen that in a certain pipeline
# configuration arguments cannot be satisfied!
provided_properties = {
provided for c in registry.component_classes for provided in c.provides
}
for req in component_class.requires:
assert req in provided_properties, "No component provides required property."
def test_find_unavailable_packages():
unavailable = find_unavailable_packages(
["my_made_up_package_name", "io", "foo_bar", "foo_bar"]
)
assert unavailable == {"my_made_up_package_name", "foo_bar"}
def test_builder_create_unknown(component_builder, default_config):
with pytest.raises(Exception) as excinfo:
component_config = {"name": "my_made_up_componment"}
component_builder.create_component(component_config, default_config)
assert "Unknown component name" in str(excinfo.value)
def test_builder_create_by_module_path(component_builder, default_config):
from rasa.nlu.featurizers.regex_featurizer import RegexFeaturizer
path = "rasa.nlu.featurizers.regex_featurizer.RegexFeaturizer"
component_config = {"name": path}
component = component_builder.create_component(component_config, default_config)
assert type(component) == RegexFeaturizer
def test_builder_load_unknown(component_builder):
with pytest.raises(Exception) as excinfo:
component_meta = {"name": "my_made_up_componment"}
component_builder.load_component(component_meta, "", Metadata({}, None))
assert "Unknown component name" in str(excinfo.value)
def test_example_component(component_builder, tmpdir_factory):
conf = RasaNLUModelConfig(
{"pipeline": [{"name": "tests.nlu.example_component.MyComponent"}]}
)
interpreter = utilities.interpreter_for(
component_builder,
data="./data/examples/rasa/demo-rasa.json",
path=tmpdir_factory.mktemp("projects").strpath,
config=conf,
)
r = interpreter.parse("test")
assert r is not None
the-stack_0_18744
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 4 19:38:37 2019
@author: Guest Group
"""
import pandas as pd
import numpy as np
from flexsolve import wegstein_secant, aitken_secant, secant
from copy import copy as copy_
from numba import njit
__all__ = ('TEA', 'CombinedTEA')
# TODO: Add 'SL', 'DB', 'DDB', 'SYD', 'ACRS' and 'MACRS' functions to generate depreciation data
# %% Depreciation data
_MACRS = {'MACRS5': np.array([.2000, .3200, .1920,
.1152, .1152, .0576]),
'MACRS7': np.array([.1429, .2449, .1749,
.1249, .0893, .0892,
.0893, .0446]),
'MACRS10': np.array([.1000, .1800, .1440,
.1152, .0922, .0737,
.0655, .0655, .0656,
.0655, .0328]),
'MACRS15': np.array([.0500, .0950, .0855,
.0770, .0693, .0623,
.0590, .0590, .0591,
.0590, .0591, .0590,
.0591, .0590, .0591,
.0295]),
'MACRS20': np.array([0.03750, 0.07219, 0.06677,
0.06177, 0.05713, 0.05285,
0.04888, 0.04522, 0.04462,
0.04461, 0.04462, 0.04461,
0.04462, 0.04461, 0.04462,
0.04461, 0.04462, 0.04461,
0.04462, 0.04461, 0.02231])}
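# Illustrative sketch (not part of the original module): the TODO above asks
# for schedule generators such as straight-line ('SL'); a minimal hypothetical
# version would just spread the depreciable capital evenly.
def _straight_line_schedule(years):
    """Hypothetical helper: equal depreciation fractions over `years`."""
    return np.full(years, 1./years)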
# %% Utilities
@njit
def initial_loan_principal(loan, interest):
principal = 0
k = 1. + interest
for loan_i in loan:
principal += loan_i
principal *= k
return principal
@njit
def final_loan_principal(payment, principal, interest, years):
for iter in range(years):
principal += principal * interest - payment
return principal
def solve_payment(payment, loan, interest, years):
principal = initial_loan_principal(loan, interest)
return wegstein_secant(final_loan_principal,
payment, payment+10., 1e-4, 1e-4,
args=(principal, interest, years))
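# Illustrative check (not part of the original module): for a loan drawn in a
# single year, the payment found by solve_payment should match the standard
# annuity formula payment = P*i / (1 - (1+i)**-n).
def _example_annuity_check():
    loan = np.array([1e6])
    interest, years = 0.08, 10
    guess = loan.sum()/years * (1. + interest)
    numeric = solve_payment(guess, loan, interest, years)
    principal = initial_loan_principal(loan, interest)
    closed_form = principal*interest / (1. - (1. + interest)**-years)
    assert abs(numeric - closed_form) < 1e-2*closed_form
    return numeric, closed_form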
@njit
def net_earnings(D, C, S, start,
FCI, TDC, VOC, FOC, sales,
income_tax,
depreciation,
startup_time,
startup_VOCfrac,
startup_FOCfrac,
startup_salesfrac):
end = start+len(depreciation)
D[start:end] = TDC*depreciation
w0 = startup_time
w1 = 1. - w0
C[start] = (w0*startup_VOCfrac*VOC + w1*VOC
+ w0*startup_FOCfrac*FOC + w1*FOC)
S[start] = w0*startup_salesfrac*sales + w1*sales
start1 = start + 1
C[start1:] = VOC + FOC
S[start1:] = sales
return (S - C - D)*(1. - income_tax)
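# Illustrative sketch (not part of the original module): the cash flow table
# built later by TEA.get_cashflow_table discounts each year's cash flow as
# NPV_t = CF_t / (1 + IRR)**t and accumulates the result, e.g.:
def _example_discounted_cashflow():
    IRR = 0.10
    cashflow = np.array([-100., 30., 40., 50., 60.])   # hypothetical CF by year
    years = np.arange(len(cashflow), dtype=float)
    return (cashflow/(1. + IRR)**years).sum()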
# %% Techno-Economic Analysis
class TEA:
"""
Abstract TEA class for cash flow analysis.
**Abstract methods**
_TDC(DPI) -> TDC
Should take direct permanent investment as an argument
and return total depreciable capital.
_FCI(TDC) -> FCI
Should take total depreciable capital as an argument and return
fixed capital investment.
_FOC(FCI) -> FOC
Should take fixed capital investment as an argument and return
fixed operating cost without depreciation.
Parameters
----------
system : System
Should contain feed and product streams.
IRR : float
Internal rate of return (fraction).
duration : tuple[int, int]
Start and end year of venture (e.g. (2018, 2038)).
depreciation : str
'MACRS' + number of years (e.g. 'MACRS7').
operating_days : float
Number of operating days per year.
income_tax : float
Combined federal and state income tax rate (fraction).
lang_factor : float
Lang factor for getting fixed capital investment
from total purchase cost. If no lang factor, estimate
capital investment using bare module factors.
construction_schedule : tuple[float]
Construction investment fractions per year (e.g. (0.5, 0.5) for 50%
capital investment in the first year and 50% investment in the second).
startup_months : float
Startup time in months.
startup_FOCfrac : float
Fraction of fixed operating costs incurred during startup.
startup_VOCfrac : float
Fraction of variable operating costs incurred during startup.
startup_salesfrac : float
Fraction of sales achieved during startup.
WC_over_FCI : float
Working capital as a fraction of fixed capital investment.
finance_interest : float
Yearly interest of capital cost financing as a fraction.
finance_years : int
Number of years the loan is paid for.
finance_fraction : float
Fraction of capital cost that needs to be financed.
Examples
--------
:doc:`tutorial/Techno-economic analysis`
"""
__slots__ = ('system', 'income_tax', 'lang_factor', 'WC_over_FCI',
'finance_interest', 'finance_years', 'finance_fraction',
'_construction_schedule', '_startup_time',
'startup_FOCfrac', 'startup_VOCfrac', 'startup_salesfrac',
'units', '_startup_schedule', '_operating_days',
'_annual_factor', '_duration', '_duration_array',
'_depreciation_array', '_depreciation', '_years',
'_duration', '_start', 'IRR', '_IRR', '_sales')
def __init_subclass__(self, isabstract=False):
if isabstract: return
for method in ('_TDC', '_FCI', '_FOC'):
if not hasattr(self, method):
raise NotImplementedError(f"subclass must implement a '{method}' method unless the 'isabstract' keyword argument is True")
@staticmethod
def like(system, other):
"""Create a Cashflow object from `system` with the same settings as `other`."""
self = copy_(other)
self.units = sorted(system._costunits, key=lambda x: x.line)
self.system = system
system._TEA = self
return self
def __init__(self, system, IRR, duration, depreciation, income_tax,
operating_days, lang_factor, construction_schedule,
startup_months, startup_FOCfrac, startup_VOCfrac,
startup_salesfrac, WC_over_FCI, finance_interest,
finance_years, finance_fraction):
self.duration = duration
self.depreciation = depreciation
self.construction_schedule = construction_schedule
self.startup_months = startup_months
self.operating_days = operating_days
#: [float] Internal rate of return (fraction).
self.IRR = IRR
#: [float] Combined federal and state income tax rate (fraction).
self.income_tax = income_tax
#: [float] Lang factor for getting fixed capital investment from total purchase cost. If no lang factor, estimate capital investment using bare module factors.
self.lang_factor = lang_factor
#: [float] Fraction of fixed operating costs incurred during startup.
self.startup_FOCfrac = startup_FOCfrac
#: [float] Fraction of variable operating costs incurred during startup.
self.startup_VOCfrac = startup_VOCfrac
#: [float] Fraction of sales achieved during startup.
self.startup_salesfrac = startup_salesfrac
#: [float] Working capital as a fraction of fixed capital investment.
self.WC_over_FCI = WC_over_FCI
#: [float] Yearly interest of capital cost financing as a fraction.
self.finance_interest = finance_interest
#: [int] Number of years the loan is paid for.
self.finance_years = finance_years
#: [float] Fraction of capital cost that needs to be financed.
self.finance_fraction = finance_fraction
#: Guess IRR for solve_IRR method
self._IRR = IRR
#: Guess cost for solve_price method
self._sales = 0
#: list[Unit] All unit operations considered
self.units = sorted(system._costunits, key=lambda x: x.line)
#: [System] Should contain feed and product streams.
self.system = system
system._TEA = self
@property
def operating_days(self):
"""[float] Number of operating days per year."""
return self._operating_days
@operating_days.setter
def operating_days(self, days):
"""[float] Number of operating days per year."""
self._operating_days = days
self._annual_factor = days*24
@property
def duration(self):
"""tuple[int, int] Start and end year of venture."""
return self._duration
@duration.setter
def duration(self, duration):
self._duration = duration
self._years = duration[1] - duration[0]
@property
def depreciation(self):
"""[str] 'MACRS' + number of years (e.g. 'MACRS7')."""
return self._depreciation
@depreciation.setter
def depreciation(self, depreciation):
try:
self._depreciation_array = _MACRS[depreciation]
except KeyError:
raise ValueError(f"depreciation must be either 'MACRS5', 'MACRS7', 'MACRS10' or 'MACRS15 (not {repr(depreciation)})")
self._depreciation = depreciation
@property
def construction_schedule(self):
"""tuple[float] Construction investment fractions per year, starting from year 0. For example, for 50% capital investment in year 0 and 50% investment in year 1: (0.5, 0.5)."""
return tuple(self._construction_schedule)
@construction_schedule.setter
def construction_schedule(self, schedule):
self._construction_schedule = np.array(schedule, dtype=float)
self._start = len(schedule)
@property
def startup_months(self):
return self._startup_time * 12.
@startup_months.setter
def startup_months(self, months):
assert months <= 12., "startup time must be less than a year"
self._startup_time = months/12.
@property
def utility_cost(self):
"""Total utility cost (USD/yr)."""
return sum([u.utility_cost for u in self.units]) * self._annual_factor
@property
def purchase_cost(self):
"""Total purchase cost (USD)."""
return sum([u.purchase_cost for u in self.units])
@property
def installation_cost(self):
"""Total installation cost (USD)."""
return sum([u.installation_cost for u in self.units])
@property
def DPI(self):
"""Direct permanent investment."""
return self.purchase_cost * self.lang_factor if self.lang_factor else self.installation_cost
@property
def TDC(self):
"""Total depreciable capital."""
return self._TDC(self.DPI)
@property
def FCI(self):
"""Fixed capital investment."""
return self._FCI(self.TDC)
@property
def TCI(self):
"""Total capital investment."""
return (1. + self.WC_over_FCI)*self.FCI
@property
def FOC(self):
"""Fixed operating costs (USD/yr)."""
return self._FOC(self.FCI)
@property
def VOC(self):
"""Variable operating costs (USD/yr)."""
return self.material_cost + self.utility_cost
@property
def AOC(self):
"""Annual operating cost excluding depreciation (USD/yr)."""
return self.FOC + self.VOC
@property
def working_capital(self):
return self.WC_over_FCI * self.TDC
@property
def material_cost(self):
"""Annual material cost."""
return sum([s.cost for s in self.system.feeds if s.price]) * self._annual_factor
@property
def annual_depreciation(self):
"""Depreciation (USD/yr) equivalent to FCI dived by the the duration of the venture."""
return self.TDC/(self.duration[1]-self.duration[0])
@property
def sales(self):
"""Annual sales revenue."""
return sum([s.cost for s in self.system.products if s.price]) * self._annual_factor
@property
def ROI(self):
"""Return on investment (1/yr) without accounting for annualized depreciation."""
FCI = self.FCI
net_earnings = (1-self.income_tax)*(self.sales-self._AOC(FCI))
TCI = FCI*(1.+self.WC_over_FCI)
return net_earnings/TCI
@property
def net_earnings(self):
"""Net earnings without accounting for annualized depreciation."""
return (1-self.income_tax)*(self.sales-self.AOC)
@property
def PBP(self):
"""Pay back period (yr) without accounting for annualized depreciation."""
FCI = self.FCI
net_earnings = (1-self.income_tax)*(self.sales-self._AOC(FCI))
return FCI/net_earnings
def get_cashflow_table(self):
"""Return DataFrame of the cash flow analysis."""
# Cash flow data and parameters
# index: Year since construction until end of venture
# C_D: Depreciable capital
# C_FC: Fixed capital
# C_WC: Working capital
# D: Depreciation
# L: Loan revenue
# LI: Loan interest payment
# LP: Loan payment
# LPl: Loan principal
# C: Annual operating cost (excluding depreciation)
# S: Sales
# NE: Net earnings
# CF: Cash flow
# DF: Discount factor
# NPV: Net present value
# CNPV: Cumulative NPV
TDC = self.TDC
FCI = self._FCI(TDC)
start = self._start
years = self._years
FOC = self._FOC(FCI)
VOC = self.VOC
sales = self.sales
self._duration_array = np.arange(-start+1, years+1, dtype=float)
length = start+years
C_D, C_FC, C_WC, D, L, LI, LP, LPl, C, S, NE, CF, DF, NPV, CNPV = data = np.zeros((15, length))
depreciation = self._depreciation_array
D[start:start+len(depreciation)] = TDC*depreciation
w0 = self._startup_time
w1 = 1. - w0
C[start] = (w0*self.startup_VOCfrac*VOC + w1*VOC
+ w0*self.startup_FOCfrac*FOC + w1*FOC)
S[start] = w0*self.startup_salesfrac*sales + w1*sales
start1 = start + 1
C[start1:] = VOC + FOC
S[start1:] = sales
NE[:] = (S - C - D)*(1. - self.income_tax)
WC = self.WC_over_FCI * FCI
C_D[:start] = TDC*self._construction_schedule
C_FC[:start] = FCI*self._construction_schedule
C_WC[start-1] = WC
C_WC[-1] = -WC
if self.finance_interest:
interest = self.finance_interest
years = self.finance_years
end = start+years
L[:start] = loan = self.finance_fraction*(C_FC[:start]+C_WC[:start])
f_interest = (1.+interest)
LP[start:end] = solve_payment(loan.sum()/years * f_interest,
loan, interest, years)
loan_principal = 0
for i in range(end):
LI[i] = li = (loan_principal + L[i]) * interest
LPl[i] = loan_principal = loan_principal - LP[i] + li + L[i]
CF[:] = NE + D + L - C_FC - C_WC - LP
else:
CF[:] = NE + D - C_FC - C_WC
DF[:] = 1/(1.+self.IRR)**self._duration_array
NPV[:] = CF*DF
CNPV[:] = NPV.cumsum()
return pd.DataFrame(data.transpose(),
index=np.arange(self._duration[0]-start, self._duration[1]),
columns=('Depreciable capital',
'Fixed capital investment',
'Working capital',
'Depreciation',
'Loan',
'Loan interest payment',
'Loan payment',
'Loan principal',
'Annual operating cost (excluding depreciation)',
'Sales',
'Net earnings',
'Cash flow',
'Discount factor',
'Net present value (NPV)',
'Cumulative NPV'))
@property
def NPV(self):
"""Net present value."""
return self._NPV_at_IRR(self.IRR, self.cashflow)
def _AOC(self, FCI):
"""Return AOC at given FCI"""
return self._FOC(FCI) + self.VOC
def production_cost(self, products, with_annual_depreciation=True):
"""Return production cost of products [USD/yr].
Parameters
----------
products : Iterable[Stream]
Main products of the system
        with_annual_depreciation=True : bool, optional
            Whether to add annualized depreciation to the production cost.
Notes
-----
        If there is more than one main product, the production cost is
        proportionally allocated to each of the main products with respect to
        their market values. The market value of each product is
        determined by the annual production multiplied by its selling price.
"""
market_values = np.array([i.cost for i in products])
total_market_value = market_values.sum()
weights = market_values/total_market_value
return weights * self.total_production_cost(products, with_annual_depreciation)
def total_production_cost(self, products, with_annual_depreciation):
"""Return total production cost of products [USD/yr].
Parameters
----------
products : Iterable[Stream]
Main products of the system
        with_annual_depreciation=True : bool, optional
            Whether to add annualized depreciation to the total production cost.
"""
coproducts = self.system.products.difference(products)
coproduct_sales = sum([s.cost for s in coproducts if s.price]) * self._annual_factor
if with_annual_depreciation:
TDC = self.TDC
annual_depreciation = TDC/(self.duration[1]-self.duration[0])
AOC = self._AOC(self._FCI(TDC))
return AOC + coproduct_sales + annual_depreciation
else:
return self.AOC + coproduct_sales
@property
def cashflow(self):
# Cash flow data and parameters
# C_FC: Fixed capital
# C_WC: Working capital
# Loan: Money gained from loan
# LP: Loan payment
# D: Depreciation
# C: Annual operating cost (excluding depreciation)
# S: Sales
# NE: Net earnings
# CF: Cash flow
TDC = self.TDC
FCI = self._FCI(TDC)
start = self._start
years = self._years
FOC = self._FOC(FCI)
VOC = self.VOC
self._duration_array = np.arange(-start+1, years+1, dtype=float)
D, C_FC, C_WC, Loan, LP, C, S = np.zeros((7, start+years))
NE = net_earnings(D, C, S, start,
FCI, TDC, VOC, FOC, self.sales,
self.income_tax,
self._depreciation_array,
self._startup_time,
self.startup_VOCfrac,
self.startup_FOCfrac,
self.startup_salesfrac)
WC = self.WC_over_FCI * FCI
C_FC[:start] = FCI*self._construction_schedule
C_WC[start-1] = WC
C_WC[-1] = -WC
if self.finance_interest:
interest = self.finance_interest
years = self.finance_years
Loan[:start] = loan = self.finance_fraction*(C_FC[:start]+C_WC[:start])
LP[start:start+years] = solve_payment(loan.sum()/years * (1. + interest),
loan, interest, years)
return NE + D + Loan - C_FC - C_WC - LP
else:
return NE + D - C_FC - C_WC
def _NPV_at_IRR(self, IRR, cashflow):
"""Return NPV at given IRR and cashflow data."""
return (cashflow/(1.+IRR)**self._duration_array).sum()
def _NPV_with_sales(self, sales, NPV, coefficients, discount_factors):
"""Return NPV with an additional annualized sales."""
return NPV + (sales*coefficients/discount_factors).sum()
def solve_IRR(self):
"""Return the IRR at the break even point (NPV = 0) through cash flow analysis."""
try:
self._IRR = wegstein_secant(self._NPV_at_IRR,
self._IRR, self._IRR+1e-6,
xtol=1e-6, maxiter=200,
args=(self.cashflow,))
except:
self._IRR = secant(self._NPV_at_IRR,
0.15, 0.15001,
xtol=1e-6, maxiter=200,
args=(self.cashflow,))
return self._IRR
def _price2cost(self, stream):
"""Get factor to convert stream price to cost for cashflow in solve_price method."""
return stream.F_mass*self._annual_factor*(1-self.income_tax)
def solve_price(self, stream):
"""Return the price (USD/kg) of stream at the break even point (NPV = 0) through cash flow analysis.
Parameters
----------
stream : Stream
Stream with variable selling price.
"""
price2cost = self._price2cost(stream)
discount_factors = (1 + self.IRR)**self._duration_array
cashflow = self.cashflow
NPV = (cashflow/discount_factors).sum()
coefficients = np.ones_like(discount_factors)
start = self._start
coefficients[:start] = 0
w0 = self._startup_time
coefficients[self._start] = w0*self.startup_VOCfrac + (1-w0)
try:
self._sales = wegstein_secant(self._NPV_with_sales,
self._sales, self._sales+1e-6,
xtol=1e-6, maxiter=200,
args=(NPV, coefficients, discount_factors))
except:
self._sales = secant(self._NPV_with_sales,
0, 1e-6,
xtol=1e-6, maxiter=200,
args=(NPV, coefficients, discount_factors))
if stream.sink:
return stream.price - self._sales/price2cost
elif stream.source:
return stream.price + self._sales/price2cost
else:
raise ValueError(f"stream must be either a feed or a product")
def __repr__(self):
return f'<{type(self).__name__}: {self.system.ID}>'
def _info(self):
return (f'{type(self).__name__}: {self.system.ID}\n'
f' NPV: {self.NPV:.3g} USD at {self.IRR:.1%} IRR\n'
f' ROI: {self.ROI:.3g} 1/yr\n'
f' PBP: {self.PBP:.3g} yr')
def show(self):
"""Prints information on unit."""
print(self._info())
_ipython_display_ = show
class CombinedTEA(TEA):
_TDC = _FCI = _FOC = NotImplemented
__slots__ = ('TEAs',)
def __init__(self, TEAs, IRR):
#: iterable[TEA] All TEA objects for cashflow calculation
self.TEAs = TEAs
#: [float] Internal rate of return (fraction)
self.IRR = IRR
#: Guess IRR for solve_IRR method
self._IRR = IRR
#: Guess sales for solve_price method
self._sales = 0
@property
def operating_days(self):
v_all = [i.operating_days for i in self.TEAs]
v0, *vs = v_all
if all([v0 == v for v in vs]): return v0
else: return tuple(v_all)
@operating_days.setter
def operating_days(self, operating_days):
vector = np.zeros(len(self.TEAs))
vector[:] = operating_days
for i, j in zip(self.TEAs, vector): i.operating_days = j
@property
def startup_months(self):
v_all = [i.startup_months for i in self.TEAs]
v0, *vs = v_all
if all([v0 == v for v in vs]): return v0
else: return tuple(v_all)
@startup_months.setter
def startup_months(self, startup_months):
vector = np.zeros(len(self.TEAs))
vector[:] = startup_months
for i, j in zip(self.TEAs, vector): i.startup_months = j
@property
def income_tax(self):
v_all = [i.income_tax for i in self.TEAs]
v0, *vs = v_all
if all([v0 == v for v in vs]): return v0
else: return tuple(v_all)
@income_tax.setter
def income_tax(self, income_tax):
vector = np.zeros(len(self.TEAs))
vector[:] = income_tax
for i, j in zip(self.TEAs, vector): i.income_tax = j
@property
def cashflow(self):
return sum([i.cashflow for i in self.TEAs])
@property
def utility_cost(self):
"""Total utility cost (USD/yr)."""
return sum([i.utility_cost for i in self.TEAs])
@property
def purchase_cost(self):
"""Total purchase cost (USD)."""
return sum([i.purchase_cost for i in self.TEAs])
@property
def installation_cost(self):
"""Total installation cost (USD)."""
return sum([i.installation_cost for i in self.TEAs])
@property
def NPV(self):
return sum([i.NPV for i in self.TEAs])
@property
def DPI(self):
"""Direct permanent investment."""
return sum([i.DPI for i in self.TEAs])
@property
def TDC(self):
"""Total depreciable capital."""
return sum([i.TDC for i in self.TEAs])
@property
def FCI(self):
"""Fixed capital investment."""
return sum([i.FCI for i in self.TEAs])
@property
def TCI(self):
"""Total capital investment."""
return sum([i.TCI for i in self.TEAs])
@property
def FOC(self):
"""Fixed operating costs (USD/yr)."""
return sum([i.FOC for i in self.TEAs])
@property
def VOC(self):
"""Variable operating costs (USD/yr)."""
return self.material_cost + self.utility_cost
@property
def AOC(self):
"""Annual operating cost excluding depreciation (USD/yr)."""
return self.FOC + self.VOC
@property
def working_capital(self):
return sum([i.working_capital for i in self.TEAs])
@property
def material_cost(self):
"""Annual material cost."""
return sum([i.material_cost for i in self.TEAs])
@property
def annual_depreciation(self):
"""Depreciation (USD/yr) equivalent to FCI dived by the the duration of the venture."""
return sum([i.annual_depreciation for i in self.TEAs])
@property
def sales(self):
"""Annual sales revenue."""
return sum([i.sales for i in self.TEAs])
@property
def net_earnings(self):
"""Net earnings without accounting for annualized depreciation."""
return sum([i.net_earnings for i in self.TEAs])
@property
def ROI(self):
"""Return on investment (1/yr) without accounting for annualized depreciation."""
return sum([i.ROI for i in self.TEAs])
@property
def PBP(self):
"""Pay back period (yr) without accounting for annualized depreciation."""
return self.FCI/self.net_earnings
def get_cashflow_table(self):
"""Return DataFrame of the cash flow analysis."""
TEA, *TEAs = self.TEAs
table = TEA.get_cashflow_table()
DF = table['Discount factor']
DF_data = np.array(DF)
for i in TEAs:
i_table = i.get_cashflow_table()
if (i_table.index != table.index).any():
raise NotImplementedError('cannot yet create cashflow table from TEAs with different venture years')
table[:] += np.asarray(i_table)
DF[:] = DF_data
return table
def _NPV_at_IRR(self, IRR, TEA_cashflows):
"""Return NPV at given IRR and cashflow data."""
return sum([i._NPV_at_IRR(IRR, j) for i, j in TEA_cashflows])
def _NPV_with_sales(self, sales, NPV, TEA, coefficients, discount_factors):
"""Return NPV with additional sales."""
return TEA._NPV_with_sales(sales, NPV, coefficients, discount_factors)
def production_cost(self, products, with_annual_depreciation=True):
"""Return production cost of products [USD/yr].
Parameters
----------
products : Iterable[Stream]
Main products of the system
        with_annual_depreciation=True : bool, optional
            Whether to add annualized depreciation to the production cost.
Notes
-----
        If there is more than one main product, the production cost is
        proportionally allocated to each of the main products with respect to
        their market values. The market value of each product is
        determined by the annual production multiplied by its selling price.
"""
market_values = np.array([i.cost for i in products])
total_market_value = market_values.sum()
weights = market_values/total_market_value
total_production_cost = 0
for TEA in self.TEAs:
total_production_cost += TEA.total_production_cost(products, with_annual_depreciation)
return weights * total_production_cost
def solve_IRR(self):
"""Return the IRR at the break even point (NPV = 0) through cash flow analysis."""
try:
self._IRR = aitken_secant(self._NPV_at_IRR,
self._IRR, self._IRR+1e-6,
xtol=1e-8, maxiter=200,
args=([(i, i.cashflow)
for i in self.TEAs],))
except:
self._IRR = secant(self._NPV_at_IRR,
0.15, 0.15001,
xtol=5e-8, maxiter=200,
args=([(i, i.cashflow)
for i in self.TEAs],))
return self._IRR
def solve_price(self, stream, TEA=None):
"""Return the price (USD/kg) of stream at the break even point (NPV = 0) through cash flow analysis.
Parameters
----------
stream : Stream
Stream with variable selling price.
TEA : TEA, optional
Stream should belong here.
"""
if not TEA:
for TEA in self.TEAs:
if stream in TEA.system.feeds or stream in TEA.system.products: break
price2cost = TEA._price2cost(stream)
IRR = self.IRR
NPV = sum([i._NPV_at_IRR(IRR, i.cashflow) for i in self.TEAs])
discount_factors = (1.+IRR)**TEA._duration_array
coefficients = np.ones_like(discount_factors)
start = TEA._start
coefficients[:start] = 0
w0 = TEA._startup_time
coefficients[TEA._start] = w0*TEA.startup_VOCfrac + (1-w0)
try:
self._sales = aitken_secant(self._NPV_with_sales,
self._sales, self._sales+1e-6,
xtol=5e-8, maxiter=200,
args=(NPV, TEA,
coefficients,
discount_factors))
except:
self._sales = secant(self._NPV_with_sales,
0, 1e-6,
xtol=5e-8, maxiter=200,
args=(NPV, TEA,
coefficients,
discount_factors))
if stream.sink:
return stream.price - self._sales/price2cost
elif stream.source:
return stream.price + self._sales/price2cost
else:
raise ValueError(f"stream must be either a feed or a product")
def __repr__(self):
return f'<{type(self).__name__}: {", ".join([i.system.ID for i in self.TEAs])}>'
def _info(self):
return (f'{type(self).__name__}: {", ".join([i.system.ID for i in self.TEAs])}\n'
f' NPV: {self.NPV:.3g} USD at {self.IRR:.1%} IRR\n'
f' ROI: {self.ROI:.3g} 1/yr\n'
f' PBP: {self.PBP:.3g} yr')
def show(self):
"""Prints information on unit."""
print(self._info())
_ipython_display_ = show
# def update_loan_principal(loan_principal, loan, loan_payment, interest):
# principal = 0
# for i, loan_i in enumerate(loan):
# loan_principal[i] = principal = loan_i + principal * interest - loan_payment[i] |
the-stack_0_18746 | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from cirq.ops.qubit_order import default_sorting_key
from cirq.ops import QubitOrder, NamedQubit
def test_default_sorting_key():
assert default_sorting_key('') == ''
assert default_sorting_key('a') == 'a'
assert default_sorting_key('a0') == 'a00000000:1'
assert default_sorting_key('a00') == 'a00000000:2'
assert default_sorting_key('a1bc23') == 'a00000001:1bc00000023:2'
assert default_sorting_key('a9') == 'a00000009:1'
assert default_sorting_key('a09') == 'a00000009:2'
assert default_sorting_key('a00000000:8') == 'a00000000:8:00000008:1'
def test_sorted_by_default_sorting_key():
actual = [
'',
'1',
'a',
'a00000000',
'a00000000:8',
'a9',
'a09',
'a10',
'a11',
]
assert sorted(actual, key=default_sorting_key) == actual
assert sorted(reversed(actual), key=default_sorting_key) == actual
def test_default():
a2 = NamedQubit('a2')
a10 = NamedQubit('a10')
b = NamedQubit('b')
assert QubitOrder.DEFAULT.order_for([]) == ()
assert QubitOrder.DEFAULT.order_for([a10, a2, b]) == (a2, a10, b)
def test_explicit():
a2 = NamedQubit('a2')
a10 = NamedQubit('a10')
b = NamedQubit('b')
with pytest.raises(ValueError):
_ = QubitOrder.explicit([b, b])
q = QubitOrder.explicit([a10, a2, b])
assert q.order_for([b]) == (a10, a2, b)
assert q.order_for([a2]) == (a10, a2, b)
assert q.order_for([]) == (a10, a2, b)
with pytest.raises(ValueError):
_ = q.order_for([NamedQubit('c')])
def test_explicit_with_fallback():
a2 = NamedQubit('a2')
a10 = NamedQubit('a10')
b = NamedQubit('b')
q = QubitOrder.explicit([b], fallback=QubitOrder.DEFAULT)
assert q.order_for([]) == (b,)
assert q.order_for([b]) == (b,)
assert q.order_for([b, a2]) == (b, a2)
assert q.order_for([a2]) == (b, a2)
assert q.order_for([a10, a2]) == (b, a2, a10)
def test_sorted_by():
a = NamedQubit('2')
b = NamedQubit('10')
c = NamedQubit('-5')
q = QubitOrder.sorted_by(lambda e: -int(str(e)))
assert q.order_for([]) == ()
assert q.order_for([a]) == (a,)
assert q.order_for([a, b]) == (b, a)
assert q.order_for([a, b, c]) == (b, a, c)
def test_map():
b = NamedQubit('b!')
q = QubitOrder.explicit([NamedQubit('b')]).map(
internalize=lambda e: NamedQubit(e.name[:-1]),
externalize=lambda e: NamedQubit(e.name + '!'))
assert q.order_for([]) == (b,)
assert q.order_for([b]) == (b,)
def test_qubit_order_or_list():
b = NamedQubit('b')
implied_by_list = QubitOrder.as_qubit_order([b])
assert implied_by_list.order_for([]) == (b,)
implied_by_generator = QubitOrder.as_qubit_order(
NamedQubit(e.name + '!') for e in [b])
assert implied_by_generator.order_for([]) == (NamedQubit('b!'),)
assert implied_by_generator.order_for([]) == (NamedQubit('b!'),)
ordered = QubitOrder.sorted_by(repr)
passed_through = QubitOrder.as_qubit_order(ordered)
assert ordered is passed_through
|
the-stack_0_18747 | #Source : https://leetcode.com/problems/reverse-linked-list/
#Author : Yuan Wang
#Date : 2018-07-25
'''
**********************************************************************************
*Reverse a singly linked list.
*
*Example:
*
*Input: 1->2->3->4->5->NULL
*Output: 5->4->3->2->1->NULL
*Follow up:
*
*A linked list can be reversed either iteratively or recursively. Could you implement both?
**********************************************************************************/
'''
from SLinkedList import SLinked_List, ListNode
def reverseList(head: ListNode) -> ListNode:
current = head
previous = None
while current:
temp = current.next
current.next = previous
previous = current
current = temp
return previous
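# The problem statement above asks whether the list can be reversed both
# iteratively and recursively, but only the iterative version is implemented.
# A minimal recursive sketch (assuming the same ListNode interface with a
# .next attribute) could look like this:
def reverseListRecursive(head: ListNode) -> ListNode:
    # Base case: an empty list or a single node is already reversed
    if head is None or head.next is None:
        return head
    # Reverse the remainder of the list, then append the current head to its tail
    new_head = reverseListRecursive(head.next)
    head.next.next = head
    head.next = None
    return new_head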
elements=[1,2,3,4,5]
sll=SLinked_List()
sll.add_elements(elements)
print(sll)
reverseList(sll.head)
print(sll)
|
the-stack_0_18749 | #!/usr/bin/env python3
import os
import random as random
import matplotlib.pyplot as plt
from wavedata.tools.core import calib_utils
from wavedata.tools.obj_detection import obj_utils
from wavedata.tools.visualization import vis_utils
ROOTDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
def main():
# Start of the Kitti demo code
print('=== Python Kitti Wrapper Demo ===')
# Setting Paths
data_set = 'training'
cam = 2
root_dir = os.path.expanduser('~') + '/Kitti/object/'
image_dir = os.path.join(root_dir, data_set) + '/image_' + str(cam)
label_dir = os.path.join(root_dir, data_set) + '/label_' + str(cam)
calib_dir = os.path.join(root_dir, data_set) + '/calib'
img_idx = int(random.random()*100)
print('img_idx', img_idx)
# Run Visualization Function
f, ax1, ax2 = vis_utils.visualization(image_dir, img_idx)
# Run the main loop to run throughout the images
frame_calibration_info = calib_utils.read_calibration(calib_dir, img_idx)
p = frame_calibration_info.p2
# Load labels
objects = obj_utils.read_labels(label_dir, img_idx)
# For all annotated objects
for obj in objects:
# Draw 2D and 3D boxes
vis_utils.draw_box_2d(ax1, obj)
vis_utils.draw_box_3d(ax2, obj, p)
# Render results
plt.draw()
plt.show()
if __name__ == "__main__":
main()
|
the-stack_0_18751 | from copy import copy
from collections import defaultdict
import numpy as np
from scipy.signal import medfilt
import math
import itertools
def meanfilt(data, window_width):
cumsum_vec = np.cumsum(np.insert(data, 0, 0))
ma_vec = (cumsum_vec[window_width:] -
cumsum_vec[:-window_width]) / window_width
ma_vec = data[:1] + list(ma_vec) + data[-1:]
return ma_vec
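# Worked example (illustrative): meanfilt([1, 2, 3, 4], 2) keeps the raw first
# and last samples and pads them around the window-2 running means, returning
# [1, 1.5, 2.5, 3.5, 4].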
class ready_hill:
def __init__(self, intensity, scan_id, mass, ion_mobility):
# self.mz = np.median(mass)
# self.mz = np.mean(mass)
# self.mz_std = np.std(mass)
self.intensity = intensity
self.scan_id = scan_id
self.scan_set = set(scan_id)
self.mass = mass
self.diff_for_output = 0
tmp = max(range(len(self.intensity)), key=self.intensity.__getitem__)
self.scan_of_max_intensity = self.scan_id[tmp]
self.max_intensity = self.intensity[tmp]
# self.mz = np.average(self.mass, weights=self.intensity)
# self.mz = sum(weight * value for weight, value in zip(self.intensity, self.mass)) / sum(self.intensity)
# self.mz = self.mass[tmp]
# self.max_intensity = sum(self.intensity)
if not (ion_mobility is None):
self.ion_mobility = ion_mobility
self.opt_ion_mobility = self.ion_mobility[tmp]
else:
self.ion_mobility = None
self.opt_ion_mobility = None
self.scan_len = len(self.scan_id)
self.idict = dict()
for i, j in zip(self.scan_id, self.intensity):
self.idict[i] = j
# self.sqrt_of_i_sum_squares = math.sqrt(
# sum(v**2 for v in self.idict.values()))
intensity_np = np.array(intensity)
self.sqrt_of_i_sum_squares = np.sqrt(np.sum(np.power(intensity_np, 2)))
class next_peak:
def __init__(
self,
next_mz_array,
next_intensity_array,
next_scan_id,
next_ion_mobility_array):
self.next_mz_array = next_mz_array
self.next_intensity_array = next_intensity_array
self.next_ion_mobility_array = next_ion_mobility_array
self.next_scan_id = next_scan_id
class peak_ion_mobility:
def __init__(self, mz, intensity, ion_mobility):
self.mz_array = [mz, ]
self.mass_array = [[mz, ], ]
self.intensity_array = [[intensity, ]]
self.ion_mobility_array = [[ion_mobility, ]]
self.intensity_max = [intensity, ]
self.ion_mobility_opt = [ion_mobility, ]
self.ion_mobility_max = [ion_mobility, ]
self.ion_mobility_min = [ion_mobility, ]
self.total = 1
def get_nearest_values(self, value):
return np.argsort(np.abs(self.mz_array) - value)
def extend(self, mz, intensity, ion_mobility):
self.mz_array.append(mz)
self.mass_array.append([mz, ])
self.intensity_array.append([intensity, ])
self.intensity_max.append(intensity)
self.ion_mobility_opt.append(ion_mobility)
self.ion_mobility_array.append([ion_mobility, ])
self.ion_mobility_max.append(ion_mobility)
self.ion_mobility_min.append(ion_mobility)
self.total += 1
def append_and_recalc(self, mz, intensity, ion_mobility, index):
self.mass_array[index].append(mz)
self.intensity_array[index].append(intensity)
self.ion_mobility_array[index].append(ion_mobility)
self.recalc(index)
def recalc(self, index):
self.mz_array[index] = np.mean(self.mass_array[index])
self.ion_mobility_max[index] = max(self.ion_mobility_array[index])
self.ion_mobility_min[index] = min(self.ion_mobility_array[index])
# if self.intensity_array[index][-1] > self.intensity_max[index]:
# # self.mz_array[index] =
# self.intensity_max[index] = self.intensity_array[index][-1]
# self.ion_mobility_opt[index] = self.ion_mobility_array[index][-1]
def push_me_to_the_peak_ion_mob(self, mz, intensity, ion_mobility, diff):
# nearest_ids = self.get_nearest_values(mz)
flag = 0
nearest_id = self.total - 1
mass_accuracy = diff * 1e-6 * mz
while nearest_id >= 0:
tmp_diff = abs(self.mz_array[nearest_id] - mz)
# tmp_diff = abs(self.mz_array[nearest_id] - mz) / mz
# if tmp_diff <= diff * 1e-6:
if tmp_diff <= mass_accuracy:
if abs(
self.ion_mobility_max[nearest_id] -
ion_mobility) <= 0.05 or abs(
self.ion_mobility_min[nearest_id] -
ion_mobility) <= 0.05:
flag = 1
self.append_and_recalc(
mz, intensity, ion_mobility, nearest_id)
break
else:
break
nearest_id -= 1
if not flag:
self.extend(mz, intensity, ion_mobility)
class peak:
def __init__(
self,
mz_array,
intensity,
scan_id,
start_id,
ion_mobility_array):
self.mz_array = copy(mz_array)
self.scan_id = [[scan_id, ] for _ in range(len(mz_array))]
# self.scan_id = []
# for _ in range(len(mz_array)):
# self.scan_id.append([scan_id, ])
self.intensity = [[i, ] for i in intensity]
if not (ion_mobility_array is None):
self.ion_mobility = [[i, ] for i in ion_mobility_array]
else:
self.ion_mobility = None
# self.intensity = []
# for i in intensity:
# self.intensity.append([i, ])
self.mass_array = [[i, ] for i in mz_array]
# self.mass_array = []
# for i in mz_array:
# self.mass_array.append([i, ])
self.finished_hills = []
self.crosslinked_hills = []
self.intervals = [start_id, ]
self.actual_degree = 0
self.medar = [1.0, ]
def get_potential_isotope_id(self, i_fast, i_idx):
tmp = self.finished_hills_fast_dict.get(i_fast, [])
# tmp.remove(i_idx)
return tmp
def recalc_fast_array_for_finished_hills(self, mz_step):
m_koef = mz_step
im_koef = 0.02
self.finished_hills_fast_array = [int(fh.mz/m_koef) for fh in self.finished_hills]
self.finished_hills_fast_dict = defaultdict(set)
for idx, fm in enumerate(self.finished_hills_fast_array):
self.finished_hills_fast_dict[fm-1].add(idx)
self.finished_hills_fast_dict[fm+1].add(idx)
self.finished_hills_fast_dict[fm].add(idx)
def recalc_fast_array(self, mz_step):
m_koef = mz_step
im_koef = 0.02
# self.fast_array = [int(tm/m_koef) for tm in self.mz_array]
self.fast_array = (self.mz_array/m_koef).astype(int)
self.fast_dict = defaultdict(set)
for idx, fm in enumerate(self.fast_array):
self.fast_dict[fm-1].add(idx)
self.fast_dict[fm+1].add(idx)
self.fast_dict[fm].add(idx)
def concat_peak_with(self, second_peak):
self.mz_array = self.mz_array + second_peak.mz_array
self.intensity = self.intensity + second_peak.intensity
if not (self.ion_mobility is None):
self.ion_mobility = self.ion_mobility + second_peak.ion_mobility
self.mass_array = self.mass_array + second_peak.mass_array
self.finished_hills = self.finished_hills + second_peak.finished_hills
self.crosslinked_hills = self.crosslinked_hills + \
second_peak.crosslinked_hills
self.intervals = self.intervals + second_peak.intervals
def crosslink_simple(self, mass_accuracy):
mz_step = mass_accuracy * 1e-6 * 2500
crosslink_counter = 0
self.finished_hills = sorted(
self.finished_hills,
key=lambda x: x.scan_id[0])
allowed_ids = set()
for i in self.intervals:
allowed_ids.add(i - 1)
allowed_ids.add(i - 2)
allowed_ids2 = set()
for i in self.intervals:
allowed_ids2.add(i)
allowed_ids2.add(i+1)
map_ids_1 = defaultdict(list)
map_ids_2 = defaultdict(set)
self.finished_hills_fast_dict = defaultdict(set)
m_koef = mz_step
for i, hill in enumerate(self.finished_hills):
end_scan = hill.scan_id[-1]
if end_scan in allowed_ids:
map_ids_1[end_scan].append(i)
fm = int(hill.mz / m_koef)
self.finished_hills_fast_dict[fm-1].add(i)
self.finished_hills_fast_dict[fm+1].add(i)
self.finished_hills_fast_dict[fm].add(i)
start_scan = hill.scan_id[0]
if start_scan in allowed_ids2:
map_ids_2[start_scan].add(i)
fm = int(hill.mz / m_koef)
self.finished_hills_fast_dict[fm-1].add(i)
self.finished_hills_fast_dict[fm+1].add(i)
self.finished_hills_fast_dict[fm].add(i)
banned_ids = set()
way_to_combine = []
for al_id in sorted(allowed_ids):
for i in map_ids_1[al_id]:
if i not in banned_ids:
hill = self.finished_hills[i]
fm = int(hill.mz / m_koef)
for j in self.finished_hills_fast_dict[fm]:
if (j in map_ids_2[al_id+1] or j in map_ids_2[al_id+2]) and j not in banned_ids:
hill2 = self.finished_hills[j]
if abs(hill.mz - hill2.mz) / \
hill.mz <= mass_accuracy * 1e-6:
banned_ids.add(i)
banned_ids.add(j)
way_to_combine.append((i, j))
for i, j in way_to_combine[::-1]:
self.finished_hills[i] = ready_hill(
intensity=hill.intensity +
hill2.intensity,
scan_id=hill.scan_id +
hill2.scan_id,
mass=hill.mass +
hill2.mass,
ion_mobility=(
hill.ion_mobility +
hill2.ion_mobility
if not (hill.ion_mobility is None)
else None))
del self.finished_hills[j]
for i in list(range(len(self.finished_hills)))[::-1]:
if len(self.finished_hills[i].scan_id) < 3:
del self.finished_hills[i]
def crosslink(self, mass_accuracy):
crosslink_counter = 0
# crosslink_counter2 = 0
self.finished_hills = sorted(
self.finished_hills,
key=lambda x: x.scan_id[0])
i = 0
ini_len = len(self.finished_hills)
while i < ini_len:
hill = self.finished_hills[i]
j = i + 1
while j < ini_len:
hill2 = self.finished_hills[j]
# if hill.scan_id[-1] == hill2.scan_id[0]:
if abs(hill.scan_id[-1] - hill2.scan_id[0]) <= 1:
# crosslink_counter2 += 1
if abs(hill.mz - hill2.mz) / \
hill.mz <= mass_accuracy * 1e-6:
self.finished_hills[i] = ready_hill(
intensity=hill.intensity + hill2.intensity,
scan_id=hill.scan_id + hill2.scan_id,
mass=hill.mass + hill2.mass,
ion_mobility=hill.ion_mobility +
hill2.ion_mobility)
del self.finished_hills[j]
ini_len -= 1
crosslink_counter += 1
elif hill2.scan_id[0] > hill.scan_id[-1] + 1:
break
j += 1
i += 1
# print(crosslink_counter)
# print(crosslink_counter2)
def sort_finished_hills(self):
self.finished_hills = sorted(self.finished_hills, key=lambda x: x.mz)
def check_its_ready(self, id_real, check_degree, min_length):
# ar_for_median = []
# for m_ar, scan_ar in zip(self.mass_array, self.scan_id):
# if scan_ar[-1] == id_real - 1:
# if len(m_ar) >= 2:
# ar_for_median.append(m_ar[-1]/m_ar[-2])
# # print(np.median(ar_for_median), 'median!')
# if len(ar_for_median) >= 20:
# self.medar.append(np.median(ar_for_median))
# else:
# self.medar.append(1.0)
mask_to_del = [True] * self.mz_array.size
set_to_del = set()
for i in range(self.mz_array.size)[::-1]:
# degree_actual = id_real - self.scan_id[i][0] - len(self.scan_id[i]) + 1
degree_actual = id_real - self.scan_id[i][-1]
# or (degree_actual == 2 and len(self.scan_id[i]) == 1):
if degree_actual > check_degree:
# degree_actual = id_real - self.scan_id[i][-1]
# if degree_actual > check_degree or (degree_actual == 2 and
# len(self.scan_id[i]) <= 3):
# list_intensity = self.intensity.pop(i)
list_intensity = self.intensity[i]
if not (self.ion_mobility is None):
# list_ion_mobility = self.ion_mobility.pop(i)
list_ion_mobility = self.ion_mobility[i]
else:
list_ion_mobility = None
# list_scan_id = self.scan_id.pop(i)
list_scan_id = self.scan_id[i]
# list_mass = self.mass_array.pop(i)
list_mass = self.mass_array[i]
lsi = len(list_scan_id)
if lsi >= min_length:
tmp_ready_hill = ready_hill(intensity=list_intensity,
scan_id=list_scan_id,
mass=list_mass,
ion_mobility=list_ion_mobility,
)
self.finished_hills.append(tmp_ready_hill)
mask_to_del[i] = False
set_to_del.add(i)
# if len(tmp_ready_hill.scan_id) >= min_length:
# self.finished_hills.append(tmp_ready_hill)
self.intensity = [i for j, i in enumerate(self.intensity) if j not in set_to_del]
self.scan_id = [i for j, i in enumerate(self.scan_id) if j not in set_to_del]
self.mass_array = [i for j, i in enumerate(self.mass_array) if j not in set_to_del]
if not (self.ion_mobility is None):
self.ion_mobility = [i for j, i in enumerate(self.ion_mobility) if j not in set_to_del]
self.mz_array = self.mz_array[mask_to_del]
def push_left(self, min_length):
mask_to_del = [True] * self.mz_array.size
for i in range(self.mz_array.size)[::-1]:
tmp_ready_hill = ready_hill(
intensity=self.intensity.pop(i),
scan_id=self.scan_id.pop(i),
mass=self.mass_array.pop(i),
ion_mobility=(
self.ion_mobility.pop(i) if not (
self.ion_mobility is None) else None),
)
mask_to_del[i] = False
if len(tmp_ready_hill.scan_id) >= min_length:
self.finished_hills.append(tmp_ready_hill)
self.mz_array = self.mz_array[mask_to_del]
# self.medar.append(1.0)
def get_nearest_value(self, value, mask):
return np.argmin(np.abs(self.mz_array[mask] - value))
def newid(self, nearest, mask):
return np.nonzero(mask)[0][nearest]
def get_potential_nearest(self, i_fast):
return self.fast_dict.get(i_fast, None)
def get_nearest_id(self, i, prev_nearest, diff, mz_array_l, ion_mobility, mask, mz_step):
mass_diff = diff * 1e-6 * i
best_diff = 2 * mass_diff
best_id = False
cur_md_abs = 0
best_prev_nearest_id = False
i_fast = int(i / mz_step)
set_idx = self.get_potential_nearest(i_fast)
if set_idx:
for nearest_id in set_idx:
if mask[nearest_id]:
# nearest_id = prev_nearest
# while nearest_id < mz_array_l:
cur_md = self.mz_array[nearest_id] - i
cur_md_abs = abs(cur_md)
if cur_md_abs <= mass_diff:
if not best_prev_nearest_id:
best_prev_nearest_id = int(nearest_id)
if (ion_mobility is None) or \
abs(ion_mobility -
self.ion_mobility[nearest_id][-1]) <= 0.1:
if cur_md_abs <= best_diff:
best_diff = float(cur_md_abs)
best_id = int(nearest_id)
# prev_nearest = int(nearest_id)
# elif cur_md > mass_diff:
# break
# nearest_id += 1
if not best_prev_nearest_id:
best_prev_nearest_id = prev_nearest
return best_id, best_diff / i, best_prev_nearest_id
def get_arrays(self, tmp1):
tmp1_nearest_id_arr = np.array([x[0] for x in tmp1])
tmp1_idx_arr = np.array([x[1] for x in tmp1])
tmp1_diff_arr = np.array([x[2] for x in tmp1])
return tmp1_nearest_id_arr, tmp1_idx_arr, tmp1_diff_arr
def push_me_to_the_peak(self, next_peak, diff, min_length, mz_step):
next_mz_array = next_peak.next_mz_array
next_intensity_array = next_peak.next_intensity_array
next_ion_mobility_array = next_peak.next_ion_mobility_array
next_scan_id = next_peak.next_scan_id
self.check_its_ready(
id_real=next_scan_id,
check_degree=2,
min_length=min_length)
mask = [True] * (len(self.mz_array))
tmp1 = []
tmp2 = []
prev_nearest = 0
self.recalc_fast_array(mz_step)
mask = [True] * (len(self.mz_array))
mz_array_l = len(self.mz_array)
for idx, i in enumerate(next_mz_array):
best_id, \
md_res, \
prev_nearest = self.get_nearest_id(
i,
prev_nearest,
diff,
mz_array_l,
(next_ion_mobility_array[idx]
if not (
next_ion_mobility_array is None)
else None), mask, mz_step)
if best_id:
tmp1.append([best_id, idx, md_res])
tmp1_nearest_id_arr, tmp1_idx_arr, tmp1_diff_arr = self.get_arrays(
tmp1)
sort_list = np.argsort(tmp1_diff_arr) # try different kinds
tmp1_nearest_id_arr = tmp1_nearest_id_arr[sort_list]
tmp1_idx_arr = tmp1_idx_arr[sort_list]
tmp1_diff_arr = tmp1_diff_arr[sort_list]
saved_index = set()
while tmp1:
# tmp_id = tmp1_idx_arr[0]
if tmp1_diff_arr.size == 0:
break
if tmp1_diff_arr[0] > diff * 1e-6:
break
tmp2.append((tmp1_nearest_id_arr[0], tmp1_idx_arr[0]))
saved_index.add(tmp1_idx_arr[0])
mask[tmp2[-1][0]] = False
if any(mask):
tmp1_nearest_id_arr = tmp1_nearest_id_arr[1:]
tmp1_idx_arr = tmp1_idx_arr[1:]
tmp1_diff_arr = tmp1_diff_arr[1:]
if tmp1_diff_arr.size == 0:
break
if tmp1_nearest_id_arr[0] in saved_index:
for idx, element in enumerate(tmp1_idx_arr):
if tmp1_nearest_id_arr[idx] in saved_index:
element_mz = next_mz_array[element]
element_im = (next_ion_mobility_array[element]
if not (
next_ion_mobility_array is None)
else None)
# nearest = self.get_nearest_value(element_mz, mask)
# nearest_id_old = self.newid(nearest, mask)
nearest_id, \
md_res, \
prev_nearest = self.get_nearest_id(
element_mz,
0,
diff,
0,
element_im, mask, mz_step)
if not nearest_id:
nearest_id = 0
md_res = 1e6
tmp1_nearest_id_arr[idx] = nearest_id
tmp1_diff_arr[idx] = md_res
else:
break
sort_list = np.argsort(
tmp1_diff_arr, kind='quicksort') # try different kinds
tmp1_nearest_id_arr = tmp1_nearest_id_arr[sort_list]
tmp1_idx_arr = tmp1_idx_arr[sort_list]
tmp1_diff_arr = tmp1_diff_arr[sort_list]
else:
break
for i, idx in tmp2:
# FIXME
# self.mz_array[i] = (self.mz_array[i] + next_mz_array[idx])/2
self.scan_id[i].append(next_scan_id)
self.intensity[i].append(next_intensity_array[idx])
if not (self.ion_mobility is None):
self.ion_mobility[i].append(next_ion_mobility_array[idx])
self.mass_array[i].append(next_mz_array[idx])
tmp_mass_array = self.mass_array[i][-3:]
self.mz_array[i] = sum(tmp_mass_array)/len(tmp_mass_array)
# self.mz_array[i] = np.average(self.mass_array[i][-3:], weights=self.intensity[i][-3:])
added = set(x[1] for x in tmp2)
mask2 = [(False if i in added else True)
for i in range(len(next_mz_array))]
next_mz_array_size = next_mz_array[mask2].size
self.mz_array = np.append(self.mz_array, next_mz_array[mask2])
n_i_a_m = next_intensity_array[mask2]
if not (self.ion_mobility is None):
n_im_a_m = next_ion_mobility_array[mask2]
n_m_a_m = next_mz_array[mask2]
for i in range(next_mz_array_size):
self.scan_id.append([next_scan_id, ])
self.intensity.append([n_i_a_m[i], ])
if not (self.ion_mobility is None):
self.ion_mobility.append([n_im_a_m[i], ])
self.mass_array.append([n_m_a_m[i], ])
self.selfsort()
def selfsort(self):
idx = np.argsort(self.mz_array)
self.mz_array = self.mz_array[idx]
self.scan_id = [self.scan_id[i] for i in idx]
self.intensity = [self.intensity[i] for i in idx]
if not (self.ion_mobility is None):
self.ion_mobility = [self.ion_mobility[i] for i in idx]
self.mass_array = [self.mass_array[i] for i in idx]
def cutting_down(self, intensity_propotion):
for idx, peak in enumerate(self.finished_hills):
max_intensity_propotion = peak.max_intensity * intensity_propotion
# FIXME try "and"
if (
peak.intensity[0] >= max_intensity_propotion and
peak.intensity[-1] >= max_intensity_propotion):
del self.finished_hills[idx]
def split_peaks2(self, hillValleyFactor):
set_to_del = set()
new_hills = []
for hill_idx, hill in enumerate(self.finished_hills):
if len(hill.mass) >= 6:
mz_diff = np.array([z - hill.mz for z in hill.mass])
std_5 = np.std(np.diff(mz_diff))
smothed_intensity = list(np.abs(np.diff(mz_diff))/std_5)
c_len = len(smothed_intensity) - 3
idx = 3
min_idx_list = []
min_val = 1.0
while idx <= c_len:
mult_val = smothed_intensity[idx]
if mult_val >= hillValleyFactor:
# if not len(min_idx_list) or idx >= min_idx_list[-1] + 3:
# min_idx_list.append(idx)
# min_val = mult_val
# elif mult_val < min_val:
# min_idx_list[-1] = idx
# min_val = mult_val
if (not len(min_idx_list) or idx >= min_idx_list[-1] + 3) and max(hill.intensity[0:idx-1]) >= 1.5 * max(hill.intensity[0], hill.intensity[idx-1]) and max(hill.intensity[idx:]) >= 1.5 * max(hill.intensity[idx], hill.intensity[-1]):
min_idx_list.append(idx)
min_val = mult_val
elif (mult_val < min_val) and max(hill.intensity[0:idx-1]) >= 1.5 * max(hill.intensity[0], hill.intensity[idx-1]) and max(hill.intensity[idx:]) >= 1.5 * max(hill.intensity[idx], hill.intensity[-1]):
min_idx_list[-1] = idx
min_val = mult_val
idx += 1
if len(min_idx_list):
set_to_del.add(hill_idx)
prev_idx = 1
for min_idx in min_idx_list:
new_hills.append(ready_hill(
intensity=hill.intensity[prev_idx-1:min_idx],
scan_id=hill.scan_id[prev_idx-1:min_idx],
mass=hill.mass[prev_idx-1:min_idx],
ion_mobility=(
hill.ion_mobility[prev_idx-1:min_idx] if not
(hill.ion_mobility is None) else
None)))
prev_idx = min_idx
new_hills.append(ready_hill(
intensity=hill.intensity[min_idx-1:],
scan_id=hill.scan_id[min_idx-1:],
mass=hill.mass[min_idx-1:],
ion_mobility=(
hill.ion_mobility[min_idx-1:] if not
(hill.ion_mobility is None) else
None)))
print(len(self.finished_hills))
for idx in sorted(list(set_to_del))[::-1]:
del self.finished_hills[idx]
print(len(self.finished_hills))
self.finished_hills.extend(new_hills)
print(len(self.finished_hills))
def calc_accurate_mz(self):
for hill in self.finished_hills:
hill.mz = sum(weight * value for weight, value in zip(hill.intensity, hill.mass)) / sum(hill.intensity)
def split_peaks(self, hillValleyFactor, min_length_hill):
set_to_del = set()
new_hills = []
for hill_idx, hill in enumerate(self.finished_hills):
hill_length = len(hill.intensity)
if hill_length >= min_length_hill * 2:
# smothed_intensity = hill.intensity
smothed_intensity = meanfilt(hill.intensity, 2)
# smothed_intensity = medfilt(smothed_intensity, 3)
# smothed_intensity = medfilt(hill.intensity, 3)
# smothed_intensity = meanfilt(smothed_intensity, 3)
c_len = hill_length - min_length_hill
idx = int(min_length_hill)
# min_idx = False
min_idx_list = []
min_val = 0
l_idx = 0
while idx <= c_len:
if len(min_idx_list) and idx >= min_idx_list[-1] + min_length_hill:
l_idx = min_idx_list[-1]
l_r = max(smothed_intensity[l_idx:idx]) / float(smothed_intensity[idx])
if l_r >= hillValleyFactor:
r_r = max(smothed_intensity[idx:]) / float(smothed_intensity[idx])
if r_r >= hillValleyFactor:
# print(l_r, r_r)
# if l_r >= hillValleyFactor and r_r >= hillValleyFactor:
mult_val = l_r * r_r
# if mult_val < min_val:
# min_val = mult_val
if not len(min_idx_list) or idx >= min_idx_list[-1] + min_length_hill:
min_idx_list.append(idx)
min_val = mult_val
elif mult_val > min_val:
min_idx_list[-1] = idx
min_val = mult_val
# min_idx = idx
idx += 1
if len(min_idx_list):
set_to_del.add(hill_idx)
prev_idx = 0
for min_idx in min_idx_list:
new_hills.append(ready_hill(
intensity=hill.intensity[prev_idx:min_idx+1],
scan_id=hill.scan_id[prev_idx:min_idx+1],
mass=hill.mass[prev_idx:min_idx+1],
ion_mobility=(
hill.ion_mobility[prev_idx:min_idx+1] if not
(hill.ion_mobility is None) else
None)))
prev_idx = min_idx
new_hills.append(ready_hill(
intensity=hill.intensity[min_idx:],
scan_id=hill.scan_id[min_idx:],
mass=hill.mass[min_idx:],
ion_mobility=(
hill.ion_mobility[min_idx:] if not
(hill.ion_mobility is None) else
None)))
# print(len(new_hills))
# print(len(set_to_del))
for idx in sorted(list(set_to_del))[::-1]:
del self.finished_hills[idx]
self.finished_hills.extend(new_hills)
# self.finished_hills = result
class feature:
def __init__(self, finished_hills, each, each_id, negative_mode, isotopes_mass_error_map, mass_accuracy):
self.charge = each[1][0][1]
self.shift = each[3]
# self.mz = finished_hills[each[0]].mz
# a_cus = 0.0033946045716987906 / 1000
# b_cus = -1.8123641799696435
mass_for_average2 = [np.average(finished_hills[each[0]].mass, weights=finished_hills[each[0]].intensity)]
intensity_for_average2 = [finished_hills[each[0]].max_intensity, ]
# for i_numb, ech in enumerate(each[1]):
# mass_for_average2.append(np.average(finished_hills[ech[0]].mass, weights=finished_hills[ech[0]].intensity) - (i_numb+1)*1.00335/ech[1])
# intensity_for_average2.append(finished_hills[ech[0]].max_intensity)
# mass_for_average2 = [zm * (1 - 1e-6 * (a_cus * zi + b_cus)) for zm, zi in zip(mass_for_average2, intensity_for_average2)]
self.mz = np.average(mass_for_average2, weights=intensity_for_average2)
mass_acc = mass_accuracy
self.mz_tol = mass_acc*1e-6*self.mz
# mass_for_average = finished_hills[each[0]].mass + list(itertools.chain.from_iterable([(z * (1 - 1e-6 * isotopes_mass_error_map[i_numb+1][0]) - (i_numb+1)*1.00335/ech[1]) for z in finished_hills[ech[0]].mass] for i_numb, ech in enumerate(each[1])))
# # mass_for_average = finished_hills[each[0]].mass + list(itertools.chain.from_iterable([(z - (i_numb+1)*1.00335/ech[1]) for z in finished_hills[ech[0]].mass] for i_numb, ech in enumerate(each[1])))
intensity_for_average = finished_hills[each[0]].intensity + list(itertools.chain.from_iterable(finished_hills[ech[0]].intensity for ech in each[1]))
# # mass_for_average = [zm * (1 - 1e-6 * (a_cus * zi + b_cus)) for zm, zi in zip(mass_for_average, intensity_for_average)]
# scans_for_average = finished_hills[each[0]].scan_id + list(itertools.chain.from_iterable(finished_hills[ech[0]].scan_id for ech in each[1]))
# # print(mass_for_average, intensity_for_average)
# self.mz = np.average(mass_for_average, weights=intensity_for_average)
# # self.mz = np.median(mass_for_average)
scans_for_average = finished_hills[each[0]].scan_id + list(itertools.chain.from_iterable(finished_hills[ech[0]].scan_id for ech in each[1]))
# self.mz = np.median(finished_hills[each[0]].mass)
# self.mz = np.mean(finished_hills[each[0]].mass)
self.negative_mode = negative_mode
if negative_mode == True:
self.neutral_mass = self.mz * self.charge + \
1.0072765 * self.charge - self.shift * 1.00335
else:
self.neutral_mass = self.mz * self.charge - \
1.0072765 * self.charge - self.shift * 1.00335
self.isotopes_numb = len(each[1])
self.scan_numb = len(finished_hills[each[0]].scan_id)
self.scans = finished_hills[each[0]].scan_id
self.id_for_scan = finished_hills[each[0]].intensity.index(
max(finished_hills[each[0]].intensity))
self.intensity = finished_hills[each[0]].max_intensity
# self.mz = self.mz * (1 - 1e-6 * (a_cus * max(intensity_for_average2) + b_cus))
# self.id_for_scan = intensity_for_average.index(
# max(intensity_for_average))
# self.intensity = max(intensity_for_average)
self.idict = finished_hills[each[0]].idict
self.sqrt_of_i_sum_squares = math.sqrt(
sum(v**2 for v in self.idict.values()))
self.scan_set = finished_hills[each[0]].scan_set
if not (finished_hills[each[0]].ion_mobility is None):
self.ion_mobility = finished_hills[each[0]].opt_ion_mobility
else:
self.ion_mobility = None
# self.scan_id = scans_for_average[self.id_for_scan]
# self.scan_id = finished_hills[each[0]].scan_id[self.id_for_scan]
# self.RT = self.scan_numb
self.scan_id = int(np.average(scans_for_average, weights=intensity_for_average))
self.RT = int(np.average(scans_for_average, weights=intensity_for_average))
# self.sulfur = (1 if each[2] else 0)
self.sulfur = (each[1][1][4] if len(each[1]) > 1 else -1)
self.cos_corr = each[4][0]
self.cos_corr_2 = each[4][1]
self.corr_fill_zero = each[4][2]
self.diff_for_output = each[4][3]
self.intensity_1 = each[4][4]
self.scan_id_1 = each[4][5]
self.mz_std_1 = np.std(each[4][6])
self.intensity_2 = each[4][7]
self.scan_id_2 = each[4][8]
self.mz_std_2 = np.std(each[4][9])
self.id = each_id
self.ms2_scan = []
def targeted(self, scan):
self.ms2_scan.append(scan)
|
the-stack_0_18753 | import copy
import random
import threading
import time
from core_symbol import CORE_SYMBOL
class StressNetwork:
speeds = [1, 5, 10, 30, 60, 100, 500]
sec = 10
maxthreads = 100
trList = []
def maxIndex(self):
return len(self.speeds)
def randAcctName(self):
s = ""
for i in range(12):
s = s + random.choice("abcdefghijklmnopqrstuvwxyz12345")
return s
def _transfer(self, node, acc1, acc2, amount, threadId, round):
memo = "%d %d" % (threadId, round)
tr = node.transferFunds(acc1, acc2, amount, memo)
self.trList.append(tr)
def execute(self, cmdInd, node, ta, eosio):
print("\n==== network stress test: %d transaction(s)/s for %d secs ====" % (self.speeds[cmdInd], self.sec))
total = self.speeds[cmdInd] * self.sec
ta.name = self.randAcctName()
acc1 = copy.copy(ta)
print("creating new account %s" % (ta.name))
tr = node.createAccount(ta, eosio, stakedDeposit=0, waitForTransBlock=True, exitOnError=True)
trid = node.getTransId(tr)
if trid is None:
return ([], "", 0.0, "failed to create account")
print("transaction id %s" % (trid))
ta.name = self.randAcctName()
acc2 = copy.copy(ta)
print("creating new account %s" % (ta.name))
tr = node.createAccount(ta, eosio, stakedDeposit=0, waitForTransBlock=True, exitOnError=True)
trid = node.getTransId(tr)
if trid is None:
return ([], "", 0.0, "failed to create account")
print("transaction id %s" % (trid))
print("issue currency0000 into %s" % (acc1.name))
contract = "eosio"
action = "issue"
data = "{\"to\":\"" + acc1.name + "\",\"quantity\":\"1000000.0000 " + CORE_SYMBOL + "\"}"
opts = "--permission eosio@active"
tr = node.pushMessage(contract, action, data, opts)
trid = node.getTransId(tr[1])
if trid is None:
return ([], "", 0.0, "failed to issue currency0000")
print("transaction id %s" % (trid))
node.waitForTransInBlock(trid)
self.trList = []
expBal = 0
nthreads = self.maxthreads
if nthreads > self.speeds[cmdInd]:
nthreads = self.speeds[cmdInd]
cycle = int(total / nthreads)
total = cycle * nthreads # rounding
delay = 1.0 / self.speeds[cmdInd] * nthreads
print("start currency0000 trasfer from %s to %s for %d times with %d threads" % (
acc1.name, acc2.name, total, nthreads))
t00 = time.time()
for k in range(cycle):
t0 = time.time()
amount = 1
threadList = []
for m in range(nthreads):
th = threading.Thread(target=self._transfer, args=(node, acc1, acc2, amount, m, k))
th.start()
threadList.append(th)
for th in threadList:
th.join()
expBal = expBal + amount * nthreads
t1 = time.time()
if (t1 - t0 < delay):
time.sleep(delay - (t1 - t0))
t11 = time.time()
print("time used = %lf" % (t11 - t00))
actBal = node.getAccountBalance(acc2.name)
print("account %s: expect Balance:%d, actual Balance %d" % (acc2.name, expBal, actBal))
transIdlist = []
for tr in self.trList:
trid = node.getTransId(tr)
transIdlist.append(trid)
node.waitForTransInBlock(trid)
return (transIdlist, acc2.name, expBal, "")
def on_exit(self):
print("end of network stress tests")
|
the-stack_0_18754 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import os
import sys
from distutils.version import LooseVersion
import numpy as np
from vispy.visuals.line.arrow import ARROW_TYPES
from vispy.scene import visuals, transforms
from vispy.testing import (requires_application, TestingCanvas,
run_tests_if_main, assert_raises, SkipTest)
from vispy.testing.image_tester import assert_image_approved
vertices = np.array([
[25, 25],
[25, 75],
[50, 25],
[50, 75],
[75, 25],
[75, 75]
], np.float32)
vertices += 0.33
arrows = np.array([
vertices[:2],
vertices[3:1:-1],
vertices[4:],
vertices[-1:-3:-1]
]).reshape((4, 4))
@requires_application()
def test_arrow_draw():
"""Test drawing arrows without transforms"""
if os.getenv('TRAVIS', 'false') == 'true' and sys.version[:3] == '2.6':
# TODO: Fix this (issue #1042)
raise SkipTest('Travis fails due to FB stack problem')
with TestingCanvas() as c:
for arrow_type in ARROW_TYPES:
arrow = visuals.Arrow(pos=vertices, arrow_type=arrow_type,
arrows=arrows, arrow_size=10, color='red',
connect="segments", parent=c.scene)
assert_image_approved(c.render(), 'visuals/arrow_type_%s.png' %
arrow_type)
arrow.parent = None
@requires_application()
def test_arrow_transform_draw():
"""Tests the ArrowVisual when a transform is applied"""
# TODO: fix AppVeyor - error comes up with bollu/vispy:cassowary-constaints
# commit SHA: 29303009a76d5c6931b1991aa7bdf5192ace9c4f
if os.getenv('APPVEYOR', '').lower() == 'true':
raise SkipTest('AppVeyor has unknown failure')
old_numpy = LooseVersion(np.__version__) < '1.8'
if os.getenv('TRAVIS', 'false') == 'true' and (sys.version[:3] == '2.6' or
old_numpy):
        # TODO: Fix this (issue #1042)
raise SkipTest('Travis fails due to FB stack problem')
with TestingCanvas() as c:
for arrow_type in ARROW_TYPES:
arrow = visuals.Arrow(pos=vertices, arrow_type=arrow_type,
arrows=arrows, arrow_size=10, color='red',
connect="segments", parent=c.scene)
arrow.transform = transforms.STTransform(scale=(0.5, 0.75),
translate=(-20, -20))
assert_image_approved(c.render(),
'visuals/arrow_transform_type_%s.png' %
arrow_type)
arrow.parent = None
@requires_application()
def test_arrow_reactive():
"""Tests the reactive behaviour of the ArrowVisual properties"""
# TODO: fix AppVeyor - error comes up with bollu/vispy:cassowary-constaints
# commit SHA: 29303009a76d5c6931b1991aa7bdf5192ace9c4f
if os.getenv('APPVEYOR', '').lower() == 'true':
raise SkipTest('AppVeyor has unknown failure')
with TestingCanvas() as c:
arrow = visuals.Arrow(pos=vertices, arrows=arrows,
connect="segments", parent=c.scene)
arrow.arrow_type = "stealth"
assert_image_approved(c.render(), 'visuals/arrow_reactive1.png')
arrow.arrow_size = 20
assert_image_approved(c.render(), 'visuals/arrow_reactive2.png')
@requires_application()
def test_arrow_attributes():
"""Tests if the ArrowVisual performs the required checks for the
attributes"""
with TestingCanvas() as c:
arrow = visuals.Arrow(pos=vertices, arrow_type="stealth",
arrows=arrows, arrow_size=10, color='red',
connect="segments", parent=c.scene)
def size_test():
arrow.arrow_size = 0.0
def type_test():
arrow.arrow_type = "random_non_existent"
assert_raises(
ValueError, size_test
)
assert_raises(
ValueError, type_test
)
run_tests_if_main()
|
the-stack_0_18755 | # -*- coding: utf-8 -*-
"""
DCI-P3 & DCI-P3+ Colourspaces
=============================
Defines the *DCI-P3* and *DCI-P3+* colourspaces:
- :attr:`colour.models.DCI_P3_COLOURSPACE`.
- :attr:`colour.models.DCI_P3_P_COLOURSPACE`.
See Also
--------
`RGB Colourspaces Jupyter Notebook
<http://nbviewer.jupyter.org/github/colour-science/colour-notebooks/\
blob/master/notebooks/models/rgb.ipynb>`_
References
----------
- :cite:`Canon2014a` : Canon. (2014). EOS C500 Firmware Update. Retrieved
August 27, 2016, from https://www.usa.canon.com/internet/portal/us/home/\
explore/product-showcases/cameras-and-lenses/cinema-eos-firmware/c500
- :cite:`DigitalCinemaInitiatives2007b` : Digital Cinema Initiatives. (2007).
Digital Cinema System Specification - Version 1.1. Retrieved from
http://www.dcimovies.com/archives/spec_v1_1/\
DCI_DCinema_System_Spec_v1_1.pdf
- :cite:`Hewlett-PackardDevelopmentCompany2009a` : Hewlett-Packard
Development Company. (2009). Understanding the HP DreamColor LP2480zx
DCI-P3 Emulation Color Space. Retrieved from http://www.hp.com/\
united-states/campaigns/workstations/pdfs/lp2480zx-dci--p3-emulation.pdf
"""
from __future__ import division, unicode_literals
import numpy as np
from functools import partial
from colour.colorimetry import ILLUMINANTS
from colour.models.rgb import (RGB_Colourspace, gamma_function,
normalised_primary_matrix)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
'DCI_P3_PRIMARIES', 'DCI_P3_P_PRIMARIES', 'DCI_P3_WHITEPOINT_NAME',
'DCI_P3_WHITEPOINT', 'DCI_P3_TO_XYZ_MATRIX', 'XYZ_TO_DCI_P3_MATRIX',
'DCI_P3_P_TO_XYZ_MATRIX', 'XYZ_TO_DCI_P3_P_MATRIX', 'DCI_P3_COLOURSPACE',
'DCI_P3_P_COLOURSPACE'
]
DCI_P3_PRIMARIES = np.array([
[0.6800, 0.3200],
[0.2650, 0.6900],
[0.1500, 0.0600],
])
"""
*DCI-P3* colourspace primaries.
DCI_P3_PRIMARIES : ndarray, (3, 2)
"""
DCI_P3_P_PRIMARIES = np.array([
[0.7400, 0.2700],
[0.2200, 0.7800],
[0.0900, -0.0900],
])
"""
*DCI-P3+* colourspace primaries.
DCI_P3_P_PRIMARIES : ndarray, (3, 2)
"""
DCI_P3_WHITEPOINT_NAME = 'DCI-P3'
"""
*DCI-P3* colourspace whitepoint name.
DCI_P3_WHITEPOINT_NAME : unicode
Warning
-------
DCI-P3 illuminant has no associated spectral distribution. DCI has no
official reference spectral measurement for this whitepoint. The closest
matching spectral distribution is Kinoton 75P projector.
"""
DCI_P3_WHITEPOINT = (
ILLUMINANTS['CIE 1931 2 Degree Standard Observer'][DCI_P3_WHITEPOINT_NAME])
"""
*DCI-P3* colourspace whitepoint.
DCI_P3_WHITEPOINT : ndarray
"""
DCI_P3_TO_XYZ_MATRIX = normalised_primary_matrix(DCI_P3_PRIMARIES,
DCI_P3_WHITEPOINT)
"""
*DCI-P3* colourspace to *CIE XYZ* tristimulus values matrix.
DCI_P3_TO_XYZ_MATRIX : array_like, (3, 3)
"""
XYZ_TO_DCI_P3_MATRIX = np.linalg.inv(DCI_P3_TO_XYZ_MATRIX)
"""
*CIE XYZ* tristimulus values to *DCI-P3* colourspace matrix.
XYZ_TO_DCI_P3_MATRIX : array_like, (3, 3)
"""
DCI_P3_P_TO_XYZ_MATRIX = normalised_primary_matrix(DCI_P3_P_PRIMARIES,
DCI_P3_WHITEPOINT)
"""
*DCI-P3+* colourspace to *CIE XYZ* tristimulus values matrix.
DCI_P3_P_TO_XYZ_MATRIX : array_like, (3, 3)
"""
XYZ_TO_DCI_P3_P_MATRIX = np.linalg.inv(DCI_P3_P_TO_XYZ_MATRIX)
"""
*CIE XYZ* tristimulus values to *DCI-P3+* colourspace matrix.
XYZ_TO_DCI_P3_P_MATRIX : array_like, (3, 3)
"""
DCI_P3_COLOURSPACE = RGB_Colourspace(
'DCI-P3',
DCI_P3_PRIMARIES,
DCI_P3_WHITEPOINT,
DCI_P3_WHITEPOINT_NAME,
DCI_P3_TO_XYZ_MATRIX,
XYZ_TO_DCI_P3_MATRIX,
partial(gamma_function, exponent=1 / 2.6),
partial(gamma_function, exponent=2.6),
)
DCI_P3_COLOURSPACE.__doc__ = """
*DCI-P3* colourspace.
References
----------
:cite:`DigitalCinemaInitiatives2007b`,
:cite:`Hewlett-PackardDevelopmentCompany2009a`
DCI_P3_COLOURSPACE : RGB_Colourspace
"""
DCI_P3_P_COLOURSPACE = RGB_Colourspace(
'DCI-P3+',
DCI_P3_P_PRIMARIES,
DCI_P3_WHITEPOINT,
DCI_P3_WHITEPOINT_NAME,
DCI_P3_P_TO_XYZ_MATRIX,
XYZ_TO_DCI_P3_P_MATRIX,
partial(gamma_function, exponent=1 / 2.6),
partial(gamma_function, exponent=2.6),
)
DCI_P3_P_COLOURSPACE.__doc__ = """
*DCI-P3+* colourspace.
References
----------
:cite:`Canon2014a`
DCI_P3_P_COLOURSPACE : RGB_Colourspace
"""
|
the-stack_0_18756 | import turtle
turtle.pensize(2)
turtle.speed(12)
turtle.bgcolor("Black")
for i in range(10):
for colours in ["Yellow","Blue","White","Green","Red","Pink"]:
turtle.color(colours)
turtle.circle(200)
turtle.left(10)
turtle.hideturtle()
|
the-stack_0_18759 | import os
from PyQt5.QtWidgets import (QPushButton, QVBoxLayout, QHBoxLayout, QFrame, QLabel, QFileDialog, QMessageBox, QComboBox,
QProgressBar, QSizePolicy)
from app.components.stretch_wrapper import NoStretch
import pandas as pd
from model.predict_from_file import predict_dataset
from model.predict_from_folder import predict_folder
class Model(QFrame):
    default_model_text = "<i>Please select a TensorFlow model directory.</i>"
    default_file_text = "<i>Please select a folder of images or a csv of URLs.</i>"
predict_text = "Predict"
predicting_text = "Predicting..."
def __init__(self, app):
super().__init__()
# initialize our variables
self.app = app
self.tf_directory = None
self.file = None
self.folder = None
self.init_ui()
def init_ui(self):
# make our UI
self.setObjectName("content")
layout = QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
# our main content area
content = QFrame()
content_layout = QVBoxLayout()
# some info
title = QLabel("Model")
title.setObjectName("h1")
description = QLabel(
"Run your exported TensorFlow model from Lobe \non a folder of images or a .csv/.xlsx file of image URLs.\nThis will produce a new prediction .csv with the image filepath or URL, \nthe model's prediction, and the model's confidence.")
description.setObjectName("h2")
# model select button
self.model_button = QPushButton("Select model directory")
self.model_button.clicked.connect(self.select_directory)
model_container = NoStretch(self.model_button)
model_container.setObjectName("separate")
self.model_label = QLabel(self.default_model_text)
# file or folder selection button
self.folder_button = QPushButton("Select folder")
self.folder_button.clicked.connect(self.select_image_folder)
self.file_button = QPushButton("Select file")
self.file_button.clicked.connect(self.select_file)
buttons_container = NoStretch([self.folder_button, self.file_button])
buttons_container.setObjectName("separate")
self.path_label = QLabel(self.default_file_text)
# url column header
self.url_label = QLabel("Column with image URLs:")
self.url_label.setObjectName("separateSmall")
self.url_label.hide()
self.url_dropdown = QComboBox()
self.url_dropdown.setSizeAdjustPolicy(QComboBox.AdjustToContents)
self.url_dropdown.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)
self.url_container = NoStretch(self.url_dropdown)
self.url_container.hide()
# predict button
self.predict_button = QPushButton(self.predict_text)
self.predict_button.setEnabled(False)
self.predict_button.clicked.connect(self.predict)
predict_container = NoStretch(self.predict_button)
predict_container.setObjectName("separate")
self.progress_bar = QProgressBar()
self.progress_bar.hide()
# make our content layout
content_layout.addWidget(title)
content_layout.addWidget(description)
content_layout.addWidget(model_container)
content_layout.addWidget(self.model_label)
content_layout.addWidget(buttons_container)
content_layout.addWidget(self.path_label)
content_layout.addWidget(self.url_label)
content_layout.addWidget(self.url_container)
content_layout.addWidget(predict_container)
content_layout.addWidget(self.progress_bar)
content_layout.addStretch(1)
content.setLayout(content_layout)
layout.addWidget(content)
layout.addStretch(1)
self.setLayout(layout)
def select_directory(self):
self.tf_directory = QFileDialog.getExistingDirectory(self, "Select TensorFlow Model Directory")
self.model_label.setText(f"<i>{self.tf_directory}</i>" if self.tf_directory else self.default_model_text)
self.check_predict_button()
def select_file(self):
self.file = QFileDialog.getOpenFileName(self, 'Select CSV File', filter="CSV (*.csv *.xlsx)")[0]
self.path_label.setText(f"<i>{self.file}</i>" if self.file else self.default_file_text)
self.folder = None
self.parse_headers()
self.check_predict_button()
def select_image_folder(self):
self.folder = QFileDialog.getExistingDirectory(self, "Select Images Directory")
self.path_label.setText(f"<i>{self.folder}</i>" if self.folder else self.default_file_text)
self.file = None
self.parse_headers()
self.check_predict_button()
def check_predict_button(self):
# enable the button when we have both a model and file
if self.tf_directory and (self.file or self.folder):
self.predict_button.setEnabled(True)
else:
self.predict_button.setEnabled(False)
def parse_headers(self):
if self.file:
# read the file for its headers and set our dropdown boxes appropriately
try:
if os.path.splitext(self.file)[1] == ".csv":
csv = pd.read_csv(self.file, header=0)
else:
csv = pd.read_excel(self.file, header=0)
self.url_dropdown.clear()
for header in list(csv.columns):
self.url_dropdown.addItem(header)
self.url_dropdown.adjustSize()
self.url_label.show()
self.url_container.show()
except Exception as e:
QMessageBox.about(self, "Alert", f"Error reading csv: {e}")
self.clear_headers()
else:
self.clear_headers()
def clear_headers(self):
self.url_dropdown.clear()
self.url_label.hide()
self.url_container.hide()
def predict(self):
# disable the buttons so we can't click again
self.predict_button.setEnabled(False)
self.predict_button.setText(self.predicting_text)
self.model_button.setEnabled(False)
self.file_button.setEnabled(False)
self.folder_button.setEnabled(False)
self.progress_bar.setValue(0)
self.progress_bar.show()
self.app.processEvents()
url_col = self.url_dropdown.currentText()
try:
if self.file:
predict_dataset(model_dir=self.tf_directory, filepath=self.file, url_col=url_col,
progress_hook=self.progress_hook)
elif self.folder:
predict_folder(model_dir=self.tf_directory, img_dir=self.folder, move=True, csv=True,
progress_hook=self.progress_hook)
except Exception as e:
QMessageBox.about(self, "Alert", f"Error predicting: {e}")
finally:
self.done()
def progress_hook(self, current, total):
        self.progress_bar.setValue(int(float(current) / total * 100))
if current == total:
self.done()
# make sure to update the UI
self.app.processEvents()
def done(self):
self.progress_bar.setValue(0)
self.progress_bar.hide()
self.predict_button.setEnabled(True)
self.predict_button.setText(self.predict_text)
self.model_button.setEnabled(True)
self.file_button.setEnabled(True)
self.folder_button.setEnabled(True)
self.app.processEvents()
|
the-stack_0_18760 | import sys
from PyQt5.QtGui import QIcon
import infomaker
from PyQt5.QtWidgets import *
import datetime
root = infomaker.maker()
# UI window
class UI(QWidget):
def __init__(self, weather, musicrank, schoolmenu):
super().__init__()
self.weather = weather
self.music = musicrank
self.menu = schoolmenu
# window size & position
self.top = 100
self.left = 500
self.width = 900
self.height = 600
self.setGeometry(self.left, self.top, self.width, self.height)
# window title & icon
self.setWindowTitle('서울 양전초등학교 정보 프로그램')
self.setWindowIcon(QIcon('./asset/Logo.png'))
# background color
self.setStyleSheet("background-color: white;")
# call lables()
self.labels()
# weather, music rank, school menu labels
def labels(self):
# weather
weatherTitle = QLabel('오늘의 날씨', self)
weatherlabel = QLabel(self.weather, self)
# music ranking
musicTitle = QLabel('멜론 차트 랭킹', self)
musiclabel = QLabel(self.music, self)
# school menus
menuTitle = QLabel('학교 급식', self)
menulabel = QLabel(self.menu, self)
# title font
font1 = weatherTitle.font()
font1.setPointSize(30)
font1.setBold(True)
font1.setFamily('맑은 고딕')
# text font
font2 = weatherlabel.font()
font2.setPointSize(12)
font2.setFamily('맑은 고딕 Semilight')
# apply title labels to font1
weatherTitle.setFont(font1)
musicTitle.setFont(font1)
menuTitle.setFont(font1)
# apply text labels to font2
weatherlabel.setFont(font2)
musiclabel.setFont(font2)
menulabel.setFont(font2)
grid = QGridLayout()
# register labels to grid
grid.addWidget(weatherTitle)
grid.addWidget(weatherlabel)
grid.addWidget(musicTitle)
grid.addWidget(musiclabel)
grid.addWidget(menuTitle)
grid.addWidget(menulabel)
self.setLayout(grid)
if __name__ == '__main__':
if datetime.date.today() != root.start:
print("/Log> root.start was updated")
root.start = datetime.date.today()
weather = root.weather("개포동")
musicRank = root.music_rank(5)
schoolMenu = root.school_menu("양전초등학교")
print("/Log> Data was updated")
app = QApplication(sys.argv)
win = UI(weather, musicRank, schoolMenu)
win.show()
app.exec_()
|
the-stack_0_18762 | #!/usr/bin/env python3
##########################################################################################
# Developers: Aditi Malladi and Icaro Alzuru
# Project: HuMaIN (http://humain.acis.ufl.edu)
# Description: A script to generate a box and whisker plot from input csv files
##########################################################################################
# Copyright 2019 Advanced Computing and Information Systems (ACIS) Lab - UF
# (https://www.acis.ufl.edu/)
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
##########################################################################################
import argparse, csv, sys
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
# agg backend is used to create plot as a .png file
mpl.use('agg')
def plot_box_whisker(data_to_plot):
fig = plt.figure(1, figsize=(9, 6))
ax = fig.add_subplot(111)
bp = ax.boxplot(data_to_plot, patch_artist=True)
try:
fig.savefig(args.output_file, bbox_inches='tight')
except:
print("\nERROR: The output image file was not created successfully. Please use a valid extension for an image (e.g .png) and path.\n")
sys.exit(1)
def read_files():
for f in args.files:
values = []
with open(f, newline='') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',')
for row in spamreader:
temp = 0
for val in row[1:]:
temp = temp + float(val)
values.append(temp)
        values = np.array(values).astype(float)
        data_to_plot.append(values)
plot_box_whisker(data_to_plot)
if __name__ == '__main__':
""" Creates a box whisker plot. Takes input of .csv files sepearted by "," delimiter.
"""
parser = argparse.ArgumentParser("Creates a box whisker plot.")
parser.add_argument('-f', '--files', action="append",
required=True, help="Files to import values from")
parser.add_argument('-o', '--output_file', action="store",
required=True, help="Output box plot image to be saved")
args = parser.parse_args()
data_to_plot = []
# Can pass one or multiple files to combine
# Can pass files with multiple rows - all values of each row will be added before generating box plot
read_files()
# usage: python3 common/post-processing/box_whisker_plot.py -f duration_multiple.csv
# usage: python3 common/post-processing/box_whisker_plot.py -f duration.csv
# usage: python3 common/post-processing/box_whisker_plot.py -f duration.csv -f duration_multiple.csv
|
the-stack_0_18764 | #from distutils.core import setup
from setuptools import setup
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory,'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name = 'lofarSun', # How you named your package folder
    packages = ['lofarSun'], # Choose the same as "name"
version = '0.2.6', # Start with a small number and increase it with every change you make
    license='MIT', # Choose a license from here: https://help.github.com/articles/licensing-a-repository
description = 'tools to process the lofar solar data', # Give a short description about your library
author = 'Peijin', # Type in your name
author_email = '[email protected]', # Type in your E-Mail
url = 'https://github.com/Pjer-zhang/LOFAR_Solar', # Provide either the link to your github or to your website
download_url = 'https://github.com/Pjer-zhang/LOFAR_Solar/archive/master.zip',
keywords = ['LOFAR', 'Solar', 'radio'], # Keywords that define your package best
install_requires=[ # I get to this in a second
'matplotlib',
'sunpy',
'opencv-python',
'astropy',
'h5py',
'scipy',
'scikit-image'
],
classifiers=[
        'Development Status :: 4 - Beta', # Choose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
'Intended Audience :: Developers', # Define that your audience are developers
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License', # Again, pick a license,
'Programming Language :: Python :: 3.8',
],
long_description=long_description,
long_description_content_type='text/markdown'
) |
the-stack_0_18765 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Neural network ops for LabeledTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import nn as contrib_nn
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.python.ops import nn
relu = core.define_unary_op('relu', nn.relu)
relu6 = core.define_unary_op('relu6', nn.relu6)
crelu = core.define_unary_op('crelu', nn.crelu)
elu = core.define_unary_op('elu', nn.elu)
softplus = core.define_unary_op('softplus', nn.softplus)
l2_loss = core.define_unary_op('l2_loss', nn.l2_loss)
sigmoid_cross_entropy_with_logits = core.define_binary_op(
'sigmoid_cross_entropy_with_logits',
contrib_nn.deprecated_flipped_sigmoid_cross_entropy_with_logits)
softmax = core.define_unary_op('softmax', nn.softmax)
log_softmax = core.define_unary_op('log_softmax', nn.log_softmax)
softmax_cross_entropy_with_logits = core.define_binary_op(
'softmax_cross_entropy_with_logits',
contrib_nn.deprecated_flipped_softmax_cross_entropy_with_logits)
sparse_softmax_cross_entropy_with_logits = core.define_binary_op(
'sparse_softmax_cross_entropy_with_logits',
contrib_nn.deprecated_flipped_sparse_softmax_cross_entropy_with_logits)
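# Usage note (an assumption, not part of the original module): the ops defined
# above via core.define_unary_op / core.define_binary_op mirror their tf.nn
# counterparts but operate on LabeledTensor objects, so axis labels are carried
# through, e.g. relu(labeled_tensor) returns a LabeledTensor with the same axes.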
|
the-stack_0_18766 | import datetime
import json
import logging
import os
import pytest
import re
import shutil
from time import sleep
from dateutil.tz import tzutc
from schedules_tools import jsondate
from schedules_tools.converter import ScheduleConverter
from schedules_tools.models import Schedule
from schedules_tools.schedule_handlers.smart_sheet import (
SmartSheetExportException)
from smartsheet import Smartsheet
# smartsheet log
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
def is_taskjuggler_available():
from distutils.spawn import find_executable
return find_executable('taskjuggler') is not None
def pytest_generate_tests(metafunc):
source_function = metafunc.function.__name__
if source_function == 'test_import':
argnames = ['handler_name', 'import_schedule_file', 'interm_reference_file_name']
# Remove test scenarios without available dependencies
argvalues = []
for scenario in metafunc.cls.scenarios_import_combinations:
argvalues.append(scenario)
metafunc.parametrize(argnames, argvalues)
elif source_function == 'test_export':
argnames = ['handler_name', 'export_schedule_file', 'flat',
'flag_show', 'flag_hide',
'options', 'sort']
argvalues = metafunc.cls.scenarios_export_combinations
metafunc.parametrize(argnames, argvalues)
class TestHandlers(object):
intermediary_reference_file = 'intermediary-struct-reference.json'
schedule_files_dir = 'schedule_files'
basedir = os.path.dirname(os.path.realpath(__file__))
test_import_start_timestamp = None
scenarios_import_combinations = [
('msp', 'import-schedule-msp.xml', None),
('msp', 'import-schedule-msp-duplicated-names.xml',
'intermediary-struct-reference-duplicated-names.json'),
('smartsheet', '', None),
('json', 'import-schedule-json.json', None),
]
scenarios_export_combinations = [
('msp', 'export-schedule-msp.xml', False, [], [], {}, None),
('json', 'export-schedule-json.json', False, [], [], {}, None),
('json', 'export-schedule-json-sort-name.json', False, [], [], {}, 'name'),
('json', 'export-schedule-json-sort-date.json', False, [], [], {}, 'dStart'),
('json', 'export-schedule-json.json-flags', False, ['flag1'], ['flag2'], {}, None),
('json', 'export-schedule-json-flat.json', True, [], [], {}, None),
('json', 'export-schedule-json-flat-sort-date.json', True, [], [], {}, 'dStart'),
('json', 'export-schedule-json-flat-flags.json', True, ['flag1'], ['flag2'], {}, None),
('ics', 'export-schedule-ics.ics', False, [], [], {}, None),
('html', 'export-schedule-html.html', False, [], [], {}, None),
('html', 'export-schedule-html-sort-date.html', False, [], [], {}, 'dStart'),
('html', 'export-schedule-html-options.html', False, [], [],
dict(html_title='Test title', html_table_header='<p>Test header</p>',
html_table_fooder='<p>Test footer</p>'), None),
('html', 'export-schedule-html-css.html', False, [], [],
dict(html_css_href='./test.css'), None),
('html', 'export-schedule-html-indent.html', False, [], [],
dict(html_level_indent='2'), None),
]
smartsheet_columns_ids = ()
smartsheet_sheet_id = None
smartsheet_client = None
replace_time_opts = dict(hour=0, minute=0, tzinfo=None)
datetime_fmt = '%Y-%m-%dT%H:%M:%SZ'
def _sanitize_export_test_ics(self, content):
return re.sub(r'DTSTAMP:[0-9]+T[0-9]+Z', 'DTSTAMP:20170101T010101Z', content)
def _clean_interm_struct(self, input_dict):
"""Removes keys that is not needed for comparison,
unify time-part of dates"""
keys_to_remove = ['unique_id_re', 'tasks_slugs', 'ext_attr', 'flags_attr_id',
'resources', 'mtime']
# remove schedule attrs
for key in keys_to_remove:
if key in input_dict:
input_dict.pop(key)
# Schedule attrs
if isinstance(input_dict['dStart'], datetime.datetime):
input_dict['dStart'] = input_dict['dStart'].replace(**self.replace_time_opts)
input_dict['dFinish'] = input_dict['dFinish'].replace(**self.replace_time_opts)
else:
input_dict['dStart'] = datetime.datetime.strptime(input_dict['dStart'],
self.datetime_fmt)
input_dict['dFinish'] = datetime.datetime.strptime(input_dict['dFinish'],
self.datetime_fmt)
# Task(s) attrs
for task in input_dict['tasks']:
self._clear_task_time(task)
def _clear_task_time(self, task):
"""For comparison purpose we ignore hours and minutes of task."""
if isinstance(task['dStart'], datetime.datetime):
task['dStart'] = task['dStart'].replace(**self.replace_time_opts)
task['dFinish'] = task['dFinish'].replace(**self.replace_time_opts)
else:
task['dStart'] = datetime.datetime.strptime(task['dStart'], self.datetime_fmt)
task['dFinish'] = datetime.datetime.strptime(task['dFinish'], self.datetime_fmt)
for inner_task in task['tasks']:
self._clear_task_time(inner_task)
def get_intermediary_reference_schedule(self):
interm_reference_file = os.path.join(self.basedir,
self.intermediary_reference_file)
with open(interm_reference_file) as fd:
intermediary_input_dict = jsondate.load(fd)
schedule = Schedule.load_from_dict(intermediary_input_dict)
return schedule
def import_setup_handle_smartsheet(self):
token = os.environ.get('SMARTSHEET_TOKEN', None)
if not token:
pytest.fail('You need to specify environment variable '
'SMARTSHEET_TOKEN to run smartsheet tests.')
converter_options = {
'smartsheet_token': token,
}
intermediary_input = self.get_intermediary_reference_schedule()
conv = ScheduleConverter(intermediary_input)
sheet_id = conv.export_schedule(output=None,
target_format='smartsheet',
options=converter_options)
self._smartsheet_inject_extra_column(sheet_id, converter_options)
return sheet_id, converter_options
def _smartsheet_inject_extra_column(self, sheet_id, converter_options):
"""
        Add an extra column with a duplicated name. For testing only.
        Values in this extra column are shifted by 3 days into the future
        from the original.
"""
client = Smartsheet(converter_options['smartsheet_token'])
sheet = client.Sheets.get_sheet(sheet_id, page_size=None, page=None)
original_sheet_version = sheet.version
start_column_id = None
for column in sheet.columns:
if column.title in ['Start', 'Start Date']:
start_column_id = column.id
break
        # Intentionally add another Start Date column - just for test purposes
column_startdate_dup = client.models.Column({
'title': 'Start Date',
'type': 'DATE',
'index': 11,
})
resp = client.Sheets.add_columns(sheet_id, column_startdate_dup)
duplicated_column_id = resp.result[0].id
if resp.message != 'SUCCESS':
msg = 'Adding column failed: {}'.format(resp)
raise SmartSheetExportException(msg, source=sheet_id)
datetime_format = '%Y-%m-%dT%H:%M:%S'
updated_rows = []
for row in sheet.rows:
original_cell_value = row.get_column(start_column_id).value
new_value = datetime.datetime.strptime(original_cell_value,
datetime_format)
new_value += datetime.timedelta(days=3)
cell = client.models.Cell()
cell.column_id = duplicated_column_id
cell.value = new_value.strftime(datetime_format)
new_row = client.models.Row()
new_row.id = row.id
new_row.cells.append(cell)
updated_rows.append(new_row)
resp = client.Sheets.update_rows(sheet_id, updated_rows)
if resp.message != 'SUCCESS':
msg = 'Inserting duplicated cells failed: {}'.format(resp)
raise SmartSheetExportException(msg, source=sheet_id)
# refresh sheet object
sheet = client.Sheets.get_sheet(sheet_id, page_size=None, page=None)
assert sheet.version > original_sheet_version
def import_teardown_handle_smartsheet(self, handle, converter_options):
client = Smartsheet(converter_options['smartsheet_token'])
client.Sheets.delete_sheet(handle)
def import_assert_changelog_smartsheet(self,
reference_schedule_dict,
imported_schedule_dict):
changelog = imported_schedule_dict['changelog']
assert len(changelog.keys()) == 1
assert isinstance(list(changelog.keys())[0], int)
record = list(changelog.values())[0]
date_now = datetime.datetime.now(tz=tzutc())
assert self.test_import_start_timestamp < record['date']
assert record['date'] < date_now
def test_import(self, handler_name, import_schedule_file, interm_reference_file_name):
"""
        Generic test that builds the arguments for the import_schedule method.
        If 'import_schedule_file' is specified, it is treated as the filename
        of a file-based handle. Otherwise the test calls an
        'import_setup_handle_<handler_name>' method that provides these
        arguments (handle, options) and also acts as the setup/teardown of
        the handler's test (paired with 'import_teardown_handle_<handler_name>').
"""
handle = None
converter_options = dict()
self.test_import_start_timestamp = datetime.datetime.now(tz=tzutc()).replace(microsecond=0)
sleep(1) # make sure import takes at least second
if import_schedule_file:
handle = os.path.join(self.basedir, self.schedule_files_dir,
import_schedule_file)
converter_options = {
'source_storage_format': 'local'
}
else:
callback_name = 'import_setup_handle_' + handler_name
if hasattr(self, callback_name):
import_setup_fn = getattr(self, callback_name)
handle, converter_options = import_setup_fn()
try:
conv = ScheduleConverter()
schedule = conv.import_schedule(handle,
schedule_src_format=handler_name,
options=converter_options)
assert 0 == len(schedule.errors_import)
imported_schedule_dict = schedule.dump_as_dict()
self._clean_interm_struct(imported_schedule_dict)
interm_reference_file = os.path.join(
self.basedir, interm_reference_file_name or self.intermediary_reference_file)
regenerate = os.environ.get('REGENERATE', False) == 'true'
if regenerate:
                log.info('test_import: Regenerating interm. reference file '
                         'from imported schedule.')
with open(interm_reference_file, 'w+') as fd:
jsondate.dump(imported_schedule_dict,
fd,
sort_keys=True,
indent=4,
separators=(',', ': '))
with open(interm_reference_file) as fd:
reference_dict = json.load(fd)
self._clean_interm_struct(reference_dict)
# test/assert changelog separately, if there exists a hook
callback_name = 'import_assert_changelog_' + handler_name
if hasattr(self, callback_name):
assert_changelog_fn = getattr(self, callback_name)
assert_changelog_fn(reference_dict, imported_schedule_dict)
# If asserting of changelog went well,
# drop it from schedules dicts
imported_schedule_dict.pop('changelog', None)
reference_dict.pop('changelog', None)
assert reference_dict == imported_schedule_dict
finally:
callback_name = 'import_teardown_handle_' + handler_name
if not import_schedule_file and hasattr(self, callback_name):
import_setup_fn = getattr(self, callback_name)
import_setup_fn(handle, converter_options)
def test_export(self, handler_name, export_schedule_file,
flat, flag_show, flag_hide, options,
tmpdir, sort):
full_export_schedule_file = os.path.join(self.basedir,
self.schedule_files_dir,
export_schedule_file)
intermediary_input = self.get_intermediary_reference_schedule()
export_output_file = tmpdir.join('exported_file')
export_output_filename = str(export_output_file)
conv = ScheduleConverter(intermediary_input)
if flat:
conv.schedule.make_flat()
if sort:
conv.schedule.sort_tasks(sort)
conv.schedule.filter_flags(flag_show, flag_hide)
conv.export_schedule(export_output_filename, handler_name,
options=options)
actual_output = export_output_file.read()
regenerate = os.environ.get('REGENERATE', False) == 'true'
if regenerate:
log.info('test_export: Regenerating exported file from '
'reference schedule.')
shutil.copy(export_output_filename, full_export_schedule_file)
with open(full_export_schedule_file) as fd:
expected_output = fd.read()
# sanitize if needed
sanitize_fn_name = '_sanitize_export_test_{}'.format(handler_name)
if hasattr(self, sanitize_fn_name):
sanitize_func = getattr(self, sanitize_fn_name)
expected_output = sanitize_func(expected_output)
actual_output = sanitize_func(actual_output)
assert expected_output == actual_output
|
the-stack_0_18767 | from __future__ import print_function
import sys
import gdb.printing
import gdb.types
class Iterator:
def __iter__(self):
return self
if sys.version_info.major == 2:
def next(self):
return self.__next__()
def children(self):
return self
def escape_bytes(val, l):
return '"' + val.string(encoding='Latin-1', length=l).encode('unicode_escape').decode() + '"'
class SmallStringPrinter:
"""Print an llvm::SmallString object."""
def __init__(self, val):
self.val = val
def to_string(self):
begin = self.val['BeginX']
return escape_bytes(begin.cast(gdb.lookup_type('char').pointer()), self.val['Size'])
class StringRefPrinter:
"""Print an llvm::StringRef object."""
def __init__(self, val):
self.val = val
def to_string(self):
return escape_bytes(self.val['Data'], self.val['Length'])
class SmallVectorPrinter(Iterator):
"""Print an llvm::SmallVector object."""
def __init__(self, val):
self.val = val
t = val.type.template_argument(0).pointer()
self.begin = val['BeginX'].cast(t)
self.size = val['Size']
self.i = 0
def __next__(self):
if self.i == self.size:
raise StopIteration
ret = '[{}]'.format(self.i), (self.begin+self.i).dereference()
self.i += 1
return ret
def to_string(self):
return 'llvm::SmallVector of Size {}, Capacity {}'.format(self.size, self.val['Capacity'])
def display_hint (self):
return 'array'
class ArrayRefPrinter:
"""Print an llvm::ArrayRef object."""
class _iterator:
def __init__(self, begin, end):
self.cur = begin
self.end = end
self.count = 0
def __iter__(self):
return self
def __next__(self):
if self.cur == self.end:
raise StopIteration
count = self.count
self.count = self.count + 1
cur = self.cur
self.cur = self.cur + 1
return '[%d]' % count, cur.dereference()
if sys.version_info.major == 2:
next = __next__
def __init__(self, val):
self.val = val
def children(self):
data = self.val['Data']
return self._iterator(data, data + self.val['Length'])
def to_string(self):
return 'llvm::ArrayRef of length %d' % (self.val['Length'])
def display_hint (self):
return 'array'
class ExpectedPrinter(Iterator):
"""Print an llvm::Expected object."""
def __init__(self, val):
self.val = val
def __next__(self):
val = self.val
if val is None:
raise StopIteration
self.val = None
if val['HasError']:
return ('error', val['ErrorStorage'].address.cast(
gdb.lookup_type('llvm::ErrorInfoBase').pointer()).dereference())
return ('value', val['TStorage'].address.cast(
val.type.template_argument(0).pointer()).dereference())
def to_string(self):
return 'llvm::Expected{}'.format(' is error' if self.val['HasError'] else '')
class OptionalPrinter(Iterator):
"""Print an llvm::Optional object."""
def __init__(self, val):
self.val = val
def __next__(self):
val = self.val
if val is None:
raise StopIteration
self.val = None
if not val['Storage']['hasVal']:
raise StopIteration
return ('value', val['Storage']['value'])
def to_string(self):
return 'llvm::Optional{}'.format('' if self.val['Storage']['hasVal'] else ' is not initialized')
class DenseMapPrinter:
"Print a DenseMap"
class _iterator:
def __init__(self, key_info_t, begin, end):
self.key_info_t = key_info_t
self.cur = begin
self.end = end
self.advancePastEmptyBuckets()
self.first = True
def __iter__(self):
return self
def advancePastEmptyBuckets(self):
# disabled until the comments below can be addressed
# keeping as notes/posterity/hints for future contributors
return
n = self.key_info_t.name
is_equal = gdb.parse_and_eval(n + '::isEqual')
empty = gdb.parse_and_eval(n + '::getEmptyKey()')
tombstone = gdb.parse_and_eval(n + '::getTombstoneKey()')
# the following is invalid, GDB fails with:
# Python Exception <class 'gdb.error'> Attempt to take address of value
# not located in memory.
# because isEqual took parameter (for the unsigned long key I was testing)
# by const ref, and GDB
# It's also not entirely general - we should be accessing the "getFirst()"
# member function, not the 'first' member variable, but I've yet to figure
# out how to find/call member functions (especially (const) overloaded
# ones) on a gdb.Value.
while self.cur != self.end and (is_equal(self.cur.dereference()['first'], empty) or is_equal(self.cur.dereference()['first'], tombstone)):
self.cur = self.cur + 1
def __next__(self):
if self.cur == self.end:
raise StopIteration
cur = self.cur
v = cur.dereference()['first' if self.first else 'second']
if not self.first:
self.cur = self.cur + 1
self.advancePastEmptyBuckets()
self.first = True
else:
self.first = False
return 'x', v
if sys.version_info.major == 2:
next = __next__
def __init__(self, val):
self.val = val
def children(self):
t = self.val.type.template_argument(3).pointer()
begin = self.val['Buckets'].cast(t)
end = (begin + self.val['NumBuckets']).cast(t)
return self._iterator(self.val.type.template_argument(2), begin, end)
def to_string(self):
return 'llvm::DenseMap with %d elements' % (self.val['NumEntries'])
def display_hint(self):
return 'map'
class TwinePrinter:
"Print a Twine"
def __init__(self, val):
self._val = val
def display_hint(self):
return 'string'
def string_from_pretty_printer_lookup(self, val):
'''Lookup the default pretty-printer for val and use it.
If no pretty-printer is defined for the type of val, print an error and
return a placeholder string.'''
pp = gdb.default_visualizer(val)
if pp:
s = pp.to_string()
# The pretty-printer may return a LazyString instead of an actual Python
# string. Convert it to a Python string. However, GDB doesn't seem to
# register the LazyString type, so we can't check
# "type(s) == gdb.LazyString".
if 'LazyString' in type(s).__name__:
s = s.value().address.string()
else:
print(('No pretty printer for {} found. The resulting Twine ' +
'representation will be incomplete.').format(val.type.name))
s = '(missing {})'.format(val.type.name)
return s
def is_twine_kind(self, kind, expected):
if not kind.endswith(expected):
return False
# apparently some GDB versions add the NodeKind:: namespace
# (happens for me on GDB 7.11)
return kind in ('llvm::Twine::' + expected,
'llvm::Twine::NodeKind::' + expected)
def string_from_child(self, child, kind):
'''Return the string representation of the Twine::Child child.'''
if self.is_twine_kind(kind, 'EmptyKind') or self.is_twine_kind(kind, 'NullKind'):
return ''
if self.is_twine_kind(kind, 'TwineKind'):
return self.string_from_twine_object(child['twine'].dereference())
if self.is_twine_kind(kind, 'CStringKind'):
return child['cString'].string()
if self.is_twine_kind(kind, 'StdStringKind'):
val = child['stdString'].dereference()
return self.string_from_pretty_printer_lookup(val)
if self.is_twine_kind(kind, 'StringRefKind'):
val = child['stringRef'].dereference()
pp = StringRefPrinter(val)
return pp.to_string()
if self.is_twine_kind(kind, 'SmallStringKind'):
val = child['smallString'].dereference()
pp = SmallStringPrinter(val)
return pp.to_string()
if self.is_twine_kind(kind, 'CharKind'):
return chr(child['character'])
if self.is_twine_kind(kind, 'DecUIKind'):
return str(child['decUI'])
if self.is_twine_kind(kind, 'DecIKind'):
return str(child['decI'])
if self.is_twine_kind(kind, 'DecULKind'):
return str(child['decUL'].dereference())
if self.is_twine_kind(kind, 'DecLKind'):
return str(child['decL'].dereference())
if self.is_twine_kind(kind, 'DecULLKind'):
return str(child['decULL'].dereference())
if self.is_twine_kind(kind, 'DecLLKind'):
return str(child['decLL'].dereference())
if self.is_twine_kind(kind, 'UHexKind'):
val = child['uHex'].dereference()
return hex(int(val))
print(('Unhandled NodeKind {} in Twine pretty-printer. The result will be '
'incomplete.').format(kind))
return '(unhandled {})'.format(kind)
def string_from_twine_object(self, twine):
'''Return the string representation of the Twine object twine.'''
lhs_str = ''
rhs_str = ''
lhs = twine['LHS']
rhs = twine['RHS']
lhs_kind = str(twine['LHSKind'])
rhs_kind = str(twine['RHSKind'])
lhs_str = self.string_from_child(lhs, lhs_kind)
rhs_str = self.string_from_child(rhs, rhs_kind)
return lhs_str + rhs_str
def to_string(self):
return self.string_from_twine_object(self._val)
def get_pointer_int_pair(val):
"""Get tuple from llvm::PointerIntPair."""
info_name = val.type.template_argument(4).strip_typedefs().name
# Note: this throws a gdb.error if the info type is not used (by means of a
# call to getPointer() or similar) in the current translation unit.
enum_type = gdb.lookup_type(info_name + '::MaskAndShiftConstants')
enum_dict = gdb.types.make_enum_dict(enum_type)
ptr_mask = enum_dict[info_name + '::PointerBitMask']
int_shift = enum_dict[info_name + '::IntShift']
int_mask = enum_dict[info_name + '::IntMask']
pair_union = val['Value']
pointer = (pair_union & ptr_mask)
value = ((pair_union >> int_shift) & int_mask)
return (pointer, value)
class PointerIntPairPrinter:
"""Print a PointerIntPair."""
def __init__(self, pointer, value):
self.pointer = pointer
self.value = value
def children(self):
yield ('pointer', self.pointer)
yield ('value', self.value)
def make_pointer_int_pair_printer(val):
"""Factory for an llvm::PointerIntPair printer."""
try:
pointer, value = get_pointer_int_pair(val)
except gdb.error:
return None # If PointerIntPair cannot be analyzed, print as raw value.
pointer_type = val.type.template_argument(0)
value_type = val.type.template_argument(2)
return PointerIntPairPrinter(pointer.cast(pointer_type),
value.cast(value_type))
class PointerUnionPrinter:
"""Print a PointerUnion."""
def __init__(self, pointer):
self.pointer = pointer
def children(self):
yield ('pointer', self.pointer)
def to_string(self):
return "Containing %s" % self.pointer.type
def make_pointer_union_printer(val):
"""Factory for an llvm::PointerUnion printer."""
try:
pointer, value = get_pointer_int_pair(val['Val'])
except gdb.error:
return None # If PointerIntPair cannot be analyzed, print as raw value.
pointer_type = val.type.template_argument(int(value))
return PointerUnionPrinter(pointer.cast(pointer_type))
class IlistNodePrinter:
"""Print an llvm::ilist_node object."""
def __init__(self, val):
impl_type = val.type.fields()[0].type
base_type = impl_type.fields()[0].type
derived_type = val.type.template_argument(0)
def get_prev_and_sentinel(base):
# One of Prev and PrevAndSentinel exists. Depending on #defines used to
      # compile LLVM, the base_type's template argument is either true or false.
if base_type.template_argument(0):
return get_pointer_int_pair(base['PrevAndSentinel'])
return base['Prev'], None
# Casts a base_type pointer to the appropriate derived type.
def cast_pointer(pointer):
sentinel = get_prev_and_sentinel(pointer.dereference())[1]
pointer = pointer.cast(impl_type.pointer())
if sentinel:
return pointer
return pointer.cast(derived_type.pointer())
    # Repeated cast because val.type's base_type is ambiguous when using tags.
base = val.cast(impl_type).cast(base_type)
(prev, sentinel) = get_prev_and_sentinel(base)
prev = prev.cast(base_type.pointer())
self.prev = cast_pointer(prev)
self.next = cast_pointer(val['Next'])
self.sentinel = sentinel
def children(self):
if self.sentinel:
yield 'sentinel', 'yes'
yield 'prev', self.prev
yield 'next', self.next
class IlistPrinter:
"""Print an llvm::simple_ilist or llvm::iplist object."""
def __init__(self, val):
self.node_type = val.type.template_argument(0)
sentinel = val['Sentinel']
# First field is common base type of sentinel and ilist_node.
base_type = sentinel.type.fields()[0].type
self.sentinel = sentinel.address.cast(base_type.pointer())
def _pointers(self):
pointer = self.sentinel
while True:
pointer = pointer['Next'].cast(pointer.type)
if pointer == self.sentinel:
return
yield pointer.cast(self.node_type.pointer())
def children(self):
for k, v in enumerate(self._pointers()):
yield ('[%d]' % k, v.dereference())
pp = gdb.printing.RegexpCollectionPrettyPrinter("LLVMSupport")
pp.add_printer('llvm::SmallString', '^llvm::SmallString<.*>$', SmallStringPrinter)
pp.add_printer('llvm::StringRef', '^llvm::StringRef$', StringRefPrinter)
pp.add_printer('llvm::SmallVectorImpl', '^llvm::SmallVector(Impl)?<.*>$', SmallVectorPrinter)
pp.add_printer('llvm::ArrayRef', '^llvm::(Mutable)?ArrayRef<.*>$', ArrayRefPrinter)
pp.add_printer('llvm::Expected', '^llvm::Expected<.*>$', ExpectedPrinter)
pp.add_printer('llvm::Optional', '^llvm::Optional<.*>$', OptionalPrinter)
pp.add_printer('llvm::DenseMap', '^llvm::DenseMap<.*>$', DenseMapPrinter)
pp.add_printer('llvm::Twine', '^llvm::Twine$', TwinePrinter)
pp.add_printer('llvm::PointerIntPair', '^llvm::PointerIntPair<.*>$', make_pointer_int_pair_printer)
pp.add_printer('llvm::PointerUnion', '^llvm::PointerUnion<.*>$', make_pointer_union_printer)
pp.add_printer('llvm::ilist_node', '^llvm::ilist_node<.*>$', IlistNodePrinter)
pp.add_printer('llvm::iplist', '^llvm::iplist<.*>$', IlistPrinter)
pp.add_printer('llvm::simple_ilist', '^llvm::simple_ilist<.*>$', IlistPrinter)
gdb.printing.register_pretty_printer(gdb.current_objfile(), pp)
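# Usage note (an assumption, not stated in the original file): these printers
# are typically made available by sourcing this script from a .gdbinit file or
# an interactive session, e.g.:
#
#   (gdb) source path/to/prettyprinters.py
#   (gdb) print some_small_vector   # now rendered by SmallVectorPrinter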
|
the-stack_0_18770 | from pathlib import Path
from kivy import platform
from kivy.storage.dictstore import DictStore
file_path = Path('.')
if platform == 'macosx':
file_path = Path(
'~/Library/Application Support/%s' % 'simple-time-tracker'
).expanduser()
file_path.mkdir(parents=True, exist_ok=True)
store = DictStore(file_path.joinpath('settings.json'))
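# Minimal usage sketch (an addition, not part of the original module): DictStore
# persists keyed dictionaries to the JSON file created above, e.g.:
#
#   store.put('last_session', project='demo', elapsed_seconds=0)
#   if store.exists('last_session'):
#       print(store.get('last_session')['project'])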
|
the-stack_0_18772 | # coding=utf-8
# Copyright 2017 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Transformer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from tensor2tensor.data_generators import problem_hparams
from tensor2tensor.models import transformer
import tensorflow as tf
BATCH_SIZE = 3
INPUT_LENGTH = 5
TARGET_LENGTH = 7
VOCAB_SIZE = 10
class TransformerTest(tf.test.TestCase):
def getModel(self, hparams, mode=tf.estimator.ModeKeys.TRAIN, has_input=True):
hparams.hidden_size = 8
hparams.filter_size = 32
hparams.num_heads = 1
hparams.layer_prepostprocess_dropout = 0.0
p_hparams = problem_hparams.test_problem_hparams(VOCAB_SIZE, VOCAB_SIZE)
if not has_input:
p_hparams.input_modality = {}
hparams.problems = [p_hparams]
inputs = -1 + np.random.random_integers(
VOCAB_SIZE, size=(BATCH_SIZE, INPUT_LENGTH, 1, 1))
targets = -1 + np.random.random_integers(
VOCAB_SIZE, size=(BATCH_SIZE, TARGET_LENGTH, 1, 1))
features = {
"inputs": tf.constant(inputs, dtype=tf.int32, name="inputs"),
"targets": tf.constant(targets, dtype=tf.int32, name="targets"),
"target_space_id": tf.constant(1, dtype=tf.int32)
}
return transformer.Transformer(hparams, mode, p_hparams), features
def testTransformer(self):
model, features = self.getModel(transformer.transformer_small())
logits, _ = model(features)
with self.test_session() as session:
session.run(tf.global_variables_initializer())
res = session.run(logits)
self.assertEqual(res.shape, (BATCH_SIZE, TARGET_LENGTH, 1, 1, VOCAB_SIZE))
def testTransformerRelative(self):
model, features = self.getModel(transformer.transformer_relative_tiny())
logits, _ = model(features)
with self.test_session() as session:
session.run(tf.global_variables_initializer())
res = session.run(logits)
self.assertEqual(res.shape, (BATCH_SIZE, TARGET_LENGTH, 1, 1, VOCAB_SIZE))
def testGreedyVsFast(self):
model, features = self.getModel(transformer.transformer_small())
decode_length = 2
out_logits, _ = model(features)
out_logits = tf.squeeze(out_logits, axis=[2, 3])
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=tf.reshape(out_logits, [-1, VOCAB_SIZE]),
labels=tf.reshape(features["targets"], [-1]))
loss = tf.reduce_mean(loss)
apply_grad = tf.train.AdamOptimizer(0.001).minimize(loss)
with self.test_session():
tf.global_variables_initializer().run()
for _ in range(100):
apply_grad.run()
model.set_mode(tf.estimator.ModeKeys.PREDICT)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
greedy_result = model._slow_greedy_infer(
features, decode_length)["outputs"]
greedy_result = tf.squeeze(greedy_result, axis=[2, 3])
fast_result = model._greedy_infer(features, decode_length)["outputs"]
with self.test_session():
greedy_res = greedy_result.eval()
fast_res = fast_result.eval()
self.assertEqual(fast_res.shape, (BATCH_SIZE, INPUT_LENGTH + decode_length))
self.assertAllClose(greedy_res, fast_res)
def testSlowVsFastNoInput(self):
model, features = self.getModel(
transformer.transformer_small(), has_input=False)
decode_length = 2
out_logits, _ = model(features)
out_logits = tf.squeeze(out_logits, axis=[2, 3])
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=tf.reshape(out_logits, [-1, VOCAB_SIZE]),
labels=tf.reshape(features["targets"], [-1]))
loss = tf.reduce_mean(loss)
apply_grad = tf.train.AdamOptimizer(0.001).minimize(loss)
with self.test_session():
tf.global_variables_initializer().run()
for _ in range(100):
apply_grad.run()
model.set_mode(tf.estimator.ModeKeys.PREDICT)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
slow_result = model._slow_greedy_infer(
features, decode_length)["outputs"]
slow_result = tf.squeeze(slow_result, axis=[2, 3])
fast_result = model._greedy_infer(features, decode_length)["outputs"]
with self.test_session():
slow_res = slow_result.eval()
fast_res = fast_result.eval()
self.assertEqual(fast_res.shape, (BATCH_SIZE, decode_length))
self.assertAllClose(slow_res, fast_res)
def testBeamVsFast(self):
model, features = self.getModel(transformer.transformer_small())
decode_length = 2
out_logits, _ = model(features)
out_logits = tf.squeeze(out_logits, axis=[2, 3])
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=tf.reshape(out_logits, [-1, VOCAB_SIZE]),
labels=tf.reshape(features["targets"], [-1]))
loss = tf.reduce_mean(loss)
apply_grad = tf.train.AdamOptimizer(0.001).minimize(loss)
with self.test_session():
tf.global_variables_initializer().run()
for _ in range(100):
apply_grad.run()
model.set_mode(tf.estimator.ModeKeys.PREDICT)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
beam_result = model._beam_decode_slow(
features,
decode_length,
beam_size=4,
top_beams=1,
alpha=1.0)["outputs"]
fast_result = model._beam_decode(
features,
decode_length,
beam_size=4,
top_beams=1,
alpha=1.0)["outputs"]
with self.test_session():
beam_res = beam_result.eval()
fast_res = fast_result.eval()
self.assertEqual(fast_res.shape, (BATCH_SIZE, INPUT_LENGTH + decode_length))
self.assertAllClose(beam_res, fast_res)
def testTransformerWithoutProblem(self):
hparams = transformer.transformer_test()
embedded_inputs = np.random.random_sample(
(BATCH_SIZE, INPUT_LENGTH, 1, hparams.hidden_size))
embedded_targets = np.random.random_sample(
(BATCH_SIZE, TARGET_LENGTH, 1, hparams.hidden_size))
transformed_features = {
"inputs": tf.constant(embedded_inputs, dtype=tf.float32),
"targets": tf.constant(embedded_targets, dtype=tf.float32)
}
model = transformer.Transformer(hparams)
body_out, _ = model(transformed_features)
self.assertAllEqual(
body_out.get_shape().as_list(),
[BATCH_SIZE, TARGET_LENGTH, 1, hparams.hidden_size])
def testTransformerWithEncoderDecoderAttentionLoss(self):
model, features = self.getModel(transformer.transformer_small())
expected_attention_weights = np.random.random_sample(
size=(BATCH_SIZE, TARGET_LENGTH, INPUT_LENGTH))
features["expected_attention_weights"] = tf.constant(
expected_attention_weights, dtype=tf.float32)
_, extra_loss = model(features)
with self.test_session() as session:
session.run(tf.global_variables_initializer())
res = session.run(extra_loss["attention_loss"])
self.assertEqual(res.shape, ())
if __name__ == "__main__":
tf.test.main()
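# Usage note (an addition, not part of the original file): these unit tests can
# be run directly, e.g. `python transformer_test.py`, which invokes tf.test.main()
# and executes every test method of TransformerTest.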
|
the-stack_0_18774 | """Abstract classes for manifolds.
Lead authors: Nicolas Guigui and Nina Miolane.
"""
import abc
import geomstats.backend as gs
from geomstats.geometry.manifold import Manifold
POINT_TYPES = {1: "vector", 2: "matrix"}
class VectorSpace(Manifold, abc.ABC):
"""Abstract class for vector spaces.
Parameters
----------
shape : tuple
Shape of the elements of the vector space. The dimension is the
product of these values by default.
default_point_type : str, {'vector', 'matrix'}
Point type.
Optional, default: 'vector'.
"""
def __init__(self, shape, **kwargs):
if "dim" not in kwargs.keys():
kwargs["dim"] = int(gs.prod(gs.array(shape)))
super(VectorSpace, self).__init__(shape=shape, **kwargs)
self.shape = shape
def belongs(self, point, atol=gs.atol):
"""Evaluate if the point belongs to the vector space.
This method checks the shape of the input point.
Parameters
----------
point : array-like, shape=[.., {dim, [n, n]}]
Point to test.
atol : float
Unused here.
Returns
-------
belongs : array-like, shape=[...,]
Boolean evaluating if point belongs to the space.
"""
minimal_ndim = len(self.shape)
belongs = point.shape[-minimal_ndim:] == self.shape
if point.ndim == minimal_ndim:
return belongs
return gs.tile(gs.array([belongs]), [point.shape[0]])
@staticmethod
def projection(point):
"""Project a point to the vector space.
        This method is for compatibility and returns `point`. `point` should
        already have the right shape.
Parameters
----------
point: array-like, shape[..., {dim, [n, n]}]
Point.
Returns
-------
point: array-like, shape[..., {dim, [n, n]}]
Point.
"""
return point
def is_tangent(self, vector, base_point=None, atol=gs.atol):
"""Check whether the vector is tangent at base_point.
Tangent vectors are identified with points of the vector space so
this checks the shape of the input vector.
Parameters
----------
vector : array-like, shape=[..., {dim, [n, n]}]
Vector.
base_point : array-like, shape=[..., {dim, [n, n]}]
Point in the vector space.
atol : float
Absolute tolerance.
Optional, default: backend atol.
Returns
-------
is_tangent : bool
Boolean denoting if vector is a tangent vector at the base point.
"""
return self.belongs(vector, atol)
def to_tangent(self, vector, base_point=None):
"""Project a vector to a tangent space of the vector space.
This method is for compatibility and returns vector.
Parameters
----------
vector : array-like, shape=[..., {dim, [n, n]}]
Vector.
base_point : array-like, shape=[..., {dim, [n, n]}]
Point in the vector space
Returns
-------
tangent_vec : array-like, shape=[..., {dim, [n, n]}]
Tangent vector at base point.
"""
return self.projection(vector)
def random_point(self, n_samples=1, bound=1.0):
"""Sample in the vector space with a uniform distribution in a box.
Parameters
----------
n_samples : int
Number of samples.
Optional, default: 1.
bound : float
Side of hypercube support of the uniform distribution.
Optional, default: 1.0
Returns
-------
point : array-like, shape=[..., dim]
Sample.
"""
size = self.shape
if n_samples != 1:
size = (n_samples,) + self.shape
point = bound * (gs.random.rand(*size) - 0.5) * 2
return point
class LevelSet(Manifold, abc.ABC):
"""Class for manifolds embedded in a vector space by a submersion.
Parameters
----------
dim : int
Dimension of the embedded manifold.
embedding_space : VectorSpace
Embedding space.
default_coords_type : str, {'intrinsic', 'extrinsic', etc}
Coordinate type.
Optional, default: 'intrinsic'.
"""
def __init__(
self,
dim,
embedding_space,
submersion,
value,
tangent_submersion,
default_coords_type="intrinsic",
**kwargs
):
if "shape" not in kwargs:
kwargs["shape"] = embedding_space.shape
super(LevelSet, self).__init__(
dim=dim,
default_point_type=embedding_space.default_point_type,
default_coords_type=default_coords_type,
**kwargs
)
self.embedding_space = embedding_space
self.embedding_metric = embedding_space.metric
self.submersion = submersion
if isinstance(value, float):
value = gs.array(value)
self.value = value
self.tangent_submersion = tangent_submersion
def belongs(self, point, atol=gs.atol):
"""Evaluate if a point belongs to the manifold.
Parameters
----------
point : array-like, shape=[..., dim]
Point to evaluate.
atol : float
Absolute tolerance.
Optional, default: backend atol.
Returns
-------
belongs : array-like, shape=[...,]
Boolean evaluating if point belongs to the manifold.
"""
belongs = self.embedding_space.belongs(point, atol)
if not gs.any(belongs):
return belongs
value = self.value
constraint = gs.isclose(self.submersion(point), value, atol=atol)
if value.ndim == 2:
constraint = gs.all(constraint, axis=(-2, -1))
elif value.ndim == 1:
constraint = gs.all(constraint, axis=-1)
return gs.logical_and(belongs, constraint)
def is_tangent(self, vector, base_point, atol=gs.atol):
"""Check whether the vector is tangent at base_point.
Parameters
----------
vector : array-like, shape=[..., dim]
Vector.
base_point : array-like, shape=[..., dim]
Point on the manifold.
atol : float
Absolute tolerance.
Optional, default: backend atol.
Returns
-------
is_tangent : bool
Boolean denoting if vector is a tangent vector at the base point.
"""
belongs = self.embedding_space.is_tangent(vector, base_point, atol)
tangent_sub_applied = self.tangent_submersion(vector, base_point)
constraint = gs.isclose(tangent_sub_applied, 0.0, atol=atol)
value = self.value
if value.ndim == 2:
constraint = gs.all(constraint, axis=(-2, -1))
elif value.ndim == 1:
constraint = gs.all(constraint, axis=-1)
return gs.logical_and(belongs, constraint)
def intrinsic_to_extrinsic_coords(self, point_intrinsic):
"""Convert from intrinsic to extrinsic coordinates.
Parameters
----------
point_intrinsic : array-like, shape=[..., dim]
Point in the embedded manifold in intrinsic coordinates.
Returns
-------
point_extrinsic : array-like, shape=[..., dim_embedding]
Point in the embedded manifold in extrinsic coordinates.
"""
raise NotImplementedError("intrinsic_to_extrinsic_coords is not implemented.")
def extrinsic_to_intrinsic_coords(self, point_extrinsic):
"""Convert from extrinsic to intrinsic coordinates.
Parameters
----------
point_extrinsic : array-like, shape=[..., dim_embedding]
Point in the embedded manifold in extrinsic coordinates,
i. e. in the coordinates of the embedding manifold.
Returns
-------
        point_intrinsic : array-like, shape=[..., dim]
Point in the embedded manifold in intrinsic coordinates.
"""
raise NotImplementedError("extrinsic_to_intrinsic_coords is not implemented.")
@abc.abstractmethod
def projection(self, point):
"""Project a point in embedding manifold on embedded manifold.
Parameters
----------
point : array-like, shape=[..., dim_embedding]
Point in embedding manifold.
Returns
-------
projected : array-like, shape=[..., dim_embedding]
Projected point.
"""
@abc.abstractmethod
def to_tangent(self, vector, base_point):
"""Project a vector to a tangent space of the manifold.
Parameters
----------
vector : array-like, shape=[..., dim]
Vector.
base_point : array-like, shape=[..., dim]
Point on the manifold.
Returns
-------
tangent_vec : array-like, shape=[..., dim]
Tangent vector at base point.
"""
class OpenSet(Manifold, abc.ABC):
"""Class for manifolds that are open sets of a vector space.
In this case, tangent vectors are identified with vectors of the ambient
space.
Parameters
----------
dim: int
Dimension of the manifold. It is often the same as the ambient space
dimension but may differ in some cases.
ambient_space: VectorSpace
Ambient space that contains the manifold.
"""
def __init__(self, dim, ambient_space, **kwargs):
if "default_point_type" not in kwargs:
kwargs["default_point_type"] = ambient_space.default_point_type
if "shape" not in kwargs:
kwargs["shape"] = ambient_space.shape
super().__init__(dim=dim, **kwargs)
self.ambient_space = ambient_space
def is_tangent(self, vector, base_point=None, atol=gs.atol):
"""Check whether the vector is tangent at base_point.
Parameters
----------
vector : array-like, shape=[..., dim]
Vector.
base_point : array-like, shape=[..., dim]
Point on the manifold.
atol : float
Absolute tolerance.
Optional, default: backend atol.
Returns
-------
is_tangent : bool
Boolean denoting if vector is a tangent vector at the base point.
"""
return self.ambient_space.belongs(vector, atol)
def to_tangent(self, vector, base_point=None):
"""Project a vector to a tangent space of the manifold.
Parameters
----------
vector : array-like, shape=[..., dim]
Vector.
base_point : array-like, shape=[..., dim]
Point on the manifold.
Returns
-------
tangent_vec : array-like, shape=[..., dim]
Tangent vector at base point.
"""
return self.ambient_space.projection(vector)
def random_point(self, n_samples=1, bound=1.0):
"""Sample random points on the manifold.
If the manifold is compact, a uniform distribution is used.
Parameters
----------
n_samples : int
Number of samples.
Optional, default: 1.
bound : float
Bound of the interval in which to sample for non compact manifolds.
Optional, default: 1.
Returns
-------
samples : array-like, shape=[..., {dim, [n, n]}]
Points sampled on the hypersphere.
"""
sample = self.ambient_space.random_point(n_samples, bound)
return self.projection(sample)
@abc.abstractmethod
def projection(self, point):
"""Project a point in ambient manifold on manifold.
Parameters
----------
point : array-like, shape=[..., dim]
Point in ambient manifold.
Returns
-------
projected : array-like, shape=[..., dim]
Projected point.
"""
|
the-stack_0_18776 | #! /usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
This module monitors the status of system components and displays an overall health indicator.
"""
#from Tkinter import *
from Tkinter import Tk
from Tkinter import Frame
from std_msgs.msg import Bool
import time
import threading
import rospy
g_mount_status = False
g_image_status = False
g_can_status = False
g_ins_status = False
g_rtk_status = False
g_est_status = False
g_act_status = False
def est_callback(data):
"""car_control_estimate_callback function."""
global g_est_status
g_est_status = data.data
rospy.loginfo("received est" + str(g_est_status))
def act_callback(data):
"""car_act_callback function."""
global g_act_status
g_act_status = data.data
rospy.loginfo("received act " + str(g_act_status))
def ins_callback(data):
"""inspva_callback function."""
global g_ins_status
g_ins_status = data.data
rospy.loginfo("received ins " + str(g_ins_status))
def rtk_callback(data):
"""rtk_status_callback function."""
global g_rtk_status
g_rtk_status = data.data
rospy.loginfo("received rtk" + str(g_rtk_status))
def mount_callback(data):
"""disk_mount_status_callback function."""
global g_mount_status
g_mount_status = data.data
rospy.loginfo("received mount " + str(g_mount_status))
def image_callback(data):
"""image_receive_status_callback function."""
global g_image_status
g_image_status = data.data
rospy.loginfo("received image " + str(g_image_status))
def can_callback(data):
"""can_status_callback function."""
global g_can_status
g_can_status = data.data
rospy.loginfo("received can " + str(g_can_status))
def work():
"""A monitor is initialized to monitor all import status."""
global g_mount_status
global g_image_status
global g_can_status
global g_rtk_status
global g_ins_status
global g_est_status
global g_act_status
is_show = rospy.get_param("~is_show", False)
rospy.loginfo("~is show " + str(is_show))
try:
while True:
if (g_rtk_status and g_can_status and g_image_status
and (is_show or g_mount_status) and (not is_show
or (g_est_status
and g_act_status
and g_ins_status))):
frame['bg'] = '#00FF00'
rospy.loginfo("all true")
else:
frame['bg'] = '#FF0000'
rospy.loginfo("not all true")
if is_show:
rospy.loginfo(
"image " +
str(g_image_status) +
" can " +
str(g_can_status) +
" rtk " +
str(g_rtk_status) +
" est " +
str(g_est_status) +
" act " +
str(g_act_status) +
" ins " +
str(g_ins_status))
else:
rospy.loginfo(
"image " +
str(g_image_status) +
" can " +
str(g_can_status) +
" rtk " +
str(g_rtk_status) +
" mount " +
str(g_mount_status))
time.sleep(1)
except KeyboardInterrupt:
exit(0)
rospy.init_node('system_check', anonymous=True)
mount_sub = rospy.Subscriber(
'/system_info/mount',
Bool,
mount_callback,
queue_size=1,
buff_size=1024 * 1024 * 8)
can_sub = rospy.Subscriber(
'/system_info/can',
Bool,
can_callback,
queue_size=1,
buff_size=1024 * 1024 * 8)
image_sub = rospy.Subscriber(
'/system_info/image',
Bool,
image_callback,
queue_size=1,
buff_size=1024 * 1024 * 8)
ins_sub = rospy.Subscriber(
'/system_info/ins',
Bool,
ins_callback,
queue_size=1,
buff_size=1024 * 1024 * 8)
rtk_sub = rospy.Subscriber(
'/system_info/rtk',
Bool,
rtk_callback,
queue_size=1,
buff_size=1024 * 1024 * 8)
est_sub = rospy.Subscriber(
'/system_info/est',
Bool,
est_callback,
queue_size=1,
buff_size=1024 * 1024 * 8)
act_sub = rospy.Subscriber(
'/system_info/act',
Bool,
act_callback,
queue_size=1,
buff_size=1024 * 1024 * 8)
root = Tk(className='system_check')
frame = Frame(root, width=1000, height=1000)
frame.pack()
th = threading.Thread(target=work)
th.setDaemon(True)
th.start()
root.mainloop()
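# Usage sketch (illustrative, not part of the original script): the frame above
# only turns green when every subscribed /system_info/* topic reports True. A
# quick way to exercise the monitor from another shell is to publish Bool
# messages with the standard ROS CLI, e.g.:
#
#   rostopic pub /system_info/can std_msgs/Bool "data: true" -r 1
#   rostopic pub /system_info/image std_msgs/Bool "data: true" -r 1
#   rostopic pub /system_info/rtk std_msgs/Bool "data: true" -r 1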
|
the-stack_0_18779 | """
Loader for PFS spectrum files.
https://github.com/Subaru-PFS/datamodel/blob/master/datamodel.txt
"""
import os
import re
from astropy.io import fits
from astropy.units import Unit
from astropy.nddata import StdDevUncertainty
import numpy as np
from ...spectra import Spectrum1D
from ..registers import data_loader
from ..parsing_utils import _fits_identify_by_name, read_fileobj_or_hdulist
__all__ = ['identify_pfs_spec', 'pfs_spec_loader']
# This RE matches the file name pattern defined in Subaru-PFS' datamodel.txt :
# "pfsObject-%05d-%s-%3d-%08x-%02d-0x%08x.fits" % (tract, patch, catId, objId,
# nVisit % 100, pfsVisitHash)
_spec_pattern = re.compile(r'pfsObject-(?P<tract>\d{5})-(?P<patch>.{3})-'
r'(?P<catId>\d{3})-(?P<objId>\d{8})-'
r'(?P<nVisit>\d{2})-(?P<pfsVisitHash>0x\w{8})'
r'\.fits')
def identify_pfs_spec(origin, *args, **kwargs):
"""
Check whether given file is FITS and name matches `_spec_pattern`.
"""
return _fits_identify_by_name(origin, *args, pattern=_spec_pattern)
@data_loader(label="Subaru-pfsObject", identifier=identify_pfs_spec,
extensions=['fits'])
def pfs_spec_loader(file_obj, **kwargs):
"""
Loader for PFS combined spectrum files.
Parameters
----------
file_obj : str or file-like
FITS file name or object (provided from name by Astropy I/O Registry).
Returns
-------
data : Spectrum1D
The spectrum that is represented by the data in this table.
"""
    # This will fail for file-like objects without a 'name' property, such as `bz2.BZ2File`;
    # a workaround is needed (or better yet, a scheme to parse the `meta` items from the header).
if isinstance(file_obj, str):
file_name = file_obj
else:
file_name = file_obj.name
m = _spec_pattern.match(os.path.basename(file_name))
with read_fileobj_or_hdulist(file_obj, **kwargs) as hdulist:
header = hdulist[0].header
meta = {'header': header,
'tract': m['tract'],
'patch': m['patch'],
'catId': m['catId'],
'objId': m['objId'],
'nVisit': m['nVisit'],
'pfsVisitHash': m['pfsVisitHash']}
# spectrum is in HDU 2
data = hdulist[2].data['flux']
unit = Unit('nJy')
error = hdulist[2].data['fluxVariance']
uncertainty = StdDevUncertainty(np.sqrt(error))
wave = hdulist[2].data['lambda']
wave_unit = Unit('nm')
mask = hdulist[2].data['mask'] != 0
return Spectrum1D(flux=data * unit,
spectral_axis=wave * wave_unit,
uncertainty=uncertainty,
meta=meta,
mask=mask)
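# Usage sketch (illustrative): once this loader is registered, a pfsObject file
# can be read through the Astropy I/O registry. The file name below is
# hypothetical but follows the datamodel pattern matched by _spec_pattern.
#
# from specutils import Spectrum1D
#
# spec = Spectrum1D.read('pfsObject-00001-1,1-001-00000012-01-0x0123abcd.fits',
#                        format='Subaru-pfsObject')
# print(spec.spectral_axis.unit, spec.flux.unit, spec.meta['objId'])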
|
the-stack_0_18781 | from django.db import models
from django.core.validators import MinValueValidator
from django.db.models import Exists, F, OuterRef, Sum, Value
from users.models import User
class RecipeQuerySet(models.QuerySet):
def opt_annotations(self, user):
if user.is_anonymous:
return self.annotate(
is_favorited=Value(
False, output_field=models.BooleanField()
),
is_in_shopping_cart=Value(
False, output_field=models.BooleanField()
)
)
return self.annotate(
is_favorited=Exists(FavorRecipe.objects.filter(
author=user, recipes_id=OuterRef('pk')
)),
is_in_shopping_cart=Exists(ShoppingList.objects.filter(
author=user, recipes_id=OuterRef('pk')
))
)
class Ingredient(models.Model):
name = models.CharField(
max_length=200,
verbose_name='Название'
)
measurement_unit = models.CharField(
max_length=16,
verbose_name='Единица измерения'
)
class Meta:
verbose_name = 'Ингредиент'
verbose_name_plural = 'Ингредиенты'
ordering = ('pk', )
def __str__(self):
return f'{self.name}, {self.measurement_unit}'
class Tag(models.Model):
name = models.CharField(
max_length=200,
verbose_name='Тэг'
)
color = models.CharField(
max_length=200,
verbose_name='Цвет',
null=True
)
slug = models.SlugField(
unique=True,
max_length=200,
verbose_name='Короткое имя'
)
class Meta:
verbose_name = 'Тэг'
verbose_name_plural = 'Тэги'
ordering = ('pk', )
def __str__(self):
return self.name
class Recipe(models.Model):
name = models.CharField(max_length=200, verbose_name='Название')
image = models.ImageField(
upload_to='media/',
blank=True, null=True,
verbose_name='Картинка рецепта'
)
author = models.ForeignKey(
User,
related_name='recipes',
verbose_name='Автор',
on_delete=models.CASCADE
)
ingredients = models.ManyToManyField(
Ingredient,
verbose_name='Ингредиенты',
through='RecipeComponent',
)
text = models.TextField(
verbose_name='Текст',
null=True
)
tags = models.ManyToManyField(
Tag,
related_name='recipes',
verbose_name='Тэги'
)
cooking_time = models.PositiveSmallIntegerField(
verbose_name='Время приготовления',
validators=[
MinValueValidator(
limit_value=0,
message='Время приготовления - неотрицательное значение'
)
]
)
pub_date = models.DateTimeField(
auto_now_add=True,
verbose_name='Дата создания'
)
objects = RecipeQuerySet.as_manager()
class Meta:
verbose_name = 'Рецепт'
verbose_name_plural = 'Рецепты'
ordering = ('-pk', )
def __str__(self):
return self.name[:32]
class ShoppingList(models.Model):
recipes = models.ForeignKey(
Recipe,
on_delete=models.CASCADE,
verbose_name='Рецепт',
related_name='shop_list'
)
author = models.ForeignKey(
User,
on_delete=models.CASCADE,
verbose_name='Пользователь',
related_name='author'
)
class Meta:
verbose_name = 'Рецепт в корзине'
verbose_name_plural = 'Рецепты в корзине'
constraints = [
models.UniqueConstraint(
fields=['author', 'recipes'],
name='shopping_author_recipes_unique'
)
]
ordering = ('-pk', )
def __str__(self):
return f'{self.recipes.name} в корзине у {self.author.username}'
class FavorRecipe(models.Model):
recipes = models.ForeignKey(
Recipe,
on_delete=models.CASCADE,
verbose_name='Избранные рецепты',
related_name='favorite_recipes',
null=True
)
author = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name='user_favorites',
verbose_name='Пользователь'
)
class Meta:
verbose_name = 'Избранное'
verbose_name_plural = 'Избранное'
constraints = [
models.UniqueConstraint(
name='favorite_author_unique_recipes',
fields=['author', 'recipes']
)
]
ordering = ('-pk', )
def __str__(self):
return f'{self.recipes.name} в избранном у {self.author.username}'
class RecipeComponentQuerySet(models.QuerySet):
def shop_list(self, user):
qset = self.filter(recipe__shop_list__author=user).values(
'ingredient', 'ingredient__name', 'ingredient__measurement_unit'
).order_by('ingredient').annotate(
sum=Sum('amount'), name=F('ingredient__name'),
unit=F('ingredient__measurement_unit')
)
return qset
class RecipeComponent(models.Model):
ingredient = models.ForeignKey(
Ingredient,
on_delete=models.CASCADE,
related_name='recipe_ingredient',
verbose_name='Ингредиенты'
)
recipe = models.ForeignKey(
Recipe,
on_delete=models.CASCADE,
related_name='component_recipes',
verbose_name='Рецепт'
)
amount = models.PositiveSmallIntegerField(
verbose_name='Количество',
)
objects = RecipeComponentQuerySet.as_manager()
class Meta:
verbose_name = 'Ингредиент в рецепте'
verbose_name_plural = 'Ингредиенты в рецепте'
constraints = [
models.UniqueConstraint(
name='recipe_unique_component',
fields=['ingredient', 'recipe']
)
]
ordering = ('-pk', )
def __str__(self):
return self.ingredient.name
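# Usage sketch (illustrative): typical queries enabled by the custom querysets
# above. Assumes a configured Django project with these models migrated and a
# `user` instance at hand.
#
# favorites = Recipe.objects.opt_annotations(user).filter(is_favorited=True)
# shopping = RecipeComponent.objects.shop_list(user)
# for item in shopping:
#     print(item['name'], item['sum'], item['unit'])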
|
the-stack_0_18783 | from . import main
from flask import render_template, abort, redirect, url_for, request, flash
from ..requests import get_quote
from ..models import User, BlogPost, Comment
from .forms import UpdateUserProfile, BlogPostForm, CommentForm
from flask_login import login_required, current_user
from .. import photos,db
from datetime import datetime
from ..email import mailer
from dateutil.parser import parse
@main.route('/')
def index():
'''Main index route'''
quotes = get_quote()
latest_blogs = BlogPost.query.order_by(db.desc(BlogPost.posted)).first()
all_blogs = BlogPost.query.order_by(db.desc(BlogPost.posted)).all()
return render_template('index.html', quotes = quotes, latest = latest_blogs, all = all_blogs)
@main.route('/profile/<uname>')
def profile(uname):
'''Route to the User Profile'''
user = User.query.filter_by(username = uname).first()
posts = BlogPost.query.filter_by(user_id = user.id).all()
if user is None:
abort(404)
return render_template('profile/profile.html', user = user, posts = posts)
@main.route('/profile/<uname>/update', methods=['GET','POST'])
@login_required
def update_profile(uname):
'''Function to update the user profile'''
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateUserProfile()
if form.validate_on_submit():
if request.method == 'POST':
update_form = request.form
bio = update_form.get('bio')
if not bio:
flash('User Bio is required')
return redirect(url_for('main.update_profile'))
user.bio = form.bio.data
user.save_user()
return redirect(url_for('.profile', uname = user.username))
return render_template('profile/update.html', form = form)
@main.route('/profile/<uname>/update/pic', methods = ['POST'])
@login_required
def update_picture(uname):
'''Function for user to update profile picture'''
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
user.save_user()
return redirect(url_for('.profile',uname = uname))
@main.route('/subscribe/<email>')
@login_required
def subscribe(email):
'''Function to subscribe to mailing list'''
user = User.query.filter_by(email = email).first()
user.subscribed = True
user.save_user()
flash('Thank you for subscribing!')
return redirect(url_for('.index'))
@main.route('/post/new/')
@main.route('/post/new/<uname>', methods = ['GET', 'POST'])
@login_required
def new_post(uname):
'''Post a blog'''
user = User.query.filter_by(username = uname).first()
if user is None:
flash('Sign in to post a pitch')
return redirect(url_for('auth.login'))
abort(404)
if user.role == 'user':
flash('Only Writers can create posts')
return redirect(url_for('main.index'))
abort(404)
form = BlogPostForm()
if form.validate_on_submit():
if request.method == 'POST':
blog_form = request.form
blog_title = blog_form.get('title')
if not blog_title:
flash('Blog Post Title MUST be provided!')
return redirect(url_for('.new_post', uname =user.username))
blog_content = blog_form.get('blogcontent')
if not blog_content:
flash('Blog Post Content MUST be provided!')
return redirect(url_for('.new_post', uname = user.username))
blog = BlogPost(title = form.title.data, content = form.blogcontent.data, user_id = current_user.id)
blog.save_post()
users = User.query.all()
for user in users:
if user.subscribed == True:
blog = BlogPost.query.filter_by(user_id = user.id).first()
mailer('New Post Notification!!!', 'email/notification', user.email, user = user, blog = blog)
return redirect(url_for('.index'))
return render_template('post/post.html', form = form)
@main.route('/post/delete/<post_id>/<user_id>', methods = ['GET', 'POST'])
@login_required
def delete_post(post_id, user_id):
'''Function to delete a post'''
post = BlogPost.query.filter_by(id = post_id).first()
user = User.query.filter_by(id = user_id).first()
if user is None:
abort(404)
if user.role == 'user':
flash('Only Writers can delete posts')
return redirect(url_for('main.index'))
abort(404)
if user.id != post.user_id:
flash('Only the original Writer can delete their posts')
return redirect(url_for('.view_post', post_id = post_id))
abort(404)
post.delete_post()
flash('Post successfully deleted')
return redirect(url_for('.index'))
@main.route('/post/update/<post_id>/<user_id>', methods = ['GET', 'POST'])
@login_required
def update_post(post_id, user_id):
'''Function to update the Post'''
post_fetched = BlogPost.query.filter_by(id = post_id).first()
user = User.query.filter_by(id = user_id).first()
print(post_fetched)
if user is None:
abort(404)
if user.role == 'user':
flash('Only Writers can Update posts')
        return redirect(url_for('main.index'))
abort(404)
if user.id != post_fetched.user_id:
flash('Only the original Writer can delete their posts')
return redirect(url_for('.view_post', post_id = post_id))
abort(404)
form = BlogPostForm()
if form.validate_on_submit():
if request.method == 'POST':
blog_form = request.form
blog_title = blog_form.get('title')
if not blog_title:
flash('Blog Post Title MUST be provided!')
return redirect(url_for('.new_post', uname =user.username))
blog_content = blog_form.get('blogcontent')
if not blog_content:
flash('Blog Post Content MUST be provided!')
return redirect(url_for('.new_post', uname = user.username))
        post_fetched.title = form.title.data
        post_fetched.content = form.blogcontent.data
        post_fetched.user_id = current_user.id
        post_fetched.updated = datetime.utcnow()
db.session.commit()
return redirect(url_for('main.index'))
return render_template('post/post.html', form = form)
@main.route('/post/comment/<post_id>', methods = ['GET', 'POST'])
@login_required
def post_comment(post_id):
'''Function to post a comment on a post'''
post = BlogPost.query.filter_by(id = post_id).first()
form = CommentForm()
if form.validate_on_submit():
if request.method == 'POST':
comment_form = request.form
form_content = comment_form.get('content')
if not form_content:
flash('Comment must be provided')
return redirect(url_for('.view_post', post_id = post.id))
comment = Comment(content = form.content.data, post_id = post_id)
comment.save_comment()
return redirect(url_for('.view_post', post_id = post_id))
return render_template('post/comment.html', form = form, post = post)
@main.route('/post/comment/delete/<post_id>/<user_id>')
@login_required
def delete_comment(post_id, user_id):
'''Function to delete a comment'''
post = BlogPost.query.filter_by(id = post_id).first()
comment = Comment.query.filter_by(post_id = post.id).first()
user = User.query.filter_by(id = user_id).first()
if user is None:
abort(404)
if user.role == 'user':
flash('Only Writers can delete comments on posts')
return redirect(url_for('.view_post', post_id = post_id))
abort(404)
if user.id != post.user_id:
flash('Only the original Writer can delete comments on posts')
return redirect(url_for('.view_post', post_id = post_id))
abort(404)
    comment.delete_comment()
    flash('Comment successfully deleted')
    return redirect(url_for('.view_post', post_id = post_id))
@main.route('/post/view/<post_id>')
def view_post(post_id):
'''Function to view a specific post'''
post = BlogPost.query.filter_by(id = post_id).first()
comments = Comment.query.filter_by(post_id = post_id).all()
return render_template('post/specific_post.html', comments = comments, post = post)
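# Usage sketch (illustrative): exercising these views with Flask's test client.
# The `create_app` factory name and the 'testing' config key are assumptions
# about the surrounding project, not something defined in this module.
#
# from app import create_app
#
# app = create_app('testing')
# with app.test_client() as client:
#     assert client.get('/').status_code == 200
#     assert client.get('/post/view/1').status_code == 200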
|
the-stack_0_18785 | import json
import boto3
client = boto3.client('events')
def main(event, context):
print("Pedido Incluir: ", event["body"])
event_response = client.put_events(
Entries=[
{
'Source': 'PedidoIncluido',
'DetailType': 'Pedido Incluido',
'Detail': event["body"],
'EventBusName': 'ecommerce-event-bridge-bus'
},
]
)
response = {
"statusCode": 200,
"body": json.dumps(event_response)
}
return response
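# Local invocation sketch (illustrative): requires AWS credentials and an
# existing 'ecommerce-event-bridge-bus' event bus; the order payload below is
# hypothetical.
if __name__ == '__main__':
    sample_event = {"body": json.dumps({"pedidoId": 1, "itens": ["produto-1"]})}
    print(main(sample_event, None))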
|
the-stack_0_18792 | from django.urls import reverse
from seahub.test_utils import BaseTestCase
class RepoHistoryViewTest(BaseTestCase):
def setUp(self):
self.login_as(self.user)
self.url = reverse('repo_history_view', args=[self.repo.id]) + '?commit_id=' + self.repo.head_cmmt_id
def test_can_render(self):
resp = self.client.get(self.url)
self.assertEqual(200, resp.status_code)
self.assertTemplateUsed(resp, 'repo_history_view.html')
assert resp.context['user_perm'] == 'rw'
|
the-stack_0_18795 | import os
import re
import subprocess
import sys
import tempfile
from .output_helpers import Logger, normal_print, red_print, yellow_print
from .web_socket_client import WebSocketClient
class GDBHelper:
def __init__(self, toolchain_prefix, websocket_client, elf_file, port, baud_rate):
# type: (str, WebSocketClient, str, int, int) -> None
self._gdb_buffer = b'' # type: bytes
self._gdb_exit = False # type: bool
self.toolchain_prefix = toolchain_prefix
self.websocket_client = websocket_client
self.elf_file = elf_file
self.port = port
self.baud_rate = baud_rate
@property
def gdb_buffer(self): # type: () -> bytes
return self._gdb_buffer
@gdb_buffer.setter
def gdb_buffer(self, value): # type: (bytes) -> None
self._gdb_buffer = value
@property
def gdb_exit(self): # type: () -> bool
return self._gdb_exit
@gdb_exit.setter
def gdb_exit(self, value): # type: (bool) -> None
self._gdb_exit = value
def run_gdb(self):
# type: () -> None
normal_print('')
try:
cmd = ['%sgdb' % self.toolchain_prefix,
'-ex', 'set serial baud %d' % self.baud_rate,
'-ex', 'target remote %s' % self.port,
self.elf_file]
            # Here we handle GDB as a process
# Open GDB process
try:
process = subprocess.Popen(cmd, cwd='.')
except KeyboardInterrupt:
pass
            # We ignore the Ctrl+C interrupt from the external process and wait until GDB has finished.
while True:
try:
process.wait()
break
except KeyboardInterrupt:
pass # We ignore the Ctrl+C
self.gdb_exit = True
except OSError as e:
red_print('%s: %s' % (' '.join(cmd), e))
except KeyboardInterrupt:
pass # happens on Windows, maybe other OSes
finally:
try:
# on Linux, maybe other OSes, gdb sometimes seems to be alive even after wait() returns...
process.terminate()
except Exception: # noqa
pass
try:
# also on Linux, maybe other OSes, gdb sometimes exits uncleanly and breaks the tty mode
subprocess.call(['stty', 'sane'])
except Exception: # noqa
pass # don't care if there's no stty, we tried...
def check_gdb_stub_trigger(self, line):
# type: (bytes) -> bool
line = self.gdb_buffer + line
self.gdb_buffer = b''
m = re.search(b'\\$(T..)#(..)', line) # look for a gdb "reason" for a break
if m is not None:
try:
chsum = sum(ord(bytes([p])) for p in m.group(1)) & 0xFF
calc_chsum = int(m.group(2), 16)
except ValueError: # payload wasn't valid hex digits
return False
if chsum == calc_chsum:
if self.websocket_client:
yellow_print('Communicating through WebSocket')
self.websocket_client.send({'event': 'gdb_stub',
'port': self.port,
'prog': self.elf_file})
yellow_print('Waiting for debug finished event')
self.websocket_client.wait([('event', 'debug_finished')])
yellow_print('Communications through WebSocket is finished')
else:
return True
else:
red_print('Malformed gdb message... calculated checksum %02x received %02x' % (chsum, calc_chsum))
return False
def process_panic_output(self, panic_output, logger, target): # type: (bytes, Logger, str) -> None
panic_output_decode_script = os.path.join(os.path.dirname(__file__), '..', 'gdb_panic_server.py')
panic_output_file = None
try:
# On Windows, the temporary file can't be read unless it is closed.
# Set delete=False and delete the file manually later.
with tempfile.NamedTemporaryFile(mode='wb', delete=False) as panic_output_file:
panic_output_file.write(panic_output)
panic_output_file.flush()
cmd = [self.toolchain_prefix + 'gdb',
'--batch', '-n',
self.elf_file,
'-ex', "target remote | \"{python}\" \"{script}\" --target {target} \"{output_file}\""
.format(python=sys.executable,
script=panic_output_decode_script,
target=target,
output_file=panic_output_file.name),
'-ex', 'bt']
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
yellow_print('\nBacktrace:\n\n')
logger.print(output) # noqa: E999
except subprocess.CalledProcessError as e:
yellow_print('Failed to run gdb_panic_server.py script: {}\n{}\n\n'.format(e, e.output))
logger.print(panic_output)
finally:
if panic_output_file is not None:
try:
os.unlink(panic_output_file.name)
except OSError as e:
yellow_print('Couldn\'t remove temporary panic output file ({})'.format(e))
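# Usage sketch (illustrative): wiring the helper into a serial monitor loop.
# The toolchain prefix, ELF path and port below are hypothetical;
# websocket_client may be None when no IDE front end is attached.
#
# helper = GDBHelper(toolchain_prefix='xtensa-esp32-elf-', websocket_client=None,
#                    elf_file='build/app.elf', port='/dev/ttyUSB0', baud_rate=115200)
# if helper.check_gdb_stub_trigger(serial_line):
#     helper.run_gdb()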
|
the-stack_0_18796 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from sys import version_info
PY3 = version_info[0] == 3
_DEFAULT_CHARSET = "utf-8"
if PY3:
bytes = bytes
unicode = str
class_types = type
def unicode_compatible(cls):
return cls
else:
bytes = str
unicode = unicode
from types import ClassType
class_types = (ClassType, type)
def unicode_compatible(cls):
cls.__unicode__ = cls.__str__
if hasattr(cls, "__bytes__"):
cls.__str__ = cls.__bytes__
delattr(cls, "__bytes__")
else:
cls.__str__ = lambda self: self.__unicode__().encode(_DEFAULT_CHARSET)
return cls
string_types = (bytes, unicode,)
unicode_compatible.__doc__ = """
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
def to_bytes(value, encoding=_DEFAULT_CHARSET, strict=True):
try:
if isinstance(value, bytes):
return value
elif isinstance(value, unicode):
return value.encode(encoding)
else:
# try encode instance to bytes
return _instance_to_bytes(value, encoding)
except UnicodeError:
if strict:
raise
# recover from codec error and use 'repr' function
return to_bytes(repr(value), encoding)
def to_unicode(value, encoding=_DEFAULT_CHARSET, strict=True):
try:
if isinstance(value, unicode):
return value
elif isinstance(value, bytes):
return value.decode(encoding)
else:
# try decode instance to unicode
return _instance_to_unicode(value, encoding)
except UnicodeError:
if strict:
raise
# recover from codec error and use 'repr' function
return to_unicode(repr(value), encoding)
# converts value to native string
to_string = to_unicode if PY3 else to_bytes
def _instance_to_bytes(instance, encoding):
if PY3:
if hasattr(instance, "__bytes__"):
return bytes(instance)
elif hasattr(instance, "__str__"):
return unicode(instance).encode(encoding)
else:
if hasattr(instance, "__str__"):
return bytes(instance)
elif hasattr(instance, "__unicode__"):
return unicode(instance).encode(encoding)
return to_bytes(repr(instance), encoding)
def _instance_to_unicode(instance, encoding):
if PY3:
if hasattr(instance, "__str__"):
return unicode(instance)
elif hasattr(instance, "__bytes__"):
return bytes(instance).decode(encoding)
else:
if hasattr(instance, "__unicode__"):
return unicode(instance)
elif hasattr(instance, "__str__"):
return bytes(instance).decode(encoding)
return to_unicode(repr(instance), encoding)
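if __name__ == "__main__":
    # Minimal self-check of the helpers above (illustrative; the Greeting class
    # is a throwaway example, not part of the public API).
    @unicode_compatible
    class Greeting(object):
        def __str__(self):
            return u"héllo"
    g = Greeting()
    assert to_bytes(g) == to_unicode(g).encode(_DEFAULT_CHARSET)
    assert to_string(g) == to_string(u"héllo")
    print(to_unicode(b"caf\xc3\xa9"), to_bytes(u"café"))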
|
the-stack_0_18797 | # coding: utf-8
"""Tests for profile-related functions.
Currently only the startup-dir functionality is tested, but more tests should
be added for:
* ipython profile create
* ipython profile list
* ipython profile create --parallel
* security dir permissions
Authors
-------
* MinRK
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import shutil
import sys
import tempfile
from unittest import TestCase
import nose.tools as nt
from IPython.core.profileapp import list_profiles_in, list_bundled_profiles
from IPython.core.profiledir import ProfileDir
from IPython.testing import decorators as dec
from IPython.testing import tools as tt
from IPython.utils import py3compat
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
TMP_TEST_DIR = tempfile.mkdtemp()
HOME_TEST_DIR = os.path.join(TMP_TEST_DIR, "home_test_dir")
IP_TEST_DIR = os.path.join(HOME_TEST_DIR,'.ipython')
#
# Setup/teardown functions/decorators
#
def setup():
"""Setup test environment for the module:
- Adds dummy home dir tree
"""
# Do not mask exceptions here. In particular, catching WindowsError is a
# problem because that exception is only defined on Windows...
os.makedirs(IP_TEST_DIR)
def teardown():
"""Teardown test environment for the module:
- Remove dummy home dir tree
"""
# Note: we remove the parent test dir, which is the root of all test
# subdirs we may have created. Use shutil instead of os.removedirs, so
# that non-empty directories are all recursively removed.
shutil.rmtree(TMP_TEST_DIR)
#-----------------------------------------------------------------------------
# Test functions
#-----------------------------------------------------------------------------
def win32_without_pywin32():
if sys.platform == 'win32':
try:
import pywin32
except ImportError:
return True
return False
class ProfileStartupTest(TestCase):
def setUp(self):
# create profile dir
self.pd = ProfileDir.create_profile_dir_by_name(IP_TEST_DIR, 'test')
self.options = ['--ipython-dir', IP_TEST_DIR, '--profile', 'test']
self.fname = os.path.join(TMP_TEST_DIR, 'test.py')
def tearDown(self):
# We must remove this profile right away so its presence doesn't
# confuse other tests.
shutil.rmtree(self.pd.location)
def init(self, startup_file, startup, test):
# write startup python file
with open(os.path.join(self.pd.startup_dir, startup_file), 'w') as f:
f.write(startup)
# write simple test file, to check that the startup file was run
with open(self.fname, 'w') as f:
f.write(py3compat.doctest_refactor_print(test))
def validate(self, output):
tt.ipexec_validate(self.fname, output, '', options=self.options)
@dec.skipif(win32_without_pywin32(), "Test requires pywin32 on Windows")
def test_startup_py(self):
self.init('00-start.py', 'zzz=123\n',
py3compat.doctest_refactor_print('print zzz\n'))
self.validate('123')
@dec.skipif(win32_without_pywin32(), "Test requires pywin32 on Windows")
def test_startup_ipy(self):
self.init('00-start.ipy', '%profile\n', '')
self.validate('test')
def test_list_profiles_in():
# No need to remove these directories and files, as they will get nuked in
# the module-level teardown.
td = tempfile.mkdtemp(dir=TMP_TEST_DIR)
td = py3compat.str_to_unicode(td)
for name in ('profile_foo', 'profile_hello', 'not_a_profile'):
os.mkdir(os.path.join(td, name))
if dec.unicode_paths:
os.mkdir(os.path.join(td, 'profile_ünicode'))
with open(os.path.join(td, 'profile_file'), 'w') as f:
f.write("I am not a profile directory")
profiles = list_profiles_in(td)
# unicode normalization can turn u'ünicode' into u'u\0308nicode',
# so only check for *nicode, and that creating a ProfileDir from the
# name remains valid
found_unicode = False
for p in list(profiles):
if p.endswith('nicode'):
pd = ProfileDir.find_profile_dir_by_name(td, p)
profiles.remove(p)
found_unicode = True
break
if dec.unicode_paths:
nt.assert_true(found_unicode)
nt.assert_equal(set(profiles), set(['foo', 'hello']))
def test_list_bundled_profiles():
# This variable will need to be updated when a new profile gets bundled
bundled_true = ['cluster', 'math', 'pysh', 'sympy']
bundled = sorted(list_bundled_profiles())
nt.assert_equal(bundled, bundled_true)
|
the-stack_0_18799 | from __future__ import with_statement
from gensim import interfaces, utils
from my_gensim_dict import MyDictionary
from collect_champions_usage import ChampionsUsage
import logging
import json
logger = logging.getLogger('champion_usage_corpus')
class ChampionUsageCorpus(interfaces.CorpusABC):
def __init__(self, input=None):
super(ChampionUsageCorpus, self).__init__()
self.input = input
self.dictionary = MyDictionary()
self.metadata = False
if input is not None:
self.dictionary.add_documents(self.get_texts())
else:
logger.warning("No input document stream provided; assuming "
"dictionary will be initialized some other way.")
def getstream(self):
return utils.file_or_filename(self.input)
def get_texts(self):
"""
Iterate over the collection, yielding one document at a time. A document
is a sequence of words (strings) that can be fed into `Dictionary.doc2bow`.
Override this function to match your input (parse input files, do any
text preprocessing, lowercasing, tokenizing etc.). There will be no further
preprocessing of the words coming out of this function.
"""
# take a dictionary, make sure champion ids are properly encoded strings
        if not isinstance(self.input, ChampionsUsage):
raise TypeError("expecting ChampionsUsage")
for summno, doc in enumerate(self.input):
#remove total games played
if '0' in doc:
del doc['0']
yield doc
#check encoding ?
def __iter__(self):
"""
The function that defines a corpus.
Iterating over the corpus must yield sparse vectors, one for each document.
"""
for text in self.get_texts():
if self.metadata:
yield self.dictionary.doc2bow(text[0], allow_update=False), text[1]
else:
yield self.dictionary.doc2bow(text, allow_update=False)
def __len__(self):
if not hasattr(self, 'length'):
# cache the corpus length
if isinstance(self.input, ChampionsUsage):
ns = self.input.get_n_summoners
if ns is not None:
self.length = ns
else:
self.length = sum(1 for _ in self.get_texts())
return self.length
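# Usage sketch (illustrative): building the corpus from a ChampionsUsage stream
# and serializing it with gensim. The constructor arguments and file names are
# hypothetical.
#
# from gensim import corpora
#
# usage = ChampionsUsage('champions_usage.json')
# corpus = ChampionUsageCorpus(usage)
# corpora.MmCorpus.serialize('champion_usage.mm', corpus)
# corpus.dictionary.save('champion_usage.dict')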
|
the-stack_0_18800 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import numpy as np
from typing import Dict
import torch
from torch import nn
from detectron2.layers import ShapeSpec
from detectron2.structures import Boxes, Instances, pairwise_iou
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry
from ..backbone.resnet import BottleneckBlock, make_stage
from ..box_regression import Box2BoxTransform
from ..matcher import Matcher
from ..poolers import ROIPooler
from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals
from ..sampling import subsample_labels
from .box_head import build_box_head
from .fast_rcnn import FastRCNNOutputLayers, FastRCNNOutputs
from .keypoint_head import build_keypoint_head, keypoint_rcnn_inference, keypoint_rcnn_loss
from .mask_head import build_mask_head, mask_rcnn_inference, mask_rcnn_loss
ROI_HEADS_REGISTRY = Registry("ROI_HEADS")
ROI_HEADS_REGISTRY.__doc__ = """
Registry for ROI heads in a generalized R-CNN model.
ROIHeads take feature maps and region proposals, and
perform per-region computation.
The registered object will be called with `obj(cfg, input_shape)`.
The call is expected to return an :class:`ROIHeads`.
"""
logger = logging.getLogger(__name__)
def build_roi_heads(cfg, input_shape):
"""
Build ROIHeads defined by `cfg.MODEL.ROI_HEADS.NAME`.
"""
name = cfg.MODEL.ROI_HEADS.NAME
return ROI_HEADS_REGISTRY.get(name)(cfg, input_shape)
def select_foreground_proposals(proposals, bg_label):
"""
Given a list of N Instances (for N images), each containing a `gt_classes` field,
return a list of Instances that contain only instances with `gt_classes != -1 &&
gt_classes != bg_label`.
Args:
proposals (list[Instances]): A list of N Instances, where N is the number of
images in the batch.
bg_label: label index of background class.
Returns:
list[Instances]: N Instances, each contains only the selected foreground instances.
list[Tensor]: N boolean vector, correspond to the selection mask of
each Instances object. True for selected instances.
"""
assert isinstance(proposals, (list, tuple))
assert isinstance(proposals[0], Instances)
assert proposals[0].has("gt_classes")
fg_proposals = []
fg_selection_masks = []
for proposals_per_image in proposals:
gt_classes = proposals_per_image.gt_classes
fg_selection_mask = (gt_classes != -1) & (gt_classes != bg_label)
fg_idxs = fg_selection_mask.nonzero().squeeze(1)
fg_proposals.append(proposals_per_image[fg_idxs])
fg_selection_masks.append(fg_selection_mask)
return fg_proposals, fg_selection_masks
def select_proposals_with_visible_keypoints(proposals):
"""
Args:
proposals (list[Instances]): a list of N Instances, where N is the
number of images.
Returns:
proposals: only contains proposals with at least one visible keypoint.
Note that this is still slightly different from Detectron.
In Detectron, proposals for training keypoint head are re-sampled from
all the proposals with IOU>threshold & >=1 visible keypoint.
Here, the proposals are first sampled from all proposals with
IOU>threshold, then proposals with no visible keypoint are filtered out.
This strategy seems to make no difference on Detectron and is easier to implement.
"""
ret = []
all_num_fg = []
for proposals_per_image in proposals:
# If empty/unannotated image (hard negatives), skip filtering for train
if len(proposals_per_image) == 0:
ret.append(proposals_per_image)
continue
gt_keypoints = proposals_per_image.gt_keypoints.tensor
# #fg x K x 3
vis_mask = gt_keypoints[:, :, 2] >= 1
xs, ys = gt_keypoints[:, :, 0], gt_keypoints[:, :, 1]
proposal_boxes = proposals_per_image.proposal_boxes.tensor.unsqueeze(dim=1) # #fg x 1 x 4
kp_in_box = (
(xs >= proposal_boxes[:, :, 0])
& (xs <= proposal_boxes[:, :, 2])
& (ys >= proposal_boxes[:, :, 1])
& (ys <= proposal_boxes[:, :, 3])
)
selection = (kp_in_box & vis_mask).any(dim=1)
selection_idxs = torch.nonzero(selection).squeeze(1)
all_num_fg.append(selection_idxs.numel())
ret.append(proposals_per_image[selection_idxs])
storage = get_event_storage()
storage.put_scalar("keypoint_head/num_fg_samples", np.mean(all_num_fg))
return ret
class ROIHeads(torch.nn.Module):
"""
ROIHeads perform all per-region computation in an R-CNN.
It contains logic of cropping the regions, extract per-region features,
and make per-region predictions.
It can have many variants, implemented as subclasses of this class.
"""
def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
super(ROIHeads, self).__init__()
# fmt: off
self.batch_size_per_image = cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE
self.positive_sample_fraction = cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION
self.test_score_thresh = cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST
self.test_nms_thresh = cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST
self.test_detections_per_img = cfg.TEST.DETECTIONS_PER_IMAGE
self.in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
self.num_classes = cfg.MODEL.ROI_HEADS.NUM_CLASSES
self.proposal_append_gt = cfg.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT
self.feature_strides = {k: v.stride for k, v in input_shape.items()}
self.feature_channels = {k: v.channels for k, v in input_shape.items()}
self.cls_agnostic_bbox_reg = cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG
self.smooth_l1_beta = cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA
# fmt: on
# Matcher to assign box proposals to gt boxes
self.proposal_matcher = Matcher(
cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS,
cfg.MODEL.ROI_HEADS.IOU_LABELS,
allow_low_quality_matches=False,
)
# Box2BoxTransform for bounding box regression
self.box2box_transform = Box2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS)
def _sample_proposals(self, matched_idxs, matched_labels, gt_classes):
"""
Based on the matching between N proposals and M groundtruth,
sample the proposals and set their classification labels.
Args:
matched_idxs (Tensor): a vector of length N, each is the best-matched
gt index in [0, M) for each proposal.
matched_labels (Tensor): a vector of length N, the matcher's label
(one of cfg.MODEL.ROI_HEADS.IOU_LABELS) for each proposal.
gt_classes (Tensor): a vector of length M.
Returns:
Tensor: a vector of indices of sampled proposals. Each is in [0, N).
Tensor: a vector of the same length, the classification label for
each sampled proposal. Each sample is labeled as either a category in
[0, num_classes) or the background (num_classes).
"""
has_gt = gt_classes.numel() > 0
# Get the corresponding GT for each proposal
if has_gt:
gt_classes = gt_classes[matched_idxs]
# Label unmatched proposals (0 label from matcher) as background (label=num_classes)
gt_classes[matched_labels == 0] = self.num_classes
# Label ignore proposals (-1 label)
gt_classes[matched_labels == -1] = -1
else:
gt_classes = torch.zeros_like(matched_idxs) + self.num_classes
sampled_fg_idxs, sampled_bg_idxs = subsample_labels(
gt_classes, self.batch_size_per_image, self.positive_sample_fraction, self.num_classes
)
sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0)
return sampled_idxs, gt_classes[sampled_idxs]
@torch.no_grad()
def label_and_sample_proposals(self, proposals, targets):
"""
Prepare some proposals to be used to train the ROI heads.
It performs box matching between `proposals` and `targets`, and assigns
training labels to the proposals.
It returns ``self.batch_size_per_image`` random samples from proposals and groundtruth
boxes, with a fraction of positives that is no larger than
``self.positive_sample_fraction``.
Args:
See :meth:`ROIHeads.forward`
Returns:
list[Instances]:
length `N` list of `Instances`s containing the proposals
sampled for training. Each `Instances` has the following fields:
- proposal_boxes: the proposal boxes
- gt_boxes: the ground-truth box that the proposal is assigned to
(this is only meaningful if the proposal has a label > 0; if label = 0
then the ground-truth box is random)
Other fields such as "gt_classes", "gt_masks", that's included in `targets`.
"""
gt_boxes = [x.gt_boxes for x in targets]
# Augment proposals with ground-truth boxes.
# In the case of learned proposals (e.g., RPN), when training starts
# the proposals will be low quality due to random initialization.
# It's possible that none of these initial
# proposals have high enough overlap with the gt objects to be used
# as positive examples for the second stage components (box head,
# cls head, mask head). Adding the gt boxes to the set of proposals
# ensures that the second stage components will have some positive
# examples from the start of training. For RPN, this augmentation improves
# convergence and empirically improves box AP on COCO by about 0.5
# points (under one tested configuration).
if self.proposal_append_gt:
proposals = add_ground_truth_to_proposals(gt_boxes, proposals)
proposals_with_gt = []
num_fg_samples = []
num_bg_samples = []
for proposals_per_image, targets_per_image in zip(proposals, targets):
has_gt = len(targets_per_image) > 0
match_quality_matrix = pairwise_iou(
targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
)
matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)
sampled_idxs, gt_classes = self._sample_proposals(
matched_idxs, matched_labels, targets_per_image.gt_classes
)
# Set target attributes of the sampled proposals:
proposals_per_image = proposals_per_image[sampled_idxs]
proposals_per_image.gt_classes = gt_classes
# We index all the attributes of targets that start with "gt_"
# and have not been added to proposals yet (="gt_classes").
if has_gt:
sampled_targets = matched_idxs[sampled_idxs]
# NOTE: here the indexing waste some compute, because heads
# like masks, keypoints, etc, will filter the proposals again,
# (by foreground/background, or number of keypoints in the image, etc)
# so we essentially index the data twice.
for (trg_name, trg_value) in targets_per_image.get_fields().items():
if trg_name.startswith("gt_") and not proposals_per_image.has(trg_name):
proposals_per_image.set(trg_name, trg_value[sampled_targets])
else:
gt_boxes = Boxes(
targets_per_image.gt_boxes.tensor.new_zeros((len(sampled_idxs), 4))
)
proposals_per_image.gt_boxes = gt_boxes
num_bg_samples.append((gt_classes == self.num_classes).sum().item())
num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])
proposals_with_gt.append(proposals_per_image)
# Log the number of fg/bg samples that are selected for training ROI heads
storage = get_event_storage()
storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples))
storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples))
return proposals_with_gt
def forward(self, images, features, proposals, targets=None):
"""
Args:
images (ImageList):
features (dict[str: Tensor]): input data as a mapping from feature
map name to tensor. Axis 0 represents the number of images `N` in
the input data; axes 1-3 are channels, height, and width, which may
vary between feature maps (e.g., if a feature pyramid is used).
proposals (list[Instances]): length `N` list of `Instances`s. The i-th
`Instances` contains object proposals for the i-th input image,
with fields "proposal_boxes" and "objectness_logits".
targets (list[Instances], optional): length `N` list of `Instances`s. The i-th
`Instances` contains the ground-truth per-instance annotations
for the i-th input image. Specify `targets` during training only.
It may have the following fields:
- gt_boxes: the bounding box of each instance.
- gt_classes: the label for each instance with a category ranging in [0, #class].
- gt_masks: PolygonMasks or BitMasks, the ground-truth masks of each instance.
- gt_keypoints: NxKx3, the groud-truth keypoints for each instance.
Returns:
results (list[Instances]): length `N` list of `Instances`s containing the
detected instances. Returned during inference only; may be [] during training.
losses (dict[str->Tensor]):
mapping from a named loss to a tensor storing the loss. Used during training only.
"""
raise NotImplementedError()
@ROI_HEADS_REGISTRY.register()
class Res5ROIHeads(ROIHeads):
"""
The ROIHeads in a typical "C4" R-CNN model, where
the box and mask head share the cropping and
the per-region feature computation by a Res5 block.
"""
def __init__(self, cfg, input_shape):
super().__init__(cfg, input_shape)
assert len(self.in_features) == 1
# fmt: off
pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
pooler_scales = (1.0 / self.feature_strides[self.in_features[0]], )
sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
self.mask_on = cfg.MODEL.MASK_ON
# fmt: on
assert not cfg.MODEL.KEYPOINT_ON
self.pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type=pooler_type,
)
self.res5, out_channels = self._build_res5_block(cfg)
self.box_predictor = FastRCNNOutputLayers(
out_channels, self.num_classes, self.cls_agnostic_bbox_reg
)
if self.mask_on:
self.mask_head = build_mask_head(
cfg,
ShapeSpec(channels=out_channels, width=pooler_resolution, height=pooler_resolution),
)
def _build_res5_block(self, cfg):
# fmt: off
stage_channel_factor = 2 ** 3 # res5 is 8x res2
num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
bottleneck_channels = num_groups * width_per_group * stage_channel_factor
out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS * stage_channel_factor
stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1
norm = cfg.MODEL.RESNETS.NORM
assert not cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE[-1], \
"Deformable conv is not yet supported in res5 head."
# fmt: on
blocks = make_stage(
BottleneckBlock,
3,
first_stride=2,
in_channels=out_channels // 2,
bottleneck_channels=bottleneck_channels,
out_channels=out_channels,
num_groups=num_groups,
norm=norm,
stride_in_1x1=stride_in_1x1,
)
return nn.Sequential(*blocks), out_channels
def _shared_roi_transform(self, features, boxes):
x = self.pooler(features, boxes)
return self.res5(x)
def forward(self, images, features, proposals, targets=None):
"""
See :class:`ROIHeads.forward`.
"""
del images
if self.training:
proposals = self.label_and_sample_proposals(proposals, targets)
del targets
proposal_boxes = [x.proposal_boxes for x in proposals]
box_features = self._shared_roi_transform(
[features[f] for f in self.in_features], proposal_boxes
)
feature_pooled = box_features.mean(dim=[2, 3]) # pooled to 1x1
pred_class_logits, pred_proposal_deltas = self.box_predictor(feature_pooled)
del feature_pooled
outputs = FastRCNNOutputs(
self.box2box_transform,
pred_class_logits,
pred_proposal_deltas,
proposals,
self.smooth_l1_beta,
)
if self.training:
del features
losses = outputs.losses()
if self.mask_on:
proposals, fg_selection_masks = select_foreground_proposals(
proposals, self.num_classes
)
# Since the ROI feature transform is shared between boxes and masks,
# we don't need to recompute features. The mask loss is only defined
# on foreground proposals, so we need to select out the foreground
# features.
mask_features = box_features[torch.cat(fg_selection_masks, dim=0)]
del box_features
mask_logits = self.mask_head(mask_features)
losses["loss_mask"] = mask_rcnn_loss(mask_logits, proposals)
return [], losses
else:
pred_instances, _ = outputs.inference(
self.test_score_thresh, self.test_nms_thresh, self.test_detections_per_img
)
pred_instances = self.forward_with_given_boxes(features, pred_instances)
return pred_instances, {}
def forward_with_given_boxes(self, features, instances):
"""
Use the given boxes in `instances` to produce other (non-box) per-ROI outputs.
Args:
features: same as in `forward()`
instances (list[Instances]): instances to predict other outputs. Expect the keys
"pred_boxes" and "pred_classes" to exist.
Returns:
instances (Instances):
the same `Instances` object, with extra
fields such as `pred_masks` or `pred_keypoints`.
"""
assert not self.training
assert instances[0].has("pred_boxes") and instances[0].has("pred_classes")
if self.mask_on:
features = [features[f] for f in self.in_features]
x = self._shared_roi_transform(features, [x.pred_boxes for x in instances])
mask_logits = self.mask_head(x)
mask_rcnn_inference(mask_logits, instances)
return instances
@ROI_HEADS_REGISTRY.register()
class StandardROIHeads(ROIHeads):
"""
It's "standard" in a sense that there is no ROI transform sharing
or feature sharing between tasks.
The cropped rois go to separate branches (boxes and masks) directly.
This way, it is easier to make separate abstractions for different branches.
This class is used by most models, such as FPN and C5.
To implement more models, you can subclass it and implement a different
:meth:`forward()` or a head.
"""
def __init__(self, cfg, input_shape):
super(StandardROIHeads, self).__init__(cfg, input_shape)
self._init_box_head(cfg)
self._init_mask_head(cfg)
self._init_keypoint_head(cfg)
def _init_box_head(self, cfg):
# fmt: off
pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
pooler_scales = tuple(1.0 / self.feature_strides[k] for k in self.in_features)
sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
# fmt: on
# If StandardROIHeads is applied on multiple feature maps (as in FPN),
# then we share the same predictors and therefore the channel counts must be the same
in_channels = [self.feature_channels[f] for f in self.in_features]
# Check all channel counts are equal
assert len(set(in_channels)) == 1, in_channels
in_channels = in_channels[0]
self.box_pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type=pooler_type,
)
# Here we split "box head" and "box predictor", which is mainly due to historical reasons.
# They are used together so the "box predictor" layers should be part of the "box head".
# New subclasses of ROIHeads do not need "box predictor"s.
self.box_head = build_box_head(
cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution)
)
self.box_predictor = FastRCNNOutputLayers(
self.box_head.output_size, self.num_classes, self.cls_agnostic_bbox_reg
)
def _init_mask_head(self, cfg):
# fmt: off
self.mask_on = cfg.MODEL.MASK_ON
if not self.mask_on:
return
pooler_resolution = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION
pooler_scales = tuple(1.0 / self.feature_strides[k] for k in self.in_features)
sampling_ratio = cfg.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO
pooler_type = cfg.MODEL.ROI_MASK_HEAD.POOLER_TYPE
# fmt: on
in_channels = [self.feature_channels[f] for f in self.in_features][0]
self.mask_pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type=pooler_type,
)
self.mask_head = build_mask_head(
cfg, ShapeSpec(channels=in_channels, width=pooler_resolution, height=pooler_resolution)
)
def _init_keypoint_head(self, cfg):
# fmt: off
self.keypoint_on = cfg.MODEL.KEYPOINT_ON
if not self.keypoint_on:
return
pooler_resolution = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION
pooler_scales = tuple(1.0 / self.feature_strides[k] for k in self.in_features) # noqa
sampling_ratio = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO
pooler_type = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE
self.normalize_loss_by_visible_keypoints = cfg.MODEL.ROI_KEYPOINT_HEAD.NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS # noqa
self.keypoint_loss_weight = cfg.MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT
# fmt: on
in_channels = [self.feature_channels[f] for f in self.in_features][0]
self.keypoint_pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type=pooler_type,
)
self.keypoint_head = build_keypoint_head(
cfg, ShapeSpec(channels=in_channels, width=pooler_resolution, height=pooler_resolution)
)
def forward(self, images, features, proposals, targets=None):
"""
See :class:`ROIHeads.forward`.
"""
del images
if self.training:
proposals = self.label_and_sample_proposals(proposals, targets)
del targets
features_list = [features[f] for f in self.in_features]
if self.training:
losses = self._forward_box(features_list, proposals)
# During training the proposals used by the box head are
# used by the mask, keypoint (and densepose) heads.
losses.update(self._forward_mask(features_list, proposals))
losses.update(self._forward_keypoint(features_list, proposals))
return proposals, losses
else:
pred_instances, box_head_feat = self._forward_box(features_list, proposals)
# During inference cascaded prediction is used: the mask and keypoints heads are only
# applied to the top scoring box detections.
pred_instances = self.forward_with_given_boxes(features, pred_instances)
            return pred_instances, box_head_feat, {}
def forward_with_given_boxes(self, features, instances):
"""
Use the given boxes in `instances` to produce other (non-box) per-ROI outputs.
This is useful for downstream tasks where a box is known, but need to obtain
other attributes (outputs of other heads).
Test-time augmentation also uses this.
Args:
features: same as in `forward()`
instances (list[Instances]): instances to predict other outputs. Expect the keys
"pred_boxes" and "pred_classes" to exist.
Returns:
instances (Instances):
the same `Instances` object, with extra
fields such as `pred_masks` or `pred_keypoints`.
"""
assert not self.training
assert instances[0].has("pred_boxes") and instances[0].has("pred_classes")
features = [features[f] for f in self.in_features]
instances = self._forward_mask(features, instances)
instances = self._forward_keypoint(features, instances)
return instances
def _forward_box(self, features, proposals):
"""
Forward logic of the box prediction branch.
Args:
features (list[Tensor]): #level input features for box prediction
proposals (list[Instances]): the per-image object proposals with
their matching ground truth.
Each has fields "proposal_boxes", and "objectness_logits",
"gt_classes", "gt_boxes".
Returns:
In training, a dict of losses.
In inference, a list of `Instances`, the predicted instances.
"""
box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
box_features = self.box_head(box_features)
pred_class_logits, pred_proposal_deltas = self.box_predictor(box_features)
# del box_features
outputs = FastRCNNOutputs(
self.box2box_transform,
pred_class_logits,
pred_proposal_deltas,
proposals,
self.smooth_l1_beta,
)
if self.training:
return outputs.losses()
else:
pred_instances, _ = outputs.inference(
self.test_score_thresh, self.test_nms_thresh, self.test_detections_per_img
)
            return pred_instances, box_features
def _forward_mask(self, features, instances):
"""
Forward logic of the mask prediction branch.
Args:
features (list[Tensor]): #level input features for mask prediction
instances (list[Instances]): the per-image instances to train/predict masks.
In training, they can be the proposals.
In inference, they can be the predicted boxes.
Returns:
In training, a dict of losses.
In inference, update `instances` with new fields "pred_masks" and return it.
"""
if not self.mask_on:
return {} if self.training else instances
if self.training:
# The loss is only defined on positive proposals.
proposals, _ = select_foreground_proposals(instances, self.num_classes)
proposal_boxes = [x.proposal_boxes for x in proposals]
mask_features = self.mask_pooler(features, proposal_boxes)
mask_logits = self.mask_head(mask_features)
return {"loss_mask": mask_rcnn_loss(mask_logits, proposals)}
else:
pred_boxes = [x.pred_boxes for x in instances]
mask_features = self.mask_pooler(features, pred_boxes)
mask_logits = self.mask_head(mask_features)
mask_rcnn_inference(mask_logits, instances)
return instances
def _forward_keypoint(self, features, instances):
"""
Forward logic of the keypoint prediction branch.
Args:
features (list[Tensor]): #level input features for keypoint prediction
instances (list[Instances]): the per-image instances to train/predict keypoints.
In training, they can be the proposals.
In inference, they can be the predicted boxes.
Returns:
In training, a dict of losses.
In inference, update `instances` with new fields "pred_keypoints" and return it.
"""
if not self.keypoint_on:
return {} if self.training else instances
num_images = len(instances)
if self.training:
# The loss is defined on positive proposals with at >=1 visible keypoints.
proposals, _ = select_foreground_proposals(instances, self.num_classes)
proposals = select_proposals_with_visible_keypoints(proposals)
proposal_boxes = [x.proposal_boxes for x in proposals]
keypoint_features = self.keypoint_pooler(features, proposal_boxes)
keypoint_logits = self.keypoint_head(keypoint_features)
normalizer = (
num_images
* self.batch_size_per_image
* self.positive_sample_fraction
* keypoint_logits.shape[1]
)
loss = keypoint_rcnn_loss(
keypoint_logits,
proposals,
normalizer=None if self.normalize_loss_by_visible_keypoints else normalizer,
)
return {"loss_keypoint": loss * self.keypoint_loss_weight}
else:
pred_boxes = [x.pred_boxes for x in instances]
keypoint_features = self.keypoint_pooler(features, pred_boxes)
keypoint_logits = self.keypoint_head(keypoint_features)
keypoint_rcnn_inference(keypoint_logits, instances)
return instances
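# Usage sketch (illustrative, not part of detectron2): building the ROI heads
# registered above from a config. Assumes a standard model-zoo config file is
# available locally; build_backbone(cfg).output_shape() provides the per-level
# ShapeSpec dict that build_roi_heads expects.
#
# from detectron2.config import get_cfg
# from detectron2.modeling import build_backbone
#
# cfg = get_cfg()
# cfg.merge_from_file("configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml")
# backbone = build_backbone(cfg)
# roi_heads = build_roi_heads(cfg, backbone.output_shape())
# assert isinstance(roi_heads, StandardROIHeads)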
|
the-stack_0_18801 | # -*- coding: utf-8 -*-
# Copyright (2017) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for test_subscription.py
"""
import json
from oneview_redfish_toolkit.api.subscription \
import Subscription
from oneview_redfish_toolkit.tests.base_test import BaseTest
class TestSubscription(BaseTest):
"""Tests for Subscription class"""
def setUp(self):
"""Tests preparation"""
# Loading Subscription result mockup
with open(
'oneview_redfish_toolkit/mockups/redfish/'
'Subscription.json'
) as f:
self.subscription_mockup = json.load(f)
def test_class_instantiation(self):
# Tests if class is correctly instantiated and validated
try:
subscription = Subscription("", "", [], None)
except Exception as e:
self.fail("Failed to instantiate Subscription class."
" Error: {}".format(e))
self.assertIsInstance(subscription, Subscription)
def test_serialize(self):
# Tests the serialize function result against known result
try:
subscription = Subscription(
"e7f93fa2-0cb4-11e8-9060-e839359bc36a",
"http://www.dnsname.com/Destination1",
["Alert", "StatusChange"], None)
except Exception as e:
self.fail("Failed to instantiate Subscription class."
" Error: {}".format(e))
try:
result = json.loads(subscription.serialize())
except Exception as e:
self.fail("Failed to serialize. Error: ".format(e))
self.assertEqual(self.subscription_mockup, result)
|
the-stack_0_18803 | #!/usr/bin/python
# -*- coding: utf-8 -*-
##############################################################################
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Amazon Software License (the "License"). You may not #
# use this file except in compliance with the License. A copy of the #
# License is located at #
# #
# http://aws.amazon.com/asl/ #
# #
# or in the "license" file accompanying this file. This file is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, #
# express or implied. See the License for the specific language governing #
# permissions and limitations under the License. #
##############################################################################
from botocore.vendored import requests
import json
def send(event, context, responseStatus, responseData, physicalResourceId=None, noEcho=False):
responseUrl = event['ResponseURL']
responseBody = {}
responseBody['Status'] = responseStatus
responseBody['Reason'] = 'See the details in CloudWatch Log Stream: ' + context.log_stream_name
responseBody['PhysicalResourceId'] = physicalResourceId or context.log_stream_name
responseBody['StackId'] = event['StackId']
responseBody['RequestId'] = event['RequestId']
responseBody['LogicalResourceId'] = event['LogicalResourceId']
responseBody['NoEcho'] = noEcho
responseBody['Data'] = responseData
json_responseBody = json.dumps(responseBody)
print("Response body:\n" + json_responseBody)
headers = {
'content-type' : '',
'content-length' : str(len(json_responseBody))
}
try:
response = requests.put(responseUrl,
data=json_responseBody,
headers=headers)
print("Status code: " + response.reason)
except Exception as e:
print("send(..) failed executing requests.put(..): " + str(e))
|
the-stack_0_18805 | from .action import SciAction
class TabAction(SciAction):
title = 'Table Action'
note, para, view = [], None, None
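    # Note: this class appears to be extracted from a larger GUI framework; members
    # such as self.modal, self.dialog and the self.ok() hook are assumed to be
    # provided by that framework (an assumption, they are not defined in this file).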
def __init__(self): pass
def show(self):
tps, data, snap = self.tps, self.tps.data, self.tps.snap
        f = lambda p: self.run(tps, snap, data, p) or tps.update()
return self.app.show_para(self.title, self.para, self.view, f, on_ok=None,
on_cancel=lambda x=self.tps:self.cancel(x),
preview='preview' in self.note, modal=True)
def cancel(self, tps):
        tps.data[:] = tps.snap
tps.update()
def run(self, tps, snap, data, para = None):
print('I am running!!!')
def start(self, app, para=None, callback=None):
self.app, self.tps = app, app.get_table()
if 'auto_snap' in self.note:
if 'auto_msk' in self.note: mode = True
elif 'msk_not' in self.note: mode = False
else: mode = None
self.tps.snapshot(mode, 'num_only' in self.note)
if para!=None:
self.ok(self.tps, para, callback)
elif self.view==None:
            if self.__class__.show is not TabAction.show:
if self.show():
self.run(self.tps, para, callback)
else: self.ok(self.tps, para, callback)
elif self.modal:
if self.show():
self.ok(self.tps, para, callback)
else:self.cancel(self.tps)
if not self.dialog is None: self.dialog.Destroy()
else: self.show() |
the-stack_0_18806 | # -*- coding: utf-8 -*-
"""
gpaw
Google Suite Python API Wrapper
"""
from .client import Client
def NewClient(credentials, scopes):
"""Login to Google API using OAuth2 credentials.
:returns: :class:`gpaw.Client` instance.
"""
client = Client(creds=credentials,scopes=scopes)
client._authorize()
return client |
the-stack_0_18807 | #Entering the Program to calculate the sum of a particular round
print("Enter the number (-1 is the event in this program");
total=0;
UserInput=int(input());
while(UserInput!=-1):
total=total+UserInput;
print("Enter the number (-1 is the event in this program");
UserInput=int(input());
print("The total soore is:"+str(total));
|
the-stack_0_18808 | from __future__ import annotations
from datetime import datetime, timedelta
from fnmatch import fnmatch
from timeit import default_timer as timer
from typing import Optional
import asyncio
import os.path
import pathlib
import shutil
from asyncinotify import Inotify, Mask
from loguru import logger
from loguru._logger import Logger
import texoutparse
DEBOUNCE_THRESHOLD = timedelta(seconds=1)
TEX_LOG_ENCODING = 'latin-1'
ROOT_DIR = pathlib.Path('.')
AUX_DIR = ROOT_DIR / 'aux'
OUTPUT_DIR = ROOT_DIR / 'output'
class Task:
command: str
sublogger: Logger
out_buffer: int
def __eq__(self, other):
return isinstance(other, Task) and self.command == other.command
def __hash__(self):
return hash(self.command)
async def pre_process(self, runner: TaskRunner):
pass
async def post_process(self, runner: TaskRunner):
pass
async def on_failure(self, runner: TaskRunner):
pass
class BiberTask(Task):
out_buffer: int = asyncio.subprocess.PIPE
biber_path: pathlib.Path
tex_path: pathlib.Path
def __init__(self, biber_path: pathlib.Path, tex_path: pathlib.Path):
self.biber_path = biber_path
self.tex_path = tex_path
self.sublogger = logger.bind(name=str(self.biber_path))
def __repr__(self):
return f'BiberTask({repr(self.biber_path)})'
@property
def base_name(self):
return os.path.splitext(self.biber_path)[0]
@property
def command(self):
return f'biber {self.biber_path}'
async def post_process(self, runner: TaskRunner):
runner.schedule(TeXTask(self.tex_path), str(self.biber_path))
class TeXTask(Task):
tex_path: pathlib.Path
out_buffer: int = asyncio.subprocess.DEVNULL
_bcf_file_hash: Optional[int] = None
def __init__(self, tex_path: pathlib.Path):
self.tex_path = tex_path
self.sublogger = logger.bind(name=str(self.tex_path))
def __repr__(self):
return f'TeXTask({repr(self.tex_path)})'
def get_aux_path(self, extension: str):
return AUX_DIR / self.tex_path.with_suffix(extension).name
@property
def build_pdf_path(self):
return OUTPUT_DIR / self.tex_path.with_suffix('.pdf').name
@property
def command(self):
return f'pdflatex -interaction=batchmode -output-directory={AUX_DIR} {self.tex_path}'
def get_bcf_hash(self) -> Optional[int]:
try:
with open(self.get_aux_path('.bcf'), 'r') as bcf_file:
return hash(bcf_file.read())
except IOError:
return None
async def pre_process(self, runner: TaskRunner):
self._bcf_file_hash = self.get_bcf_hash()
async def post_process(self, runner: TaskRunner):
parser = texoutparse.LatexLogParser()
requires_rerun = False
try:
with open(self.get_aux_path('.log'), 'r', encoding=TEX_LOG_ENCODING) as log_file:
requires_rerun = 'Rerun to get' in log_file.read()
log_file.seek(0)
parser.process(log_file)
except OSError:
self.sublogger.error('Could not open TeX log file.')
else:
if len(parser.errors) > 0:
self.sublogger.error(f'Compiled with {len(parser.errors)} errors. The first error is:\n {str(parser.errors[0])}')
elif len(parser.warnings) > 0:
self.sublogger.warning(f'Compiled with {len(parser.warnings)} warnings. The first warning is:\n {str(parser.warnings[0])}')
elif len(parser.badboxes) > 0:
self.sublogger.warning(f'Compiled with {len(parser.badboxes)} bad boxes. The first bad box is:\n {str(parser.badboxes[0])}')
if len(parser.errors) != 0:
return
if not self.get_aux_path('.pdf').exists():
self.sublogger.error(f'No output file')
return
if self.get_bcf_hash() != self._bcf_file_hash:
runner.schedule(BiberTask(self.get_aux_path('.bcf'), self.tex_path), str(self.tex_path))
if requires_rerun:
runner.schedule(self, 'last build')
else:
self.sublogger.debug(f'No more passes required. Copying {self.get_aux_path(".pdf")} to {self.build_pdf_path}')
shutil.copyfile(self.get_aux_path('.pdf'), self.build_pdf_path)
on_failure = post_process
class AsymptoteTask(Task):
src_path: pathlib.Path
out_buffer: int = asyncio.subprocess.PIPE
def __init__(self, src_path: pathlib.Path):
self.src_path = src_path
self.sublogger = logger.bind(name=str(self.src_path))
def __repr__(self):
return f'AsymptoteTask({repr(self.src_path)})'
@property
def aux_pdf_path(self):
return AUX_DIR / self.src_path.with_suffix('.pdf').name
@property
def build_pdf_path(self):
return OUTPUT_DIR / self.src_path.with_suffix('.pdf').name
@property
def command(self):
return f'asy -outname={self.aux_pdf_path} {self.src_path}'
async def post_process(self, runner: TaskRunner):
shutil.copyfile(self.aux_pdf_path, self.build_pdf_path)
class TaskRunner:
active_tasks: set[Task] = set()
last_run_attempt: dict[Task, datetime] = {}
async def run_task(self, task: Task, trigger: Optional[str] = None):
self.active_tasks.add(task)
await task.pre_process(self)
start = timer()
if trigger is None:
task.sublogger.info(f'Manually triggered')
else:
task.sublogger.info(f'Triggered by {trigger}')
proc = await asyncio.create_subprocess_shell(
task.command,
stdout=task.out_buffer,
stderr=task.out_buffer,
)
exit_code = await proc.wait()
if exit_code == 0:
await task.post_process(self)
ms = round(1000 * (timer() - start))
task.sublogger.info(f'Finished in {ms}ms')
else:
await task.on_failure(self)
ms = round(1000 * (timer() - start))
task.sublogger.error(f'Failed in {ms}ms with exit code {exit_code}')
self.active_tasks.remove(task)
async def run_task_debounced(self, task: Task, trigger: Optional[str] = None):
# This means that the task has already been scheduled
if task in self.last_run_attempt:
self.last_run_attempt[task] = datetime.now()
return
# Loop asynchronously until enough time has passed since the last scheduling
while task in self.active_tasks or task not in self.last_run_attempt or datetime.now() - self.last_run_attempt[task] < DEBOUNCE_THRESHOLD:
self.last_run_attempt[task] = datetime.now()
await asyncio.sleep(DEBOUNCE_THRESHOLD.seconds)
del self.last_run_attempt[task]
await self.run_task(task, trigger)
def schedule(self, task: Task, trigger: Optional[str] = None):
asyncio.create_task(self.run_task_debounced(task, trigger))
async def iter_file_changes():
with Inotify() as inotify:
inotify.add_watch(ROOT_DIR, Mask.MODIFY)
inotify.add_watch(ROOT_DIR / 'src', Mask.MODIFY)
inotify.add_watch(ROOT_DIR / 'output', Mask.MODIFY)
inotify.add_watch(ROOT_DIR / 'figures', Mask.MODIFY)
inotify.add_watch(ROOT_DIR / 'packages', Mask.MODIFY)
logger.info('Started daemon and initialized watchers')
async for event in inotify:
if event.path is not None:
yield pathlib.Path(event.path)
async def setup_watchers():
runner = TaskRunner()
async for path in iter_file_changes():
if fnmatch(path, 'tikzcd.cls') or fnmatch(path, 'packages/*.sty'):
figures_dir = pathlib.Path('figures')
for figure_path in figures_dir.glob('*.tex'):
runner.schedule(TeXTask(figure_path.resolve()), trigger=str(path))
for figure_path in figures_dir.glob('*.asy'):
runner.schedule(AsymptoteTask(figure_path.resolve()), trigger=str(path))
if fnmatch(path, 'figures/*.tex'):
runner.schedule(TeXTask(path), trigger=str(path))
if fnmatch(path, 'figures/*.asy'):
runner.schedule(AsymptoteTask(path), trigger=str(path))
if not fnmatch(path, 'output/notebook.pdf') and (
fnmatch(path, 'notebook.tex') or
fnmatch(path, 'notebook.cls') or
fnmatch(path, 'src/*.tex') or
fnmatch(path, 'output/*.pdf') or
fnmatch(path, 'packages/*.sty')
):
runner.schedule(TeXTask(pathlib.Path('notebook.tex')), trigger=str(path))
if __name__ == '__main__':
import sys
logger.remove()
logger.add(sys.stdout, colorize=True, format='<green>{time:HH:mm:ss}</green> | <level>{level:7}</level> | {extra[name]} | <level>{message}</level>')
with logger.contextualize(name='<system>'):
try:
asyncio.run(setup_watchers())
except KeyboardInterrupt:
logger.info('Gracefully shutting down')
|
the-stack_0_18809 | import requests
import os
from bs4 import BeautifulSoup
from twilio.rest import Client
from shoe import Shoe
import config
def send_sms(message):
try:
twilio_sid = os.environ['TWILIO_ACCOUNT_SID']
except KeyError:
twilio_sid = config.TWILIO_ACCOUNT_SID
try:
twilio_token = os.environ['TWILIO_ACCOUNT_AUTH_TOKEN']
except KeyError:
twilio_token = config.TWILIO_ACCOUNT_AUTH_TOKEN
try:
sending_phone = os.environ['SENDING_PHONE']
except KeyError:
sending_phone = config.SENDING_PHONE
try:
receiving_phone = os.environ['RECEIVING_PHONE']
except KeyError:
receiving_phone = config.RECEIVING_PHONE
try:
twilio_client = Client(twilio_sid, twilio_token)
except:
return 1, "Error initializing Twilio client - please verify your credentials."
try:
twilio_client.messages.create(to=receiving_phone,
from_=sending_phone,
body=message)
except:
return 1, "Error sending message - please verify phone number(s)."
print("MESSAGE DELIVERED")
return 0, None
def main():
shoes = []
shoes.append(
Shoe("Yeezreel RF Size 12", "https://stockx.com/adidas-yeezy-boost-350-v2-yeezreel-reflective?size=12", 350))
shoes.append(
Shoe("Yeezreel RF Size 13", "https://stockx.com/adidas-yeezy-boost-350-v2-yeezreel-reflective?size=13", 350))
shoes.append(Shoe("Yechiel NRF Size 12.5", "https://stockx.com/adidas-yeezy-boost-350-v2-yecheil?size=12.5", 350))
headers = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36', }
message = ""
for shoe in shoes:
res = requests.get(shoe.url, headers=headers)
parser = BeautifulSoup(res.text, features="html.parser")
shoe.price = parser.find('div', attrs={'class': 'sale-value'}).text
# print("Name: {}\tPrice: {}\n".format(shoe.name, shoe.price))
message = message + "Name: {}\tPrice: {}\n".format(shoe.name, shoe.price)
# message = message + "SENT ON AWS"
return send_sms(message)
if __name__ == "__main__":
main()
|
the-stack_0_18811 | # stash.py
# Copyright (C) 2018 Jelmer Vernooij <[email protected]>
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
"""Stash handling."""
from __future__ import absolute_import
import os
from dulwich.file import GitFile
from dulwich.index import (
commit_tree,
iter_fresh_objects,
)
from dulwich.reflog import read_reflog
DEFAULT_STASH_REF = b"refs/stash"
class Stash(object):
"""A Git stash.
Note that this doesn't currently update the working tree.
"""
def __init__(self, repo, ref=DEFAULT_STASH_REF):
self._ref = ref
self._repo = repo
def stashes(self):
reflog_path = os.path.join(
self._repo.commondir(), 'logs', os.fsdecode(self._ref))
try:
with GitFile(reflog_path, 'rb') as f:
return reversed(list(read_reflog(f)))
except FileNotFoundError:
return []
@classmethod
def from_repo(cls, repo):
"""Create a new stash from a Repo object."""
return cls(repo)
def drop(self, index):
"""Drop entry with specified index."""
raise NotImplementedError(self.drop)
def pop(self, index):
raise NotImplementedError(self.drop)
def push(self, committer=None, author=None, message=None):
"""Create a new stash.
Args:
committer: Optional committer name to use
author: Optional author name to use
message: Optional commit message
"""
# First, create the index commit.
commit_kwargs = {}
if committer is not None:
commit_kwargs['committer'] = committer
if author is not None:
commit_kwargs['author'] = author
index = self._repo.open_index()
index_tree_id = index.commit(self._repo.object_store)
index_commit_id = self._repo.do_commit(
ref=None, tree=index_tree_id,
message=b"Index stash",
merge_heads=[self._repo.head()],
**commit_kwargs)
# Then, the working tree one.
stash_tree_id = commit_tree(
self._repo.object_store,
iter_fresh_objects(
index, os.fsencode(self._repo.path),
object_store=self._repo.object_store))
if message is None:
message = b"A stash on " + self._repo.head()
# TODO(jelmer): Just pass parents into do_commit()?
self._repo.refs[self._ref] = self._repo.head()
cid = self._repo.do_commit(
ref=self._ref, tree=stash_tree_id,
message=message,
merge_heads=[index_commit_id],
**commit_kwargs)
return cid
def __getitem__(self, index):
return list(self.stashes())[index]
def __len__(self):
return len(list(self.stashes()))
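# Example (illustrative sketch, not part of the API above): pushing and listing
# stashes on an existing repository.  The repository path, message and the
# reflog entry field names are assumptions.
#
#   from dulwich.repo import Repo
#   repo = Repo('.')
#   stash = Stash.from_repo(repo)
#   stash.push(message=b'WIP')
#   for entry in stash.stashes():
#       print(entry.new_sha, entry.message)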
|
the-stack_0_18812 | # Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Calibration wrapper for calibrations returned from the Quantum Engine."""
from collections import abc, defaultdict
import datetime
from typing import Any, Dict, Iterator, Optional, Tuple, TYPE_CHECKING
from cirq import devices, vis
from cirq.google.api import v2
if TYPE_CHECKING:
import cirq
class Calibration(abc.Mapping):
"""A convenience wrapper for calibrations that acts like a dictionary.
Calibrations act as dictionaries whose keys are the names of the metric,
and whose values are the metric values. The metric values themselves are
represented as a dictionary. These metric value dictionaries have
keys that are tuples of `cirq.GridQubit`s and values that are lists of the
metric values for those qubits. If a metric acts globally and is attached
to no specified number of qubits, the map will be from the empty tuple
to the metrics values.
Calibrations act just like a python dictionary. For example you can get
a list of all of the metric names using
`calibration.keys()`
and query a single value by looking up the name by index:
`calibration['t1']`
Attributes:
timestamp: The time that this calibration was run, in milliseconds since
the epoch.
"""
def __init__(self, calibration: v2.metrics_pb2.MetricsSnapshot) -> None:
self.timestamp = calibration.timestamp_ms
self._metric_dict = self._compute_metric_dict(calibration.metrics)
def _compute_metric_dict(
self, metrics: v2.metrics_pb2.MetricsSnapshot
) -> Dict[str, Dict[Tuple['cirq.GridQubit', ...], Any]]:
        results: Dict[str, Dict[Tuple[devices.GridQubit, ...], Any]] = defaultdict(dict)
for metric in metrics:
name = metric.name
# Flatten the values to a list, removing keys containing type names
# (e.g. proto version of each value is {<type>: value}).
flat_values = [
getattr(v, v.WhichOneof('val')) for v in metric.values
]
if metric.targets:
targets = [
t[1:] if t.startswith('q') else t for t in metric.targets
]
# TODO: Remove when calibrations don't prepend this.
qubits = tuple(v2.grid_qubit_from_proto_id(t) for t in targets)
results[name][qubits] = flat_values
else:
assert len(results[name]) == 0, (
'Only one metric of a given name can have no targets. '
'Found multiple for key {}'.format(name))
results[name][()] = flat_values
return results
def __getitem__(self, key: str) -> Dict[Tuple['cirq.GridQubit', ...], Any]:
"""Supports getting calibrations by index.
Calibration may be accessed by key:
`calibration['t1']`.
This returns a map from tuples of `cirq.GridQubit`s to a list of the
values of the metric. If there are no targets, the only key will only
be an empty tuple.
"""
if not isinstance(key, str):
raise TypeError(
'Calibration metrics only have string keys. Key was {}'.format(
key))
if key not in self._metric_dict:
raise KeyError('Metric named {} not in calibration'.format(key))
return self._metric_dict[key]
def __iter__(self) -> Iterator:
return iter(self._metric_dict)
def __len__(self) -> int:
return len(self._metric_dict)
def __str__(self) -> str:
return 'Calibration(keys={})'.format(list(sorted(self.keys())))
def timestamp_str(self,
tz: Optional[datetime.tzinfo] = None,
timespec: str = 'auto') -> str:
"""Return a string for the calibration timestamp.
Args:
tz: The timezone for the string. If None, the method uses the
platform's local date and time.
timespec: See datetime.isoformat for valid values.
Returns:
The string in ISO 8601 format YYYY-MM-DDTHH:MM:SS.ffffff.
"""
dt = datetime.datetime.fromtimestamp(self.timestamp / 1000, tz)
dt += datetime.timedelta(microseconds=self.timestamp % 1000000)
return dt.isoformat(sep=' ', timespec=timespec)
def heatmap(self, key: str) -> vis.Heatmap:
"""Return a heatmap for metrics that target single qubits.
Args:
key: The metric key to return a heatmap for.
Returns:
A `cirq.Heatmap` for the metric.
Raises:
AssertionError if the heatmap is not for single qubits or the metric
values are not single floats.
"""
metrics = self[key]
assert all(len(k) == 1 for k in metrics.keys()), (
'Heatmaps are only supported if all the targets in a metric'
' are single qubits.')
assert all(len(k) == 1 for k in metrics.values()), (
'Heatmaps are only supported if all the values in a metric'
' are single metric values.')
value_map = {qubit: value for (qubit,), (value,) in metrics.items()}
return vis.Heatmap(value_map)
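# Example (illustrative sketch): iterating over one metric of a calibration.
# The metric name 'single_qubit_rb_total_error' is an assumption; the available
# keys depend on the processor.
#
#   for qubits, values in calibration['single_qubit_rb_total_error'].items():
#       print(qubits, values)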
|
the-stack_0_18818 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ServerCertificateCommonName(Model):
"""Describes the server certificate details using common name.
All required parameters must be populated in order to send to Azure.
:param certificate_common_name: Required. The common name of the server
certificate.
:type certificate_common_name: str
:param certificate_issuer_thumbprint: Required. The issuer thumbprint of
the server certificate.
:type certificate_issuer_thumbprint: str
"""
_validation = {
'certificate_common_name': {'required': True},
'certificate_issuer_thumbprint': {'required': True},
}
_attribute_map = {
'certificate_common_name': {'key': 'certificateCommonName', 'type': 'str'},
'certificate_issuer_thumbprint': {'key': 'certificateIssuerThumbprint', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ServerCertificateCommonName, self).__init__(**kwargs)
self.certificate_common_name = kwargs.get('certificate_common_name', None)
self.certificate_issuer_thumbprint = kwargs.get('certificate_issuer_thumbprint', None)
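# Example (illustrative sketch): both values below are placeholders, not real
# certificate data.
#
#   cert = ServerCertificateCommonName(
#       certificate_common_name='mycluster.contoso.com',
#       certificate_issuer_thumbprint='0123456789ABCDEF')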
|
the-stack_0_18820 | import sys
import re
import csv
import editdistance as ed
# Error rate functions
def cal_cer(hyp, truth):
return 100*float(ed.eval(hyp, truth))/len(truth)
def cal_wer(hyp, truth, SEP=' '):
return 100*float(ed.eval(hyp.split(SEP), truth.split(SEP)))/len(truth.split(SEP))
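# Example (illustrative): with editdistance installed,
#   cal_cer('hallo', 'hello') == 20.0        (1 edit over 5 reference characters)
#   cal_wer('a b c', 'a b d') == 100 * 1 / 3 (1 edit over 3 reference words)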
def get_testcases(fname):
def clean(ref):
ref = re.sub(r'B\-(\S+) ', '', ref)
ref = re.sub(r' E\-(\S+)', '', ref)
return ref
gex = re.compile(r'B\-(\S+) (.+?) E\-\1')
c = list(csv.reader(open(fname), delimiter='\t'))
testcases = []
for idx, hyp, ref in c[1:]:
hyp = re.sub(r' +', ' ', hyp)
ref = re.sub(r' +', ' ', ref)
hyp_slots = gex.findall(hyp)
ref_slots = gex.findall(ref)
if len(hyp_slots)>0:
hyp_slots = ';'.join([':'.join([clean(x[1]), x[0]]) for x in hyp_slots])
ref_slots = ';'.join([':'.join([x[1], x[0]]) for x in ref_slots])
else:
hyp_slots = ''
ref_slots = ''
ref = clean(ref)
hyp = clean(hyp)
testcase = [ref, hyp, ref_slots, hyp_slots]
testcases.append(testcase)
return testcases
if __name__ == '__main__':
utterance_files = sys.argv[1]
test_cases = get_testcases(utterance_files)
asr_wer = 0.0
asr_cer = 0.0
sf_f1 = 0.0
sf_wer = 0.0
sf_cer = 0.0
total_sent = 0
total_slot = 0
for test_case in test_cases:
ref_text = test_case[0]
hyp_text = test_case[1]
# ASR WER/CER evaluation
asr_wer += cal_wer(hyp_text, ref_text)
asr_cer += cal_cer(hyp_text, ref_text)
# Extract Slots
ref_slots = test_case[2].split(';')
hyp_slots = test_case[3].split(';')
unique_slots = []
ref_dict = {}
hyp_dict = {}
if ref_slots[0] != '':
for ref_slot in ref_slots:
v, k = ref_slot.split(':')
ref_dict.setdefault(k, [])
ref_dict[k].append(v)
if hyp_slots[0] != '':
for hyp_slot in hyp_slots:
v, k = hyp_slot.split(':')
hyp_dict.setdefault(k, [])
hyp_dict[k].append(v)
# Slot Type F1 evaluation
if len(hyp_dict.keys()) == 0 and len(ref_dict.keys()) == 0:
F1 = 1.0
elif len(hyp_dict.keys()) == 0:
F1 = 0.0
elif len(ref_dict.keys()) == 0:
F1 = 0.0
else:
P, R = 0.0, 0.0
for slot in ref_dict:
if slot in hyp_dict:
R += 1
R = R / len(ref_dict.keys())
for slot in hyp_dict:
if slot in ref_dict:
P += 1
P = P / len(hyp_dict.keys())
F1 = 2*P*R/(P+R) if (P+R) > 0 else 0.0
sf_f1 += F1
total_sent += 1
# Slot Value WER/CER evaluation
unique_slots = list(ref_dict.keys())
for slot in unique_slots:
for ref_i, ref_v in enumerate(ref_dict[slot]):
if slot not in hyp_dict:
hyp_v = ''
wer = cal_wer(hyp_v, ref_v)
cer = cal_cer(hyp_v, ref_v)
else:
min_wer = 100
min_cer = 100
for hyp_v in hyp_dict[slot]:
tmp_wer = cal_wer(hyp_v, ref_v)
tmp_cer = cal_cer(hyp_v, ref_v)
if min_wer > tmp_wer:
min_wer = tmp_wer
if min_cer > tmp_cer:
min_cer = tmp_cer
wer = min_wer
cer = min_cer
sf_wer += wer
sf_cer += cer
total_slot += 1
print('ASR WER:', asr_wer/total_sent)
print('ASR CER:', asr_cer/total_sent)
print('Slot Type F1:', sf_f1/total_sent)
print('Slot Value WER:', sf_wer/total_slot)
print('Slot Value CER:', sf_cer/total_slot)
|
the-stack_0_18822 | """
sphinx.versioning
~~~~~~~~~~~~~~~~~
Implements the low-level algorithms Sphinx uses for the versioning of
doctrees.
:copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import pickle
from itertools import product, zip_longest
from operator import itemgetter
from os import path
from typing import TYPE_CHECKING, Any, Dict, Iterator
from uuid import uuid4
from docutils.nodes import Node
from sphinx.transforms import SphinxTransform
if TYPE_CHECKING:
from sphinx.application import Sphinx
try:
import Levenshtein
IS_SPEEDUP = True
except ImportError:
IS_SPEEDUP = False
# anything below that ratio is considered equal/changed
VERSIONING_RATIO = 65
def add_uids(doctree: Node, condition: Any) -> Iterator[Node]:
"""Add a unique id to every node in the `doctree` which matches the
condition and yield the nodes.
:param doctree:
A :class:`docutils.nodes.document` instance.
:param condition:
A callable which returns either ``True`` or ``False`` for a given node.
"""
for node in doctree.findall(condition):
node.uid = uuid4().hex
yield node
def merge_doctrees(old: Node, new: Node, condition: Any) -> Iterator[Node]:
"""Merge the `old` doctree with the `new` one while looking at nodes
matching the `condition`.
Each node which replaces another one or has been added to the `new` doctree
will be yielded.
:param condition:
A callable which returns either ``True`` or ``False`` for a given node.
"""
old_iter = old.findall(condition)
new_iter = new.findall(condition)
old_nodes = []
new_nodes = []
ratios = {}
seen = set()
# compare the nodes each doctree in order
for old_node, new_node in zip_longest(old_iter, new_iter):
if old_node is None:
new_nodes.append(new_node)
continue
if not getattr(old_node, 'uid', None):
# maybe config.gettext_uuid has been changed.
old_node.uid = uuid4().hex
if new_node is None:
old_nodes.append(old_node)
continue
ratio = get_ratio(old_node.rawsource, new_node.rawsource)
if ratio == 0:
new_node.uid = old_node.uid
seen.add(new_node)
else:
ratios[old_node, new_node] = ratio
old_nodes.append(old_node)
new_nodes.append(new_node)
# calculate the ratios for each unequal pair of nodes, should we stumble
# on a pair which is equal we set the uid and add it to the seen ones
for old_node, new_node in product(old_nodes, new_nodes):
if new_node in seen or (old_node, new_node) in ratios:
continue
ratio = get_ratio(old_node.rawsource, new_node.rawsource)
if ratio == 0:
new_node.uid = old_node.uid
seen.add(new_node)
else:
ratios[old_node, new_node] = ratio
# choose the old node with the best ratio for each new node and set the uid
# as long as the ratio is under a certain value, in which case we consider
# them not changed but different
ratios = sorted(ratios.items(), key=itemgetter(1)) # type: ignore
for (old_node, new_node), ratio in ratios:
if new_node in seen:
continue
else:
seen.add(new_node)
if ratio < VERSIONING_RATIO:
new_node.uid = old_node.uid
else:
new_node.uid = uuid4().hex
yield new_node
# create new uuids for any new node we left out earlier, this happens
# if one or more nodes are simply added.
for new_node in set(new_nodes) - seen:
new_node.uid = uuid4().hex
yield new_node
def get_ratio(old: str, new: str) -> float:
"""Return a "similarity ratio" (in percent) representing the similarity
    between the two strings, where 0 means the strings are equal and larger
    values mean they are less similar.
"""
if not all([old, new]):
return VERSIONING_RATIO
if IS_SPEEDUP:
return Levenshtein.distance(old, new) / (len(old) / 100.0)
else:
return levenshtein_distance(old, new) / (len(old) / 100.0)
def levenshtein_distance(a: str, b: str) -> int:
"""Return the Levenshtein edit distance between two strings *a* and *b*."""
if a == b:
return 0
if len(a) < len(b):
a, b = b, a
if not a:
return len(b)
previous_row = list(range(len(b) + 1))
for i, column1 in enumerate(a):
current_row = [i + 1]
for j, column2 in enumerate(b):
insertions = previous_row[j + 1] + 1
deletions = current_row[j] + 1
substitutions = previous_row[j] + (column1 != column2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
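# Example (illustrative): levenshtein_distance('kitten', 'sitting') == 3, so
# get_ratio('kitten', 'sitting') == 3 / (6 / 100.0) == 50.0, i.e. below
# VERSIONING_RATIO and therefore treated as the same (changed) node.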
class UIDTransform(SphinxTransform):
"""Add UIDs to doctree for versioning."""
default_priority = 880
def apply(self, **kwargs: Any) -> None:
env = self.env
old_doctree = None
if not env.versioning_condition:
return
if env.versioning_compare:
# get old doctree
try:
filename = path.join(env.doctreedir, env.docname + '.doctree')
with open(filename, 'rb') as f:
old_doctree = pickle.load(f)
except OSError:
pass
# add uids for versioning
if not env.versioning_compare or old_doctree is None:
list(add_uids(self.document, env.versioning_condition))
else:
list(merge_doctrees(old_doctree, self.document, env.versioning_condition))
def setup(app: "Sphinx") -> Dict[str, Any]:
app.add_transform(UIDTransform)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
|
the-stack_0_18826 | import os
import copy
import json
import numpy as np
import awkward as ak
import torch.utils.data
from functools import partial
from concurrent.futures.thread import ThreadPoolExecutor
from .logger import _logger, warn_once
from .data.tools import _pad, _repeat_pad, _clip
from .data.fileio import _read_files
from .data.config import DataConfig, _md5
from .data.preprocess import _apply_selection, _build_new_variables, _build_weights, AutoStandardizer, WeightMaker
def _finalize_inputs(table, data_config):
output = {}
# copy observer variables before transformation
for k in data_config.z_variables:
if k in data_config.observer_names:
a = ak.to_numpy(table[k])
if a.dtype in (np.uint16, np.uint32, np.uint64):
# FIXME: hack as torch only supports float64, float32, float16, complex64, complex128, int64, int32, int16, int8, uint8, and bool
a = a.astype('int64')
output[k] = a
# copy labels
for k in data_config.label_names:
output[k] = ak.to_numpy(table[k])
# transformation
for k, params in data_config.preprocess_params.items():
if data_config._auto_standardization and params['center'] == 'auto':
raise ValueError('No valid standardization params for %s' % k)
if params['center'] is not None:
table[k] = _clip((table[k] - params['center']) * params['scale'], params['min'], params['max'])
if params['length'] is not None:
pad_fn = _repeat_pad if params['pad_mode'] == 'wrap' else partial(_pad, value=params['pad_value'])
table[k] = pad_fn(table[k], params['length'])
# check for NaN
if np.any(np.isnan(table[k])):
_logger.warning(
'Found NaN in %s, silently converting it to 0.', k)
table[k] = np.nan_to_num(table[k])
# stack variables for each input group
for k, names in data_config.input_dicts.items():
if len(names) == 1 and data_config.preprocess_params[names[0]]['length'] is None:
output['_' + k] = ak.to_numpy(ak.values_astype(table[names[0]], 'float32'))
else:
output['_' + k] = ak.to_numpy(np.stack([ak.values_astype(table[n], 'float32') for n in names], axis=1))
# copy monitor variables
for k in data_config.z_variables:
if k not in output:
output[k] = ak.to_numpy(table[k])
return output
def _get_reweight_indices(weights, up_sample=True, max_resample=10, weight_scale=1):
all_indices = np.arange(len(weights))
randwgt = np.random.uniform(low=0, high=weight_scale, size=len(weights))
keep_flags = randwgt < weights
if not up_sample:
keep_indices = all_indices[keep_flags]
else:
n_repeats = len(weights) // max(1, int(keep_flags.sum()))
if n_repeats > max_resample:
n_repeats = max_resample
all_indices = np.repeat(np.arange(len(weights)), n_repeats)
randwgt = np.random.uniform(low=0, high=weight_scale, size=len(weights) * n_repeats)
keep_indices = all_indices[randwgt < np.repeat(weights, n_repeats)]
return keep_indices.copy()
def _check_labels(table):
if np.all(table['_labelcheck_'] == 1):
return
else:
if np.any(table['_labelcheck_'] == 0):
raise RuntimeError('Inconsistent label definition: some of the entries are not assigned to any classes!')
if np.any(table['_labelcheck_'] > 1):
raise RuntimeError('Inconsistent label definition: some of the entries are assigned to multiple classes!')
def _preprocess(table, data_config, options):
# apply selection
table = _apply_selection(table, data_config.selection if options['training'] else data_config.test_time_selection)
if len(table) == 0:
return []
# define new variables
table = _build_new_variables(table, data_config.var_funcs)
# check labels
if data_config.label_type == 'simple' and options['training']:
_check_labels(table)
# compute reweight indices
if options['reweight'] and data_config.weight_name is not None:
wgts = _build_weights(table, data_config, warn=warn_once)
indices = _get_reweight_indices(wgts, up_sample=options['up_sample'],
weight_scale=options['weight_scale'], max_resample=options['max_resample'])
else:
indices = np.arange(len(table[data_config.label_names[0]]))
# shuffle
if options['shuffle']:
np.random.shuffle(indices)
# perform input variable standardization, clipping, padding and stacking
table = _finalize_inputs(table, data_config)
return table, indices
def _load_next(data_config, filelist, load_range, options):
table = _read_files(filelist, data_config.load_branches, load_range, treename=data_config.treename)
table, indices = _preprocess(table, data_config, options)
return table, indices
class _SimpleIter(object):
r"""_SimpleIter
Iterator object for ``SimpleIterDataset''.
"""
def __init__(self, **kwargs):
# inherit all properties from SimpleIterDataset
self.__dict__.update(**kwargs)
# executor to read files and run preprocessing asynchronously
self.executor = ThreadPoolExecutor(max_workers=1) if self._async_load else None
# init: prefetch holds table and indices for the next fetch
self.prefetch = None
self.table = None
self.indices = []
self.cursor = 0
self._seed = None
worker_info = torch.utils.data.get_worker_info()
file_dict = self._init_file_dict.copy()
if worker_info is not None:
# in a worker process
self._name += '_worker%d' % worker_info.id
self._seed = worker_info.seed & 0xFFFFFFFF
np.random.seed(self._seed)
# split workload by files
new_file_dict = {}
for name, files in file_dict.items():
new_files = files[worker_info.id::worker_info.num_workers]
assert(len(new_files) > 0)
new_file_dict[name] = new_files
file_dict = new_file_dict
self.worker_file_dict = file_dict
self.worker_filelist = sum(file_dict.values(), [])
self.worker_info = worker_info
self.restart()
def restart(self):
print('=== Restarting DataIter %s, seed=%s ===' % (self._name, self._seed))
# re-shuffle filelist and load range if for training
filelist = self.worker_filelist.copy()
if self._sampler_options['shuffle']:
np.random.shuffle(filelist)
if self._file_fraction < 1:
num_files = int(len(filelist) * self._file_fraction)
filelist = filelist[:num_files]
self.filelist = filelist
if self._init_load_range_and_fraction is None:
self.load_range = (0, 1)
else:
(start_pos, end_pos), load_frac = self._init_load_range_and_fraction
interval = (end_pos - start_pos) * load_frac
if self._sampler_options['shuffle']:
offset = np.random.uniform(start_pos, end_pos - interval)
self.load_range = (offset, offset + interval)
else:
self.load_range = (start_pos, start_pos + interval)
_logger.debug(
'Init iter [%d], will load %d (out of %d*%s=%d) files with load_range=%s:\n%s', 0
if self.worker_info is None else self.worker_info.id, len(self.filelist),
len(sum(self._init_file_dict.values(), [])),
self._file_fraction, int(len(sum(self._init_file_dict.values(), [])) * self._file_fraction),
str(self.load_range),
'\n'.join(self.filelist[: 3]) + '\n ... ' + self.filelist[-1],)
_logger.info('Restarted DataIter %s, load_range=%s, file_list:\n%s' %
(self._name, str(self.load_range), json.dumps(self.worker_file_dict, indent=2)))
# reset file fetching cursor
self.ipos = 0 if self._fetch_by_files else self.load_range[0]
# prefetch the first entry asynchronously
self._try_get_next(init=True)
def __next__(self):
# print(self.ipos, self.cursor)
if len(self.filelist) == 0:
raise StopIteration
try:
i = self.indices[self.cursor]
except IndexError:
# case 1: first entry, `self.indices` is still empty
# case 2: running out of entries, `self.indices` is not empty
while True:
if self._in_memory and len(self.indices) > 0:
# only need to re-shuffle the indices, if this is not the first entry
if self._sampler_options['shuffle']:
np.random.shuffle(self.indices)
break
if self.prefetch is None:
# reaching the end as prefetch got nothing
self.table = None
if self._async_load:
self.executor.shutdown(wait=False)
raise StopIteration
# get result from prefetch
if self._async_load:
self.table, self.indices = self.prefetch.result()
else:
self.table, self.indices = self.prefetch
# try to load the next ones asynchronously
self._try_get_next()
# check if any entries are fetched (i.e., passing selection) -- if not, do another fetch
if len(self.indices) > 0:
break
# reset cursor
self.cursor = 0
i = self.indices[self.cursor]
self.cursor += 1
return self.get_data(i)
def _try_get_next(self, init=False):
end_of_list = self.ipos >= len(self.filelist) if self._fetch_by_files else self.ipos >= self.load_range[1]
if end_of_list:
if init:
raise RuntimeError('Nothing to load for worker %d' %
0 if self.worker_info is None else self.worker_info.id)
if self._infinity_mode and not self._in_memory:
# infinity mode: re-start
self.restart()
return
else:
# finite mode: set prefetch to None, exit
self.prefetch = None
return
if self._fetch_by_files:
filelist = self.filelist[int(self.ipos): int(self.ipos + self._fetch_step)]
load_range = self.load_range
else:
filelist = self.filelist
load_range = (self.ipos, min(self.ipos + self._fetch_step, self.load_range[1]))
# _logger.info('Start fetching next batch, len(filelist)=%d, load_range=%s'%(len(filelist), load_range))
if self._async_load:
self.prefetch = self.executor.submit(_load_next, self._data_config,
filelist, load_range, self._sampler_options)
else:
self.prefetch = _load_next(self._data_config, filelist, load_range, self._sampler_options)
self.ipos += self._fetch_step
def get_data(self, i):
# inputs
X = {k: self.table['_' + k][i].copy() for k in self._data_config.input_names}
# labels
y = {k: self.table[k][i].copy() for k in self._data_config.label_names}
# observers / monitor variables
Z = {k: self.table[k][i].copy() for k in self._data_config.z_variables}
return X, y, Z
class SimpleIterDataset(torch.utils.data.IterableDataset):
r"""Base IterableDataset.
Handles dataloading.
Arguments:
file_dict (dict): dictionary of lists of files to be loaded.
data_config_file (str): YAML file containing data format information.
for_training (bool): flag indicating whether the dataset is used for training or testing.
When set to ``True``, will enable shuffling and sampling-based reweighting.
When set to ``False``, will disable shuffling and reweighting, but will load the observer variables.
load_range_and_fraction (tuple of tuples, ``((start_pos, end_pos), load_frac)``): fractional range of events to load from each file.
E.g., setting load_range_and_fraction=((0, 0.8), 0.5) will randomly load 50% out of the first 80% events from each file (so load 50%*80% = 40% of the file).
fetch_by_files (bool): flag to control how events are retrieved each time we fetch data from disk.
When set to ``True``, will read only a small number (set by ``fetch_step``) of files each time, but load all the events in these files.
When set to ``False``, will read from all input files, but load only a small fraction (set by ``fetch_step``) of events each time.
Default is ``False``, which results in a more uniform sample distribution but reduces the data loading speed.
fetch_step (float or int): fraction of events (when ``fetch_by_files=False``) or number of files (when ``fetch_by_files=True``) to load each time we fetch data from disk.
Event shuffling and reweighting (sampling) is performed each time after we fetch data.
So set this to a large enough value to avoid getting an imbalanced minibatch (due to reweighting/sampling), especially when ``fetch_by_files`` set to ``True``.
Will load all events (files) at once if set to non-positive value.
file_fraction (float): fraction of files to load.
"""
def __init__(self, file_dict, data_config_file, for_training=True, load_range_and_fraction=None,
fetch_by_files=False, fetch_step=0.01, file_fraction=1, remake_weights=False, up_sample=True,
weight_scale=1, max_resample=10, async_load=True, infinity_mode=False, in_memory=False, name=''):
self._iters = {} if infinity_mode or in_memory else None
_init_args = set(self.__dict__.keys())
self._init_file_dict = file_dict
self._init_load_range_and_fraction = load_range_and_fraction
self._fetch_by_files = fetch_by_files
self._fetch_step = fetch_step
self._file_fraction = file_fraction
self._async_load = async_load
self._infinity_mode = infinity_mode
self._in_memory = in_memory
self._name = name
# ==== sampling parameters ====
self._sampler_options = {
'up_sample': up_sample,
'weight_scale': weight_scale,
'max_resample': max_resample,
}
if for_training:
self._sampler_options.update(training=True, shuffle=True, reweight=True)
else:
self._sampler_options.update(training=False, shuffle=False, reweight=False)
# discover auto-generated reweight file
data_config_md5 = _md5(data_config_file)
data_config_autogen_file = data_config_file.replace('.yaml', '.%s.auto.yaml' % data_config_md5)
if os.path.exists(data_config_autogen_file):
data_config_file = data_config_autogen_file
_logger.info('Found file %s w/ auto-generated preprocessing information, will use that instead!' %
data_config_file)
# load data config (w/ observers now -- so they will be included in the auto-generated yaml)
self._data_config = DataConfig.load(data_config_file)
if for_training:
# produce variable standardization info if needed
if self._data_config._missing_standardization_info:
s = AutoStandardizer(file_dict, self._data_config)
self._data_config = s.produce(data_config_autogen_file)
# produce reweight info if needed
if self._sampler_options['reweight'] and self._data_config.weight_name and not self._data_config.use_precomputed_weights:
if remake_weights or self._data_config.reweight_hists is None:
w = WeightMaker(file_dict, self._data_config)
self._data_config = w.produce(data_config_autogen_file)
# reload data_config w/o observers for training
if os.path.exists(data_config_autogen_file) and data_config_file != data_config_autogen_file:
data_config_file = data_config_autogen_file
_logger.info(
'Found file %s w/ auto-generated preprocessing information, will use that instead!' %
data_config_file)
self._data_config = DataConfig.load(data_config_file, load_observers=False)
# derive all variables added to self.__dict__
self._init_args = set(self.__dict__.keys()) - _init_args
@property
def config(self):
return self._data_config
def __iter__(self):
if self._iters is None:
kwargs = {k: copy.deepcopy(self.__dict__[k]) for k in self._init_args}
return _SimpleIter(**kwargs)
else:
worker_info = torch.utils.data.get_worker_info()
worker_id = worker_info.id if worker_info is not None else 0
try:
return self._iters[worker_id]
except KeyError:
kwargs = {k: copy.deepcopy(self.__dict__[k]) for k in self._init_args}
self._iters[worker_id] = _SimpleIter(**kwargs)
return self._iters[worker_id]
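# Example (illustrative sketch): wiring the dataset into a PyTorch DataLoader.
# The file names and the YAML config path are assumptions.
#
#   file_dict = {'signal': ['sig_0.root'], 'background': ['bkg_0.root']}
#   dataset = SimpleIterDataset(file_dict, 'data_config.yaml', for_training=True,
#                               fetch_step=0.01, file_fraction=1, name='train')
#   loader = torch.utils.data.DataLoader(dataset, batch_size=128, num_workers=2)
#   for X, y, Z in loader:
#       ...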
|
the-stack_0_18828 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=8
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.Y.on(input_qubit[3])) # number=5
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=6
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=7
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq15.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() |
the-stack_0_18829 | #!/usr/bin/env python3
# coding=utf-8
import os
import logging
import re
import subprocess
from argparse import ArgumentParser
import datetime
import itertools
import sys
import time
import json
from pprint import pprint
from lib.amazon import target_group_arn_for, get_autoscaling_group, get_releases, find_release, get_current_key, \
set_current_key, as_client, release_for, find_latest_release, get_all_current, remove_release, get_events_file, \
save_event_file, get_short_link, put_short_link, delete_short_link, list_short_links, delete_s3_links, \
get_autoscaling_groups_for, download_release_file, log_new_build, list_all_build_logs, list_period_build_logs
from lib.instance import AdminInstance, BuilderInstance, Instance, print_instances
from lib.ssh import run_remote_shell, exec_remote, exec_remote_all, exec_remote_to_stdout
logger = logging.getLogger('ce')
RELEASE_FORMAT = '{: <5} {: <10} {: <10} {: <10} {: <14}'
ADS_FORMAT = '{: <5} {: <10} {: <20}'
DECORATION_FORMAT = '{: <10} {: <15} {: <30} {: <50}'
def dispatch_global(sub, args):
globals()['{}_{}_cmd'.format(sub, args['{}_sub'.format(sub)])](args)
def pick_instance(args):
instances = Instance.elb_instances(target_group_arn_for(args))
if len(instances) == 1:
return instances[0]
while True:
print_instances(instances, number=True)
inst = input('Which instance? ')
try:
return instances[int(inst)]
except:
pass
def pick_instances(args):
# TODO, maybe something in args to select only some?
return Instance.elb_instances(target_group_arn_for(args))
def sizeof_fmt(num, suffix='B'):
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def describe_current_release(args):
current = get_current_key(args)
if not current:
return "none"
r = release_for(get_releases(), current)
if r:
return str(r)
else:
"non-standard release with s3 key '{}'".format(current)
def wait_for_autoscale_state(instance, state):
logger.info("Waiting for {} to reach autoscale lifecycle '{}'...".format(instance, state))
while True:
cur_state = instance.describe_autoscale()['LifecycleState']
logger.debug("State is {}".format(cur_state))
if cur_state == state:
logger.info("...done")
return
time.sleep(5)
def get_events(args):
events = json.loads(get_events_file(args))
if 'ads' not in events:
events['ads'] = []
if 'decorations' not in events:
events['decorations'] = []
if 'motd' not in events:
events['motd'] = ''
return events
def save_events(args, events):
save_event_file(args, json.dumps(events))
def wait_for_elb_state(instance, state):
logger.info("Waiting for {} to reach ELB state '{}'...".format(instance, state))
while True:
instance.update()
instance_state = instance.instance.state['Name']
if instance_state != 'running':
raise RuntimeError('Instance no longer running (state {})'.format(instance_state))
logger.debug("State is {}".format(instance.elb_health))
if instance.elb_health == state:
logger.info("...done")
return
time.sleep(5)
def are_you_sure(name, args):
env = args['env']
while True:
typed = input('Confirm {} in env {}\nType the name of the environment: '.format(name, env))
if typed == env:
return True
def confirm_branch(release):
branch = release.branch
while True:
typed = input('Confirm build branch "{}"\nType the name of the branch: '.format(branch))
if typed == branch:
return True
def confirm_action(description):
typed = input('{}: [Y/N]\n'.format(description))
return typed == 'Y'
def is_everything_awesome(instance):
try:
response = exec_remote(instance, ['curl', '-s', '--max-time', '2', 'http://127.0.0.1/healthcheck'])
return response.strip() == "Everything is awesome"
except subprocess.CalledProcessError:
return False
def wait_for_healthok(instance):
logger.info("Waiting for instance to be Online {}".format(instance))
sys.stdout.write('Waiting')
while not is_everything_awesome(instance):
sys.stdout.write('.')
# Flush stdout so tmux updates
sys.stdout.flush()
time.sleep(10)
print("Ok, Everything is awesome!")
def restart_one_instance(as_group_name, instance, modified_groups):
instance_id = instance.instance.instance_id
logger.info("Enabling instance protection for {}".format(instance))
as_client.set_instance_protection(AutoScalingGroupName=as_group_name,
InstanceIds=[instance_id],
ProtectedFromScaleIn=True)
as_group = get_autoscaling_group(as_group_name)
adjustment_required = as_group['DesiredCapacity'] == as_group['MinSize']
if adjustment_required:
logger.info("Group '{}' needs to be adjusted to keep enough nodes".format(as_group_name))
modified_groups[as_group['AutoScalingGroupName']] = as_group['DesiredCapacity']
logger.info("Putting {} into standby".format(instance))
as_client.enter_standby(
InstanceIds=[instance_id],
AutoScalingGroupName=as_group_name,
ShouldDecrementDesiredCapacity=not adjustment_required)
wait_for_autoscale_state(instance, 'Standby')
logger.info("Restarting service on {}".format(instance))
restart_response = exec_remote(instance, ['sudo', 'systemctl', 'restart', 'compiler-explorer'])
if restart_response:
logger.warn("Restart gave some output: {}".format(restart_response))
wait_for_healthok(instance)
logger.info("Moving {} out of standby".format(instance))
as_client.exit_standby(
InstanceIds=[instance_id],
AutoScalingGroupName=as_group_name)
wait_for_autoscale_state(instance, 'InService')
wait_for_elb_state(instance, 'healthy')
logger.info("Disabling instance protection for {}".format(instance))
as_client.set_instance_protection(AutoScalingGroupName=as_group_name,
InstanceIds=[instance_id],
ProtectedFromScaleIn=False)
logger.info("Instance restarted ok")
def admin_cmd(args):
run_remote_shell(args, AdminInstance.instance())
def builder_cmd(args):
dispatch_global('builder', args)
def builder_login_cmd(args):
instance = BuilderInstance.instance()
run_remote_shell(args, instance)
def builder_exec_cmd(args):
instance = BuilderInstance.instance()
exec_remote_to_stdout(instance, args['remote_cmd'])
def builder_start_cmd(args):
instance = BuilderInstance.instance()
if instance.status() == 'stopped':
print("Starting builder instance...")
instance.start()
for i in range(60):
if instance.status() == 'running':
break
time.sleep(1)
else:
raise RuntimeError("Unable to start instance, still in state: {}".format(instance.status()))
for i in range(60):
try:
r = exec_remote(instance, ["echo", "hello"])
if r.strip() == "hello":
break
except Exception as e:
print("Still waiting for SSH: got: {}".format(e))
pass
time.sleep(1)
else:
raise RuntimeError("Unable to get SSH access")
res = exec_remote(instance, ["bash", "-c", "cd compiler-explorer-image && git pull && sudo ./setup-builder.sh"])
print(res)
print("Builder started OK")
def builder_stop_cmd(args):
BuilderInstance.instance().stop()
def builder_status_cmd(args):
print("Builder status: {}".format(BuilderInstance.instance().status()))
def instances_cmd(args):
dispatch_global('instances', args)
def instances_exec_all_cmd(args):
if not are_you_sure('exec all', args):
return
remote_cmd = args['remote_cmd']
print("Running '{}' on all instances".format(' '.join(remote_cmd)))
exec_remote_all(pick_instances(args), remote_cmd)
def instances_login_cmd(args):
instance = pick_instance(args)
run_remote_shell(args, instance)
def instances_restart_one_cmd(args):
instance = pick_instance(args)
as_instance_status = instance.describe_autoscale()
as_group_name = as_instance_status['AutoScalingGroupName']
modified_groups = {}
try:
restart_one_instance(as_group_name, instance, modified_groups)
except Exception as e:
logger.error("Failed restarting {} - skipping: {}".format(instance, e))
def instances_start_cmd(args):
print("Starting version {}".format(describe_current_release(args)))
exec_remote_all(pick_instances(args), ['sudo', 'systemctl', 'start', 'compiler-explorer'])
def instances_stop_cmd(args):
if not are_you_sure('stop', args):
return
exec_remote_all(pick_instances(args), ['sudo', 'systemctl', 'stop', 'compiler-explorer'])
def instances_restart_cmd(args):
if not are_you_sure('restart version {}'.format(describe_current_release(args)), args):
return
# Store old motd
begin_time = datetime.datetime.now()
events = get_events(args)
old_motd = events['motd']
events['motd'] = old_motd if args['motd'] == '' else args['motd']
save_events(args, events)
modified_groups = {}
failed = False
for instance in pick_instances(args):
logger.info("Restarting {}...".format(instance))
as_instance_status = instance.describe_autoscale()
as_group_name = as_instance_status['AutoScalingGroupName']
if as_instance_status['LifecycleState'] != 'InService':
logger.error("Skipping {} as it is not InService ({})".format(instance, as_instance_status))
continue
try:
restart_one_instance(as_group_name, instance, modified_groups)
except Exception as e:
logger.error("Failed restarting {} - skipping: {}".format(instance, e))
failed = True
# TODO, what here?
for group, desired in iter(modified_groups.items()):
logger.info("Putting desired instances for {} back to {}".format(group, desired))
as_client.update_auto_scaling_group(AutoScalingGroupName=group, DesiredCapacity=desired)
# Events might have changed, re-fetch
events = get_events(args)
events['motd'] = old_motd
save_events(args, events)
end_time = datetime.datetime.now()
delta_time = end_time - begin_time
print(f'Instances restarted in { delta_time.total_seconds()} seconds')
sys.exit(1 if failed else 0)
def instances_status_cmd(args):
print_instances(Instance.elb_instances(target_group_arn_for(args)), number=False)
def builds_cmd(args):
dispatch_global('builds', args)
def builds_current_cmd(args):
print(describe_current_release(args))
def deploy_staticfiles(branch, versionfile):
print("Deploying static files")
downloadfile = versionfile
filename = 'deploy.tar.xz'
remotefile = branch + '/' + downloadfile
download_release_file(remotefile[1:], filename)
os.mkdir('deploy')
subprocess.call(['tar', '-C', 'deploy', '-Jxf', filename])
os.remove(filename)
subprocess.call(['aws', 's3', 'sync', 'deploy/out/dist/dist', 's3://compiler-explorer/dist/cdn'])
subprocess.call(['rm', '-Rf', 'deploy'])
def builds_set_current_cmd(args):
to_set = None
if args['raw']:
to_set = args['version']
else:
setting_latest = args['version'] == 'latest'
release = find_latest_release(args['branch']) if setting_latest else find_release(int(args['version']))
if not release:
print("Unable to find version " + args['version'])
if setting_latest and args['branch'] != '':
print('Branch {} has no available versions (Bad branch/No image yet built)'.format(args['branch']))
elif are_you_sure('change current version to {}'.format(release.key), args) and confirm_branch(release):
print('Found release {}'.format(release))
to_set = release.key
if to_set is not None:
log_new_build(args, to_set)
deploy_staticfiles(args['branch'], to_set)
set_current_key(args, to_set)
def builds_rm_old_cmd(args):
current = get_all_current()
all_releases = get_releases()
max_build = max(x.version for x in all_releases)
    for release in all_releases:
if release.key in current:
print("Skipping {} as it is a current version".format(release))
else:
age = max_build - release.version
if age > args['age']:
if args['dry_run']:
print("Would remove build {}".format(release))
else:
print("Removing build {}".format(release))
remove_release(release)
else:
print("Keeping build {}".format(release))
def builds_list_cmd(args):
current = get_current_key(args)
releases = get_releases()
filter_branches = set(args['branch'].split(',') if args['branch'] is not None else [])
print(RELEASE_FORMAT.format('Live', 'Branch', 'Version', 'Size', 'Hash'))
    for branch, branch_releases in itertools.groupby(releases, lambda r: r.branch):
        for release in branch_releases:
if len(filter_branches) == 0 or release.branch in filter_branches:
print(
RELEASE_FORMAT.format(
' -->' if release.key == current else '',
release.branch, release.version, sizeof_fmt(release.size), str(release.hash))
)
def builds_history_cmd(args):
from_time = args['from']
until_time = args['until']
if from_time is None and until_time is None:
if confirm_action(
                'Do you want to list all builds for {}? It might be an expensive operation:'.format(args['env'])):
list_all_build_logs(args)
else:
list_period_build_logs(args, from_time, until_time)
def ads_cmd(args):
dispatch_global('ads', args)
def ads_list_cmd(args):
events = get_events(args)
print(ADS_FORMAT.format('ID', 'Filters', 'HTML'))
for ad in events['ads']:
print(ADS_FORMAT.format(ad['id'], str(ad['filter']), ad['html']))
def ads_add_cmd(args):
events = get_events(args)
new_ad = {
'html': args['html'],
'filter': args['filter'].split(',') if len(args['filter']) > 0 else [],
'id': max([x['id'] for x in events['ads']]) + 1 if len(events['ads']) > 0 else 0
}
if are_you_sure('add ad: {}'.format(ADS_FORMAT.format(new_ad['id'], str(new_ad['filter']), new_ad['html'])), args):
events['ads'].append(new_ad)
save_event_file(args, json.dumps(events))
def ads_remove_cmd(args):
events = get_events(args)
for i, ad in enumerate(events['ads']):
if ad['id'] == args['id']:
if args['force'] or \
are_you_sure('remove ad: {}'.format(ADS_FORMAT.format(ad['id'], str(ad['filter']), ad['html'])),
args):
del events['ads'][i]
save_event_file(args, json.dumps(events))
break
def ads_clear_cmd(args):
events = get_events(args)
if are_you_sure('clear all ads (Count: {})'.format(len(events['ads'])), args):
events['ads'] = []
save_event_file(args, json.dumps(events))
def ads_edit_cmd(args):
events = get_events(args)
for i, ad in enumerate(events['ads']):
if ad['id'] == args['id']:
new_ad = {
'id': ad['id'],
'filter': (args['filter'].split(',') if len(args['filter']) > 0 else [])
if args['filter'] is not None else ad['filter'],
'html': args['html'] or ad['html']
}
print('{}\n{}\n{}'.format(ADS_FORMAT.format('Event', 'Filter(s)', 'HTML'),
ADS_FORMAT.format('<FROM', str(ad['filter']), ad['html']),
ADS_FORMAT.format('>TO', str(new_ad['filter']), new_ad['html'])))
if are_you_sure('edit ad id: {}'.format(ad['id']), args):
events['ads'][i] = new_ad
save_event_file(args, json.dumps(events))
break
def decorations_cmd(args):
dispatch_global('decorations', args)
def decorations_list_cmd(args):
events = get_events(args)
print(DECORATION_FORMAT.format('Name', 'Filters', 'Regex', 'Decoration'))
for dec in events['decorations']:
print(DECORATION_FORMAT.format(dec['name'], str(dec['filter']), dec['regex'], json.dumps(dec['decoration'])))
def check_dec_args(regex, decoration):
try:
re.compile(regex)
except re.error as re_err:
raise RuntimeError(f"Unable to validate regex '{regex}' : {re_err}")
try:
decoration = json.loads(decoration)
except json.decoder.JSONDecodeError as json_err:
raise RuntimeError(f"Unable to parse decoration '{decoration}' : {json_err}")
return regex, decoration
def decorations_add_cmd(args):
events = get_events(args)
if args['name'] in [d['name'] for d in events['decorations']]:
raise RuntimeError(f'Duplicate decoration name {args["name"]}')
regex, decoration = check_dec_args(args['regex'], args['decoration'])
new_decoration = {
'name': args['name'],
'filter': args['filter'].split(',') if len(args['filter']) > 0 else [],
'regex': regex,
'decoration': decoration
}
if are_you_sure('add decoration: {}'.format(
DECORATION_FORMAT.format(new_decoration['name'], str(new_decoration['filter']), new_decoration['regex'],
json.dumps(new_decoration['decoration']))), args):
events['decorations'].append(new_decoration)
save_event_file(args, json.dumps(events))
def decorations_remove_cmd(args):
events = get_events(args)
for i, dec in enumerate(events['decorations']):
if dec['name'] == args['name']:
if args['force'] or \
are_you_sure('remove decoration: {}'.format(
DECORATION_FORMAT.format(dec['name'], str(dec['filter']), dec['regex'],
json.dumps(dec['decoration']))), args):
del events['decorations'][i]
save_event_file(args, json.dumps(events))
break
def decorations_clear_cmd(args):
events = get_events(args)
if are_you_sure('clear all decorations (Count: {})'.format(len(events['decorations'])), args):
events['decorations'] = []
save_event_file(args, json.dumps(events))
def decorations_edit_cmd(args):
events = get_events(args)
for i, dec in enumerate(events['decorations']):
if dec['name'] == args['name']:
regex, decoration = check_dec_args(args['regex'] or dec['regex'],
args['decoration'] or json.dumps(dec['decoration']))
new_dec = {
'name': dec['name'],
'filter': (args['filter'].split(',') if len(args['filter']) > 0 else [])
if args['filter'] is not None else dec['filter'],
'regex': regex,
'decoration': decoration
}
print('{}\n{}\n{}'.format(DECORATION_FORMAT.format('Name', 'Filters', 'Regex', 'Decoration'),
DECORATION_FORMAT.format('<FROM', str(dec['filter']), dec['regex'],
json.dumps(dec['decoration'])),
DECORATION_FORMAT.format('>TO', str(new_dec['filter']), new_dec['regex'],
json.dumps(new_dec['decoration']))))
if are_you_sure('edit decoration: {}'.format(dec['name']), args):
                events['decorations'][i] = new_dec
save_event_file(args, json.dumps(events))
break
def motd_cmd(args):
dispatch_global('motd', args)
def motd_show_cmd(args):
events = get_events(args)
print('Current motd: "{}"'.format(events['motd']))
def motd_update_cmd(args):
events = get_events(args)
if are_you_sure('update motd:\nfrom: "{}"\nto: "{}"'.format(events['motd'], args['message']), args):
events['motd'] = args['message']
save_event_file(args, json.dumps(events))
def motd_clear_cmd(args):
events = get_events(args)
if are_you_sure('clear current motd: "{}"'.format(events['motd']), args):
events['motd'] = ''
save_events(args, events)
def events_cmd(args):
dispatch_global('events', args)
def events_to_raw_cmd(args):
print(get_events_file(args))
def events_from_raw_cmd(args):
raw = input()
save_event_file(args, json.dumps(json.loads(raw)))
def events_to_file_cmd(args):
with open(args['path'], mode='w') as f:
f.write(get_events_file(args))
def events_from_file_cmd(args):
with open(args['path'], mode='r') as f:
new_contents = f.read()
    if are_you_sure('load from file "{}"'.format(args['path']), args):
        save_event_file(args, json.dumps(json.loads(new_contents)))
def links_cmd(args):
dispatch_global('links', args)
def links_name_cmd(args):
link_from = args['from']
if len(link_from) < 6:
raise RuntimeError('from length must be at least 6')
if len(args['to']) < 6:
raise RuntimeError('to length must be at least 6')
base_link = get_short_link(link_from)
if not base_link:
raise RuntimeError('Couldn\'t find base link {}'.format(link_from))
base_link['prefix']['S'] = args['to'][0:6]
base_link['unique_subhash']['S'] = args['to']
base_link['stats']['M']['clicks']['N'] = '0'
base_link['creation_ip']['S'] = '0.0.0.0'
# It's us, so we don't care about "anonymizing" the time
base_link['creation_date']['S'] = datetime.datetime.utcnow().isoformat()
title = input('Link title: ')
author = input('Author(s): ')
if len(author) == 0:
# We explicitly ignore author = . in the site code
author = '.'
project = input('Project: ')
description = input('Description: ')
base_link['named_metadata'] = {'M': {
'title': {'S': title},
'author': {'S': author},
'project': {'S': project},
'description': {'S': description}
}}
print('New link: {}'.format(pprint(base_link)))
if are_you_sure('create new link named {}'.format(args['to']), args):
put_short_link(base_link)
def links_update_cmd(args):
link_to = args['to']
link_from = args['from']
if len(link_from) < 6:
raise RuntimeError('from length must be at least 6')
if len(args['to']) < 6:
raise RuntimeError('to length must be at least 6')
base_link = get_short_link(link_from)
if not base_link:
raise RuntimeError('Couldn\'t find base link {}'.format(link_from))
link_to_update = get_short_link(link_to)
if not link_to_update:
raise RuntimeError('Couldn\'t find existing short link {}'.format(link_to))
link_to_update['full_hash'] = base_link['full_hash']
print('New link: {}'.format(pprint(link_to_update)))
if are_you_sure('update link named {}'.format(link_to), args):
put_short_link(link_to_update)
def links_maintenance_cmd(args):
s3links, dblinks = list_short_links()
s3keys_set = set()
dbkeys_set = set()
dbhashes_set = set()
s3dirty_set = set()
dbdirty_set = set()
for page in s3links:
for state in page['Contents']:
if len(state['Key'][6:]) > 1:
s3keys_set.add(state['Key'][6:])
for page in dblinks:
for item in page['Items']:
unique_subhash = item['unique_subhash']['S']
full_hash = item['full_hash']['S']
dbkeys_set.add((unique_subhash, full_hash))
dbhashes_set.add(full_hash)
for dbkey in dbkeys_set:
if dbkey[1] not in s3keys_set:
dbdirty_set.add(dbkey)
for s3key in s3keys_set:
if s3key not in dbhashes_set:
s3dirty_set.add(s3key)
if are_you_sure('delete {} db elements:\n{}\n'.format(len(dbdirty_set), dbdirty_set), args) and not args['dry_run']:
for item in dbdirty_set:
print('Deleting {}'.format(item))
delete_short_link(item)
if are_you_sure('delete {} s3 elements:\n{}\n'.format(len(s3dirty_set), s3dirty_set), args) and not args['dry_run']:
delete_s3_links(s3dirty_set)
def add_required_sub_parsers(parser, dest):
sub_parser = parser.add_subparsers(dest=dest)
sub_parser.required = True # docs say I can pass required=True in add_subparsers but that seems to be a lie
return sub_parser
def environment_cmd(args):
dispatch_global('environment', args)
def environment_status_cmd(args):
for asg in get_autoscaling_groups_for(args):
group_name = asg['AutoScalingGroupName']
instances = asg['DesiredCapacity']
print(f"Found ASG {group_name} with desired instances {instances}")
def environment_start_cmd(args):
for asg in get_autoscaling_groups_for(args):
group_name = asg['AutoScalingGroupName']
if asg['MinSize'] > 0:
print(f"Skipping ASG {group_name} as it has a non-zero min size")
continue
prev = asg['DesiredCapacity']
if prev:
print(f"Skipping ASG {group_name} as it has non-zero desired capacity")
continue
print(f"Updating {group_name} to have desired capacity 1 (from {prev})")
as_client.update_auto_scaling_group(AutoScalingGroupName=group_name, DesiredCapacity=1)
def environment_stop_cmd(args):
for asg in get_autoscaling_groups_for(args):
group_name = asg['AutoScalingGroupName']
if asg['MinSize'] > 0:
print(f"Skipping ASG {group_name} as it has a non-zero min size")
continue
prev = asg['DesiredCapacity']
if not prev:
            print(f"Skipping ASG {group_name} as it already has zero desired capacity")
continue
print(f"Updating {group_name} to have desired capacity 0 (from {prev})")
as_client.update_auto_scaling_group(AutoScalingGroupName=group_name, DesiredCapacity=0)
def main():
parser = ArgumentParser(prog='ce', description='Administrate Compiler Explorer instances')
parser.add_argument('--env', choices=['prod', 'beta', 'staging'], default='staging', metavar='ENV',
help='Select environment ENV')
parser.add_argument('--mosh', action='store_true', help='Use mosh for interactive shells')
parser.add_argument('--debug', action='store_true', help='Increase debug information')
subparsers = add_required_sub_parsers(parser, 'command')
subparsers.add_parser('admin')
builder_parser = subparsers.add_parser('builder')
builder_sub = add_required_sub_parsers(builder_parser, 'builder_sub')
builder_sub.required = True
builder_sub.add_parser('start')
builder_sub.add_parser('stop')
builder_sub.add_parser('status')
builder_sub.add_parser('login')
builder_exec = builder_sub.add_parser('exec')
builder_exec.add_argument('remote_cmd', nargs='+', help='command to run on builder node')
builds_parser = subparsers.add_parser('builds')
builds_sub = add_required_sub_parsers(builds_parser, 'builds_sub')
list_parser = builds_sub.add_parser('list')
list_parser.add_argument('-b', '--branch', type=str, help='show only selected branches')
builds_sub.add_parser('current')
set_current = builds_sub.add_parser('set_current')
set_current.add_argument('version', help='version to set')
set_current.add_argument('--branch', help='if version == latest, branch to get latest version from', type=str,
default='')
set_current.add_argument('--raw', action='store_true', help='Set a raw path for a version')
expire = builds_sub.add_parser('rm_old', help='delete old versions')
expire.add_argument('age', help='keep the most recent AGE builds (as well as current builds)', metavar='AGE',
type=int)
expire.add_argument('--dry-run', help='dry run only', action='store_true')
history_parser = builds_sub.add_parser('history')
history_parser.add_argument('--from', help='timestamp filter')
history_parser.add_argument('--until', help='timestamp filter')
instances_parser = subparsers.add_parser('instances')
instances_sub = add_required_sub_parsers(instances_parser, 'instances_sub')
instances_sub.add_parser('status')
instances_sub.add_parser('restart_one')
instances_sub.add_parser('login')
exec_all_parser = instances_sub.add_parser('exec_all')
exec_all_parser.add_argument('remote_cmd', nargs='+', help='command to run on all nodes')
instances_sub.add_parser('start')
instances_sub.add_parser('stop')
instances_restart_parser = instances_sub.add_parser('restart')
instances_restart_parser.add_argument('--motd', type=str, default='Site is being updated')
ads_parser = subparsers.add_parser('ads')
ads_sub = add_required_sub_parsers(ads_parser, 'ads_sub')
ads_sub.add_parser('list')
ads_add_parser = ads_sub.add_parser('add')
ads_add_parser.add_argument('html', type=str, help='message contents')
ads_add_parser.add_argument('--filter', type=str, help='target languages', default="")
ads_remove_parser = ads_sub.add_parser('remove')
ads_remove_parser.add_argument('id', type=int, help='remove ad by id')
ads_remove_parser.add_argument('-f', '--force', action='store_true', default=False, help='no confirmation needed')
ads_sub.add_parser('clear')
ads_edit_parser = ads_sub.add_parser('edit')
ads_edit_parser.add_argument('id', type=int, help='event to edit')
ads_edit_parser.add_argument('--html', type=str, help='new ad contents')
ads_edit_parser.add_argument('--filter', type=str, help='new ad filter(s)')
decorations_parser = subparsers.add_parser('decorations')
decorations_sub = add_required_sub_parsers(decorations_parser, 'decorations_sub')
decorations_sub.add_parser('list')
decorations_add_parser = decorations_sub.add_parser('add')
decorations_add_parser.add_argument('name', type=str, help='name')
decorations_add_parser.add_argument('regex', type=str, help='regex')
decorations_add_parser.add_argument('decoration', type=str, help='decoration (JSON format)')
decorations_add_parser.add_argument('--filter', type=str, help='target languages', default="")
decorations_remove_parser = decorations_sub.add_parser('remove')
decorations_remove_parser.add_argument('name', type=str, help='remove decoration by name')
decorations_remove_parser.add_argument('-f', '--force', action='store_true', default=False,
help='no confirmation needed')
decorations_sub.add_parser('clear')
decorations_edit_parser = decorations_sub.add_parser('edit')
decorations_edit_parser.add_argument('name', type=str, help='decoration to edit')
decorations_edit_parser.add_argument('--regex', type=str, help='new regex')
decorations_edit_parser.add_argument('--decoration', type=str, help='new decoration')
decorations_edit_parser.add_argument('--filter', type=str, help='new decoration filter(s)')
motd_parser = subparsers.add_parser('motd')
motd_sub = add_required_sub_parsers(motd_parser, 'motd_sub')
motd_sub.add_parser('show')
motd_update_parser = motd_sub.add_parser('update')
motd_update_parser.add_argument('message', type=str, help='new motd')
motd_sub.add_parser('clear')
events_parser = subparsers.add_parser('events')
events_sub = add_required_sub_parsers(events_parser, 'events_sub')
events_from_file_parser = events_sub.add_parser('from_file')
events_from_file_parser.add_argument('path', type=str, help='location of file to load from')
events_to_file_parser = events_sub.add_parser('to_file')
events_to_file_parser.add_argument('path', type=str, help='location of file to save to')
events_sub.add_parser('from_raw')
events_sub.add_parser('to_raw')
links_parser = subparsers.add_parser('links')
links_sub = add_required_sub_parsers(links_parser, 'links_sub')
links_name_parser = links_sub.add_parser('name')
links_name_parser.add_argument('from', type=str, help='unique subhash to base the link from')
links_name_parser.add_argument('to', type=str, help='name of the link')
links_update_parser = links_sub.add_parser('update')
links_update_parser.add_argument('from', type=str, help='short link to copy from')
links_update_parser.add_argument('to', type=str, help='named short link to update')
links_maintenance_parser = links_sub.add_parser('maintenance')
links_maintenance_parser.add_argument('--dry-run', action='store_true', help='dry run')
env_parser = subparsers.add_parser('environment')
env_sub = add_required_sub_parsers(env_parser, 'environment_sub')
env_sub.add_parser('start')
env_sub.add_parser('stop')
env_sub.add_parser('status')
kwargs = vars(parser.parse_args())
if kwargs['debug']:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('botocore').setLevel(logging.WARNING)
cmd = kwargs.pop('command')
if cmd not in ('admin', 'builder', 'links'):
if cmd != 'events' or not kwargs['events_sub'].endswith('_raw'):
print("Running in {}".format(kwargs['env']))
globals()[cmd + "_cmd"](kwargs)
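# Editorial sketch: the excerpt above never invokes main(); the conventional entry-point
# guard below is an assumption added for illustration, not part of the dumped source.
if __name__ == '__main__':
    main()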
|
the-stack_0_18830 | # -*- coding: utf-8 -*-
u"""SecureTea IoT Checker.
Project:
╔═╗┌─┐┌─┐┬ ┬┬─┐┌─┐╔╦╗┌─┐┌─┐
╚═╗├┤ │ │ │├┬┘├┤ ║ ├┤ ├─┤
╚═╝└─┘└─┘└─┘┴└─└─┘ ╩ └─┘┴ ┴
Author: Abhishek Sharma <[email protected]> , Jul 17 2019
Version: 1.4
Module: SecureTea
"""
import shodan
import requests
from securetea.lib.iot.iot_logger import IoTLogger
class IoTChecker(object):
"""IoTChecker class."""
def __init__(self, debug=False, api_key=None, ip=None):
"""
Initialize IoTChecker.
Args:
debug (bool): Log on terminal or not
Raises:
None
Returns:
None
"""
# Initialize logger
self.logger = IoTLogger(
__name__,
debug=debug
)
# Initialize API Key to None
self._API_KEY = None
# Initialize Shodan API Key
if api_key and api_key != "XXXX":
self._API_KEY = api_key
else:
self.logger.log(
"Invalid Shodan API Key",
logtype="error"
)
# URL to fetch public IP address
self._PUBLIC_IP_URL = "https://ident.me"
if self._API_KEY:
# Initialize Shodan object
self.shodan_obj = shodan.Shodan(self._API_KEY)
if ip and ip != "":
self.ip = ip
else:
# Collect public IP
self.ip = self.get_public_ip()
def get_public_ip(self):
"""
Get public IP address of the device.
Args:
None
Raises:
None
Returns:
ip_addr (str): Public IP address of the device
"""
ip_addr = requests.get(self._PUBLIC_IP_URL).text
return ip_addr.strip(" ")
def check_shodan_range(self):
"""
Check whether the IP address is under
Shodan range or not.
Args:
None
Raises:
None
Returns:
None
"""
if self._API_KEY and self.ip:
self.logger.log(
"Checking IoT device: {0} if under Shodan range".format(self.ip),
logtype="info"
)
try:
results = self.shodan_obj.host(self.ip)
if results:
self.logger.log(
"IP: {0} under Shodan range (risk)".format(self.ip),
logtype="warning"
)
else:
self.logger.log(
"IP: {0} not under Shodan range (safe)".format(self.ip),
logtype="info"
)
except shodan.APIError:
self.logger.log(
"IP: {0} not under Shodan range (safe)".format(self.ip),
logtype="info"
)
else:
self.logger.log(
"Configuration parameters not set.",
logtype="error"
)
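# Editorial usage sketch (not part of the original module). The API key and IP below are
# placeholders; a valid Shodan key is required for the lookup to return real results.
if __name__ == "__main__":
    checker = IoTChecker(debug=True, api_key="YOUR-SHODAN-API-KEY", ip="203.0.113.10")
    checker.check_shodan_range()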
|
the-stack_0_18831 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Early Stopping
^^^^^^^^^^^^^^
Monitor a validation metric and stop training when it stops improving.
"""
import numpy as np
import torch
from pytorch_lightning import _logger as log
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.utilities import rank_zero_warn
import os
torch_inf = torch.tensor(np.Inf)
try:
import torch_xla
import torch_xla.core.xla_model as xm
except ImportError:
XLA_AVAILABLE = False
else:
XLA_AVAILABLE = True
class EarlyStopping(Callback):
r"""
Args:
monitor: quantity to be monitored. Default: ``'early_stop_on'``.
min_delta: minimum change in the monitored quantity
to qualify as an improvement, i.e. an absolute
change of less than `min_delta`, will count as no
improvement. Default: ``0.0``.
patience: number of validation epochs with no improvement
after which training will be stopped. Default: ``3``.
verbose: verbosity mode. Default: ``False``.
mode: one of {auto, min, max}. In `min` mode,
training will stop when the quantity
monitored has stopped decreasing; in `max`
mode it will stop when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity. Default: ``'auto'``.
strict: whether to crash the training if `monitor` is
not found in the validation metrics. Default: ``True``.
Example::
>>> from pytorch_lightning import Trainer
>>> from pytorch_lightning.callbacks import EarlyStopping
>>> early_stopping = EarlyStopping('val_loss')
>>> trainer = Trainer(early_stop_callback=early_stopping)
"""
mode_dict = {
'min': torch.lt,
'max': torch.gt,
}
def __init__(self, monitor: str = 'early_stop_on', min_delta: float = 0.0, patience: int = 3,
verbose: bool = False, mode: str = 'auto', strict: bool = True):
super().__init__()
self.monitor = monitor
self.patience = patience
self.verbose = verbose
self.strict = strict
self.min_delta = min_delta
self.wait_count = 0
self.stopped_epoch = 0
self.mode = mode
self.warned_result_obj = False
# Indicates, if eval results are used as basis for early stopping
# It is set to False initially and overwritten, if eval results have been validated
self.based_on_eval_results = False
if mode not in self.mode_dict:
if self.verbose > 0:
log.info(f'EarlyStopping mode {mode} is unknown, fallback to auto mode.')
self.mode = 'auto'
if self.mode == 'auto':
if self.monitor == 'acc':
self.mode = 'max'
else:
self.mode = 'min'
if self.verbose > 0:
log.info(f'EarlyStopping mode set to {self.mode} for monitoring {self.monitor}.')
self.min_delta *= 1 if self.monitor_op == torch.gt else -1
self.best_score = torch_inf if self.monitor_op == torch.lt else -torch_inf
def _validate_condition_metric(self, logs):
monitor_val = logs.get(self.monitor)
error_msg = (f'Early stopping conditioned on metric `{self.monitor}`'
f' which is not available. Either add `{self.monitor}` to the return of'
' `validation_epoch_end` or modify your `EarlyStopping` callback to use any of the'
f' following: `{"`, `".join(list(logs.keys()))}`')
if monitor_val is None:
if self.strict:
raise RuntimeError(error_msg)
if self.verbose > 0:
rank_zero_warn(error_msg, RuntimeWarning)
return False
return True
@property
def monitor_op(self):
return self.mode_dict[self.mode]
def on_save_checkpoint(self, trainer, pl_module):
return {
'wait_count': self.wait_count,
'stopped_epoch': self.stopped_epoch,
'best_score': self.best_score,
'patience': self.patience
}
def on_load_checkpoint(self, checkpointed_state):
self.wait_count = checkpointed_state['wait_count']
self.stopped_epoch = checkpointed_state['stopped_epoch']
self.best_score = checkpointed_state['best_score']
self.patience = checkpointed_state['patience']
def on_validation_end(self, trainer, pl_module):
if trainer.running_sanity_check:
return
self._run_early_stopping_check(trainer, pl_module)
def on_validation_epoch_end(self, trainer, pl_module):
if trainer.running_sanity_check:
return
if self._validate_condition_metric(trainer.logger_connector.callback_metrics):
# turn off early stopping in on_train_epoch_end
self.based_on_eval_results = True
def on_train_epoch_end(self, trainer, pl_module):
# disable early stopping in train loop when there's a val loop
if self.based_on_eval_results:
return
# early stopping can also work in the train loop when there is no val loop
should_check_early_stop = False
# fallback to monitor key in result dict
if trainer.logger_connector.callback_metrics.get(self.monitor, None) is not None:
should_check_early_stop = True
if should_check_early_stop:
self._run_early_stopping_check(trainer, pl_module)
def _run_early_stopping_check(self, trainer, pl_module):
"""
Checks whether the early stopping condition is met
and if so tells the trainer to stop the training.
"""
logs = trainer.logger_connector.callback_metrics
if not self._validate_condition_metric(logs):
return # short circuit if metric not present
current = logs.get(self.monitor)
# when in dev debugging
trainer.dev_debugger.track_early_stopping_history(current)
if not isinstance(current, torch.Tensor):
current = torch.tensor(current, device=pl_module.device)
if trainer.use_tpu and XLA_AVAILABLE:
current = current.cpu()
if self.monitor_op(current - self.min_delta, self.best_score):
self.best_score = current
self.wait_count = 0
else:
self.wait_count += 1
should_stop = self.wait_count >= self.patience
if bool(should_stop):
self.stopped_epoch = trainer.current_epoch
trainer.should_stop = True
# stop every ddp process if any world process decides to stop
should_stop = trainer.accelerator_backend.early_stopping_should_stop(pl_module)
trainer.should_stop = should_stop
def on_train_end(self, trainer, pl_module):
if self.stopped_epoch > 0 and self.verbose > 0:
rank_zero_warn('Displayed epoch numbers by `EarlyStopping` start from "1" until v0.6.x,'
' but will start from "0" in v0.8.0.', DeprecationWarning)
log.info(f'Epoch {self.stopped_epoch + 1:05d}: early stopping triggered.')
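# Editorial sketch: the callback state round-trips through the checkpoint hooks. The
# trainer/pl_module arguments are unused by on_save_checkpoint, so None placeholders suffice here.
if __name__ == "__main__":
    es = EarlyStopping(monitor='val_loss', patience=5, mode='min')
    state = es.on_save_checkpoint(trainer=None, pl_module=None)
    restored = EarlyStopping(monitor='val_loss')
    restored.on_load_checkpoint(state)
    print(restored.patience, restored.best_score)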
|
the-stack_0_18832 | import logging
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from typing import Callable, Dict, Optional, Union
from alibi_detect.cd.pytorch.classifier import ClassifierDriftTorch
from alibi_detect.utils.pytorch.data import TorchDataset
from alibi_detect.utils.pytorch import GaussianRBF
from alibi_detect.utils.pytorch.prediction import predict_batch
logger = logging.getLogger(__name__)
class SpotTheDiffDriftTorch:
def __init__(
self,
x_ref: np.ndarray,
p_val: float = .05,
preprocess_fn: Optional[Callable] = None,
kernel: Optional[nn.Module] = None,
n_diffs: int = 1,
initial_diffs: Optional[np.ndarray] = None,
l1_reg: float = 0.01,
binarize_preds: bool = False,
train_size: Optional[float] = .75,
n_folds: Optional[int] = None,
retrain_from_scratch: bool = True,
seed: int = 0,
optimizer: Callable = torch.optim.Adam,
learning_rate: float = 1e-3,
batch_size: int = 32,
preprocess_batch_fn: Optional[Callable] = None,
epochs: int = 3,
verbose: int = 0,
train_kwargs: Optional[dict] = None,
device: Optional[str] = None,
dataset: Callable = TorchDataset,
dataloader: Callable = DataLoader,
data_type: Optional[str] = None
) -> None:
"""
Classifier-based drift detector with a classifier of form y = a + b_1*k(x,w_1) + ... + b_J*k(x,w_J),
where k is a kernel and w_1,...,w_J are learnable test locations. If drift has occured the test locations
learn to be more/less (given by sign of b_i) similar to test instances than reference instances.
The test locations are regularised to be close to the average reference instance such that the **difference**
is then interpretable as the transformation required for each feature to make the average instance more/less
like a test instance than a reference instance.
The classifier is trained on a fraction of the combined reference and test data and drift is detected on
the remaining data. To use all the data to detect drift, a stratified cross-validation scheme can be chosen.
Parameters
----------
x_ref
Data used as reference distribution.
p_val
p-value used for the significance of the test.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
kernel
Differentiable Pytorch model used to define similarity between instances, defaults to Gaussian RBF.
n_diffs
The number of test locations to use, each corresponding to an interpretable difference.
initial_diffs
Array used to initialise the diffs that will be learned. Defaults to Gaussian
for each feature with equal variance to that of reference data.
l1_reg
Strength of l1 regularisation to apply to the differences.
binarize_preds
Whether to test for discrepency on soft (e.g. probs/logits) model predictions directly
with a K-S test or binarise to 0-1 prediction errors and apply a binomial test.
train_size
Optional fraction (float between 0 and 1) of the dataset used to train the classifier.
The drift is detected on `1 - train_size`. Cannot be used in combination with `n_folds`.
n_folds
Optional number of stratified folds used for training. The model preds are then calculated
on all the out-of-fold instances. This allows to leverage all the reference and test data
for drift detection at the expense of longer computation. If both `train_size` and `n_folds`
are specified, `n_folds` is prioritized.
retrain_from_scratch
Whether the classifier should be retrained from scratch for each set of test data or whether
it should instead continue training from where it left off on the previous set.
seed
Optional random seed for fold selection.
optimizer
Optimizer used during training of the classifier.
learning_rate
Learning rate used by optimizer.
batch_size
Batch size used during training of the classifier.
preprocess_batch_fn
Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
processed by the model.
epochs
Number of training epochs for the classifier for each (optional) fold.
verbose
Verbosity level during the training of the classifier. 0 is silent, 1 a progress bar.
train_kwargs
Optional additional kwargs when fitting the classifier.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'.
dataset
Dataset object used during training.
dataloader
Dataloader object used during training.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
if preprocess_fn is not None and preprocess_batch_fn is not None:
raise ValueError("SpotTheDiffDrift detector only supports preprocess_fn or preprocess_batch_fn, not both.")
if n_folds is not None and n_folds > 1:
logger.warning("When using multiple folds the returned diffs will correspond to the final fold only.")
if preprocess_fn is not None:
x_ref_proc = preprocess_fn(x_ref)
elif preprocess_batch_fn is not None:
x_ref_proc = predict_batch(
x_ref, lambda x: x, preprocess_fn=preprocess_batch_fn,
device=torch.device('cpu'), batch_size=batch_size
)
else:
x_ref_proc = x_ref
if kernel is None:
kernel = GaussianRBF(trainable=True)
if initial_diffs is None:
initial_diffs = np.random.normal(size=(n_diffs,) + x_ref_proc.shape[1:]) * x_ref_proc.std(0)
else:
if len(initial_diffs) != n_diffs:
raise ValueError("Should have initial_diffs.shape[0] == n_diffs")
model = SpotTheDiffDriftTorch.InterpretableClf(kernel, x_ref_proc, initial_diffs)
reg_loss_fn = (lambda model: model.diffs.abs().mean() * l1_reg)
self._detector = ClassifierDriftTorch(
x_ref=x_ref,
model=model,
p_val=p_val,
preprocess_x_ref=True,
update_x_ref=None,
preprocess_fn=preprocess_fn,
preds_type='logits',
binarize_preds=binarize_preds,
reg_loss_fn=reg_loss_fn,
train_size=train_size,
n_folds=n_folds,
retrain_from_scratch=retrain_from_scratch,
seed=seed,
optimizer=optimizer,
learning_rate=learning_rate,
batch_size=batch_size,
preprocess_batch_fn=preprocess_batch_fn,
epochs=epochs,
verbose=verbose,
train_kwargs=train_kwargs,
device=device,
dataset=dataset,
dataloader=dataloader,
data_type=data_type
)
self.meta = self._detector.meta
self.meta['params']['name'] = 'SpotTheDiffDrift'
self.meta['params']['n_diffs'] = n_diffs
self.meta['params']['l1_reg'] = l1_reg
self.meta['params']['initial_diffs'] = initial_diffs
class InterpretableClf(nn.Module):
def __init__(self, kernel: nn.Module, x_ref: np.ndarray, initial_diffs: np.ndarray):
super().__init__()
self.kernel = kernel
self.mean = nn.Parameter(torch.as_tensor(x_ref.mean(0)), requires_grad=False)
self.diffs = nn.Parameter(torch.as_tensor(initial_diffs, dtype=torch.float32))
self.bias = nn.Parameter(torch.zeros((1,)))
self.coeffs = nn.Parameter(torch.zeros((len(initial_diffs),)))
def forward(self, x: torch.Tensor) -> torch.Tensor:
k_xtl = self.kernel(x, self.mean + self.diffs)
logits = self.bias + k_xtl @ self.coeffs[:, None]
return torch.cat([-logits, logits], 1)
def predict(
self, x: np.ndarray, return_p_val: bool = True, return_distance: bool = True,
return_probs: bool = True, return_model: bool = False
) -> Dict[str, Dict[str, Union[str, int, float, Callable]]]:
"""
Predict whether a batch of data has drifted from the reference data.
Parameters
----------
x
Batch of instances.
return_p_val
Whether to return the p-value of the test.
return_distance
Whether to return a notion of strength of the drift.
K-S test stat if binarize_preds=False, otherwise relative error reduction.
return_probs
Whether to return the instance level classifier probabilities for the reference and test data
(0=reference data, 1=test data).
return_model
Whether to return the updated model trained to discriminate reference and test instances.
Returns
-------
Dictionary containing 'meta' and 'data' dictionaries.
'meta' has the detector's metadata.
'data' contains the drift prediction, the diffs used to distinguish reference from test instances,
and optionally the p-value, performance of the classifier relative to its expectation under the
no-change null, the out-of-fold classifier model prediction probabilities on the reference and test
data, and the trained model.
"""
preds = self._detector.predict(x, return_p_val, return_distance, return_probs, return_model=True)
preds['data']['diffs'] = preds['data']['model'].diffs.detach().cpu().numpy() # type: ignore
preds['data']['diff_coeffs'] = preds['data']['model'].coeffs.detach().cpu().numpy() # type: ignore
if not return_model:
del preds['data']['model']
return preds
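# Editorial usage sketch (not part of the original module): random arrays stand in for real
# reference/test instances, and a single epoch keeps the illustrative run cheap.
if __name__ == "__main__":
    x_ref = np.random.normal(size=(128, 5)).astype(np.float32)
    x_test = np.random.normal(loc=0.5, size=(128, 5)).astype(np.float32)
    cd = SpotTheDiffDriftTorch(x_ref, n_diffs=1, epochs=1, verbose=0)
    preds = cd.predict(x_test)
    print(preds['data']['diffs'].shape, preds['data']['diff_coeffs'])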
|
the-stack_0_18833 | # You are playing the following Flip Game with your friend: Given a string that contains only these two characters: + and -, you and your friend take turns to flip two consecutive "++" into "--". The game ends when a person can no longer make a move and therefore the other person will be the winner.
#
# Write a function to determine if the starting player can guarantee a win.
#
# Example
# Given s = "++++", return true.
#
# Explanation:
# The starting player can guarantee a win by flipping the middle "++" to become "+--+".
# Challenge
# Derive your algorithm's runtime complexity.
class Solution:
def canWin(self, s):
for i in range(len(s)-1):
if s[i:i+2] == '++':
curr = s[:i] + '--' + s[i+2:]
if not self.canWin(curr):
return True
return False
# Cache strings that have already been evaluated, together with their boolean results, in a dictionary to avoid repeated work
class Solution2:
_dic = {}
def canWin(self, s):
dic = self._dic
if s not in dic:
for i in range(len(s)-1):
if s[i:i+2] == '++':
curr = s[:i] + '--' + s[i+2:]
if not self.canWin(curr):
dic[s] = True
if s not in dic:
dic[s] = False
return dic[s]
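# Editorial sketch: both solutions agree on small inputs. "++++" is a first-player win
# (flip the middle "++" to leave "+--+"), while a string without "++" is an immediate loss.
if __name__ == "__main__":
    assert Solution().canWin("++++") is True
    assert Solution2().canWin("++++") is True
    assert Solution().canWin("+-+-") is False
    print("all checks passed")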
|
the-stack_0_18834 | # This file is a minimal clang-format vim-integration. To install:
# - Change 'binary' if clang-format is not on the path (see below).
# - Add to your .vimrc:
#
# if has('python')
# map <C-I> :pyf <path-to-this-file>/clang-format.py<cr>
# imap <C-I> <c-o>:pyf <path-to-this-file>/clang-format.py<cr>
# elseif has('python3')
# map <C-I> :py3f <path-to-this-file>/clang-format.py<cr>
# imap <C-I> <c-o>:py3f <path-to-this-file>/clang-format.py<cr>
# endif
#
# The if-elseif-endif conditional should pick either the python3 or python2
# integration depending on your vim setup.
#
# The first mapping enables clang-format for NORMAL and VISUAL mode, the second
# mapping adds support for INSERT mode. Change "C-I" to another binding if you
# need clang-format on a different key (C-I stands for Ctrl+i).
#
# With this integration you can press the bound key and clang-format will
# format the current line in NORMAL and INSERT mode or the selected region in
# VISUAL mode. The line or region is extended to the next bigger syntactic
# entity.
#
# You can also pass in the variable "l:lines" to choose the range for
# formatting. This variable can either contain "<start line>:<end line>" or
# "all" to format the full file. So, to format the full file, write a function
# like:
# :function FormatFile()
# : let l:lines="all"
# : if has('python')
# : pyf <path-to-this-file>/clang-format.py
# : elseif has('python3')
# : py3f <path-to-this-file>/clang-format.py
# : endif
# :endfunction
#
# It operates on the current, potentially unsaved buffer and does not create
# or save any files. To revert a formatting, just undo.
from __future__ import absolute_import, division, print_function
import difflib
import json
import platform
import subprocess
import sys
import vim
# set g:clang_format_path to the path to clang-format if it is not on the path
# Change this to the full path if clang-format is not on the path.
binary = 'clang-format'
if vim.eval('exists("g:clang_format_path")') == "1":
binary = vim.eval('g:clang_format_path')
# Change this to format according to other formatting styles. See the output of
# 'clang-format --help' for a list of supported styles. The default looks for
# a '.clang-format' or '_clang-format' file to indicate the style that should be
# used.
style = None
fallback_style = None
if vim.eval('exists("g:clang_format_fallback_style")') == "1":
fallback_style = vim.eval('g:clang_format_fallback_style')
def get_buffer(encoding):
if platform.python_version_tuple()[0] == '3':
return vim.current.buffer
return [ line.decode(encoding) for line in vim.current.buffer ]
def main():
# Get the current text.
encoding = vim.eval("&encoding")
buf = get_buffer(encoding)
# Join the buffer into a single string with a terminating newline
text = '\n'.join(buf) + '\n'
# Determine range to format.
if vim.eval('exists("l:lines")') == '1':
lines = ['-lines', vim.eval('l:lines')]
elif vim.eval('exists("l:formatdiff")') == '1':
with open(vim.current.buffer.name, 'r') as f:
      ondisk = f.read().splitlines()
sequence = difflib.SequenceMatcher(None, ondisk, vim.current.buffer)
lines = []
for op in reversed(sequence.get_opcodes()):
if op[0] not in ['equal', 'delete']:
lines += ['-lines', '%s:%s' % (op[3] + 1, op[4])]
if lines == []:
return
else:
lines = ['-lines', '%s:%s' % (vim.current.range.start + 1,
vim.current.range.end + 1)]
# Determine the cursor position.
cursor = int(vim.eval('line2byte(line("."))+col(".")')) - 2
if cursor < 0:
print('Couldn\'t determine cursor position. Is your file empty?')
return
# Avoid flashing an ugly, ugly cmd prompt on Windows when invoking clang-format.
startupinfo = None
if sys.platform.startswith('win32'):
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
# Call formatter.
command = [binary, '-cursor', str(cursor)]
if lines != ['-lines', 'all']:
command += lines
if style:
command.extend(['-style', style])
if fallback_style:
command.extend(['-fallback-style', fallback_style])
if vim.current.buffer.name:
command.extend(['-assume-filename', vim.current.buffer.name])
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE, startupinfo=startupinfo)
stdout, stderr = p.communicate(input=text.encode(encoding))
# If successful, replace buffer contents.
if stderr:
print(stderr)
if not stdout:
print(
'No output from clang-format (crashed?).\n'
'Please report to bugs.llvm.org.'
)
else:
lines = stdout.decode(encoding).split('\n')
output = json.loads(lines[0])
# Strip off the trailing newline (added above).
# This maintains trailing empty lines present in the buffer if
# the -lines specification requests them to remain unchanged.
lines = lines[1:-1]
sequence = difflib.SequenceMatcher(None, buf, lines)
for op in reversed(sequence.get_opcodes()):
if op[0] != 'equal':
vim.current.buffer[op[1]:op[2]] = lines[op[3]:op[4]]
if output.get('IncompleteFormat'):
print('clang-format: incomplete (syntax errors)')
vim.command('goto %d' % (output['Cursor'] + 1))
main()
|
the-stack_0_18835 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Face detection loss."""
import numpy as np
import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.nn import Cell
from mindspore import Tensor
from mindspore.common import dtype as mstype
class MyLoss(Cell):
"""
Base class for other losses.
"""
def __init__(self, reduction='mean'):
super(MyLoss, self).__init__()
if reduction is None:
reduction = 'none'
if reduction not in ('mean', 'sum', 'none'):
raise ValueError(f"reduction method for {reduction.lower()} is not supported")
self.average = True
self.reduce = True
if reduction == 'sum':
self.average = False
if reduction == 'none':
self.reduce = False
self.reduce_mean = P.ReduceMean()
self.reduce_sum = P.ReduceSum()
self.mul = P.Mul()
self.cast = P.Cast()
def get_axis(self, x):
shape = F.shape(x)
length = F.tuple_len(shape)
perm = F.make_range(0, length)
return perm
def get_loss(self, x, weights=1.0):
"""
Computes the weighted loss
Args:
weights: Optional `Tensor` whose rank is either 0, or the same rank as inputs, and must be broadcastable to
inputs (i.e., all dimensions must be either `1`, or the same as the corresponding inputs dimension).
"""
input_dtype = x.dtype
x = self.cast(x, mstype.float32)
weights = self.cast(weights, mstype.float32)
x = self.mul(weights, x)
if self.reduce and self.average:
x = self.reduce_mean(x, self.get_axis(x))
if self.reduce and not self.average:
x = self.reduce_sum(x, self.get_axis(x))
x = self.cast(x, input_dtype)
return x
def construct(self, base, target):
raise NotImplementedError
class PtLinspace(Cell):
'''PtLinspace'''
def __init__(self):
super(PtLinspace, self).__init__()
self.tuple_to_array = P.TupleToArray()
def construct(self, start, end, steps):
lin_x = ()
step = (end - start + 1) // steps
for i in range(start, end + 1, step):
lin_x += (i,)
lin_x = self.tuple_to_array(lin_x)
return lin_x
class MSELoss(MyLoss):
'''MSELoss'''
def __init__(self):
super(MSELoss, self).__init__()
self.sum = P.Sum()
self.mean = P.ReduceMean(keepdims=False)
self.pow = P.Pow()
self.sqrt = P.Sqrt()
def construct(self, nembeddings1, nembeddings2):
dist = nembeddings1 - nembeddings2
dist_pow = self.pow(dist, 2.0)
dist_sum = self.sum(dist_pow, 1)
dist_sqrt = self.sqrt(dist_sum)
loss = self.mean(dist_sqrt, 0)
return loss
class YoloLoss(Cell):
""" Computes yolo loss from darknet network output and target annotation.
Args:
num_classes (int): number of categories
anchors (list): 2D list representing anchor boxes
coord_scale (float): weight of bounding box coordinates
no_object_scale (float): weight of regions without target boxes
object_scale (float): weight of regions with target boxes
class_scale (float): weight of categorical predictions
thresh (float): minimum iou between a predicted box and ground truth for them to be considered matching
seen (int): How many images the network has already been trained on.
"""
def __init__(self, num_classes, anchors, anchors_mask, reduction=32, seen=0, coord_scale=1.0, no_object_scale=1.0,
object_scale=1.0, class_scale=1.0, thresh=0.5, head_idx=0.0):
super(YoloLoss, self).__init__()
self.num_classes = num_classes
self.num_anchors = len(anchors_mask)
self.anchor_step = len(anchors[0]) # each scale has step anchors
self.anchors = np.array(anchors, dtype=np.float32) / reduction # scale every anchor for every scale
self.tensor_anchors = Tensor(self.anchors, mstype.float32)
self.anchors_mask = anchors_mask
anchors_w = []
anchors_h = []
for i in range(len(anchors_mask)):
anchors_w.append(self.anchors[self.anchors_mask[i]][0])
anchors_h.append(self.anchors[self.anchors_mask[i]][1])
self.anchors_w = Tensor(np.array(anchors_w).reshape(len(self.anchors_mask), 1))
self.anchors_h = Tensor(np.array(anchors_h).reshape(len(self.anchors_mask), 1))
self.reduction = reduction
self.seen = seen
self.head_idx = head_idx
self.zero = Tensor(0)
self.coord_scale = coord_scale
self.no_object_scale = no_object_scale
self.object_scale = object_scale
self.class_scale = class_scale
self.thresh = thresh
self.info = {'avg_iou': 0, 'class': 0, 'obj': 0, 'no_obj': 0,
'recall50': 0, 'recall75': 0, 'obj_cur': 0, 'obj_all': 0,
'coord_xy': 0, 'coord_wh': 0}
self.shape = P.Shape()
self.reshape = P.Reshape()
self.sigmoid = P.Sigmoid()
self.zeros_like = P.ZerosLike()
self.concat0 = P.Concat(0)
self.concat0_2 = P.Concat(0)
self.concat0_3 = P.Concat(0)
self.concat0_4 = P.Concat(0)
self.concat1 = P.Concat(1)
self.concat1_2 = P.Concat(1)
self.concat1_3 = P.Concat(1)
self.concat1_4 = P.Concat(1)
self.concat2 = P.Concat(2)
self.concat2_2 = P.Concat(2)
self.concat2_3 = P.Concat(2)
self.concat2_4 = P.Concat(2)
self.tile = P.Tile()
self.transpose = P.Transpose()
self.cast = P.Cast()
self.exp = P.Exp()
self.sum = P.ReduceSum()
self.smooth_l1_loss = P.SmoothL1Loss()
self.bce = P.SigmoidCrossEntropyWithLogits()
self.ce = P.SoftmaxCrossEntropyWithLogits()
self.pt_linspace = PtLinspace()
self.one_hot = nn.OneHot(-1, self.num_classes, 1.0, 0.0)
self.squeeze_2 = P.Squeeze(2)
self.reduce_sum = P.ReduceSum()
self.select = P.Select()
self.iou = P.IOU()
def construct(self, output, coord_mask, conf_pos_mask, conf_neg_mask, cls_mask, t_coord, t_conf, t_cls, gt_list):
"""Compute Yolo loss."""
output_d = self.shape(output)
num_batch = output_d[0]
num_anchors = self.num_anchors
num_classes = self.num_classes
num_channels = output_d[1] // num_anchors
height = output_d[2]
width = output_d[3]
output = self.reshape(output, (num_batch, num_anchors, num_channels, height * width))
coord_01 = output[:, :, :2] # tx,ty
coord_23 = output[:, :, 2:4] # tw,th
coord = self.concat2((coord_01, coord_23))
conf = self.squeeze_2(output[:, :, 4:5, :])
cls = output[:, :, 5:]
cls = self.reshape(cls, (num_batch*num_anchors, num_classes, height*width))
perm = (0, 2, 1)
cls = self.transpose(cls, perm)
cls_shp = self.shape(cls)
cls = self.reshape(cls, (cls_shp[0] * cls_shp[1] * cls_shp[2] // num_classes, num_classes))
lin_x = self.pt_linspace(0, width - 1, width)
lin_x = self.tile(lin_x, (height,))
lin_x = self.cast(lin_x, mstype.float32)
lin_y = self.pt_linspace(0, height - 1, height)
lin_y = self.reshape(lin_y, (height, 1))
lin_y = self.tile(lin_y, (1, width))
lin_y = self.reshape(lin_y, (self.shape(lin_y)[0] * self.shape(lin_y)[1],))
lin_y = self.cast(lin_y, mstype.float32)
anchor_w = self.anchors_w
anchor_h = self.anchors_h
anchor_w = self.cast(anchor_w, mstype.float32)
anchor_h = self.cast(anchor_h, mstype.float32)
coord_x = self.sigmoid(coord[:, :, 0:1, :])
pred_boxes_0 = self.squeeze_2(coord_x) + lin_x
shape_pb0 = self.shape(pred_boxes_0)
pred_boxes_0 = self.reshape(pred_boxes_0, (shape_pb0[0] * shape_pb0[1] * shape_pb0[2], 1))
coord_y = self.sigmoid(coord[:, :, 1:2, :])
pred_boxes_1 = self.squeeze_2(coord_y) + lin_y
shape_pb1 = self.shape(pred_boxes_1)
pred_boxes_1 = self.reshape(pred_boxes_1, (shape_pb1[0] * shape_pb1[1] * shape_pb1[2], 1))
pred_boxes_2 = self.exp(self.squeeze_2(coord[:, :, 2:3, :])) * anchor_w
shape_pb2 = self.shape(pred_boxes_2)
pred_boxes_2 = self.reshape(pred_boxes_2, (shape_pb2[0] * shape_pb2[1] * shape_pb2[2], 1))
pred_boxes_3 = self.exp(self.squeeze_2(coord[:, :, 3:4, :])) * anchor_h
shape_pb3 = self.shape(pred_boxes_3)
pred_boxes_3 = self.reshape(pred_boxes_3, (shape_pb3[0] * shape_pb3[1] * shape_pb3[2], 1))
pred_boxes_x1 = pred_boxes_0 - pred_boxes_2 / 2
pred_boxes_y1 = pred_boxes_1 - pred_boxes_3 / 2
pred_boxes_x2 = pred_boxes_0 + pred_boxes_2 / 2
pred_boxes_y2 = pred_boxes_1 + pred_boxes_3 / 2
pred_boxes_points = self.concat1_4((pred_boxes_x1, pred_boxes_y1, pred_boxes_x2, pred_boxes_y2))
total_anchors = num_anchors * height * width
mask_concat = None
conf_neg_mask_zero = self.zeros_like(conf_neg_mask)
pred_boxes_points = pred_boxes_points * 64
gt_list = gt_list * 64
for b in range(num_batch):
cur_pred_boxes = pred_boxes_points[b * total_anchors:(b + 1) * total_anchors]
iou_gt_pred = self.iou(self.cast(cur_pred_boxes, mstype.float16), self.cast(gt_list[b], mstype.float16))
mask = self.cast((iou_gt_pred > self.thresh), mstype.float16)
mask = self.reduce_sum(mask, 0)
mask = mask > 0
shape_neg = self.shape(conf_neg_mask[0])
mask = self.reshape(mask, (1, shape_neg[0], shape_neg[1]))
if b == 0:
mask_concat = mask
else:
mask_concat = self.concat0_2((mask_concat, mask))
conf_neg_mask = self.select(mask_concat, conf_neg_mask_zero, conf_neg_mask)
coord_mask = self.tile(coord_mask, (1, 1, 4, 1))
coord_mask = coord_mask[:, :, :2]
coord_center = coord[:, :, :2]
t_coord_center = t_coord[:, :, :2]
coord_wh = coord[:, :, 2:]
t_coord_wh = t_coord[:, :, 2:]
one_hot_label = None
if num_classes > 1:
shape_t_cls = self.shape(t_cls)
t_cls = self.reshape(t_cls, (shape_t_cls[0] * shape_t_cls[1] * shape_t_cls[2],))
one_hot_label = self.one_hot(self.cast(t_cls, mstype.int32))
shape_cls_mask = self.shape(cls_mask)
cls_mask = self.reshape(cls_mask, (1, shape_cls_mask[0] * shape_cls_mask[1] * shape_cls_mask[2]))
added_scale = 1.0 + self.head_idx * 0.5
loss_coord_center = added_scale * 2.0 * 1.0 * self.coord_scale * self.sum(
coord_mask * self.bce(coord_center, t_coord_center), ())
loss_coord_wh = added_scale * 2.0 * 1.5 * self.coord_scale * self.sum(
coord_mask * self.smooth_l1_loss(coord_wh, t_coord_wh), ())
loss_coord = 1.0 * (loss_coord_center + loss_coord_wh)
loss_conf_pos = added_scale * 2.0 * self.object_scale * self.sum(conf_pos_mask * self.bce(conf, t_conf), ())
loss_conf_neg = 1.0 * self.no_object_scale * self.sum(conf_neg_mask * self.bce(conf, t_conf), ())
loss_conf = loss_conf_pos + loss_conf_neg
if num_classes > 1:
loss_cls = self.class_scale * 1.0 * self.sum(cls_mask * self.ce(cls, one_hot_label)[0], ())
else:
loss_cls = 0.0
cls = self.squeeze_2(output[:, :, 5:6, :])
loss_cls_pos = added_scale * 2.0 * self.object_scale * self.sum(conf_pos_mask * self.bce(cls, t_conf), ())
loss_cls_neg = 1.0 * self.no_object_scale * self.sum(conf_neg_mask * self.bce(cls, t_conf), ())
loss_cls = loss_cls_pos + loss_cls_neg
loss_tot = loss_coord + 0.5 * loss_conf + 0.5 * loss_cls
return loss_tot
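# Editorial usage sketch (an assumption, not part of the original file): MSELoss above reduces
# to the mean Euclidean distance between two embedding batches; shapes are illustrative only.
if __name__ == "__main__":
    emb_a = Tensor(np.random.randn(4, 128).astype(np.float32))
    emb_b = Tensor(np.random.randn(4, 128).astype(np.float32))
    print(MSELoss()(emb_a, emb_b))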
|
the-stack_0_18836 | import rdflib
from ontquery.utils import mimicArgs
from pyontutils.core import OntGraph, OntId, OntTerm
from pyontutils.namespaces import rdf, rdfs, owl, OntCuries
import sparcur.schemas as sc
from sparcur.core import adops
from sparcur.utils import log, logd
from ttlser import CustomTurtleSerializer
elements = rdflib.Namespace('https://apinatomy.org/uris/elements/')
readable = rdflib.Namespace('https://apinatomy.org/uris/readable/')
# add apinatomy:Graph to ttlser topClasses
tc = CustomTurtleSerializer.topClasses
if readable.Graph not in tc:
sec = CustomTurtleSerializer.SECTIONS
CustomTurtleSerializer.topClasses = [readable.Graph] + tc
CustomTurtleSerializer.SECTIONS = ('',) + sec
# add apinatomy:Graph as a header section marker
OntGraph.metadata_type_markers.append(readable.Graph)
OntCuries({'apinatomy': str(readable),
           'elements': str(elements), # FIXME guaranteed name collisions ...
'PMID': 'https://www.ncbi.nlm.nih.gov/pubmed/',
# also just read this from the embedded local conventions
})
class NoIdError(Exception):
""" blob has no id """
apinscm = sc.ApiNATOMYSchema()
def make_classes(schema):
types = {}
def ref_to_list(ref):
_jpath = ref.split('/')
if _jpath[0] != '#':
raise ValueError(ref)
else:
jpath = _jpath[1:]
return jpath
def deref(ref):
return adops.get(schema, ref_to_list(ref))
def allOf(obj):
for o in obj['allOf']:
if '$ref' in o:
ref = o['$ref']
if ref in types:
yield types[ref]
else:
jpath = ref_to_list(ref)
no = adops.get(schema, jpath)
yield top(jpath[-1], no)
else:
log.debug(f'{obj}')
def properties(obj):
props = obj['properties']
out = {}
for name, vobj in props.items():
@property
def f(self, n=name):
return self.blob[n]
out[name] = f
return out
def top(cname, obj):
deref, allOf, ref_to_list, schema, types # python is dumb
type_ = None
if 'type' in obj:
type_ = obj['type']
parents = (Base,)
        elif 'allOf' in obj:
parents = tuple(allOf(obj))
for c in parents:
if hasattr(c, 'type'):
type_ = c.type
if type_ is None:
raise TypeError('wat')
cd = {'type': type_,
'_schema': obj,
}
if type_ == 'object':
props = properties(obj)
for n, f in props.items():
cd[n] = f
return type(cname, parents, cd)
cs = []
d = schema['definitions']
for k, v in d.items():
if not v:
log.warning(f'empty definition for {k}')
continue
c = top(k, v)
ref = f'#/definitions/{k}'
types[ref] = c
cs.append(c)
return cs
class Base:
@classmethod
def fromRdf(cls, uri, graph, context=None):
_, id = uri.rsplit('/', 1)
blob = {'id': id}
for p, o in graph[uri]:
if p == rdf.type:
if o != owl.NamedIndividual:
key = 'class'
_, value = o.rsplit('/', 1)
else:
continue # TODO s rdf:type apinatomy:External ??
else:
if p == rdfs.label:
key = 'name'
else:
_, key = p.rsplit('/', 1)
if isinstance(o, rdflib.Literal):
value = o.toPython()
elif isinstance(o, rdflib.URIRef):
oid = OntId(o)
if oid.prefix == 'local':
value = oid.suffix
elif oid.prefix == 'apinatomy': # FIXME hrm?
value = oid.suffix
else:
value = oid.curie # FIXME external is tricky
log.warning(f'{oid!r}')
else:
raise NotImplementedError(f'{o}')
if key in cls.objects_multi:
if key in blob:
blob[key].append(value)
else:
blob[key] = [value]
else:
blob[key] = value
return cls(blob, context)
def __init__(self, blob, context=None):
self.blob = blob
self.context = context
try:
self.id = blob['id'].replace(' ', '-') #FIXME
except KeyError as e:
raise NoIdError(f'id not in {blob}') from e
except AttributeError:
pass # new impl uses properties to access the blob
if context is not None:
self.s = context[self.id]
else:
self.s = None
try:
self.name = blob['name'] if 'name' in blob else self.id
except AttributeError:
pass # new impl uses properties to access the blob
if 'class' in blob:
assert self.__class__.__name__ == blob['class']
@property
def cname(self):
return self.__class__.__name__
class Graph(Base):
@classmethod
def fromRdf(cls, graph):
iri = graph.boundIdentifier
context = rdflib.Namespace(iri + '/ids/')
# TODO removing things from the trie is not implemented ...
#d = OntCuries._dict
#d.pop('local', None)
#d['local'] = str(context)
#OntCuries.reset()
OntCuries({'local': str(context)})
_, id = iri.rsplit('/', 1)
resources = {}
for s in graph[:rdf.type:owl.NamedIndividual]:
for element in graph[s:rdf.type]:
if element != owl.NamedIndividual:
_, class_ = element.rsplit('/', 1)
resource = getattr(cls, class_).fromRdf(s, graph, context)
# FIXME we should really keep the internal representation
# around instead of just throwing it away
resources[resource.id] = resource.blob
for s in graph[:rdf.type:owl.Class]:
# FIXME s rdf:type elements:External ??
resource = External.fromRdf(s, graph, context)
resources[resource.id] = resource.blob
map = {'id': id,
'resources': resources}
return cls(map, {})
def __init__(self, map, blob):
self.map = map
self.resources = map['resources']
self.prefixes = {} # TODO curie mapping
self.id = self.map['id'].replace(' ', '-') # FIXME
self.blob = blob
#apinscm.validate(self.blob) # TODO
@property
def context(self):
return rdflib.Namespace(f'{self.iri}/ids/')
@property
def triples(self):
self.iri = rdflib.URIRef(f'https://apinatomy.org/uris/models/{self.id}')
yield self.iri, rdf.type, readable.Graph
for id, blob in self.resources.items():
if 'class' not in blob:
logd.warning(f'no class in\n{blob!r}')
continue
elif blob['class'] == 'Graph':
log.warning('Graph is in resources itself')
continue
yield from getattr(self, blob['class'])(blob, self.context).triples()
@property
def triples_generated(self):
context = rdflib.Namespace(f'https://apinatomy.org/uris/models/{self.id}/ids/')
for cls in [Node, Link, Lyph, Tree, Group, Material]:
if cls.key in self.blob:
# FIXME trees not in all blobs
for blob in self.blob[cls.key]:
try:
yield from cls(blob, context).triples()
except NoIdError as e:
logd.exception(e)
def populate(self, graph):
#[graph.add(t) for t in self.triples]
for t in self.triples:
s, p, o = t
if s is None:
raise BaseException(f'{t}')
graph.add(t)
def graph(self):
g = OntGraph()
OntCuries.populate(g)
self.populate(g)
g.bind('local', self.context)
g.bind('apinatomy', readable) # FIXME populate from store
g.bind('elements', elements)
return g
class BaseElement(Base):
key = None
annotations = tuple()
generics = tuple()
objects = tuple()
objects_multi = tuple()
def triples(self):
yield from self.triples_class()
yield from self.triples_external()
yield from self.triples_annotations()
yield from self.triples_generics()
yield from self.triples_objects()
yield from self.triples_objects_multi()
def triples_class(self):
yield self.s, rdf.type, owl.NamedIndividual
yield self.s, rdf.type, elements[f'{self.cname}']
yield self.s, rdfs.label, rdflib.Literal(self.name)
def triples_annotations(self):
for key in self.annotations:
if key in self.blob:
value = self.blob[key]
yield self.s, readable[key], rdflib.Literal(value)
def triples_generics(self):
for key in self.generics:
# FIXME has + key.capitalize()?
if key in self.blob:
value = self.blob[key]
yield self.s, readable[key], readable[value]
else:
log.warning(f'{key} not in {self.blob}')
def triples_objects(self):
for key in self.objects:
if key in self.blob:
value = self.blob[key]
value = value.replace(' ', '-') # FIXME require no spaces in internal ids
yield self.s, readable[key], self.context[value]
if key == 'root': # FIXME temp hack to get directions to cooperate
yield self.context[value], readable['rootOf'], self.s
def triples_objects_multi(self):
for key in self.objects_multi:
if key in self.blob:
values = self.blob[key]
for value in values:
if key == 'external':
o = OntId(value).URIRef
else:
value = value.replace(' ', '-') # FIXME require no spaces in internal ids
o = self.context[value]
yield self.s, readable[key], o
def triples_external(self):
        if 'external' in self.blob:
for external in self.blob['external']:
yield self.s, rdf.type, OntId(external).URIRef
class Node(BaseElement):
key = 'nodes'
annotations = 'skipLabel', 'color', 'generated'
objects = 'cloneOf', 'hostedBy', 'internalIn'
objects_multi = 'sourceOf', 'targetOf', 'clones', 'external'
def triples(self):
yield from super().triples()
Graph.Node = Node
class Lyph(BaseElement):
key = 'lyphs'
generics = 'topology',
annotations = 'width', 'height', 'layerWidth', 'internalLyphColumns', 'isTemplate', 'generated'
objects = 'layerIn', 'conveys', 'border', 'cloneOf'
objects_multi = 'inCoalescences', 'subtypes', 'layers', 'clones', 'external', 'internalNodes', 'bundles', 'bundlesTrees'
def triples(self):
yield from super().triples()
Graph.Lyph = Lyph
class Link(BaseElement):
key = 'links'
annotations = 'generated',
generics = 'conveyingType',
objects = 'source', 'target', 'conveyingLyph', 'fasciculatesIn'
objects_multi = 'conveyingMaterials', 'hostedNodes', 'external'
Graph.Link = Link
class Coalescence(BaseElement):
key = 'coalescences'
generics = 'topology',
annotations = 'generated',
objects = 'generatedFrom',
objects_multi = 'lyphs', 'external'
Graph.Coalescence = Coalescence
class Border(BaseElement):
# FIXME class is Link ?
key = 'borders'
objects = 'host',
objects_multi = 'borders', 'external'
Graph.Border = Border
class Tree(BaseElement):
key = 'trees'
objects = 'root', 'lyphTemplate', 'group'
objects_multi = 'housingLyphs', 'external', 'levels'
Graph.Tree = Tree
class Group(BaseElement):
key = 'groups'
objects = 'generatedFrom',
objects_multi = 'nodes', 'links', 'Lyphs', 'coalescences', 'groups'
elements = Node, Link, Lyph, Coalescence # Group # ah class scope
def triples(self):
yield from super().triples()
for element_class in self.elements:
if element_class.key in self.blob:
for value_local_id in self.blob[element_class.key]:
c = element_class({'id':value_local_id}, context=self.context)
yield self.s, readable.hasElement, c.s
else:
log.warning(f'{element_class.key} not in {self.blob}')
Group.elements += (Group,)
Graph.Group = Group
class Material(BaseElement):
key = 'materials'
objects_multi = 'materials', 'inMaterials', 'external'
class fake:
""" filler """
label = 'sigh'
Graph.Material = Material
class External(BaseElement):
externals = 'id',
annotations = 'generated', 'uri', 'type', # FIXME should be classes
objects_multi = 'annotates',
@classmethod
def fromRdf(cls, uri, graph, context=None):
oid = OntId(uri)
id = oid.curie
blob = {'id': id}
for p, o in graph[uri]:
if p == rdf.type:
key = 'class'
value = 'External'
else:
if p == rdfs.label:
key = 'name'
else:
_, key = p.rsplit('/', 1)
if isinstance(o, rdflib.Literal):
value = o.toPython()
elif isinstance(o, rdflib.URIRef):
oid = OntId(o)
if oid.prefix == 'local':
value = oid.suffix
elif oid.prefix == 'apinatomy': # FIXME hrm?
value = oid.suffix
else:
value = oid.curie # FIXME external is tricky
log.warning(f'{oid!r}')
if key in cls.objects_multi:
if key in blob:
blob[key].append(value)
else:
blob[key] = [value]
else:
blob[key] = value
return cls(blob, context)
@mimicArgs(BaseElement.__init__)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# PLEASE DO NOT PUT PMIDs as external ids!!!
# FIXME idlib PMID(thing) urg the regex state machine is so simple ;_;
if self.id.startswith('PMID:'):
log.warning('PMIDs should never be External IDs!')
self._term = fake
self.s = OntId(self.id).URIRef
return
self._term = OntTerm(self.id)
self.s = self._term.URIRef
def triples_class(self):
yield self.s, rdf.type, owl.Class
yield self.s, rdfs.label, rdflib.Literal(self._term.label)
# TODO triples simple?
Graph.External = External
class Channel(BaseElement):
pass
Graph.Channel = Channel
class Chain(BaseElement):
objects = 'start', 'end'
objects_multi = 'conveyingLyphs',
annotations = 'length',
Graph.Chain = Chain
hrm = make_classes(apinscm.schema)
|
the-stack_0_18840 | """Dataset with 'Shuni orthobunyavirus' sequences.
A dataset with 44 'Shuni orthobunyavirus' sequences. The virus is
segmented and has 3 segments. Based on their strain and/or isolate,
these sequences were able to be grouped into 19 genomes. Many genomes
may have fewer than 3 segments.
THIS PYTHON FILE WAS GENERATED BY A COMPUTER PROGRAM! DO NOT EDIT!
"""
import sys
from catch.datasets import GenomesDatasetMultiChrom
def seq_header_to_chr(header):
import re
c = re.compile(r'\[segment (L|M|S)\]')
m = c.search(header)
if not m:
raise Exception("Unknown or invalid segment in header %s" % header)
seg = m.group(1)
return "segment_" + seg
def seq_header_to_genome(header):
import re
c = re.compile(r'\[genome (.+)\]')
m = c.search(header)
if not m:
raise Exception("Unknown genome in header %s" % header)
return m.group(1)
chrs = ["segment_" + seg for seg in ['L', 'M', 'S']]
ds = GenomesDatasetMultiChrom(__name__, __file__, __spec__,
chrs, seq_header_to_chr,
seq_header_to_genome=seq_header_to_genome)
ds.add_fasta_path("data/shuni_orthobunyavirus.fasta.gz", relative=True)
sys.modules[__name__] = ds
|
the-stack_0_18842 | from .IOModule import IOModule
from ..priv.Exceptions import InstructionAccessFault
from ..helpers import int_from_bytes
from threading import Thread
import time
def _window_loop(textIO: 'TextIO'):
try:
import PySimpleGUI as sg
logs = sg.Text(font="monospace")
col = sg.Column([[logs]], size=(640, 400), scrollable=True)
window = sg.Window("TextIO:{:x}".format(textIO.addr), [[col]])
lines = list()
window.finalize()
textIO.set_sg_window(window)
while True:
e, v = window.read()
if e == sg.WINDOW_CLOSED:
window.close()
textIO.set_sg_window(None)
break
if e == 'putlog':
lines.insert(0, v[0])
logs.update(value='\n'.join(lines) + '\n')
col.contents_changed()
except ImportError:
print("[TextIO] window disabled - please install PySimpleGui!")
textIO.set_sg_window(None)
class TextIO(IOModule):
def __init__(self, addr: int, buflen: int = 128):
super(TextIO, self).__init__(addr, buflen + 4)
self.buff = bytearray(buflen)
self.current_line = ""
self.sg_window = None
self.start_buffer = list()
self.thread = Thread(target=_window_loop, args=(self,))
self.thread.start()
time.sleep(0.1)
def set_sg_window(self, window):
if self.sg_window is not None and window is not None:
raise Exception("cannot set window twice!")
self.sg_window = window
buff = self.start_buffer
self.start_buffer = None if window is None else list()
for line in buff:
self._present(line)
def read(self, addr: int, size: int) -> bytearray:
raise InstructionAccessFault(addr)
def write(self, addr: int, data: bytearray, size: int):
if addr == self.addr:
if size > 4:
raise InstructionAccessFault(addr)
if int_from_bytes(data[0:4]) > 0:
self._print()
return
buff_start = addr - self.addr - 4
self.buff[buff_start:buff_start + size] = data[0:size]
def _print(self):
buff = self.buff
self.buff = bytearray(self.size)
if b'\x00' in buff:
buff = buff.split(b'\x00')[0]
text = buff.decode('ascii')
if '\n' in text:
lines = text.split("\n")
lines[0] = self.current_line + lines[0]
for line in lines[:-1]:
self._present(line)
self.current_line = lines[-1]
else:
self.current_line += text
def _present(self, text: str):
if self.sg_window is not None:
self.sg_window.write_event_value('putlog', text)
elif self.start_buffer is not None:
self.start_buffer.append(text)
else:
print("[TextIO:{:x}] {}".format(self.addr, text))
|
the-stack_0_18843 | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class TrunkMetricsQoS(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
TrunkMetricsQoS - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'mismatch_count': 'int'
}
self.attribute_map = {
'mismatch_count': 'mismatchCount'
}
self._mismatch_count = None
@property
def mismatch_count(self):
"""
Gets the mismatch_count of this TrunkMetricsQoS.
Total number of QoS mismatches over the course of the last 24-hour period (sliding window).
:return: The mismatch_count of this TrunkMetricsQoS.
:rtype: int
"""
return self._mismatch_count
@mismatch_count.setter
def mismatch_count(self, mismatch_count):
"""
Sets the mismatch_count of this TrunkMetricsQoS.
Total number of QoS mismatches over the course of the last 24-hour period (sliding window).
:param mismatch_count: The mismatch_count of this TrunkMetricsQoS.
:type: int
"""
self._mismatch_count = mismatch_count
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
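# Illustrative usage sketch (comments only, not part of the generated model):
#
#   qos = TrunkMetricsQoS()
#   qos.mismatch_count = 3      # goes through the property setter above
#   qos.to_dict()               # -> {'mismatch_count': 3}
#   qos.to_json()               # JSON string form of the same dict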
|
the-stack_0_18847 | # Colm O Caoimh
# PandS project 2020
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
# Import data as pandas dataframe
iris_data = pd.read_csv('iris.data', header=None)
# assign column headers
iris_data.columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species']
# A. Output a summary of each variable to a single txt file.
# Isolate columns according to data type
float_values = iris_data.iloc[:,0:4]
str_values = iris_data.iloc[:,4]
# Use describe function to summarise data
float_summary = float_values.describe()
str_summary = str_values.describe()
# Establish 3 unique values in str_summary.
# This creates an array of each value.
str_summary = str_values.unique()
# Transpose str_summary array and convert to dataframe
str_summary = str_summary[:, None]
str_summary = pd.DataFrame({"Species": str_summary[:, 0]})
# Format string variable summary
# Add column containing quantity of unique values
quantity = ['50', '50', '50']
str_summary['Count'] = quantity
# Rename rows in str_summary
str_summary.index = ['Species_A', 'Species_B', 'Species_C']
# Format summary output and write to text file
with open("iris_summary.txt", "w") as f:
heading = "SUMMARY OF VARIABLES IN IRIS DATASET"
f.write(heading + "\n")
f.write("=" * len(heading) + "\n\n\n\n")
heading2 = "NUMERIC VARIABLE SUMMARY"
f.write(heading2 + "\n")
f.write("=" * len(heading2) + "\n")
f.write(float_summary.to_string() + "\n\n\n\n")
heading3 = "DEPENDENT VARIABLE SUMMARY"
f.write(heading3 + "\n")
f.write("=" * len(heading3) + "\n")
f.write(str_summary.to_string() + "\n\n\n\n\n\n\n")
# B. Save a histogram of each variable to png files
# Assign each column to a variable for easier manipulation
sep_len = iris_data['sepal_length']
sep_width = iris_data['sepal_width']
pet_len = iris_data['petal_length']
pet_width = iris_data['petal_width']
species = iris_data['species']
# Write a function which outputs a histogram for each dataset variable and saves
# it as a png file.
# First for numeric variables
def var_hist(var_data, fig_num, x_label, y_label, title, filepath):
plt.figure(fig_num)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
plt.hist(var_data, rwidth=0.9,)
plt.savefig(filepath)
plt.close() # Close figure so plot won't be displayed later
# Then for string variable
def var_hist2(var_data, fig_num, x_label, y_label, title, filepath):
plt.figure(fig_num)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
plt.hist(var_data, bins=3, rwidth=0.9)
plt.xticks(np.arange(0,3))
plt.savefig(filepath)
plt.close()
# Call function for each variable
var_hist(sep_len, 1, 'sepal_length_cm', 'Frequency', 'Sepal Length', 'sepal_length.png')
var_hist(sep_width, 2, 'sepal_width_cm', 'Frequency', 'Sepal Width', 'sepal_width.png')
var_hist(pet_len, 3, 'petal_length_cm', 'Frequency', 'Petal Length', 'petal_length.png')
var_hist(pet_width, 4, 'petal_width_cm', 'Frequency', 'Petal Width', 'petal_width.png')
var_hist2(species, 5, 'species', 'Frequency', 'Iris Species', 'species.png')
# 4 axes on one figure for better visual comparison
fig, axs = plt.subplots(2, 2)
axs1 = axs[0, 0]
axs1.hist(sep_len, rwidth=0.9)
axs1.set_title('Sepal_Length_Cm')
axs1.set(ylabel='frequency')
axs2 = axs[0, 1]
axs2.hist(sep_width, rwidth=0.9)
axs2.set_title('Sepal_Width_Cm',)
axs2.set(ylabel='frequency')
axs3 = axs[1, 0]
axs3.hist(pet_len, rwidth=0.9)
axs3.set_title('Petal_Length_Cm')
axs3.set(ylabel='frequency')
axs4 = axs[1, 1]
axs4.hist(pet_width, rwidth=0.9)
axs4.set_title('Petal_Width_Cm')
axs4.set(ylabel='frequency')
#plt.show()
plt.close()
# C. Output a scatter plot of each pair of variables
# Scatter plot with matplotlib (no colour separation)
plt.scatter(sep_len, sep_width)
plt.xlabel('sepal_length')
plt.ylabel('sepal_width')
#plt.show()
plt.close()
# Write a function which outputs a scatter plot of each pair of variables.
# Each categorical variable (species of iris flower) is categorized by colour
def scatter(x, y):
sns.set(style="darkgrid", font_scale=1.25)
sns.lmplot(x, y, iris_data, fit_reg=False, hue='species')
plt.show()
plt.close()
# Call function for each pair of variables
scatter('sepal_length', 'sepal_width')
scatter('sepal_length', 'petal_length')
scatter('sepal_length', 'petal_width')
scatter('sepal_width', 'petal_length')
scatter('sepal_width', 'petal_width')
scatter('petal_length', 'petal_width')
# Output pairplot using kde to represent marginal distribution
sns.set(style='ticks', font_scale=1.25, color_codes=True)
sns.pairplot(iris_data, hue='species', diag_kind='kde')
plt.show()
|
the-stack_0_18850 | # Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from newrelic.admin import command, usage
@command('debug-console', 'config_file [session_log]',
"""Runs the client for the embedded agent debugging console.
""", hidden=True, log_intercept=False)
def debug_console(args):
import sys
if len(args) == 0:
usage('debug-console')
sys.exit(1)
from newrelic.console import ClientShell
config_file = args[0]
log_object = None
if len(args) >= 2:
log_object = open(args[1], 'w')
shell = ClientShell(config_file, log=log_object)
shell.cmdloop()
|
the-stack_0_18851 | from __future__ import division
from __future__ import print_function
import sys
sys.path.append('..')
import argparse
import pickle
import numpy as np
import time
import torch
from gcn import GCNSynthetic
from cf_explanation.cf_explainer import CFExplainer
from utils.utils import normalize_adj, get_neighbourhood, safe_open
from torch_geometric.utils import dense_to_sparse
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='syn1')
# Based on original GCN models -- do not change
parser.add_argument('--hidden', type=int, default=20, help='Number of hidden units.')
parser.add_argument('--n_layers', type=int, default=3, help='Number of convolutional layers.')
parser.add_argument('--dropout', type=float, default=0.0, help='Dropout rate (between 0 and 1)')
# For explainer
parser.add_argument('--seed', type=int, default=42, help='Random seed.')
parser.add_argument('--lr', type=float, default=0.005, help='Learning rate for explainer')
parser.add_argument('--optimizer', type=str, default="SGD", help='SGD or Adadelta')
parser.add_argument('--n_momentum', type=float, default=0.0, help='Nesterov momentum')
parser.add_argument('--beta', type=float, default=0.5, help='Tradeoff for dist loss')
parser.add_argument('--num_epochs', type=int, default=500, help='Num epochs for explainer')
parser.add_argument('--edge_additions', type=int, default=0, help='Include edge additions?')
parser.add_argument('--device', default='cpu', help='CPU or GPU.')
args = parser.parse_args()
print(args)
args.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.autograd.set_detect_anomaly(True)
# Import dataset from GNN explainer paper
with open("../data/gnn_explainer/{}.pickle".format(args.dataset[:4]), "rb") as f:
data = pickle.load(f)
adj = torch.Tensor(data["adj"]).squeeze() # Does not include self loops
features = torch.Tensor(data["feat"]).squeeze()
labels = torch.tensor(data["labels"]).squeeze()
idx_train = torch.tensor(data["train_idx"])
idx_test = torch.tensor(data["test_idx"])
edge_index = dense_to_sparse(adj) # Needed for pytorch-geo functions
# Change to binary task: 0 if not in house, 1 if in house
if args.dataset == "syn1_binary":
labels[labels==2] = 1
labels[labels==3] = 1
norm_adj = normalize_adj(adj) # According to reparam trick from GCN paper
# Set up original model, get predictions
model = GCNSynthetic(nfeat=features.shape[1], nhid=args.hidden, nout=args.hidden,
nclass=len(labels.unique()), dropout=args.dropout)
model.load_state_dict(torch.load("../models/gcn_3layer_{}.pt".format(args.dataset)))
model.eval()
output = model(features, norm_adj)
y_pred_orig = torch.argmax(output, dim=1)
print("y_true counts: {}".format(np.unique(labels.numpy(), return_counts=True)))
print("y_pred_orig counts: {}".format(np.unique(y_pred_orig.numpy(), return_counts=True))) # Confirm model is actually doing something
# Get CF examples in test set
test_cf_examples = []
start = time.time()
for i in idx_test[:20]:
sub_adj, sub_feat, sub_labels, node_dict = get_neighbourhood(int(i), edge_index, args.n_layers + 1, features, labels)
new_idx = node_dict[int(i)]
# Check that original model gives same prediction on full graph and subgraph
with torch.no_grad():
print("Output original model, full adj: {}".format(output[i]))
print("Output original model, sub adj: {}".format(model(sub_feat, normalize_adj(sub_adj))[new_idx]))
    # Need to instantiate a new cf model every time because the size of P changes based on the size of sub_adj
explainer = CFExplainer(model=model,
sub_adj=sub_adj,
sub_feat=sub_feat,
n_hid=args.hidden,
dropout=args.dropout,
sub_labels=sub_labels,
y_pred_orig=y_pred_orig[i],
num_classes = len(labels.unique()),
beta=args.beta,
device=args.device)
if args.device == 'cuda':
model.cuda()
explainer.cf_model.cuda()
adj = adj.cuda()
norm_adj = norm_adj.cuda()
features = features.cuda()
labels = labels.cuda()
idx_train = idx_train.cuda()
idx_test = idx_test.cuda()
cf_example = explainer.explain(node_idx=i, cf_optimizer=args.optimizer, new_idx=new_idx, lr=args.lr,
n_momentum=args.n_momentum, num_epochs=args.num_epochs, node_dict=node_dict) # Need node dict for accuracy calculation
test_cf_examples.append(cf_example)
print("Time for {} epochs of one example: {:.4f}min".format(args.num_epochs, (time.time() - start)/60))
print("Total time elapsed: {:.4f}s".format((time.time() - start)/60))
print("Number of CF examples found: {}/{}".format(len(test_cf_examples), len(idx_test)))
# Save CF examples in test set
if args.edge_additions == 1:
with safe_open("../results_incl_additions/{}/{}/{}_cf_examples_lr{}_beta{}_mom{}_epochs{}".format(args.dataset, args.optimizer, args.dataset,
args.lr, args.beta, args.n_momentum, args.num_epochs), "wb") as f:
pickle.dump(test_cf_examples, f)
elif args.edge_additions == 0:
with safe_open("../results/{}/{}/{}_cf_examples_lr{}_beta{}_mom{}_epochs{}".format(args.dataset, args.optimizer, args.dataset,
args.lr, args.beta, args.n_momentum, args.num_epochs), "wb") as f:
pickle.dump(test_cf_examples, f) |
the-stack_0_18855 | import numpy as np
import os
import json
import tqdm
import time
from collections import defaultdict
import torch
import shutil
import datetime
from utils.ptracker import PerformanceTracker
from utils.dataloader import DataLoader
from utils.utils import find
from tasks.task_generator import TaskGenerator
class ExperimentBuilder():
def __init__(self, model, tasks, datasets, device, args):
"""
Builds a single experiment based on the configuration parameters.
"""
print('Setting up Experiment Builder')
self.model = model
self.tasks = tasks
self.datasets = datasets
self.device = device
self.args = args
self.task_args = args.task_args
self.experiment_timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d%H%M%S')
self.create_experiment_folder()
self.state = State(args.seed)
self.ptracker = PerformanceTracker(
folder=self.performance_folder,
args=self.args.ptracker_args
)
self.val_or_test = args.val_or_test
self.template='epoch-{:03d}'
def create_experiment_folder(self):
"""
Creates the experiment folder where checkpoint, configs, and performance is saved
"""
if self.args.experiment_folder is None:
self.args.experiment_folder = os.path.join(os.path.abspath(self.args.results_folder), self.args.experiment_name)
self.experiment_folder = self.args.experiment_folder
self.checkpoint_folder = os.path.join(self.experiment_folder, 'checkpoint')
self.log_folder = os.path.join(self.experiment_folder, 'logs')
self.performance_folder = os.path.join(self.experiment_folder, 'performance')
self.visualisation_folder = os.path.join(self.experiment_folder, 'visualisation')
if self.args.dummy_run:
print('NOT Creating: ', self.experiment_folder)
return
print('Experiment folder: ', self.experiment_folder)
if self.args.continue_from in [None, 'None', 'from_scratch'] and self.args.clean_folder:
print('CLEARING FOLDER')
shutil.rmtree(self.experiment_folder, ignore_errors=True)
os.makedirs(self.experiment_folder, exist_ok=True)
os.makedirs(self.checkpoint_folder, exist_ok=True)
os.makedirs(self.log_folder, exist_ok=True)
os.makedirs(self.performance_folder, exist_ok=True)
os.makedirs(self.visualisation_folder, exist_ok=True)
config_path = os.path.join(self.experiment_folder, 'config_{}.json'.format(self.experiment_timestamp))
# Save args into a file
with open(config_path, 'w') as f:
json.dump(self.args, f, indent=2, sort_keys=True)
def load_from_checkpoint(self, checkpoint_name_or_path, load_model_only=False, load_backbone_only=False):
"""
Loads the model and state of the experiment from a checkpoint file
"""
# Find checkpoint
if os.path.isfile(checkpoint_name_or_path):
filepath = checkpoint_name_or_path
else:
filepath = os.path.join(self.checkpoint_folder, checkpoint_name_or_path)
if not os.path.isfile(filepath):
raise Exception('Invalid checkpoint name or path: {}'.format(checkpoint_name_or_path))
# Device mapping
if torch.cuda.is_available():
map_location= lambda storage, location: storage.cuda(self.device)
else:
map_location='cpu'
print('Loading from checkpoint', filepath)
checkpoint = torch.load(filepath, map_location=map_location)
state_, model_, performance_log = checkpoint['state'], checkpoint['model'], checkpoint['performance_log']
if load_model_only and not load_backbone_only:
self.model.load_state_dict(model_)
return
elif load_backbone_only:
model_layers = self.model.state_dict()
to_load = {k:v for k,v in model_.items() if 'backbone.' in k and k in model_layers} # load only if in model
model_layers.update(to_load)
self.model.load_state_dict(model_layers)
return
self.state.from_dict(state_)
self.model.load_state_dict(model_)
self.ptracker.load_from_logfile(performance_log)
def save_checkpoint(self, checkpoint_name=None):
"""
Saves the model and state of the experiment in a checkpoint file
"""
if self.args.dummy_run:
print('dummy run, no saving')
return (self.state.epoch + 1 ) >= self.args.num_epochs
if checkpoint_name is None:
checkpoint_name = self.template.format(self.state.epoch)
checkpointpath = os.path.join(self.checkpoint_folder, checkpoint_name)
performance_logfile = os.path.join(self.performance_folder, checkpoint_name + '.json')
checkpoint_path_to_best = os.path.join(self.checkpoint_folder, 'path_to_best')
ptracker_path_to_best = os.path.join(self.performance_folder, 'path_to_best')
# Updates best val model if the validation score beats the previous best
is_val_best_updated = self.ptracker.update_best(checkpointpath, self.state.epoch)['val']
is_val_best_updated = is_val_best_updated # or if no validation
# Update path_to_best if best updated or no previous 'path to best'
if is_val_best_updated or self.args.no_val_loop or not os.path.isfile(checkpoint_path_to_best):
with open(checkpoint_path_to_best,'w') as f:
f.write(checkpointpath)
with open(ptracker_path_to_best,'w') as f:
f.write(performance_logfile)
# Make checkpoint
checkpoint={}
checkpoint['state'] = self.state.to_dict()
checkpoint['model'] = self.model.state_dict()
checkpoint['performance_log'] = performance_logfile
# Save checkpoint
print('saving to', checkpointpath, '\t\t')
torch.save(checkpoint, checkpointpath)
self.ptracker.save_logfile(performance_logfile, ['train', 'val'])
# Delete checkpoints due to heavy storage
if (self.args.model in ['matchingnet','btaml']) or ('ResNet' in self.args.backbone) or self.args.storage_friendly:
            in_allowed_epochs = lambda x: x % 40 == 19 # allowed every 40th epoch, i.e. at 19th, 59th, 99th etc.
current_epoch = self.state.epoch
previous_epoch = self.state.epoch -1
if not self.args.no_val_loop:
current_best_epoch = self.ptracker.current_best['val']['epoch']
previous_best_epoch = self.ptracker.previous_bests['val'][-1]['epoch']
else:
current_best_epoch = current_epoch
previous_best_epoch = previous_epoch
# remove previous epoch checkpoint (unless current best or in allowed epochs)
if previous_epoch >= 0 and \
not in_allowed_epochs(previous_epoch) and \
previous_epoch != current_best_epoch:
path = os.path.join(self.checkpoint_folder, self.template.format(previous_epoch))
print('removing', path, '\t\t')
os.remove(path)
# remove previous best epoch checkpoint if best has been updated (unless is in allowed epochs)
if is_val_best_updated and \
previous_best_epoch >= 0 and \
not in_allowed_epochs(previous_best_epoch) and \
previous_best_epoch != current_best_epoch and \
previous_best_epoch != previous_epoch:
path = os.path.join(self.checkpoint_folder, self.template.format(previous_best_epoch))
print('removing', path, '\t\t')
os.remove(path)
# Stop criterion
if self.args.num_epochs is None:
current_best_epoch = self.ptracker.current_best['val']['epoch']
return (self.state.epoch-current_best_epoch) > 30
return (self.state.epoch + 1 ) >= self.args.num_epochs
def load_pretrained(self):
"""
Loads model from self.args.continue_from
        Resumes the training state from the checkpoint unless only the model or backbone weights are loaded.
"""
print('Continuing from', self.args.continue_from)
if self.args.continue_from in [None, 'None', 'from_scratch'] or self.args.dummy_run:
return
if self.args.continue_from == 'latest':
checkpoint_names = find('epoch*', self.checkpoint_folder)
checkpoint_names = sorted(checkpoint_names)
self.args.continue_from = checkpoint_names[-1]
print('LATEST', self.args.continue_from)
self.load_from_checkpoint(self.args.continue_from, load_backbone_only=self.args.load_backbone_only)
elif self.args.continue_from == 'best':
with open(os.path.join(self.checkpoint_folder,'path_to_best'),'r') as f:
self.args.continue_from = f.read()
print('BEST', self.args.continue_from)
self.load_from_checkpoint(self.args.continue_from, load_backbone_only=self.args.load_backbone_only)
elif self.args.continue_from.isdigit():
checkpoint_name = 'epoch-{:03d}'.format(int(self.args.continue_from))
self.args.continue_from = os.path.join(self.checkpoint_folder, checkpoint_name)
print('EPOCH', self.args.continue_from)
self.load_from_checkpoint(self.args.continue_from, load_backbone_only=self.args.load_backbone_only)
else: # assume 'continue_from' contains a checkpoint folder, or if not, a filename
if os.path.isdir(self.args.continue_from):
with open(os.path.join(self.args.continue_from, 'checkpoint', 'path_to_best'), 'r') as f:
filename = f.read()
elif os.path.isfile(self.args.continue_from):
filename = self.args.continue_from
else:
raise Exception("Filename / experiment folder not found! Path given: {}".format(self.args.continue_from))
print('FILE', filename)
self.load_from_checkpoint(filename, load_model_only=True, load_backbone_only=self.args.load_backbone_only)
if not self.args.load_backbone_only: # since otherwise we want to start from epoch 0
self.ptracker.reset_epoch_cache()
self.state.next_epoch()
self.model.next_epoch()
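    # Illustrative note (comments only): values accepted for args.continue_from above
    #   'latest'            -> newest 'epoch-*' checkpoint in this experiment's checkpoint folder
    #   'best'              -> checkpoint recorded in checkpoint/path_to_best
    #   a digit, e.g. '17'  -> checkpoint/epoch-017
    #   a path              -> an explicit checkpoint file, or another experiment folder's best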
def get_task_generator(self, set_name, num_tasks, seed):
return TaskGenerator(self.datasets[set_name],
task=self.tasks[set_name],
task_args=self.task_args[set_name],
num_tasks=num_tasks,
seed=seed,
epoch=self.state.epoch,
mode=set_name,
fix_classes=self.args.fix_class_distribution,
deterministic=self.args.deterministic)
def get_dataloader(self, dataset, sampler, epoch, mode):
return DataLoader(dataset, sampler, self.device, epoch, mode)
def train_model(self):
"""
Runs the main thread of the experiment
"""
if self.args.evaluate_on_test_set_only:
return
converged = False if self.args.num_epochs is None else self.state.epoch >= self.args.num_epochs
while not converged:
train_generator = self.get_task_generator(
'train',
self.args.num_tasks_per_epoch,
self.state.epoch_seed)
self.ptracker.set_mode('train')
# train
with tqdm.tqdm( initial=0, total=self.args.num_tasks_per_epoch, disable=not self.args.tqdm) as train_pbar:
for train_sampler in train_generator:
dataloader = self.get_dataloader(self.datasets['train'], train_sampler, self.state.epoch, 'train')
self.model.meta_train(dataloader, self.ptracker)
train_pbar.set_description('Train phase {}/{} -> {} {}'.format(
self.state.epoch, self.args.num_epochs-1, self.ptracker.get_performance_str(), self.model.get_summary_str()))
train_pbar.update(1)
val_generator = self.get_task_generator(
self.val_or_test,
self.args.num_tasks_per_validation,
self.state.epoch_seed)
if not self.args.tqdm:
print('Train phase {}/{} -> {} {}'.format(
self.state.epoch, self.args.num_epochs-1, self.ptracker.get_performance_str(), self.model.get_summary_str()))
self.ptracker.set_mode('val')
# simpleshot calc train dataset mean for normalisation during validation
if self.args.model == 'simpleshot':
self.model.set_train_mean(self.datasets['train'], istqdm=self.args.tqdm)
# validation
if not self.args.no_val_loop:
with tqdm.tqdm(initial=0, total=self.args.num_tasks_per_validation, disable=not self.args.tqdm) as pbar_val:
for val_sampler in val_generator:
val_dataloader=self.get_dataloader(self.datasets[self.val_or_test],val_sampler,self.state.epoch,'val')
self.model.meta_val(val_dataloader, self.ptracker)
pbar_val.set_description('Val phase {}/{} -> {} {}'.format(
self.state.epoch, self.args.num_epochs-1, self.ptracker.get_performance_str(), self.model.get_summary_str()))
pbar_val.update(1)
if not self.args.tqdm:
print('Val phase {}/{} -> {} {}'.format(
self.state.epoch, self.args.num_epochs-1, self.ptracker.get_performance_str(), self.model.get_summary_str()))
else:
print("No validation phase; set '--no_val_loop False' ")
converged = self.save_checkpoint()
self.ptracker.reset_epoch_cache() # call after save_checkpoint() otherwise performance will be lost
self.state.next_epoch()
self.model.next_epoch()
print()
print()
def evaluate_model(self):
"""
Evaluate final performance on the best model
"""
if self.args.dummy_run or self.args.evaluate_on_test_set_only:
self.evaluate_on_test()
return
# Load best checkpoint path from path_to_best
path_to_best = os.path.join(self.checkpoint_folder, 'path_to_best')
if not os.path.exists(path_to_best):
raise Exception('path_to_best not found: {}'.format(path_to_best))
with open(path_to_best,'r') as f:
checkpointfile = f.read()
self.load_from_checkpoint(checkpointfile)
self.evaluate_on_test()
def evaluate_on_test(self):
"""
Evaluates the current model on the test set
"""
self.ptracker.set_mode('test')
checkpoint_name = self.template.format(self.state.epoch)
# Get train mean for simpleshot for faster performance
if self.args.model == 'simpleshot':
self.model.set_train_mean(self.datasets['train'])
# Evaluate on test (note: seed set to experiment seed, not epoch seed, which allows for fair evaluation)
generator = self.get_task_generator('test', self.args.num_tasks_per_testing, self.args.seed)
with tqdm.tqdm(total=self.args.num_tasks_per_testing, disable=not self.args.tqdm) as pbar_val:
for sampler in generator:
dataloader = self.get_dataloader(self.datasets['test'], sampler, self.state.epoch, 'test')
self.model.meta_test(dataloader, self.ptracker)
pbar_val.update(1)
pbar_val.set_description('Testing ({}) -> {} {}'.format(checkpoint_name,
self.ptracker.get_performance_str(),
self.model.get_summary_str()))
if not self.args.tqdm:
print('Testing ({}) -> {} {}'.format(checkpoint_name,
self.ptracker.get_performance_str(),
self.model.get_summary_str()))
if self.args.dummy_run: # no saving
return
performance_logfile = '{}_{}_{}.json'.format(
os.path.join(self.performance_folder, checkpoint_name),
self.args.test_performance_tag,
self.experiment_timestamp)
self.ptracker.save_logfile(performance_logfile, ['test'])
self.ptracker.reset_epoch_cache()
class State():
def __init__(self, experiment_seed):
"""
Keeps track of the current training epoch and seed
"""
self.epoch = 0
self.epoch_completed_in_this_run = 0
self.epoch_rng = np.random.RandomState(experiment_seed)
self.epoch_seed = self.epoch_rng.randint(999999999)
def next_epoch(self):
self.epoch += 1
self.epoch_completed_in_this_run += 1
self.epoch_seed = self.epoch_rng.randint(999999999)
def to_dict(self):
return {
'epoch_seed': self.epoch_seed,
'epoch': self.epoch,
'epoch_rng': self.epoch_rng
}
def from_dict(self, adict):
self.epoch_seed = adict['epoch_seed']
self.epoch = adict['epoch']
self.epoch_rng = adict['epoch_rng']
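# Illustrative sketch (comments only): State draws a fresh epoch seed from the
# experiment-level seed each epoch, so reruns are reproducible.
#
#   s = State(experiment_seed=123)
#   s.epoch, s.epoch_seed   # epoch 0, seed drawn from np.random.RandomState(123)
#   s.next_epoch()          # epoch -> 1, a new epoch_seed is drawn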
|
the-stack_0_18856 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = '''
---
module: federation_idp
short_description: manage a federation Identity Provider
author: OpenStack Ansible SIG
description:
- Manage a federation Identity Provider.
options:
name:
description:
- The name of the Identity Provider.
type: str
required: true
aliases: ['id']
state:
description:
- Whether the Identity Provider should be C(present) or C(absent).
choices: ['present', 'absent']
default: present
type: str
description:
description:
- The description of the Identity Provider.
type: str
domain_id:
description:
- The ID of a domain that is associated with the Identity Provider.
Federated users that authenticate with the Identity Provider will be
created under the domain specified.
- Required when creating a new Identity Provider.
type: str
enabled:
description:
- Whether the Identity Provider is enabled or not.
- Will default to C(true) when creating a new Identity Provider.
type: bool
aliases: ['is_enabled']
remote_ids:
description:
- "List of the unique Identity Provider's remote IDs."
- Will default to an empty list when creating a new Identity Provider.
type: list
elements: str
requirements:
- "python >= 3.6"
- "openstacksdk >= 0.44"
extends_documentation_fragment:
- openstack.cloud.openstack
'''
EXAMPLES = '''
- name: Create an identity provider
openstack.cloud.federation_idp:
cloud: example_cloud
name: example_provider
domain_id: 0123456789abcdef0123456789abcdef
description: 'My example IDP'
remote_ids:
- 'https://auth.example.com/auth/realms/ExampleRealm'
- name: Delete an identity provider
openstack.cloud.federation_idp:
cloud: example_cloud
name: example_provider
state: absent
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import openstack_full_argument_spec
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import openstack_module_kwargs
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import openstack_cloud_from_module
def normalize_idp(idp):
"""
Normalizes the IDP definitions so that the outputs are consistent with the
parameters
- "enabled" (parameter) == "is_enabled" (SDK)
- "name" (parameter) == "id" (SDK)
"""
if idp is None:
return None
_idp = idp.to_dict()
_idp['enabled'] = idp['is_enabled']
_idp['name'] = idp['id']
return _idp
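# Illustrative sketch (comments only): for an SDK object whose id is
# 'example_provider' and whose is_enabled flag is True, the normalized dict adds
# name='example_provider' and enabled=True so the module output matches the
# parameter names documented above.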
def delete_identity_provider(module, sdk, cloud, idp):
"""
Delete an existing Identity Provider
returns: the "Changed" state
"""
if idp is None:
return False
if module.check_mode:
return True
try:
cloud.identity.delete_identity_provider(idp)
except sdk.exceptions.OpenStackCloudException as ex:
module.fail_json(msg='Failed to delete identity provider: {0}'.format(str(ex)))
return True
def create_identity_provider(module, sdk, cloud, name):
"""
Create a new Identity Provider
returns: the "Changed" state and the new identity provider
"""
if module.check_mode:
return True, None
description = module.params.get('description')
enabled = module.params.get('enabled')
domain_id = module.params.get('domain_id')
remote_ids = module.params.get('remote_ids')
if enabled is None:
enabled = True
if remote_ids is None:
remote_ids = []
attributes = {
'domain_id': domain_id,
'enabled': enabled,
'remote_ids': remote_ids,
}
if description is not None:
attributes['description'] = description
try:
idp = cloud.identity.create_identity_provider(id=name, **attributes)
except sdk.exceptions.OpenStackCloudException as ex:
module.fail_json(msg='Failed to create identity provider: {0}'.format(str(ex)))
return (True, idp)
def update_identity_provider(module, sdk, cloud, idp):
"""
Update an existing Identity Provider
returns: the "Changed" state and the new identity provider
"""
description = module.params.get('description')
enabled = module.params.get('enabled')
domain_id = module.params.get('domain_id')
remote_ids = module.params.get('remote_ids')
attributes = {}
if (description is not None) and (description != idp.description):
attributes['description'] = description
if (enabled is not None) and (enabled != idp.is_enabled):
attributes['enabled'] = enabled
if (domain_id is not None) and (domain_id != idp.domain_id):
attributes['domain_id'] = domain_id
if (remote_ids is not None) and (remote_ids != idp.remote_ids):
attributes['remote_ids'] = remote_ids
if not attributes:
return False, idp
if module.check_mode:
return True, None
try:
new_idp = cloud.identity.update_identity_provider(idp, **attributes)
except sdk.exceptions.OpenStackCloudException as ex:
module.fail_json(msg='Failed to update identity provider: {0}'.format(str(ex)))
return (True, new_idp)
def main():
""" Module entry point """
argument_spec = openstack_full_argument_spec(
name=dict(required=True, aliases=['id']),
state=dict(default='present', choices=['absent', 'present']),
description=dict(),
domain_id=dict(),
enabled=dict(type='bool', aliases=['is_enabled']),
remote_ids=dict(type='list', elements='str'),
)
module_kwargs = openstack_module_kwargs(
)
module = AnsibleModule(
argument_spec,
supports_check_mode=True,
**module_kwargs
)
name = module.params.get('name')
state = module.params.get('state')
changed = False
sdk, cloud = openstack_cloud_from_module(module, min_version="0.44")
try:
idp = cloud.identity.get_identity_provider(name)
except sdk.exceptions.ResourceNotFound:
idp = None
except sdk.exceptions.OpenStackCloudException as ex:
module.fail_json(msg='Failed to get identity provider: {0}'.format(str(ex)))
if state == 'absent':
if idp is not None:
changed = delete_identity_provider(module, sdk, cloud, idp)
module.exit_json(changed=changed)
# state == 'present'
else:
if idp is None:
if module.params.get('domain_id') is None:
module.fail_json(msg='A domain_id must be passed when creating'
' an identity provider')
(changed, idp) = create_identity_provider(module, sdk, cloud, name)
idp = normalize_idp(idp)
module.exit_json(changed=changed, identity_provider=idp)
(changed, new_idp) = update_identity_provider(module, sdk, cloud, idp)
new_idp = normalize_idp(new_idp)
module.exit_json(changed=changed, identity_provider=new_idp)
if __name__ == '__main__':
main()
|
the-stack_0_18857 | #pip install flask
#export FLASK_APP=main.py
#flask run
#para habilitar el modo prueba
#export FLASK_DEBUG=1
from flask import Flask,request,make_response,redirect
from flask.templating import render_template
from flask_bootstrap import Bootstrap
app = Flask(__name__)
bootstrap = Bootstrap(app)
todos = ['GITHUB','PYTHON DESDE CERO','DESARROLLO DE SISTEMAS CON FLASK']
@app.errorhandler(404)
def not_found(error):
    return render_template('404.html', error=error), 404
@app.route('/bs')
def bs():
return render_template('bs.html')
@app.route('/')
def index():
user_ip = request.remote_addr
response = make_response(redirect('/hello'))
response.set_cookie('user_ip',user_ip)
return response
@app.route('/hello')
def hello():
user_ip = request.cookies.get('user_ip')
context = {
'user_ip':user_ip,
'todos':todos,
}
return render_template('hello.html',**context)
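# Illustrative sketch (comments only) of the cookie round-trip implemented above:
#   GET /       -> stores the client address in a 'user_ip' cookie, 302 to /hello
#   GET /hello  -> reads the cookie and renders hello.html with user_ip and todos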
if __name__ == '__main__':
    app.run(debug=True, port=5000)
the-stack_0_18858 |
import uuid
from datetime import datetime
import logging
import astropy.io.fits as fits
from numina.simulation.factory import extract
from numina.simulation.shutter import Shutter
from numina.simulation.factory import PersistentRunCounter
from clodiadrp.simulation.instrument import ClodiaInstrument
from clodiadrp.simulation.detector import CLODIA_DETECTOR
from clodiadrp.simulation.sequences import clodia_sequences
_logger = logging.getLogger(__name__)
class ClodiaImageFactory(object):
CARDS_P = [
('OBSERVAT', 'UCM', 'Name of observatory'),
('TELESCOP', 'PATRIX', 'Telescope id.'),
('INSTRUME', 'CLODIA', 'Name of the Instrument'),
('ORIGIN', 'SIMULATOR', 'FITS file originator'),
('INSCONF', 'v1')
]
def create(self, data, name, control):
pheader = fits.Header(self.CARDS_P)
# pheader['FILENAME'] = name
instrument = control.get(name)
pheader['OBSMODE'] = control.mode
pheader['UUID'] = str(uuid.uuid4())
meta = instrument.config_info()
extract(pheader, meta, ['CLODIA.Detector', 'exposed'], 'EXPTIME')
extract(pheader, meta, ['CLODIA.Detector', 'exposed'], 'EXPOSED')
extract(pheader, meta, ['CLODIA.Detector', 'DATE-OBS'], 'DATE-OBS')
extract(pheader, meta, ['CLODIA', 'insmode'], 'insmode', default='unknown')
hdu1 = fits.PrimaryHDU(data, header=pheader)
hdul = fits.HDUList([hdu1])
return hdul
def create_instrument():
detector = CLODIA_DETECTOR
shutter = Shutter()
instrument = ClodiaInstrument(shutter, detector)
return instrument
class ControlSystem(object):
"""Top level"""
def __init__(self, factory):
self.imagecount = PersistentRunCounter('r00%04d.fits')
self._elements = {}
self.mode = 'null'
self.factory = factory
self.ob_data = dict(count=0, repeat=0, name=None, obsid=1)
self.targets = None
def register(self, name, element):
self._elements[name] = element
def get(self, name):
return self._elements[name]
def set_mode(self, mode):
self.mode = mode
def run(self, instrument, exposure, repeat=1):
if repeat < 1:
return
ins = self.get(instrument)
_logger.info('mode is %s', self.mode)
try:
thiss = ins.sequences[self.mode]
except KeyError:
_logger.error('No sequence for mode %s', self.mode)
raise
iterf = thiss.run(self, exposure, repeat)
self.ob_data['repeat'] = repeat
self.ob_data['name'] = None
for count, final in enumerate(iterf, 1):
_logger.info('image %d of %d', count, repeat)
self.ob_data['name'] = self.imagecount.runstring()
self.ob_data['count'] = count
fitsfile = self.factory.create(final, instrument, self)
_logger.info('save image %s', self.ob_data['name'])
fitsfile.writeto(self.ob_data['name'], overwrite=True)
def config_info(self):
return {'ob_data': self.ob_data}
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--exposure', type=float, default=0.0,
help="Exposure time per image (in seconds) [0,36000]")
parser.add_argument('-n', '--nimages', metavar="INT", type=int, default=1,
help="Number of images to generate")
parser.add_argument('omode', choices=clodia_sequences().keys(),
help="Observing mode of the intrument")
args = parser.parse_args()
instrument = create_instrument()
factory = ClodiaImageFactory()
control = ControlSystem(factory)
control.register('CLODIA', instrument)
control.set_mode(args.omode)
control.run('CLODIA', args.exposure, repeat=args.nimages)
control.imagecount.store()
if __name__ == '__main__':
    main() |
the-stack_0_18859 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt
from frappe import msgprint, _
def execute(filters=None):
if not filters: filters = {}
invoice_list = get_invoices(filters)
columns, income_accounts, tax_accounts = get_columns(invoice_list)
if not invoice_list:
msgprint(_("No record found"))
return columns, invoice_list
invoice_income_map = get_invoice_income_map(invoice_list)
invoice_income_map, invoice_tax_map = get_invoice_tax_map(invoice_list,
invoice_income_map, income_accounts)
invoice_so_dn_map = get_invoice_so_dn_map(invoice_list)
	customer_map = get_customer_details(invoice_list)
company_currency = frappe.db.get_value("Company", filters.company, "default_currency")
data = []
for inv in invoice_list:
# invoice details
sales_order = list(set(invoice_so_dn_map.get(inv.name, {}).get("sales_order", [])))
delivery_note = list(set(invoice_so_dn_map.get(inv.name, {}).get("delivery_note", [])))
row = [inv.name, inv.posting_date, inv.customer, inv.customer_name,
customer_map.get(inv.customer, {}).get("customer_group"),
customer_map.get(inv.customer, {}).get("territory"),
inv.debit_to, inv.mode_of_payment, inv.project, inv.remarks,
", ".join(sales_order), ", ".join(delivery_note), company_currency]
# map income values
base_net_total = 0
for income_acc in income_accounts:
income_amount = flt(invoice_income_map.get(inv.name, {}).get(income_acc))
base_net_total += income_amount
row.append(income_amount)
# net total
row.append(base_net_total or inv.base_net_total)
# tax account
total_tax = 0
for tax_acc in tax_accounts:
if tax_acc not in income_accounts:
tax_amount = flt(invoice_tax_map.get(inv.name, {}).get(tax_acc))
total_tax += tax_amount
row.append(tax_amount)
# total tax, grand total, outstanding amount & rounded total
row += [total_tax, inv.base_grand_total, inv.base_rounded_total, inv.outstanding_amount]
data.append(row)
return columns, data
def get_columns(invoice_list):
"""return columns based on filters"""
columns = [
_("Invoice") + ":Link/Sales Invoice:120", _("Posting Date") + ":Date:80",
_("Customer Id") + "::120", _("Customer Name") + "::120",
_("Customer Group") + ":Link/Customer Group:120", _("Territory") + ":Link/Territory:80",
_("Receivable Account") + ":Link/Account:120", _("Mode of Payment") + ":Link/Mode of Payment:80",
_("Project") +":Link/Project:80", _("Remarks") + "::150",
_("Sales Order") + ":Link/Sales Order:100", _("Delivery Note") + ":Link/Delivery Note:100",
_("Currency") + "::100"
]
income_accounts = tax_accounts = income_columns = tax_columns = []
if invoice_list:
income_accounts = frappe.db.sql_list("""select distinct income_account
from `tabSales Invoice Item` where docstatus = 1 and parent in (%s)
order by income_account""" %
', '.join(['%s']*len(invoice_list)), tuple([inv.name for inv in invoice_list]))
tax_accounts = frappe.db.sql_list("""select distinct account_head
from `tabSales Taxes and Charges` where parenttype = 'Sales Invoice'
and docstatus = 1 and base_tax_amount_after_discount_amount != 0
and parent in (%s) order by account_head""" %
', '.join(['%s']*len(invoice_list)), tuple([inv.name for inv in invoice_list]))
income_columns = [(account + ":Currency/currency:120") for account in income_accounts]
for account in tax_accounts:
if account not in income_accounts:
tax_columns.append(account + ":Currency/currency:120")
columns = columns + income_columns + [_("Net Total") + ":Currency/currency:120"] + tax_columns + \
[_("Total Tax") + ":Currency/currency:120", _("Grand Total") + ":Currency/currency:120",
_("Rounded Total") + ":Currency/currency:120", _("Outstanding Amount") + ":Currency/currency:120"]
return columns, income_accounts, tax_accounts
def get_conditions(filters):
conditions = ""
if filters.get("company"): conditions += " and company=%(company)s"
if filters.get("customer"): conditions += " and customer = %(customer)s"
if filters.get("from_date"): conditions += " and posting_date >= %(from_date)s"
if filters.get("to_date"): conditions += " and posting_date <= %(to_date)s"
if filters.get("mode_of_payment"): conditions += " and ifnull(mode_of_payment, '') = %(mode_of_payment)s"
return conditions
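# Illustrative sketch (comments only; the company name is hypothetical): filters
# such as {"company": "ACME", "from_date": "2015-01-01", "to_date": "2015-12-31"}
# yield " and company=%(company)s and posting_date >= %(from_date)s and posting_date <= %(to_date)s",
# which frappe.db.sql then interpolates with the same filters dict.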
def get_invoices(filters):
conditions = get_conditions(filters)
return frappe.db.sql("""select name, posting_date, debit_to, project, customer, customer_name, remarks,
base_net_total, base_grand_total, base_rounded_total, outstanding_amount, mode_of_payment
from `tabSales Invoice`
where docstatus = 1 %s order by posting_date desc, name desc""" %
conditions, filters, as_dict=1)
def get_invoice_income_map(invoice_list):
income_details = frappe.db.sql("""select parent, income_account, sum(base_net_amount) as amount
from `tabSales Invoice Item` where parent in (%s) group by parent, income_account""" %
', '.join(['%s']*len(invoice_list)), tuple([inv.name for inv in invoice_list]), as_dict=1)
invoice_income_map = {}
for d in income_details:
invoice_income_map.setdefault(d.parent, frappe._dict()).setdefault(d.income_account, [])
invoice_income_map[d.parent][d.income_account] = flt(d.amount)
return invoice_income_map
def get_invoice_tax_map(invoice_list, invoice_income_map, income_accounts):
tax_details = frappe.db.sql("""select parent, account_head,
sum(base_tax_amount_after_discount_amount) as tax_amount
from `tabSales Taxes and Charges` where parent in (%s) group by parent, account_head""" %
', '.join(['%s']*len(invoice_list)), tuple([inv.name for inv in invoice_list]), as_dict=1)
invoice_tax_map = {}
for d in tax_details:
if d.account_head in income_accounts:
			if d.account_head in invoice_income_map[d.parent]:
invoice_income_map[d.parent][d.account_head] += flt(d.tax_amount)
else:
invoice_income_map[d.parent][d.account_head] = flt(d.tax_amount)
else:
invoice_tax_map.setdefault(d.parent, frappe._dict()).setdefault(d.account_head, [])
invoice_tax_map[d.parent][d.account_head] = flt(d.tax_amount)
return invoice_income_map, invoice_tax_map
def get_invoice_so_dn_map(invoice_list):
si_items = frappe.db.sql("""select parent, sales_order, delivery_note, so_detail
from `tabSales Invoice Item` where parent in (%s)
and (ifnull(sales_order, '') != '' or ifnull(delivery_note, '') != '')""" %
', '.join(['%s']*len(invoice_list)), tuple([inv.name for inv in invoice_list]), as_dict=1)
invoice_so_dn_map = {}
for d in si_items:
if d.sales_order:
invoice_so_dn_map.setdefault(d.parent, frappe._dict()).setdefault(
"sales_order", []).append(d.sales_order)
delivery_note_list = None
if d.delivery_note:
delivery_note_list = [d.delivery_note]
elif d.sales_order:
delivery_note_list = frappe.db.sql_list("""select distinct parent from `tabDelivery Note Item`
where docstatus=1 and so_detail=%s""", d.so_detail)
if delivery_note_list:
invoice_so_dn_map.setdefault(d.parent, frappe._dict()).setdefault("delivery_note", delivery_note_list)
return invoice_so_dn_map
def get_customer_details(invoice_list):
customer_map = {}
customers = list(set([inv.customer for inv in invoice_list]))
for cust in frappe.db.sql("""select name, territory, customer_group from `tabCustomer`
where name in (%s)""" % ", ".join(["%s"]*len(customers)), tuple(customers), as_dict=1):
customer_map.setdefault(cust.name, cust)
return customer_map
|
the-stack_0_18861 | """Support for Rflink sensors."""
from rflink.parser import PACKET_FIELDS, UNITS
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
CONF_DEVICES,
CONF_NAME,
CONF_SENSOR_TYPE,
CONF_UNIT_OF_MEASUREMENT,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import (
CONF_ALIASES,
CONF_AUTOMATIC_ADD,
DATA_DEVICE_REGISTER,
DATA_ENTITY_LOOKUP,
EVENT_KEY_ID,
EVENT_KEY_SENSOR,
EVENT_KEY_UNIT,
SIGNAL_AVAILABILITY,
SIGNAL_HANDLE_EVENT,
TMP_ENTITY,
RflinkDevice,
)
SENSOR_ICONS = {
"humidity": "mdi:water-percent",
"battery": "mdi:battery",
"temperature": "mdi:thermometer",
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_AUTOMATIC_ADD, default=True): cv.boolean,
vol.Optional(CONF_DEVICES, default={}): {
cv.string: vol.Schema(
{
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_SENSOR_TYPE): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_ALIASES, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
}
)
},
},
extra=vol.ALLOW_EXTRA,
)
def lookup_unit_for_sensor_type(sensor_type):
"""Get unit for sensor type.
Async friendly.
"""
field_abbrev = {v: k for k, v in PACKET_FIELDS.items()}
return UNITS.get(field_abbrev.get(sensor_type))
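# Illustrative example (the exact mapping names are assumptions about rflink's tables):
# if PACKET_FIELDS maps 'temp' -> 'temperature', then lookup_unit_for_sensor_type('temperature')
# returns UNITS['temp'], i.e. the unit registered for that packet field.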
def devices_from_config(domain_config):
"""Parse configuration and add Rflink sensor devices."""
devices = []
for device_id, config in domain_config[CONF_DEVICES].items():
if ATTR_UNIT_OF_MEASUREMENT not in config:
config[ATTR_UNIT_OF_MEASUREMENT] = lookup_unit_for_sensor_type(
config[CONF_SENSOR_TYPE]
)
device = RflinkSensor(device_id, **config)
devices.append(device)
return devices
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Rflink platform."""
async_add_entities(devices_from_config(config))
async def add_new_device(event):
"""Check if device is known, otherwise create device entity."""
device_id = event[EVENT_KEY_ID]
device = RflinkSensor(
device_id,
event[EVENT_KEY_SENSOR],
event[EVENT_KEY_UNIT],
initial_event=event,
)
# Add device entity
async_add_entities([device])
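# Register the discovery callback only when automatic_add is enabled in the config.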
if config[CONF_AUTOMATIC_ADD]:
hass.data[DATA_DEVICE_REGISTER][EVENT_KEY_SENSOR] = add_new_device
class RflinkSensor(RflinkDevice):
"""Representation of a Rflink sensor."""
def __init__(
self, device_id, sensor_type, unit_of_measurement, initial_event=None, **kwargs
):
"""Handle sensor specific args and super init."""
self._sensor_type = sensor_type
self._unit_of_measurement = unit_of_measurement
super().__init__(device_id, initial_event=initial_event, **kwargs)
def _handle_event(self, event):
"""Domain specific event handler."""
self._state = event["value"]
async def async_added_to_hass(self):
"""Register update callback."""
# Remove temporary bogus entity_id if added
tmp_entity = TMP_ENTITY.format(self._device_id)
if (
tmp_entity
in self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_SENSOR][self._device_id]
):
self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_SENSOR][
self._device_id
].remove(tmp_entity)
# Register id and aliases
self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_SENSOR][self._device_id].append(
self.entity_id
)
if self._aliases:
for _id in self._aliases:
self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_SENSOR][_id].append(
self.entity_id
)
self.async_on_remove(
async_dispatcher_connect(
self.hass, SIGNAL_AVAILABILITY, self._availability_callback
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_HANDLE_EVENT.format(self.entity_id),
self.handle_event_callback,
)
)
# Process the initial event now that the entity is created
if self._initial_event:
self.handle_event_callback(self._initial_event)
@property
def unit_of_measurement(self):
"""Return measurement unit."""
return self._unit_of_measurement
@property
def state(self):
"""Return value."""
return self._state
@property
def icon(self):
"""Return possible sensor specific icon."""
if self._sensor_type in SENSOR_ICONS:
return SENSOR_ICONS[self._sensor_type]
|
the-stack_0_18862 | from collections import defaultdict
from corehq.apps.userreports.exceptions import StaleRebuildError, TableRebuildError
from corehq.apps.userreports.rebuild import migrate_tables, get_tables_rebuild_migrate, get_table_diffs
from corehq.apps.userreports.sql import get_metadata
from corehq.apps.userreports.tasks import rebuild_indicators
from corehq.sql_db.connections import connection_manager
from corehq.util.soft_assert import soft_assert
from pillowtop.logger import pillow_logging
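# Compare each adapter's SQLAlchemy table definition against the live schema, then rebuild or
# migrate tables as needed; rebuild failures for non-static configs are soft-asserted to admins.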
def rebuild_sql_tables(adapters):
tables_by_engine = defaultdict(dict)
all_adapters = []
for adapter in adapters:
if getattr(adapter, 'all_adapters', None):
all_adapters.extend(adapter.all_adapters)
else:
all_adapters.append(adapter)
for adapter in all_adapters:
tables_by_engine[adapter.engine_id][adapter.get_table().name] = adapter
_assert = soft_assert(notify_admins=True)
_notify_rebuild = lambda msg, obj: _assert(False, msg, obj)
for engine_id, table_map in tables_by_engine.items():
table_names = list(table_map)
engine = connection_manager.get_engine(engine_id)
diffs = get_table_diffs(engine, table_names, get_metadata(engine_id))
tables_to_act_on = get_tables_rebuild_migrate(diffs)
for table_name in tables_to_act_on.rebuild:
sql_adapter = table_map[table_name]
pillow_logging.info(
"[rebuild] Rebuilding table: %s, from config %s at rev %s",
table_name, sql_adapter.config._id, sql_adapter.config._rev
)
pillow_logging.info("[rebuild] Using config: %r", sql_adapter.config)
pillow_logging.info("[rebuild] sqlalchemy metadata: %r", get_metadata(engine_id).tables[table_name])
pillow_logging.info("[rebuild] sqlalchemy table: %r", sql_adapter.get_table())
table_diffs = [diff for diff in diffs if diff.table_name == table_name]
if not sql_adapter.config.is_static:
try:
rebuild_table(sql_adapter, table_diffs)
except TableRebuildError as e:
_notify_rebuild(str(e), sql_adapter.config.to_json())
else:
rebuild_table(sql_adapter, table_diffs)
migrate_tables_with_logging(engine, diffs, tables_to_act_on.migrate, table_map)
def migrate_tables_with_logging(engine, diffs, table_names, adapters_by_table):
migration_diffs = [diff for diff in diffs if diff.table_name in table_names]
for table in table_names:
adapter = adapters_by_table[table]
pillow_logging.info("[rebuild] Using config: %r", adapter.config)
pillow_logging.info("[rebuild] sqlalchemy metadata: %r", get_metadata(adapter.engine_id).tables[table])
pillow_logging.info("[rebuild] sqlalchemy table: %r", adapter.get_table())
changes = migrate_tables(engine, migration_diffs)
for table, diffs in changes.items():
adapter = adapters_by_table[table]
pillow_logging.info(
"[rebuild] Migrating table: %s, from config %s at rev %s",
table, adapter.config._id, adapter.config._rev
)
adapter.log_table_migrate(source='pillowtop', diffs=diffs)
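# Rebuild a single UCR table; non-static configs are checked for staleness first, and
# destructive rebuilds can be skipped per config via disable_destructive_rebuild.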
def rebuild_table(adapter, diffs=None):
config = adapter.config
if not config.is_static:
latest_rev = config.get_db().get_rev(config._id)
if config._rev != latest_rev:
raise StaleRebuildError('Tried to rebuild a stale table ({})! Ignoring...'.format(config))
diff_dicts = [diff.to_dict() for diff in diffs]
if config.disable_destructive_rebuild and adapter.table_exists:
adapter.log_table_rebuild_skipped(source='pillowtop', diffs=diff_dicts)
return
rebuild_indicators.delay(adapter.config.get_id, source='pillowtop', engine_id=adapter.engine_id,
diffs=diff_dicts)
|
the-stack_0_18863 | # Copyright 2013 Google Inc. All Rights Reserved.
"""The auth command gets tokens via oauth2."""
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as c_exc
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.credentials import store as c_store
from googlecloudsdk.core.util import console_io
class ActivateRefreshToken(base.Command):
"""Get credentials via an existing refresh token.
Use an oauth2 refresh token to manufacture credentials for Google APIs. This
token must have been acquired via some legitimate means to work. The account
provided is only used locally to help the Cloud SDK keep track of the new
credentials, so you can activate, list, and revoke the credentials in the
future.
"""
@staticmethod
def Args(parser):
"""Set args for gcloud auth activate-refresh-token."""
parser.add_argument(
'account', help='The account to associate with the refresh token.')
parser.add_argument('token', nargs='?',
help=('OAuth2 refresh token. If blank, prompt for'
' value.'))
@c_exc.RaiseToolExceptionInsteadOf(c_store.Error)
def Run(self, args):
"""Run the authentication command."""
token = args.token or console_io.PromptResponse('Refresh token: ')
if not token:
raise c_exc.InvalidArgumentException('token', 'not provided.')
creds = c_store.AcquireFromToken(token)
account = args.account
c_store.Refresh(creds)
c_store.Store(creds, account)
properties.PersistProperty(properties.VALUES.core.account, account)
project = args.project
if project:
properties.PersistProperty(properties.VALUES.core.project, project)
return creds
def Display(self, args, result):
if result:
log.Print('Activated refresh token credentials for %s.' % args.account)
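# Illustrative invocation (account and token values are placeholders):
#   gcloud auth activate-refresh-token my-account 1/example-refresh-token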
|
the-stack_0_18864 | import os
import itertools
from tempfile import TemporaryDirectory as InTemporaryDirectory
import pytest
import numpy as np
import numpy.testing as npt
from scipy.ndimage.measurements import center_of_mass
from scipy.signal import convolve
from fury import shaders
from fury import actor, window
from fury.actor import grid
from fury.decorators import skip_osx, skip_win
from fury.utils import shallow_copy, rotate, VTK_9_PLUS
from fury.testing import assert_greater, assert_greater_equal
# Allow import, but disable doctests if we don't have dipy
from fury.optpkg import optional_package
dipy, have_dipy, _ = optional_package('dipy')
matplotlib, have_matplotlib, _ = optional_package('matplotlib')
if have_dipy:
from dipy.data import get_sphere
from dipy.reconst.shm import sh_to_sf_matrix
from dipy.tracking.streamline import (center_streamlines,
transform_streamlines)
from dipy.align.tests.test_streamlinear import fornix_streamlines
from dipy.reconst.dti import color_fa, fractional_anisotropy
if have_matplotlib:
import matplotlib.pyplot as plt
from fury.convert import matplotlib_figure_to_numpy
def test_slicer(verbose=False):
scene = window.Scene()
data = (255 * np.random.rand(50, 50, 50))
affine = np.eye(4)
slicer = actor.slicer(data, affine, value_range=[data.min(), data.max()])
slicer.display(None, None, 25)
scene.add(slicer)
scene.reset_camera()
scene.reset_clipping_range()
# window.show(scene)
# copy pixels in numpy array directly
arr = window.snapshot(scene, 'test_slicer.png', offscreen=True)
if verbose:
print(arr.sum())
print(np.sum(arr == 0))
print(np.sum(arr > 0))
print(arr.shape)
print(arr.dtype)
report = window.analyze_snapshot(arr, find_objects=True)
npt.assert_equal(report.objects, 1)
# print(arr[..., 0])
# The slicer can cut directly a smaller part of the image
slicer.display_extent(10, 30, 10, 30, 35, 35)
scene.ResetCamera()
scene.add(slicer)
# save pixels in png file not a numpy array
with InTemporaryDirectory() as tmpdir:
fname = os.path.join(tmpdir, 'slice.png')
window.snapshot(scene, fname, offscreen=True)
report = window.analyze_snapshot(fname, find_objects=True)
npt.assert_equal(report.objects, 1)
# Test Errors
data_4d = (255 * np.random.rand(50, 50, 50, 50))
npt.assert_raises(ValueError, actor.slicer, data_4d)
npt.assert_raises(ValueError, actor.slicer, np.ones(10))
scene.clear()
rgb = np.zeros((30, 30, 30, 3), dtype='f8')
rgb[..., 0] = 255
rgb_actor = actor.slicer(rgb)
scene.add(rgb_actor)
scene.reset_camera()
scene.reset_clipping_range()
arr = window.snapshot(scene, offscreen=True)
report = window.analyze_snapshot(arr, colors=[(255, 0, 0)])
npt.assert_equal(report.objects, 1)
npt.assert_equal(report.colors_found, [True])
lut = actor.colormap_lookup_table(scale_range=(0, 255),
hue_range=(0.4, 1.),
saturation_range=(1, 1.),
value_range=(0., 1.))
scene.clear()
slicer_lut = actor.slicer(data, lookup_colormap=lut)
slicer_lut.display(10, None, None)
slicer_lut.display(None, 10, None)
slicer_lut.display(None, None, 10)
slicer_lut.opacity(0.5)
slicer_lut.tolerance(0.03)
slicer_lut2 = slicer_lut.copy()
npt.assert_equal(slicer_lut2.GetOpacity(), 0.5)
npt.assert_equal(slicer_lut2.picker.GetTolerance(), 0.03)
slicer_lut2.opacity(1)
slicer_lut2.tolerance(0.025)
slicer_lut2.display(None, None, 10)
scene.add(slicer_lut2)
scene.reset_clipping_range()
arr = window.snapshot(scene, offscreen=True)
report = window.analyze_snapshot(arr, find_objects=True)
npt.assert_equal(report.objects, 1)
scene.clear()
data = (255 * np.random.rand(50, 50, 50))
affine = np.diag([1, 3, 2, 1])
slicer = actor.slicer(data, affine, interpolation='nearest')
slicer.display(None, None, 25)
scene.add(slicer)
scene.reset_camera()
scene.reset_clipping_range()
arr = window.snapshot(scene, offscreen=True)
report = window.analyze_snapshot(arr, find_objects=True)
npt.assert_equal(report.objects, 1)
npt.assert_equal(data.shape, slicer.shape)
slicer2 = slicer.copy()
npt.assert_equal(slicer2.shape, slicer.shape)
scene.clear()
data = (255 * np.random.rand(50, 50, 50))
affine = np.diag([1, 3, 2, 1])
if have_dipy:
from dipy.align.reslice import reslice
data2, affine2 = reslice(data, affine, zooms=(1, 3, 2),
new_zooms=(1, 1, 1))
slicer = actor.slicer(data2, affine2, interpolation='linear')
slicer.display(None, None, 25)
scene.add(slicer)
scene.reset_camera()
scene.reset_clipping_range()
# window.show(scene, reset_camera=False)
arr = window.snapshot(scene, offscreen=True)
report = window.analyze_snapshot(arr, find_objects=True)
npt.assert_equal(report.objects, 1)
npt.assert_array_equal([1, 3, 2] * np.array(data.shape),
np.array(slicer.shape))
def test_surface():
import math
import random
from scipy.spatial import Delaunay
size = 11
vertices = list()
for i in range(-size, size):
for j in range(-size, size):
fact1 = - math.sin(i) * math.cos(j)
fact2 = - math.exp(abs(1 - math.sqrt(i ** 2 + j ** 2) / math.pi))
z_coord = -abs(fact1 * fact2)
vertices.append([i, j, z_coord])
c_arr = np.random.rand(len(vertices), 3)
random.shuffle(vertices)
vertices = np.array(vertices)
tri = Delaunay(vertices[:, [0, 1]])
faces = tri.simplices
c_loop = [None, c_arr]
f_loop = [None, faces]
s_loop = [None, "butterfly", "loop"]
for smooth_type in s_loop:
for face in f_loop:
for color in c_loop:
scene = window.Scene(background=(1, 1, 1))
surface_actor = actor.surface(vertices, faces=face,
colors=color, smooth=smooth_type)
scene.add(surface_actor)
# window.show(scene, size=(600, 600), reset_camera=False)
arr = window.snapshot(scene, 'test_surface.png',
offscreen=True)
report = window.analyze_snapshot(arr, find_objects=True)
npt.assert_equal(report.objects, 1)
def test_contour_from_roi(interactive=False):
# Render volume
scene = window.Scene()
data = np.zeros((50, 50, 50))
data[20:30, 25, 25] = 1.
data[25, 20:30, 25] = 1.
affine = np.eye(4)
surface = actor.contour_from_roi(data, affine,
color=np.array([1, 0, 1]),
opacity=.5)
scene.add(surface)
scene.reset_camera()
scene.reset_clipping_range()
if interactive:
window.show(scene)
# Test Errors
npt.assert_raises(ValueError, actor.contour_from_roi, np.ones(50))
# Test binarization
scene2 = window.Scene()
data2 = np.zeros((50, 50, 50))
data2[20:30, 25, 25] = 1.
data2[35:40, 25, 25] = 1.
affine = np.eye(4)
surface2 = actor.contour_from_roi(data2, affine,
color=np.array([0, 1, 1]),
opacity=.5)
scene2.add(surface2)
scene2.reset_camera()
scene2.reset_clipping_range()
if interactive:
window.show(scene2)
arr = window.snapshot(scene, 'test_surface.png', offscreen=True)
arr2 = window.snapshot(scene2, 'test_surface2.png', offscreen=True)
report = window.analyze_snapshot(arr, find_objects=True)
report2 = window.analyze_snapshot(arr2, find_objects=True)
npt.assert_equal(report.objects, 1)
npt.assert_equal(report2.objects, 2)
# test on real streamlines using tracking example
if have_dipy:
from dipy.data import read_stanford_labels
from dipy.reconst.shm import CsaOdfModel
from dipy.data import default_sphere
from dipy.direction import peaks_from_model
from fury.colormap import line_colors
from dipy.tracking import utils
try:
from dipy.tracking.local import ThresholdTissueClassifier \
as ThresholdStoppingCriterion
from dipy.tracking.local import LocalTracking
except ImportError:
from dipy.tracking.stopping_criterion import \
ThresholdStoppingCriterion
from dipy.tracking.local_tracking import LocalTracking
hardi_img, gtab, labels_img = read_stanford_labels()
data = np.asanyarray(hardi_img.dataobj)
labels = np.asanyarray(labels_img.dataobj)
affine = hardi_img.affine
white_matter = (labels == 1) | (labels == 2)
csa_model = CsaOdfModel(gtab, sh_order=6)
csa_peaks = peaks_from_model(csa_model, data, default_sphere,
relative_peak_threshold=.8,
min_separation_angle=45,
mask=white_matter)
classifier = ThresholdStoppingCriterion(csa_peaks.gfa, .25)
seed_mask = labels == 2
seeds = utils.seeds_from_mask(seed_mask, density=[1, 1, 1],
affine=affine)
# Initialization of LocalTracking.
# The computation happens in the next step.
streamlines = LocalTracking(csa_peaks, classifier, seeds, affine,
step_size=2)
# Compute streamlines and store as a list.
streamlines = list(streamlines)
# Prepare the display objects.
streamlines_actor = actor.line(streamlines, line_colors(streamlines))
seedroi_actor = actor.contour_from_roi(seed_mask, affine,
[0, 1, 1], 0.5)
# Create the 3d display.
r = window.Scene()
r2 = window.Scene()
r.add(streamlines_actor)
arr3 = window.snapshot(r, 'test_surface3.png', offscreen=True)
report3 = window.analyze_snapshot(arr3, find_objects=True)
r2.add(streamlines_actor)
r2.add(seedroi_actor)
arr4 = window.snapshot(r2, 'test_surface4.png', offscreen=True)
report4 = window.analyze_snapshot(arr4, find_objects=True)
# assert that the seed ROI rendering is not far
# away from the streamlines (affine error)
npt.assert_equal(report3.objects, report4.objects)
# window.show(r)
# window.show(r2)
@pytest.mark.skipif(skip_osx, reason="This test does not work on macOS + "
"Travis. It works on a local machine"
" with 3 different version of VTK. There"
" are 2 problems to check: Travis macOS"
" vs Azure macOS and an issue with"
" vtkAssembly + actor opacity.")
def test_contour_from_label(interactive=False):
# Render volume
scene = window.Scene()
data = np.zeros((50, 50, 50))
data[5:15, 1:10, 25] = 1.
data[25:35, 1:10, 25] = 2.
data[40:49, 1:10, 25] = 3.
color = np.array([[255, 0, 0, 0.6],
[0, 255, 0, 0.5],
[0, 0, 255, 1.0]])
surface = actor.contour_from_label(data, color=color)
scene.add(surface)
scene.reset_camera()
scene.reset_clipping_range()
if interactive:
window.show(scene)
# Test Errors
with npt.assert_raises(ValueError):
actor.contour_from_label(data, color=np.array([1, 2, 3]))
actor.contour_from_label(np.ones(50))
# Test binarization
scene2 = window.Scene()
data2 = np.zeros((50, 50, 50))
data2[20:30, 25, 25] = 1.
data2[25, 20:30, 25] = 2.
color2 = np.array([[255, 0, 255],
[255, 255, 0]])
surface2 = actor.contour_from_label(data2, color=color2)
scene2.add(surface2)
scene2.reset_camera()
scene2.reset_clipping_range()
if interactive:
window.show(scene2)
arr = window.snapshot(scene, 'test_surface.png', offscreen=True,
order_transparent=False)
arr2 = window.snapshot(scene2, 'test_surface2.png', offscreen=True,
order_transparent=True)
report = window.analyze_snapshot(arr, colors=[(255, 0, 0),
(0, 255, 0),
(0, 0, 255)],
find_objects=True)
report2 = window.analyze_snapshot(arr2, find_objects=True)
npt.assert_equal(report.objects, 3)
npt.assert_equal(report2.objects, 1)
actor.contour_from_label(data)
def test_streamtube_and_line_actors():
scene = window.Scene()
line1 = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2.]])
line2 = line1 + np.array([0.5, 0., 0.])
lines = [line1, line2]
colors = np.array([[1, 0, 0], [0, 0, 1.]])
c = actor.line(lines, colors, linewidth=3)
scene.add(c)
c = actor.line(lines, colors, spline_subdiv=5, linewidth=3)
scene.add(c)
# create streamtubes of the same lines and shift them a bit
c2 = actor.streamtube(lines, colors, linewidth=.1)
c2.SetPosition(2, 0, 0)
scene.add(c2)
arr = window.snapshot(scene)
report = window.analyze_snapshot(arr,
colors=[(255, 0, 0), (0, 0, 255)],
find_objects=True)
npt.assert_equal(report.objects, 4)
npt.assert_equal(report.colors_found, [True, True])
# as before with splines
c2 = actor.streamtube(lines, colors, spline_subdiv=5, linewidth=.1)
c2.SetPosition(2, 0, 0)
scene.add(c2)
arr = window.snapshot(scene)
report = window.analyze_snapshot(arr,
colors=[(255, 0, 0), (0, 0, 255)],
find_objects=True)
npt.assert_equal(report.objects, 4)
npt.assert_equal(report.colors_found, [True, True])
c3 = actor.line(lines, colors, depth_cue=True, fake_tube=True)
shader_obj = c3.GetShaderProperty() if VTK_9_PLUS else c3.GetMapper()
mapper_code = shader_obj.GetGeometryShaderCode()
file_code = shaders.load("line.geom")
npt.assert_equal(mapper_code, file_code)
npt.assert_equal(c3.GetProperty().GetRenderLinesAsTubes(), True)
@pytest.mark.skipif(not have_dipy, reason="Requires DIPY")
def test_bundle_maps():
scene = window.Scene()
bundle = fornix_streamlines()
bundle, _ = center_streamlines(bundle)
mat = np.array([[1, 0, 0, 100],
[0, 1, 0, 100],
[0, 0, 1, 100],
[0, 0, 0, 1.]])
bundle = transform_streamlines(bundle, mat)
# metric = np.random.rand(*(200, 200, 200))
metric = 100 * np.ones((200, 200, 200))
# add lower values
metric[100, :, :] = 100 * 0.5
# create a nice orange-red colormap
lut = actor.colormap_lookup_table(scale_range=(0., 100.),
hue_range=(0., 0.1),
saturation_range=(1, 1),
value_range=(1., 1))
line = actor.line(bundle, metric, linewidth=0.1, lookup_colormap=lut)
scene.add(line)
scene.add(actor.scalar_bar(lut, ' '))
report = window.analyze_scene(scene)
npt.assert_almost_equal(report.actors, 1)
# window.show(scene)
scene.clear()
nb_points = np.sum([len(b) for b in bundle])
values = 100 * np.random.rand(nb_points)
# values[:nb_points/2] = 0
line = actor.streamtube(bundle, values, linewidth=0.1, lookup_colormap=lut)
scene.add(line)
# window.show(scene)
report = window.analyze_scene(scene)
npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')
scene.clear()
colors = np.random.rand(nb_points, 3)
# values[:nb_points/2] = 0
line = actor.line(bundle, colors, linewidth=2)
scene.add(line)
# window.show(scene)
report = window.analyze_scene(scene)
npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')
# window.show(scene)
arr = window.snapshot(scene)
report2 = window.analyze_snapshot(arr)
npt.assert_equal(report2.objects, 1)
# try other input options for colors
scene.clear()
actor.line(bundle, (1., 0.5, 0))
actor.line(bundle, np.arange(len(bundle)))
actor.line(bundle)
colors = [np.random.rand(*b.shape) for b in bundle]
actor.line(bundle, colors=colors)
@pytest.mark.skipif(not have_dipy, reason="Requires DIPY")
def test_odf_slicer(interactive=False):
# Prepare our data
sphere = get_sphere('repulsion100')
shape = (11, 11, 11, sphere.vertices.shape[0])
odfs = np.ones(shape)
affine = np.array([[2.0, 0.0, 0.0, 3.0],
[0.0, 2.0, 0.0, 3.0],
[0.0, 0.0, 2.0, 1.0],
[0.0, 0.0, 0.0, 1.0]])
mask = np.ones(odfs.shape[:3], bool)
mask[:4, :4, :4] = False
# Test that affine and mask work
odf_actor = actor.odf_slicer(odfs, sphere=sphere, affine=affine, mask=mask,
scale=.25, colormap='blues')
k = 2
I, J, _ = odfs.shape[:3]
odf_actor.display_extent(0, I - 1, 0, J - 1, k, k)
scene = window.Scene()
scene.add(odf_actor)
scene.reset_camera()
scene.reset_clipping_range()
if interactive:
window.show(scene, reset_camera=False)
arr = window.snapshot(scene)
report = window.analyze_snapshot(arr, find_objects=True)
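# 11 x 11 glyphs in the displayed slice, minus the 4 x 4 corner masked out above.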
npt.assert_equal(report.objects, 11 * 11 - 16)
# Test that global colormap works
odf_actor = actor.odf_slicer(odfs, sphere=sphere, mask=mask, scale=.25,
colormap='blues', norm=False, global_cm=True)
scene.clear()
scene.add(odf_actor)
scene.reset_camera()
scene.reset_clipping_range()
if interactive:
window.show(scene)
# Test that the most basic odf_slicer instantiation works
odf_actor = actor.odf_slicer(odfs)
scene.clear()
scene.add(odf_actor)
scene.reset_camera()
scene.reset_clipping_range()
if interactive:
window.show(scene)
# Test that odf_slicer.display works properly
scene.clear()
scene.add(odf_actor)
scene.add(actor.axes((11, 11, 11)))
for i in range(11):
odf_actor.display(i, None, None)
if interactive:
window.show(scene)
for j in range(11):
odf_actor.display(None, j, None)
if interactive:
window.show(scene)
# With mask equal to zero everything should be black
mask = np.zeros(odfs.shape[:3])
odf_actor = actor.odf_slicer(odfs, sphere=sphere, mask=mask,
scale=.25, colormap='blues',
norm=False, global_cm=True)
scene.clear()
scene.add(odf_actor)
scene.reset_camera()
scene.reset_clipping_range()
if interactive:
window.show(scene)
# global_cm=True with colormap=None should raise an error
npt.assert_raises(IOError, actor.odf_slicer, odfs, sphere=sphere,
mask=None, scale=.25, colormap=None, norm=False,
global_cm=True)
# Dimension mismatch between sphere vertices and number
# of SF coefficients will raise an error.
npt.assert_raises(ValueError, actor.odf_slicer, odfs, mask=None,
sphere=get_sphere('repulsion200'), scale=.25)
# colormap=None and global_cm=False results in directionally encoded colors
odf_actor = actor.odf_slicer(odfs, sphere=sphere, mask=None,
scale=.25, colormap=None,
norm=False, global_cm=False)
scene.clear()
scene.add(odf_actor)
scene.reset_camera()
scene.reset_clipping_range()
if interactive:
window.show(scene)
# Test that SH coefficients input works
B = sh_to_sf_matrix(sphere, sh_order=4, return_inv=False)
odfs = np.zeros((11, 11, 11, B.shape[0]))
odfs[..., 0] = 1.0
odf_actor = actor.odf_slicer(odfs, sphere=sphere, B_matrix=B)
scene.clear()
scene.add(odf_actor)
scene.reset_camera()
scene.reset_clipping_range()
if interactive:
window.show(scene)
# Dimension mismatch between sphere vertices and dimension of
# B matrix will raise an error.
npt.assert_raises(ValueError, actor.odf_slicer, odfs, mask=None,
sphere=get_sphere('repulsion200'))
# Test that constant colormap color works. Also test that sphere
# normals are oriented correctly. Will show purple spheres with
# a white contour.
odf_contour = actor.odf_slicer(odfs, sphere=sphere, B_matrix=B,
colormap=(255, 255, 255))
odf_contour.GetProperty().SetAmbient(1.0)
odf_contour.GetProperty().SetFrontfaceCulling(True)
odf_actor = actor.odf_slicer(odfs, sphere=sphere, B_matrix=B,
colormap=(255, 0, 255), scale=0.4)
scene.clear()
scene.add(odf_contour)
scene.add(odf_actor)
scene.reset_camera()
scene.reset_clipping_range()
if interactive:
window.show(scene)
# Test that we can change the sphere on an active actor
new_sphere = get_sphere('symmetric362')
new_B = sh_to_sf_matrix(new_sphere, sh_order=4, return_inv=False)
odf_actor.update_sphere(new_sphere.vertices, new_sphere.faces, new_B)
if interactive:
window.show(scene)
del odf_actor
del odfs
def test_peak_slicer(interactive=False):
_peak_dirs = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype='f4')
# peak_dirs.shape = (1, 1, 1) + peak_dirs.shape
peak_dirs = np.zeros((11, 11, 11, 3, 3))
peak_values = np.random.rand(11, 11, 11, 3)
peak_dirs[:, :, :] = _peak_dirs
scene = window.Scene()
peak_actor = actor.peak_slicer(peak_dirs)
scene.add(peak_actor)
scene.add(actor.axes((11, 11, 11)))
if interactive:
window.show(scene)
scene.clear()
scene.add(peak_actor)
scene.add(actor.axes((11, 11, 11)))
for k in range(11):
peak_actor.display_extent(0, 10, 0, 10, k, k)
for j in range(11):
peak_actor.display_extent(0, 10, j, j, 0, 10)
for i in range(11):
peak_actor.display(i, None, None)
scene.rm_all()
peak_actor_sym = actor.peak_slicer(
peak_dirs,
peak_values,
mask=None,
affine=np.diag([3, 2, 1, 1]),
colors=None,
opacity=0.8,
linewidth=3,
lod=True,
lod_points=10 ** 4,
lod_points_size=3)
peak_actor_asym = actor.peak_slicer(
peak_dirs,
peak_values,
mask=None,
affine=np.diag([3, 2, 1, 1]),
colors=None,
opacity=0.8,
linewidth=3,
lod=True,
lod_points=10 ** 4,
lod_points_size=3,
symmetric=False)
scene.add(peak_actor_sym)
scene.add(peak_actor_asym)
scene.add(actor.axes((11, 11, 11)))
if interactive:
window.show(scene)
report = window.analyze_scene(scene)
ex = ['vtkLODActor', 'vtkLODActor', 'vtkOpenGLActor']
npt.assert_equal(report.actors_classnames, ex)
# 6d data
data_6d = (255 * np.random.rand(5, 5, 5, 5, 5, 5))
npt.assert_raises(ValueError, actor.peak_slicer, data_6d, data_6d)
def test_peak():
# 4D dirs data
dirs_data_4d = np.random.rand(3, 4, 5, 6)
npt.assert_raises(ValueError, actor.peak, dirs_data_4d)
# 6D dirs data
dirs_data_6d = np.random.rand(7, 8, 9, 10, 11, 12)
npt.assert_raises(ValueError, actor.peak, dirs_data_6d)
# 2D directions
dirs_2d = np.random.rand(3, 4, 5, 6, 2)
npt.assert_raises(ValueError, actor.peak, dirs_2d)
# 4D directions
dirs_4d = np.random.rand(3, 4, 5, 6, 4)
npt.assert_raises(ValueError, actor.peak, dirs_4d)
valid_dirs = np.random.rand(3, 4, 5, 6, 3)
# 3D vals data
vals_data_3d = np.random.rand(3, 4, 5)
npt.assert_raises(ValueError, actor.peak, valid_dirs,
peaks_values=vals_data_3d)
# 5D vals data
vals_data_5d = np.random.rand(6, 7, 8, 9, 10)
npt.assert_raises(ValueError, actor.peak, valid_dirs,
peaks_values=vals_data_5d)
# Diff vals data #1
vals_data_diff_1 = np.random.rand(3, 4, 5, 9)
npt.assert_raises(ValueError, actor.peak, valid_dirs,
peaks_values=vals_data_diff_1)
# Diff vals data #2
vals_data_diff_2 = np.random.rand(7, 8, 9, 10)
npt.assert_raises(ValueError, actor.peak, valid_dirs,
peaks_values=vals_data_diff_2)
# 2D mask
mask_2d = np.random.rand(2, 3)
npt.assert_warns(UserWarning, actor.peak, valid_dirs, mask=mask_2d)
# 4D mask
mask_4d = np.random.rand(4, 5, 6, 7)
npt.assert_warns(UserWarning, actor.peak, valid_dirs, mask=mask_4d)
# Diff mask
diff_mask = np.random.rand(6, 7, 8)
npt.assert_warns(UserWarning, actor.peak, valid_dirs, mask=diff_mask)
# Valid mask
dirs000 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
dirs100 = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]])
peaks_dirs = np.empty((2, 1, 1, 3, 3))
peaks_dirs[0, 0, 0, :, :] = dirs000
peaks_dirs[1, 0, 0, :, :] = dirs100
peaks_vals = np.ones((2, 1, 1, 3)) * .5
mask = np.zeros((2, 1, 1))
mask[0, 0, 0] = 1
peaks_actor = actor.peak(peaks_dirs, peaks_values=peaks_vals, mask=mask)
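# Only voxel (0, 0, 0) is inside the mask, so both center bounds collapse to the origin.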
npt.assert_equal(peaks_actor.min_centers, [0, 0, 0])
npt.assert_equal(peaks_actor.max_centers, [0, 0, 0])
@pytest.mark.skipif(not have_dipy, reason="Requires DIPY")
def test_tensor_slicer(interactive=False):
evals = np.array([1.4, .35, .35]) * 10 ** (-3)
evecs = np.eye(3)
mevals = np.zeros((3, 2, 4, 3))
mevecs = np.zeros((3, 2, 4, 3, 3))
mevals[..., :] = evals
mevecs[..., :, :] = evecs
sphere = get_sphere('symmetric724')
affine = np.eye(4)
scene = window.Scene()
tensor_actor = actor.tensor_slicer(mevals, mevecs, affine=affine,
sphere=sphere, scale=.3, opacity=0.4)
_, J, K = mevals.shape[:3]
scene.add(tensor_actor)
scene.reset_camera()
scene.reset_clipping_range()
tensor_actor.display_extent(0, 1, 0, J, 0, K)
if interactive:
window.show(scene, reset_camera=False)
tensor_actor.GetProperty().SetOpacity(1.0)
if interactive:
window.show(scene, reset_camera=False)
npt.assert_equal(scene.GetActors().GetNumberOfItems(), 1)
# Test extent
big_extent = scene.GetActors().GetLastActor().GetBounds()
big_extent_x = abs(big_extent[1] - big_extent[0])
tensor_actor.display(x=2)
if interactive:
window.show(scene, reset_camera=False)
small_extent = scene.GetActors().GetLastActor().GetBounds()
small_extent_x = abs(small_extent[1] - small_extent[0])
npt.assert_equal(big_extent_x > small_extent_x, True)
# Test empty mask
empty_actor = actor.tensor_slicer(mevals, mevecs, affine=affine,
mask=np.zeros(mevals.shape[:3]),
sphere=sphere, scale=.3)
npt.assert_equal(empty_actor.GetMapper(), None)
# Test mask
mask = np.ones(mevals.shape[:3])
mask[:2, :3, :3] = 0
cfa = color_fa(fractional_anisotropy(mevals), mevecs)
tensor_actor = actor.tensor_slicer(mevals, mevecs, affine=affine,
mask=mask, scalar_colors=cfa,
sphere=sphere, scale=.3)
scene.clear()
scene.add(tensor_actor)
scene.reset_camera()
scene.reset_clipping_range()
if interactive:
window.show(scene, reset_camera=False)
mask_extent = scene.GetActors().GetLastActor().GetBounds()
mask_extent_x = abs(mask_extent[1] - mask_extent[0])
npt.assert_equal(big_extent_x > mask_extent_x, True)
# test display
tensor_actor.display()
current_extent = scene.GetActors().GetLastActor().GetBounds()
current_extent_x = abs(current_extent[1] - current_extent[0])
npt.assert_equal(big_extent_x > current_extent_x, True)
if interactive:
window.show(scene, reset_camera=False)
tensor_actor.display(y=1)
current_extent = scene.GetActors().GetLastActor().GetBounds()
current_extent_y = abs(current_extent[3] - current_extent[2])
big_extent_y = abs(big_extent[3] - big_extent[2])
npt.assert_equal(big_extent_y > current_extent_y, True)
if interactive:
window.show(scene, reset_camera=False)
tensor_actor.display(z=1)
current_extent = scene.GetActors().GetLastActor().GetBounds()
current_extent_z = abs(current_extent[5] - current_extent[4])
big_extent_z = abs(big_extent[5] - big_extent[4])
npt.assert_equal(big_extent_z > current_extent_z, True)
if interactive:
window.show(scene, reset_camera=False)
# Test error handling of the method when
# incompatible dimensions of mevals and mevecs are passed.
mevals = np.zeros((3, 2, 3))
mevecs = np.zeros((3, 2, 4, 3, 3))
with npt.assert_raises(RuntimeError):
tensor_actor = actor.tensor_slicer(mevals, mevecs, affine=affine,
mask=mask, scalar_colors=cfa,
sphere=sphere, scale=.3)
def test_dots(interactive=False):
points = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]])
dots_actor = actor.dots(points, color=(0, 255, 0))
scene = window.Scene()
scene.add(dots_actor)
scene.reset_camera()
scene.reset_clipping_range()
if interactive:
window.show(scene, reset_camera=False)
npt.assert_equal(scene.GetActors().GetNumberOfItems(), 1)
extent = scene.GetActors().GetLastActor().GetBounds()
npt.assert_equal(extent, (0.0, 1.0, 0.0, 1.0, 0.0, 0.0))
arr = window.snapshot(scene)
report = window.analyze_snapshot(arr,
colors=(0, 255, 0))
npt.assert_equal(report.objects, 3)
# Test one point
points = np.array([0, 0, 0])
dot_actor = actor.dots(points, color=(0, 0, 255))
scene.clear()
scene.add(dot_actor)
scene.reset_camera()
scene.reset_clipping_range()
arr = window.snapshot(scene)
report = window.analyze_snapshot(arr,
colors=(0, 0, 255))
npt.assert_equal(report.objects, 1)
def test_points(interactive=False):
points = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]])
colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
opacity = 0.5
points_actor = actor.point(points, colors, opacity=opacity)
scene = window.Scene()
scene.add(points_actor)
scene.reset_camera()
scene.reset_clipping_range()
if interactive:
window.show(scene, reset_camera=False)
npt.assert_equal(scene.GetActors().GetNumberOfItems(), 1)
npt.assert_equal(points_actor.GetProperty().GetOpacity(), opacity)
arr = window.snapshot(scene)
report = window.analyze_snapshot(arr,
colors=colors)
npt.assert_equal(report.objects, 3)
def test_labels(interactive=False):
text_actor = actor.label("Hello")
scene = window.Scene()
scene.add(text_actor)
scene.reset_camera()
scene.reset_clipping_range()
if interactive:
window.show(scene, reset_camera=False)
npt.assert_equal(scene.GetActors().GetNumberOfItems(), 1)
def test_spheres(interactive=False):
xyzr = np.array([[0, 0, 0, 10], [100, 0, 0, 25], [200, 0, 0, 50]])
colors = np.array([[1, 0, 0, 0.3], [0, 1, 0, 0.4], [0, 0, 1., 0.99]])
opacity = 0.5
scene = window.Scene()
sphere_actor = actor.sphere(centers=xyzr[:, :3], colors=colors[:],
radii=xyzr[:, 3], opacity=opacity)
scene.add(sphere_actor)
if interactive:
window.show(scene, order_transparent=True)
arr = window.snapshot(scene)
report = window.analyze_snapshot(arr,
colors=colors)
npt.assert_equal(report.objects, 3)
npt.assert_equal(sphere_actor.GetProperty().GetOpacity(), opacity)
# test with an unique color for all centers
scene.clear()
sphere_actor = actor.sphere(centers=xyzr[:, :3],
colors=np.array([1, 0, 0]),
radii=xyzr[:, 3])
scene.add(sphere_actor)
arr = window.snapshot(scene)
report = window.analyze_snapshot(arr, colors=(1, 0, 0))
npt.assert_equal(report.colors_found, [True])
def test_cones_vertices_faces(interactive=False):
scene = window.Scene()
centers = np.array([[0, 0, 0], [20, 0, 0], [40, 0, 0]])
directions = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
colors = np.array([[1, 0, 0, 0.3], [0, 1, 0, 0.4], [0, 0, 1., 0.99]])
vertices = np.array([[0.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[10.0, 0.0, 0.0], [0.0, 0.0, 10.0]])
faces = np.array([[0, 1, 3], [0, 1, 2]])
cone_actor = actor.cone(centers=centers, directions=directions,
colors=colors[:], vertices=vertices,
faces=faces)
scene.add(cone_actor)
if interactive:
window.show(scene, order_transparent=True)
arr = window.snapshot(scene)
report = window.analyze_snapshot(arr, colors=colors)
npt.assert_equal(report.objects, 3)
scene.clear()
def test_basic_geometry_actor(interactive=False):
centers = np.array([[4, 0, 0], [0, 4, 0], [0, 0, 0]])
colors = np.array([[1, 0, 0, 0.4], [0, 1, 0, 0.8], [0, 0, 1, 0.5]])
directions = np.array([[1, 1, 0]])
scale_list = [1, 2, (1, 1, 1), [3, 2, 1], np.array([1, 2, 3]),
np.array([[1, 2, 3], [1, 3, 2], [3, 1, 2]])]
actor_list = [[actor.cube, {}],
[actor.box, {}],
[actor.square, {}],
[actor.rectangle, {}],
[actor.frustum, {}],
[actor.octagonalprism, {}],
[actor.pentagonalprism, {}],
[actor.triangularprism, {}]]
for act_func, extra_args in actor_list:
for scale in scale_list:
scene = window.Scene()
g_actor = act_func(centers=centers, colors=colors,
directions=directions, scales=scale,
**extra_args)
scene.add(g_actor)
if interactive:
window.show(scene)
arr = window.snapshot(scene)
report = window.analyze_snapshot(arr, colors=colors)
msg = 'Failed with {}, scale={}'.format(act_func.__name__, scale)
npt.assert_equal(report.objects, 3, err_msg=msg)
def test_advanced_geometry_actor(interactive=False):
xyz = np.array([[0, 0, 0], [50, 0, 0], [100, 0, 0]])
dirs = np.array([[0, 1, 0], [1, 0, 0], [0, 0.5, 0.5]])
actor_list = [[actor.cone, {'directions': dirs, 'resolution': 8}],
[actor.arrow, {'directions': dirs, 'resolution': 9}],
[actor.cylinder, {'directions': dirs}]]
scene = window.Scene()
for act_func, extra_args in actor_list:
colors = np.array([[1, 0, 0, 0.3], [0, 1, 0, 0.4], [1, 1, 0, 1]])
heights = np.array([5, 7, 10])
geom_actor = act_func(centers=xyz, heights=heights, colors=colors[:],
**extra_args)
scene.add(geom_actor)
if interactive:
window.show(scene, order_transparent=True)
arr = window.snapshot(scene)
report = window.analyze_snapshot(arr, colors=colors)
npt.assert_equal(report.objects, 3)
colors = np.array([1.0, 1.0, 1.0, 1.0])
heights = 10
scene.clear()
geom_actor = act_func(centers=xyz[:, :3], heights=10, colors=colors[:],
**extra_args)
scene.add(geom_actor)
if interactive:
window.show(scene, order_transparent=True)
arr = window.snapshot(scene)
report = window.analyze_snapshot(arr, colors=[colors])
npt.assert_equal(report.objects, 3)
scene.clear()
def test_text_3d():
msg = 'I \nlove\n FURY'
txt_actor = actor.text_3d(msg)
npt.assert_equal(txt_actor.get_message().lower(), msg.lower())
npt.assert_raises(ValueError, txt_actor.justification, 'middle')
npt.assert_raises(ValueError, txt_actor.vertical_justification, 'center')
scene = window.Scene()
scene.add(txt_actor)
txt_actor.vertical_justification('middle')
txt_actor.justification('right')
arr_right = window.snapshot(scene, size=(1920, 1080), offscreen=True)
scene.clear()
txt_actor.vertical_justification('middle')
txt_actor.justification('left')
scene.add(txt_actor)
arr_left = window.snapshot(scene, size=(1920, 1080), offscreen=True)
# X axis of right alignment should have a lower center of mass position
# than left
assert_greater(center_of_mass(arr_left)[0], center_of_mass(arr_right)[0])
scene.clear()
txt_actor.justification('center')
txt_actor.vertical_justification('top')
scene.add(txt_actor)
arr_top = window.snapshot(scene, size=(1920, 1080), offscreen=True)
scene.clear()
txt_actor.justification('center')
txt_actor.vertical_justification('bottom')
scene.add(txt_actor)
arr_bottom = window.snapshot(scene, size=(1920, 1080), offscreen=True)
assert_greater_equal(center_of_mass(arr_bottom)[0],
center_of_mass(arr_top)[0])
scene.clear()
txt_actor.font_style(bold=True, italic=True, shadow=True)
scene.add(txt_actor)
arr = window.snapshot(scene, size=(1920, 1080), offscreen=True)
assert_greater_equal(arr.mean(), arr_bottom.mean())
def test_container():
container = actor.Container()
axes = actor.axes()
container.add(axes)
npt.assert_equal(len(container), 1)
npt.assert_equal(container.GetBounds(), axes.GetBounds())
npt.assert_equal(container.GetCenter(), axes.GetCenter())
npt.assert_equal(container.GetLength(), axes.GetLength())
container.clear()
npt.assert_equal(len(container), 0)
container.add(axes)
container_shallow_copy = shallow_copy(container)
container_shallow_copy.add(actor.axes())
assert_greater(len(container_shallow_copy), len(container))
npt.assert_equal(container_shallow_copy.GetPosition(),
container.GetPosition())
npt.assert_equal(container_shallow_copy.GetVisibility(),
container.GetVisibility())
# Check that the shallow copy does not modify the original container
container_shallow_copy.SetVisibility(False)
npt.assert_equal(container.GetVisibility(), True)
container_shallow_copy.SetPosition((1, 1, 1))
npt.assert_equal(container.GetPosition(), (0, 0, 0))
def test_grid(_interactive=False):
vol1 = np.zeros((100, 100, 100))
vol1[25:75, 25:75, 25:75] = 100
contour_actor1 = actor.contour_from_roi(vol1, np.eye(4),
(1., 0, 0), 1.)
vol2 = np.zeros((100, 100, 100))
vol2[25:75, 25:75, 25:75] = 100
contour_actor2 = actor.contour_from_roi(vol2, np.eye(4),
(1., 0.5, 0), 1.)
vol3 = np.zeros((100, 100, 100))
vol3[25:75, 25:75, 25:75] = 100
contour_actor3 = actor.contour_from_roi(vol3, np.eye(4),
(1., 0.5, 0.5), 1.)
scene = window.Scene()
actors = []
texts = []
actors.append(contour_actor1)
text_actor1 = actor.text_3d('cube 1', justification='center')
texts.append(text_actor1)
actors.append(contour_actor2)
text_actor2 = actor.text_3d('cube 2', justification='center')
texts.append(text_actor2)
actors.append(contour_actor3)
text_actor3 = actor.text_3d('cube 3', justification='center')
texts.append(text_actor3)
actors.append(shallow_copy(contour_actor1))
text_actor1 = 'cube 4'
texts.append(text_actor1)
actors.append(shallow_copy(contour_actor2))
text_actor2 = 'cube 5'
texts.append(text_actor2)
actors.append(shallow_copy(contour_actor3))
text_actor3 = 'cube 6'
texts.append(text_actor3)
# show the grid without the captions
container = grid(actors=actors, captions=None,
caption_offset=(0, -40, 0),
cell_padding=(10, 10), dim=(2, 3))
scene.add(container)
scene.projection('orthogonal')
counter = itertools.count()
show_m = window.ShowManager(scene)
show_m.initialize()
def timer_callback(_obj, _event):
nonlocal counter
cnt = next(counter)
# show_m.scene.zoom(1)
show_m.render()
if cnt == 4:
show_m.exit()
show_m.destroy_timers()
show_m.add_timer_callback(True, 200, timer_callback)
show_m.start()
arr = window.snapshot(scene)
arr[arr < 20] = 0
report = window.analyze_snapshot(arr)
npt.assert_equal(report.objects, 6)
scene.rm_all()
counter = itertools.count()
show_m = window.ShowManager(scene)
show_m.initialize()
# show the grid with the captions
container = grid(actors=actors, captions=texts,
caption_offset=(0, -50, 0),
cell_padding=(10, 10),
dim=(3, 3))
scene.add(container)
show_m.add_timer_callback(True, 200, timer_callback)
show_m.start()
arr = window.snapshot(scene)
report = window.analyze_snapshot(arr)
npt.assert_equal(report.objects > 6, True)
def test_direct_sphere_mapping():
arr = 255 * np.ones((810, 1620, 3), dtype='uint8')
rows, cols, _ = arr.shape
rs = rows // 2
cs = cols // 2
w = 150 // 2
arr[rs - w: rs + w, cs - 10 * w: cs + 10 * w] = np.array([255, 127, 0])
# enable to see pacman on sphere
# arr[0: 2 * w, cs - 10 * w: cs + 10 * w] = np.array([255, 127, 0])
scene = window.Scene()
tsa = actor.texture_on_sphere(arr)
scene.add(tsa)
rotate(tsa, rotation=(90, 0, 1, 0))
display = window.snapshot(scene)
res = window.analyze_snapshot(display, bg_color=(0, 0, 0),
colors=[(255, 127, 0)],
find_objects=False)
npt.assert_equal(res.colors_found, [True])
def test_texture_mapping():
arr = np.zeros((512, 212, 3), dtype='uint8')
arr[:256, :] = np.array([255, 0, 0])
arr[256:, :] = np.array([0, 255, 0])
tp = actor.texture(arr,
interp=True)
scene = window.Scene()
scene.add(tp)
display = window.snapshot(scene)
res = window.analyze_snapshot(display, bg_color=(0, 0, 0),
colors=[(255, 0, 0), (0, 255, 0)],
find_objects=False)
npt.assert_equal(res.colors_found, [True, True])
def test_texture_update():
arr = np.zeros((512, 212, 3), dtype='uint8')
arr[:256, :] = np.array([255, 0, 0])
arr[256:, :] = np.array([0, 255, 0])
# create a texture on plane
tp = actor.texture(arr, interp=True)
scene = window.Scene()
scene.add(tp)
display = window.snapshot(scene)
res1 = window.analyze_snapshot(display, bg_color=(0, 0, 0),
colors=[(255, 255, 255),
(255, 0, 0),
(0, 255, 0)],
find_objects=False)
# update the texture
new_arr = np.zeros((512, 212, 3), dtype='uint8')
new_arr[:, :] = np.array([255, 255, 255])
actor.texture_update(tp, new_arr)
display = window.snapshot(scene)
res2 = window.analyze_snapshot(display, bg_color=(0, 0, 0),
colors=[(255, 255, 255),
(255, 0, 0),
(0, 255, 0)],
find_objects=False)
# Test for original colors
npt.assert_equal(res1.colors_found, [False, True, True])
# Test for changed colors of the actor
npt.assert_equal(res2.colors_found, [True, False, False])
def test_figure_vs_texture_actor():
arr = (255 * np.ones((512, 212, 4))).astype('uint8')
arr[20:40, 20:40, 3] = 0
tp = actor.figure(arr)
arr[20:40, 20:40, :] = np.array([255, 0, 0, 255], dtype='uint8')
tp2 = actor.texture(arr)
scene = window.Scene()
scene.add(tp)
scene.add(tp2)
tp2.SetPosition(0, 0, -50)
display = window.snapshot(scene)
res = window.analyze_snapshot(display, bg_color=(0, 0, 0),
colors=[(255, 0, 0), (255, 255, 255)],
find_objects=False)
npt.assert_equal(res.colors_found, [True, True])
@pytest.mark.skipif(not have_matplotlib, reason="Requires MatplotLib")
def test_matplotlib_figure():
names = ['group_a', 'group_b', 'group_c']
values = [1, 10, 100]
fig = plt.figure(figsize=(9, 3))
plt.subplot(131)
plt.bar(names, values)
plt.subplot(132)
plt.scatter(names, values)
plt.subplot(133)
plt.plot(names, values)
plt.suptitle('Categorical Plotting')
arr = matplotlib_figure_to_numpy(fig, dpi=500, transparent=True)
plt.close('all')
fig_actor = actor.figure(arr, 'cubic')
fig_actor2 = actor.figure(arr, 'cubic')
scene = window.Scene()
scene.background((1, 1, 1.))
ax_actor = actor.axes(scale=(1000, 1000, 1000))
scene.add(ax_actor)
scene.add(fig_actor)
scene.add(fig_actor2)
ax_actor.SetPosition(-50, 500, -800)
fig_actor2.SetPosition(500, 800, -400)
display = window.snapshot(scene, 'test_mpl.png', order_transparent=False,
offscreen=True)
res = window.analyze_snapshot(display, bg_color=(255, 255, 255.),
colors=[(31, 119, 180)],
find_objects=False)
# omit assert from now until we know why snapshot creates
# different colors in Github Actions but not on our computers
# npt.assert_equal(res.colors_found, [True, True])
# TODO: investigate further this issue with snapshot in Actions
pass
def test_superquadric_actor(interactive=False):
scene = window.Scene()
centers = np.array([[8, 0, 0], [0, 8, 0], [0, 0, 0]])
colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
directions = np.array([[0.27753247, 0.15332503, 0.63670953],
[0.14138223, 0.76031677, 0.14669451],
[0.23416946, 0.12816617, 0.92596145]])
scales = [1, 2, 3]
roundness = np.array([[1, 1], [1, 2], [2, 1]])
sq_actor = actor.superquadric(centers, roundness=roundness,
directions=directions,
colors=colors.astype(np.uint8),
scales=scales)
scene.add(sq_actor)
if interactive:
window.show(scene)
arr = window.snapshot(scene, offscreen=True)
arr[arr > 0] = 255 # Normalization
report = window.analyze_snapshot(arr, colors=255*colors.astype(np.uint8))
npt.assert_equal(report.objects, 3)
npt.assert_equal(report.colors_found, [True, True, True])
def test_billboard_actor(interactive=False):
scene = window.Scene()
scene.background((1, 1, 1))
centers = np.array([[0, 0, 0], [5, -5, 5], [-7, 7, -7], [10, 10, 10],
[10.5, 11.5, 11.5], [12, -12, -12], [-17, 17, 17],
[-22, -22, 22]])
colors = np.array([[1, 1, 0], [0, 0, 0], [1, 0, 1], [0, 0, 1], [1, 1, 1],
[1, 0, 0], [0, 1, 0], [0, 1, 1]])
scales = [6, .4, 1.2, 1, .2, .7, 3, 2]
fake_sphere = \
"""
float len = length(point);
float radius = 1.;
if(len > radius)
{discard;}
vec3 normalizedPoint = normalize(vec3(point.xy, sqrt(1. - len)));
vec3 direction = normalize(vec3(1., 1., 1.));
float df_1 = max(0, dot(direction, normalizedPoint));
float sf_1 = pow(df_1, 24);
fragOutput0 = vec4(max(df_1 * color, sf_1 * vec3(1)), 1);
"""
billboard_actor = actor.billboard(centers, colors=colors, scales=scales,
fs_impl=fake_sphere)
scene.add(billboard_actor)
scene.add(actor.axes())
if interactive:
window.show(scene)
arr = window.snapshot(scene)
report = window.analyze_snapshot(arr, colors=colors)
npt.assert_equal(report.objects, 8)
@pytest.mark.skipif(skip_win, reason="This test does not work on Windows"
" due to snapshot (memory access"
" violation). Check what is causing this"
" issue with shader")
def test_sdf_actor(interactive=False):
scene = window.Scene()
scene.background((1, 1, 1))
centers = np.array([[2, 0, 0], [0, 2, 0], [0, 0, 0], [2, 2, 0]]) * 11
colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 0]])
directions = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1], [1, 1, 0]])
scales = [1, 2, 3, 4]
primitive = ['sphere', 'ellipsoid', 'torus', 'capsule']
sdf_actor = actor.sdf(centers, directions,
colors, primitive, scales)
scene.add(sdf_actor)
scene.add(actor.axes())
if interactive:
window.show(scene)
arr = window.snapshot(scene)
report = window.analyze_snapshot(arr, colors=colors)
npt.assert_equal(report.objects, 4)
# Draw 4 spheres since the primitive type is a single str
scene.clear()
primitive = 'sphere'
sdf_actor = actor.sdf(centers, directions,
colors, primitive, scales)
scene.add(sdf_actor)
scene.add(actor.axes())
if interactive:
window.show(scene)
arr = window.snapshot(scene)
report = window.analyze_snapshot(arr, colors=colors)
npt.assert_equal(report.objects, 4)
# Single-item primitive list: one sphere, and the remaining centers default to torus
scene.clear()
primitive = ['sphere']
with npt.assert_warns(UserWarning):
sdf_actor = actor.sdf(centers, directions, colors,
primitive, scales)
scene.add(sdf_actor)
scene.add(actor.axes())
if interactive:
window.show(scene)
arr = window.snapshot(scene)
report = window.analyze_snapshot(arr, colors=colors)
npt.assert_equal(report.objects, 4)
# One sphere and one ellipsoid; the remaining centers default to torus
scene.clear()
primitive = ['sphere', 'ellipsoid']
with npt.assert_warns(UserWarning):
sdf_actor = actor.sdf(centers, directions,
colors, primitive, scales)
scene.add(sdf_actor)
scene.add(actor.axes())
if interactive:
window.show(scene)
arr = window.snapshot(scene)
report = window.analyze_snapshot(arr, colors=colors)
npt.assert_equal(report.objects, 4)
def test_marker_actor(interactive=False):
scene = window.Scene()
scene.background((1, 1, 1))
centers_3do = np.array([[4, 0, 0], [4, 4, 0], [4, 8, 0]])
markers_2d = ['o', 's', 'd', '^', 'p', 'h', 's6', 'x', '+']
center_markers_2d = np.array(
[[0, i*2, 0] for i in range(len(markers_2d))])
fake_spheres = actor.markers(
centers_3do,
colors=(0, 1, 0),
scales=1,
marker='3d'
)
markers_2d = actor.markers(
center_markers_2d,
colors=(0, 1, 0),
scales=1,
marker=markers_2d
)
scene.add(fake_spheres)
scene.add(markers_2d)
if interactive:
window.show(scene)
arr = window.snapshot(scene)
colors = np.array([[0, 1, 0] for i in range(12)])
report = window.analyze_snapshot(arr, colors=colors)
npt.assert_equal(report.objects, 12)
|
the-stack_0_18865 | # Author: Travis Oliphant
# 1999 -- 2002
from __future__ import division, print_function, absolute_import
import warnings
import threading
from fractions import gcd
from . import sigtools
from ._upfirdn import _UpFIRDn, _output_len
from scipy._lib.six import callable
from scipy._lib._version import NumpyVersion
from scipy import fftpack, linalg
from numpy import (allclose, angle, arange, argsort, array, asarray,
atleast_1d, atleast_2d, cast, dot, exp, expand_dims,
iscomplexobj, mean, ndarray, newaxis, ones, pi,
poly, polyadd, polyder, polydiv, polymul, polysub, polyval,
prod, product, r_, ravel, real_if_close, reshape,
roots, sort, sum, take, transpose, unique, where, zeros,
zeros_like)
import numpy as np
from scipy.special import factorial
from .windows import get_window
from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext
__all__ = ['correlate', 'fftconvolve', 'convolve', 'convolve2d', 'correlate2d',
'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',
'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue',
'residuez', 'resample', 'resample_poly', 'detrend',
'lfilter_zi', 'sosfilt_zi',
'filtfilt', 'decimate', 'vectorstrength']
_modedict = {'valid': 0, 'same': 1, 'full': 2}
_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1,
'symmetric': 1, 'reflect': 4}
_rfft_mt_safe = (NumpyVersion(np.__version__) >= '1.9.0.dev-e24486e')
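# numpy's real FFTs are only thread-safe from 1.9 onwards; on older versions the code
# presumably falls back to serializing those calls with the lock below.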
_rfft_lock = threading.Lock()
def _valfrommode(mode):
try:
val = _modedict[mode]
except KeyError:
if mode not in [0, 1, 2]:
raise ValueError("Acceptable mode flags are 'valid' (0),"
" 'same' (1), or 'full' (2).")
val = mode
return val
def _bvalfromboundary(boundary):
try:
val = _boundarydict[boundary] << 2
except KeyError:
if boundary not in [0, 1, 2]:
raise ValueError("Acceptable boundary flags are 'fill', 'wrap'"
" (or 'circular'), \n and 'symm'"
" (or 'symmetric').")
val = boundary << 2
return val
def _check_valid_mode_shapes(shape1, shape2):
for d1, d2 in zip(shape1, shape2):
if not d1 >= d2:
raise ValueError(
"in1 should have at least as many items as in2 in "
"every dimension for 'valid' mode.")
def correlate(in1, in2, mode='full'):
"""
Cross-correlate two N-dimensional arrays.
Cross-correlate `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
correlate : array
An N-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
Notes
-----
The correlation z of two d-dimensional arrays x and y is defined as:
z[...,k,...] = sum[..., i_l, ...]
x[..., i_l,...] * conj(y[..., i_l + k,...])
Examples
--------
Implement a matched filter using cross-correlation, to recover a signal
that has passed through a noisy channel.
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128)
>>> sig_noise = sig + np.random.randn(len(sig))
>>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128
>>> import matplotlib.pyplot as plt
>>> clock = np.arange(64, len(sig), 128)
>>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.plot(clock, sig[clock], 'ro')
>>> ax_orig.set_title('Original signal')
>>> ax_noise.plot(sig_noise)
>>> ax_noise.set_title('Signal with noise')
>>> ax_corr.plot(corr)
>>> ax_corr.plot(clock, corr[clock], 'ro')
>>> ax_corr.axhline(0.5, ls=':')
>>> ax_corr.set_title('Cross-correlated with rectangular pulse')
>>> ax_orig.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
# Don't use _valfrommode, since correlate should not accept numeric modes
try:
val = _modedict[mode]
except KeyError:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
if in1.ndim == in2.ndim == 0:
return in1 * in2
elif not in1.ndim == in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
# numpy is significantly faster for 1d
if in1.ndim == 1 and in2.ndim == 1:
return np.correlate(in1, in2, mode)
ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]
out = np.empty(ps, in1.dtype)
z = sigtools._correlateND(in1, in2, out, val)
else:
# numpy is significantly faster for 1d
if in1.ndim == 1 and in2.ndim == 1 and (in1.size >= in2.size):
return np.correlate(in1, in2, mode)
# _correlateND is far slower when in2.size > in1.size, so swap them
# and then undo the effect afterward
swapped_inputs = (mode == 'full') and (in2.size > in1.size)
if swapped_inputs:
in1, in2 = in2, in1
ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]
# zero pad input
in1zpadded = np.zeros(ps, in1.dtype)
sc = [slice(0, i) for i in in1.shape]
in1zpadded[sc] = in1.copy()
if mode == 'full':
out = np.empty(ps, in1.dtype)
elif mode == 'same':
out = np.empty(in1.shape, in1.dtype)
z = sigtools._correlateND(in1zpadded, in2, out, val)
# Reverse and conjugate to undo the effect of swapping inputs
if swapped_inputs:
slice_obj = [slice(None, None, -1)] * len(z.shape)
z = z[slice_obj].conj()
return z
def _centered(arr, newsize):
# Return the center newsize portion of the array.
newsize = asarray(newsize)
currsize = array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def _next_regular(target):
"""
Find the next regular number greater than or equal to target.
Regular numbers are composites of the prime factors 2, 3, and 5.
Also known as 5-smooth numbers or Hamming numbers, these are the optimal
size for inputs to FFTPACK.
Target must be a positive integer.
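    As an illustrative sketch (calling this private helper directly):
    >>> _next_regular(7)
    8
    >>> _next_regular(97)
    100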
"""
if target <= 6:
return target
# Quickly check if it's already a power of 2
if not (target & (target-1)):
return target
match = float('inf') # Anything found will be smaller
p5 = 1
while p5 < target:
p35 = p5
while p35 < target:
# Ceiling integer division, avoiding conversion to float
# (quotient = ceil(target / p35))
quotient = -(-target // p35)
# Quickly find next power of 2 >= quotient
try:
p2 = 2**((quotient - 1).bit_length())
except AttributeError:
# Fallback for Python <2.7
p2 = 2**(len(bin(quotient - 1)) - 2)
N = p2 * p35
if N == target:
return N
elif N < match:
match = N
p35 *= 3
if p35 == target:
return p35
if p35 < match:
match = p35
p5 *= 5
if p5 == target:
return p5
if p5 < match:
match = p5
return match
def fftconvolve(in1, in2, mode="full"):
"""Convolve two N-dimensional arrays using FFT.
Convolve `in1` and `in2` using the fast Fourier transform method, with
the output size determined by the `mode` argument.
This is generally much faster than `convolve` for large arrays (n > ~500),
but can be slower when only a few output values are needed, and can only
output float arrays (int or object array inputs will be cast to float).
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
out : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Autocorrelation of white noise is an impulse. (This is at least 100 times
as fast as `convolve`.)
>>> from scipy import signal
>>> sig = np.random.randn(1000)
>>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('White noise')
>>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)
>>> ax_mag.set_title('Autocorrelation')
>>> fig.tight_layout()
>>> fig.show()
Gaussian blur implemented using FFT convolution. Notice the dark borders
around the image, due to the zero-padding beyond its boundaries.
The `convolve2d` function allows for other types of image boundaries,
but is far slower.
>>> from scipy import misc
>>> face = misc.face(gray=True)
>>> kernel = np.outer(signal.gaussian(70, 8), signal.gaussian(70, 8))
>>> blurred = signal.fftconvolve(face, kernel, mode='same')
>>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(3, 1,
... figsize=(6, 15))
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_kernel.imshow(kernel, cmap='gray')
>>> ax_kernel.set_title('Gaussian kernel')
>>> ax_kernel.set_axis_off()
>>> ax_blurred.imshow(blurred, cmap='gray')
>>> ax_blurred.set_title('Blurred')
>>> ax_blurred.set_axis_off()
>>> fig.show()
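    As a minimal illustrative check (not a benchmark; `signal` as imported
    above), the FFT result agrees with the direct method in `convolve` up to
    floating-point error:
    >>> import numpy as np
    >>> np.allclose(signal.fftconvolve([1, 2, 3], [0, 1, 0.5]),
    ...             signal.convolve([1, 2, 3], [0, 1, 0.5]))
    True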
"""
in1 = asarray(in1)
in2 = asarray(in2)
if in1.ndim == in2.ndim == 0: # scalar inputs
return in1 * in2
elif not in1.ndim == in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
elif in1.size == 0 or in2.size == 0: # empty arrays
return array([])
s1 = array(in1.shape)
s2 = array(in2.shape)
complex_result = (np.issubdtype(in1.dtype, complex) or
np.issubdtype(in2.dtype, complex))
shape = s1 + s2 - 1
if mode == "valid":
_check_valid_mode_shapes(s1, s2)
# Speed up FFT by padding to optimal size for FFTPACK
fshape = [_next_regular(int(d)) for d in shape]
fslice = tuple([slice(0, int(sz)) for sz in shape])
# Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make
# sure we only call rfftn/irfftn from one thread at a time.
if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)):
try:
ret = (np.fft.irfftn(np.fft.rfftn(in1, fshape) *
np.fft.rfftn(in2, fshape), fshape)[fslice].
copy())
finally:
if not _rfft_mt_safe:
_rfft_lock.release()
else:
# If we're here, it's either because we need a complex result, or we
# failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and
# is already in use by another thread). In either case, use the
# (threadsafe but slower) SciPy complex-FFT routines instead.
ret = fftpack.ifftn(fftpack.fftn(in1, fshape) *
fftpack.fftn(in2, fshape))[fslice].copy()
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
return _centered(ret, s1)
elif mode == "valid":
return _centered(ret, s1 - s2 + 1)
else:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
def convolve(in1, in2, mode='full'):
"""
Convolve two N-dimensional arrays.
Convolve `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
convolve : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
See also
--------
numpy.polymul : performs polynomial multiplication (same operation, but
also accepts poly1d objects)
Examples
--------
Smooth a square pulse using a Hann window:
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 0.], 100)
>>> win = signal.hann(50)
>>> filtered = signal.convolve(sig, win, mode='same') / sum(win)
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('Original pulse')
>>> ax_orig.margins(0, 0.1)
>>> ax_win.plot(win)
>>> ax_win.set_title('Filter impulse response')
>>> ax_win.margins(0, 0.1)
>>> ax_filt.plot(filtered)
>>> ax_filt.set_title('Filtered signal')
>>> ax_filt.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
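    As a second minimal sketch, convolving short coefficient sequences
    reproduces polynomial multiplication (cf. `numpy.polymul` above), here
    (x + 2)(x + 3) = x**2 + 5*x + 6:
    >>> signal.convolve([1, 2], [1, 3])
    array([1, 5, 6])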
"""
volume = asarray(in1)
kernel = asarray(in2)
if volume.ndim == kernel.ndim == 0:
return volume * kernel
# fastpath to faster numpy 1d convolve
if volume.ndim == 1 and kernel.ndim == 1 and volume.size >= kernel.size:
return np.convolve(volume, kernel, mode)
slice_obj = [slice(None, None, -1)] * len(kernel.shape)
if np.iscomplexobj(kernel):
return correlate(volume, kernel[slice_obj].conj(), mode)
else:
return correlate(volume, kernel[slice_obj], mode)
def order_filter(a, domain, rank):
"""
Perform an order filter on an N-dimensional array.
    Perform an order filter on the array `a`. The domain argument acts as a
mask centered over each pixel. The non-zero elements of domain are
used to select elements surrounding each input pixel which are placed
in a list. The list is sorted, and the output for that pixel is the
element corresponding to rank in the sorted list.
Parameters
----------
a : ndarray
The N-dimensional input array.
domain : array_like
A mask array with the same number of dimensions as `a`.
Each dimension should have an odd number of elements.
rank : int
A non-negative integer which selects the element from the
sorted list (0 corresponds to the smallest element, 1 is the
next smallest element, etc.).
Returns
-------
out : ndarray
The results of the order filter in an array with the same
shape as `a`.
Examples
--------
>>> from scipy import signal
>>> x = np.arange(25).reshape(5, 5)
>>> domain = np.identity(3)
>>> x
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
>>> signal.order_filter(x, domain, 0)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 2., 0.],
[ 0., 5., 6., 7., 0.],
[ 0., 10., 11., 12., 0.],
[ 0., 0., 0., 0., 0.]])
>>> signal.order_filter(x, domain, 2)
array([[ 6., 7., 8., 9., 4.],
[ 11., 12., 13., 14., 9.],
[ 16., 17., 18., 19., 14.],
[ 21., 22., 23., 24., 19.],
[ 20., 21., 22., 23., 24.]])
"""
domain = asarray(domain)
size = domain.shape
for k in range(len(size)):
if (size[k] % 2) != 1:
raise ValueError("Each dimension of domain argument "
" should have an odd number of elements.")
return sigtools._order_filterND(a, domain, rank)
def medfilt(volume, kernel_size=None):
"""
Perform a median filter on an N-dimensional array.
Apply a median filter to the input array using a local window-size
given by `kernel_size`.
Parameters
----------
volume : array_like
An N-dimensional input array.
kernel_size : array_like, optional
A scalar or an N-length list giving the size of the median filter
window in each dimension. Elements of `kernel_size` should be odd.
If `kernel_size` is a scalar, then this scalar is used as the size in
each dimension. Default size is 3 for each dimension.
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
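    Examples
    --------
    A minimal illustrative sketch; as in `order_filter`, the window is
    zero-padded at the edges:
    >>> from scipy import signal
    >>> signal.medfilt([2., 80., 6., 3.], kernel_size=3).tolist()
    [2.0, 6.0, 6.0, 3.0]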
"""
volume = atleast_1d(volume)
if kernel_size is None:
kernel_size = [3] * len(volume.shape)
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), volume.ndim)
for k in range(len(volume.shape)):
if (kernel_size[k] % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
domain = ones(kernel_size)
numels = product(kernel_size, axis=0)
order = numels // 2
return sigtools._order_filterND(volume, domain, order)
def wiener(im, mysize=None, noise=None):
"""
Perform a Wiener filter on an N-dimensional array.
Apply a Wiener filter to the N-dimensional array `im`.
Parameters
----------
im : ndarray
An N-dimensional array.
mysize : int or array_like, optional
A scalar or an N-length list giving the size of the Wiener filter
window in each dimension. Elements of mysize should be odd.
If mysize is a scalar, then this scalar is used as the size
in each dimension.
noise : float, optional
The noise-power to use. If None, then noise is estimated as the
average of the local variance of the input.
Returns
-------
out : ndarray
Wiener filtered result with the same shape as `im`.
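    Examples
    --------
    An illustrative sketch, adapted to the image-plotting style used elsewhere
    in this module (the cast to float avoids integer overflow in the local
    averaging of the uint8 image):
    >>> from scipy import signal
    >>> from scipy import misc
    >>> import matplotlib.pyplot as plt
    >>> img = misc.face(gray=True).astype(float)
    >>> filtered_img = signal.wiener(img, (5, 5))
    >>> fig, (ax_orig, ax_filt) = plt.subplots(1, 2, figsize=(10, 5))
    >>> ax_orig.imshow(img, cmap='gray')
    >>> ax_orig.set_title('Original')
    >>> ax_orig.set_axis_off()
    >>> ax_filt.imshow(filtered_img, cmap='gray')
    >>> ax_filt.set_title('Wiener filtered')
    >>> ax_filt.set_axis_off()
    >>> fig.show()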
"""
im = asarray(im)
if mysize is None:
mysize = [3] * len(im.shape)
mysize = asarray(mysize)
if mysize.shape == ():
mysize = np.repeat(mysize.item(), im.ndim)
# Estimate the local mean
lMean = correlate(im, ones(mysize), 'same') / product(mysize, axis=0)
# Estimate the local variance
lVar = (correlate(im ** 2, ones(mysize), 'same') / product(mysize, axis=0)
- lMean ** 2)
# Estimate the noise power if needed.
if noise is None:
noise = mean(ravel(lVar), axis=0)
res = (im - lMean)
res *= (1 - noise / lVar)
res += lMean
out = where(lVar < noise, lMean, res)
return out
def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Convolve two 2-dimensional arrays.
Convolve `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1, in2 : array_like
Two-dimensional input arrays to be convolved.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
out : ndarray
A 2-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Compute the gradient of an image by 2D convolution with a complex Scharr
operator. (Horizontal operator is real, vertical is imaginary.) Use
symmetric boundary condition to avoid creating edges at the image
boundaries.
>>> from scipy import signal
>>> from scipy import misc
>>> ascent = misc.ascent()
>>> scharr = np.array([[ -3-3j, 0-10j, +3 -3j],
... [-10+0j, 0+ 0j, +10 +0j],
... [ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy
>>> grad = signal.convolve2d(ascent, scharr, boundary='symm', mode='same')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(3, 1, figsize=(6, 15))
>>> ax_orig.imshow(ascent, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_mag.imshow(np.absolute(grad), cmap='gray')
>>> ax_mag.set_title('Gradient magnitude')
>>> ax_mag.set_axis_off()
>>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles
>>> ax_ang.set_title('Gradient orientation')
>>> ax_ang.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# FIXME: some cast generates a warning here
out = sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue)
return out
def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Cross-correlate two 2-dimensional arrays.
Cross correlate `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1, in2 : array_like
Two-dimensional input arrays to be convolved.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
correlate2d : ndarray
A 2-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
Examples
--------
Use 2D cross-correlation to find the location of a template in a noisy
image:
>>> from scipy import signal
>>> from scipy import misc
>>> face = misc.face(gray=True) - misc.face(gray=True).mean()
>>> template = np.copy(face[300:365, 670:750]) # right eye
>>> template -= template.mean()
>>> face = face + np.random.randn(*face.shape) * 50 # add noise
>>> corr = signal.correlate2d(face, template, boundary='symm', mode='same')
>>> y, x = np.unravel_index(np.argmax(corr), corr.shape) # find the match
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(3, 1,
... figsize=(6, 15))
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_template.imshow(template, cmap='gray')
>>> ax_template.set_title('Template')
>>> ax_template.set_axis_off()
>>> ax_corr.imshow(corr, cmap='gray')
>>> ax_corr.set_title('Cross-correlation')
>>> ax_corr.set_axis_off()
>>> ax_orig.plot(x, y, 'ro')
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# FIXME: some cast generates a warning here
out = sigtools._convolve2d(in1, in2, 0, val, bval, fillvalue)
return out
def medfilt2d(input, kernel_size=3):
"""
Median filter a 2-dimensional array.
Apply a median filter to the `input` array using a local window-size
given by `kernel_size` (must be odd).
Parameters
----------
input : array_like
A 2-dimensional input array.
kernel_size : array_like, optional
A scalar or a list of length 2, giving the size of the
median filter window in each dimension. Elements of
`kernel_size` should be odd. If `kernel_size` is a scalar,
then this scalar is used as the size in each dimension.
Default is a kernel of size (3, 3).
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
"""
image = asarray(input)
if kernel_size is None:
kernel_size = [3] * 2
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), 2)
for size in kernel_size:
if (size % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
return sigtools._medfilt2d(image, kernel_size)
def lfilter(b, a, x, axis=-1, zi=None):
"""
Filter data along one-dimension with an IIR or FIR filter.
Filter a data sequence, `x`, using a digital filter. This works for many
fundamental data types (including Object type). The filter is a direct
form II transposed implementation of the standard difference equation
(see Notes).
Parameters
----------
b : array_like
The numerator coefficient vector in a 1-D sequence.
a : array_like
The denominator coefficient vector in a 1-D sequence. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the filter delays. It is a vector
(or array of vectors for an N-dimensional input) of length
``max(len(a),len(b))-1``. If `zi` is None or is not given then
initial rest is assumed. See `lfiltic` for more information.
Returns
-------
y : array
The output of the digital filter.
zf : array, optional
        If `zi` is None, this is not returned; otherwise, `zf` holds the
final filter delay values.
See Also
--------
lfiltic : Construct initial conditions for `lfilter`.
lfilter_zi : Compute initial state (steady state of step response) for
`lfilter`.
filtfilt : A forward-backward filter, to obtain a filter with linear phase.
savgol_filter : A Savitzky-Golay filter.
sosfilt: Filter data using cascaded second-order sections.
Notes
-----
The filter function is implemented as a direct II transposed structure.
This means that the filter implements::
a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[nb]*x[n-nb]
- a[1]*y[n-1] - ... - a[na]*y[n-na]
using the following difference equations::
y[m] = b[0]*x[m] + z[0,m-1]
z[0,m] = b[1]*x[m] + z[1,m-1] - a[1]*y[m]
...
z[n-3,m] = b[n-2]*x[m] + z[n-2,m-1] - a[n-2]*y[m]
z[n-2,m] = b[n-1]*x[m] - a[n-1]*y[m]
where m is the output sample number and n=max(len(a),len(b)) is the
model order.
The rational transfer function describing this filter in the
z-transform domain is::
-1 -nb
b[0] + b[1]z + ... + b[nb] z
Y(z) = ---------------------------------- X(z)
-1 -na
a[0] + a[1]z + ... + a[na] z
Examples
--------
Generate a noisy signal to be filtered:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 201)
>>> x = (np.sin(2*np.pi*0.75*t*(1-t) + 2.1) + 0.1*np.sin(2*np.pi*1.25*t + 1)
... + 0.18*np.cos(2*np.pi*3.85*t))
>>> xn = x + np.random.randn(len(t)) * 0.08
    Create an order 3 lowpass Butterworth filter:
>>> b, a = signal.butter(3, 0.05)
Apply the filter to xn. Use lfilter_zi to choose the initial condition of
the filter:
>>> zi = signal.lfilter_zi(b, a)
>>> z, _ = signal.lfilter(b, a, xn, zi=zi*xn[0])
Apply the filter again, to have a result filtered at an order the same as
filtfilt:
>>> z2, _ = signal.lfilter(b, a, z, zi=zi*z[0])
Use filtfilt to apply the filter:
>>> y = signal.filtfilt(b, a, xn)
Plot the original signal and the various filtered versions:
    >>> plt.figure()
>>> plt.plot(t, xn, 'b', alpha=0.75)
>>> plt.plot(t, z, 'r--', t, z2, 'r', t, y, 'k')
>>> plt.legend(('noisy signal', 'lfilter, once', 'lfilter, twice',
... 'filtfilt'), loc='best')
>>> plt.grid(True)
>>> plt.show()
"""
a = np.atleast_1d(a)
if len(a) == 1:
# This path only supports types fdgFDGO to mirror _linear_filter below.
# Any of b, a, x, or zi can set the dtype, but there is no default
# casting of other types; instead a NotImplementedError is raised.
b = np.asarray(b)
a = np.asarray(a)
if b.ndim != 1 and a.ndim != 1:
raise ValueError('object of too small depth for desired array')
x = np.asarray(x)
inputs = [b, a, x]
if zi is not None:
# _linear_filter does not broadcast zi, but does do expansion of singleton dims.
zi = np.asarray(zi)
if zi.ndim != x.ndim:
raise ValueError('object of too small depth for desired array')
expected_shape = list(x.shape)
expected_shape[axis] = b.shape[0] - 1
expected_shape = tuple(expected_shape)
# check the trivial case where zi is the right shape first
if zi.shape != expected_shape:
strides = zi.ndim * [None]
if axis < 0:
axis += zi.ndim
for k in range(zi.ndim):
if k == axis and zi.shape[k] == expected_shape[k]:
strides[k] = zi.strides[k]
elif k != axis and zi.shape[k] == expected_shape[k]:
strides[k] = zi.strides[k]
elif k != axis and zi.shape[k] == 1:
strides[k] = 0
else:
raise ValueError('Unexpected shape for zi: expected '
'%s, found %s.' %
(expected_shape, zi.shape))
zi = np.lib.stride_tricks.as_strided(zi, expected_shape, strides)
inputs.append(zi)
dtype = np.result_type(*inputs)
if dtype.char not in 'fdgFDGO':
raise NotImplementedError("input type '%s' not supported" % dtype)
b = np.array(b, dtype=dtype)
a = np.array(a, dtype=dtype, copy=False)
b /= a[0]
x = np.array(x, dtype=dtype, copy=False)
out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x)
ind = out_full.ndim * [slice(None)]
if zi is not None:
ind[axis] = slice(zi.shape[axis])
out_full[ind] += zi
ind[axis] = slice(out_full.shape[axis] - len(b) + 1)
out = out_full[ind]
if zi is None:
return out
else:
ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None)
zf = out_full[ind]
return out, zf
else:
if zi is None:
return sigtools._linear_filter(b, a, x, axis)
else:
return sigtools._linear_filter(b, a, x, axis, zi)
def lfiltic(b, a, y, x=None):
"""
Construct initial conditions for lfilter.
Given a linear filter (b, a) and initial conditions on the output `y`
and the input `x`, return the initial conditions on the state vector zi
which is used by `lfilter` to generate the output given the input.
Parameters
----------
b : array_like
Linear filter term.
a : array_like
Linear filter term.
y : array_like
Initial conditions.
If ``N=len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``.
If `y` is too short, it is padded with zeros.
x : array_like, optional
Initial conditions.
If ``M=len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``.
If `x` is not given, its initial conditions are assumed zero.
If `x` is too short, it is padded with zeros.
Returns
-------
zi : ndarray
The state vector ``zi``.
``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``, where ``K = max(M,N)``.
See Also
--------
lfilter, lfilter_zi
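    Examples
    --------
    A minimal illustrative sketch: construct ``zi`` from the tail of a partial
    run of `lfilter` and use it to continue filtering seamlessly (the filter
    coefficients here are just an arbitrary Butterworth design):
    >>> from scipy import signal
    >>> import numpy as np
    >>> b, a = signal.butter(2, 0.25)
    >>> x = np.arange(8.0)
    >>> y_full = signal.lfilter(b, a, x)
    >>> y_head = signal.lfilter(b, a, x[:4])
    >>> zi = signal.lfiltic(b, a, y_head[::-1], x[3::-1])
    >>> y_tail, _ = signal.lfilter(b, a, x[4:], zi=zi)
    >>> np.allclose(y_tail, y_full[4:])
    True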
"""
N = np.size(a) - 1
M = np.size(b) - 1
K = max(M, N)
y = asarray(y)
if y.dtype.kind in 'bui':
# ensure calculations are floating point
y = y.astype(np.float64)
zi = zeros(K, y.dtype)
if x is None:
x = zeros(M, y.dtype)
else:
x = asarray(x)
L = np.size(x)
if L < M:
x = r_[x, zeros(M - L)]
L = np.size(y)
if L < N:
y = r_[y, zeros(N - L)]
for m in range(M):
zi[m] = sum(b[m + 1:] * x[:M - m], axis=0)
for m in range(N):
zi[m] -= sum(a[m + 1:] * y[:N - m], axis=0)
return zi
def deconvolve(signal, divisor):
"""Deconvolves ``divisor`` out of ``signal``.
Returns the quotient and remainder such that
``signal = convolve(divisor, quotient) + remainder``
Parameters
----------
signal : array_like
Signal data, typically a recorded signal
divisor : array_like
Divisor data, typically an impulse response or filter that was
applied to the original signal
Returns
-------
quotient : ndarray
Quotient, typically the recovered original signal
remainder : ndarray
Remainder
Examples
--------
Deconvolve a signal that's been filtered:
>>> from scipy import signal
>>> original = [0, 1, 0, 0, 1, 1, 0, 0]
>>> impulse_response = [2, 1]
>>> recorded = signal.convolve(impulse_response, original)
>>> recorded
array([0, 2, 1, 0, 2, 3, 1, 0, 0])
>>> recovered, remainder = signal.deconvolve(recorded, impulse_response)
>>> recovered
array([ 0., 1., 0., 0., 1., 1., 0., 0.])
See also
--------
numpy.polydiv : performs polynomial division (same operation, but
also accepts poly1d objects)
"""
num = atleast_1d(signal)
den = atleast_1d(divisor)
N = len(num)
D = len(den)
if D > N:
quot = []
rem = num
else:
input = ones(N - D + 1, float)
input[1:] = 0
quot = lfilter(num, den, input)
rem = num - convolve(den, quot, mode='full')
return quot, rem
def hilbert(x, N=None, axis=-1):
"""
Compute the analytic signal, using the Hilbert transform.
The transformation is done along the last axis by default.
Parameters
----------
x : array_like
Signal data. Must be real.
N : int, optional
Number of Fourier components. Default: ``x.shape[axis]``
axis : int, optional
Axis along which to do the transformation. Default: -1.
Returns
-------
xa : ndarray
Analytic signal of `x`, of each 1-D array along `axis`
Notes
-----
The analytic signal ``x_a(t)`` of signal ``x(t)`` is:
.. math:: x_a = F^{-1}(F(x) 2U) = x + i y
where `F` is the Fourier transform, `U` the unit step function,
and `y` the Hilbert transform of `x`. [1]_
In other words, the negative half of the frequency spectrum is zeroed
out, turning the real-valued signal into a complex signal. The Hilbert
transformed signal can be obtained from ``np.imag(hilbert(x))``, and the
original signal from ``np.real(hilbert(x))``.
Examples
    --------
In this example we use the Hilbert transform to determine the amplitude
envelope and instantaneous frequency of an amplitude-modulated signal.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import hilbert, chirp
>>> duration = 1.0
>>> fs = 400.0
>>> samples = int(fs*duration)
>>> t = np.arange(samples) / fs
    We create a chirp whose frequency increases from 20 Hz to 100 Hz and
apply an amplitude modulation.
>>> signal = chirp(t, 20.0, t[-1], 100.0)
>>> signal *= (1.0 + 0.5 * np.sin(2.0*np.pi*3.0*t) )
    The amplitude envelope is given by the magnitude of the analytic signal. The
    instantaneous frequency can be obtained by differentiating the instantaneous
    phase with respect to time. The instantaneous phase corresponds to the phase
angle of the analytic signal.
>>> analytic_signal = hilbert(signal)
>>> amplitude_envelope = np.abs(analytic_signal)
>>> instantaneous_phase = np.unwrap(np.angle(analytic_signal))
>>> instantaneous_frequency = np.diff(instantaneous_phase) / (2.0*np.pi) * fs
>>> fig = plt.figure()
>>> ax0 = fig.add_subplot(211)
>>> ax0.plot(t, signal, label='signal')
>>> ax0.plot(t, amplitude_envelope, label='envelope')
>>> ax0.set_xlabel("time in seconds")
>>> ax0.legend()
>>> ax1 = fig.add_subplot(212)
>>> ax1.plot(t[1:], instantaneous_frequency)
>>> ax1.set_xlabel("time in seconds")
>>> ax1.set_ylim(0.0, 120.0)
References
----------
.. [1] Wikipedia, "Analytic signal".
http://en.wikipedia.org/wiki/Analytic_signal
.. [2] Leon Cohen, "Time-Frequency Analysis", 1995. Chapter 2.
.. [3] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal Processing,
Third Edition, 2009. Chapter 12. ISBN 13: 978-1292-02572-8
"""
x = asarray(x)
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape[axis]
if N <= 0:
raise ValueError("N must be positive.")
Xf = fftpack.fft(x, N, axis=axis)
h = zeros(N)
if N % 2 == 0:
h[0] = h[N // 2] = 1
h[1:N // 2] = 2
else:
h[0] = 1
h[1:(N + 1) // 2] = 2
if len(x.shape) > 1:
ind = [newaxis] * x.ndim
ind[axis] = slice(None)
h = h[ind]
x = fftpack.ifft(Xf * h, axis=axis)
return x
def hilbert2(x, N=None):
"""
Compute the '2-D' analytic signal of `x`
Parameters
----------
x : array_like
2-D signal data.
N : int or tuple of two ints, optional
Number of Fourier components. Default is ``x.shape``
Returns
-------
xa : ndarray
Analytic signal of `x` taken along axes (0,1).
References
----------
.. [1] Wikipedia, "Analytic signal",
http://en.wikipedia.org/wiki/Analytic_signal
"""
x = atleast_2d(x)
if len(x.shape) > 2:
raise ValueError("x must be 2-D.")
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape
elif isinstance(N, int):
if N <= 0:
raise ValueError("N must be positive.")
N = (N, N)
elif len(N) != 2 or np.any(np.asarray(N) <= 0):
raise ValueError("When given as a tuple, N must hold exactly "
"two positive integers")
Xf = fftpack.fft2(x, N, axes=(0, 1))
h1 = zeros(N[0], 'd')
h2 = zeros(N[1], 'd')
    # Build the step masks for each axis; h1 and h2 are modified in place,
    # so no eval/exec indirection is needed.
    for p, h in enumerate((h1, h2)):
        N1 = N[p]
        if N1 % 2 == 0:
            h[0] = h[N1 // 2] = 1
            h[1:N1 // 2] = 2
        else:
            h[0] = 1
            h[1:(N1 + 1) // 2] = 2
h = h1[:, newaxis] * h2[newaxis, :]
k = len(x.shape)
while k > 2:
h = h[:, newaxis]
k -= 1
x = fftpack.ifft2(Xf * h, axes=(0, 1))
return x
def cmplx_sort(p):
"""Sort roots based on magnitude.
Parameters
----------
p : array_like
The roots to sort, as a 1-D array.
Returns
-------
p_sorted : ndarray
Sorted roots.
indx : ndarray
Array of indices needed to sort the input `p`.
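    Examples
    --------
    A minimal illustrative sketch (sorting is by magnitude, so ``1+1j`` comes
    before ``3``):
    >>> from scipy import signal
    >>> vals = [1, 4, 1+1j, 3]
    >>> p_sorted, indx = signal.cmplx_sort(vals)
    >>> indx.tolist()
    [0, 2, 3, 1]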
"""
p = asarray(p)
if iscomplexobj(p):
indx = argsort(abs(p))
else:
indx = argsort(p)
return take(p, indx, 0), indx
def unique_roots(p, tol=1e-3, rtype='min'):
"""
Determine unique roots and their multiplicities from a list of roots.
Parameters
----------
p : array_like
The list of roots.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
pout : ndarray
The list of unique roots, sorted from low to high.
mult : ndarray
The multiplicity of each root.
Notes
-----
This utility function is not specific to roots but can be used for any
sequence of values for which uniqueness and multiplicity has to be
determined. For a more general routine, see `numpy.unique`.
Examples
--------
>>> from scipy import signal
>>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
>>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg')
Check which roots have multiplicity larger than 1:
>>> uniq[mult > 1]
array([ 1.305])
"""
if rtype in ['max', 'maximum']:
comproot = np.max
elif rtype in ['min', 'minimum']:
comproot = np.min
elif rtype in ['avg', 'mean']:
comproot = np.mean
else:
raise ValueError("`rtype` must be one of "
"{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
p = asarray(p) * 1.0
tol = abs(tol)
p, indx = cmplx_sort(p)
pout = []
mult = []
indx = -1
curp = p[0] + 5 * tol
sameroots = []
for k in range(len(p)):
tr = p[k]
if abs(tr - curp) < tol:
sameroots.append(tr)
curp = comproot(sameroots)
pout[indx] = curp
mult[indx] += 1
else:
pout.append(tr)
curp = tr
sameroots = [tr]
indx += 1
mult.append(1)
return array(pout), array(mult)
def invres(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(s) and a(s) from partial fraction expansion.
If ``M = len(b)`` and ``N = len(a)``::
b(s) b[0] x**(M-1) + b[1] x**(M-2) + ... + b[M-1]
H(s) = ------ = ----------------------------------------------
a(s) a[0] x**(N-1) + a[1] x**(N-2) + ... + a[N-1]
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
Parameters
----------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
'max': pick the maximum of those roots.
'min': pick the minimum of those roots.
'avg': take the average of those roots.
See Also
--------
residue, unique_roots
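    Examples
    --------
    A minimal round-trip sketch (the inverse of the example in `residue`):
    >>> from scipy import signal
    >>> import numpy as np
    >>> b, a = signal.invres([-1.0, 1.0], [-2.0, -1.0], [])
    >>> np.allclose(b, [1.0]) and np.allclose(a, [1.0, 3.0, 2.0])
    True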
"""
extra = k
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
for k in range(len(pout)):
temp = []
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
b = polyadd(b, r[indx] * atleast_1d(poly(t2)))
indx += 1
b = real_if_close(b)
while allclose(b[0], 0, rtol=1e-14) and (b.shape[-1] > 1):
b = b[1:]
return b, a
def residue(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(s) / a(s).
If ``M = len(b)`` and ``N = len(a)``, then the partial-fraction
expansion H(s) is defined as::
b(s) b[0] s**(M-1) + b[1] s**(M-2) + ... + b[M-1]
H(s) = ------ = ----------------------------------------------
a(s) a[0] s**(N-1) + a[1] s**(N-2) + ... + a[N-1]
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
Returns
-------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invres, numpy.poly, unique_roots
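    Examples
    --------
    A small worked sketch: ``1 / ((s + 1)(s + 2))`` expands to
    ``1/(s + 1) - 1/(s + 2)``, i.e. residue -1 at the pole -2 and residue 1
    at the pole -1:
    >>> from scipy import signal
    >>> import numpy as np
    >>> r, p, k = signal.residue([1.0], [1.0, 3.0, 2.0])
    >>> np.allclose(r, [-1, 1]) and np.allclose(p, [-2, -1])
    True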
"""
b, a = map(asarray, (b, a))
rscale = a[0]
k, b = polydiv(b, a)
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula
indx = 0
for n in range(len(pout)):
bn = b.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))
# bn(s) / an(s) is (s-po[n])**Nn * b(s) / a(s) where Nn is
# multiplicity of pole at po[n]
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, pout[n]) / polyval(an, pout[n])
/ factorial(sig - m))
indx += sig
return r / rscale, p, k
def residuez(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(z) / a(z).
If ``M = len(b)`` and ``N = len(a)``::
b(z) b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
H(z) = ------ = ----------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
See also
--------
invresz, unique_roots
"""
b, a = map(asarray, (b, a))
gain = a[0]
brev, arev = b[::-1], a[::-1]
krev, brev = polydiv(brev, arev)
if krev == []:
k = []
else:
k = krev[::-1]
b = brev[::-1]
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula (for discrete-time)
# the polynomial is in z**(-1) and the multiplication is by terms
# like this (1-p[i] z**(-1))**mult[i]. After differentiation,
# we must divide by (-p[i])**(m-k) as well as (m-k)!
indx = 0
for n in range(len(pout)):
bn = brev.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))[::-1]
# bn(z) / an(z) is (1-po[n] z**(-1))**Nn * b(z) / a(z) where Nn is
# multiplicity of pole at po[n] and b(z) and a(z) are polynomials.
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, 1.0 / pout[n]) /
polyval(an, 1.0 / pout[n]) /
factorial(sig - m) / (-pout[n]) ** (sig - m))
indx += sig
return r / gain, p, k
def invresz(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(z) and a(z) from partial fraction expansion.
If ``M = len(b)`` and ``N = len(a)``::
b(z) b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
H(z) = ------ = ----------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1)...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
See Also
--------
residuez, unique_roots, invres
"""
extra = asarray(k)
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
brev = asarray(b)[::-1]
for k in range(len(pout)):
temp = []
# Construct polynomial which does not include any of this root
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
brev = polyadd(brev, (r[indx] * atleast_1d(poly(t2)))[::-1])
indx += 1
b = real_if_close(brev[::-1])
return b, a
def resample(x, num, t=None, axis=0, window=None):
"""
Resample `x` to `num` samples using Fourier method along the given axis.
The resampled signal starts at the same value as `x` but is sampled
with a spacing of ``len(x) / num * (spacing of x)``. Because a
Fourier method is used, the signal is assumed to be periodic.
Parameters
----------
x : array_like
The data to be resampled.
num : int
The number of samples in the resampled signal.
t : array_like, optional
If `t` is given, it is assumed to be the sample positions
associated with the signal data in `x`.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : array_like, callable, string, float, or tuple, optional
Specifies the window applied to the signal in the Fourier
domain. See below for details.
Returns
-------
resampled_x or (resampled_x, resampled_t)
Either the resampled array, or, if `t` was given, a tuple
containing the resampled array and the corresponding resampled
positions.
See also
--------
decimate
resample_poly
Notes
-----
The argument `window` controls a Fourier-domain window that tapers
the Fourier spectrum before zero-padding to alleviate ringing in
the resampled values for sampled signals you didn't intend to be
interpreted as band-limited.
If `window` is a function, then it is called with a vector of inputs
indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ).
If `window` is an array of the same length as `x.shape[axis]` it is
assumed to be the window to be applied directly in the Fourier
domain (with dc and low-frequency first).
For any other type of `window`, the function `scipy.signal.get_window`
is called to generate the window.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from ``dx`` to ``dx * len(x) / num``.
If `t` is not None, then it represents the old sample positions,
and the new sample positions will be returned as well as the new
samples.
As noted, `resample` uses FFT transformations, which can be very
slow if the number of input or output samples is large and prime;
see `scipy.fftpack.fft`.
Examples
--------
Note that the end of the resampled data rises to meet the first
sample of the next cycle:
>>> from scipy import signal
>>> x = np.linspace(0, 10, 20, endpoint=False)
>>> y = np.cos(-x**2/6.0)
>>> f = signal.resample(y, 100)
>>> xnew = np.linspace(0, 10, 100, endpoint=False)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'go-', xnew, f, '.-', 10, y[0], 'ro')
>>> plt.legend(['data', 'resampled'], loc='best')
>>> plt.show()
"""
x = asarray(x)
X = fftpack.fft(x, axis=axis)
Nx = x.shape[axis]
if window is not None:
if callable(window):
W = window(fftpack.fftfreq(Nx))
elif isinstance(window, ndarray):
if window.shape != (Nx,):
raise ValueError('window must have the same length as data')
W = window
else:
W = fftpack.ifftshift(get_window(window, Nx))
newshape = [1] * x.ndim
newshape[axis] = len(W)
W.shape = newshape
X = X * W
sl = [slice(None)] * len(x.shape)
newshape = list(x.shape)
newshape[axis] = num
N = int(np.minimum(num, Nx))
Y = zeros(newshape, 'D')
sl[axis] = slice(0, (N + 1) // 2)
Y[sl] = X[sl]
sl[axis] = slice(-(N - 1) // 2, None)
Y[sl] = X[sl]
y = fftpack.ifft(Y, axis=axis) * (float(num) / float(Nx))
if x.dtype.char not in ['F', 'D']:
y = y.real
if t is None:
return y
else:
new_t = arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0]
return y, new_t
def resample_poly(x, up, down, axis=0, window=('kaiser', 5.0)):
"""
Resample `x` along the given axis using polyphase filtering.
The signal `x` is upsampled by the factor `up`, a zero-phase low-pass
FIR filter is applied, and then it is downsampled by the factor `down`.
The resulting sample rate is ``up / down`` times the original sample
rate. Values beyond the boundary of the signal are assumed to be zero
during the filtering step.
Parameters
----------
x : array_like
The data to be resampled.
up : int
The upsampling factor.
down : int
The downsampling factor.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : string or tuple of string and parameter values
Desired window to use to design the low-pass filter. See
`scipy.signal.get_window` for a list of windows and required
parameters.
Returns
-------
resampled_x : array
The resampled array.
See also
--------
decimate
resample
Notes
-----
This polyphase method will likely be faster than the Fourier method
in `scipy.signal.resample` when the number of samples is large and
prime, or when the number of samples is large and `up` and `down`
    share a large greatest common divisor. The length of the FIR
filter used will depend on ``max(up, down) // gcd(up, down)``, and
the number of operations during polyphase filtering will depend on
the filter length and `down` (see `scipy.signal.upfirdn` for details).
The `window` argument is passed directly to `scipy.signal.firwin`
to design a low-pass filter.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from ``dx`` to ``dx * up / float(down)``.
Examples
--------
Note that the end of the resampled data rises to meet the first
sample of the next cycle for the FFT method, and gets closer to zero
for the polyphase method:
>>> from scipy import signal
>>> x = np.linspace(0, 10, 20, endpoint=False)
>>> y = np.cos(-x**2/6.0)
>>> f_fft = signal.resample(y, 100)
>>> f_poly = signal.resample_poly(y, 100, 20)
>>> xnew = np.linspace(0, 10, 100, endpoint=False)
>>> import matplotlib.pyplot as plt
>>> plt.plot(xnew, f_fft, 'b.-', xnew, f_poly, 'r.-')
>>> plt.plot(x, y, 'ko-')
>>> plt.plot(10, y[0], 'bo', 10, 0., 'ro') # boundaries
>>> plt.legend(['resample', 'resamp_poly', 'data'], loc='best')
>>> plt.show()
"""
x = asarray(x)
up = int(up)
down = int(down)
if up < 1 or down < 1:
raise ValueError('up and down must be >= 1')
# Determine our up and down factors
    # Use a rational approximation to save computation time on really long
# signals
g_ = gcd(up, down)
up //= g_
down //= g_
if up == down == 1:
return x.copy()
n_out = (x.shape[axis] * up) // down
# Design a linear-phase low-pass FIR filter
max_rate = max(up, down)
f_c = 1. / max_rate # cutoff of FIR filter (rel. to Nyquist)
half_len = 10 * max_rate # reasonable cutoff for our sinc-like function
h = firwin(2 * half_len + 1, f_c, window=window)
h *= up
# Zero-pad our filter to put the output samples at the center
n_pre_pad = (down - half_len % down)
n_post_pad = 0
n_pre_remove = (half_len + n_pre_pad) // down
# We should rarely need to do this given our filter lengths...
while _output_len(len(h) + n_pre_pad + n_post_pad, x.shape[axis],
up, down) < n_out + n_pre_remove:
n_post_pad += 1
h = np.concatenate((np.zeros(n_pre_pad), h, np.zeros(n_post_pad)))
ufd = _UpFIRDn(h, x.dtype, up, down)
n_pre_remove_end = n_pre_remove + n_out
def apply_remove(x):
"""Apply the upfirdn filter and remove excess"""
return ufd.apply_filter(x)[n_pre_remove:n_pre_remove_end]
y = np.apply_along_axis(apply_remove, axis, x)
return y
def vectorstrength(events, period):
'''
Determine the vector strength of the events corresponding to the given
period.
The vector strength is a measure of phase synchrony, how well the
timing of the events is synchronized to a single period of a periodic
signal.
If multiple periods are used, calculate the vector strength of each.
This is called the "resonating vector strength".
Parameters
----------
events : 1D array_like
An array of time points containing the timing of the events.
period : float or array_like
The period of the signal that the events should synchronize to.
The period is in the same units as `events`. It can also be an array
of periods, in which case the outputs are arrays of the same length.
Returns
-------
strength : float or 1D array
The strength of the synchronization. 1.0 is perfect synchronization
and 0.0 is no synchronization. If `period` is an array, this is also
an array with each element containing the vector strength at the
corresponding period.
phase : float or array
The phase that the events are most strongly synchronized to in radians.
If `period` is an array, this is also an array with each element
containing the phase for the corresponding period.
References
----------
van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector
strength: Auditory system, electric fish, and noise.
Chaos 21, 047508 (2011);
doi: 10.1063/1.3670512
van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises:
biological and mathematical perspectives. Biol Cybern.
2013 Aug;107(4):385-96. doi: 10.1007/s00422-013-0561-7.
van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens
when we vary the "probing" frequency while keeping the spike times
fixed. Biol Cybern. 2013 Aug;107(4):491-94.
doi: 10.1007/s00422-013-0560-8
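    Examples
    --------
    A minimal illustrative check: events that fall exactly on multiples of
    the period are perfectly synchronized, with (numerically) zero phase:
    >>> from scipy import signal
    >>> import numpy as np
    >>> strength, phase = signal.vectorstrength([0.0, 1.0, 2.0, 3.0], 1.0)
    >>> np.allclose([strength, phase], [1.0, 0.0])
    True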
'''
events = asarray(events)
period = asarray(period)
if events.ndim > 1:
raise ValueError('events cannot have dimensions more than 1')
if period.ndim > 1:
raise ValueError('period cannot have dimensions more than 1')
# we need to know later if period was originally a scalar
scalarperiod = not period.ndim
events = atleast_2d(events)
period = atleast_2d(period)
if (period <= 0).any():
raise ValueError('periods must be positive')
# this converts the times to vectors
vectors = exp(dot(2j*pi/period.T, events))
# the vector strength is just the magnitude of the mean of the vectors
# the vector phase is the angle of the mean of the vectors
vectormean = mean(vectors, axis=1)
strength = abs(vectormean)
phase = angle(vectormean)
# if the original period was a scalar, return scalars
if scalarperiod:
strength = strength[0]
phase = phase[0]
return strength, phase
def detrend(data, axis=-1, type='linear', bp=0):
"""
Remove linear trend along axis from data.
Parameters
----------
data : array_like
The input data.
axis : int, optional
The axis along which to detrend the data. By default this is the
last axis (-1).
type : {'linear', 'constant'}, optional
The type of detrending. If ``type == 'linear'`` (default),
the result of a linear least-squares fit to `data` is subtracted
from `data`.
If ``type == 'constant'``, only the mean of `data` is subtracted.
bp : array_like of ints, optional
A sequence of break points. If given, an individual linear fit is
performed for each part of `data` between two break points.
Break points are specified as indices into `data`.
Returns
-------
ret : ndarray
The detrended input data.
Examples
--------
>>> from scipy import signal
>>> randgen = np.random.RandomState(9)
>>> npoints = 1000
>>> noise = randgen.randn(npoints)
>>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
>>> (signal.detrend(x) - noise).max() < 0.01
True
"""
if type not in ['linear', 'l', 'constant', 'c']:
raise ValueError("Trend type must be 'linear' or 'constant'.")
data = asarray(data)
dtype = data.dtype.char
if dtype not in 'dfDF':
dtype = 'd'
if type in ['constant', 'c']:
ret = data - expand_dims(mean(data, axis), axis)
return ret
else:
dshape = data.shape
N = dshape[axis]
bp = sort(unique(r_[0, bp, N]))
if np.any(bp > N):
raise ValueError("Breakpoints must be less than length "
"of data along given axis.")
Nreg = len(bp) - 1
# Restructure data so that axis is along first dimension and
# all other dimensions are collapsed into second dimension
rnk = len(dshape)
if axis < 0:
axis = axis + rnk
newdims = r_[axis, 0:axis, axis + 1:rnk]
newdata = reshape(transpose(data, tuple(newdims)),
(N, prod(dshape, axis=0) // N))
newdata = newdata.copy() # make sure we have a copy
if newdata.dtype.char not in 'dfDF':
newdata = newdata.astype(dtype)
# Find leastsq fit and remove it for each piece
for m in range(Nreg):
Npts = bp[m + 1] - bp[m]
A = ones((Npts, 2), dtype)
A[:, 0] = cast[dtype](arange(1, Npts + 1) * 1.0 / Npts)
sl = slice(bp[m], bp[m + 1])
coef, resids, rank, s = linalg.lstsq(A, newdata[sl])
newdata[sl] = newdata[sl] - dot(A, coef)
# Put data back in original shape.
tdshape = take(dshape, newdims, 0)
ret = reshape(newdata, tuple(tdshape))
vals = list(range(1, rnk))
olddims = vals[:axis] + [0] + vals[axis:]
ret = transpose(ret, tuple(olddims))
return ret
def lfilter_zi(b, a):
"""
Compute an initial state `zi` for the lfilter function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
b, a : array_like (1-D)
The IIR filter coefficients. See `lfilter` for more
information.
Returns
-------
zi : 1-D ndarray
The initial state for the filter.
See Also
--------
lfilter, lfiltic, filtfilt
Notes
-----
A linear filter with order m has a state space representation (A, B, C, D),
for which the output y of the filter can be expressed as::
z(n+1) = A*z(n) + B*x(n)
y(n) = C*z(n) + D*x(n)
where z(n) is a vector of length m, A has shape (m, m), B has shape
(m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is
a scalar). lfilter_zi solves::
zi = A*zi + B
In other words, it finds the initial condition for which the response
to an input of all ones is a constant.
Given the filter coefficients `a` and `b`, the state space matrices
for the transposed direct form II implementation of the linear filter,
which is the implementation used by scipy.signal.lfilter, are::
A = scipy.linalg.companion(a).T
B = b[1:] - a[1:]*b[0]
assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first
divided by a[0].
Examples
--------
The following code creates a lowpass Butterworth filter. Then it
applies that filter to an array whose values are all 1.0; the
output is also all 1.0, as expected for a lowpass filter. If the
`zi` argument of `lfilter` had not been given, the output would have
shown the transient signal.
>>> from numpy import array, ones
>>> from scipy.signal import lfilter, lfilter_zi, butter
>>> b, a = butter(5, 0.25)
>>> zi = lfilter_zi(b, a)
>>> y, zo = lfilter(b, a, ones(10), zi=zi)
>>> y
array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
Another example:
>>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0])
>>> y, zf = lfilter(b, a, x, zi=zi*x[0])
>>> y
array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528,
0.44399389, 0.35505241])
Note that the `zi` argument to `lfilter` was computed using
`lfilter_zi` and scaled by `x[0]`. Then the output `y` has no
transient until the input drops from 0.5 to 0.0.
"""
# FIXME: Can this function be replaced with an appropriate
# use of lfiltic? For example, when b,a = butter(N,Wn),
# lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)).
#
# We could use scipy.signal.normalize, but it uses warnings in
# cases where a ValueError is more appropriate, and it allows
# b to be 2D.
b = np.atleast_1d(b)
if b.ndim != 1:
raise ValueError("Numerator b must be 1-D.")
a = np.atleast_1d(a)
if a.ndim != 1:
raise ValueError("Denominator a must be 1-D.")
while len(a) > 1 and a[0] == 0.0:
a = a[1:]
if a.size < 1:
raise ValueError("There must be at least one nonzero `a` coefficient.")
if a[0] != 1.0:
# Normalize the coefficients so a[0] == 1.
b = b / a[0]
a = a / a[0]
n = max(len(a), len(b))
# Pad a or b with zeros so they are the same length.
if len(a) < n:
a = np.r_[a, np.zeros(n - len(a))]
elif len(b) < n:
b = np.r_[b, np.zeros(n - len(b))]
IminusA = np.eye(n - 1) - linalg.companion(a).T
B = b[1:] - a[1:] * b[0]
# Solve zi = A*zi + B
zi = np.linalg.solve(IminusA, B)
# For future reference: we could also use the following
# explicit formulas to solve the linear system:
#
# zi = np.zeros(n - 1)
# zi[0] = B.sum() / IminusA[:,0].sum()
# asum = 1.0
# csum = 0.0
# for k in range(1,n-1):
# asum += a[k]
# csum += b[k] - a[k]*b[0]
# zi[k] = asum*zi[0] - csum
return zi
def sosfilt_zi(sos):
"""
Compute an initial state `zi` for the sosfilt function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
zi : ndarray
Initial conditions suitable for use with ``sosfilt``, shape
``(n_sections, 2)``.
See Also
--------
sosfilt, zpk2sos
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
Filter a rectangular pulse that begins at time 0, with and without
the use of the `zi` argument of `scipy.signal.sosfilt`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> sos = signal.butter(9, 0.125, output='sos')
>>> zi = signal.sosfilt_zi(sos)
>>> x = (np.arange(250) < 100).astype(int)
>>> f1 = signal.sosfilt(sos, x)
>>> f2, zo = signal.sosfilt(sos, x, zi=zi)
>>> plt.plot(x, 'k--', label='x')
>>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered')
>>> plt.plot(f2, 'g', alpha=0.25, linewidth=4, label='filtered with zi')
>>> plt.legend(loc='best')
>>> plt.show()
"""
sos = np.asarray(sos)
if sos.ndim != 2 or sos.shape[1] != 6:
raise ValueError('sos must be shape (n_sections, 6)')
n_sections = sos.shape[0]
zi = np.empty((n_sections, 2))
scale = 1.0
for section in range(n_sections):
b = sos[section, :3]
a = sos[section, 3:]
zi[section] = scale * lfilter_zi(b, a)
# If H(z) = B(z)/A(z) is this section's transfer function, then
# b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady
# state value of this section's step response.
scale *= b.sum() / a.sum()
return zi
def _filtfilt_gust(b, a, x, axis=-1, irlen=None):
"""Forward-backward IIR filter that uses Gustafsson's method.
Apply the IIR filter defined by `(b,a)` to `x` twice, first forward
then backward, using Gustafsson's initial conditions [1]_.
Let ``y_fb`` be the result of filtering first forward and then backward,
and let ``y_bf`` be the result of filtering first backward then forward.
Gustafsson's method is to compute initial conditions for the forward
pass and the backward pass such that ``y_fb == y_bf``.
Parameters
----------
b : scalar or 1-D ndarray
Numerator coefficients of the filter.
a : scalar or 1-D ndarray
Denominator coefficients of the filter.
x : ndarray
Data to be filtered.
axis : int, optional
Axis of `x` to be filtered. Default is -1.
irlen : int or None, optional
The length of the nonnegligible part of the impulse response.
If `irlen` is None, or if the length of the signal is less than
``2 * irlen``, then no part of the impulse response is ignored.
Returns
-------
y : ndarray
The filtered data.
x0 : ndarray
Initial condition for the forward filter.
x1 : ndarray
Initial condition for the backward filter.
Notes
-----
Typically the return values `x0` and `x1` are not needed by the
caller. The intended use of these return values is in unit tests.
References
----------
    .. [1] F. Gustafsson. Determining the initial states in forward-backward
       filtering. IEEE Transactions on Signal Processing, 46(4):988-992, 1996.
"""
# In the comments, "Gustafsson's paper" and [1] refer to the
# paper referenced in the docstring.
b = np.atleast_1d(b)
a = np.atleast_1d(a)
order = max(len(b), len(a)) - 1
if order == 0:
# The filter is just scalar multiplication, with no state.
scale = (b[0] / a[0])**2
y = scale * x
return y, np.array([]), np.array([])
    if axis != -1 and axis != x.ndim - 1:
# Move the axis containing the data to the end.
x = np.swapaxes(x, axis, x.ndim - 1)
# n is the number of samples in the data to be filtered.
n = x.shape[-1]
if irlen is None or n <= 2*irlen:
m = n
else:
m = irlen
# Create Obs, the observability matrix (called O in the paper).
# This matrix can be interpreted as the operator that propagates
# an arbitrary initial state to the output, assuming the input is
# zero.
# In Gustafsson's paper, the forward and backward filters are not
# necessarily the same, so he has both O_f and O_b. We use the same
# filter in both directions, so we only need O. The same comment
# applies to S below.
Obs = np.zeros((m, order))
zi = np.zeros(order)
zi[0] = 1
Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0]
for k in range(1, order):
Obs[k:, k] = Obs[:-k, 0]
# Obsr is O^R (Gustafsson's notation for row-reversed O)
Obsr = Obs[::-1]
# Create S. S is the matrix that applies the filter to the reversed
# propagated initial conditions. That is,
# out = S.dot(zi)
# is the same as
# tmp, _ = lfilter(b, a, zeros(), zi=zi) # Propagate ICs.
# out = lfilter(b, a, tmp[::-1]) # Reverse and filter.
# Equations (5) & (6) of [1]
S = lfilter(b, a, Obs[::-1], axis=0)
# Sr is S^R (row-reversed S)
Sr = S[::-1]
# M is [(S^R - O), (O^R - S)]
if m == n:
M = np.hstack((Sr - Obs, Obsr - S))
else:
# Matrix described in section IV of [1].
M = np.zeros((2*m, 2*order))
M[:m, :order] = Sr - Obs
M[m:, order:] = Obsr - S
# Naive forward-backward and backward-forward filters.
# These have large transients because the filters use zero initial
# conditions.
y_f = lfilter(b, a, x)
y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1]
y_b = lfilter(b, a, x[..., ::-1])[..., ::-1]
y_bf = lfilter(b, a, y_b)
delta_y_bf_fb = y_bf - y_fb
if m == n:
delta = delta_y_bf_fb
else:
start_m = delta_y_bf_fb[..., :m]
end_m = delta_y_bf_fb[..., -m:]
delta = np.concatenate((start_m, end_m), axis=-1)
# ic_opt holds the "optimal" initial conditions.
# The following code computes the result shown in the formula
# of the paper between equations (6) and (7).
if delta.ndim == 1:
ic_opt = linalg.lstsq(M, delta)[0]
else:
# Reshape delta so it can be used as an array of multiple
# right-hand-sides in linalg.lstsq.
delta2d = delta.reshape(-1, delta.shape[-1]).T
ic_opt0 = linalg.lstsq(M, delta2d)[0].T
ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],))
# Now compute the filtered signal using equation (7) of [1].
# First, form [S^R, O^R] and call it W.
if m == n:
W = np.hstack((Sr, Obsr))
else:
W = np.zeros((2*m, 2*order))
W[:m, :order] = Sr
W[m:, order:] = Obsr
# Equation (7) of [1] says
# Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt]
# `wic` is (almost) the product on the right.
# W has shape (m, 2*order), and ic_opt has shape (..., 2*order),
# so we can't use W.dot(ic_opt). Instead, we dot ic_opt with W.T,
# so wic has shape (..., m).
wic = ic_opt.dot(W.T)
# `wic` is "almost" the product of W and the optimal ICs in equation
# (7)--if we're using a truncated impulse response (m < n), `wic`
# contains only the adjustments required for the ends of the signal.
# Here we form y_opt, taking this into account if necessary.
y_opt = y_fb
if m == n:
y_opt += wic
else:
y_opt[..., :m] += wic[..., :m]
y_opt[..., -m:] += wic[..., -m:]
x0 = ic_opt[..., :order]
x1 = ic_opt[..., -order:]
    if axis != -1 and axis != x.ndim - 1:
# Restore the data axis to its original position.
x0 = np.swapaxes(x0, axis, x.ndim - 1)
x1 = np.swapaxes(x1, axis, x.ndim - 1)
y_opt = np.swapaxes(y_opt, axis, x.ndim - 1)
return y_opt, x0, x1
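# A minimal sketch of exercising Gustafsson's method (hedged example; in normal
# use this private helper is reached through filtfilt(..., method="gust")):
#
#     from scipy.signal import butter, filtfilt
#     b, a = butter(4, 0.1)
#     y = filtfilt(b, a, np.random.randn(500), method="gust", irlen=100)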
def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad',
irlen=None):
"""
A forward-backward filter.
    This function applies a linear filter twice, once forward and once
    backwards. The combined filter has zero phase.
The function provides options for handling the edges of the signal.
When `method` is "pad", the function pads the data along the given axis
in one of three ways: odd, even or constant. The odd and even extensions
have the corresponding symmetry about the end point of the data. The
constant extension extends the data with the values at the end points. On
both the forward and backward passes, the initial condition of the
filter is found by using `lfilter_zi` and scaling it by the end point of
the extended data.
When `method` is "gust", Gustafsson's method [1]_ is used. Initial
conditions are chosen for the forward and backward passes so that the
forward-backward filter gives the same result as the backward-forward
filter.
Parameters
----------
b : (N,) array_like
The numerator coefficient vector of the filter.
a : (N,) array_like
The denominator coefficient vector of the filter. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
The default value is ``3 * max(len(a), len(b))``.
method : str, optional
Determines the method for handling the edges of the signal, either
"pad" or "gust". When `method` is "pad", the signal is padded; the
type of padding is determined by `padtype` and `padlen`, and `irlen`
is ignored. When `method` is "gust", Gustafsson's method is used,
and `padtype` and `padlen` are ignored.
irlen : int or None, optional
When `method` is "gust", `irlen` specifies the length of the
impulse response of the filter. If `irlen` is None, no part
of the impulse response is ignored. For a long signal, specifying
`irlen` can significantly improve the performance of the filter.
Returns
-------
y : ndarray
The filtered output, an array of type numpy.float64 with the same
shape as `x`.
See Also
--------
lfilter_zi, lfilter, lfiltic, savgol_filter, sosfilt
Notes
-----
    The option to use Gustafsson's method was added in scipy version 0.16.0.
References
----------
    .. [1] F. Gustafsson, "Determining the initial states in forward-backward
       filtering", IEEE Transactions on Signal Processing, Vol. 46, pp. 988-992,
       1996.
Examples
--------
The examples will use several functions from `scipy.signal`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
First we create a one second signal that is the sum of two pure sine
waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz.
>>> t = np.linspace(0, 1.0, 2001)
>>> xlow = np.sin(2 * np.pi * 5 * t)
>>> xhigh = np.sin(2 * np.pi * 250 * t)
>>> x = xlow + xhigh
Now create a lowpass Butterworth filter with a cutoff of 0.125 times
the Nyquist rate, or 125 Hz, and apply it to ``x`` with `filtfilt`.
The result should be approximately ``xlow``, with no phase shift.
>>> b, a = signal.butter(8, 0.125)
>>> y = signal.filtfilt(b, a, x, padlen=150)
>>> np.abs(y - xlow).max()
9.1086182074789912e-06
We get a fairly clean result for this artificial example because
the odd extension is exact, and with the moderately long padding,
the filter's transients have dissipated by the time the actual data
is reached. In general, transient effects at the edges are
unavoidable.
The following example demonstrates the option ``method="gust"``.
First, create a filter.
>>> b, a = signal.ellip(4, 0.01, 120, 0.125) # Filter to be applied.
>>> np.random.seed(123456)
`sig` is a random input signal to be filtered.
>>> n = 60
>>> sig = np.random.randn(n)**3 + 3*np.random.randn(n).cumsum()
Apply `filtfilt` to `sig`, once using the Gustafsson method, and
once using padding, and plot the results for comparison.
>>> fgust = signal.filtfilt(b, a, sig, method="gust")
>>> fpad = signal.filtfilt(b, a, sig, padlen=50)
>>> plt.plot(sig, 'k-', label='input')
>>> plt.plot(fgust, 'b-', linewidth=4, label='gust')
>>> plt.plot(fpad, 'c-', linewidth=1.5, label='pad')
>>> plt.legend(loc='best')
>>> plt.show()
The `irlen` argument can be used to improve the performance
of Gustafsson's method.
Estimate the impulse response length of the filter.
>>> z, p, k = signal.tf2zpk(b, a)
>>> eps = 1e-9
>>> r = np.max(np.abs(p))
>>> approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))
>>> approx_impulse_len
137
Apply the filter to a longer signal, with and without the `irlen`
argument. The difference between `y1` and `y2` is small. For long
signals, using `irlen` gives a significant performance improvement.
>>> x = np.random.randn(5000)
>>> y1 = signal.filtfilt(b, a, x, method='gust')
>>> y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len)
>>> print(np.max(np.abs(y1 - y2)))
1.80056858312e-10
"""
b = np.atleast_1d(b)
a = np.atleast_1d(a)
x = np.asarray(x)
if method not in ["pad", "gust"]:
raise ValueError("method must be 'pad' or 'gust'.")
if method == "gust":
y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)
return y
# `method` is "pad"...
ntaps = max(len(a), len(b))
if padtype not in ['even', 'odd', 'constant', None]:
raise ValueError(("Unknown value '%s' given to padtype. padtype "
"must be 'even', 'odd', 'constant', or None.") %
padtype)
if padtype is None:
padlen = 0
if padlen is None:
# Original padding; preserved for backwards compatibility.
edge = ntaps * 3
else:
edge = padlen
# x's 'axis' dimension must be bigger than edge.
if x.shape[axis] <= edge:
raise ValueError("The length of the input vector x must be at least "
"padlen, which is %d." % edge)
if padtype is not None and edge > 0:
# Make an extension of length `edge` at each
# end of the input array.
if padtype == 'even':
ext = even_ext(x, edge, axis=axis)
elif padtype == 'odd':
ext = odd_ext(x, edge, axis=axis)
else:
ext = const_ext(x, edge, axis=axis)
else:
ext = x
# Get the steady state of the filter's step response.
zi = lfilter_zi(b, a)
# Reshape zi and create x0 so that zi*x0 broadcasts
# to the correct value for the 'zi' keyword argument
# to lfilter.
zi_shape = [1] * x.ndim
zi_shape[axis] = zi.size
zi = np.reshape(zi, zi_shape)
x0 = axis_slice(ext, stop=1, axis=axis)
# Forward filter.
(y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0)
# Backward filter.
# Create y0 so zi*y0 broadcasts appropriately.
y0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)
# Reverse y.
y = axis_reverse(y, axis=axis)
if edge > 0:
# Slice the actual signal from the extended signal.
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
def sosfilt(sos, x, axis=-1, zi=None):
"""
Filter data along one dimension using cascaded second-order sections
Filter a data sequence, `x`, using a digital IIR filter defined by
`sos`. This is implemented by performing `lfilter` for each
second-order section. See `lfilter` for details.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the cascaded filter delays. It is a (at
least 2D) vector of shape ``(n_sections, ..., 2, ...)``, where
``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]``
replaced by 2. If `zi` is None or is not given then initial rest
(i.e. all zeros) is assumed.
Note that these initial conditions are *not* the same as the initial
conditions given by `lfiltic` or `lfilter_zi`.
Returns
-------
y : ndarray
The output of the digital filter.
zf : ndarray, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
See Also
--------
zpk2sos, sos2zpk, sosfilt_zi
Notes
-----
The filter function is implemented as a series of second-order filters
with direct-form II transposed structure. It is designed to minimize
numerical precision errors for high-order filters.
.. versionadded:: 0.16.0
Examples
--------
Plot a 13th-order filter's impulse response using both `lfilter` and
`sosfilt`, showing the instability that results from trying to do a
13th-order filter in a single stage (the numerical error pushes some poles
outside of the unit circle):
>>> import matplotlib.pyplot as plt
>>> from scipy import signal
>>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba')
>>> sos = signal.ellip(13, 0.009, 80, 0.05, output='sos')
>>> x = np.zeros(700)
>>> x[0] = 1.
>>> y_tf = signal.lfilter(b, a, x)
>>> y_sos = signal.sosfilt(sos, x)
>>> plt.plot(y_tf, 'r', label='TF')
>>> plt.plot(y_sos, 'k', label='SOS')
>>> plt.legend(loc='best')
>>> plt.show()
"""
x = np.asarray(x)
    sos = np.atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
use_zi = zi is not None
if use_zi:
zi = np.asarray(zi)
x_zi_shape = list(x.shape)
x_zi_shape[axis] = 2
x_zi_shape = tuple([n_sections] + x_zi_shape)
if zi.shape != x_zi_shape:
raise ValueError('Invalid zi shape. With axis=%r, an input with '
'shape %r, and an sos array with %d sections, zi '
'must have shape %r.' %
(axis, x.shape, n_sections, x_zi_shape))
        zf = np.zeros_like(zi)
for section in range(n_sections):
if use_zi:
x, zf[section] = lfilter(sos[section, :3], sos[section, 3:],
x, axis, zi=zi[section])
else:
x = lfilter(sos[section, :3], sos[section, 3:], x, axis)
out = (x, zf) if use_zi else x
return out
from scipy.signal.filter_design import cheby1
from scipy.signal.fir_filter_design import firwin
def decimate(x, q, n=None, ftype='iir', axis=-1):
"""
    Downsample the signal after applying an anti-aliasing filter.
    By default, an order 8 Chebyshev type I filter is used. An order-30 FIR
    filter with a Hamming window is used if `ftype` is 'fir'.
Parameters
----------
x : ndarray
The signal to be downsampled, as an N-dimensional array.
q : int
The downsampling factor.
n : int, optional
The order of the filter (1 less than the length for 'fir').
ftype : str {'iir', 'fir'}, optional
The type of the lowpass filter.
axis : int, optional
The axis along which to decimate.
Returns
-------
y : ndarray
The down-sampled signal.
See Also
--------
resample
resample_poly
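    Examples
    --------
    A minimal sketch (the input here is an arbitrary ramp signal):
    >>> from scipy import signal
    >>> x = np.arange(40, dtype=float)
    >>> y = signal.decimate(x, 4)
    >>> y.shape
    (10,)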
"""
if not isinstance(q, int):
raise TypeError("q must be an integer")
if n is None:
if ftype == 'fir':
n = 30
else:
n = 8
if ftype == 'fir':
b = firwin(n + 1, 1. / q, window='hamming')
a = 1.
else:
b, a = cheby1(n, 0.05, 0.8 / q)
y = lfilter(b, a, x, axis=axis)
sl = [slice(None)] * y.ndim
sl[axis] = slice(None, None, q)
    return y[tuple(sl)]
|
the-stack_0_18866 | #!/usr/bin/env python
#
# Copyright (C) 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import collections
import datetime
import json
import os
import sys
from simpleperf_report_lib import ReportLib
from utils import log_info, log_exit
from utils import Addr2Nearestline, get_script_dir, Objdump, open_report_in_browser
from utils import SourceFileSearcher
MAX_CALLSTACK_LENGTH = 750
class HtmlWriter(object):
def __init__(self, output_path):
self.fh = open(output_path, 'w')
self.tag_stack = []
def close(self):
self.fh.close()
def open_tag(self, tag, **attrs):
attr_str = ''
for key in attrs:
attr_str += ' %s="%s"' % (key, attrs[key])
self.fh.write('<%s%s>' % (tag, attr_str))
self.tag_stack.append(tag)
return self
def close_tag(self, tag=None):
if tag:
assert tag == self.tag_stack[-1]
self.fh.write('</%s>\n' % self.tag_stack.pop())
def add(self, text):
self.fh.write(text)
return self
def add_file(self, file_path):
file_path = os.path.join(get_script_dir(), file_path)
with open(file_path, 'r') as f:
self.add(f.read())
return self
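# A minimal usage sketch for HtmlWriter (hedged example; the output path is
# hypothetical):
#
#     hw = HtmlWriter('/tmp/example.html')
#     hw.open_tag('p').add('hello').close_tag('p')
#     hw.close()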
def modify_text_for_html(text):
    return text.replace('>', '&gt;').replace('<', '&lt;')
class EventScope(object):
def __init__(self, name):
self.name = name
self.processes = {} # map from pid to ProcessScope
self.sample_count = 0
self.event_count = 0
def get_process(self, pid):
process = self.processes.get(pid)
if not process:
process = self.processes[pid] = ProcessScope(pid)
return process
def get_sample_info(self, gen_addr_hit_map):
result = {}
result['eventName'] = self.name
result['eventCount'] = self.event_count
processes = sorted(self.processes.values(), key=lambda a: a.event_count, reverse=True)
result['processes'] = [process.get_sample_info(gen_addr_hit_map)
for process in processes]
return result
@property
def threads(self):
for process in self.processes.values():
for thread in process.threads.values():
yield thread
@property
def libraries(self):
for process in self.processes.values():
for thread in process.threads.values():
for lib in thread.libs.values():
yield lib
class ProcessScope(object):
def __init__(self, pid):
self.pid = pid
self.name = ''
self.event_count = 0
self.threads = {} # map from tid to ThreadScope
def get_thread(self, tid, thread_name):
thread = self.threads.get(tid)
if not thread:
thread = self.threads[tid] = ThreadScope(tid)
thread.name = thread_name
if self.pid == tid:
self.name = thread_name
return thread
def get_sample_info(self, gen_addr_hit_map):
result = {}
result['pid'] = self.pid
result['eventCount'] = self.event_count
threads = sorted(self.threads.values(), key=lambda a: a.event_count, reverse=True)
result['threads'] = [thread.get_sample_info(gen_addr_hit_map)
for thread in threads]
return result
class ThreadScope(object):
def __init__(self, tid):
self.tid = tid
self.name = ''
self.event_count = 0
self.sample_count = 0
self.libs = {} # map from lib_id to LibScope
self.call_graph = CallNode(-1)
self.reverse_call_graph = CallNode(-1)
def add_callstack(self, event_count, callstack, build_addr_hit_map):
""" callstack is a list of tuple (lib_id, func_id, addr).
For each i > 0, callstack[i] calls callstack[i-1]."""
hit_func_ids = set()
for i, (lib_id, func_id, addr) in enumerate(callstack):
            # When a callstack contains a recursive function, count each function only once.
if func_id in hit_func_ids:
continue
hit_func_ids.add(func_id)
lib = self.libs.get(lib_id)
if not lib:
lib = self.libs[lib_id] = LibScope(lib_id)
function = lib.get_function(func_id)
function.subtree_event_count += event_count
if i == 0:
lib.event_count += event_count
function.event_count += event_count
function.sample_count += 1
if build_addr_hit_map:
function.build_addr_hit_map(addr, event_count if i == 0 else 0, event_count)
# build call graph and reverse call graph
node = self.call_graph
for item in reversed(callstack):
node = node.get_child(item[1])
node.event_count += event_count
node = self.reverse_call_graph
for item in callstack:
node = node.get_child(item[1])
node.event_count += event_count
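    # For illustration (hypothetical ids): a sample whose leaf frame is func 0,
    # called by func 1, which in turn is called by func 2, would be passed as
    #     callstack = [(0, 0, 0x100), (0, 1, 0x200), (0, 2, 0x300)]
    # i.e. the sampled (leaf) frame first and the outermost caller last.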
def update_subtree_event_count(self):
self.call_graph.update_subtree_event_count()
self.reverse_call_graph.update_subtree_event_count()
def limit_percents(self, min_func_limit, min_callchain_percent, hit_func_ids):
for lib in self.libs.values():
to_del_funcs = []
for function in lib.functions.values():
if function.subtree_event_count < min_func_limit:
to_del_funcs.append(function.func_id)
else:
hit_func_ids.add(function.func_id)
for func_id in to_del_funcs:
del lib.functions[func_id]
min_limit = min_callchain_percent * 0.01 * self.call_graph.subtree_event_count
self.call_graph.cut_edge(min_limit, hit_func_ids)
self.reverse_call_graph.cut_edge(min_limit, hit_func_ids)
def get_sample_info(self, gen_addr_hit_map):
result = {}
result['tid'] = self.tid
result['eventCount'] = self.event_count
result['sampleCount'] = self.sample_count
result['libs'] = [lib.gen_sample_info(gen_addr_hit_map)
for lib in self.libs.values()]
result['g'] = self.call_graph.gen_sample_info()
result['rg'] = self.reverse_call_graph.gen_sample_info()
return result
class LibScope(object):
def __init__(self, lib_id):
self.lib_id = lib_id
self.event_count = 0
self.functions = {} # map from func_id to FunctionScope.
def get_function(self, func_id):
function = self.functions.get(func_id)
if not function:
function = self.functions[func_id] = FunctionScope(func_id)
return function
def gen_sample_info(self, gen_addr_hit_map):
result = {}
result['libId'] = self.lib_id
result['eventCount'] = self.event_count
result['functions'] = [func.gen_sample_info(gen_addr_hit_map)
for func in self.functions.values()]
return result
class FunctionScope(object):
def __init__(self, func_id):
self.func_id = func_id
self.sample_count = 0
self.event_count = 0
self.subtree_event_count = 0
self.addr_hit_map = None # map from addr to [event_count, subtree_event_count].
# map from (source_file_id, line) to [event_count, subtree_event_count].
self.line_hit_map = None
def build_addr_hit_map(self, addr, event_count, subtree_event_count):
if self.addr_hit_map is None:
self.addr_hit_map = {}
count_info = self.addr_hit_map.get(addr)
if count_info is None:
self.addr_hit_map[addr] = [event_count, subtree_event_count]
else:
count_info[0] += event_count
count_info[1] += subtree_event_count
def build_line_hit_map(self, source_file_id, line, event_count, subtree_event_count):
if self.line_hit_map is None:
self.line_hit_map = {}
key = (source_file_id, line)
count_info = self.line_hit_map.get(key)
if count_info is None:
self.line_hit_map[key] = [event_count, subtree_event_count]
else:
count_info[0] += event_count
count_info[1] += subtree_event_count
def gen_sample_info(self, gen_addr_hit_map):
result = {}
result['f'] = self.func_id
result['c'] = [self.sample_count, self.event_count, self.subtree_event_count]
if self.line_hit_map:
items = []
for key in self.line_hit_map:
count_info = self.line_hit_map[key]
item = {'f': key[0], 'l': key[1], 'e': count_info[0], 's': count_info[1]}
items.append(item)
result['s'] = items
if gen_addr_hit_map and self.addr_hit_map:
items = []
for addr in sorted(self.addr_hit_map):
count_info = self.addr_hit_map[addr]
items.append({'a': addr, 'e': count_info[0], 's': count_info[1]})
result['a'] = items
return result
class CallNode(object):
def __init__(self, func_id):
self.event_count = 0
self.subtree_event_count = 0
self.func_id = func_id
self.children = collections.OrderedDict() # map from func_id to CallNode
def get_child(self, func_id):
child = self.children.get(func_id)
if not child:
child = self.children[func_id] = CallNode(func_id)
return child
def update_subtree_event_count(self):
self.subtree_event_count = self.event_count
for child in self.children.values():
self.subtree_event_count += child.update_subtree_event_count()
return self.subtree_event_count
def cut_edge(self, min_limit, hit_func_ids):
hit_func_ids.add(self.func_id)
to_del_children = []
for key in self.children:
child = self.children[key]
if child.subtree_event_count < min_limit:
to_del_children.append(key)
else:
child.cut_edge(min_limit, hit_func_ids)
for key in to_del_children:
del self.children[key]
def gen_sample_info(self):
result = {}
result['e'] = self.event_count
result['s'] = self.subtree_event_count
result['f'] = self.func_id
result['c'] = [child.gen_sample_info() for child in self.children.values()]
return result
class LibSet(object):
""" Collection of shared libraries used in perf.data. """
def __init__(self):
self.lib_name_to_id = {}
self.lib_id_to_name = []
def get_lib_id(self, lib_name):
lib_id = self.lib_name_to_id.get(lib_name)
if lib_id is None:
lib_id = len(self.lib_id_to_name)
self.lib_name_to_id[lib_name] = lib_id
self.lib_id_to_name.append(lib_name)
return lib_id
def get_lib_name(self, lib_id):
return self.lib_id_to_name[lib_id]
class Function(object):
""" Represent a function in a shared library. """
def __init__(self, lib_id, func_name, func_id, start_addr, addr_len):
self.lib_id = lib_id
self.func_name = func_name
self.func_id = func_id
self.start_addr = start_addr
self.addr_len = addr_len
self.source_info = None
self.disassembly = None
class FunctionSet(object):
""" Collection of functions used in perf.data. """
def __init__(self):
self.name_to_func = {}
self.id_to_func = {}
def get_func_id(self, lib_id, symbol):
key = (lib_id, symbol.symbol_name)
function = self.name_to_func.get(key)
if function is None:
func_id = len(self.id_to_func)
function = Function(lib_id, symbol.symbol_name, func_id, symbol.symbol_addr,
symbol.symbol_len)
self.name_to_func[key] = function
self.id_to_func[func_id] = function
return function.func_id
def trim_functions(self, left_func_ids):
""" Remove functions excepts those in left_func_ids. """
for function in self.name_to_func.values():
if function.func_id not in left_func_ids:
del self.id_to_func[function.func_id]
# name_to_func will not be used.
self.name_to_func = None
class SourceFile(object):
""" A source file containing source code hit by samples. """
def __init__(self, file_id, abstract_path):
self.file_id = file_id
self.abstract_path = abstract_path # path reported by addr2line
self.real_path = None # file path in the file system
self.requested_lines = set()
self.line_to_code = {} # map from line to code in that line.
def request_lines(self, start_line, end_line):
self.requested_lines |= set(range(start_line, end_line + 1))
def add_source_code(self, real_path):
self.real_path = real_path
with open(real_path, 'r') as f:
source_code = f.readlines()
max_line = len(source_code)
for line in self.requested_lines:
if line > 0 and line <= max_line:
self.line_to_code[line] = source_code[line - 1]
# requested_lines is no longer used.
self.requested_lines = None
class SourceFileSet(object):
""" Collection of source files. """
def __init__(self):
self.path_to_source_files = {} # map from file path to SourceFile.
def get_source_file(self, file_path):
source_file = self.path_to_source_files.get(file_path)
if source_file is None:
source_file = SourceFile(len(self.path_to_source_files), file_path)
self.path_to_source_files[file_path] = source_file
return source_file
def load_source_code(self, source_dirs):
file_searcher = SourceFileSearcher(source_dirs)
for source_file in self.path_to_source_files.values():
real_path = file_searcher.get_real_path(source_file.abstract_path)
if real_path:
source_file.add_source_code(real_path)
class RecordData(object):
"""RecordData reads perf.data, and generates data used by report.js in json format.
All generated items are listed as below:
1. recordTime: string
2. machineType: string
3. androidVersion: string
4. recordCmdline: string
5. totalSamples: int
6. processNames: map from pid to processName.
7. threadNames: map from tid to threadName.
8. libList: an array of libNames, indexed by libId.
9. functionMap: map from functionId to funcData.
funcData = {
l: libId
f: functionName
s: [sourceFileId, startLine, endLine] [optional]
d: [(disassembly, addr)] [optional]
}
10. sampleInfo = [eventInfo]
eventInfo = {
eventName
eventCount
processes: [processInfo]
}
processInfo = {
pid
eventCount
threads: [threadInfo]
}
threadInfo = {
tid
eventCount
sampleCount
libs: [libInfo],
g: callGraph,
rg: reverseCallgraph
}
libInfo = {
libId,
eventCount,
functions: [funcInfo]
}
funcInfo = {
f: functionId
c: [sampleCount, eventCount, subTreeEventCount]
s: [sourceCodeInfo] [optional]
a: [addrInfo] (sorted by addrInfo.addr) [optional]
}
callGraph and reverseCallGraph are both of type CallNode.
callGraph shows how a function calls other functions.
reverseCallGraph shows how a function is called by other functions.
CallNode {
e: selfEventCount
s: subTreeEventCount
f: functionId
c: [CallNode] # children
}
sourceCodeInfo {
f: sourceFileId
l: line
e: eventCount
s: subtreeEventCount
}
addrInfo {
a: addr
e: eventCount
s: subtreeEventCount
}
11. sourceFiles: an array of sourceFile, indexed by sourceFileId.
sourceFile {
path
code: # a map from line to code for that line.
}
"""
def __init__(self, binary_cache_path, ndk_path, build_addr_hit_map):
self.binary_cache_path = binary_cache_path
self.ndk_path = ndk_path
self.build_addr_hit_map = build_addr_hit_map
self.meta_info = None
self.cmdline = None
self.arch = None
self.events = {}
self.libs = LibSet()
self.functions = FunctionSet()
self.total_samples = 0
self.source_files = SourceFileSet()
self.gen_addr_hit_map_in_record_info = False
def load_record_file(self, record_file, show_art_frames):
lib = ReportLib()
lib.SetRecordFile(record_file)
        # If we don't show the ip for unknown symbols, their percentages can
        # accumulate into a single large entry that ranks first in the sample table.
lib.ShowIpForUnknownSymbol()
if show_art_frames:
lib.ShowArtFrames()
if self.binary_cache_path:
lib.SetSymfs(self.binary_cache_path)
self.meta_info = lib.MetaInfo()
self.cmdline = lib.GetRecordCmd()
self.arch = lib.GetArch()
while True:
raw_sample = lib.GetNextSample()
if not raw_sample:
lib.Close()
break
raw_event = lib.GetEventOfCurrentSample()
symbol = lib.GetSymbolOfCurrentSample()
callchain = lib.GetCallChainOfCurrentSample()
event = self._get_event(raw_event.name)
self.total_samples += 1
event.sample_count += 1
event.event_count += raw_sample.period
process = event.get_process(raw_sample.pid)
process.event_count += raw_sample.period
thread = process.get_thread(raw_sample.tid, raw_sample.thread_comm)
thread.event_count += raw_sample.period
thread.sample_count += 1
lib_id = self.libs.get_lib_id(symbol.dso_name)
func_id = self.functions.get_func_id(lib_id, symbol)
callstack = [(lib_id, func_id, symbol.vaddr_in_file)]
for i in range(callchain.nr):
symbol = callchain.entries[i].symbol
lib_id = self.libs.get_lib_id(symbol.dso_name)
func_id = self.functions.get_func_id(lib_id, symbol)
callstack.append((lib_id, func_id, symbol.vaddr_in_file))
if len(callstack) > MAX_CALLSTACK_LENGTH:
callstack = callstack[:MAX_CALLSTACK_LENGTH]
thread.add_callstack(raw_sample.period, callstack, self.build_addr_hit_map)
for event in self.events.values():
for thread in event.threads:
thread.update_subtree_event_count()
def limit_percents(self, min_func_percent, min_callchain_percent):
hit_func_ids = set()
for event in self.events.values():
min_limit = event.event_count * min_func_percent * 0.01
for process in event.processes.values():
to_del_threads = []
for thread in process.threads.values():
if thread.call_graph.subtree_event_count < min_limit:
to_del_threads.append(thread.tid)
else:
thread.limit_percents(min_limit, min_callchain_percent, hit_func_ids)
for thread in to_del_threads:
del process.threads[thread]
self.functions.trim_functions(hit_func_ids)
def _get_event(self, event_name):
if event_name not in self.events:
self.events[event_name] = EventScope(event_name)
return self.events[event_name]
def add_source_code(self, source_dirs, filter_lib):
""" Collect source code information:
1. Find line ranges for each function in FunctionSet.
2. Find line for each addr in FunctionScope.addr_hit_map.
3. Collect needed source code in SourceFileSet.
"""
addr2line = Addr2Nearestline(self.ndk_path, self.binary_cache_path, False)
# Request line range for each function.
for function in self.functions.id_to_func.values():
if function.func_name == 'unknown':
continue
lib_name = self.libs.get_lib_name(function.lib_id)
if filter_lib(lib_name):
addr2line.add_addr(lib_name, function.start_addr, function.start_addr)
addr2line.add_addr(lib_name, function.start_addr,
function.start_addr + function.addr_len - 1)
# Request line for each addr in FunctionScope.addr_hit_map.
for event in self.events.values():
for lib in event.libraries:
lib_name = self.libs.get_lib_name(lib.lib_id)
if filter_lib(lib_name):
for function in lib.functions.values():
func_addr = self.functions.id_to_func[function.func_id].start_addr
for addr in function.addr_hit_map:
addr2line.add_addr(lib_name, func_addr, addr)
addr2line.convert_addrs_to_lines()
# Set line range for each function.
for function in self.functions.id_to_func.values():
if function.func_name == 'unknown':
continue
dso = addr2line.get_dso(self.libs.get_lib_name(function.lib_id))
if not dso:
continue
start_source = addr2line.get_addr_source(dso, function.start_addr)
end_source = addr2line.get_addr_source(dso, function.start_addr + function.addr_len - 1)
if not start_source or not end_source:
continue
start_file_path, start_line = start_source[-1]
end_file_path, end_line = end_source[-1]
if start_file_path != end_file_path or start_line > end_line:
continue
source_file = self.source_files.get_source_file(start_file_path)
source_file.request_lines(start_line, end_line)
function.source_info = (source_file.file_id, start_line, end_line)
# Build FunctionScope.line_hit_map.
for event in self.events.values():
for lib in event.libraries:
dso = addr2line.get_dso(self.libs.get_lib_name(lib.lib_id))
if not dso:
continue
for function in lib.functions.values():
for addr in function.addr_hit_map:
source = addr2line.get_addr_source(dso, addr)
if not source:
continue
for file_path, line in source:
source_file = self.source_files.get_source_file(file_path)
# Show [line - 5, line + 5] of the line hit by a sample.
source_file.request_lines(line - 5, line + 5)
count_info = function.addr_hit_map[addr]
function.build_line_hit_map(source_file.file_id, line, count_info[0],
count_info[1])
# Collect needed source code in SourceFileSet.
self.source_files.load_source_code(source_dirs)
def add_disassembly(self, filter_lib):
""" Collect disassembly information:
1. Use objdump to collect disassembly for each function in FunctionSet.
2. Set flag to dump addr_hit_map when generating record info.
"""
objdump = Objdump(self.ndk_path, self.binary_cache_path)
cur_lib_name = None
dso_info = None
for function in sorted(self.functions.id_to_func.values(), key=lambda a: a.lib_id):
if function.func_name == 'unknown':
continue
lib_name = self.libs.get_lib_name(function.lib_id)
if lib_name != cur_lib_name:
cur_lib_name = lib_name
if filter_lib(lib_name):
dso_info = objdump.get_dso_info(lib_name)
else:
dso_info = None
if dso_info:
log_info('Disassemble %s' % dso_info[0])
if dso_info:
code = objdump.disassemble_code(dso_info, function.start_addr, function.addr_len)
function.disassembly = code
self.gen_addr_hit_map_in_record_info = True
def gen_record_info(self):
record_info = {}
timestamp = self.meta_info.get('timestamp')
if timestamp:
t = datetime.datetime.fromtimestamp(int(timestamp))
else:
t = datetime.datetime.now()
record_info['recordTime'] = t.strftime('%Y-%m-%d (%A) %H:%M:%S')
product_props = self.meta_info.get('product_props')
machine_type = self.arch
if product_props:
manufacturer, model, name = product_props.split(':')
machine_type = '%s (%s) by %s, arch %s' % (model, name, manufacturer, self.arch)
record_info['machineType'] = machine_type
record_info['androidVersion'] = self.meta_info.get('android_version', '')
record_info['recordCmdline'] = self.cmdline
record_info['totalSamples'] = self.total_samples
record_info['processNames'] = self._gen_process_names()
record_info['threadNames'] = self._gen_thread_names()
record_info['libList'] = self._gen_lib_list()
record_info['functionMap'] = self._gen_function_map()
record_info['sampleInfo'] = self._gen_sample_info()
record_info['sourceFiles'] = self._gen_source_files()
return record_info
def _gen_process_names(self):
process_names = {}
for event in self.events.values():
for process in event.processes.values():
process_names[process.pid] = process.name
return process_names
def _gen_thread_names(self):
thread_names = {}
for event in self.events.values():
for process in event.processes.values():
for thread in process.threads.values():
thread_names[thread.tid] = thread.name
return thread_names
def _gen_lib_list(self):
return [modify_text_for_html(x) for x in self.libs.lib_id_to_name]
def _gen_function_map(self):
func_map = {}
for func_id in sorted(self.functions.id_to_func):
function = self.functions.id_to_func[func_id]
func_data = {}
func_data['l'] = function.lib_id
func_data['f'] = modify_text_for_html(function.func_name)
if function.source_info:
func_data['s'] = function.source_info
if function.disassembly:
disassembly_list = []
for code, addr in function.disassembly:
disassembly_list.append([modify_text_for_html(code), addr])
func_data['d'] = disassembly_list
func_map[func_id] = func_data
return func_map
def _gen_sample_info(self):
return [event.get_sample_info(self.gen_addr_hit_map_in_record_info)
for event in self.events.values()]
def _gen_source_files(self):
source_files = sorted(self.source_files.path_to_source_files.values(),
key=lambda x: x.file_id)
file_list = []
for source_file in source_files:
file_data = {}
if not source_file.real_path:
file_data['path'] = ''
file_data['code'] = {}
else:
file_data['path'] = source_file.real_path
code_map = {}
for line in source_file.line_to_code:
code_map[line] = modify_text_for_html(source_file.line_to_code[line])
file_data['code'] = code_map
file_list.append(file_data)
return file_list
URLS = {
'jquery': 'https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js',
'bootstrap4-css': 'https://stackpath.bootstrapcdn.com/bootstrap/4.1.2/css/bootstrap.min.css',
'bootstrap4-popper':
'https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js',
'bootstrap4': 'https://stackpath.bootstrapcdn.com/bootstrap/4.1.2/js/bootstrap.min.js',
'dataTable': 'https://cdn.datatables.net/1.10.19/js/jquery.dataTables.min.js',
'dataTable-bootstrap4': 'https://cdn.datatables.net/1.10.19/js/dataTables.bootstrap4.min.js',
'dataTable-css': 'https://cdn.datatables.net/1.10.19/css/dataTables.bootstrap4.min.css',
'gstatic-charts': 'https://www.gstatic.com/charts/loader.js',
}
class ReportGenerator(object):
def __init__(self, html_path):
self.hw = HtmlWriter(html_path)
self.hw.open_tag('html')
self.hw.open_tag('head')
for css in ['bootstrap4-css', 'dataTable-css']:
self.hw.open_tag('link', rel='stylesheet', type='text/css', href=URLS[css]).close_tag()
for js in ['jquery', 'bootstrap4-popper', 'bootstrap4', 'dataTable', 'dataTable-bootstrap4',
'gstatic-charts']:
self.hw.open_tag('script', src=URLS[js]).close_tag()
self.hw.open_tag('script').add(
"google.charts.load('current', {'packages': ['corechart', 'table']});").close_tag()
self.hw.open_tag('style', type='text/css').add("""
.colForLine { width: 50px; }
.colForCount { width: 100px; }
.tableCell { font-size: 17px; }
.boldTableCell { font-weight: bold; font-size: 17px; }
""").close_tag()
self.hw.close_tag('head')
self.hw.open_tag('body')
self.record_info = {}
def write_content_div(self):
self.hw.open_tag('div', id='report_content').close_tag()
def write_record_data(self, record_data):
self.hw.open_tag('script', id='record_data', type='application/json')
self.hw.add(json.dumps(record_data))
self.hw.close_tag()
def write_script(self):
self.hw.open_tag('script').add_file('report_html.js').close_tag()
def finish(self):
self.hw.close_tag('body')
self.hw.close_tag('html')
self.hw.close()
def main():
sys.setrecursionlimit(MAX_CALLSTACK_LENGTH * 2 + 50)
parser = argparse.ArgumentParser(description='report profiling data')
parser.add_argument('-i', '--record_file', nargs='+', default=['perf.data'], help="""
Set profiling data file to report. Default is perf.data.""")
parser.add_argument('-o', '--report_path', default='report.html', help="""
Set output html file. Default is report.html.""")
parser.add_argument('--min_func_percent', default=0.01, type=float, help="""
Set min percentage of functions shown in the report.
For example, when set to 0.01, only functions taking >= 0.01%% of total
event count are collected in the report. Default is 0.01.""")
parser.add_argument('--min_callchain_percent', default=0.01, type=float, help="""
Set min percentage of callchains shown in the report.
It is used to limit nodes shown in the function flamegraph. For example,
when set to 0.01, only callchains taking >= 0.01%% of the event count of
the starting function are collected in the report. Default is 0.01.""")
parser.add_argument('--add_source_code', action='store_true', help='Add source code.')
parser.add_argument('--source_dirs', nargs='+', help='Source code directories.')
parser.add_argument('--add_disassembly', action='store_true', help='Add disassembled code.')
parser.add_argument('--binary_filter', nargs='+', help="""Annotate source code and disassembly
only for selected binaries.""")
parser.add_argument('--ndk_path', nargs=1, help='Find tools in the ndk path.')
parser.add_argument('--no_browser', action='store_true', help="Don't open report in browser.")
parser.add_argument('--show_art_frames', action='store_true',
help='Show frames of internal methods in the ART Java interpreter.')
args = parser.parse_args()
# 1. Process args.
binary_cache_path = 'binary_cache'
if not os.path.isdir(binary_cache_path):
if args.add_source_code or args.add_disassembly:
log_exit("""binary_cache/ doesn't exist. Can't add source code or disassembled code
without collected binaries. Please run binary_cache_builder.py to
collect binaries for current profiling data, or run app_profiler.py
without -nb option.""")
binary_cache_path = None
if args.add_source_code and not args.source_dirs:
log_exit('--source_dirs is needed to add source code.')
build_addr_hit_map = args.add_source_code or args.add_disassembly
ndk_path = None if not args.ndk_path else args.ndk_path[0]
# 2. Produce record data.
record_data = RecordData(binary_cache_path, ndk_path, build_addr_hit_map)
for record_file in args.record_file:
record_data.load_record_file(record_file, args.show_art_frames)
record_data.limit_percents(args.min_func_percent, args.min_callchain_percent)
def filter_lib(lib_name):
if not args.binary_filter:
return True
for binary in args.binary_filter:
if binary in lib_name:
return True
return False
if args.add_source_code:
record_data.add_source_code(args.source_dirs, filter_lib)
if args.add_disassembly:
record_data.add_disassembly(filter_lib)
# 3. Generate report html.
report_generator = ReportGenerator(args.report_path)
report_generator.write_script()
report_generator.write_content_div()
report_generator.write_record_data(record_data.gen_record_info())
report_generator.finish()
if not args.no_browser:
open_report_in_browser(args.report_path)
log_info("Report generated at '%s'." % args.report_path)
if __name__ == '__main__':
main()
|
the-stack_0_18867 | import numpy as np
import cv2
from .vector3d import Vector3D
class Vector3DStable(Vector3D):
"""Represents a 3D vector stabilized"""
def __init__(self,
x=.0, y=.0, z=.0,
vx=.0, vy=.0, vz=.0,
ax=.0, ay=.0, az=.0,
vmax=0.0001, amax=0.00001,
p_cov=.03, m_cov=.01, use_accel=True, modulo=None):
self.x = x
self.y = y
self.z = z
self.vx = vx
        self.vy = vy
self.vz = vz
self.vmax = vmax
self.amax = amax
self.use_accel = use_accel
if self.use_accel is True:
self.ax = ax
self.ay = ay
self.az = az
self.filter = cv2.KalmanFilter(9, 3)
else:
self.ax = .0
self.ay = .0
self.az = .0
self.filter = cv2.KalmanFilter(6, 3)
self.filter.statePost = self.to_array()
self.use_accel = use_accel
self.modulo = modulo
if self.use_accel is True:
self.filter.measurementMatrix = np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0]], np.float32)
else:
self.filter.measurementMatrix = np.array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0]], np.float32)
self.__update_noise_cov(p_cov, m_cov)
self.last_update = cv2.getTickCount()
def from_array(self, array):
""" """
if self.use_accel is True:
assert array.shape == (9, 1)
else:
assert array.shape == (6, 1)
self.x = array[0][0]
self.y = array[1][0]
self.z = array[2][0]
self.vx = array[3][0]
self.vy = array[4][0]
self.vz = array[5][0]
if self.use_accel is True:
self.ax = array[6][0]
self.ay = array[7][0]
self.az = array[8][0]
self.filter.statePost = array
self.filter.statePre = self.filter.statePost
return self
def to_array(self):
""" """
if self.use_accel is True:
return np.array([[self.x],
[self.y],
[self.z],
[self.vx],
[self.vy],
[self.vz],
[self.ax],
[self.ay],
[self.az]], np.float32)
else:
return np.array([[self.x],
[self.y],
[self.z],
[self.vx],
[self.vy],
[self.vz]], np.float32)
def position(self):
""" """
return Vector3D(x=self.x, y=self.y, z=self.z)
def velocity(self):
""" """
return Vector3D(x=self.vx, y=self.vy, z=self.vz)
def acceleration(self):
""" """
return Vector3D(x=self.ax, y=self.ay, z=self.az)
def update(self, x, y, z):
"""Updates/Filter the 3D vector"""
self.__update_time()
self.predict()
measurement = np.array([[x], [y], [z]], np.float32)
measurement = measurement.flatten().reshape((3, 1)) # ugly fix
assert measurement.shape == (3, 1)
self.filter.correct(measurement)
self.from_array(self.filter.statePost)
def predict(self):
"""Predicts the 3D vector based on motion model"""
self.__update_time()
self.filter.predict()
x = self.filter.statePost[0][0]
y = self.filter.statePost[1][0]
z = self.filter.statePost[2][0]
if self.modulo is not None:
x = self.filter.statePost[0][0]
y = self.filter.statePost[1][0]
z = self.filter.statePost[2][0]
x = x - self.modulo if x > self.modulo else x
y = y - self.modulo if y > self.modulo else y
z = z - self.modulo if z > self.modulo else z
vx = self.filter.statePost[3][0]
vy = self.filter.statePost[4][0]
vz = self.filter.statePost[5][0]
if self.vmax is not None:
vx = self.vmax if vx > self.vmax else vx
vy = self.vmax if vy > self.vmax else vy
vz = self.vmax if vz > self.vmax else vz
if self.use_accel is True:
ax = self.filter.statePost[6][0]
ay = self.filter.statePost[7][0]
az = self.filter.statePost[8][0]
if self.amax is not None:
ax = self.amax if ax > self.amax else ax
ay = self.amax if ay > self.amax else ay
az = self.amax if az > self.amax else az
state = np.array([[x],[y],[z],[vx],[vy],[vz],[ax],[ay],[az]], dtype=np.float32)
else:
state = np.array([[x],[y],[z],[vx],[vy],[vz]], dtype=np.float32)
self.from_array(state)
def __update_noise_cov(self, p_cov, m_cov):
"""Updates the process and measurement covariances"""
if self.use_accel is True:
self.filter.processNoiseCov = np.eye(9, dtype=np.float32) * p_cov
else:
self.filter.processNoiseCov = np.eye(6, dtype=np.float32) * p_cov
self.filter.measurementNoiseCov = np.eye(3, dtype=np.float32) * m_cov
def __update_transition(self, dt):
if self.use_accel is True:
a = 0.5*dt*dt
self.filter.transitionMatrix = np.array([[1, 0, 0, dt, 0, 0, a, 0, 0],
[0, 1, 0, 0, dt, 0, 0, a, 0],
[0, 0, 1, 0, 0, dt, 0, 0, a],
[0, 0, 0, 1, 0, 0, dt, 0, 0],
[0, 0, 0, 0, 1, 0, 0, dt, 0],
[0, 0, 0, 0, 0, 1, 0, 0, dt],
[0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1]], np.float32)
else:
self.filter.transitionMatrix = np.array([[1, 0, 0, dt, 0, 0],
[0, 1, 0, 0, dt, 0],
[0, 0, 1, 0, 0, dt],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]], np.float32)
def __update_time(self):
now = cv2.getTickCount()
elapsed_time = (now - self.last_update)/cv2.getTickFrequency()
self.last_update = now
self.__update_transition(elapsed_time)
def __str__(self):
return("3d vector stable: {}".format(self.to_array().flatten()))
|
the-stack_0_18871 | import sys
import click
from .. import shared
@shared.cli.command()
@click.argument("name", required=False)
@click.option("--list_docs", "-l", is_flag=True, required=False)
@click.option("--force", "-f", is_flag=True, required=False)
@click.pass_context
def delete(ctx, name, list_docs, force):
"""Delete a document.
To delete a remote document, it needs to be local. So,
you may need to sync it from remote before deleting it.
"""
yew = ctx.obj["YEW"]
docs = shared.get_document_selection(ctx, name, list_docs, multiple=True)
if not docs:
click.echo("no matching documents")
return
for doc in docs:
click.echo("Document: %s %s" % (doc.uid, doc.name))
d = True
if not force:
d = click.confirm("Do you want to continue to delete the document(s)?")
if d:
for doc in docs:
yew.store.delete_document(doc)
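# A usage sketch (hedged example; the CLI entry-point name and document name
# below are assumptions):
#
#     yew delete my-note        # delete by name, with confirmation
#     yew delete -l             # pick the document(s) from a list
#     yew delete -f my-note     # skip the confirmation prompt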
|
the-stack_0_18874 | import zipfile
from pathlib import Path
from typing import Optional, Union
import fire
from tqdm import tqdm
from fastai.core import *
from fastai.datasets import *
ROOT = Path("data").resolve()
XNLI_DIR = ROOT / "xnli"
if not ROOT.exists():
ROOT.mkdir()
XNLI_DIR.mkdir(exist_ok=True)
print(f"Saving data in {ROOT}")
MT_FILE = "XNLI-MT-1.0.zip"
XNLI_FILE = "XNLI-1.0.zip"
MT_PATH = XNLI_DIR / MT_FILE
XNLI_PATH = XNLI_DIR / XNLI_FILE
MT_URL = "https://s3.amazonaws.com/xnli/XNLI-MT-1.0.zip"
XNLI_URL = "https://s3.amazonaws.com/xnli/XNLI-1.0.zip"
class TqdmUpTo(tqdm):
def update_to(self, b=1, bsize=1, tsize=None):
if tsize is not None:
self.total = tsize
self.update(b * bsize - self.n)
def download_data(url: str, fname: Union[str, Path], dest: Optional[Union[str, Path]]):
"""
Download data if the filename does not exist already
Uses Tqdm to show download progress
"""
from urllib.request import urlretrieve
filepath = (Path(dest) / fname).resolve()
if not filepath.exists():
dirname = Path(filepath.parents[0])
print(f"Creating directory {dirname} from {filepath}")
dirname.mkdir(exist_ok=True)
with TqdmUpTo(unit="B", unit_scale=True, miniters=1, desc=url.split("/")[-1]) as t:
urlretrieve(url, filepath, reporthook=t.update_to)
return str(filepath.resolve().absolute())
def get_and_unzip_data(url: str, fname: Union[str, Path] = None, dest: Union[str, Path] = None):
"""Download `url` if it doesn't exist to `fname` and un-tgz to folder `dest`"""
if dest is None:
dest = url.split("/")[-1]
dest = Path(dest)
fname = dest / fname
if not fname.exists():
download_data(url=url, fname=fname, dest=dest)
print(f"Extracting {fname.resolve().absolute()} \n to {dest}")
zipfile.ZipFile(fname, "r").extractall(dest)
return dest
def get_xnli_and_MT(dest: Union[str, Path] = XNLI_DIR):
get_and_unzip_data(url=XNLI_URL, fname=XNLI_FILE, dest=dest)
get_and_unzip_data(url=MT_URL, fname=MT_FILE, dest=dest)
if __name__ == "__main__":
fire.Fire(get_xnli_and_MT)
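# Typical invocation (hedged example; the script filename is an assumption):
#
#     python get_xnli_data.py --dest data/xnli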
|
the-stack_0_18875 | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from frappe.core.doctype.data_import_beta.exporter import Exporter
from frappe.core.doctype.data_import_beta.test_importer import (
create_doctype_if_not_exists,
)
doctype_name = 'DocType for Export'
class TestExporter(unittest.TestCase):
def setUp(self):
create_doctype_if_not_exists(doctype_name)
def test_exports_specified_fields(self):
if not frappe.db.exists(doctype_name, "Test"):
doc = frappe.get_doc(
doctype=doctype_name,
title="Test",
description="Test Description",
table_field_1=[
{"child_title": "Child Title 1", "child_description": "Child Description 1"},
{"child_title": "Child Title 2", "child_description": "Child Description 2"},
],
table_field_2=[
{"child_2_title": "Child Title 1", "child_2_description": "Child Description 1"},
],
table_field_1_again=[
{
"child_title": "Child Title 1 Again",
"child_description": "Child Description 1 Again",
},
],
).insert()
else:
doc = frappe.get_doc(doctype_name, "Test")
e = Exporter(
doctype_name,
export_fields={
doctype_name: ["title", "description", "number", "another_number"],
"table_field_1": ["name", "child_title", "child_description"],
"table_field_2": ["child_2_date", "child_2_number"],
"table_field_1_again": [
"child_title",
"child_date",
"child_number",
"child_another_number",
],
},
export_data=True,
)
csv_array = e.get_csv_array()
header_row = csv_array[0]
self.assertEqual(
header_row,
[
"Title",
"Description",
"Number",
"another_number",
"ID (Table Field 1)",
"Child Title (Table Field 1)",
"Child Description (Table Field 1)",
"Child 2 Date (Table Field 2)",
"Child 2 Number (Table Field 2)",
"Child Title (Table Field 1 Again)",
"Child Date (Table Field 1 Again)",
"Child Number (Table Field 1 Again)",
"table_field_1_again.child_another_number",
],
)
table_field_1_row_1_name = doc.table_field_1[0].name
table_field_1_row_2_name = doc.table_field_1[1].name
# fmt: off
self.assertEqual(
csv_array[1],
["Test", "Test Description", 0, 0, table_field_1_row_1_name, "Child Title 1", "Child Description 1", None, 0, "Child Title 1 Again", None, 0, 0]
)
self.assertEqual(
csv_array[2],
["", "", "", "", table_field_1_row_2_name, "Child Title 2", "Child Description 2", "", "", "", "", "", ""],
)
# fmt: on
self.assertEqual(len(csv_array), 3)
def test_export_csv_response(self):
e = Exporter(
doctype_name,
export_fields={doctype_name: ["title", "description"]},
export_data=True,
file_type="CSV"
)
e.build_response()
self.assertTrue(frappe.response['result'])
self.assertEqual(frappe.response['doctype'], doctype_name)
self.assertEqual(frappe.response['type'], "csv")
|
the-stack_0_18876 | # pylint: disable=R0902,R0904,R0914
from __future__ import annotations
from typing import TYPE_CHECKING
from pyNastran.bdf.bdf_interface.attributes import BDFAttributes
from pyNastran.bdf.cards.nodes import SPOINT, EPOINT
if TYPE_CHECKING: # pragma: no cover
from pyNastran.bdf.bdf import (
CYAX, CYJOIN, AXIF,
TOPVAR, MPCAX, CORD3G,
SESUPORT, SEUSET, SEUSET1,
)
from pyNastran.bdf.cards.elements.elements import CFAST, CGAP, CRAC2D, CRAC3D, PLOTEL, GENEL
from pyNastran.bdf.cards.properties.properties import PFAST, PGAP, PRAC2D, PRAC3D
from pyNastran.bdf.cards.properties.solid import PLSOLID, PSOLID, PIHEX, PCOMPS
#from pyNastran.bdf.cards.msgmesh import CGEN, GMCORD
from pyNastran.bdf.cards.elements.springs import CELAS1, CELAS2, CELAS3, CELAS4
from pyNastran.bdf.cards.properties.springs import PELAS, PELAST
from pyNastran.bdf.cards.elements.solid import (
#CTETRA, CPYRAM, CPENTA, CHEXA,
CIHEX1, CIHEX2, CHEXA1, CHEXA2,
CTETRA4, CPYRAM5, CPENTA6, CHEXA8,
CTETRA10, CPYRAM13, CPENTA15, CHEXA20,
)
from pyNastran.bdf.cards.elements.rigid import RBAR, RBAR1, RBE1, RBE2, RBE3, RROD, RSPLINE, RSSCON
from pyNastran.bdf.cards.axisymmetric.axisymmetric import (
AXIF, RINGFL,
AXIC, RINGAX, POINTAX, CCONEAX, PCONEAX, PRESAX, TEMPAX,)
from pyNastran.bdf.cards.elements.axisymmetric_shells import (
CTRAX3, CTRAX6, CTRIAX, CTRIAX6, CQUADX, CQUADX4, CQUADX8)
from pyNastran.bdf.cards.elements.shell import (
CQUAD, CQUAD4, CQUAD8, CQUADR, CSHEAR,
CTRIA3, CTRIA6, CTRIAR,
CPLSTN3, CPLSTN4, CPLSTN6, CPLSTN8,
CPLSTS3, CPLSTS4, CPLSTS6, CPLSTS8,
SNORM,
)
from pyNastran.bdf.cards.properties.shell import PSHELL, PCOMP, PCOMPG, PSHEAR, PLPLANE, PPLANE
from pyNastran.bdf.cards.elements.bush import CBUSH, CBUSH1D, CBUSH2D
from pyNastran.bdf.cards.properties.bush import PBUSH, PBUSH1D, PBUSHT #PBUSH2D
from pyNastran.bdf.cards.elements.damper import (CVISC, CDAMP1, CDAMP2, CDAMP3, CDAMP4,
CDAMP5)
from pyNastran.bdf.cards.properties.damper import PVISC, PDAMP, PDAMP5, PDAMPT
from pyNastran.bdf.cards.elements.rods import CROD, CONROD, CTUBE
from pyNastran.bdf.cards.elements.bars import CBAR, CBARAO, CBEAM3, CBEND, BAROR
from pyNastran.bdf.cards.elements.beam import CBEAM, BEAMOR
from pyNastran.bdf.cards.properties.rods import PROD, PTUBE
from pyNastran.bdf.cards.properties.bars import PBAR, PBARL, PBRSECT, PBEND, PBEAM3
from pyNastran.bdf.cards.properties.beam import PBEAM, PBEAML, PBCOMP, PBMSECT
# CMASS5
from pyNastran.bdf.cards.elements.mass import CONM1, CONM2, CMASS1, CMASS2, CMASS3, CMASS4
from pyNastran.bdf.cards.properties.mass import PMASS, NSM, NSM1, NSML, NSML1, NSMADD
from pyNastran.bdf.cards.constraints import (SPC, SPCADD, SPCAX, SPC1, SPCOFF, SPCOFF1,
MPC, MPCADD, SUPORT1, SUPORT, SESUP,
GMSPC)
from pyNastran.bdf.cards.coordinate_systems import (CORD1R, CORD1C, CORD1S,
CORD2R, CORD2C, CORD2S, #CORD3G,
)
from pyNastran.bdf.cards.deqatn import DEQATN
from pyNastran.bdf.cards.dynamic import (
DELAY, DPHASE, FREQ, FREQ1, FREQ2, FREQ3, FREQ4, FREQ5,
TSTEP, TSTEP1, TSTEPNL, NLPARM, NLPCI, TF, ROTORG, ROTORD, TIC)
from pyNastran.bdf.cards.loads.loads import (
LSEQ, SLOAD, DAREA, RFORCE, RFORCE1, SPCD, DEFORM, LOADCYN)
from pyNastran.bdf.cards.loads.dloads import ACSRCE, DLOAD, TLOAD1, TLOAD2, RLOAD1, RLOAD2
from pyNastran.bdf.cards.loads.static_loads import (LOAD, CLOAD, GRAV, ACCEL, ACCEL1, FORCE,
FORCE1, FORCE2, MOMENT, MOMENT1, MOMENT2,
PLOAD, PLOAD1, PLOAD2, PLOAD4, PLOADX1,
GMLOAD)
from pyNastran.bdf.cards.loads.random_loads import RANDPS, RANDT1
from pyNastran.bdf.cards.materials import (MAT1, MAT2, MAT3, MAT4, MAT5,
MAT8, MAT9, MAT10, MAT11, MAT3D,
MATG, MATHE, MATHP, CREEP, EQUIV,
NXSTRAT)
from pyNastran.bdf.cards.material_deps import (
MATT1, MATT2, MATT3, MATT4, MATT5, MATT8, MATT9, MATS1)
from pyNastran.bdf.cards.methods import EIGB, EIGC, EIGR, EIGP, EIGRL, MODTRAK
from pyNastran.bdf.cards.nodes import GRID, GRDSET, SPOINTs, EPOINTs, POINT, SEQGP, GRIDB
from pyNastran.bdf.cards.aero.aero import (
AECOMP, AECOMPL, AEFACT, AELINK, AELIST, AEPARM, AESURF, AESURFS,
CAERO1, CAERO2, CAERO3, CAERO4, CAERO5,
PAERO1, PAERO2, PAERO3, PAERO4, PAERO5,
MONPNT1, MONPNT2, MONPNT3,
SPLINE1, SPLINE2, SPLINE3, SPLINE4, SPLINE5)
from pyNastran.bdf.cards.aero.static_loads import AESTAT, AEROS, CSSCHD, TRIM, TRIM2, DIVERG
from pyNastran.bdf.cards.aero.dynamic_loads import AERO, FLFACT, FLUTTER, GUST, MKAERO1, MKAERO2
#from pyNastran.bdf.cards.aero.zona import (
#ACOORD, AEROZ, AESURFZ, BODY7, CAERO7, MKAEROZ, PAFOIL7, PANLST1, PANLST3,
#SEGMESH, SPLINE1_ZONA, SPLINE2_ZONA, SPLINE3_ZONA, TRIMLNK, TRIMVAR, TRIM_ZONA,
#ZONA)
from pyNastran.bdf.cards.optimization import (
DCONADD, DCONSTR, DESVAR, DDVAL, DOPTPRM, DLINK,
DRESP1, DRESP2, DRESP3,
DVCREL1, DVCREL2,
DVMREL1, DVMREL2,
DVPREL1, DVPREL2,
DVGRID, DSCREEN)
from pyNastran.bdf.cards.superelements import (
RELEASE, SEBNDRY, SEBULK, SECONCT, SEELT, SEEXCLD,
SELABEL, SELOAD, SELOC, SEMPLN, SENQSET, SETREE,
CSUPER, CSUPEXT,
)
from pyNastran.bdf.cards.bdf_sets import (
ASET, BSET, CSET, QSET, USET, OMIT,
ASET1, BSET1, CSET1, QSET1, USET1, OMIT1,
SET1, SET2, SET3,
SEBSET, SECSET, SEQSET, # SEUSET
SEBSET1, SECSET1, SEQSET1, # SEUSET1
SESET, # SEQSEP,
RADSET
)
from pyNastran.bdf.cards.params import PARAM
from pyNastran.bdf.cards.dmig import DMIG, DMIAX, DMI, DMIJ, DMIK, DMIJI, DMIG_UACCEL, DTI
from pyNastran.bdf.cards.thermal.loads import (QBDY1, QBDY2, QBDY3, QHBDY, TEMP, TEMPD, TEMPB3,
QVOL, QVECT)
from pyNastran.bdf.cards.thermal.thermal import (CHBDYE, CHBDYG, CHBDYP, PCONV, PCONVM,
PHBDY, CONV, CONVM, TEMPBC)
from pyNastran.bdf.cards.thermal.radiation import RADM, RADBC, RADCAV, RADLST, RADMTX, VIEW, VIEW3D
from pyNastran.bdf.cards.bdf_tables import (TABLED1, TABLED2, TABLED3, TABLED4,
TABLEM1, TABLEM2, TABLEM3, TABLEM4,
TABLES1, TABDMP1, TABLEST, TABLEHT, TABLEH1,
TABRND1, TABRNDG,
DTABLE)
from pyNastran.bdf.cards.contact import (
BCRPARA, BCTADD, BCTSET, BSURF, BSURFS, BCPARA, BCTPARA, BCONP, BLSEG,
BFRIC, BCTPARM, BGADD, BGSET)
from pyNastran.bdf.cards.parametric.geometry import PSET, PVAL, FEEDGE, FEFACE, GMCURV, GMSURF
class AddMethods(BDFAttributes):
"""defines methods to add card objects to the BDF"""
def __init__(self) -> None:
BDFAttributes.__init__(self)
def _add_dmi_object(self, dmi: DMI, allow_overwrites: bool=False) -> None:
"""adds a DMI object"""
name = dmi.name
self.dmi[name] = dmi
self._type_to_id_map[dmi.type].append(name)
def _add_dmig_object(self, dmig: DMIG, allow_overwrites: bool=False) -> None:
"""adds a DMIG object"""
name = dmig.name
self.dmig[name] = dmig
self._type_to_id_map[dmig.type].append(name)
def _add_dmiax_object(self, dmiax: DMIAX, allow_overwrites: bool=False) -> None:
"""adds a DMI object"""
name = dmiax.name
self.dmiax[name] = dmiax
self._type_to_id_map[dmiax.type].append(name)
def _add_dmij_object(self, dmij: DMIJ, allow_overwrites: bool=False) -> None:
"""adds a DMIJ object"""
name = dmij.name
self.dmij[name] = dmij
self._type_to_id_map[dmij.type].append(name)
def _add_dmiji_object(self, dmiji: DMIJI, allow_overwrites: bool=False) -> None:
"""adds a DMIJI object"""
name = dmiji.name
self.dmiji[name] = dmiji
self._type_to_id_map[dmiji.type].append(name)
def _add_dmik_object(self, dmik: DMIK, allow_overwrites: bool=False) -> None:
"""adds a DMIK object"""
name = dmik.name
self.dmik[name] = dmik
self._type_to_id_map[dmik.type].append(name)
def _add_dti_object(self, dti: DTI, allow_overwrites: bool=False) -> None:
"""adds an DTI object"""
name = dti.name
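        # DTI,UNITS is always stored as a single record; other DTI tables that
        # share a name are merged field-by-field (a repeated field key is
        # treated as an error below).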
if name == 'UNITS' or name not in self.dti:
self.dti[name] = dti
self._type_to_id_map[dti.type].append(name)
else:
old_dti = self.dti[name]
key = list(dti.fields.keys())[0]
assert key not in old_dti.fields, 'key=%i old_fields=%s fields=%s' % (key, old_dti.fields, dti.fields)
old_dti.fields[key] = dti.fields[key]
def _add_param_object(self, param: PARAM, allow_overwrites: bool=False) -> None:
"""adds a PARAM object"""
key = param.key
if key in self.params and not allow_overwrites:
if not param == self.params[key]:
#if param.key in self.params:
#msg = 'key=%s param=%s old_param=%s' % (key, param, self.params[key])
#raise KeyError(msg)
self.log.warning('key=%s param=%s old_param=%s' %
(key, param, self.params[key]))
self.params[key] = param
else:
self.params[key] = param
self._type_to_id_map[param.type].append(key)
def _add_node_object(self, node: Union[GRID], allow_overwrites: bool=False) -> None:
"""adds a GRID card"""
key = node.nid
if key in self.nodes and not allow_overwrites:
if not node == self.nodes[key]:
assert node.nid not in self.nodes, 'nid=%s\nold_node=\n%snew_node=\n%s' % (node.nid, self.nodes[key], node)
else:
#print('GRID was duplicated...nid=%s; node=\n%s' % (key, node))
pass
else:
assert key > 0, 'nid=%s node=%s' % (key, node)
self.nodes[key] = node
self._type_to_id_map[node.type].append(key)
def _add_gridb_object(self, node: GRIDB, allow_overwrites: bool=False) -> None:
"""adds a GRIDB card"""
key = node.nid
assert key > 0, 'eid=%s node=%s' % (key, node)
if key in self.gridb and not allow_overwrites:
assert node.nid not in self.gridb, 'nid=%s\nold_node=\n%snew_node=\n%s' % (node.nid, self.gridb[key], node)
self.gridb[key] = node
self._type_to_id_map[node.type].append(key)
self._is_axis_symmetric = True
def _add_ringfl_object(self, ringfl: RINGFL, allow_overwrites: bool=False) -> None:
"""adds a RINGFL card"""
key = ringfl.ringfl
assert key > 0, 'eid=%s ringfl=%s' % (key, ringfl)
if key in self.ringfl and not allow_overwrites:
assert ringfl.ringfl not in self.ringfl, 'ringfl=%s\nold_ringfl=\n%snew_ringfl=\n%s' % (ringfl.ringfl, self.ringfl[key], ringfl)
self.ringfl[key] = ringfl
self._type_to_id_map[ringfl.type].append(key)
self._is_axis_symmetric = True
def _add_ringax_object(self, ringax: Union[RINGAX, POINTAX], allow_overwrites: bool=False) -> None:
"""adds a RINGAX card"""
key = ringax.nid
if key in self.ringaxs and not allow_overwrites:
if not ringax == self.ringaxs[key]:
assert ringax.nid not in self.ringaxs, 'nid=%s\nold_ringax=\n%snew_ringax=\n%s' % (ringax.nid, self.ringaxs[key], ringax)
else:
#print('RINGAX was duplicated...nid=%s; ringax=\n%s' % (key, ringax))
pass
else:
assert key > 0, 'nid=%s ringax=%s' % (key, ringax)
self.ringaxs[key] = ringax
self._type_to_id_map[ringax.type].append(key)
self._is_axis_symmetric = True
def _add_seqgp_object(self, seqgp: SEQGP) -> None:
"""adds an SEQGP card"""
if self.seqgp is None:
self.seqgp = seqgp
else:
self.seqgp.append(seqgp)
def _add_point_object(self, point: POINT, allow_overwrites: bool=False) -> None:
"""adds a POINT card"""
key = point.nid
if key in self.points and not allow_overwrites:
if not point == self.points[key]:
assert point.nid not in self.points, 'nid=%s\nold_point=\n%snew_point=\n%s' % (point.nid, self.points[key], point)
else:
#print('POINT was duplicated...nid=%s; point=\n%s' % (key, point))
pass
else:
assert key > 0, 'nid=%s point=%s' % (key, point)
self.points[key] = point
self._type_to_id_map[point.type].append(key)
def _add_spoint_object(self, spoints: SPOINTs) -> None:
"""adds an SPOINT card"""
comment = spoints.comment
if hasattr(spoints, 'ifile'):
ifile = spoints.ifile
for nid in spoints.points:
if nid in self.spoints:
continue
spoint = SPOINT(nid, comment=comment)
spoint.ifile = ifile
comment = ''
self.spoints[nid] = spoint
else:
for nid in spoints.points:
if nid in self.spoints:
continue
spoint = SPOINT(nid, comment=comment)
comment = ''
self.spoints[nid] = spoint
def _add_epoint_object(self, epoints: EPOINTs) -> None:
"""adds an EPOINT card"""
comment = epoints.comment
for nid in epoints.points:
if nid in self.epoints:
continue
epoint = EPOINT(nid, comment=comment)
comment = ''
self.epoints[nid] = epoint
def _add_setree_object(self, setree: SETREE) -> None:
key = setree.seid
self.setree[key] = setree
self._type_to_id_map[setree.type].append(key)
def _add_senqset_object(self, senqset: SENQSET) -> None:
key = senqset.set_id
self.senqset[key] = senqset
self._type_to_id_map[senqset.type].append(key)
def _add_sebulk_object(self, sebulk: SEBULK) -> None:
key = sebulk.seid
self.sebulk[key] = sebulk
self._type_to_id_map[sebulk.type].append(key)
def _add_release_object(self, release: RELEASE) -> None:
key = release.seid
self.release[key] = release
self._type_to_id_map[release.type].append(key)
def _add_sebndry_object(self, sebndry: SEBNDRY) -> None:
key = (sebndry.seid_a, sebndry.seid_b)
self.sebndry[key] = sebndry
def _add_seloc_object(self, seloc: SELOC) -> None:
key = seloc.seid
self.seloc[key] = seloc
self._type_to_id_map[seloc.type].append(key)
def _add_sempln_object(self, sempln: SEMPLN) -> None:
key = sempln.seid
self.sempln[key] = sempln
self._type_to_id_map[sempln.type].append(key)
def _add_seconct_object(self, seconct: SECONCT) -> None:
key = (seconct.seid_a, seconct.seid_b)
self.seconct[key] = seconct
self._type_to_id_map[seconct.type].append(key)
def _add_selabel_object(self, selabel: SELABEL) -> None:
key = selabel.seid
self.selabel[key] = selabel
self._type_to_id_map[selabel.type].append(key)
def _add_seexcld_object(self, seexcld: SEEXCLD) -> None:
key = (seexcld.seid_a, seexcld.seid_b)
self.seexcld[key] = seexcld
self._type_to_id_map[seexcld.type].append(key)
def _add_seelt_object(self, seelt: SEELT) -> None:
#self.seelt.append(seelt)
key = seelt.seid
self.seelt[key] = seelt
self._type_to_id_map[seelt.type].append(key)
def _add_seload_object(self, seload: SELOAD) -> None:
key = seload.seid
self.seload[key] = seload
self._type_to_id_map[seload.type].append(key)
def _add_csuper_object(self, csuper: CSUPER) -> None:
key = csuper.seid
self.csuper[key] = csuper
self._type_to_id_map[csuper.type].append(key)
def _add_csupext_object(self, csupext: CSUPEXT) -> None:
key = csupext.seid
self.csupext[key] = csupext
self._type_to_id_map[csupext.type].append(key)
def _add_plotel_object(self, elem: PLOTEL, allow_overwrites: bool=False) -> None:
"""adds an PLOTEL object"""
key = elem.eid
assert key > 0, 'eid=%s must be positive; elem=\n%s' % (key, elem)
if not allow_overwrites:
if key in self.elements:
if elem == self.elements[key]:
self._duplicate_elements.append(elem)
if self._stop_on_duplicate_error:
self.pop_parse_errors()
elif key in self.plotels:
if not elem == self.plotels[key]:
assert elem.eid not in self.plotels, 'eid=%s\nold_element=\n%snew_element=\n%s' % (elem.eid, self.plotels[elem.eid], elem)
self.plotels[key] = elem
self._type_to_id_map[elem.type].append(key)
def _add_element_object(self, elem: Union[CELAS1, CELAS2, CELAS3, CELAS4,
CDAMP1, CDAMP2, CDAMP3, CDAMP4, CDAMP5,
CVISC, CBUSH, CBUSH1D, CBUSH2D, CFAST, #CWELD
CGAP, GENEL, CCONEAX,
CROD, CTUBE, CONROD,
CBAR, CBEAM, CBEAM3, CBEND, CSHEAR,
CTRIA3, CTRIA6, CTRIAR,
CQUAD4, CQUAD8, CQUADR, CQUAD,
CTRIAX, CTRIAX6,
CQUADX, CQUADX4, CQUADX8,
CRAC2D, CRAC3D,
CPLSTN3, CPLSTN4, CPLSTN6, CPLSTN8,
CPLSTS3, #CPLSTS4, CPLSTS6, CPLSTS8,
CTETRA4, CTETRA10, CPENTA6, CPENTA15,
CHEXA8, CHEXA20, CPYRAM5, CPYRAM13,
CTRAX3, CTRAX6,
# thermal
CHBDYE, CHBDYG, CHBDYP,
# Nastran 95
CIHEX1, CIHEX2,
CHEXA1, CHEXA2,
],
allow_overwrites: bool=False) -> None:
key = elem.eid
assert key > 0, 'eid=%s must be positive; elem=\n%s' % (key, elem)
if key in self.elements and not allow_overwrites:
if not elem == self.elements[key]:
self._duplicate_elements.append(elem)
if self._stop_on_duplicate_error:
self.pop_parse_errors()
else:
self.elements[key] = elem
self._type_to_id_map[elem.type].append(key)
def _add_ao_object(self, elem_flag: CBARAO, allow_overwrites: bool=False) -> None:
"""adds a CBARAO"""
key = elem_flag.eid
assert key > 0, 'eid=%s must be positive; elem_flag=\n%s' % (key, elem_flag)
if key in self.ao_element_flags and not allow_overwrites:
if not elem_flag == self.ao_element_flags[key]:
#self._duplicate_elements.append(elem_flag)
#if self._stop_on_duplicate_error:
#self.pop_parse_errors()
assert elem_flag.eid not in self.ao_element_flags, 'eid=%s\nold_ao_element=\n%snew_ao_element=\n%s' % (
elem_flag.eid, self.ao_element_flags[elem_flag.eid], elem_flag)
else:
self.ao_element_flags[key] = elem_flag
self._type_to_id_map[elem_flag.type].append(key)
def _add_doptprm_object(self, doptprm: DOPTPRM) -> None:
"""adds a DOPTPRM"""
self.doptprm = doptprm
def _add_nsm_object(self, nsm: Union[NSM, NSM1, NSML, NSML1], allow_overwrites: bool=False) -> None:
"""adds a nsm object to a nsm set"""
key = nsm.sid
assert key > 0, 'sid=%s must be positive; nsm=\n%s' % (key, nsm)
if key in self.nsms:
self.nsms[key].append(nsm)
else:
self.nsms[key] = [nsm]
self._type_to_id_map[nsm.type].append(key)
def _add_nsmadd_object(self, nsmadd: NSMADD, allow_overwrites: bool=False) -> None:
"""adds a nsmadd object to a nsm set"""
key = nsmadd.sid
assert key > 0, 'sid=%s must be positive; nsmadd=\n%s' % (key, nsmadd)
if key in self.nsmadds:
self.nsmadds[key].append(nsmadd)
else:
self.nsmadds[key] = [nsmadd]
self._type_to_id_map[nsmadd.type].append(key)
def _add_mass_object(self, mass: Union[CMASS1, CMASS2, CMASS3, CMASS4,
CONM1, CONM2], allow_overwrites: bool=False) -> None:
key = mass.eid
assert key > 0, 'eid=%s must be positive; mass=\n%s' % (key, mass)
if key in self.masses and not allow_overwrites:
if not mass == self.masses[key]:
self._duplicate_masses.append(mass)
else:
self.masses[key] = mass
self._type_to_id_map[mass.type].append(key)
def _add_damper_object(self, elem, allow_overwrites: bool=False) -> None:
""".. warning:: can dampers have the same ID as a standard element?"""
return self._add_element_object(elem, allow_overwrites)
def _add_rigid_element_object(self, elem: Union[RBAR, RBAR1,
RBE1, RBE2, RBE3,
RROD, RSPLINE, RSSCON],
allow_overwrites: bool=False) -> None:
key = elem.eid
assert key > 0, 'eid=%s elem=%s' % (key, elem)
if key in self.rigid_elements and not allow_overwrites:
assert elem.eid not in self.rigid_elements, 'eid=%s\noldElement=\n%snewElement=\n%s' % (elem.eid, self.rigid_elements[elem.eid], elem)
self.rigid_elements[key] = elem
self._type_to_id_map[elem.type].append(key)
def _add_thermal_element_object(self, elem: Union[CHBDYE, CHBDYG, CHBDYP]) -> None:
"""same as add_element at the moment..."""
self._add_element_object(elem)
def _add_deqatn_object(self, deqatn: DEQATN, allow_overwrites: bool=False) -> None:
"""adds an DEQATN object"""
key = deqatn.equation_id
assert key > 0, 'ID=%s deqatn\n%s' % (key, deqatn)
if key in self.dequations and not allow_overwrites:
if not deqatn.write_card() == self.dequations[key].write_card():
assert key not in self.dequations, 'id=%s old_eq=\n%snew_eq=\n%s' % (
key, self.dequations[key], deqatn)
self.dequations[key] = deqatn
self._type_to_id_map[deqatn.type].append(key)
def _add_acoustic_property_object(self, prop: PACABS) -> None:
self._add_property_object(prop)
def _add_property_object(self, prop: Union[PELAS, PBUSH, PBUSH1D, PDAMP, PDAMP5, # PBUSH2D,
PFAST, PVISC, PGAP, PRAC2D, PRAC3D, # PWELD
PROD, PTUBE,
PBAR, PBARL, PBRSECT, PCONEAX,
PBEAM, PBEAML, PBCOMP, PBMSECT,
PBEND, PBEAM3, PPLANE, PSHEAR,
PSHELL, PCOMP, PCOMPG, PLPLANE,
PSOLID, PLSOLID, PIHEX, PCOMPS],
allow_overwrites: bool=False) -> None:
"""
adds one of the following objects:
PELAS, PBUSH, PBUSH1D, PBUSH2D, PDAMP,
PROD, PBAR, PBARL, PBEAM, PBEAML, PBCOMP,
PSHELL, PCOMP, PCOMPG,
PSOLID, PLSOLID
"""
key = prop.pid
assert key > 0, 'pid=%s prop=%s' % (key, prop)
if key in self.properties and not allow_overwrites:
if not prop == self.properties[key]:
self._duplicate_properties.append(prop)
if self._stop_on_duplicate_error:
self.pop_parse_errors()
else:
self.properties[key] = prop
self._type_to_id_map[prop.type].append(key)
def _add_property_mass_object(self, prop: PMASS, allow_overwrites: bool=False) -> None:
"""adds an PMASS object"""
key = prop.pid
if key in self.properties_mass and not allow_overwrites:
if not prop == self.properties_mass[key]:
#print('pid=%s\noldProperty=\n%snewProperty=\n%s' %(key,self.properties_mass[key],prop))
assert key not in self.properties_mass, 'pid=%s oldProperty=\n%snewProperty=\n%s' % (key, self.properties_mass[key], prop)
else:
assert key > 0, 'pid=%s prop=%s' % (key, prop)
self.properties_mass[key] = prop
self._type_to_id_map[prop.type].append(key)
def _add_dtable_object(self, dtable: DTABLE, allow_overwrites: bool=False) -> None:
"""adds an DTABLE object"""
if self.dtable is not None:
if not dtable == self.dtable:
raise RuntimeError('DTABLE cannot be overwritten\nold:\n%s\nnew:\n%s',
self.dtable, dtable)
else:
self.dtable = dtable
#self._type_to_id_map[dtable.type].append(1)
def _add_bcrpara_object(self, card: BCRPARA, allow_overwrites: bool=False) -> None:
"""adds an BCRPARA object"""
key = card.crid
self.bcrparas[key] = card
self._type_to_id_map[card.type].append(key)
def _add_bgadd_object(self, card: BGADD, allow_overwrites: bool=False) -> None:
"""adds an BGADD object"""
key = card.glue_id
self.bgadds[key] = card
self._type_to_id_map[card.type].append(key)
def _add_bctadd_object(self, card: BCTADD, allow_overwrites: bool=False) -> None:
"""adds an BCTADD object"""
key = card.csid
self.bctadds[key] = card
self._type_to_id_map[card.type].append(key)
def _add_bcpara_object(self, card: BCPARA, allow_overwrites: bool=False) -> None:
"""adds an BCPARA object"""
key = card.csid
self.bcparas[key] = card
self._type_to_id_map[card.type].append(key)
def _add_bctpara_object(self, card: BCTPARA, allow_overwrites: bool=False) -> None:
"""adds an BCTPARA object"""
key = card.csid
self.bctparas[key] = card
self._type_to_id_map[card.type].append(key)
def _add_bctparam_object(self, card: BCTPARM, allow_overwrites: bool=False) -> None:
"""adds an BCTPARM object"""
key = card.csid
self.bctparms[key] = card
self._type_to_id_map[card.type].append(key)
def _add_bctset_object(self, card: BCTSET, allow_overwrites: bool=False) -> None:
"""adds an BCTSET object"""
key = card.csid
self.bctsets[key] = card
self._type_to_id_map[card.type].append(key)
def _add_bgset_object(self, card: BGSET, allow_overwrites: bool=False) -> None:
"""adds an BGSET object"""
key = card.glue_id
self.bgsets[key] = card
self._type_to_id_map[card.type].append(key)
def _add_bconp_object(self, bconp: BCONP) -> None:
"""adds an BCONP object"""
key = bconp.contact_id
self.bconp[key] = bconp
self._type_to_id_map[bconp.type].append(key)
def _add_bcbody_object(self, bcbody: BCBODY) -> None:
"""adds an BCBODY object"""
key = bcbody.contact_id
self.bcbodys[key] = bcbody
self._type_to_id_map[bcbody.type].append(key)
def _add_blseg_object(self, blseg: BLSEG) -> None:
"""adds an BLSEG object"""
key = blseg.line_id
self.blseg[key] = blseg
self._type_to_id_map[blseg.type].append(key)
def _add_bfric_object(self, bfric: BFRIC) -> None:
"""adds an BFRIC object"""
key = bfric.friction_id
self.bfric[key] = bfric
self._type_to_id_map[bfric.type].append(key)
def _add_bsurf_object(self, card: BSURF, allow_overwrites: bool=False) -> None:
"""adds an BSURF object"""
key = card.sid
self.bsurf[key] = card
self._type_to_id_map[card.type].append(key)
def _add_bsurfs_object(self, card: BSURFS, allow_overwrites: bool=False) -> None:
"""adds an BSURFS object"""
key = card.id
self.bsurfs[key] = card
self._type_to_id_map[card.type].append(key)
def _add_radcav_object(self, radcav: RADCAV, allow_overwrites: bool=False) -> None:
"""adds an RADCAV object"""
key = radcav.icavity
if key in self.radcavs and not allow_overwrites:
if not radcav == self.radcavs[key]:
assert key not in self.radcavs, 'pid=%s old RADCAV=\n%snew RADCAV=\n%s' % (key, self.radcavs[key], radcav)
else:
assert key > 0, 'pid=%s radcav=%s' % (key, radcav)
self.radcavs[key] = radcav
self._type_to_id_map[radcav.type].append(key)
def _add_radmtx_object(self, radmtx: RADMTX, allow_overwrites: bool=False) -> None:
"""adds an RADMTX object"""
key = radmtx.icavity
if key in self.radmtx and not allow_overwrites:
if not radmtx == self.radmtx[key]:
assert key not in self.radmtx, 'pid=%s old RADMTX=\n%snew RADMTX=\n%s' % (key, self.radmtx[key], radmtx)
else:
assert key > 0, 'pid=%s radmtx=%s' % (key, radmtx)
self.radmtx[key] = radmtx
self._type_to_id_map[radmtx.type].append(key)
def _add_tempd_object(self, tempd: TEMPD, allow_overwrites: bool=False) -> None:
"""adds an TEMPD object"""
key = tempd.sid
if key in self.tempds and not allow_overwrites:
if not tempd == self.tempds[key]:
assert key not in self.tempds, 'TEMPD.sid=%s old=\n%snew=\n%s' % (
key, self.tempds[key], tempd)
else:
assert key > 0, 'sid=%s tempd=%s' % (key, tempd)
self.tempds[key] = tempd
self._type_to_id_map[tempd.type].append(key)
def _add_pbusht_object(self, prop: PBUSHT, allow_overwrites: bool=False) -> None:
"""adds an PBUSHT object"""
key = prop.pid
if key in self.pbusht and not allow_overwrites:
if not prop == self.pbusht[key]:
assert key not in self.pbusht, 'PBUSHT.pid=%s old=\n%snew=\n%s' % (
key, self.pbusht[key], prop)
else:
assert key > 0, 'pid=%s prop=%s' % (key, prop)
self.pbusht[key] = prop
self._type_to_id_map[prop.type].append(key)
def _add_pdampt_object(self, prop: PDAMPT, allow_overwrites: bool=False) -> None:
"""adds an PDAMPT object"""
key = prop.pid
if key in self.pdampt and not allow_overwrites:
if not prop == self.pdampt[key]:
assert key not in self.pdampt, 'PDAMPT.pid=%s old=\n%snew=\n%s' % (
key, self.pdampt[key], prop)
else:
assert key > 0, 'pid=%s prop=%s' % (key, prop)
self.pdampt[key] = prop
self._type_to_id_map[prop.type].append(key)
def _add_pelast_object(self, prop: PELAST, allow_overwrites: bool=False) -> None:
"""adds an PELAST object"""
key = prop.pid
assert key > 0, 'pid=%s prop=%s' % (key, prop)
if key in self.pelast and not allow_overwrites:
if not prop == self.pelast[key]:
#print('pid=%s\noldProperty=\n%snewProperty=\n%s' % (key, self.pelast[key],prop))
assert key not in self.pelast, 'PELAST.pid=%s old=\n%snew=\n%s' % (
key, self.pelast[key], prop)
else:
self.pelast[key] = prop
self._type_to_id_map[prop.type].append(key)
def _add_tf_object(self, tf: TF, allow_overwrites: bool=False) -> None:
"""adds an TF (transfer function) object"""
key = tf.sid
assert key > 0, 'sid=%s tf=%s' % (key, tf)
if key in self.transfer_functions:
self.transfer_functions[key].append(tf)
else:
self.transfer_functions[key] = [tf]
self._type_to_id_map[tf.type].append(key)
def _add_structural_material_object(self, material: Union[MAT1, MAT2, MAT3, MAT8, MAT9,
MAT10, MAT11, MAT3D, MATG],
allow_overwrites: bool=False) -> None:
"""adds an MAT1, MAT2, MAT8 object"""
key = material.mid
assert key > 0, 'mid=%s material=\n%s' % (key, material)
if key in self.materials and not allow_overwrites:
if not material == self.materials[key]:
self._duplicate_materials.append(material)
else:
self.materials[key] = material
self._type_to_id_map[material.type].append(key)
def _add_thermal_material_object(self, material: Union[MAT4, MAT5],
allow_overwrites: bool=False) -> None:
"""adds an MAT4, MAT5 object"""
key = material.mid
assert key > 0, 'mid=%s material=\n%s' % (key, material)
if key in self.thermal_materials and not allow_overwrites:
if not material == self.thermal_materials[key]:
self._duplicate_thermal_materials.append(material)
else:
self.thermal_materials[key] = material
self._type_to_id_map[material.type].append(key)
def _add_hyperelastic_material_object(self, material: Union[MATHE, MATHP],
allow_overwrites: bool=False) -> None:
"""adds an MATHP, MATHE object"""
key = material.mid
assert key > 0, 'mid=%s material=\n%s' % (key, material)
if key in self.hyperelastic_materials and not allow_overwrites:
if not material == self.hyperelastic_materials[key]:
assert key not in self.hyperelastic_materials, 'mid=%s\nold=\n%snew=\n%s' % (key, self.hyperelastic_materials[key], material)
else:
self.hyperelastic_materials[key] = material
self._type_to_id_map[material.type].append(key)
def _add_material_dependence_object(self, material: Union[MATT1, MATT2, MATT3, MATT4, MATT5, MATT8, MATT9,
MATS1], # MATS3, MATS8
allow_overwrites: bool=False) -> None:
"""
adds the following objects:
MATS1, MATS3, MATS8,
MATT1, MATT2, MATT3,
MATT4, MATT5, MATT8, MATT9
"""
Type = material.type
key = material.mid
mapper = {
'MATS1' : self.MATS1,
'MATS3' : self.MATS3,
'MATS8' : self.MATS8,
'MATT1' : self.MATT1,
'MATT2' : self.MATT2,
'MATT3' : self.MATT3,
'MATT4' : self.MATT4,
'MATT5' : self.MATT5,
'MATT8' : self.MATT8,
'MATT9' : self.MATT9,
}
slot = mapper[Type]
if key in slot and not allow_overwrites:
if not material == slot[key]:
assert key not in slot, 'dMATx.mid=%s Type=%r\nold=\n%snew=\n%s' % (key, Type, slot[key], material)
else:
assert key > 0, 'mid=%s material=\n%s' % (key, material)
slot[key] = material
self._type_to_id_map[material.type].append(key)
def _add_creep_material_object(self, material: CREEP, allow_overwrites: bool=False) -> None:
"""
Adds a CREEP material
Notes
-----
May be removed in the future. Are CREEP cards materials?
They have an MID, but reference structural materials.
"""
key = material.mid
        if key in self.creep_materials and not allow_overwrites:
if not material == self.creep_materials[key]:
assert key not in self.creep_materials, 'Material.mid=%s\nold=\n%snew=\n%s' % (key, self.creep_materials[key], material)
else:
assert key > 0, 'mid=%s material=\n%s' % (key, material)
self.creep_materials[key] = material
self._type_to_id_map[material.type].append(key)
def _add_coord_object(self, coord: Union[CORD1R, CORD1C, CORD1S,
CORD2R, CORD2C, CORD2S], # CORD3G
allow_overwrites: bool=False) -> None:
"""adds a CORDx object"""
key = coord.cid
assert coord.cid > -1, 'cid=%s coord=\n%s' % (key, coord)
if key in self.coords:
#if not allow_overwrites:
if not coord == self.coords[key]:
self._duplicate_coords.append(coord)
else:
self.coords[key] = coord
self._type_to_id_map[coord.type].append(key)
def _add_load_combination_object(self, load: Union[LOAD, CLOAD]) -> None:
"""adds a load object to a load case"""
key = load.sid
if key in self.load_combinations:
self.load_combinations[key].append(load)
else:
self.load_combinations[key] = [load]
self._type_to_id_map[load.type].append(key)
def _add_load_object(self, load: Union[FORCE, FORCE1, FORCE2, MOMENT, MOMENT1, MOMENT2,
PLOAD, PLOAD1, PLOAD2, PLOAD4, PLOADX1,
GRAV, ACCEL, ACCEL1, SPCD, SLOAD,
QBDY1, QBDY2, QBDY3, QVOL, TEMPAX, PRESAX,
RFORCE, RFORCE1, LOADCYN, LOADCYH, DEFORM,
GMLOAD]) -> None:
"""adds a load object to a load case"""
key = load.sid
if key in self.loads:
self.loads[key].append(load)
else:
self.loads[key] = [load]
self._type_to_id_map[load.type].append(key)
def _add_dload_object(self, load: DLOAD) -> None:
"""adds a dload object to a load case"""
key = load.sid
if key in self.dloads:
self.dloads[key].append(load)
else:
self.dloads[key] = [load]
self._type_to_id_map[load.type].append(key)
def _add_dload_entry(self, dload: Union[ACSRCE, RANDPS, RANDT1,
TLOAD1, TLOAD2, RLOAD1, RLOAD2,
QVECT]) -> None:
"""adds a sub-dload object to a load case"""
key = dload.sid
if key in self.dload_entries:
self.dload_entries[key].append(dload)
else:
self.dload_entries[key] = [dload]
self._type_to_id_map[dload.type].append(key)
def _add_lseq_object(self, load: LSEQ) -> None:
"""adds a LSEQ object to a load case"""
key = load.sid
if key in self.load_combinations:
self.load_combinations[key].append(load)
else:
self.load_combinations[key] = [load]
self._type_to_id_map[load.type].append(key)
def _add_thermal_load_object(self, load: Union[TEMP, TEMPB3, QHBDY, QBDY1, QBDY2, QBDY3]) -> None:
# same function at the moment...
key = load.sid
assert key > 0, 'key=%s; load=%s\n' % (key, load)
if key in self.loads:
self.loads[key].append(load)
else:
self.loads[key] = [load]
self._type_to_id_map[load.type].append(key)
def _add_phbdy_object(self, prop: PHBDY) -> None:
key = prop.pid
if key in self.phbdys:
if not prop == self.phbdys[key]:
assert key not in self.phbdys, 'PHBDY.pid=%s\nold=\n%snew=\n%s' % (
key, self.phbdys[key], prop)
else:
assert key > 0, 'pid=%s prop=\n%s' % (key, prop)
self.phbdys[key] = prop
self._type_to_id_map[prop.type].append(key)
def _add_view_object(self, view: VIEW) -> None:
"""adds a VIEW object"""
key = view.iview
assert key > 0, 'key=%s; view=%s\n' % (key, view)
if key in self.views:
if not view == self.views[key]:
assert key not in self.views, 'VIEW.iview=%s\nold=\n%snew=\n%s' % (
key, self.views[key], view)
else:
assert key > 0, 'iview=%s view=\n%s' % (key, view)
self.views[key] = view
self._type_to_id_map[view.type].append(key)
def _add_view3d_object(self, view3d: VIEW3D) -> None:
"""adds a VIEW3D object"""
key = view3d.icavity
assert key > 0, 'key=%s; view3d=%s\n' % (key, view3d)
if key in self.view3ds:
if not view3d == self.view3ds[key]:
assert key not in self.view3ds, 'VIEW3D.icavity=%s\nold=\n%snew=\n%s' % (
key, self.view3ds[key], view3d)
else:
assert key > 0, 'icavity=%s view3d=\n%s' % (key, view3d)
self.view3ds[key] = view3d
self._type_to_id_map[view3d.type].append(key)
def _add_normal_object(self, snorm: SNORM) -> None:
"""adds an SNORM object"""
key = snorm.nid
assert key > 0, 'key=%s; snorm=%s\n' % (key, snorm)
if key in self.normals:
if not snorm == self.normals[key]:
                assert key not in self.normals, 'SNORM.nid=%s\nold=\n%snew=\n%s' % (
key, self.normals[key], snorm)
else:
assert key > 0, 'pid=%s SNORM=\n%s' % (key, snorm)
self.normals[key] = snorm
self._type_to_id_map[snorm.type].append(key)
def _add_convection_property_object(self, prop: Union[PCONV, PCONVM]) -> None:
key = prop.pconid
assert key > 0, key
assert key not in self.convection_properties, key
self.convection_properties[key] = prop
self._type_to_id_map[prop.type].append(key)
def _add_thermal_bc_object(self, bc: Union[CONV, CONVM, RADM, TEMPBC], key) -> None:
assert key > 0
if key in self.bcs:
self.bcs[key].append(bc)
else:
self.bcs[key] = [bc]
self._type_to_id_map[bc.type].append(key)
def _add_constraint_mpc_object(self, constraint: MPC) -> None: # MPCAX
key = constraint.conid
if key in self.mpcs:
self.mpcs[key].append(constraint)
else:
self.mpcs[key] = [constraint]
self._type_to_id_map[constraint.type].append(key)
def _add_constraint_mpcadd_object(self, constraint: MPCADD) -> None:
key = constraint.conid
if key in self.mpcadds:
self.mpcadds[key].append(constraint)
else:
self.mpcadds[key] = [constraint]
self._type_to_id_map[constraint.type].append(key)
def _add_constraint_spc_object(self, constraint: Union[SPC, SPC1, SPCAX, GMSPC]) -> None:
key = constraint.conid
if key in self.spcs:
self.spcs[key].append(constraint)
else:
self.spcs[key] = [constraint]
self._type_to_id_map[constraint.type].append(key)
def _add_constraint_spcadd_object(self, constraint: SPCADD) -> None:
key = constraint.conid
if key in self.spcadds:
self.spcadds[key].append(constraint)
else:
self.spcadds[key] = [constraint]
self._type_to_id_map[constraint.type].append(key)
def _add_constraint_spcoff_object(self, constraint: Union[SPCOFF, SPCOFF1]) -> None:
"""dumb key, but good enough..."""
key = constraint.type
if key in self.spcoffs:
self.spcoffs[key].append(constraint)
else:
self.spcoffs[key] = [constraint]
self._type_to_id_map[constraint.type].append(key)
def _add_sesuport_object(self, se_suport: Union[SESUP, SESUPORT]) -> None:
"""adds an SESUPORT"""
self._type_to_id_map[se_suport.type].append(len(self.se_suport))
self.se_suport.append(se_suport)
def _add_suport_object(self, suport: SUPORT) -> None:
"""adds a SUPORT"""
self._type_to_id_map[suport.type].append(len(self.suport))
self.suport.append(suport)
def _add_suport1_object(self, suport1: SUPORT1) -> None:
"""adds a SUPORT1"""
key = suport1.conid
if key in self.suport1:
self.suport1[key].add_suport1_to_set(suport1)
else:
assert suport1.conid > 0
self.suport1[key] = suport1
self._type_to_id_map[suport1.type].append(key)
def _add_tic_object(self, tic: TIC, allow_overwrites: bool=False) -> None:
"""adds a TIC object"""
key = tic.sid
if key in self.tics:
self.tics[key].add(tic)
else:
assert tic.sid > 0
self.tics[key] = tic
self._type_to_id_map[tic.type].append(key)
def _add_darea_object(self, darea: DAREA, allow_overwrites: bool=False) -> None:
"""adds a DAREA object"""
#key = (darea.sid, darea.p, darea.c)
key = darea.sid
if key in self.dareas:
self.dareas[key].add(darea)
else:
assert darea.sid > 0
self.dareas[key] = darea
self._type_to_id_map[darea.type].append(key)
def _add_dphase_object(self, dphase: DPHASE, allow_overwrites: bool=False) -> None:
"""adds a DPHASE object"""
#key = (dphase.sid, dphase.nid, dphase.component) # dphase.phase_lead,
key = dphase.sid
if key in self.dphases:
self.dphases[key].add(dphase)
else:
assert dphase.sid > 0, key
self.dphases[key] = dphase
self._type_to_id_map[dphase.type].append(key)
def _add_delay_object(self, delay: DELAY, allow_overwrites: bool=False) -> None:
"""adds an DELAY object"""
#key = (delay.sid, delay.nid, delay.component)
key = delay.sid
assert key > 0, 'sid=%s delay=%s' % (key, delay)
if key in self.delays:
self.delays[key].add(delay)
else:
self.delays[key] = delay
self._type_to_id_map[delay.type].append(key)
def _add_aero_object(self, aero: AERO) -> None:
"""adds an AERO object"""
# only one AERO card allowed
assert self.aero is None, '\naero=\n%s old=\n%s' % (aero, self.aero)
self.aero = aero
#self._type_to_id_map[aero.type].append(key)
def _add_aeros_object(self, aeros: AEROS) -> None:
"""adds an AEROS object"""
# only one AEROS card allowed
assert self.aeros is None, '\naeros=\n%s old=\n%s' % (aeros, self.aeros)
self.aeros = aeros
#self._type_to_id_map[aeros.type].append(key)
#def _add_aeroz_object(self, aeroz: AEROZ) -> None:
#"""adds an AEROZ object"""
#key = aeroz.sid
#if key in self.aeroz and not allow_overwrites:
#if not aeroz == self.zona.aeroz[key]:
#assert key not in self.aeroz, 'AEROZ.sid=%s\nold=\n%snew=\n%s' % (key, self.aeroz[key], aeroz)
#else:
#assert key > 0, 'sid=%s method=\n%s' % (key, aefact)
#self.aeroz[key] = aeroz
#self._type_to_id_map[aeroz.type].append(key)
def _add_baror_object(self, baror: BAROR) -> None:
"""adds an BAROR object"""
# only one BAROR card allowed
assert self.baror is None, '\nBAROR=\n%s old=\n%s' % (baror, self.baror)
if self.baror is None:
self.baror = baror
def _add_beamor_object(self, beamor: BEAMOR) -> None:
"""adds an BEAMOR object"""
        # only one BEAMOR card allowed
assert self.beamor is None, '\nBEAMOR=\n%s old=\n%s' % (beamor, self.beamor)
if self.beamor is None:
self.beamor = beamor
def _add_axic_object(self, axic: AXIC) -> None:
"""adds an AXIC object"""
# only one AXIC card allowed
assert self.axic is None, '\naxic=\n%s old=\n%s' % (axic, self.axic)
self.axic = axic
def _add_axif_object(self, axif: AXIF) -> None:
"""adds an AXIF object"""
        # only one AXIF card allowed
assert self.axif is None, '\naxif=\n%s old=\n%s' % (axif, self.axif)
self.axif = axif
def _add_acmodl_object(self, acmodl) -> None:
"""adds a ACMODL object"""
assert self.acmodl is None, self.acmodl
self.acmodl = acmodl
def _add_cyax_object(self, cyax: CYAX) -> None:
"""adds an CYAX object"""
# only one CYAX card allowed
assert self.cyax is None, '\ncyax=\n%s old=\n%s' % (cyax, self.cyax)
self.cyax = cyax
def _add_cyjoin_object(self, cyjoin: CYJOIN) -> None:
"""adds an CYJOIN object"""
key = cyjoin.side
assert key not in self.cyjoin, 'CYJOIN.side=%s\nold=\n%snew=\n%s' % (key, self.cyjoin[key], cyjoin)
assert key >= 0
self.cyjoin[key] = cyjoin
self._type_to_id_map[cyjoin.type].append(key)
def _add_modtrak_object(self, modtrak: MODTRAK) -> None:
"""adds an MODTRAK object"""
        # only one MODTRAK card allowed
assert self.modtrak is None, '\nmodtrak=\n%s old=\n%s' % (modtrak, self.modtrak)
self.modtrak = modtrak
def _add_aefact_object(self, aefact: AEFACT, allow_overwrites: bool=False) -> None:
"""adds an AEFACT object"""
key = aefact.sid
if key in self.aefacts and not allow_overwrites:
if not aefact == self.aefacts[key]:
assert key not in self.aefacts, 'AEFACT.sid=%s\nold=\n%snew=\n%s' % (key, self.aefacts[key], aefact)
else:
assert key > 0, 'sid=%s method=\n%s' % (key, aefact)
self.aefacts[key] = aefact
self._type_to_id_map[aefact.type].append(key)
def _add_aelist_object(self, aelist: AELIST) -> None:
"""adds an AELIST object"""
key = aelist.sid
assert key not in self.aelists, 'AELIST.sid=%s\nold=\n%snew=\n%s' % (key, self.aelists[key], aelist)
assert key >= 0
self.aelists[key] = aelist
self._type_to_id_map[aelist.type].append(key)
def _add_aelink_object(self, aelink: AELINK) -> None:
"""adds an AELINK object"""
key = aelink.aelink_id
assert key >= 0
if key not in self.aelinks:
self.aelinks[key] = []
self.aelinks[key].append(aelink)
self._type_to_id_map[aelink.type].append(key)
#assert key not in self.aestats,'\naestat=%s oldAESTAT=\n%s' %(aestat,self.aestats[key])
def _add_aecomp_object(self, aecomp: Union[AECOMP, AECOMPL]) -> None:
"""adds an AECOMP object"""
key = aecomp.name
assert key not in self.aecomps, '\naecomp=\n%s oldAECOMP=\n%s' % (aecomp, self.aecomps[key])
self.aecomps[key] = aecomp
self._type_to_id_map[aecomp.type].append(key)
def _add_aeparm_object(self, aeparam: AEPARM) -> None:
"""adds an AEPARM object"""
key = aeparam.aeparm_id
assert key not in self.aeparams, '\naeparam=\n%s oldAEPARM=\n%s' % (aeparam, self.aeparams[key])
assert key >= 0
self.aeparams[key] = aeparam
self._type_to_id_map[aeparam.type].append(key)
def _add_aestat_object(self, aestat: AESTAT) -> None:
"""adds an AESTAT object"""
key = aestat.aestat_id
assert key not in self.aestats, '\naestat=\n%s old=\n%s' % (
aestat, self.aestats[key])
assert key >= 0
self.aestats[key] = aestat
self._type_to_id_map[aestat.type].append(key)
def _add_aesurf_object(self, aesurf: AESURF) -> None:
"""adds an AESURF object"""
key = aesurf.aesid
assert key not in self.aesurf, '\naesurf=\n%s old=\n%s' % (
aesurf, self.aesurf[key])
assert key >= 0
self.aesurf[key] = aesurf
self._type_to_id_map[aesurf.type].append(key)
def _add_aesurfs_object(self, aesurfs: AESURFS) -> None:
"""adds an AESURFS object"""
key = aesurfs.aesid
assert key not in self.aesurfs, '\naesurfs=\n%s old=\n%s' % (
aesurfs, self.aesurfs[key])
assert key >= 0
self.aesurfs[key] = aesurfs
self._type_to_id_map[aesurfs.type].append(key)
def _add_csschd_object(self, csschd: CSSCHD) -> None:
"""adds an CSSCHD object"""
key = csschd.sid
assert key not in self.csschds, '\nCSSCHD=\n%s old=\n%s' % (csschd, self.csschds[key])
assert key >= 0
self.csschds[key] = csschd
self._type_to_id_map[csschd.type].append(key)
def _add_caero_object(self, caero: Union[CAERO1, CAERO2, CAERO3, CAERO4, CAERO5]) -> None:
"""adds an CAERO1/CAERO2/CAERO3/CAERO4/CAERO5 object"""
key = caero.eid
assert key not in self.caeros, '\nkey=%s; caero=\n%r old_caero=\n%r' % (
key, caero, self.caeros[key])
assert key > 0
self.caeros[key] = caero
self._type_to_id_map[caero.type].append(key)
def _add_paero_object(self, paero: Union[PAERO1, PAERO2, PAERO3, PAERO4, PAERO5]) -> None:
"""adds an PAERO1/PAERO2/PAERO3/PAERO4/PAERO5 object"""
key = paero.pid
assert key not in self.paeros, '\npaero=\n%r old_paero=\n%r' % (
paero, self.paeros[key])
assert key > 0, 'paero.pid = %r' % (key)
self.paeros[key] = paero
self._type_to_id_map[paero.type].append(key)
def _add_monpnt_object(self, monitor_point: Union[MONPNT1, MONPNT2, MONPNT3]) -> None:
"""adds an MONPNT object"""
key = monitor_point.name
assert key not in self.monitor_points, '\nmonitor_point=\n%soldMOTPNT=\n%s' % (
monitor_point, self.monitor_points[key])
self.monitor_points.append(monitor_point)
self._type_to_id_map[monitor_point.type].append(len(self.monitor_points) - 1)
def _add_spline_object(self, spline: Union[SPLINE1, SPLINE2, SPLINE3, SPLINE4, SPLINE5]) -> None:
"""adds an SPLINE1/SPLINE2/SPLINE3/SPLINE4/SPLINE5 object"""
key = spline.eid
        assert spline.eid not in self.splines, f'\nspline=\n{spline}old_spline=\n{self.splines[key]}'
assert spline.eid > 0, spline
self.splines[key] = spline
self._type_to_id_map[spline.type].append(key)
def _add_gust_object(self, gust: GUST) -> None:
"""adds an GUST object"""
key = gust.sid
assert key not in self.gusts
assert key > 0
self.gusts[key] = gust
self._type_to_id_map[gust.type].append(key)
def _add_trim_object(self, trim: Union[TRIM, TRIM2], allow_overwrites: bool=False) -> None:
"""adds an TRIM object"""
key = trim.sid
if not allow_overwrites:
assert key not in self.trims, 'TRIM=%s old=\n%snew=\n%s' % (key, self.trims[key], trim)
assert key > 0, 'key=%r trim=\n%s' % (key, trim)
self.trims[key] = trim
self._type_to_id_map[trim.type].append(key)
def _add_diverg_object(self, diverg: DIVERG, allow_overwrites: bool=False) -> None:
"""adds an DIVERG object"""
key = diverg.sid
if not allow_overwrites:
assert key not in self.divergs, 'DIVERG=%s old=\n%snew=\n%s' % (key, self.divergs[key], diverg)
assert key > 0, 'key=%r diverg=\n%s' % (key, diverg)
self.divergs[key] = diverg
self._type_to_id_map[diverg.type].append(key)
def _add_flutter_object(self, flutter: FLUTTER) -> None:
"""adds an FLUTTER object"""
key = flutter.sid
assert key not in self.flutters, 'FLUTTER=%s old=\n%snew=\n%s' % (key, self.flutters[key], flutter)
assert key > 0
self.flutters[key] = flutter
self._type_to_id_map[flutter.type].append(key)
def _add_flfact_object(self, flfact: FLFACT) -> None:
"""adds an FLFACT object"""
key = flfact.sid
#assert key not in self.flfacts
assert key > 0
self.flfacts[key] = flfact # set id...
self._type_to_id_map[flfact.type].append(key)
    def _add_dconstr_object(self, dconstr: Union[DCONSTR, DCONADD]) -> None:
"""adds a DCONSTR object"""
#key = (dconstr.oid, dconstr.rid)
key = dconstr.oid
#assert key not in self.dconstrs, 'key=%r DCONSTR/DCONADD=\n%s' % (key, dconstr)
assert dconstr.oid > 0
#assert dconstr.dresp_id > 0
if key in self.dconstrs:
self.dconstrs[key].append(dconstr)
else:
self.dconstrs[key] = [dconstr]
self._type_to_id_map[dconstr.type].append(key)
#def add_DCONADD(self, dconadd, allow_overwrites: bool=False) -> None:
#key = dconadd.oid
#if key in self.dconstrs and not allow_overwrites:
#if not dconadd == self.dconstrs[key]:
#assert key not in self.dconstrs, 'DCONADD=%s old=\n%snew=\n%s' % (
#key, self.dconstrs[key], dconadd)
#else:
#assert key > 0, 'dcid=%s dconadd=%s' % (key, dconadd)
#self.dconstrs[key] = dconadd
#self._type_to_id_map[dconadd.type].append(key)
def _add_desvar_object(self, desvar: DESVAR) -> None:
"""adds a DESVAR object"""
key = desvar.desvar_id
assert key not in self.desvars, 'DESVAR=%s old=\n%snew=\n%s' % (
key, self.desvars[key], desvar)
assert key > 0
self.desvars[key] = desvar
self._type_to_id_map[desvar.type].append(key)
def _add_topvar_object(self, topvar: TOPVAR) -> None:
"""adds a TOPVAR object"""
key = topvar.opt_id
assert key not in self.topvar, 'TOPVAR=%s old=\n%snew=\n%s' % (
key, self.topvar[key], topvar)
assert key > 0
self.topvar[key] = topvar
self._type_to_id_map[topvar.type].append(key)
def _add_ddval_object(self, ddval: DDVAL) -> None:
"""adds a DDVAL object"""
key = ddval.oid
assert key not in self.ddvals, 'DDVAL=%s old=\n%snew=\n%s' % (
key, self.ddvals[key], ddval)
assert key > 0
self.ddvals[key] = ddval
self._type_to_id_map[ddval.type].append(key)
def _add_dlink_object(self, dlink: DLINK) -> None:
"""adds a DLINK object"""
key = dlink.oid
assert key not in self.dlinks, 'DLINK=%s old=\n%snew=\n%s' % (
key, self.dlinks[key], dlink)
assert key > 0
self.dlinks[key] = dlink
self._type_to_id_map[dlink.type].append(key)
def _add_dscreen_object(self, dscreen: DSCREEN) -> None:
"""adds a DSCREEN object"""
key = dscreen.rtype
assert key not in self.dscreen, 'DSCREEN=%s old=\n%snew=\n%s' % (
key, self.dscreen[key], dscreen)
assert len(key) > 0, 'key=%r' % key
self.dscreen[key] = dscreen
self._type_to_id_map[dscreen.type].append(key)
def _add_dresp_object(self, dresp: Union[DRESP1, DRESP2, DRESP3]) -> None:
"""adds a DRESP1/DRESP2/DRESP3 object"""
key = dresp.dresp_id
assert key not in self.dresps, 'DRESPx=%s old=\n%snew=\n%s' % (
key, self.dresps[key], dresp)
assert key > 0
self.dresps[key] = dresp
self._type_to_id_map[dresp.type].append(key)
def _add_dvcrel_object(self, dvcrel: Union[DVCREL1, DVCREL2]) -> None:
"""adds a DVCREL1/DVCREL2 object"""
key = dvcrel.oid
assert key not in self.dvcrels, 'DVCRELx=%s old\n%snew=\n%s' % (
key, self.dvcrels[key], dvcrel)
assert key > 0
self.dvcrels[key] = dvcrel
self._type_to_id_map[dvcrel.type].append(key)
def _add_dvmrel_object(self, dvmrel: Union[DVMREL1, DVMREL2]) -> None:
"""adds a DVMREL1/DVMREL2 object"""
key = dvmrel.oid
assert key not in self.dvmrels, 'DVMRELx=%s old=\n%snew=\n%s' % (
key, self.dvmrels[key], dvmrel)
assert key not in self.dvmrels
assert key > 0
self.dvmrels[key] = dvmrel
self._type_to_id_map[dvmrel.type].append(key)
def _add_dvprel_object(self, dvprel: Union[DVPREL1, DVPREL2]) -> None:
"""adds a DVPREL1/DVPREL2 object"""
key = dvprel.oid
assert key not in self.dvprels, 'DVPRELx=%s old\n%snew=\n%s' % (
key, self.dvprels[key], dvprel)
assert key > 0
self.dvprels[key] = dvprel
self._type_to_id_map[dvprel.type].append(key)
def _add_dvgrid_object(self, dvgrid: DVGRID) -> None:
"""adds a DVGRID object"""
key = dvgrid.dvid
assert key > 0
if key not in self.dvgrids:
self.dvgrids[key] = []
self._type_to_id_map[dvgrid.type].append(key)
self.dvgrids[key].append(dvgrid)
def _add_nlparm_object(self, nlparm: NLPARM) -> None:
"""adds a NLPARM object"""
key = nlparm.nlparm_id
assert key not in self.nlparms
assert key > 0, 'key=%s; nlparm=%s\n' % (key, nlparm)
self.nlparms[key] = nlparm
self._type_to_id_map[nlparm.type].append(key)
def _add_rotor_object(self, rotor: Union[ROTORD, ROTORG]) -> None:
"""adds a ROTORD/ROTORG object"""
key = rotor.sid
assert key > 0, 'key=%s; rotor=%s\n' % (key, rotor)
if key in self.rotors:
rotor_old = self.rotors[key]
assert rotor.type == rotor_old.type
self.rotors[key].nids += rotor.nids
else:
self.rotors[key] = rotor
self._type_to_id_map[rotor.type].append(key)
def _add_nlpci_object(self, nlpci: NLPCI) -> None:
"""adds a NLPCI object"""
key = nlpci.nlpci_id
assert key not in self.nlpcis
assert key > 0
self.nlpcis[key] = nlpci
self._type_to_id_map[nlpci.type].append(key)
def _add_nxstrat_object(self, nxstrat: NXSTRAT) -> None:
key = nxstrat.sid
assert key not in self.nxstrats, 'nxstrats=%s nxstrat=%s' % (self.nxstrats, nxstrat)
assert key > 0
self.nxstrats[key] = nxstrat
self._type_to_id_map[nxstrat.type].append(key)
def _add_tstep_object(self, tstep: Union[TSTEP, TSTEP1],
allow_overwrites: bool=False) -> None:
"""adds a TSTEP object"""
key = tstep.sid
if key in self.tsteps and not allow_overwrites:
if not tstep == self.tsteps[key]:
assert key not in self.tsteps, 'TSTEP=%s\nold=\n%snew=\n%s' % (key, self.tsteps[key], tstep)
else:
assert key > 0, 'sid=%s tstep=\n%s' % (key, tstep)
self.tsteps[key] = tstep
self._type_to_id_map[tstep.type].append(key)
def _add_tstepnl_object(self, tstepnl: TSTEPNL,
allow_overwrites: bool=False) -> None:
"""adds a TSTEPNL object"""
key = tstepnl.sid
if key in self.tstepnls and not allow_overwrites:
if not tstepnl == self.tstepnls[key]:
assert key not in self.tstepnls, 'TSTEPNL=%s\nold=\n%snew=\n%s' % (key, self.tstepnls[key], tstepnl)
else:
assert key > 0, 'sid=%s tstepnl=\n%s' % (key, tstepnl)
self.tstepnls[key] = tstepnl
self._type_to_id_map[tstepnl.type].append(key)
def _add_freq_object(self, freq: Union[FREQ, FREQ1, FREQ2, FREQ3, FREQ4, FREQ5]) -> None:
key = freq.sid
assert key > 0
if key in self.frequencies:
freq0 = self.frequencies[key][0]
if freq0.type == 'FREQ' and freq.type == 'FREQ':
freq0.add_frequency_object(freq)
else:
self.frequencies[key].append(freq)
else:
self.frequencies[key] = [freq]
self._type_to_id_map[freq.type].append(key)
def _add_set_object(self, set_obj: Union[SET1, SET2, SET3]) -> None:
"""adds an SET1/SET3 object"""
key = set_obj.sid
assert key >= 0
if key in self.sets:
self.sets[key].add_set(set_obj)
else:
self.sets[key] = set_obj
self._type_to_id_map[set_obj.type].append(key)
def _add_radset_object(self, set_obj: RADSET) -> None:
"""adds an RADSET object"""
if self.radset:
self.radset.add_set(set_obj)
else:
self.radset = set_obj
#self._type_to_id_map[set_obj.type].append(key)
def _add_aset_object(self, set_obj: Union[ASET, ASET1]) -> None:
"""adds an ASET/ASET1 object"""
self.asets.append(set_obj)
n = len(self._type_to_id_map['ASET'])
self._type_to_id_map['ASET'].append(n)
def _add_omit_object(self, set_obj: Union[OMIT, OMIT1]) -> None:
"""adds an OMIT/OMIT1 object"""
self.omits.append(set_obj)
n = len(self._type_to_id_map['OMIT'])
self._type_to_id_map['OMIT'].append(n)
def _add_bset_object(self, set_obj: Union[BSET, BSET1]) -> None:
"""adds an BSET/BSET1 object"""
self.bsets.append(set_obj)
n = len(self._type_to_id_map['BSET'])
self._type_to_id_map['BSET'].append(n)
def _add_cset_object(self, set_obj: Union[CSET, CSET1]) -> None:
"""adds an CSET/USET1 object"""
self.csets.append(set_obj)
n = len(self._type_to_id_map['CSET'])
self._type_to_id_map['CSET'].append(n)
def _add_qset_object(self, set_obj: Union[QSET, QSET1]) -> None:
"""adds an QSET/QSET1 object"""
self.qsets.append(set_obj)
n = len(self._type_to_id_map['QSET'])
self._type_to_id_map['QSET'].append(n)
def _add_uset_object(self, set_obj: Union[USET, USET1]) -> None:
"""adds an USET/USET1 object"""
key = set_obj.name
if key in self.usets:
self.usets[key].append(set_obj)
else:
self.usets[key] = [set_obj]
self._type_to_id_map[set_obj.type].append(key)
def _add_sebset_object(self, set_obj: Union[SEBSET, SEBSET1]) -> None:
"""adds an SEBSET/SEBSET1 object"""
self.se_bsets.append(set_obj)
def _add_secset_object(self, set_obj: Union[SECSET, SECSET1]) -> None:
"""adds an SECSET/SECSTE1 object"""
self.se_csets.append(set_obj)
def _add_seqset_object(self, set_obj: Union[SEQSET, SEQSET1]) -> None:
"""adds an SEQSET/SEQSET1 object"""
self.se_qsets.append(set_obj)
def _add_seuset_object(self, set_obj: Union[SEUSET, SEUSET1]) -> None:
"""adds an SEUSET/SEUSET1 object"""
key = set_obj.name
if key in self.se_usets:
self.se_usets[key].append(set_obj)
else:
self.se_usets[key] = [set_obj]
self._type_to_id_map[set_obj.type].append(key)
def _add_seset_object(self, set_obj: SESET) -> None:
"""adds an SESET object"""
key = set_obj.seid
assert key >= 0
if key in self.se_sets:
old_set = self.se_sets[key]
set_obj.add_seset(old_set)
self.se_sets[key] = set_obj
self._type_to_id_map[set_obj.type].append(key)
def _add_table_object(self, table: Union[TABLEH1, TABLEHT, TABLES1, TABLEST]) -> None:
"""adds a TABLES1, TABLEST object"""
key = table.tid
if key in self.tables:
if not table == self.tables[key]:
assert key not in self.tables, '\ntable=\n%s old_table=\n%s' % (
table, self.tables[key])
assert key > 0
self.tables[key] = table
self._type_to_id_map[table.type].append(key)
def _add_tabled_object(self, table: Union[TABLED1, TABLED2, TABLED3, TABLED4]) -> None:
"""adds a TABLED1, TABLED2, TABLED3, TABLED4 object"""
key = table.tid
assert key not in self.tables_d, '\ntabled=\n%s old_tabled=\n%s' % (
table, self.tables_d[key])
#assert key > 0; yes you can have negative tables...
self.tables_d[key] = table
self._type_to_id_map[table.type].append(key)
def _add_tablem_object(self, table: Union[TABLEM1, TABLEM2, TABLEM3, TABLEM4]) -> None:
"""adds a TABLEM1, TABLEM2, TABLEM3, TABLEM4 object"""
key = table.tid
assert key not in self.tables_m, '\ntablem=\n%s old_tablem=\n%s' % (
table, self.tables_m[key])
#assert key > 0; yes you can have negative tables...
self.tables_m[key] = table
self._type_to_id_map[table.type].append(key)
def _add_table_sdamping_object(self, table: TABDMP1) -> None:
"""adds a TABDMP1 object"""
key = table.tid
assert key not in self.tables_sdamping, '\nTable=\n%s oldTable=\n%s' % (
table, self.tables_sdamping[key])
#assert key > 0; yes you can have negative tables...
self.tables_sdamping[key] = table
self._type_to_id_map[table.type].append(key)
def _add_random_table_object(self, table: Union[TABRND1, TABRNDG]) -> None:
"""adds a TABRND1, TABRNDG object"""
key = table.tid
assert key not in self.random_tables, '\nTable=\n%s old=\n%s' % (
table, self.random_tables[key])
assert key > 0
self.random_tables[key] = table
self._type_to_id_map[table.type].append(key)
def _add_method_object(self, method: Union[EIGR, EIGRL, EIGB],
allow_overwrites: bool=False) -> None:
"""adds a EIGR/EIGRL object"""
key = method.sid
if key in self.methods and not allow_overwrites:
if not method == self.methods[key]:
assert key not in self.methods, 'sid=%s\nold_method=\n%snew_method=\n%s' % (key, self.methods[key], method)
else:
assert key > 0, 'sid=%s method=\n%s' % (key, method)
self.methods[key] = method
self._type_to_id_map[method.type].append(key)
def _add_cmethod_object(self, method: Union[EIGC, EIGP],
allow_overwrites: bool=False) -> None:
"""adds a EIGB/EIGC object"""
key = method.sid
if key in self.cMethods and not allow_overwrites:
if not method == self.cMethods[key]:
assert key not in self.cMethods, 'sid=%s\nold_cmethod=\n%snew_cmethod=\n%s' % (key, self.cMethods[key], method)
else:
assert key > 0, 'sid=%s cMethod=\n%s' % (key, method)
self.cMethods[key] = method
self._type_to_id_map[method.type].append(key)
def _add_mkaero_object(self, mkaero: Union[MKAERO1, MKAERO2]) -> None:
"""adds an MKAERO1/MKAERO2 object"""
self.mkaeros.append(mkaero)
#---------------------------------------------------------------------------
# parametric
def _add_pset(self, pset: PSET, allow_overwrites: bool=False) -> None:
assert pset.idi not in self.pset, pset
self.pset[pset.idi] = pset
def _add_pval(self, pval: PVAL, allow_overwrites: bool=False) -> None:
if pval.idi not in self.pval:
self.pval[pval.idi] = []
self.pval[pval.idi].append(pval)
def _add_gmcurv(self, curve: GMCURV, allow_overwrites: bool=False) -> None:
assert curve.curve_id not in self.gmcurv, curve
self.gmcurv[curve.curve_id] = curve
def _add_gmsurf(self, surf: GMSURF, allow_overwrites: bool=False) -> None:
assert surf.surf_id not in self.gmsurf, surf
self.gmsurf[surf.surf_id] = surf
def _add_feface(self, face: FEFACE, allow_overwrites: bool=False) -> None:
key = face.face_id
if key in self.feface and not allow_overwrites:
if not face == self.feface[key]:
raise RuntimeError(f'feface is duplicated\n{face}\nold:\n{self.feface[key]}')
else:
self.feface[face.face_id] = face
self._type_to_id_map[face.type].append(key)
#assert face.face_id not in self.feface, face
#self.feface[face.face_id] = face
def _add_feedge(self, edge: FEEDGE, allow_overwrites: bool=False) -> None:
key = edge.edge_id
if key in self.feedge and not allow_overwrites:
if not edge == self.feedge[key]:
raise RuntimeError(f'feedge is duplicated\n{edge}\nold:\n{self.feedge[key]}')
else:
self.feedge[edge.edge_id] = edge
self._type_to_id_map[edge.type].append(key)
#---------------------------------------------------------------------------
|
the-stack_0_18877 | # Most of this code is credited to [hollance](https://github.com/hollance).
# I did some minor changes to suit my need.
import os
import numpy as np
import keras
from keras.models import Sequential, load_model
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.advanced_activations import LeakyReLU
# generated using YAD2K
model_path = "model_data/yolov2-tiny-voc.h5"
# Load the model that was exported by YAD2K.
model = load_model(model_path)
# model.summary()
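# Rebuild the same tiny-YOLOv2 (VOC) topology but without BatchNormalization
# layers; the BN parameters get folded into the preceding Conv2D weights below.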
model_nobn = Sequential()
model_nobn.add(Conv2D(16, (3, 3), padding="same", input_shape=(416, 416, 3)))
model_nobn.add(LeakyReLU(alpha=0.1))
model_nobn.add(MaxPooling2D())
model_nobn.add(Conv2D(32, (3, 3), padding="same"))
model_nobn.add(LeakyReLU(alpha=0.1))
model_nobn.add(MaxPooling2D())
model_nobn.add(Conv2D(64, (3, 3), padding="same"))
model_nobn.add(LeakyReLU(alpha=0.1))
model_nobn.add(MaxPooling2D())
model_nobn.add(Conv2D(128, (3, 3), padding="same"))
model_nobn.add(LeakyReLU(alpha=0.1))
model_nobn.add(MaxPooling2D())
model_nobn.add(Conv2D(256, (3, 3), padding="same"))
model_nobn.add(LeakyReLU(alpha=0.1))
model_nobn.add(MaxPooling2D())
model_nobn.add(Conv2D(512, (3, 3), padding="same"))
model_nobn.add(LeakyReLU(alpha=0.1))
model_nobn.add(MaxPooling2D(strides=(1, 1), padding="same"))
model_nobn.add(Conv2D(1024, (3, 3), padding="same"))
model_nobn.add(LeakyReLU(alpha=0.1))
model_nobn.add(Conv2D(1024, (3, 3), padding="same"))
model_nobn.add(LeakyReLU(alpha=0.1))
model_nobn.add(Conv2D(125, (1, 1), padding="same", activation='linear'))
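# 125 output channels = 5 anchor boxes * (4 box coords + 1 objectness + 20 VOC classes)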
model_nobn.summary()
def fold_batch_norm(conv_layer, bn_layer):
"""Fold the batch normalization parameters into the weights for
the previous layer."""
conv_weights = conv_layer.get_weights()[0]
# Keras stores the learnable weights for a BatchNormalization layer
# as four separate arrays:
# 0 = gamma (if scale == True)
# 1 = beta (if center == True)
# 2 = moving mean
# 3 = moving variance
bn_weights = bn_layer.get_weights()
gamma = bn_weights[0]
beta = bn_weights[1]
mean = bn_weights[2]
variance = bn_weights[3]
epsilon = 1e-3
new_weights = conv_weights * gamma / np.sqrt(variance + epsilon)
new_bias = beta - mean * gamma / np.sqrt(variance + epsilon)
return new_weights, new_bias
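# A quick numerical sanity check of the folding math (added for illustration,
# not part of the original conversion script): applying batch norm after a
# linear output should match rescaling that output with the folded weights and
# bias computed exactly as in fold_batch_norm() above.
_y = np.random.randn(8).astype(np.float32)
_gamma, _beta, _mean, _var, _eps = 1.5, 0.2, 0.1, 0.8, 1e-3
_bn_out = _gamma * (_y - _mean) / np.sqrt(_var + _eps) + _beta
_folded_out = _y * (_gamma / np.sqrt(_var + _eps)) + (_beta - _mean * _gamma / np.sqrt(_var + _eps))
assert np.allclose(_bn_out, _folded_out)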
W_nobn = []
W_nobn.extend(fold_batch_norm(model.layers[1], model.layers[2]))
W_nobn.extend(fold_batch_norm(model.layers[5], model.layers[6]))
W_nobn.extend(fold_batch_norm(model.layers[9], model.layers[10]))
W_nobn.extend(fold_batch_norm(model.layers[13], model.layers[14]))
W_nobn.extend(fold_batch_norm(model.layers[17], model.layers[18]))
W_nobn.extend(fold_batch_norm(model.layers[21], model.layers[22]))
W_nobn.extend(fold_batch_norm(model.layers[25], model.layers[26]))
W_nobn.extend(fold_batch_norm(model.layers[28], model.layers[29]))
W_nobn.extend(model.layers[31].get_weights())
model_nobn.set_weights(W_nobn)
# Make a prediction using the original model and also using the model that
# has batch normalization removed, and check that the differences between
# the two predictions are small enough. They seem to be smaller than 1e-4,
# which is good enough for us, since we'll be using 16-bit floats anyway.
print("Comparing models...")
# order: [ batch, height, width, inputChannel ]
image_data = np.fromfile('model_data/imagedata.bin', dtype=np.float32).reshape(1, 416, 416, 3)
# order: [ batch, inputChannel, height, width ]
corgy_image_data = image_data.transpose(0,3,1,2)
features = model.predict(image_data)
features_nobn = model_nobn.predict(image_data)
# output order in Corgy should be [ batch, inputChannel, height, width ]
corgy_features_nobn = features_nobn.transpose(0,3,1,2)
max_error = 0
for i in range(features.shape[1]):
for j in range(features.shape[2]):
for k in range(features.shape[3]):
diff = np.abs(features[0, i, j, k] - features_nobn[0, i, j, k])
max_error = max(max_error, diff)
if diff > 1e-4:
print(i, j, k, ":", features[0, i, j, k], features_nobn[0, i, j, k], diff)
print("Largest error:", max_error)
# Convert the weights and biases to Metal format.
print("\nConverting parameters...")
dst_path = "Parameters"
W = model_nobn.get_weights()
for i, w in enumerate(W):
j = i // 2 + 1
print(w.shape)
if i % 2 == 0:
# weight order in keras is [ height, width, inputChannel, outputChannel ]
w.transpose(3, 2, 0, 1).tofile(os.path.join(dst_path, "new_corgy_voc_conv%d_W.bin" % j))
else:
w.tofile(os.path.join(dst_path, "new_corgy_voc_conv%d_b.bin" % j))
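# For reference (shapes are illustrative, not taken from this script): a Keras
# kernel of shape (3, 3, 16, 32), i.e. (height, width, inputChannel, outputChannel),
# becomes (32, 16, 3, 3) after the transpose above, which is the
# (outputChannel, inputChannel, height, width) layout expected on the Metal/Corgy side.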
print("Done!") |
the-stack_0_18880 | from util.decorators import restricted
from telegram import Poll
@restricted
def execute(update, context):
"""
    'poll' creates a new poll
:param update: bot update
:param context: CallbackContext
:return: None
"""
if update.message.text.strip() == "/poll":
msg = "These are examples of POLL. Use the command followed by any of these examples.\n\n"
msg += "Regular - anonymous:\n"
context.bot.send_message(chat_id=update.message.chat_id,
text=msg,
disable_web_page_preview=True)
msg = "#Q: How many hours in a year?\n"
msg += "#O: 100\n"
msg += "#O: 876\n"
msg += "#O: 8760\n"
msg += "#O: 87600\n\n"
context.bot.send_message(chat_id=update.message.chat_id,
text=msg,
disable_web_page_preview=True)
msg = "Regular - multiple answers:\n"
context.bot.send_message(chat_id=update.message.chat_id,
text=msg,
disable_web_page_preview=True)
msg = "#Q: How many hours in a year?\n"
msg += "#O: 100\n"
msg += "#O: 876\n"
msg += "#O: 8760\n"
msg += "#O: 87600\n"
msg += "#M: 1\n\n"
context.bot.send_message(chat_id=update.message.chat_id,
text=msg,
disable_web_page_preview=True)
msg = "Regular - not anonymous:\n"
context.bot.send_message(chat_id=update.message.chat_id,
text=msg,
disable_web_page_preview=True)
msg = "#Q: How many hours in a year?\n"
msg += "#O: 100\n"
msg += "#O: 876\n"
msg += "#O: 8760\n"
msg += "#O: 87600\n"
msg += "#A: 0\n\n"
context.bot.send_message(chat_id=update.message.chat_id,
text=msg,
disable_web_page_preview=True)
msg = "Quiz - anonymous:\n"
context.bot.send_message(chat_id=update.message.chat_id,
text=msg,
disable_web_page_preview=True)
msg = "#Q: How many hours in a year?\n"
msg += "#O: 100\n"
msg += "#O: 876\n"
msg += "#O: 8760\n"
msg += "#O: 87600\n"
msg += "#R: 2 (0-based)\n"
msg += "#A: 1\n"
msg += "#T: quiz\n"
context.bot.send_message(chat_id=update.message.chat_id,
text=msg,
disable_web_page_preview=True)
else:
# defaults
poll_type = Poll.REGULAR
anonymity = True
correct_option = None
multiple_answer = False
# remove command
msg = update.message.text.replace('/poll ', '').split("\n")
# get question and options
options = []
for line in msg:
if "#Q:" in line.upper():
question = line.replace("#Q:", "").replace("#q:", "").strip()
elif "#O:" in line.upper():
options.append(line.replace("#O:", "").replace("#o:", "").strip())
elif "#A:" in line.upper():
anonymity = bool(int(line.replace("#A:", "").replace("#a:", "").strip()))
elif "#T:" in line.upper():
flag = line.replace("#T:", "").replace("#t:", "").strip().lower()
if flag == 'quiz':
poll_type = Poll.QUIZ
elif flag == 'regular':
poll_type = Poll.REGULAR
elif "#R:" in line.upper():
correct_option = int(line.replace("#R:", "").replace("#r:", "").strip())
elif "#M:" in line.upper():
multiple_answer = bool(float(line.replace("#M:", "").replace("#m:", "").strip()))
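        # Illustrative parse (hypothetical message, not from the original code):
        #   /poll #Q: Best day?\n#O: Mon\n#O: Fri\n#A: 0\n#M: 1
        # yields question="Best day?", options=["Mon", "Fri"], anonymity=False,
        # multiple_answer=True and poll_type=Poll.REGULAR, which are then passed
        # to send_poll() below.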
context.bot.send_poll(chat_id=update.message.chat_id,
question=question,
options=options,
disable_notification=False,
type=poll_type,
is_anonymous=anonymity,
correct_option_id=correct_option,
allows_multiple_answers=multiple_answer)
# remove command
context.bot.delete_message(chat_id=update.message.chat_id, message_id=update.message.message_id)
|
the-stack_0_18881 | #! /usr/bin/env python
import pytest
import sys
from tests.common.utilities import skip_release
sys.path.append("./configlet/util")
from base_test import do_test_add_rack, backup_minigraph, restore_orig_minigraph
from helpers import log_info
pytestmark = [
pytest.mark.topology("t1")
]
@pytest.fixture(scope="module", autouse=True)
def check_image_version(duthost):
"""Skips this test if the SONiC image installed on DUT is older than 202111
Args:
duthost: DUT host object.
Returns:
None.
"""
skip_release(duthost, ["201811", "201911", "202012", "202106", "202111"])
@pytest.fixture(scope="module")
def configure_dut(duthosts, rand_one_dut_hostname):
try:
log_info("configure_dut fixture on setup for {}".format(rand_one_dut_hostname))
if not restore_orig_minigraph(duthosts[rand_one_dut_hostname]):
backup_minigraph(duthosts[rand_one_dut_hostname])
log_info("configure_dut fixture DONE for {}".format(rand_one_dut_hostname))
yield
finally:
log_info("configure_dut fixture on cleanup for {}".format(rand_one_dut_hostname))
restore_orig_minigraph(duthosts[rand_one_dut_hostname])
log_info("configure_dut fixture DONE for {}".format(rand_one_dut_hostname))
def test_add_rack(configure_dut, tbinfo, duthosts, rand_one_dut_hostname):
global data_dir, orig_db_dir, clet_db_dir, files_dir
duthost = duthosts[rand_one_dut_hostname]
if duthost.is_multi_asic:
pytest.skip('Generic patch updater does not support multiasic')
log_info("sys.version={}".format(sys.version))
do_test_add_rack(duthost, is_storage_backend = 'backend' in tbinfo['topo']['name'])
|
the-stack_0_18882 | from model.entry import Entry
from model.group import Group
import random
def test_add_entry_in_group(app, orm, db):
if len(db.get_groups_without_entries()) == 0:
app.group.create_group(Group(name="test_group"))
if len(db.get_entries_not_in_any_group()) == 0:
app.entry.create_entry(Entry(first_name="test_entry"))
groups = db.get_groups_without_entries()
selected_group = random.choice(groups)
entries = db.get_entries_not_in_any_group()
selected_entry = random.choice(entries)
app.entry.add_entry_in_group(selected_entry.id, selected_group.id)
entries_in_group = orm.get_entries_in_group(selected_group)
assert selected_entry in entries_in_group
def test_delete_entry_from_group(app, orm, db, check_ui):
if len(orm.get_groups_list()) == 0:
initial_group = Group(name="test_group")
app.group.create_group(initial_group)
if len(orm.get_entries_list()) == 0:
initial_entry = Entry(first_name="test_entry")
app.entry.create_entry(initial_entry)
app.entry.add_entry_in_group(initial_entry.id, initial_group.id)
groups = orm.get_groups_list()
selected_group = random.choice(groups)
entries_in_selected_group = orm.get_entries_in_group(selected_group)
if len(entries_in_selected_group) == 0:
selected_entry = random.choice(orm.get_entries_list())
app.entry.add_entry_in_group(selected_entry.id, selected_group.id)
entries_in_selected_group = orm.get_entries_in_group(selected_group)
selected_entry = random.choice(entries_in_selected_group)
app.entry.delete_entry_from_group(selected_entry.id, selected_group.id)
entries_in_group = orm.get_entries_in_group(selected_group)
assert selected_entry not in entries_in_group
|
the-stack_0_18886 | # MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Connection class using the C Extension
"""
# Detection of abstract methods in pylint is not working correctly
# pylint: disable=W0223
from . import errors
from .abstracts import MySQLConnectionAbstract, MySQLCursorAbstract
from .catch23 import INT_TYPES
from .constants import (
CharacterSet, FieldFlag, ServerFlag, ShutdownType, ClientFlag
)
from .protocol import MySQLProtocol
HAVE_CMYSQL = False
# pylint: disable=F0401,C0413
try:
import _mysql_connector
from .cursor_cext import (
CMySQLCursor, CMySQLCursorRaw,
CMySQLCursorBuffered, CMySQLCursorBufferedRaw, CMySQLCursorPrepared,
CMySQLCursorDict, CMySQLCursorBufferedDict, CMySQLCursorNamedTuple,
CMySQLCursorBufferedNamedTuple)
from _mysql_connector import MySQLInterfaceError # pylint: disable=F0401
except ImportError as exc:
raise ImportError(
"MySQL Connector/Python C Extension not available ({0})".format(
str(exc)
))
else:
HAVE_CMYSQL = True
# pylint: enable=F0401,C0413
class CMySQLConnection(MySQLConnectionAbstract):
"""Class initiating a MySQL Connection using Connector/C"""
def __init__(self, **kwargs):
"""Initialization"""
if not HAVE_CMYSQL:
raise RuntimeError(
"MySQL Connector/Python C Extension not available")
self._cmysql = None
self._connection_timeout = 2
self._columns = []
self.converter = None
super(CMySQLConnection, self).__init__(**kwargs)
if len(kwargs) > 0:
self.connect(**kwargs)
def _do_handshake(self):
"""Gather information of the MySQL server before authentication"""
self._handshake = {
'protocol': self._cmysql.get_proto_info(),
'server_version_original': self._cmysql.get_server_info(),
'server_threadid': self._cmysql.thread_id(),
'charset': None,
'server_status': None,
'auth_plugin': None,
'auth_data': None,
'capabilities': self._cmysql.st_server_capabilities(),
}
self._server_version = self._check_server_version(
self._handshake['server_version_original']
)
@property
def _server_status(self):
"""Returns the server status attribute of MYSQL structure"""
return self._cmysql.st_server_status()
def set_unicode(self, value=True):
"""Toggle unicode mode
Set whether we return string fields as unicode or not.
Default is True.
"""
self._use_unicode = value
if self._cmysql:
self._cmysql.use_unicode(value)
if self.converter:
self.converter.set_unicode(value)
@property
def autocommit(self):
"""Get whether autocommit is on or off"""
value = self.info_query("SELECT @@session.autocommit")[0]
return True if value == 1 else False
@autocommit.setter
def autocommit(self, value): # pylint: disable=W0221
"""Toggle autocommit"""
try:
self._cmysql.autocommit(value)
self._autocommit = value
except MySQLInterfaceError as exc:
raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,
sqlstate=exc.sqlstate)
@property
def database(self):
"""Get the current database"""
return self.info_query("SELECT DATABASE()")[0]
@database.setter
def database(self, value): # pylint: disable=W0221
"""Set the current database"""
self._cmysql.select_db(value)
@property
def in_transaction(self):
"""MySQL session has started a transaction"""
return self._server_status & ServerFlag.STATUS_IN_TRANS
def _open_connection(self):
charset_name = CharacterSet.get_info(self._charset_id)[0]
self._cmysql = _mysql_connector.MySQL(
buffered=self._buffered,
raw=self._raw,
charset_name=charset_name,
connection_timeout=int(self._connection_timeout or 10),
use_unicode=self._use_unicode,
auth_plugin=self._auth_plugin)
cnx_kwargs = {
'host': self._host,
'user': self._user,
'password': self._password,
'database': self._database,
'port': self._port,
'client_flags': self._client_flags,
'unix_socket': self._unix_socket,
'compress': self.isset_client_flag(ClientFlag.COMPRESS)
}
if self.isset_client_flag(ClientFlag.SSL):
cnx_kwargs.update({
'ssl_ca': self._ssl['ca'],
'ssl_cert': self._ssl['cert'],
'ssl_key': self._ssl['key'],
'ssl_verify_cert': self._ssl['verify_cert']
})
try:
self._cmysql.connect(**cnx_kwargs)
except MySQLInterfaceError as exc:
raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,
sqlstate=exc.sqlstate)
self._do_handshake()
def close(self):
"""Disconnect from the MySQL server"""
if self._cmysql:
try:
self._cmysql.close()
except MySQLInterfaceError as exc:
raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,
sqlstate=exc.sqlstate)
self._cmysql = None
disconnect = close
def is_connected(self):
"""Reports whether the connection to MySQL Server is available"""
if self._cmysql:
return self._cmysql.ping()
return False
def ping(self, reconnect=False, attempts=1, delay=0):
"""Check availability of the MySQL server
When reconnect is set to True, one or more attempts are made to try
to reconnect to the MySQL server using the reconnect()-method.
delay is the number of seconds to wait between each retry.
When the connection is not available, an InterfaceError is raised. Use
the is_connected()-method if you just want to check the connection
without raising an error.
Raises InterfaceError on errors.
"""
errmsg = "Connection to MySQL is not available"
try:
connected = self._cmysql.ping()
except AttributeError:
pass # Raise or reconnect later
else:
if connected:
return
if reconnect:
self.reconnect(attempts=attempts, delay=delay)
else:
raise errors.InterfaceError(errmsg)
def set_character_set_name(self, charset):
"""Sets the default character set name for current connection.
"""
self._cmysql.set_character_set(charset)
def info_query(self, query):
"""Send a query which only returns 1 row"""
self._cmysql.query(query)
first_row = ()
if self._cmysql.have_result_set:
first_row = self._cmysql.fetch_row()
if self._cmysql.fetch_row():
self._cmysql.free_result()
raise errors.InterfaceError(
"Query should not return more than 1 row")
self._cmysql.free_result()
return first_row
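    # For instance (illustrative), ``self.info_query("SELECT DATABASE()")``
    # returns a single-row tuple such as ``('mydb',)``; the ``autocommit`` and
    # ``database`` properties above rely on exactly this behaviour.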
@property
def connection_id(self):
"""MySQL connection ID"""
try:
return self._cmysql.thread_id()
except MySQLInterfaceError:
pass # Just return None
return None
def get_rows(self, count=None, binary=False, columns=None):
"""Get all or a subset of rows returned by the MySQL server"""
if not (self._cmysql and self.unread_result):
raise errors.InternalError("No result set available")
rows = []
if count is not None and count <= 0:
raise AttributeError("count should be 1 or higher, or None")
counter = 0
try:
row = self._cmysql.fetch_row()
while row:
if self.converter:
row = list(row)
for i, _ in enumerate(row):
row[i] = self.converter.to_python(self._columns[i],
row[i])
row = tuple(row)
rows.append(row)
counter += 1
if count and counter == count:
break
row = self._cmysql.fetch_row()
except MySQLInterfaceError as exc:
self.free_result()
raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,
sqlstate=exc.sqlstate)
return rows
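    # Rough usage sketch (values are illustrative): after ``cnx.cmd_query("SELECT 1, 2")``
    # the connection holds an open result set and ``get_rows(count=1)`` returns a
    # list containing only the first row, e.g. ``[(1, 2)]``; when a converter is
    # configured, every column value is first passed through ``converter.to_python()``.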
def get_row(self, binary=False, columns=None):
"""Get the next rows returned by the MySQL server"""
try:
return self.get_rows(count=1, binary=binary, columns=columns)[0]
except IndexError:
# No row available
return None
def next_result(self):
"""Reads the next result"""
if self._cmysql:
self._cmysql.consume_result()
return self._cmysql.next_result()
return None
def free_result(self):
"""Frees the result"""
if self._cmysql:
self._cmysql.free_result()
def commit(self):
"""Commit current transaction"""
if self._cmysql:
self._cmysql.commit()
def rollback(self):
"""Rollback current transaction"""
if self._cmysql:
self._cmysql.consume_result()
self._cmysql.rollback()
def cmd_init_db(self, database):
"""Change the current database"""
try:
self._cmysql.select_db(database)
except MySQLInterfaceError as exc:
raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,
sqlstate=exc.sqlstate)
def fetch_eof_columns(self):
"""Fetch EOF and column information"""
if not self._cmysql.have_result_set:
raise errors.InterfaceError("No result set")
fields = self._cmysql.fetch_fields()
self._columns = []
for col in fields:
self._columns.append((
col[4],
int(col[8]),
None,
None,
None,
None,
~int(col[9]) & FieldFlag.NOT_NULL,
int(col[9])
))
return {
'eof': {
'status_flag': self._server_status,
'warning_count': self._cmysql.st_warning_count(),
},
'columns': self._columns,
}
def fetch_eof_status(self):
"""Fetch EOF and status information"""
if self._cmysql:
return {
'warning_count': self._cmysql.st_warning_count(),
'field_count': self._cmysql.st_field_count(),
'insert_id': self._cmysql.insert_id(),
'affected_rows': self._cmysql.affected_rows(),
'server_status': self._server_status,
}
return None
def cmd_query(self, query, raw=False, buffered=False, raw_as_string=False):
"""Send a query to the MySQL server"""
self.handle_unread_result()
try:
if not isinstance(query, bytes):
query = query.encode('utf-8')
self._cmysql.query(query,
raw=raw, buffered=buffered,
raw_as_string=raw_as_string)
except MySQLInterfaceError as exc:
raise errors.get_mysql_exception(exc.errno, msg=exc.msg,
sqlstate=exc.sqlstate)
except AttributeError:
if self._unix_socket:
addr = self._unix_socket
else:
addr = self._host + ':' + str(self._port)
raise errors.OperationalError(
errno=2055, values=(addr, 'Connection not available.'))
self._columns = []
if not self._cmysql.have_result_set:
# No result
return self.fetch_eof_status()
return self.fetch_eof_columns()
_execute_query = cmd_query
def cursor(self, buffered=None, raw=None, prepared=None, cursor_class=None,
dictionary=None, named_tuple=None):
"""Instantiates and returns a cursor using C Extension
By default, CMySQLCursor is returned. Depending on the options
while connecting, a buffered and/or raw cursor is instantiated
instead. Also depending upon the cursor options, rows can be
returned as dictionary or named tuple.
Dictionary and namedtuple based cursors are available with buffered
output but not raw.
It is possible to also give a custom cursor through the
cursor_class parameter, but it needs to be a subclass of
mysql.connector.cursor_cext.CMySQLCursor.
Raises ProgrammingError when cursor_class is not a subclass of
CursorBase. Raises ValueError when cursor is not available.
Returns instance of CMySQLCursor or subclass.
:param buffered: Return a buffering cursor
:param raw: Return a raw cursor
:param prepared: Return a cursor which uses prepared statements
:param cursor_class: Use a custom cursor class
:param dictionary: Rows are returned as dictionary
:param named_tuple: Rows are returned as named tuple
:return: Subclass of CMySQLCursor
:rtype: CMySQLCursor or subclass
"""
self.handle_unread_result()
if not self.is_connected():
raise errors.OperationalError("MySQL Connection not available.")
if cursor_class is not None:
if not issubclass(cursor_class, MySQLCursorAbstract):
raise errors.ProgrammingError(
"Cursor class needs be to subclass"
" of cursor_cext.CMySQLCursor")
return (cursor_class)(self)
buffered = buffered or self._buffered
raw = raw or self._raw
cursor_type = 0
if buffered is True:
cursor_type |= 1
if raw is True:
cursor_type |= 2
if dictionary is True:
cursor_type |= 4
if named_tuple is True:
cursor_type |= 8
if prepared is True:
cursor_type |= 16
types = {
0: CMySQLCursor, # 0
1: CMySQLCursorBuffered,
2: CMySQLCursorRaw,
3: CMySQLCursorBufferedRaw,
4: CMySQLCursorDict,
5: CMySQLCursorBufferedDict,
8: CMySQLCursorNamedTuple,
9: CMySQLCursorBufferedNamedTuple,
16: CMySQLCursorPrepared
}
try:
return (types[cursor_type])(self)
except KeyError:
args = ('buffered', 'raw', 'dictionary', 'named_tuple', 'prepared')
raise ValueError('Cursor not available with given criteria: ' +
', '.join([args[i] for i in range(5)
if cursor_type & (1 << i) != 0]))
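    # Example of the bit-flag mapping above (illustrative): cursor(buffered=True)
    # gives cursor_type == 1 -> CMySQLCursorBuffered, and
    # cursor(buffered=True, dictionary=True) gives 1 | 4 == 5 -> CMySQLCursorBufferedDict,
    # while an unsupported combination such as raw + dictionary (cursor_type == 6)
    # falls through to the ValueError above.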
@property
def num_rows(self):
"""Returns number of rows of current result set"""
if not self._cmysql.have_result_set:
raise errors.InterfaceError("No result set")
return self._cmysql.num_rows()
@property
def warning_count(self):
"""Returns number of warnings"""
if not self._cmysql:
return 0
return self._cmysql.warning_count()
@property
def result_set_available(self):
"""Check if a result set is available"""
if not self._cmysql:
return False
return self._cmysql.have_result_set
@property
def unread_result(self):
"""Check if there are unread results or rows"""
return self.result_set_available
@property
def more_results(self):
"""Check if there are more results"""
return self._cmysql.more_results()
def prepare_for_mysql(self, params):
"""Prepare parameters for statements
        This method is used by cursors to prepare the parameters found in the
        list, tuple or dict params.
Returns dict.
"""
if isinstance(params, (list, tuple)):
result = self._cmysql.convert_to_mysql(*params)
elif isinstance(params, dict):
result = {}
for key, value in params.items():
result[key] = self._cmysql.convert_to_mysql(value)[0]
else:
raise ValueError("Could not process parameters")
return result
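    # Rough sketch (parameter values are illustrative): for a sequence such as
    # (5, 'abc') the converted values are returned positionally by the C
    # extension, while for a mapping such as {'id': 5} each value is converted
    # individually and a dict keyed by the same names is returned, matching the
    # two branches above.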
def consume_results(self):
"""Consume the current result
This method consume the result by reading (consuming) all rows.
"""
self._cmysql.consume_result()
def cmd_change_user(self, username='', password='', database='',
charset=33):
"""Change the current logged in user"""
try:
self._cmysql.change_user(username, password, database)
except MySQLInterfaceError as exc:
raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,
sqlstate=exc.sqlstate)
self._charset_id = charset
self._post_connection()
def cmd_refresh(self, options):
"""Send the Refresh command to the MySQL server"""
try:
self._cmysql.refresh(options)
except MySQLInterfaceError as exc:
raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,
sqlstate=exc.sqlstate)
return self.fetch_eof_status()
def cmd_quit(self):
"""Close the current connection with the server"""
self.close()
def cmd_shutdown(self, shutdown_type=None):
"""Shut down the MySQL Server"""
if not self._cmysql:
raise errors.OperationalError("MySQL Connection not available")
if shutdown_type:
if not ShutdownType.get_info(shutdown_type):
raise errors.InterfaceError("Invalid shutdown type")
level = shutdown_type
else:
level = ShutdownType.SHUTDOWN_DEFAULT
try:
self._cmysql.shutdown(level)
except MySQLInterfaceError as exc:
raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,
sqlstate=exc.sqlstate)
self.close()
def cmd_statistics(self):
"""Return statistics from the MySQL server"""
self.handle_unread_result()
try:
stat = self._cmysql.stat()
return MySQLProtocol().parse_statistics(stat, with_header=False)
except (MySQLInterfaceError, errors.InterfaceError) as exc:
raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,
sqlstate=exc.sqlstate)
def cmd_process_kill(self, mysql_pid):
"""Kill a MySQL process"""
if not isinstance(mysql_pid, INT_TYPES):
raise ValueError("MySQL PID must be int")
self.info_query("KILL {0}".format(mysql_pid))
def handle_unread_result(self):
"""Check whether there is an unread result"""
if self.can_consume_results:
self.consume_results()
elif self.unread_result:
raise errors.InternalError("Unread result found")
|
the-stack_0_18887 | #GCPMetricsFunction v0.7.2
#All-in-one metrics function
'''MIT License
Copyright (c) 2019 Splunk
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. '''
import base64
import argparse
import os
import pprint
import time
import json
import re
import threading
from threading import Thread
from queue import Queue
from google.cloud import monitoring_v3
from datetime import datetime
from datetime import date
import time
import requests
from requests.adapters import HTTPAdapter
import urllib3
## disable the urllib3 InsecureRequestWarning raised below when posting to HEC over a self-signed SSL cert
urllib3.disable_warnings()
#threadsafe HEC Events list
class HECMessages:
def __init__(self):
self.HECevents = []
self._lock = threading.Lock()
def locked_update(self, HECevent):
with self._lock:
self.HECevents.append(HECevent)
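# Usage sketch (illustrative): each BuilderThreadWorker below calls
# HECevents.locked_update(event) concurrently, and the Lock makes the append
# atomic, so HECevents.HECevents can be read safely once metricsq.join() returns.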
"""Triggered from a message on a Cloud Pub/Sub topic.
Args:
event (dict): Event payload.
context (google.cloud.functions.Context): Metadata for the event.
These values are ignored as used only as a Trigger for this function
"""
def hello_pubsub(event, context):
HEC_Pack_size=20 # number of events per http post to HEC. Max size = 5MB by default on HEC
now = time.time()
#HECevents=[]
HECevents=HECMessages() #create threadsafe message list
metricslist=json.loads(os.environ['METRICS_LIST'])
try:
payloadType=os.environ['METRIC_INDEX_TYPE']
except:
payloadType='EVENT'
#print(metricslist)
workers=len(metricslist)
if workers>8:
workers=8
metricsq=Queue()
for x in range(workers):
worker = BuilderThreadWorker(metricsq)
# Set as daemon thread
worker.daemon = True
worker.start()
for metric in metricslist:
metricsq.put((metric, now, HECevents,payloadType))
#wait for all of the builds to complete
metricsq.join()
message_counter=0
package=''
flushed=0
workers=int(round(len(HECevents.HECevents)/HEC_Pack_size))
queue = Queue()
threadcount=10
if workers<threadcount:
threadcount=workers
# Create (max) 10 worker threads (no need to thread more than number of packages)
for x in range(threadcount):
worker = HECThreadWorker(queue)
# Set as daemon thread
worker.daemon = True
worker.start()
for events in HECevents.HECevents:
package=package+events
message_counter+=1
if message_counter>HEC_Pack_size:
#splunkHec(package);
queue.put(package)
message_counter=0;
package=''
if len(package)>0:
#splunkHec(package);
queue.put(package)
# wait for the queue to finish processing all the tasks
queue.join()
class BuilderThreadWorker(Thread):
def __init__(self, queue):
Thread.__init__(self)
self.queue = queue
def run(self):
while True:
# Get the parameters from the queue and expand the queue
metric, now, HECevents,payloadType = self.queue.get()
try:
MetricBuilder(metric, now, HECevents,payloadType)
finally:
self.queue.task_done()
def MetricBuilder(metric,now,HECevents,payloadType):
one_time_series = list_time_series(os.environ['PROJECTID'], metric, now, int(os.environ['TIME_INTERVAL']))
source=os.environ['PROJECTID']+':'+metric
for data in one_time_series:
pointsStrList=str(data.points)
strdata=str(data)
metricKindPart=get_metric_kind(strdata)
valueTypePart=get_value_type(strdata)
metricPart=str(data.metric)
resourcePart=str(data.resource)
pointsList=pullPointsList(pointsStrList)
resourcePart=pull_labels(resourcePart,'"resource":{',1,1)
metricPart=pull_labels(metricPart,'"metric":{',1,1)
        numPoints = len(pointsList) // 3  # pullPointsList stores 3 entries (payload, timestamp, value) per point
ix=0
getevent='NULL'
while ix<numPoints:
if pointsList[ix,2]!="-": #ignore distributions with no values
getevent = makeEvent(source,metricPart,resourcePart,metricKindPart,valueTypePart,pointsList[ix,0],pointsList[ix,1],pointsList[ix,2],now,payloadType)
else:
getevent='NULL'
ix=ix+1
if getevent!='NULL':
HECevents.locked_update(getevent)
def makeEvent(source,metric,resource,metrickind,valuetype,points,timevalue,value,now,payloadType):
try:
host=os.environ['HOST']
except:
host='GCPMetricsFunction'
try:
sourcetype=os.environ['SPLUNK_SOURCETYPE']
except:
sourcetype='google:gcp:monitoring'
if int(timevalue)<((now - int(os.environ['TIME_INTERVAL'])*60) - 180): #filter out earlier events to avoid duplications
HECevent='NULL'
else:
if payloadType=='EVENT':
HECevent='{"time": '+ timevalue + ', "host": "'+ host + '", "source": "'+source+'", "sourcetype": "'+sourcetype+'", "event":{'
HECevent=HECevent+points+','+metric+resource+metrickind+valuetype+'}}'
HECevent=HECevent.replace('\n','')
else: #metric
HECevent='{"time":'+ timevalue+',"event":"metric","source":"'+source+'","host":"'+host+'","fields":{'
metric=stripMetric(metric)
resource=stripResource(resource)
points=stripPoints(points)
HECevent=HECevent+metrickind+resource+','+valuetype+','+metric+',"_value":'
HECevent=HECevent+value+'}}'
HECevent=HECevent.replace("\n","")
return HECevent
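# Shape of the two payload variants built above (field values are illustrative,
# not real data):
#   EVENT:  {"time": 1590000000, "host": "...", "source": "...",
#            "sourcetype": "google:gcp:monitoring", "event": {...}}
#   METRIC: {"time": 1590000000, "event": "metric", "source": "...",
#            "host": "...", "fields": {..., "_value": 42}}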
def stripMetric(in_str):
start_pt=in_str.find('"metric":{"labels":{')
if start_pt!=-1:
start_pt=start_pt+21
else: #there is only the metric, no labels
start_pt=in_str.find('"type":')
end_pt=len(in_str)-2
ret_string=in_str[start_pt:end_pt]
ret_string=ret_string.replace('type','metric_name')
return ret_string.replace('}','')
def stripResource(in_str):
#find the resource key:values for metrics index format only
start_pt=in_str.find('"resource":{"labels":{')+23
end_pt=len(in_str)-1
in_str=in_str[start_pt:end_pt]
in_str=in_str.replace('type','resourceType')
return in_str.replace('}','')
def stripPoints(in_str):
#this is for distribution values only - need to extract key:value pairs for metrics index
start_pt=in_str.find('distributionValue')
if start_pt>0: #distribution value - need to extract key:value pairs for metrics index
start_pt=in_str.find('"count"',start_pt)
end_pt=in_str.find(',',start_pt)
ret_string=in_str[start_pt:end_pt]
start_pt=in_str.find('"exponentialBuckets":{',end_pt)+23
end_pt=in_str.find('}},',start_pt)-2
ret_string=ret_string+in_str[start_pt]
else:
ret_string=''
return ret_string
def list_time_series(project_id, metric_type, now_time, timelength):
client = monitoring_v3.MetricServiceClient()
project_name = client.project_path(project_id)
interval = monitoring_v3.types.TimeInterval()
now = now_time #time.time()
interval.end_time.seconds = int(now)
interval.end_time.nanos = int(
(now - interval.end_time.seconds) * 10**9)
interval.start_time.seconds = int(now - timelength*60) - 180
interval.start_time.nanos = interval.end_time.nanos
metric_string = 'metric.type = ' + '"'+metric_type+'"'
results = client.list_time_series(
project_name,
metric_string,
interval,
monitoring_v3.enums.ListTimeSeriesRequest.TimeSeriesView.FULL)
return results
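# Sketch of what the query above amounts to (values are illustrative): for
# metric_type 'compute.googleapis.com/instance/cpu/utilization' and
# timelength 5, the Monitoring API is asked for the FULL time series of that
# metric over roughly the last 5 minutes, plus a 180 second overlap that
# makeEvent() later uses to drop duplicate points.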
def uxtime(unixtime):
return datetime.utcfromtimestamp(unixtime).strftime('%Y-%m-%dT%H:%M:%SZ')
def pull_labels(in_string,initialstr,event,flag):
#extract the labels from the payload, return json format
returning=initialstr
#get the type line
start_type=in_string.find('type:')+5
end_type=in_string.find('"',start_type+3)+1
typestr=in_string[start_type:end_type]
typestr='"type":'+typestr
#get the key/value pairs from labels, cut them out of string
cutstr=in_string
start_k = cutstr.find('key:') + 4
#make sure it has labels, if not, then only has type
if (start_k>3):
returning=returning+'"labels":{'
typeonly=1
while (start_k!=3):
end_k = cutstr.find('value:')-1
start_v = end_k+8
end_v= cutstr.find('}')-1
returning=returning+cutstr[start_k:end_k]+':'+cutstr[start_v:end_v]
cutstr=cutstr[end_v+2:]
start_k = cutstr.find('key:') + 4
if start_k>3: #if more keys, then add comma, otherwise, end of labels
returning=returning+','
else:
returning=returning+'},'
returning=returning+typestr+'},'
return returning
def str_type(type):
    #switch type from API value to equivalent used by Splunk GCP Add-On for compatibility
switcher={
'bool_value:':'"boolValue"',
'int64_value:':'"int64Value"',
'double_value:':'"doubleValue"',
'string_value:':'"stringValue"',
'distribution_value':'"distributionValue"'
}
return switcher.get(type,'"TYPE_UNSPECIFIED"')
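# For example (illustrative): str_type('double_value:') returns '"doubleValue"',
# and any key missing from the switcher above falls back to '"TYPE_UNSPECIFIED"'.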
def pullPointsList(in_str):
#extract the points from the list of values returned by the API call. Return dict with json, _time, and metric value
#in the case of distribution values, the value is taken from the mean value
header='"points": [{"interval": {"endTime":'
retarr={}
count=0
start_t = in_str.find('seconds:',1) + 8
while start_t>7:
end_t = in_str.find('}',start_t)
strtime=in_str[start_t:end_t]
nanos_t = strtime.find('nanos')
if nanos_t>0:
strtime=strtime[0:nanos_t] #some points have nanos.
starttime=uxtime(int(strtime))
start_t2 = in_str.find('seconds:',end_t) + 8
end_t2 = in_str.find('}',start_t2)
endtimeNum = in_str[start_t2:end_t2-3]
nanos_t = endtimeNum.find('nanos')
if nanos_t>0:
endtimeNum=endtimeNum[0:nanos_t] #some points have nanos.
endtime=uxtime(int(endtimeNum))
start_vt = in_str.find('value {',start_t) + 10
end_vt = in_str.find(' ',start_vt)
valuet = str_type(in_str[start_vt:end_vt])
if valuet=='"distributionValue"':
end_val = in_str.find('}\n}',end_vt)
value='{' + getDistribution(in_str[end_vt+5:end_val-5])
retarr[count,0]=header + '"' + endtime+'",'+' "startTime": "' + starttime + '"},"value": {' + valuet + ':' + value + '}}}]'
mean_st=value.find('"mean":')+7
if mean_st<7: #some distributions return with empty datasets; we will ignore those later
value='-'
else:
mean_end=value.find(',',mean_st)-1
value=value[mean_st:mean_end]
else:
end_val = in_str.find('}',end_vt) -1
value = in_str[end_vt+1:end_val]
if value=='':
value='0'
retarr[count,0]=header + '"' + endtime+'",'+' "startTime": "' + starttime + '"},"value": {'
retarr[count,0]=retarr[count,0] + valuet + ': "' + value + '"}}]'
retarr[count,1]= endtimeNum
retarr[count,2]= value
count=count+1
start_t = in_str.find('seconds:',end_val) + 8
#end while
return retarr
def get_metric_kind(in_str):
#pull out the metric Kind details, return in json format
start_kind=in_str.find('metric_kind')+13
end_kind=in_str.find('\n',start_kind)
metricKind='"metricKind": "' + in_str[start_kind:end_kind] + '",'
return metricKind
def get_value_type(in_str):
#pull out the value type and return in json format
start_type=in_str.find('value_type')+12
end_type=in_str.find('\n',start_type)
valueType='"valueType": "' + in_str[start_type:end_type] + '"'
return valueType
def getDistribution(in_str):
#for distribution values, need to re-format the payload into a json format compatible with the Splunk GCP Add-On
in_str=in_str.replace('count:','"count":')
in_str=in_str.replace(' mean:',',"mean":')
in_str=in_str.replace(' sum_of_squared_deviation:',',"sumOfSquaredDeviation":')
in_str=in_str.replace(' bucket_options ',',"bucketOptions":')
in_str=in_str.replace('exponential_buckets','"exponentialBuckets":')
in_str=in_str.replace('num_finite_buckets:','"numFiniteBuckets":')
in_str=in_str.replace(' growth_factor:',',"growthFactor":')
in_str=in_str.replace(' scale',',"scale"')
first_bucket=in_str.find('bucket_counts')-1
if first_bucket>0:
buckets=in_str[first_bucket:]
buckets=buckets.replace('bucket_counts:', '')
        bucketvals=re.sub(r"(\d+)",r'"\1",',buckets) #quote each whole bucket count (multi-digit safe)
        in_str=in_str[0:first_bucket-1]+', "bucketCounts":['+bucketvals+']'
        in_str=re.sub(r",\s*]",']',in_str) #replace the last comma
in_str=in_str.replace(' ','')
return in_str
class HECThreadWorker(Thread):
def __init__(self, queue):
Thread.__init__(self)
self.queue = queue
def run(self):
while True:
# Get the log from the queue
logdata = self.queue.get()
try:
splunkHec(logdata)
finally:
self.queue.task_done()
def splunkHec(logdata):
#post to HEC
url = 'https://'+os.environ['HEC_URL']
try:
ix_type=os.environ['METRIC_INDEX_TYPE']
except:
ix_type='EVENT'
if ix_type=='EVENT':
url=url+'/services/collector/event'
else:
url=url+'/services/collector'
token = os.environ['HEC_TOKEN']
s = requests.Session()
#s.config['keep_alive'] = False #HEC performance is improved by keepalive, but event distribution is affected. Setting to false provides better event distribution across indexers
s.mount( 'http://' , HTTPAdapter(max_retries= 3 ))
s.mount( 'https://' , HTTPAdapter(max_retries= 3 ))
authHeader = {'Authorization': 'Splunk '+ token}
try:
r = s.post(url, headers=authHeader, data=logdata, verify=False, timeout=2)
r.raise_for_status()
except requests.exceptions.HTTPError as errh:
print ("Http Error:",errh)
print(errh.response.status_code)
if errh.response.status_code<500:
print(r.json())
errorHandler(logdata,url,token)
except requests.exceptions.ConnectionError as errc:
print ("Error Connecting:",errc)
errorHandler(logdata,url,token)
except requests.exceptions.Timeout as errt:
print ("Timeout Error:",errt)
errorHandler(logdata,url,token)
except requests.exceptions.RequestException as err:
print ("Error: ",err)
errorHandler(logdata,url,token)
except:
print("unknown Error in http post >> message content:")
print(logdata.replace('\n',''))
errorHandler(logdata,url,token)
def errorHandler(logdata,url,token):
"""Publishes failed messages to RETRY Pub/Sub topic."""
from google.cloud import pubsub_v1
project_id = os.environ['PROJECTID']
topic_name = os.environ['RETRY_TOPIC']
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(project_id, topic_name)
data = logdata.encode('utf-8')
future = publisher.publish(topic_path, data, url=url, token=token, source='MetricsFunction')
print(future.result())
print('Published messages into PubSub')
|
the-stack_0_18888 | # -*- coding: utf-8 -*-
'''
NAPALM Network
==============
Basic methods for interaction with the network device through the virtual proxy 'napalm'.
:codeauthor: Mircea Ulinic <[email protected]> & Jerome Fleury <[email protected]>
:maturity: new
:depends: napalm
:platform: unix
Dependencies
------------
- :mod:`napalm proxy minion <salt.proxy.napalm>`
.. versionadded:: 2016.11.0
.. versionchanged:: 2017.7.0
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
import time
import logging
import datetime
log = logging.getLogger(__name__)
# Import Salt libs
from salt.ext import six
import salt.utils.files
import salt.utils.napalm
import salt.utils.versions
import salt.utils.templates
# Import 3rd-party libs
try:
import jxmlease # pylint: disable=unused-import
HAS_JXMLEASE = True
except ImportError:
HAS_JXMLEASE = False
# ----------------------------------------------------------------------------------------------------------------------
# module properties
# ----------------------------------------------------------------------------------------------------------------------
__virtualname__ = 'net'
__proxyenabled__ = ['*']
__virtual_aliases__ = ('napalm_net',)
# uses NAPALM-based proxy to interact with network devices
# ----------------------------------------------------------------------------------------------------------------------
# property functions
# ----------------------------------------------------------------------------------------------------------------------
def __virtual__():
'''
NAPALM library must be installed for this module to work and run in a (proxy) minion.
'''
return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__)
# ----------------------------------------------------------------------------------------------------------------------
# helper functions -- will not be exported
# ----------------------------------------------------------------------------------------------------------------------
def _filter_list(input_list, search_key, search_value):
'''
Filters a list of dictionary by a set of key-value pair.
:param input_list: is a list of dictionaries
:param search_key: is the key we are looking for
:param search_value: is the value we are looking for the key specified in search_key
    :return: filtered list of dictionaries
'''
output_list = list()
for dictionary in input_list:
if dictionary.get(search_key) == search_value:
output_list.append(dictionary)
return output_list
def _filter_dict(input_dict, search_key, search_value):
'''
Filters a dictionary of dictionaries by a key-value pair.
:param input_dict: is a dictionary whose values are lists of dictionaries
:param search_key: is the key in the leaf dictionaries
    :param search_value: is the value in the leaf dictionaries
:return: filtered dictionary
'''
output_dict = dict()
for key, key_list in six.iteritems(input_dict):
key_list_filtered = _filter_list(key_list, search_key, search_value)
if key_list_filtered:
output_dict[key] = key_list_filtered
return output_dict
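# Illustrative example of the two helpers above (the data is made up):
#   _filter_list([{'as': 65000}, {'as': 65001}], 'as', 65001)
#       -> [{'as': 65001}]
#   _filter_dict({'xe-0/0/0': [{'as': 65000}], 'xe-0/0/1': [{'as': 65001}]}, 'as', 65001)
#       -> {'xe-0/0/1': [{'as': 65001}]}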
def _safe_commit_config(loaded_result, napalm_device):
    '''
    Commit the changes loaded in the candidate configuration, discarding them
    and flagging the failure in ``loaded_result`` when the commit does not succeed.
    '''
    _commit = commit(inherit_napalm_device=napalm_device)  # calls the function commit, defined below
if not _commit.get('result', False):
# if unable to commit
loaded_result['comment'] += _commit['comment'] if _commit.get('comment') else 'Unable to commit.'
loaded_result['result'] = False
# unable to commit, something went wrong
discarded = _safe_dicard_config(loaded_result, napalm_device)
if not discarded['result']:
return loaded_result
return _commit
def _safe_dicard_config(loaded_result, napalm_device):
    '''
    Discard the loaded candidate configuration, recording the failure details
    in ``loaded_result`` and closing the temporary connection when the discard fails.
    '''
log.debug('Discarding the config')
log.debug(loaded_result)
_discarded = discard_config(inherit_napalm_device=napalm_device)
if not _discarded.get('result', False):
loaded_result['comment'] += _discarded['comment'] if _discarded.get('comment') \
else 'Unable to discard config.'
loaded_result['result'] = False
# make sure it notifies
# that something went wrong
_explicit_close(napalm_device)
__context__['retcode'] = 1
return loaded_result
return _discarded
def _explicit_close(napalm_device):
'''
Will explicily close the config session with the network device,
when running in a now-always-alive proxy minion or regular minion.
This helper must be used in configuration-related functions,
as the session is preserved and not closed before making any changes.
'''
if salt.utils.napalm.not_always_alive(__opts__):
# force closing the configuration session
# when running in a non-always-alive proxy
# or regular minion
try:
napalm_device['DRIVER'].close()
except Exception as err:
log.error('Unable to close the temp connection with the device:')
log.error(err)
log.error('Please report.')
def _config_logic(napalm_device,
loaded_result,
test=False,
debug=False,
replace=False,
commit_config=True,
loaded_config=None,
commit_in=None,
commit_at=None,
revert_in=None,
revert_at=None,
commit_jid=None,
**kwargs):
'''
Builds the config logic for `load_config` and `load_template` functions.
'''
# As the Salt logic is built around independent events
# when it comes to configuration changes in the
# candidate DB on the network devices, we need to
# make sure we're using the same session.
# Hence, we need to pass the same object around.
# the napalm_device object is inherited from
# the load_config or load_template functions
# and forwarded to compare, discard, commit etc.
# then the decorator will make sure that
# if not proxy (when the connection is always alive)
# and the `inherit_napalm_device` is set,
# `napalm_device` will be overridden.
# See `salt.utils.napalm.proxy_napalm_wrap` decorator.
current_jid = kwargs.get('__pub_jid')
if not current_jid:
current_jid = '{0:%Y%m%d%H%M%S%f}'.format(datetime.datetime.now())
loaded_result['already_configured'] = False
loaded_result['loaded_config'] = ''
if debug:
loaded_result['loaded_config'] = loaded_config
_compare = compare_config(inherit_napalm_device=napalm_device)
if _compare.get('result', False):
loaded_result['diff'] = _compare.get('out')
loaded_result.pop('out', '') # not needed
else:
loaded_result['diff'] = None
loaded_result['result'] = False
loaded_result['comment'] = _compare.get('comment')
__context__['retcode'] = 1
return loaded_result
_loaded_res = loaded_result.get('result', False)
if not _loaded_res or test:
# if unable to load the config (errors / warnings)
# or in testing mode,
# will discard the config
if loaded_result['comment']:
loaded_result['comment'] += '\n'
if not len(loaded_result.get('diff', '')) > 0:
loaded_result['already_configured'] = True
discarded = _safe_dicard_config(loaded_result, napalm_device)
if not discarded['result']:
return loaded_result
loaded_result['comment'] += 'Configuration discarded.'
# loaded_result['result'] = False not necessary
# as the result can be true when test=True
_explicit_close(napalm_device)
if not loaded_result['result']:
__context__['retcode'] = 1
return loaded_result
if not test and commit_config:
# if not in testing mode and trying to commit
if commit_jid:
log.info('Committing the JID: %s', str(commit_jid))
removed = cancel_commit(commit_jid)
log.debug('Cleaned up the commit from the schedule')
log.debug(removed['comment'])
if len(loaded_result.get('diff', '')) > 0:
# if not testing mode
# and also the user wants to commit (default)
# and there are changes to commit
if commit_in or commit_at:
commit_time = __utils__['timeutil.get_time_at'](time_in=commit_in,
time_at=commit_in)
# schedule job
scheduled_job_name = '__napalm_commit_{}'.format(current_jid)
temp_file = salt.utils.files.mkstemp()
with salt.utils.files.fopen(temp_file, 'w') as fp_:
fp_.write(loaded_config)
scheduled = __salt__['schedule.add'](scheduled_job_name,
function='net.load_config',
job_kwargs={
'filename': temp_file,
'commit_jid': current_jid,
'replace': replace
},
once=commit_time)
log.debug('Scheduling job')
log.debug(scheduled)
saved = __salt__['schedule.save']() # ensure the schedule is
# persistent cross Minion restart
discarded = _safe_dicard_config(loaded_result, napalm_device)
# discard the changes
if not discarded['result']:
discarded['comment'] += ('Scheduled the job to be executed at {schedule_ts}, '
'but was unable to discard the config: \n').format(schedule_ts=commit_time)
return discarded
loaded_result['comment'] = ('Changes discarded for now, and scheduled commit at: {schedule_ts}.\n'
'The commit ID is: {current_jid}.\n'
'To discard this commit, you can execute: \n\n'
'salt {min_id} net.cancel_commit {current_jid}').format(schedule_ts=commit_time,
min_id=__opts__['id'],
current_jid=current_jid)
loaded_result['commit_id'] = current_jid
return loaded_result
log.debug('About to commit:')
log.debug(loaded_result['diff'])
if revert_in or revert_at:
revert_time = __utils__['timeutil.get_time_at'](time_in=revert_in,
time_at=revert_at)
if __grains__['os'] == 'junos':
if not HAS_JXMLEASE:
loaded_result['comment'] = ('This feature requires the library jxmlease to be installed.\n'
'To install, please execute: ``pip install jxmlease``.')
loaded_result['result'] = False
return loaded_result
timestamp_at = __utils__['timeutil.get_timestamp_at'](time_in=revert_in,
time_at=revert_at)
minutes = int((timestamp_at - time.time())/60)
_comm = __salt__['napalm.junos_commit'](confirm=minutes)
if not _comm['out']:
# If unable to commit confirm, should try to bail out
loaded_result['comment'] = 'Unable to commit confirm: {}'.format(_comm['message'])
loaded_result['result'] = False
# But before exiting, we must gracefully discard the config
discarded = _safe_dicard_config(loaded_result, napalm_device)
if not discarded['result']:
return loaded_result
else:
temp_file = salt.utils.files.mkstemp()
running_config = __salt__['net.config'](source='running')['out']['running']
with salt.utils.files.fopen(temp_file, 'w') as fp_:
fp_.write(running_config)
committed = _safe_commit_config(loaded_result, napalm_device)
if not committed['result']:
# If unable to commit, dicard the config (which is
# already done by the _safe_commit_config function), and
# return with the command and other details.
return loaded_result
scheduled_job_name = '__napalm_commit_{}'.format(current_jid)
scheduled = __salt__['schedule.add'](scheduled_job_name,
function='net.load_config',
job_kwargs={
'filename': temp_file,
'commit_jid': current_jid,
'replace': True
},
once=revert_time)
log.debug('Scheduling commit confirmed')
log.debug(scheduled)
saved = __salt__['schedule.save']()
loaded_result['comment'] = ('The commit ID is: {current_jid}.\n'
'This commit will be reverted at: {schedule_ts}, unless confirmed.\n'
'To confirm the commit and avoid reverting, you can execute:\n\n'
'salt {min_id} net.confirm_commit {current_jid}').format(schedule_ts=revert_time,
min_id=__opts__['id'],
current_jid=current_jid)
loaded_result['commit_id'] = current_jid
return loaded_result
committed = _safe_commit_config(loaded_result, napalm_device)
if not committed['result']:
return loaded_result
else:
# would like to commit, but there's no change
# need to call discard_config() to release the config DB
discarded = _safe_dicard_config(loaded_result, napalm_device)
if not discarded['result']:
return loaded_result
loaded_result['already_configured'] = True
loaded_result['comment'] = 'Already configured.'
_explicit_close(napalm_device)
if not loaded_result['result']:
__context__['retcode'] = 1
return loaded_result
# ----------------------------------------------------------------------------------------------------------------------
# callable functions
# ----------------------------------------------------------------------------------------------------------------------
@salt.utils.napalm.proxy_napalm_wrap
def connected(**kwargs): # pylint: disable=unused-argument
'''
Specifies if the connection to the device succeeded.
CLI Example:
.. code-block:: bash
salt '*' net.connected
'''
return {
'out': napalm_device.get('UP', False) # pylint: disable=undefined-variable
}
@salt.utils.napalm.proxy_napalm_wrap
def facts(**kwargs): # pylint: disable=unused-argument
'''
Returns characteristics of the network device.
:return: a dictionary with the following keys:
* uptime - Uptime of the device in seconds.
* vendor - Manufacturer of the device.
* model - Device model.
* hostname - Hostname of the device
* fqdn - Fqdn of the device
* os_version - String with the OS version running on the device.
* serial_number - Serial number of the device
* interface_list - List of the interfaces of the device
CLI Example:
.. code-block:: bash
salt '*' net.facts
Example output:
.. code-block:: python
{
'os_version': '13.3R6.5',
'uptime': 10117140,
'interface_list': [
'lc-0/0/0',
'pfe-0/0/0',
'pfh-0/0/0',
'xe-0/0/0',
'xe-0/0/1',
'xe-0/0/2',
'xe-0/0/3',
'gr-0/0/10',
'ip-0/0/10'
],
'vendor': 'Juniper',
'serial_number': 'JN131356FBFA',
'model': 'MX480',
'hostname': 're0.edge05.syd01',
'fqdn': 're0.edge05.syd01'
}
'''
return salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable
'get_facts',
**{
}
)
@salt.utils.napalm.proxy_napalm_wrap
def environment(**kwargs): # pylint: disable=unused-argument
'''
Returns the environment of the device.
CLI Example:
.. code-block:: bash
salt '*' net.environment
Example output:
.. code-block:: python
{
'fans': {
'Bottom Rear Fan': {
'status': True
},
'Bottom Middle Fan': {
'status': True
},
'Top Middle Fan': {
'status': True
},
'Bottom Front Fan': {
'status': True
},
'Top Front Fan': {
'status': True
},
'Top Rear Fan': {
'status': True
}
},
'memory': {
'available_ram': 16349,
'used_ram': 4934
},
'temperature': {
'FPC 0 Exhaust A': {
'is_alert': False,
'temperature': 35.0,
'is_critical': False
}
},
'cpu': {
'1': {
'%usage': 19.0
},
'0': {
'%usage': 35.0
}
}
}
'''
return salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable
'get_environment',
**{
}
)
@salt.utils.napalm.proxy_napalm_wrap
def cli(*commands, **kwargs): # pylint: disable=unused-argument
'''
Returns a dictionary with the raw output of all commands passed as arguments.
commands
List of commands to be executed on the device.
textfsm_parse: ``False``
Try parsing the outputs using the TextFSM templates.
.. versionadded:: 2018.3.0
.. note::
This option can be also specified in the minion configuration
file or pillar as ``napalm_cli_textfsm_parse``.
textfsm_path
The path where the TextFSM templates can be found. This option implies
the usage of the TextFSM index file.
``textfsm_path`` can be either absolute path on the server,
either specified using the following URL mschemes: ``file://``,
``salt://``, ``http://``, ``https://``, ``ftp://``,
``s3://``, ``swift://``.
.. versionadded:: 2018.3.0
.. note::
This needs to be a directory with a flat structure, having an
index file (whose name can be specified using the ``index_file`` option)
and a number of TextFSM templates.
.. note::
This option can be also specified in the minion configuration
file or pillar as ``textfsm_path``.
textfsm_template
The path to a certain the TextFSM template.
This can be specified using the absolute path
to the file, or using one of the following URL schemes:
- ``salt://``, to fetch the template from the Salt fileserver.
- ``http://`` or ``https://``
- ``ftp://``
- ``s3://``
- ``swift://``
.. versionadded:: 2018.3.0
textfsm_template_dict
A dictionary with the mapping between a command
and the corresponding TextFSM path to use to extract the data.
The TextFSM paths can be specified as in ``textfsm_template``.
.. versionadded:: 2018.3.0
.. note::
This option can be also specified in the minion configuration
file or pillar as ``napalm_cli_textfsm_template_dict``.
platform_grain_name: ``os``
The name of the grain used to identify the platform name
in the TextFSM index file. Default: ``os``.
.. versionadded:: 2018.3.0
.. note::
This option can be also specified in the minion configuration
file or pillar as ``textfsm_platform_grain``.
platform_column_name: ``Platform``
The column name used to identify the platform,
exactly as specified in the TextFSM index file.
Default: ``Platform``.
.. versionadded:: 2018.3.0
.. note::
This is field is case sensitive, make sure
to assign the correct value to this option,
exactly as defined in the index file.
.. note::
This option can be also specified in the minion configuration
file or pillar as ``textfsm_platform_column_name``.
index_file: ``index``
The name of the TextFSM index file, under the ``textfsm_path``. Default: ``index``.
.. versionadded:: 2018.3.0
.. note::
This option can be also specified in the minion configuration
file or pillar as ``textfsm_index_file``.
saltenv: ``base``
        Salt fileserver environment from which to retrieve the file.
Ignored if ``textfsm_path`` is not a ``salt://`` URL.
.. versionadded:: 2018.3.0
include_empty: ``False``
Include empty files under the ``textfsm_path``.
.. versionadded:: 2018.3.0
include_pat
Glob or regex to narrow down the files cached from the given path.
If matching with a regex, the regex must be prefixed with ``E@``,
otherwise the expression will be interpreted as a glob.
.. versionadded:: 2018.3.0
exclude_pat
Glob or regex to exclude certain files from being cached from the given path.
If matching with a regex, the regex must be prefixed with ``E@``,
otherwise the expression will be interpreted as a glob.
.. versionadded:: 2018.3.0
.. note::
If used with ``include_pat``, files matching this pattern will be
excluded from the subset of files defined by ``include_pat``.
CLI Example:
.. code-block:: bash
salt '*' net.cli "show version" "show chassis fan"
CLI Example with TextFSM template:
.. code-block:: bash
salt '*' net.cli textfsm_parse=True textfsm_path=salt://textfsm/
Example output:
.. code-block:: python
{
'show version and haiku': 'Hostname: re0.edge01.arn01
Model: mx480
Junos: 13.3R6.5
Help me, Obi-Wan
I just saw Episode Two
You're my only hope
',
'show chassis fan' : 'Item Status RPM Measurement
Top Rear Fan OK 3840 Spinning at intermediate-speed
Bottom Rear Fan OK 3840 Spinning at intermediate-speed
Top Middle Fan OK 3900 Spinning at intermediate-speed
Bottom Middle Fan OK 3840 Spinning at intermediate-speed
Top Front Fan OK 3810 Spinning at intermediate-speed
Bottom Front Fan OK 3840 Spinning at intermediate-speed
'
}
Example output with TextFSM parsing:
.. code-block:: json
{
"comment": "",
"result": true,
"out": {
"sh ver": [
{
"kernel": "9.1S3.5",
"documentation": "9.1S3.5",
"boot": "9.1S3.5",
"crypto": "9.1S3.5",
"chassis": "",
"routing": "9.1S3.5",
"base": "9.1S3.5",
"model": "mx960"
}
]
}
}
'''
raw_cli_outputs = salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable
'cli',
**{
'commands': list(commands)
}
)
# thus we can display the output as is
# in case of errors, they'll be caught in the proxy
if not raw_cli_outputs['result']:
# Error -> display the output as-is.
return raw_cli_outputs
textfsm_parse = kwargs.get('textfsm_parse') or __opts__.get('napalm_cli_textfsm_parse') or\
__pillar__.get('napalm_cli_textfsm_parse', False)
if not textfsm_parse:
# No TextFSM parsing required, return raw commands.
log.debug('No TextFSM parsing requested.')
return raw_cli_outputs
if 'textfsm.extract' not in __salt__ or 'textfsm.index' not in __salt__:
raw_cli_outputs['comment'] += 'Unable to process: is TextFSM installed?'
log.error(raw_cli_outputs['comment'])
return raw_cli_outputs
textfsm_template = kwargs.get('textfsm_template')
log.debug('textfsm_template: %s', textfsm_template)
textfsm_path = kwargs.get('textfsm_path') or __opts__.get('textfsm_path') or\
__pillar__.get('textfsm_path')
log.debug('textfsm_path: %s', textfsm_path)
textfsm_template_dict = kwargs.get('textfsm_template_dict') or __opts__.get('napalm_cli_textfsm_template_dict') or\
__pillar__.get('napalm_cli_textfsm_template_dict', {})
log.debug('TextFSM command-template mapping: %s', textfsm_template_dict)
index_file = kwargs.get('index_file') or __opts__.get('textfsm_index_file') or\
__pillar__.get('textfsm_index_file')
log.debug('index_file: %s', index_file)
platform_grain_name = kwargs.get('platform_grain_name') or __opts__.get('textfsm_platform_grain') or\
__pillar__.get('textfsm_platform_grain', 'os')
log.debug('platform_grain_name: %s', platform_grain_name)
platform_column_name = kwargs.get('platform_column_name') or __opts__.get('textfsm_platform_column_name') or\
__pillar__.get('textfsm_platform_column_name', 'Platform')
log.debug('platform_column_name: %s', platform_column_name)
saltenv = kwargs.get('saltenv', 'base')
include_empty = kwargs.get('include_empty', False)
include_pat = kwargs.get('include_pat')
exclude_pat = kwargs.get('exclude_pat')
processed_cli_outputs = {
'comment': raw_cli_outputs.get('comment', ''),
'result': raw_cli_outputs['result'],
'out': {}
}
log.debug('Starting to analyse the raw outputs')
for command in list(commands):
command_output = raw_cli_outputs['out'][command]
log.debug('Output from command: %s', command)
log.debug(command_output)
processed_command_output = None
if textfsm_path:
log.debug('Using the templates under %s', textfsm_path)
processed_cli_output = __salt__['textfsm.index'](command,
platform_grain_name=platform_grain_name,
platform_column_name=platform_column_name,
output=command_output.strip(),
textfsm_path=textfsm_path,
saltenv=saltenv,
include_empty=include_empty,
include_pat=include_pat,
exclude_pat=exclude_pat)
log.debug('Processed CLI output:')
log.debug(processed_cli_output)
if not processed_cli_output['result']:
log.debug("Apparently this didn't work, returning the raw output")
processed_command_output = command_output
processed_cli_outputs['comment'] += '\nUnable to process the output from {0}: {1}.'.format(command,
processed_cli_output['comment'])
log.error(processed_cli_outputs['comment'])
elif processed_cli_output['out']:
log.debug('All good, %s has a nice output!', command)
processed_command_output = processed_cli_output['out']
else:
comment = '''\nProcessing "{}" didn't fail, but didn't return anything either. Dumping raw.'''.format(
command)
processed_cli_outputs['comment'] += comment
log.error(comment)
processed_command_output = command_output
elif textfsm_template or command in textfsm_template_dict:
if command in textfsm_template_dict:
textfsm_template = textfsm_template_dict[command]
log.debug('Using %s to process the command: %s', textfsm_template, command)
processed_cli_output = __salt__['textfsm.extract'](textfsm_template,
raw_text=command_output,
saltenv=saltenv)
log.debug('Processed CLI output:')
log.debug(processed_cli_output)
if not processed_cli_output['result']:
log.debug("Apparently this didn't work, returning "
"the raw output")
processed_command_output = command_output
processed_cli_outputs['comment'] += '\nUnable to process the output from {0}: {1}'.format(command,
processed_cli_output['comment'])
log.error(processed_cli_outputs['comment'])
elif processed_cli_output['out']:
log.debug('All good, %s has a nice output!', command)
processed_command_output = processed_cli_output['out']
else:
log.debug("Processing %s didn't fail, but didn't return"
" anything either. Dumping raw.", command)
processed_command_output = command_output
else:
log.error('No TextFSM template specified, or no TextFSM path defined')
processed_command_output = command_output
processed_cli_outputs['comment'] += '\nUnable to process the output from {}.'.format(command)
processed_cli_outputs['out'][command] = processed_command_output
processed_cli_outputs['comment'] = processed_cli_outputs['comment'].strip()
return processed_cli_outputs
@salt.utils.napalm.proxy_napalm_wrap
def traceroute(destination, source=None, ttl=None, timeout=None, vrf=None, **kwargs): # pylint: disable=unused-argument
'''
Calls the method traceroute from the NAPALM driver object and returns a dictionary with the result of the traceroute
command executed on the device.
destination
Hostname or address of remote host
source
Source address to use in outgoing traceroute packets
ttl
IP maximum time-to-live value (or IPv6 maximum hop-limit value)
timeout
Number of seconds to wait for response (seconds)
vrf
VRF (routing instance) for traceroute attempt
.. versionadded:: 2016.11.4
CLI Example:
.. code-block:: bash
salt '*' net.traceroute 8.8.8.8
salt '*' net.traceroute 8.8.8.8 source=127.0.0.1 ttl=5 timeout=1
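A further hypothetical example restricting the traceroute to a routing instance (the VRF name is illustrative):
.. code-block:: bash
    salt '*' net.traceroute 8.8.8.8 vrf=MGMT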
'''
return salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable
'traceroute',
**{
'destination': destination,
'source': source,
'ttl': ttl,
'timeout': timeout,
'vrf': vrf
}
)
@salt.utils.napalm.proxy_napalm_wrap
def ping(destination, source=None, ttl=None, timeout=None, size=None, count=None, vrf=None, **kwargs): # pylint: disable=unused-argument
'''
Executes a ping on the network device and returns a dictionary as a result.
destination
Hostname or IP address of remote host
source
Source address of echo request
ttl
IP time-to-live value (IPv6 hop-limit value) (1..255 hops)
timeout
Maximum wait time after sending final packet (seconds)
size
Size of request packets (0..65468 bytes)
count
Number of ping requests to send (1..2000000000 packets)
vrf
VRF (routing instance) for ping attempt
.. versionadded:: 2016.11.4
CLI Example:
.. code-block:: bash
salt '*' net.ping 8.8.8.8
salt '*' net.ping 8.8.8.8 ttl=3 size=65468
salt '*' net.ping 8.8.8.8 source=127.0.0.1 timeout=1 count=100
'''
return salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable
'ping',
**{
'destination': destination,
'source': source,
'ttl': ttl,
'timeout': timeout,
'size': size,
'count': count,
'vrf': vrf
}
)
@salt.utils.napalm.proxy_napalm_wrap
def arp(interface='', ipaddr='', macaddr='', **kwargs): # pylint: disable=unused-argument
'''
NAPALM returns a list of dictionaries with details of the ARP entries.
:param interface: interface name to filter on
:param ipaddr: IP address to filter on
:param macaddr: MAC address to filter on
:return: List of the entries in the ARP table
CLI Example:
.. code-block:: bash
salt '*' net.arp
salt '*' net.arp macaddr='5c:5e:ab:da:3c:f0'
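A further example, filtering on the interface name (the value shown is taken from the sample output below):
.. code-block:: bash
    salt '*' net.arp interface='MgmtEth0/RSP0/CPU0/0'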
Example output:
.. code-block:: python
[
{
'interface' : 'MgmtEth0/RSP0/CPU0/0',
'mac' : '5c:5e:ab:da:3c:f0',
'ip' : '172.17.17.1',
'age' : 1454496274.84
},
{
'interface': 'MgmtEth0/RSP0/CPU0/0',
'mac' : '66:0e:94:96:e0:ff',
'ip' : '172.17.17.2',
'age' : 1435641582.49
}
]
'''
proxy_output = salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable
'get_arp_table',
**{
}
)
if not proxy_output.get('result'):
return proxy_output
arp_table = proxy_output.get('out')
if interface:
arp_table = _filter_list(arp_table, 'interface', interface)
if ipaddr:
arp_table = _filter_list(arp_table, 'ip', ipaddr)
if macaddr:
arp_table = _filter_list(arp_table, 'mac', macaddr)
proxy_output.update({
'out': arp_table
})
return proxy_output
@salt.utils.napalm.proxy_napalm_wrap
def ipaddrs(**kwargs): # pylint: disable=unused-argument
'''
Returns IP addresses configured on the device.
:return: A dictionary with the IPv4 and IPv6 addresses of the interfaces.
Returns all configured IP addresses on all interfaces as a dictionary
of dictionaries. Keys of the main dictionary represent the name of the
interface. Values of the main dictionary are dictionaries
that may consist of two keys 'ipv4' and 'ipv6' (one, both or none)
which are themselves dictionaries with the IP addresses as keys.
CLI Example:
.. code-block:: bash
salt '*' net.ipaddrs
Example output:
.. code-block:: python
{
'FastEthernet8': {
'ipv4': {
'10.66.43.169': {
'prefix_length': 22
}
}
},
'Loopback555': {
'ipv4': {
'192.168.1.1': {
'prefix_length': 24
}
},
'ipv6': {
'1::1': {
'prefix_length': 64
},
'2001:DB8:1::1': {
'prefix_length': 64
},
'FE80::3': {
'prefix_length': 'N/A'
}
}
}
}
'''
return salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable
'get_interfaces_ip',
**{
}
)
@salt.utils.napalm.proxy_napalm_wrap
def interfaces(**kwargs): # pylint: disable=unused-argument
'''
Returns details of the interfaces on the device.
:return: Returns a dictionary of dictionaries. The keys for the first
dictionary will be the interfaces on the device.
CLI Example:
.. code-block:: bash
salt '*' net.interfaces
Example output:
.. code-block:: python
{
'Management1': {
'is_up': False,
'is_enabled': False,
'description': '',
'last_flapped': -1,
'speed': 1000,
'mac_address': 'dead:beef:dead',
},
'Ethernet1':{
'is_up': True,
'is_enabled': True,
'description': 'foo',
'last_flapped': 1429978575.1554043,
'speed': 1000,
'mac_address': 'beef:dead:beef',
}
}
'''
return salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable
'get_interfaces',
**{
}
)
@salt.utils.napalm.proxy_napalm_wrap
def lldp(interface='', **kwargs): # pylint: disable=unused-argument
'''
Returns a detailed view of the LLDP neighbors.
:param interface: interface name to filter on
:return: A dictionary with the LLDP neighbors. The keys are the
interfaces on which LLDP is activated.
CLI Example:
.. code-block:: bash
salt '*' net.lldp
salt '*' net.lldp interface='TenGigE0/0/0/8'
Example output:
.. code-block:: python
{
'TenGigE0/0/0/8': [
{
'parent_interface': 'Bundle-Ether8',
'interface_description': 'TenGigE0/0/0/8',
'remote_chassis_id': '8c60.4f69.e96c',
'remote_system_name': 'switch',
'remote_port': 'Eth2/2/1',
'remote_port_description': 'Ethernet2/2/1',
'remote_system_description': 'Cisco Nexus Operating System (NX-OS) Software 7.1(0)N1(1a)
TAC support: http://www.cisco.com/tac
Copyright (c) 2002-2015, Cisco Systems, Inc. All rights reserved.',
'remote_system_capab': 'B, R',
'remote_system_enable_capab': 'B'
}
]
}
'''
proxy_output = salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable
'get_lldp_neighbors_detail',
**{
}
)
if not proxy_output.get('result'):
return proxy_output
lldp_neighbors = proxy_output.get('out')
if interface:
lldp_neighbors = {interface: lldp_neighbors.get(interface)}
proxy_output.update({
'out': lldp_neighbors
})
return proxy_output
@salt.utils.napalm.proxy_napalm_wrap
def mac(address='', interface='', vlan=0, **kwargs): # pylint: disable=unused-argument
'''
Returns the MAC Address Table on the device.
:param address: MAC address to filter on
:param interface: Interface name to filter on
:param vlan: VLAN identifier
:return: A list of dictionaries representing the entries in the MAC Address Table
CLI Example:
.. code-block:: bash
salt '*' net.mac
salt '*' net.mac vlan=10
Example output:
.. code-block:: python
[
{
'mac' : '00:1c:58:29:4a:71',
'interface' : 'xe-3/0/2',
'static' : False,
'active' : True,
'moves' : 1,
'vlan' : 10,
'last_move' : 1454417742.58
},
{
'mac' : '8c:60:4f:58:e1:c1',
'interface' : 'xe-1/0/1',
'static' : False,
'active' : True,
'moves' : 2,
'vlan' : 42,
'last_move' : 1453191948.11
}
]
'''
proxy_output = salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable
'get_mac_address_table',
**{
}
)
if not proxy_output.get('result'):
# if negative, leave the output unchanged
return proxy_output
mac_address_table = proxy_output.get('out')
if vlan and isinstance(vlan, int):
mac_address_table = _filter_list(mac_address_table, 'vlan', vlan)
if address:
mac_address_table = _filter_list(mac_address_table, 'mac', address)
if interface:
mac_address_table = _filter_list(mac_address_table, 'interface', interface)
proxy_output.update({
'out': mac_address_table
})
return proxy_output
@salt.utils.napalm.proxy_napalm_wrap
def config(source=None, **kwargs): # pylint: disable=unused-argument
'''
.. versionadded:: 2017.7.0
Return the whole configuration of the network device. By default, it will
return all possible configuration sources supported by the network device.
At most, there will be:
- running config
- startup config
- candidate config
To return only one of the configurations, you can use the ``source``
argument.
source
Which configuration type you want to display, default is all of them.
Options:
- running
- candidate
- startup
:return:
The object returned is a dictionary with the following keys:
- running (string): Representation of the native running configuration.
- candidate (string): Representation of the native candidate configuration.
If the device doesn't differentiate between running and startup
configuration this will be an empty string.
- startup (string): Representation of the native startup configuration.
If the device doesn't differentiate between running and startup
configuration this will be an empty string.
CLI Example:
.. code-block:: bash
salt '*' net.config
salt '*' net.config source=candidate
'''
return salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable
'get_config',
**{
'retrieve': source
}
)
@salt.utils.napalm.proxy_napalm_wrap
def optics(**kwargs): # pylint: disable=unused-argument
'''
.. versionadded:: 2017.7.0
Fetches the power usage on the various transceivers installed
on the network device (in dBm), and returns a view that conforms with the
OpenConfig model openconfig-platform-transceiver.yang.
:return:
Returns a dictionary where the keys are as listed below:
* intf_name (unicode)
* physical_channels
* channels (list of dicts)
* index (int)
* state
* input_power
* instant (float)
* avg (float)
* min (float)
* max (float)
* output_power
* instant (float)
* avg (float)
* min (float)
* max (float)
* laser_bias_current
* instant (float)
* avg (float)
* min (float)
* max (float)
CLI Example:
.. code-block:: bash
salt '*' net.optics
'''
return salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable
'get_optics',
**{
}
)
# <---- Call NAPALM getters --------------------------------------------------------------------------------------------
# ----- Configuration specific functions ------------------------------------------------------------------------------>
@salt.utils.napalm.proxy_napalm_wrap
def load_config(filename=None,
text=None,
test=False,
commit=True,
debug=False,
replace=False,
commit_in=None,
commit_at=None,
revert_in=None,
revert_at=None,
commit_jid=None,
inherit_napalm_device=None,
saltenv='base',
**kwargs): # pylint: disable=unused-argument
'''
Applies configuration changes on the device. It can be loaded from a file or from inline string.
If you send both a filename and a string containing the configuration, the file has higher precedence.
By default this function will commit the changes. If there are no changes, it does not commit and
the flag ``already_configured`` will be set as ``True`` to point this out.
To avoid committing the configuration, set the argument ``test`` to ``True``; the changes will be discarded (dry run).
To keep the changes but not commit, set ``commit`` to ``False``.
To replace the config, set ``replace`` to ``True``.
filename
Path to the file containing the desired configuration.
This can be specified using the absolute path to the file,
or using one of the following URL schemes:
- ``salt://``, to fetch the template from the Salt fileserver.
- ``http://`` or ``https://``
- ``ftp://``
- ``s3://``
- ``swift://``
.. versionchanged:: 2018.3.0
text
String containing the desired configuration.
This argument is ignored when ``filename`` is specified.
test: False
Dry run? If set as ``True``, will apply the config, discard and return the changes. Default: ``False``
and will commit the changes on the device.
commit: True
Commit? Default: ``True``.
debug: False
Debug mode. Will insert a new key under the output dictionary, as ``loaded_config`` containing the raw
configuration loaded on the device.
.. versionadded:: 2016.11.2
replace: False
Load and replace the configuration. Default: ``False``.
.. versionadded:: 2016.11.2
commit_in: ``None``
Commit the changes in a specific number of minutes / hours. Example of
accepted formats: ``5`` (commit in 5 minutes), ``2m`` (commit in 2
minutes), ``1h`` (commit the changes in 1 hour), ``5h30m`` (commit
the changes in 5 hours and 30 minutes).
.. note::
This feature works on any platform, as it does not rely on the
native features of the network operating system.
.. note::
If, after the command is executed, the ``diff`` is not satisfactory,
or for any other reason you have to discard the commit, you are
able to do so using the
:py:func:`net.cancel_commit <salt.modules.napalm_network.cancel_commit>`
execution function, using the commit ID returned by this function.
.. warning::
Using this feature, Salt will load the exact configuration you
expect; however, the diff may change over time (e.g., if a user
applies a manual configuration change, or a different process or
command changes the configuration in the meantime).
.. versionadded:: 2019.2.0
commit_at: ``None``
Commit the changes at a specific time. Example of accepted formats:
``1am`` (will commit the changes at the next 1AM), ``13:20`` (will
commit at 13:20), ``1:20am``, etc.
.. note::
This feature works on any platform, as it does not rely on the
native features of the network operating system.
.. note::
If, after the command is executed, the ``diff`` is not satisfactory,
or for any other reason you have to discard the commit, you are
able to do so using the
:py:func:`net.cancel_commit <salt.modules.napalm_network.cancel_commit>`
execution function, using the commit ID returned by this function.
.. warning::
Using this feature, Salt will load the exact configuration you
expect; however, the diff may change over time (e.g., if a user
applies a manual configuration change, or a different process or
command changes the configuration in the meantime).
.. versionadded:: 2019.2.0
revert_in: ``None``
Commit and revert the changes in a specific number of minutes / hours.
Example of accepted formats: ``5`` (revert in 5 minutes), ``2m`` (revert
in 2 minutes), ``1h`` (revert the changes in 1 hour), ``5h30m`` (revert
the changes in 5 hours and 30 minutes).
.. note::
To confirm the commit, and prevent reverting the changes, you will
have to execute the
:mod:`net.confirm_commit <salt.modules.napalm_network.confirm_commit>`
function, using the commit ID returned by this function.
.. warning::
This works on any platform, regardless of whether it has native
capabilities to confirm a commit. However, please be
*very* cautious when using this feature: on Junos (as it is the only
NAPALM core platform supporting this natively) it executes a commit
confirmed as you would do from the command line.
All the other platforms don't have this capability natively,
therefore the revert is done via Salt. That means your device needs
to be reachable at the moment when Salt attempts to revert your
changes. Be cautious when pushing configuration changes that would
prevent you from reaching the device.
Similarly, if a user or a different process applies other
configuration changes in the meantime (between the moment you
commit and the moment the changes are reverted), these changes would be
equally reverted, as Salt cannot be aware of them.
.. versionadded:: 2019.2.0
revert_at: ``None``
Commit and revert the changes at a specific time. Example of accepted
formats: ``1am`` (will commit and revert the changes at the next 1AM),
``13:20`` (will commit and revert at 13:20), ``1:20am``, etc.
.. note::
To confirm the commit, and prevent reverting the changes, you will
have to execute the
:mod:`net.confirm_commit <salt.modules.napalm_network.confirm_commit>`
function, using the commit ID returned by this function.
.. warning::
This works on any platform, regardless of whether it has native
capabilities to confirm a commit. However, please be
*very* cautious when using this feature: on Junos (as it is the only
NAPALM core platform supporting this natively) it executes a commit
confirmed as you would do from the command line.
All the other platforms don't have this capability natively,
therefore the revert is done via Salt. That means your device needs
to be reachable at the moment when Salt attempts to revert your
changes. Be cautious when pushing configuration changes that would
prevent you from reaching the device.
Similarly, if a user or a different process applies other
configuration changes in the meantime (between the moment you
commit and the moment the changes are reverted), these changes would be
equally reverted, as Salt cannot be aware of them.
.. versionadded:: 2019.2.0
saltenv: ``base``
Specifies the Salt environment name.
.. versionadded:: 2018.3.0
:return: a dictionary having the following keys:
* result (bool): if the config was applied successfully. It is ``False`` only in case of failure. If there \
are no changes to be applied and all operations complete successfully, it is still ``True``, and so will be \
the ``already_configured`` flag (example below)
* comment (str): a message for the user
* already_configured (bool): flag to check if there were no changes applied
* loaded_config (str): the configuration loaded on the device. Requires ``debug`` to be set as ``True``
* diff (str): returns the config changes applied
CLI Example:
.. code-block:: bash
salt '*' net.load_config text='ntp peer 192.168.0.1'
salt '*' net.load_config filename='/absolute/path/to/your/file'
salt '*' net.load_config filename='/absolute/path/to/your/file' test=True
salt '*' net.load_config filename='/absolute/path/to/your/file' commit=False
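A sketch of the scheduling arguments described above (the file path and delays are illustrative):
.. code-block:: bash
    salt '*' net.load_config filename='salt://path/to/changes.cfg' commit_in=30m
    salt '*' net.load_config filename='salt://path/to/changes.cfg' revert_in=1h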
Example output:
.. code-block:: python
{
'comment': 'Configuration discarded.',
'already_configured': False,
'result': True,
'diff': '[edit interfaces xe-0/0/5]+ description "Adding a description";'
}
'''
fun = 'load_merge_candidate'
if replace:
fun = 'load_replace_candidate'
if salt.utils.napalm.not_always_alive(__opts__):
# if a not-always-alive proxy
# or regular minion
# do not close the connection after loading the config
# this will be handled in _config_logic
# after running the other features:
# compare_config, discard / commit
# which have to be over the same session
napalm_device['CLOSE'] = False # pylint: disable=undefined-variable
if filename:
text = __salt__['cp.get_file_str'](filename, saltenv=saltenv)
if text is False:
# When using salt:// or https://, if the resource is not available,
# it will either raise an exception, or return False.
ret = {
'result': False,
'out': None
}
ret['comment'] = 'Unable to read from {}. Please specify a valid file or text.'.format(filename)
log.error(ret['comment'])
return ret
if commit_jid:
# When the commit_jid argument is passed, it probably is a scheduled
# commit to be executed, and filename is a temporary file which
# can be removed after reading it.
salt.utils.files.safe_rm(filename)
_loaded = salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable
fun,
**{
'config': text
}
)
return _config_logic(napalm_device, # pylint: disable=undefined-variable
_loaded,
test=test,
debug=debug,
replace=replace,
commit_config=commit,
loaded_config=text,
commit_at=commit_at,
commit_in=commit_in,
revert_in=revert_in,
revert_at=revert_at,
commit_jid=commit_jid,
**kwargs)
@salt.utils.napalm.proxy_napalm_wrap
def load_template(template_name=None,
template_source=None,
context=None,
defaults=None,
template_engine='jinja',
saltenv='base',
template_hash=None,
template_hash_name=None,
skip_verify=False,
test=False,
commit=True,
debug=False,
replace=False,
commit_in=None,
commit_at=None,
revert_in=None,
revert_at=None,
inherit_napalm_device=None, # pylint: disable=unused-argument
**template_vars):
'''
Renders a configuration template (default: Jinja) and loads the result on the device.
By default this function will commit the changes. If there are no changes,
it does not commit, discards the config, and the flag ``already_configured``
will be set as ``True`` to point this out.
To avoid committing the configuration, set the argument ``test`` to ``True``;
the changes will be discarded (dry run).
To preserve the changes without committing, set ``commit`` to ``False``.
However, this is recommended only in exceptional cases,
when a few consecutive states
and/or configuration changes are applied.
Otherwise the user might forget that the config DB is locked
and the candidate config buffer is not cleared/merged into the running config.
To replace the config, set ``replace`` to ``True``.
template_name
Identifies the path to the template source.
The template can be stored either on the local machine or remotely.
The recommended location is under the ``file_roots``
as specified in the master config file.
For example, let's suppose the ``file_roots`` is configured as:
.. code-block:: yaml
file_roots:
base:
- /etc/salt/states
Placing the template under ``/etc/salt/states/templates/example.jinja``,
it can be used as ``salt://templates/example.jinja``.
Alternatively, for local files, the user can specify the absolute path.
If remotely, the source can be retrieved via ``http``, ``https`` or ``ftp``.
Examples:
- ``salt://my_template.jinja``
- ``/absolute/path/to/my_template.jinja``
- ``http://example.com/template.cheetah``
- ``https://example.com/template.mako``
- ``ftp://example.com/template.py``
.. versionchanged:: 2019.2.0
This argument can now support a list of templates to be rendered.
The resulting configuration text is loaded at once, as a single
configuration chunk.
template_source: None
Inline config template to be rendered and loaded on the device.
template_hash: None
Hash of the template file. Format: ``{hash_type: 'md5', 'hsum': <md5sum>}``
.. versionadded:: 2016.11.2
context: None
Overrides default context variables passed to the template.
.. versionadded:: 2019.2.0
template_hash_name: None
When ``template_hash`` refers to a remote file,
this specifies the filename to look for in that file.
.. versionadded:: 2016.11.2
saltenv: ``base``
Specifies the template environment.
This will influence the relative imports inside the templates.
.. versionadded:: 2016.11.2
template_engine: jinja
The following templates engines are supported:
- :mod:`cheetah<salt.renderers.cheetah>`
- :mod:`genshi<salt.renderers.genshi>`
- :mod:`jinja<salt.renderers.jinja>`
- :mod:`mako<salt.renderers.mako>`
- :mod:`py<salt.renderers.py>`
- :mod:`wempy<salt.renderers.wempy>`
.. versionadded:: 2016.11.2
skip_verify: False
If ``True``, hash verification of remote file sources
(``http://``, ``https://``, ``ftp://``) will be skipped,
and the ``source_hash`` argument will be ignored.
.. versionadded:: 2016.11.2
test: False
Dry run? If set to ``True``, will apply the config,
discard and return the changes.
Default: ``False`` and will commit the changes on the device.
commit: True
Commit? (default: ``True``)
debug: False
Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` containing the raw result after the template was rendered.
.. versionadded:: 2016.11.2
replace: False
Load and replace the configuration.
.. versionadded:: 2016.11.2
commit_in: ``None``
Commit the changes in a specific number of minutes / hours. Example of
accepted formats: ``5`` (commit in 5 minutes), ``2m`` (commit in 2
minutes), ``1h`` (commit the changes in 1 hour), ``5h30m`` (commit
the changes in 5 hours and 30 minutes).
.. note::
This feature works on any platform, as it does not rely on the
native features of the network operating system.
.. note::
If, after the command is executed, the ``diff`` is not satisfactory,
or for any other reason you have to discard the commit, you are
able to do so using the
:py:func:`net.cancel_commit <salt.modules.napalm_network.cancel_commit>`
execution function, using the commit ID returned by this function.
.. warning::
Using this feature, Salt will load the exact configuration you
expect; however, the diff may change over time (e.g., if a user
applies a manual configuration change, or a different process or
command changes the configuration in the meantime).
.. versionadded:: 2019.2.0
commit_at: ``None``
Commit the changes at a specific time. Example of accepted formats:
``1am`` (will commit the changes at the next 1AM), ``13:20`` (will
commit at 13:20), ``1:20am``, etc.
.. note::
This feature works on any platform, as it does not rely on the
native features of the network operating system.
.. note::
If, after the command is executed, the ``diff`` is not satisfactory,
or for any other reason you have to discard the commit, you are
able to do so using the
:py:func:`net.cancel_commit <salt.modules.napalm_network.cancel_commit>`
execution function, using the commit ID returned by this function.
.. warning::
Using this feature, Salt will load the exact configuration you
expect; however, the diff may change over time (e.g., if a user
applies a manual configuration change, or a different process or
command changes the configuration in the meantime).
.. versionadded:: 2019.2.0
revert_in: ``None``
Commit and revert the changes in a specific number of minutes / hours.
Example of accepted formats: ``5`` (revert in 5 minutes), ``2m`` (revert
in 2 minutes), ``1h`` (revert the changes in 1 hour), ``5h30m`` (revert
the changes in 5 hours and 30 minutes).
.. note::
To confirm the commit, and prevent reverting the changes, you will
have to execute the
:mod:`net.confirm_commit <salt.modules.napalm_network.confirm_commit>`
function, using the commit ID returned by this function.
.. warning::
This works on any platform, regardless of whether it has native
capabilities to confirm a commit. However, please be
*very* cautious when using this feature: on Junos (as it is the only
NAPALM core platform supporting this natively) it executes a commit
confirmed as you would do from the command line.
All the other platforms don't have this capability natively,
therefore the revert is done via Salt. That means your device needs
to be reachable at the moment when Salt attempts to revert your
changes. Be cautious when pushing configuration changes that would
prevent you from reaching the device.
Similarly, if a user or a different process applies other
configuration changes in the meantime (between the moment you
commit and the moment the changes are reverted), these changes would be
equally reverted, as Salt cannot be aware of them.
.. versionadded:: 2019.2.0
revert_at: ``None``
Commit and revert the changes at a specific time. Example of accepted
formats: ``1am`` (will commit and revert the changes at the next 1AM),
``13:20`` (will commit and revert at 13:20), ``1:20am``, etc.
.. note::
To confirm the commit, and prevent reverting the changes, you will
have to execute the
:mod:`net.confirm_commit <salt.modules.napalm_network.confirm_commit>`
function, using the commit ID returned by this function.
.. warning::
This works on any platform, regardless of whether it has native
capabilities to confirm a commit. However, please be
*very* cautious when using this feature: on Junos (as it is the only
NAPALM core platform supporting this natively) it executes a commit
confirmed as you would do from the command line.
All the other platforms don't have this capability natively,
therefore the revert is done via Salt. That means your device needs
to be reachable at the moment when Salt attempts to revert your
changes. Be cautious when pushing configuration changes that would
prevent you from reaching the device.
Similarly, if a user or a different process applies other
configuration changes in the meantime (between the moment you
commit and the moment the changes are reverted), these changes would be
equally reverted, as Salt cannot be aware of them.
.. versionadded:: 2019.2.0
defaults: None
Default variables/context passed to the template.
.. versionadded:: 2016.11.2
template_vars
Dictionary with the arguments/context to be used when the template is rendered.
.. note::
Do not explicitly specify this argument. This represents any other
variable that will be sent to the template rendering system.
Please see the examples below!
.. note::
It is recommended to use the ``context`` argument instead, to avoid
conflicts between CLI arguments and template variables.
:return: a dictionary having the following keys:
- result (bool): if the config was applied successfully. It is ``False``
only in case of failure. If there are no changes to be applied and
all operations complete successfully, it is still ``True`` and so will be
the ``already_configured`` flag (example below)
- comment (str): a message for the user
- already_configured (bool): flag to check if there were no changes applied
- loaded_config (str): the configuration loaded on the device, after
rendering the template. Requires ``debug`` to be set as ``True``
- diff (str): returns the config changes applied
The template can use variables from the ``grains``, ``pillar`` or ``opts``, for example:
.. code-block:: jinja
{% set router_model = grains.get('model') -%}
{% set router_vendor = grains.get('vendor') -%}
{% set os_version = grains.get('version') -%}
{% set hostname = pillar.get('proxy', {}).get('host') -%}
{% if router_vendor|lower == 'juniper' %}
system {
host-name {{hostname}};
}
{% elif router_vendor|lower == 'cisco' %}
hostname {{hostname}}
{% endif %}
CLI Examples:
.. code-block:: bash
salt '*' net.load_template set_ntp_peers peers=[192.168.0.1] # uses NAPALM default templates
# inline template:
salt -G 'os:junos' net.load_template template_source='system { host-name {{host_name}}; }' \
host_name='MX480.lab'
# inline template using grains info:
salt -G 'os:junos' net.load_template \
template_source='system { host-name {{grains.model}}.lab; }'
# if the device is a MX480, the command above will set the hostname as: MX480.lab
# inline template using pillar data:
salt -G 'os:junos' net.load_template template_source='system { host-name {{pillar.proxy.host}}; }'
salt '*' net.load_template https://bit.ly/2OhSgqP hostname=example # will commit
salt '*' net.load_template https://bit.ly/2OhSgqP hostname=example test=True # dry run
salt '*' net.load_template salt://templates/example.jinja debug=True # Using the salt:// URI
# render a mako template:
salt '*' net.load_template salt://templates/example.mako template_engine=mako debug=True
# render remote template
salt -G 'os:junos' net.load_template http://bit.ly/2fReJg7 test=True debug=True peers=['192.168.0.1']
salt -G 'os:ios' net.load_template http://bit.ly/2gKOj20 test=True debug=True peers=['192.168.0.1']
# render multiple templates at once
salt '*' net.load_template "['https://bit.ly/2OhSgqP', 'salt://templates/example.jinja']" context="{'hostname': 'example'}"
Example output:
.. code-block:: python
{
'comment': '',
'already_configured': False,
'result': True,
'diff': '[edit system]+ host-name edge01.bjm01',
'loaded_config': 'system { host-name edge01.bjm01; }'
}
'''
_rendered = ''
_loaded = {
'result': True,
'comment': '',
'out': None
}
loaded_config = None
# prechecks
deprecated_args = ('template_user', 'template_attrs', 'template_group', 'template_mode')
for deprecated_arg in deprecated_args:
if template_vars.get(deprecated_arg):
del template_vars[deprecated_arg]
salt.utils.versions.warn_until(
'Sodium',
('The \'{arg}\' argument to \'net.load_template\' is deprecated '
'and has been ignored').format(arg=deprecated_arg)
)
if template_engine not in salt.utils.templates.TEMPLATE_REGISTRY:
_loaded.update({
'result': False,
'comment': 'Invalid templating engine! Choose between: {tpl_eng_opts}'.format(
tpl_eng_opts=', '.join(list(salt.utils.templates.TEMPLATE_REGISTRY.keys()))
)
})
return _loaded # exit
# check whether the template will be rendered by Salt or by NAPALM
salt_render_prefixes = ('salt://', 'http://', 'https://', 'ftp://')
salt_render = False
file_exists = False
if not isinstance(template_name, (tuple, list)):
for salt_render_prefix in salt_render_prefixes:
if not salt_render:
salt_render = salt_render or template_name.startswith(salt_render_prefix)
file_exists = __salt__['file.file_exists'](template_name)
if template_source or file_exists or salt_render or isinstance(template_name, (tuple, list)):
# either an inline template,
# a template in a custom path,
# an absolute path sent,
# or one that starts with salt:// --
# in any of these cases, use the Salt rendering system
if context is None:
context = {}
context.update(template_vars)
# if needed to render the template send as inline arg
if template_source:
# render the content
_rendered = __salt__['file.apply_template_on_contents'](
contents=template_source,
template=template_engine,
context=context,
defaults=defaults,
saltenv=saltenv
)
if not isinstance(_rendered, six.string_types):
if 'result' in _rendered:
_loaded['result'] = _rendered['result']
else:
_loaded['result'] = False
if 'comment' in _rendered:
_loaded['comment'] = _rendered['comment']
else:
_loaded['comment'] = 'Error while rendering the template.'
return _loaded
else:
# render the file - either local, either remote
if not isinstance(template_name, (list, tuple)):
template_name = [template_name]
if template_hash_name and not isinstance(template_hash_name, (list, tuple)):
template_hash_name = [template_hash_name]
elif not template_hash_name:
template_hash_name = [None] * len(template_name)
if template_hash and isinstance(template_hash, six.string_types) and not\
(template_hash.startswith('salt://') or template_hash.startswith('file://')):
# If the template hash is passed as string, and it's not a file
# (starts with the salt:// or file:// URI), then make it a list
# of 1 element (for the iteration below)
template_hash = [template_hash]
elif template_hash and isinstance(template_hash, six.string_types) and\
(template_hash.startswith('salt://') or template_hash.startswith('file://')):
# If the template hash is a file URI, then provide the same value
# for each of the templates in the list, as probably they all
# share the same hash file, otherwise the user should provide
# this as a list
template_hash = [template_hash] * len(template_name)
elif not template_hash:
template_hash = [None] * len(template_name)
for tpl_index, tpl_name in enumerate(template_name):
tpl_hash = template_hash[tpl_index]
tpl_hash_name = template_hash_name[tpl_index]
_rand_filename = __salt__['random.hash'](tpl_name, 'md5')
_temp_file = __salt__['file.join']('/tmp', _rand_filename)
_managed = __salt__['file.get_managed'](name=_temp_file,
source=tpl_name,
source_hash=tpl_hash,
source_hash_name=tpl_hash_name,
user=None,
group=None,
mode=None,
attrs=None,
template=template_engine,
context=context,
defaults=defaults,
saltenv=saltenv,
skip_verify=skip_verify)
if not isinstance(_managed, (list, tuple)) and isinstance(_managed, six.string_types):
_loaded['comment'] += _managed
_loaded['result'] = False
elif isinstance(_managed, (list, tuple)) and not len(_managed) > 0:
_loaded['result'] = False
_loaded['comment'] += 'Error while rendering the template.'
elif isinstance(_managed, (list, tuple)) and not len(_managed[0]) > 0:
_loaded['result'] = False
_loaded['comment'] += _managed[-1] # contains the error message
if _loaded['result']: # all good
_temp_tpl_file = _managed[0]
_temp_tpl_file_exists = __salt__['file.file_exists'](_temp_tpl_file)
if not _temp_tpl_file_exists:
_loaded['result'] = False
_loaded['comment'] += 'Error while rendering the template.'
return _loaded
_rendered += __salt__['file.read'](_temp_tpl_file)
__salt__['file.remove'](_temp_tpl_file)
else:
return _loaded # exit
loaded_config = _rendered
if _loaded['result']: # all good
fun = 'load_merge_candidate'
if replace: # replace requested
fun = 'load_replace_candidate'
if salt.utils.napalm.not_always_alive(__opts__):
# if a not-always-alive proxy
# or regular minion
# do not close the connection after loading the config
# this will be handled in _config_logic
# after running the other features:
# compare_config, discard / commit
# which have to be over the same session
napalm_device['CLOSE'] = False # pylint: disable=undefined-variable
_loaded = salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable
fun,
**{
'config': _rendered
}
)
else:
salt.utils.versions.warn_until(
'Sodium',
'Native NAPALM templates support will be removed in the Sodium '
'release. Please consider using the Salt rendering pipeline instead. '
'If you are using the \'netntp\', \'netsnmp\', or \'netusers\' Salt '
'State modules, you can ignore this message'
)
# otherwise, use NAPALM render system, injecting pillar/grains/opts vars
load_templates_params = defaults if defaults else {}
load_templates_params.update(template_vars)
load_templates_params.update(
{
'template_name': template_name,
'template_source': template_source, # inline template
'pillar': __pillar__, # inject pillar content
'grains': __grains__, # inject grains content
'opts': __opts__ # inject opts content
}
)
if salt.utils.napalm.not_always_alive(__opts__):
# if a not-always-alive proxy
# or regular minion
# do not close the connection after loading the config
# this will be handled in _config_logic
# after running the other features:
# compare_config, discard / commit
# which have to be over the same session
# so we'll set the CLOSE global explicitly as False
napalm_device['CLOSE'] = False # pylint: disable=undefined-variable
_loaded = salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable
'load_template',
**load_templates_params
)
return _config_logic(napalm_device, # pylint: disable=undefined-variable
_loaded,
test=test,
debug=debug,
replace=replace,
commit_config=commit,
loaded_config=loaded_config,
commit_at=commit_at,
commit_in=commit_in,
revert_in=revert_in,
revert_at=revert_at,
**template_vars)
@salt.utils.napalm.proxy_napalm_wrap
def commit(inherit_napalm_device=None, **kwargs): # pylint: disable=unused-argument
'''
Commits the configuration changes made on the network device.
CLI Example:
.. code-block:: bash
salt '*' net.commit
'''
return salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable
'commit_config',
**{}
)
@salt.utils.napalm.proxy_napalm_wrap
def discard_config(inherit_napalm_device=None, **kwargs): # pylint: disable=unused-argument
"""
Discards the changes applied.
CLI Example:
.. code-block:: bash
salt '*' net.discard_config
"""
return salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable
'discard_config',
**{}
)
@salt.utils.napalm.proxy_napalm_wrap
def compare_config(inherit_napalm_device=None, **kwargs): # pylint: disable=unused-argument
'''
Returns the difference between the running config and the candidate config.
CLI Example:
.. code-block:: bash
salt '*' net.compare_config
'''
return salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable
'compare_config',
**{}
)
@salt.utils.napalm.proxy_napalm_wrap
def rollback(inherit_napalm_device=None, **kwargs): # pylint: disable=unused-argument
'''
Rollbacks the configuration.
CLI Example:
.. code-block:: bash
salt '*' net.rollback
'''
return salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable
'rollback',
**{}
)
@salt.utils.napalm.proxy_napalm_wrap
def config_changed(inherit_napalm_device=None, **kwargs): # pylint: disable=unused-argument
'''
Will check whether the configuration has been changed.
:return: A tuple with a boolean that specifies if the config was changed on the device.\
And a string that provides more details of the reason why the configuration was not changed.
CLI Example:
.. code-block:: bash
salt '*' net.config_changed
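A minimal sketch of the returned tuple when no differences are found (the message mirrors the implementation below):
.. code-block:: python
    (False, 'Configuration was not changed on the device.')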
'''
is_config_changed = False
reason = ''
try_compare = compare_config(inherit_napalm_device=napalm_device) # pylint: disable=undefined-variable
if try_compare.get('result'):
if try_compare.get('out'):
is_config_changed = True
else:
reason = 'Configuration was not changed on the device.'
else:
reason = try_compare.get('comment')
return is_config_changed, reason
@salt.utils.napalm.proxy_napalm_wrap
def config_control(inherit_napalm_device=None, **kwargs): # pylint: disable=unused-argument
'''
Will check if the configuration was changed.
If differences are found, it will try to commit.
If the commit is unsuccessful, it will try to rollback.
:return: A tuple with a boolean that specifies if the config was changed/committed/rolled back on the device.\
And a string that provides more details of the reason why the configuration was not committed properly.
CLI Example:
.. code-block:: bash
salt '*' net.config_control
'''
result = True
comment = ''
changed, not_changed_rsn = config_changed(inherit_napalm_device=napalm_device) # pylint: disable=undefined-variable
if not changed:
return (changed, not_changed_rsn)
# config changed, thus let's try to commit
try_commit = commit()
if not try_commit.get('result'):
result = False
comment = 'Unable to commit the changes: {reason}.\n\
Will try to rollback now!'.format(
reason=try_commit.get('comment')
)
try_rollback = rollback()
if not try_rollback.get('result'):
comment += '\nCannot rollback! {reason}'.format(
reason=try_rollback.get('comment')
)
return result, comment
def cancel_commit(jid):
'''
.. versionadded:: 2019.2.0
Cancel a commit scheduled to be executed via the ``commit_in`` and
``commit_at`` arguments from the
:py:func:`net.load_template <salt.modules.napalm_network.load_template>` or
:py:func:`net.load_config <salt.modules.napalm_network.load_config>`
execution functions. The commit ID is displayed when the commit is scheduled
via the functions named above.
CLI Example:
.. code-block:: bash
salt '*' net.cancel_commit 20180726083540640360
'''
job_name = '__napalm_commit_{}'.format(jid)
removed = __salt__['schedule.delete'](job_name)
if removed['result']:
saved = __salt__['schedule.save']()
removed['comment'] = 'Commit #{jid} cancelled.'.format(jid=jid)
else:
removed['comment'] = 'Unable to find commit #{jid}.'.format(jid=jid)
return removed
def confirm_commit(jid):
'''
.. versionadded:: 2019.2.0
Confirm a commit scheduled to be reverted via the ``revert_in`` and
``revert_at`` arguments from the
:mod:`net.load_template <salt.modules.napalm_network.load_template>` or
:mod:`net.load_config <salt.modules.napalm_network.load_config>`
execution functions. The commit ID is displayed when the commit confirmed
is scheduled via the functions named above.
CLI Example:
.. code-block:: bash
salt '*' net.confirm_commit 20180726083540640360
'''
if __grains__['os'] == 'junos':
# Confirm the commit, by committing (i.e., invoking the RPC call)
confirmed = __salt__['napalm.junos_commit']()
confirmed['result'] = confirmed.pop('out')
confirmed['comment'] = confirmed.pop('message')
else:
confirmed = cancel_commit(jid)
if confirmed['result']:
confirmed['comment'] = 'Commit #{jid} confirmed.'.format(jid=jid)
return confirmed
def save_config(source=None,
path=None):
'''
.. versionadded:: 2019.2.0
Save the configuration to a file on the local file system.
source: ``running``
The configuration source. Choose from: ``running``, ``candidate``,
``startup``. Default: ``running``.
path
Absolute path to the file where the configuration should be saved.
To push the file to the Master, use the
:mod:`cp.push <salt.modules.cp.push>` execution function.
CLI Example:
.. code-block:: bash
salt '*' net.save_config source=running
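A hypothetical example saving the candidate config to an explicit path and then pushing it to the Master (the path is illustrative):
.. code-block:: bash
    salt '*' net.save_config source=candidate path=/tmp/candidate.cfg
    salt '*' cp.push /tmp/candidate.cfg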
'''
if not source:
source = 'running'
if not path:
path = salt.utils.files.mkstemp()
running_config = __salt__['net.config'](source=source)
if not running_config or not running_config['result']:
log.error('Unable to retrieve the config')
return running_config
with salt.utils.files.fopen(path, 'w') as fh_:
fh_.write(running_config['out'][source])
return {
'result': True,
'out': path,
'comment': '{source} config saved to {path}'.format(source=source, path=path)
}
def replace_pattern(pattern,
repl,
count=0,
flags=8,
bufsize=1,
append_if_not_found=False,
prepend_if_not_found=False,
not_found_content=None,
search_only=False,
show_changes=True,
backslash_literal=False,
source=None,
path=None,
test=False,
replace=True,
debug=False,
commit=True):
'''
.. versionadded:: 2019.2.0
Replace occurrences of a pattern in the configuration source. If
``show_changes`` is ``True``, then a diff of what changed will be returned,
otherwise a ``True`` will be returned when changes are made, and ``False``
when no changes are made.
This is a pure Python implementation that wraps Python's :py:func:`~re.sub`.
pattern
A regular expression, to be matched using Python's
:py:func:`~re.search`.
repl
The replacement text.
count: ``0``
Maximum number of pattern occurrences to be replaced. If count is a
positive integer ``n``, only ``n`` occurrences will be replaced,
otherwise all occurrences will be replaced.
flags (list or int): ``8``
A list of flags defined in the ``re`` module documentation from the
Python standard library. Each list item should be a string that will
correlate to the human-friendly flag name. E.g., ``['IGNORECASE',
'MULTILINE']``. Optionally, ``flags`` may be an int, with a value
corresponding to the XOR (``|``) of all the desired flags. Defaults to
8 (which supports 'MULTILINE').
bufsize (int or str): ``1``
How much of the configuration to buffer into memory at once. The
default value ``1`` processes one line at a time. The special value
``file`` may be specified which will read the entire file into memory
before processing.
append_if_not_found: ``False``
If set to ``True``, and pattern is not found, then the content will be
appended to the file.
prepend_if_not_found: ``False``
If set to ``True`` and pattern is not found, then the content will be
prepended to the file.
not_found_content
Content to use for append/prepend if not found. If None (default), uses
``repl``. Useful when ``repl`` uses references to groups in the pattern.
search_only: ``False``
If set to ``True``, no changes will be performed on the file, and this
function will simply return ``True`` if the pattern was matched, and
``False`` if not.
show_changes: ``True``
If ``True``, return a diff of changes made. Otherwise, return ``True``
if changes were made, and ``False`` if not.
backslash_literal: ``False``
Interpret backslashes as literal backslashes for the repl and not
escape characters. This will help when using append/prepend so that
the backslashes are not interpreted for the repl on the second run of
the state.
source: ``running``
The configuration source. Choose from: ``running``, ``candidate``, or
``startup``. Default: ``running``.
path
Save the temporary configuration to a specific path, then read from
there.
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return
the changes. Default: ``False`` and will commit the changes on the
device.
commit: ``True``
Commit the configuration changes? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key in the output dictionary, as
``loaded_config`` containing the raw configuration loaded on the device.
replace: ``True``
Load and replace the configuration. Default: ``True``.
If an equal sign (``=``) appears in an argument to a Salt command it is
interpreted as a keyword argument in the format ``key=val``. That
processing can be bypassed in order to pass an equal sign through to the
remote shell command by manually specifying the kwarg:
.. code-block:: bash
salt '*' net.replace_pattern "bind-address\\s*=" "bind-address:"
CLI Example:
.. code-block:: bash
salt '*' net.replace_pattern PREFIX-LIST_NAME new-prefix-list-name
salt '*' net.replace_pattern bgp-group-name new-bgp-group-name count=1
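An additional hypothetical example exercising the ``count`` and ``flags`` arguments documented above (pattern and replacement are illustrative):
.. code-block:: bash
    salt '*' net.replace_pattern 'ge-0/0/1' 'ge-0/0/2' count=1 flags="['IGNORECASE', 'MULTILINE']"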
'''
config_saved = save_config(source=source, path=path)
if not config_saved or not config_saved['result']:
return config_saved
path = config_saved['out']
replace_pattern = __salt__['file.replace'](path,
pattern,
repl,
count=count,
flags=flags,
bufsize=bufsize,
append_if_not_found=append_if_not_found,
prepend_if_not_found=prepend_if_not_found,
not_found_content=not_found_content,
search_only=search_only,
show_changes=show_changes,
backslash_literal=backslash_literal)
with salt.utils.files.fopen(path, 'r') as fh_:
updated_config = fh_.read()
return __salt__['net.load_config'](text=updated_config,
test=test,
debug=debug,
replace=replace,
commit=commit)
def blockreplace(marker_start,
marker_end,
content='',
append_if_not_found=False,
prepend_if_not_found=False,
show_changes=True,
append_newline=False,
source='running',
path=None,
test=False,
commit=True,
debug=False,
replace=True):
'''
.. versionadded:: 2019.2.0
Replace content of the configuration source, delimited by the line markers.
A block of content delimited by comments can help you manage several lines
without worrying about removing old entries.
marker_start
The line content identifying a line as the start of the content block.
Note that the whole line containing this marker will be considered,
so whitespace or extra content before or after the marker is included
in final output.
marker_end
The line content identifying a line as the end of the content block.
Note that the whole line containing this marker will be considered,
so whitespace or extra content before or after the marker is included
in final output.
content
The content to be used between the two lines identified by
``marker_start`` and ``marker_end``.
append_if_not_found: ``False``
If markers are not found and set to True then, the markers and content
will be appended to the file.
prepend_if_not_found: ``False``
If markers are not found and set to True then, the markers and content
will be prepended to the file.
append_newline: ``False``
Controls whether or not a newline is appended to the content block.
If the value of this argument is ``True`` then a newline will be added
to the content block. If it is ``False``, then a newline will not be
added to the content block. If it is ``None`` then a newline will only
be added to the content block if it does not already end in a newline.
show_changes: ``True``
Controls how changes are presented. If ``True``, this function will
return the diff of the changes made.
If ``False``, then it will return a boolean (``True`` if any changes
were made, otherwise ``False``).
source: ``running``
The configuration source. Choose from: ``running``, ``candidate``, or
``startup``. Default: ``running``.
path: ``None``
Save the temporary configuration to a specific path, then read from
there. This argument is optional and can be used when you prefer a
particular location for the temporary file.
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return
the changes. Default: ``False`` and will commit the changes on the
device.
commit: ``True``
Commit the configuration changes? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key in the output dictionary, as
``loaded_config`` containing the raw configuration loaded on the device.
replace: ``True``
Load and replace the configuration. Default: ``True``.
CLI Example:
.. code-block:: bash
salt '*' net.blockreplace 'ntp' 'interface' ''
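A more verbose hypothetical example, dry-running a managed block between two marker lines (markers and content are illustrative):
.. code-block:: bash
    salt '*' net.blockreplace 'ntp' 'interface' content='ntp server 172.17.17.1' test=True debug=True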
'''
config_saved = save_config(source=source, path=path)
if not config_saved or not config_saved['result']:
return config_saved
path = config_saved['out']
replace_pattern = __salt__['file.blockreplace'](path,
marker_start=marker_start,
marker_end=marker_end,
content=content,
append_if_not_found=append_if_not_found,
prepend_if_not_found=prepend_if_not_found,
show_changes=show_changes,
append_newline=append_newline)
with salt.utils.files.fopen(path, 'r') as fh_:
updated_config = fh_.read()
return __salt__['net.load_config'](text=updated_config,
test=test,
debug=debug,
replace=replace,
commit=commit)
def patch(patchfile,
options='',
saltenv='base',
source_hash=None,
show_changes=True,
source='running',
path=None,
test=False,
commit=True,
debug=False,
replace=True):
'''
.. versionadded:: 2019.2.0
Apply a patch to the configuration source, and load the result into the
running config of the device.
patchfile
A patch file to apply to the configuration source.
options
Options to pass to patch.
source_hash
If the patch file (specified via the ``patchfile`` argument) is an
HTTP(S) or FTP URL and the file exists in the minion's file cache, this
option can be passed to keep the minion from re-downloading the file if
the cached copy matches the specified hash.
show_changes: ``True``
        Controls how changes are presented. If ``True``, this function will
        return the diff of the changes made.
        If ``False``, then it will return a boolean (``True`` if any changes
        were made, otherwise ``False``).
source: ``running``
The configuration source. Choose from: ``running``, ``candidate``, or
``startup``. Default: ``running``.
path: ``None``
        Save the temporary configuration to a specific path, then read from
        there. This argument is optional and can be used when the user prefers
        a particular location for the temporary file.
test: ``False``
        Dry run? If set to ``True``, the config will be applied, then
        discarded, and the changes returned. Default: ``False``, which will
        commit the changes on the device.
commit: ``True``
Commit the configuration changes? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key in the output dictionary, as
``loaded_config`` containing the raw configuration loaded on the device.
replace: ``True``
Load and replace the configuration. Default: ``True``.
CLI Example:
.. code-block:: bash
salt '*' net.patch https://example.com/running_config.patch
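    The patch file can also be served from the Salt fileserver (the path below
    is only an example):
    .. code-block:: bash
        salt '*' net.patch salt://patches/config.patch options='-p1'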
'''
config_saved = save_config(source=source, path=path)
if not config_saved or not config_saved['result']:
return config_saved
path = config_saved['out']
patchfile_cache = __salt__['cp.cache_file'](patchfile)
if patchfile_cache is False:
return {
'out': None,
'result': False,
'comment': 'The file "{}" does not exist.'.format(patchfile)
}
replace_pattern = __salt__['file.patch'](path,
patchfile_cache,
options=options)
with salt.utils.files.fopen(path, 'r') as fh_:
updated_config = fh_.read()
return __salt__['net.load_config'](text=updated_config,
test=test,
debug=debug,
replace=replace,
commit=commit)
# <---- Configuration specific functions -------------------------------------------------------------------------------
|
the-stack_0_18890 | # CS 212, hw1-2: Jokers Wild
#
# -----------------
# User Instructions
#
# Write a function best_wild_hand(hand) that takes as
# input a 7-card hand and returns the best 5 card hand.
# In this problem, it is possible for a hand to include
# jokers. Jokers will be treated as 'wild cards' which
# can take any rank or suit of the same color. The
# black joker, '?B', can be used as any spade or club
# and the red joker, '?R', can be used as any heart
# or diamond.
#
# The itertools library may be helpful. Feel free to
# define multiple functions if it helps you solve the
# problem.
#
# -----------------
# Grading Notes
#
# Multiple correct answers will be accepted in cases
# where the best hand is ambiguous (for example, if
# you have 4 kings and 3 queens, there are three best
# hands: 4 kings along with any of the three queens).
import itertools
allranks = "23456789TJQKA"
blackcards = [r + s for r in allranks for s in 'SC']
redcards = [r + s for r in allranks for s in 'DH']
def best_wild_hand(hand):
"Try all values for jokers in all 5-card selections."
hands = set(best_hand(h) for h in itertools.product(*map(replacement, hand)))
return max(hands, key=hand_rank)
def replacement(card):
if card == "?B":
return blackcards
elif card == "?R":
return redcards
else:
return [card]
def test_best_wild_hand():
assert (sorted(best_wild_hand("6C 7C 8C 9C TC 5C ?B".split()))
== ['7C', '8C', '9C', 'JC', 'TC'])
print(sorted(best_wild_hand("TD TC 5H 5C 7C ?R ?B".split())))
assert (sorted(best_wild_hand("TD TC 5H 5C 7C ?R ?B".split()))
== ['7C', 'TC', 'TD', 'TH', 'TS'])
assert (sorted(best_wild_hand("JD TC TH 7C 7D 7S 7H".split()))
== ['7C', '7D', '7H', '7S', 'JD'])
return 'test_best_wild_hand passes'
# ------------------
# Provided Functions
#
# You may want to use some of the functions which
# you have already defined in the unit to write
# your best_hand function.
def hand_rank(hand):
"Return a value indicating the ranking of a hand."
ranks = card_ranks(hand)
if straight(ranks) and flush(hand):
return (8, max(ranks))
elif kind(4, ranks):
return (7, kind(4, ranks), kind(1, ranks))
elif kind(3, ranks) and kind(2, ranks):
return (6, kind(3, ranks), kind(2, ranks))
elif flush(hand):
return (5, ranks)
elif straight(ranks):
return (4, max(ranks))
elif kind(3, ranks):
return (3, kind(3, ranks), ranks)
elif two_pair(ranks):
return (2, two_pair(ranks), ranks)
elif kind(2, ranks):
return (1, kind(2, ranks), ranks)
else:
return (0, ranks)
def card_ranks(hand):
"Return a list of the ranks, sorted with higher first."
ranks = ['--23456789TJQKA'.index(r) for r, s in hand]
ranks.sort(reverse=True)
return [5, 4, 3, 2, 1] if (ranks == [14, 5, 4, 3, 2]) else ranks
def flush(hand):
"Return True if all the cards have the same suit."
suits = [s for r, s in hand]
return len(set(suits)) == 1
def straight(ranks):
"""Return True if the ordered
ranks form a 5-card straight."""
return (max(ranks) - min(ranks) == 4) and len(set(ranks)) == 5
def kind(n, ranks):
"""Return the first rank that this hand has
exactly n-of-a-kind of. Return None if there
is no n-of-a-kind in the hand."""
for r in ranks:
if ranks.count(r) == n: return r
return None
def two_pair(ranks):
"""If there are two pair here, return the two
ranks of the two pairs, else None."""
pair = kind(2, ranks)
lowpair = kind(2, list(reversed(ranks)))
if pair and lowpair != pair:
return (pair, lowpair)
else:
return None
def best_hand(hand):
"""
From a 7-card hand, return the best 5 card hand.
:param hand:
:return:
"""
return max(itertools.combinations(hand, 5), key=hand_rank)
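# A minimal sketch for running this module directly (not part of the original
# assignment); it simply executes the provided test.
if __name__ == '__main__':
    print(test_best_wild_hand())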
|
the-stack_0_18891 | # EXCEPTION HANDLING
try:
    a = int(input('Enter the numerator: '))
    b = int(input('Enter the denominator: '))
    r = a / b
except ValueError:
    print("ERROR: Enter a valid value!")
except ZeroDivisionError:
    print("ERROR: The denominator cannot be zero!")
else:
    print(f"The value of r is {r:.2}")
finally:
    print("Thank you") |
the-stack_0_18892 | """Provides the FSQueue module, implementing a Queue-like interface atop the filesystem."""
import os
from queue import Queue
from typing import TYPE_CHECKING
if TYPE_CHECKING:
# This is annoying, and ugly as. See
# https://mypy.readthedocs.io/en/stable/common_issues.html
# using-classes-that-are-generic-in-stubs-but-not-at-runtime
# for details.
FSQUEUEBASE = Queue[str] # pylint: disable = unsubscriptable-object
else:
FSQUEUEBASE = Queue
class FSQueue(FSQUEUEBASE): # pylint: disable = too-few-public-methods
"""Implements a Queue interface atop the filesystem.
Provides a queue like entity which presents leverages a directory of files
to store values in a queue. The queue is lazy, and will evaluate only upon
getting/putting from/to the queue, exchanging reduced memory consumption
for increased CPU/IO cost.
"""
def __init__(
self,
directory: str = '.',
file_prefix: str = 'FSList-',
maxsize: int = 0,
) -> None:
if not os.path.isdir(directory):
raise ValueError("Directory must exist")
self.directory = directory
self.file_prefix = file_prefix
self.maxsize = maxsize
super().__init__(maxsize)
def _init(self, _) -> None:
pass
def _qsize(self) -> int:
return len(os.listdir(self.directory))
# Get an item from the queue
def _get(self) -> str:
items = os.listdir(self.directory)
items.sort()
target = os.path.join(self.directory, items[0])
try:
if os.path.isfile(target):
content = ""
with open(target) as file_handle:
content = file_handle.read()
# Getting from the queue de-queues, so we should remove the
# file here.
os.remove(target)
return content
raise KeyError("Unable to return file contents for {}".format(target))
except (ValueError, TypeError) as err:
raise KeyError("{} doesn't exist.".format(target)) from err
# Put a new item in the queue
def _put(self, data: str) -> None:
# Get the next available file_name for us to store the queue entry in.
extant_items = os.listdir(self.directory)
extant_items.sort()
highest_name = '{}{}'.format(self.file_prefix, 1)
for item in extant_items:
print('current: {}, highest so far: {}'.format(item, highest_name))
if item > highest_name:
highest_name = item
# Process the index out of the file name.
suffix = highest_name.split(self.file_prefix)[-1]
current_index = int(suffix) if suffix else 0
new_name = '{}{}'.format(
self.file_prefix,
current_index + 1,
)
with open(os.path.join(self.directory, new_name), 'w') as file_handle:
file_handle.write(data)
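# A minimal usage sketch (not part of the module itself); entries are persisted
# as individual files inside the chosen directory and retrieved through the
# standard Queue interface.
if __name__ == '__main__':
    import tempfile
    with tempfile.TemporaryDirectory() as queue_dir:
        queue = FSQueue(directory=queue_dir)
        queue.put('first entry')
        queue.put('second entry')
        print(queue.get())  # 'first entry'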
|
the-stack_0_18893 | #!/usr/bin/env python
from __future__ import print_function
import roslib; roslib.load_manifest('teleop_twist_keyboard')
import rospy
from geometry_msgs.msg import TwistStamped
from sd_msgs.msg import SDControl
import sys, select, termios, tty
msg = """
Reading from the keyboard and Publishing to SD_Control!
uses "w, a, s , d, x " keys or numpad
---------------------------
Apply throttle:
'w' or '8'
Ease off throttle:
's' or '5'
Turn steering left:
'a' or '4'
Turn steering right:
'd' or '6'
Apply brakes:
'x' or '2'
CTRL-C to quit
Notes:
The Twizy has a throttle deadband and requires >25% throttle to begin moving.
Steering will centre upon braking.
All requests ramp while a key press is sustained. Release the key to hold a steady request.
"""
# First number controls the rate of increase of brake, steer or throttle when a button press is held.
# Second number is a placeholder for overrun / steering-centring functionality.
throttleKeys={
'w':(1,0),
'8':(1,0)
}
overrunKeys={
's':(1,0),
'5':(1,0)
}
brakeKeys={
'x':(5,0),
'2':(5,0),
}
leftKeys={
'a':(4,0),
'4':(4,0),
}
rightKeys={
'd':(4,0),
'6':(4,0),
}
def getKey():
tty.setraw(sys.stdin.fileno())
select.select([sys.stdin], [], [], 0)
key = sys.stdin.read(1)
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
return key
if __name__=="__main__":
settings = termios.tcgetattr(sys.stdin)
print(msg)
pub = rospy.Publisher('/sd_control', SDControl, queue_size = 1)
control_msg = SDControl()
rospy.init_node('teleop_twist_keyboard')
overrun = 0
brake = 0
torque = 0
steer = 0
try:
while(1):
key = getKey()
if key in throttleKeys.keys():
torque += throttleKeys[key][0]
brake = 0
elif key in overrunKeys.keys():
torque -= overrunKeys[key][0]
brake = 0
elif key in brakeKeys.keys():
brake -= brakeKeys[key][0]
steer = 0
elif key in leftKeys.keys():
steer += leftKeys[key][0]
elif key in rightKeys.keys():
steer -= rightKeys[key][0]
else:
brake = 0
if (key == '\x03'):
break
torque = min(100, torque)
torque = max(0, torque)
steer = max(-100, steer)
steer = min(100, steer)
brake = max(-100, brake)
if(brake):
control_msg.torque = brake
else:
control_msg.torque = torque
control_msg.steer = steer
print("Throttle " , torque, " brake ", brake, " steer ", steer)
pub.publish(control_msg)
except Exception as e:
print(e)
finally:
control_msg.torque = 0
control_msg.steer = 0
pub.publish(control_msg)
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
|
the-stack_0_18894 | #!/usr/bin/python
#Filename: biased_lexrank.py
def b_lexrank(G, baseline_score, alpha = 0.85, personalization=None, max_iter=100, tol=1.0e-6, weight='weight', seed_weight = 1):
""" Return the biased Lexrank scores of the nodes in the graph
This program is based upon the pagerank_scipy program from the networkx
source.
Parameters
___________
G: graph
A NetworkX graph
alpha: float, optional
A damping parameter for PageRank, default = 0.85
personalization: dict, optional
The "personalization vector" consisting of a dictionary with a
key for every graph node and nonzero personalization value for each node.
max_iter : integer, optional
Maximum number of iterations in power method eigenvalue solver.
tol : float, optional
Error tolerance used to check convergence in power method solver.
weight : key, optional
Edge data key to use as weight. If None weights are set to 1.
baseline_score: vector, float
similarity scores between the seed and sentences within the graph
Returns
-------
pagerank : dictionary
Dictionary of nodes with PageRank as value
Examples
--------
>>> G=nx.DiGraph(nx.path_graph(4))
>>> pr=nx.pagerank_scipy(G,alpha=0.9)
Notes
-----
The eigenvector calculation uses power iteration with a SciPy
sparse matrix representation.
References
----------
.. [1] A. Langville and C. Meyer,
"A survey of eigenvector methods of web information retrieval."
http://citeseer.ist.psu.edu/713792.html
.. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry,
The PageRank citation ranking: Bringing order to the Web. 1999
http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf
[3] Otterbacher, Erkan and Radev, Biased LexRank: Passage Retrieval using Random
Walks with Question-Based Priors (2008)
"""
try:
import scipy.sparse
import networkx as nx
from numpy import diag
from networkx.exception import NetworkXError
except ImportError:
raise ImportError("pagerank_scipy() requires SciPy: http://scipy.org/")
if len(G) == 0:
return {}
# choose ordering in matrix
if personalization is None: # use G.nodes() ordering
nodelist=G.nodes()
    elif personalization == 'biased':
nodelist = G.nodes()
else: # use personalization "vector" ordering
nodelist=list(personalization.keys())
M=nx.to_scipy_sparse_matrix(G,nodelist=nodelist,weight=weight,dtype='f')
(n,m)=M.shape # should be square
S=scipy.array(M.sum(axis=1)).flatten()
# for i, j, v in zip( *scipy.sparse.find(M) ):
# M[i,j] = v / S[i]
S[S>0] = 1.0 / S[S>0]
#creates a sparse diagonal matrix with normalization values
Q = scipy.sparse.spdiags(S.T, 0, *M.shape, format='csr')
M = Q * M
x=scipy.ones((n))/n # initial guess
dangle=scipy.array(scipy.where(M.sum(axis=1)==0,1.0/n,0)).flatten()
# add "teleportation"/personalization
    if personalization == 'biased':
v = scipy.array(baseline_score)
v = v/v.sum()
v = seed_weight * v/v.sum()
#print v.shape
elif personalization is not None:
v=scipy.array(list(personalization.values()),dtype=float)
v=v/v.sum()
else:
v=x
#print v.shape
i=0
while i <= max_iter:
# power iteration: make up to max_iter iterations
xlast=x
x=alpha*(x*M+scipy.dot(dangle,xlast))+(1-alpha)*v
x=x/x.sum()
# check convergence, l1 norm
err=scipy.absolute(x-xlast).sum()
if err < n*tol:
return dict(list(zip(nodelist,list(map(float,x)))))
i+=1
raise NetworkXError('pagerank_scipy: power iteration failed to converge'
'in %d iterations.'%(i+1))
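# A minimal usage sketch (assumes NetworkX plus a SciPy version that still
# exposes the scipy.array/scipy.ones helpers used above); the seed scores are
# hypothetical seed-to-sentence similarities.
if __name__ == '__main__':
    import networkx as nx
    G = nx.Graph()
    G.add_weighted_edges_from([(0, 1, 0.5), (1, 2, 0.8), (0, 2, 0.1)])
    seed_scores = [0.2, 0.7, 0.1]
    print(b_lexrank(G, seed_scores, personalization='biased'))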
|
the-stack_0_18896 | load("//tools:defaults.bzl", "jasmine_node_test")
load("@io_bazel_rules_webtesting//web:web.bzl", "web_test")
load("//tools/server-test:index.bzl", "server_test")
def webdriver_test(name, data = [], tags = [], **kwargs):
jasmine_node_test(
name = "%s_jasmine_test" % name,
data = data + [
"@npm//@bazel/typescript",
],
tags = tags + ["manual"],
**kwargs
)
web_test(
name = "%s_chromium_web_test" % name,
browser = "@npm//@angular/dev-infra-private/browsers/chromium:chromium",
tags = tags + ["manual"],
test = ":%s_jasmine_test" % name,
)
server_test(
name = "%s_chromium" % name,
server = "//src/e2e-app:devserver",
test = ":%s_chromium_web_test" % name,
tags = tags + ["e2e"],
)
native.test_suite(
name = name,
tests = [
":%s_chromium" % name,
],
)
|
the-stack_0_18897 | """Test Renault diagnostics."""
import pytest
from homeassistant.components.diagnostics import REDACTED
from homeassistant.components.renault import DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from tests.common import mock_device_registry
from tests.components.diagnostics import (
get_diagnostics_for_config_entry,
get_diagnostics_for_device,
)
pytestmark = pytest.mark.usefixtures("patch_renault_account", "patch_get_vehicles")
VEHICLE_DETAILS = {
"vin": REDACTED,
"registrationDate": "2017-08-01",
"firstRegistrationDate": "2017-08-01",
"engineType": "5AQ",
"engineRatio": "601",
"modelSCR": "ZOE",
"deliveryCountry": {"code": "FR", "label": "FRANCE"},
"family": {"code": "X10", "label": "FAMILLE X10", "group": "007"},
"tcu": {
"code": "TCU0G2",
"label": "TCU VER 0 GEN 2",
"group": "E70",
},
"navigationAssistanceLevel": {
"code": "NAV3G5",
"label": "LEVEL 3 TYPE 5 NAVIGATION",
"group": "408",
},
"battery": {
"code": "BT4AR1",
"label": "BATTERIE BT4AR1",
"group": "968",
},
"radioType": {
"code": "RAD37A",
"label": "RADIO 37A",
"group": "425",
},
"registrationCountry": {"code": "FR"},
"brand": {"label": "RENAULT"},
"model": {"code": "X101VE", "label": "ZOE", "group": "971"},
"gearbox": {
"code": "BVEL",
"label": "BOITE A VARIATEUR ELECTRIQUE",
"group": "427",
},
"version": {"code": "INT MB 10R"},
"energy": {"code": "ELEC", "label": "ELECTRIQUE", "group": "019"},
"registrationNumber": REDACTED,
"vcd": "SYTINC/SKTPOU/SAND41/FDIU1/SSESM/MAPSUP/SSCALL/SAND88/SAND90/SQKDRO/SDIFPA/FACBA2/PRLEX1/SSRCAR/CABDO2/TCU0G2/SWALBO/EVTEC1/STANDA/X10/B10/EA2/MB/ELEC/DG/TEMP/TR4X2/RV/ABS/CAREG/LAC/VT003/CPE/RET03/SPROJA/RALU16/CEAVRH/AIRBA1/SERIE/DRA/DRAP08/HARM02/ATAR/TERQG/SFBANA/KM/DPRPN/AVREPL/SSDECA/ASRESP/RDAR02/ALEVA/CACBL2/SOP02C/CTHAB2/TRNOR/LVAVIP/LVAREL/SASURV/KTGREP/SGSCHA/APL03/ALOUCC/CMAR3P/NAV3G5/RAD37A/BVEL/AUTAUG/RNORM/ISOFIX/EQPEUR/HRGM01/SDPCLV/TLFRAN/SPRODI/SAN613/SSAPEX/GENEV1/ELC1/SANCML/PE2012/PHAS1/SAN913/045KWH/BT4AR1/VEC153/X101VE/NBT017/5AQ",
"assets": [
{
"assetType": "PICTURE",
"renditions": [
{
"resolutionType": "ONE_MYRENAULT_LARGE",
"url": "https://3dv2.renault.com/ImageFromBookmark?configuration=SKTPOU%2FPRLEX1%2FSTANDA%2FB10%2FEA2%2FDG%2FVT003%2FRET03%2FRALU16%2FDRAP08%2FHARM02%2FTERQG%2FRDAR02%2FALEVA%2FSOP02C%2FTRNOR%2FLVAVIP%2FLVAREL%2FNAV3G5%2FRAD37A%2FSDPCLV%2FTLFRAN%2FGENEV1%2FSAN913%2FBT4AR1%2FNBT017&databaseId=1d514feb-93a6-4b45-8785-e11d2a6f1864&bookmarkSet=RSITE&bookmark=EXT_34_DESSUS&profile=HELIOS_OWNERSERVICES_LARGE",
},
{
"resolutionType": "ONE_MYRENAULT_SMALL",
"url": "https://3dv2.renault.com/ImageFromBookmark?configuration=SKTPOU%2FPRLEX1%2FSTANDA%2FB10%2FEA2%2FDG%2FVT003%2FRET03%2FRALU16%2FDRAP08%2FHARM02%2FTERQG%2FRDAR02%2FALEVA%2FSOP02C%2FTRNOR%2FLVAVIP%2FLVAREL%2FNAV3G5%2FRAD37A%2FSDPCLV%2FTLFRAN%2FGENEV1%2FSAN913%2FBT4AR1%2FNBT017&databaseId=1d514feb-93a6-4b45-8785-e11d2a6f1864&bookmarkSet=RSITE&bookmark=EXT_34_DESSUS&profile=HELIOS_OWNERSERVICES_SMALL_V2",
},
],
},
{
"assetType": "PDF",
"assetRole": "GUIDE",
"title": "PDF Guide",
"description": "",
"renditions": [
{
"url": "https://cdn.group.renault.com/ren/gb/myr/assets/x101ve/manual.pdf.asset.pdf/1558704861676.pdf"
}
],
},
{
"assetType": "URL",
"assetRole": "GUIDE",
"title": "e-guide",
"description": "",
"renditions": [{"url": "http://gb.e-guide.renault.com/eng/Zoe"}],
},
{
"assetType": "VIDEO",
"assetRole": "CAR",
"title": "10 Fundamentals about getting the best out of your electric vehicle",
"description": "",
"renditions": [{"url": "39r6QEKcOM4"}],
},
{
"assetType": "VIDEO",
"assetRole": "CAR",
"title": "Automatic Climate Control",
"description": "",
"renditions": [{"url": "Va2FnZFo_GE"}],
},
{
"assetType": "URL",
"assetRole": "CAR",
"title": "More videos",
"description": "",
"renditions": [{"url": "https://www.youtube.com/watch?v=wfpCMkK1rKI"}],
},
{
"assetType": "VIDEO",
"assetRole": "CAR",
"title": "Charging the battery",
"description": "",
"renditions": [{"url": "RaEad8DjUJs"}],
},
{
"assetType": "VIDEO",
"assetRole": "CAR",
"title": "Charging the battery at a station with a flap",
"description": "",
"renditions": [{"url": "zJfd7fJWtr0"}],
},
],
"yearsOfMaintenance": 12,
"connectivityTechnology": "RLINK1",
"easyConnectStore": False,
"electrical": True,
"rlinkStore": False,
"deliveryDate": "2017-08-11",
"retrievedFromDhs": False,
"engineEnergyType": "ELEC",
"radioCode": REDACTED,
}
VEHICLE_DATA = {
"battery": {
"batteryAutonomy": 141,
"batteryAvailableEnergy": 31,
"batteryCapacity": 0,
"batteryLevel": 60,
"batteryTemperature": 20,
"chargingInstantaneousPower": 27,
"chargingRemainingTime": 145,
"chargingStatus": 1.0,
"plugStatus": 1,
"timestamp": "2020-01-12T21:40:16Z",
},
"charge_mode": {
"chargeMode": "always",
},
"cockpit": {
"totalMileage": 49114.27,
},
"hvac_status": {
"externalTemperature": 8.0,
"hvacStatus": "off",
},
}
@pytest.mark.usefixtures("fixtures_with_data")
@pytest.mark.parametrize("vehicle_type", ["zoe_40"], indirect=True)
async def test_entry_diagnostics(
hass: HomeAssistant, config_entry: ConfigEntry, hass_client
):
"""Test config entry diagnostics."""
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert await get_diagnostics_for_config_entry(hass, hass_client, config_entry) == {
"entry": {
"data": {
"kamereon_account_id": REDACTED,
"locale": "fr_FR",
"password": REDACTED,
"username": REDACTED,
},
"title": "Mock Title",
},
"vehicles": [{"details": VEHICLE_DETAILS, "data": VEHICLE_DATA}],
}
@pytest.mark.usefixtures("fixtures_with_data")
@pytest.mark.parametrize("vehicle_type", ["zoe_40"], indirect=True)
async def test_device_diagnostics(
hass: HomeAssistant, config_entry: ConfigEntry, hass_client
):
"""Test config entry diagnostics."""
device_registry = mock_device_registry(hass)
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
device = device_registry.async_get_device({(DOMAIN, "VF1AAAAA555777999")})
assert device is not None
assert await get_diagnostics_for_device(
hass, hass_client, config_entry, device
) == {"details": VEHICLE_DETAILS, "data": VEHICLE_DATA}
|
the-stack_0_18898 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
__all__ = ["compute_neuron_head_importance", "reorder_head", "reorder_neuron"]
def compute_neuron_head_importance(task_name,
model,
data_loader,
num_layers,
num_heads,
loss_fct=nn.loss.CrossEntropyLoss(),
intermediate_name='linear1',
output_name='linear2'):
"""
    Compute the importance of each multi-head attention head and feed-forward neuron in each transformer layer.
Args:
task_name(str): task name.
model(paddle.nn.Layer): the instance of transformer model.
data_loader(DataLoader): An iterable data loader is used for evaluate. An instance of `paddle.io.Dataloader`.
num_layers(int): number of transformer layers.
num_heads(int): number of heads in each multi-head attention.
loss_fct(Loss|optional): loss function can be a `paddle.nn.Layer` instance. Default: `nn.loss.CrossEntropyLoss()`.
intermediate_name(str|optional): the name of intermediate `Linear` layer in feed-forward. Default: `linear1`.
output_name(str|optional): the name of output `Linear` layer in feed-forward. Default: `linear2`.
"""
head_importance = paddle.zeros(
shape=[num_layers, num_heads], dtype='float32')
head_mask = paddle.ones(shape=[num_layers, num_heads], dtype='float32')
head_mask.stop_gradient = False
intermediate_weight = []
intermediate_bias = []
output_weight = []
for name, w in model.named_parameters():
if intermediate_name in name:
if len(w.shape) > 1:
intermediate_weight.append(w)
else:
intermediate_bias.append(w)
if output_name in name:
if len(w.shape) > 1:
output_weight.append(w)
neuron_importance = []
for w in intermediate_weight:
neuron_importance.append(np.zeros(shape=[w.shape[1]], dtype='float32'))
if task_name.lower() != 'mnli':
data_loader = (data_loader, )
for data in data_loader:
for batch in data:
input_ids, segment_ids, labels = batch
logits = model(
input_ids, segment_ids, attention_mask=[None, head_mask])
loss = loss_fct(logits, labels)
loss.backward()
head_importance += paddle.abs(
paddle.to_tensor(head_mask.gradient()))
for w1, b1, w2, current_importance in zip(
intermediate_weight, intermediate_bias, output_weight,
neuron_importance):
current_importance += np.abs(
(np.sum(w1.numpy() * w1.gradient(), axis=0) + b1.numpy() *
b1.gradient()))
current_importance += np.abs(
np.sum(w2.numpy() * w2.gradient(), axis=1))
return head_importance, neuron_importance
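# Hypothetical usage sketch (``bert_model`` and ``dev_data_loader`` are
# placeholders, not names defined in this module); the returned importance
# scores are what ``reorder_head``/``reorder_neuron`` below consume:
#
#   head_importance, neuron_importance = compute_neuron_head_importance(
#       task_name='sst-2',
#       model=bert_model,
#       data_loader=dev_data_loader,
#       num_layers=12,
#       num_heads=12)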
def reorder_head(layer, index):
"""
Reorder head weights according index.
Args:
layer(paddle.nn.Layer): the instance of `paddle.nn.MultiHeadAttention` layer.
index(list): the sort indices of multi-head.
"""
assert isinstance(layer, nn.MultiHeadAttention), \
"layer in reorder_head must be the instance of `paddle.nn.MultiHeadAttention`."
n, a = layer.num_heads, layer.head_dim
idx = paddle.reshape(
paddle.index_select(
paddle.reshape(
paddle.arange(
0, n * a, dtype='int64'), shape=[n, a]),
index=index,
axis=0),
shape=[-1])
def reorder_head_matrix(linearLayer, index, dim=1):
W = paddle.index_select(linearLayer.weight, index, axis=dim).detach()
if linearLayer.bias is not None:
if dim == 0:
b = paddle.assign(linearLayer.bias).detach()
else:
b = paddle.assign(
paddle.index_select(
linearLayer.bias, index, axis=0)).detach()
linearLayer.weight.stop_gradient = True
linearLayer.weight.set_value(W)
linearLayer.weight.stop_gradient = False
if linearLayer.bias is not None:
linearLayer.bias.stop_gradient = True
linearLayer.bias.set_value(b)
linearLayer.bias.stop_gradient = False
reorder_head_matrix(
layer.q_proj.fn if hasattr(layer.q_proj, 'fn') else layer.q_proj, idx)
reorder_head_matrix(
layer.k_proj.fn if hasattr(layer.k_proj, 'fn') else layer.k_proj, idx)
reorder_head_matrix(
layer.v_proj.fn if hasattr(layer.v_proj, 'fn') else layer.v_proj, idx)
reorder_head_matrix(
layer.out_proj.fn if hasattr(layer.out_proj, 'fn') else layer.out_proj,
idx,
dim=0)
def reorder_neuron(layer, index, dim=0):
"""
Reorder feed-forward weights according index.
Args:
layer(paddle.nn.Layer): the instance of `paddle.nn.Linear` layer.
index(list): the sort indices of feed-forward.
dim(int): select weights according to the dim.
"""
linearLayer = layer.fn if hasattr(layer, 'fn') else layer
W = paddle.index_select(linearLayer.weight, index, axis=dim).detach()
if linearLayer.bias is not None:
if dim == 0:
b = paddle.assign(linearLayer.bias).detach()
else:
b = paddle.assign(
paddle.index_select(
linearLayer.bias, index, axis=0)).detach()
linearLayer.weight.stop_gradient = True
linearLayer.weight.set_value(W)
linearLayer.weight.stop_gradient = False
if linearLayer.bias is not None:
linearLayer.bias.stop_gradient = True
linearLayer.bias.set_value(b)
linearLayer.bias.stop_gradient = False
### monkey patch for MultiHeadAttention _prepare_qkv to change num_heads.
def _prepare_qkv(self, query, key, value, cache=None):
q = self.q_proj(query)
if hasattr(self.q_proj,
'fn') and self.q_proj.fn.cur_config['expand_ratio'] != None:
self.num_heads = int(self.num_heads *
self.q_proj.fn.cur_config['expand_ratio'])
q = paddle.reshape(x=q, shape=[0, 0, self.num_heads, self.head_dim])
q = paddle.transpose(x=q, perm=[0, 2, 1, 3])
if isinstance(cache, self.StaticCache):
# for encoder-decoder attention in inference and has cached
k, v = cache.k, cache.v
else:
k, v = self.compute_kv(key, value)
if isinstance(cache, self.Cache):
# for decoder self-attention in inference
k = paddle.concat([cache.k, k], axis=2)
v = paddle.concat([cache.v, v], axis=2)
cache = self.Cache(k, v)
return (q, k, v) if cache is None else (q, k, v, cache)
### monkey patch for MultiHeadAttention forward to accept head_mask
### attn_mask[0] = attn_mask, attn_mask[1] = head_mask
def _mha_forward(self, query, key, value, attn_mask=None, cache=None):
key = query if key is None else key
value = query if value is None else value
# compute q ,k ,v
if cache is None:
q, k, v = self._prepare_qkv(query, key, value, cache)
else:
q, k, v, cache = self._prepare_qkv(query, key, value, cache)
# scale dot product attention
# TODO: use paddle.matmul, however it doesn't support `alpha`
product = paddle.fluid.layers.matmul(
x=q, y=k, transpose_y=True, alpha=self.head_dim**-0.5)
if attn_mask[0] is not None:
# TODO(guosheng): support bool mask
product = product + attn_mask[0]
weights = F.softmax(product)
if self.dropout:
weights = F.dropout(
weights,
self.dropout,
training=self.training,
mode="upscale_in_train")
if attn_mask[1] is not None:
weights = weights * attn_mask[1]
out = paddle.matmul(weights, v)
# combine heads
out = paddle.transpose(out, perm=[0, 2, 1, 3])
out = paddle.reshape(x=out, shape=[0, 0, out.shape[2] * out.shape[3]])
# project to output
out = self.out_proj(out)
outs = [out]
if self.need_weights:
outs.append(weights)
if cache is not None:
outs.append(cache)
if hasattr(self.q_proj,
'fn') and self.q_proj.fn.cur_config['expand_ratio'] != None:
self.num_heads = int(
float(self.num_heads) / self.q_proj.fn.cur_config['expand_ratio'])
return out if len(outs) == 1 else tuple(outs)
### monkey patch for TransformerEncoder forward to accept head_mask
### attn_mask[0] = attn_mask, attn_mask[1] = head_mask
def _encoder_forward(self, src, src_mask=[None, None]):
output = src
if src_mask[1] is not None:
head_mask = src_mask[1]
if len(head_mask.shape) == 1:
head_mask = paddle.unsqueeze(
paddle.unsqueeze(
paddle.unsqueeze(paddle.unsqueeze(head_mask, 0), 0), -1),
-1)
head_mask = paddle.expand(
head_mask, shape=[self.num_layers] + head_mask.shape[1:])
elif len(head_mask.shape) == 2:
head_mask = paddle.unsqueeze(
paddle.unsqueeze(paddle.unsqueeze(head_mask, 1), -1), -1)
else:
head_mask = [None] * self.num_layers
for i, mod in enumerate(self.layers):
output = mod(output, src_mask=[src_mask[0], head_mask[i]])
if self.norm is not None:
output = self.norm(output)
return output
nn.MultiHeadAttention.forward = _mha_forward
nn.MultiHeadAttention._prepare_qkv = _prepare_qkv
nn.TransformerEncoder.forward = _encoder_forward
|
the-stack_0_18900 | #program to insert an element at a specified position into a given list.
def insert_spec_position(x, n_list, pos):
return n_list[:pos-1]+[x]+n_list[pos-1:]
n_list = [1,1,2,3,4,4,5,1]
print("Original list:")
print(n_list)
kth_position = 3
x = 14
result = insert_spec_position(x, n_list, kth_position)
print("\nAfter inserting an element at kth position in the said list:")
print(result) |
the-stack_0_18904 | # -*- coding: utf-8 -*-
"""
const
~~~~~
Implements CONSTS
:author: Feei <[email protected]>
:homepage: https://github.com/wufeifei/cobra
:license: MIT, see LICENSE for more details.
:copyright: Copyright (c) 2017 Feei. All rights reserved
"""
# Match-Mode
mm_function_param_controllable = 'function-param-regex'  # function parameter regex match
mm_regex_param_controllable = 'vustomize-match'  # user-customized match
mm_regex_only_match = 'only-regex'
match_modes = [
mm_regex_only_match,
mm_regex_param_controllable,
mm_function_param_controllable
]
#
# Function-Param-Controllable
#
# (?:eval|call_function)\s*\((.*)(?:\))
# eval ($test + $test2);
# call_function ($exp);
#
fpc = '\s*\((.*)(?:\))'
fpc_single = '[f]{fpc}'.format(fpc=fpc)
fpc_multi = '(?:[f]){fpc}'.format(fpc=fpc)
#
# Find All variables
#
# Hallo $var. blabla $var, $iam a var $varvarvar gfg djf jdfgjh fd $variable $_GET['req']
#
fav = '\$([a-zA-Z_\x7f-\xff][a-zA-Z0-9_\x7f-\xff]*)'
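# A minimal, self-contained illustration (not used by Cobra itself): the `[f]`
# placeholder above is assumed to be replaced with a concrete function name
# before matching, as in the commented example regex.
if __name__ == '__main__':
    import re
    rule = fpc_single.replace('[f]', 'eval')
    match = re.search(rule, "eval ($test + $test2);")
    if match:
        print(match.group(1))  # $test + $test2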
|
the-stack_0_18905 | import fasteners
import hightime
import math
import niscope
import numpy
import os
import pytest
import tempfile
# There are system tests below that need either a PXI-5124 or a PXI-5142 instead of the PXIe-5164 we use everywhere else
# because of specific capabilities on those models. Due to internal NI bug 969274, opening a simulated session to those models
# sometimes fails. As a workaround, the nimi-bot VMs are configured with one persistently simulated instrument of each kind respectively
# named "5124" and "5142". If you want to run these tests on your own system, you will need to create these two simulated
# instruments using MAX.
# In addition, we need a global lock in order to keep us from opening more than one session to the same simulated instrument
# at the same time. This is because NI-SCOPE (like other MI driver runtimes) disallows two simultaneous sessions to the same
# instrument, even when the instrument is simulated. This will impact the performance at which system tests run because we
# parallelize at the tox level :(.
daqmx_sim_5124_lock_file = os.path.join(tempfile.gettempdir(), 'daqmx_5124.lock')
daqmx_sim_5124_lock = fasteners.InterProcessLock(daqmx_sim_5124_lock_file)
daqmx_sim_5142_lock_file = os.path.join(tempfile.gettempdir(), 'daqmx_5142.lock')
daqmx_sim_5142_lock = fasteners.InterProcessLock(daqmx_sim_5142_lock_file)
@pytest.fixture(scope='function')
def session():
with niscope.Session('FakeDevice', False, True, 'Simulate=1, DriverSetup=Model:5164; BoardType:PXIe') as simulated_session:
yield simulated_session
@pytest.fixture(scope='function')
def session_5124():
with daqmx_sim_5124_lock:
with niscope.Session('5124') as simulated_session: # 5124 is needed for video triggering
yield simulated_session
@pytest.fixture(scope='function')
def session_5142():
with daqmx_sim_5142_lock:
with niscope.Session('5142') as simulated_session: # 5142 is needed for OSP
yield simulated_session
# Attribute tests
def test_vi_boolean_attribute(session):
session.allow_more_records_than_memory = False
default_option = session.allow_more_records_than_memory
assert default_option is False
def test_vi_string_attribute(session):
session.acq_arm_source = 'NISCOPE_VAL_IMMEDIATE'
start_trigger_source = session.acq_arm_source
assert start_trigger_source == 'NISCOPE_VAL_IMMEDIATE'
# Basic usability tests
def test_read(session):
test_voltage = 1.0
test_record_length = 2000
test_channels = range(2)
test_num_channels = 2
test_num_records = 3
session.configure_vertical(test_voltage, niscope.VerticalCoupling.AC)
session.configure_horizontal_timing(50000000, test_record_length, 50.0, test_num_records, True)
waveforms = session.channels[test_channels].read(num_samples=test_record_length, num_records=test_num_records)
assert len(waveforms) == test_num_channels * test_num_records
for i in range(len(waveforms)):
assert len(waveforms[i].samples) == test_record_length
def test_fetch(session):
test_voltage = 1.0
test_record_length = 2000
test_channels = range(2)
test_num_channels = 2
test_num_records = 3
session.configure_vertical(test_voltage, niscope.VerticalCoupling.AC)
session.configure_horizontal_timing(50000000, test_record_length, 50.0, test_num_records, True)
with session.initiate():
waveforms = session.channels[test_channels].fetch(num_samples=test_record_length, num_records=test_num_records)
assert len(waveforms) == test_num_channels * test_num_records
for i in range(len(waveforms)):
assert len(waveforms[i].samples) == test_record_length
def test_fetch_defaults(session):
test_voltage = 1.0
test_record_length = 2000
test_channels = range(2)
test_num_channels = 2
session.configure_vertical(test_voltage, niscope.VerticalCoupling.AC)
session.configure_horizontal_timing(50000000, test_record_length, 50.0, 1, True)
with session.initiate():
waveforms = session.channels[test_channels].fetch()
assert len(waveforms) == test_num_channels
for i in range(len(waveforms)):
assert len(waveforms[i].samples) == test_record_length
def test_fetch_array_measurement(session):
test_voltage = 1.0
test_record_length = 2000
test_channels = range(2)
test_num_channels = 2
test_num_records = 3
session.configure_vertical(test_voltage, niscope.VerticalCoupling.AC)
session.configure_horizontal_timing(50000000, test_record_length, 50.0, test_num_records, True)
with session.initiate():
waveforms = session.channels[test_channels].fetch_array_measurement(niscope.enums.ArrayMeasurement.ARRAY_GAIN)
assert len(waveforms) == test_num_channels * test_num_records
for i in range(len(waveforms)):
assert len(waveforms[i].samples) == test_record_length
def test_fetch_binary8_into(session):
test_voltage = 1.0
test_record_length = 2000
test_channels = range(2)
test_num_channels = 2
waveform = numpy.ndarray(test_num_channels * test_record_length, dtype=numpy.int8)
# Initialize with NaN so we can later verify all samples were overwritten by the driver.
waveform.fill(float('nan'))
session.configure_vertical(test_voltage, niscope.VerticalCoupling.AC)
session.configure_horizontal_timing(50000000, test_record_length, 50.0, 1, True)
with session.initiate():
waveforms = session.channels[test_channels].fetch_into(waveform=waveform)
for sample in waveform:
assert not math.isnan(sample)
assert len(waveforms) == test_num_channels
for i in range(len(waveforms)):
record_wfm = waveforms[i].samples
assert len(record_wfm) == test_record_length
for j in range(len(record_wfm)):
assert record_wfm[j] == waveform[i * test_record_length + j]
def test_fetch_binary16_into(session):
test_voltage = 1.0
test_record_length = 2000
test_channels = range(2)
test_num_channels = 2
waveform = numpy.ndarray(test_num_channels * test_record_length, dtype=numpy.int16)
# Initialize with NaN so we can later verify all samples were overwritten by the driver.
waveform.fill(float('nan'))
session.configure_vertical(test_voltage, niscope.VerticalCoupling.AC)
session.configure_horizontal_timing(50000000, test_record_length, 50.0, 1, True)
with session.initiate():
waveforms = session.channels[test_channels].fetch_into(waveform=waveform)
for sample in waveform:
assert not math.isnan(sample)
assert len(waveforms) == test_num_channels
for i in range(len(waveforms)):
record_wfm = waveforms[i].samples
assert len(record_wfm) == test_record_length
for j in range(len(record_wfm)):
assert record_wfm[j] == waveform[i * test_record_length + j]
def test_fetch_binary32_into(session):
test_voltage = 1.0
test_record_length = 2000
test_channels = range(2)
test_num_channels = 2
waveform = numpy.ndarray(test_num_channels * test_record_length, dtype=numpy.int32)
# Initialize with NaN so we can later verify all samples were overwritten by the driver.
waveform.fill(float('nan'))
session.configure_vertical(test_voltage, niscope.VerticalCoupling.AC)
session.configure_horizontal_timing(50000000, test_record_length, 50.0, 1, True)
with session.initiate():
waveforms = session.channels[test_channels].fetch_into(waveform=waveform)
for sample in waveform:
assert not math.isnan(sample)
assert len(waveforms) == test_num_channels
for i in range(len(waveforms)):
record_wfm = waveforms[i].samples
assert len(record_wfm) == test_record_length
for j in range(len(record_wfm)):
assert record_wfm[j] == waveform[i * test_record_length + j]
def test_fetch_double_into(session):
test_voltage = 1.0
test_record_length = 2000
test_channels = range(2)
test_num_channels = 2
waveform = numpy.ndarray(test_num_channels * test_record_length, dtype=numpy.float64)
# Initialize with NaN so we can later verify all samples were overwritten by the driver.
waveform.fill(float('nan'))
session.configure_vertical(test_voltage, niscope.VerticalCoupling.AC)
session.configure_horizontal_timing(50000000, test_record_length, 50.0, 1, True)
with session.initiate():
waveforms = session.channels[test_channels].fetch_into(waveform=waveform)
for sample in waveform:
assert not math.isnan(sample)
assert len(waveforms) == test_num_channels
for i in range(len(waveforms)):
record_wfm = waveforms[i].samples
assert len(record_wfm) == test_record_length
for j in range(len(record_wfm)):
assert record_wfm[j] == waveform[i * test_record_length + j]
def test_read_measurement(session):
test_voltage = 1.0
test_record_length = 1000
test_channels = range(2)
test_num_channels = 2
test_num_records = 3
session.configure_vertical(test_voltage, niscope.VerticalCoupling.AC)
session.configure_horizontal_timing(50000000, test_record_length, 50.0, test_num_records, True)
with session.initiate():
measurement = session.channels[test_channels].read_measurement(niscope.enums.ScalarMeasurement.NO_MEASUREMENT, 5.0)
assert len(measurement) == test_num_records * test_num_channels
for meas in measurement:
assert meas == 0.0
def test_fetch_measurement(session):
test_voltage = 1.0
test_record_length = 1000
test_channels = range(2)
test_num_channels = 2
test_num_records = 3
session.configure_vertical(test_voltage, niscope.VerticalCoupling.AC)
session.configure_horizontal_timing(50000000, test_record_length, 50.0, test_num_records, True)
with session.initiate():
measurement = session.channels[test_channels].fetch_measurement(niscope.enums.ScalarMeasurement.NO_MEASUREMENT, 5.0)
assert len(measurement) == test_num_records * test_num_channels
for meas in measurement:
assert meas == 0.0
def test_fetch_measurement_stats(session):
test_voltage = 1.0
test_record_length = 1000
test_channels = range(2)
test_num_channels = 2
test_num_records = 3
session.configure_vertical(test_voltage, niscope.VerticalCoupling.AC)
session.configure_horizontal_timing(50000000, test_record_length, 50.0, test_num_records, True)
with session.initiate():
measurement_stats = session.channels[test_channels].fetch_measurement_stats(niscope.enums.ScalarMeasurement.NO_MEASUREMENT, 5.0)
assert len(measurement_stats) == test_num_channels * test_num_records
for stat in measurement_stats:
assert stat.result == 0.0
def test_clear_waveform_measurement_stats(session):
test_voltage = 1.0
test_record_length = 1000
test_channels = 0
test_num_records = 1
session.configure_vertical(test_voltage, niscope.VerticalCoupling.AC)
session.configure_horizontal_timing(50000000, test_record_length, 50.0, test_num_records, True)
with session.initiate():
session.channels[test_channels].fetch_measurement(niscope.enums.ScalarMeasurement.FREQUENCY, 5.0)
uncleared_stats = session.channels[test_channels].fetch_measurement_stats(niscope.enums.ScalarMeasurement.FREQUENCY, 5.0)
uncleared_stats_2 = session.channels[test_channels].fetch_measurement_stats(niscope.enums.ScalarMeasurement.FREQUENCY, 5.0)
session.channels[test_channels].clear_waveform_measurement_stats(niscope.enums.ClearableMeasurement.FREQUENCY)
cleared_stats = session.channels[test_channels].fetch_measurement_stats(niscope.enums.ScalarMeasurement.FREQUENCY, 5.0)
# The principle here is using consistent behavior (i.e. if stats are fetched twice on a single record/channel measurement in a row, it will always be the same)
# to demonstrate that clearing the stats does in fact cause a measurable change.
assert uncleared_stats[0].result == uncleared_stats_2[0].result
assert uncleared_stats[0].stdev == uncleared_stats_2[0].stdev
assert uncleared_stats[0].mean == uncleared_stats_2[0].mean
assert uncleared_stats[0].min_val == uncleared_stats_2[0].min_val
assert uncleared_stats[0].max_val == uncleared_stats_2[0].max_val
assert uncleared_stats[0].num_in_stats == uncleared_stats_2[0].num_in_stats
assert uncleared_stats[0].num_in_stats != cleared_stats[0].num_in_stats
def test_waveform_processing(session):
test_voltage = 1.0
test_record_length = 1000
test_channels = range(2)
test_num_channels = 2
test_num_records = 3
session.configure_vertical(test_voltage, niscope.VerticalCoupling.AC)
session.configure_horizontal_timing(50000000, test_record_length, 50.0, test_num_records, True)
with session.initiate():
session.add_waveform_processing(niscope.enums.ArrayMeasurement.DERIVATIVE)
processed_waveforms = session.channels[test_channels].fetch_measurement(niscope.enums.ScalarMeasurement.MID_REF_VOLTS, 5.0)
session.clear_waveform_processing()
unprocessed_waveforms = session.channels[test_channels].fetch_measurement(niscope.enums.ScalarMeasurement.MID_REF_VOLTS, 5.0)
assert len(processed_waveforms) == test_num_channels * test_num_records
assert len(unprocessed_waveforms) == test_num_channels * test_num_records
# Here the idea is to leave a large margin to not test too specifically for any returned values but to demonstrate that the waveform processing does
# undeniably cause a consistent shift in the values returned. The "0" exception for processed is due to the nature of derivatives -- if two samples
# next to each other are identical, the derivative will be 0.
for processed, unprocessed in zip(processed_waveforms, unprocessed_waveforms):
assert abs(unprocessed) < 1
assert abs(processed) > 1 or processed == 0
def test_measurement_stats_str(session):
test_voltage = 1.0
test_record_length = 1000
test_channels = 0
test_num_records = 1
session.configure_vertical(test_voltage, niscope.VerticalCoupling.AC)
session.configure_horizontal_timing(50000000, test_record_length, 50.0, test_num_records, True)
with session.initiate():
measurement_stat = session.channels[test_channels].fetch_measurement_stats(niscope.enums.ScalarMeasurement.NO_MEASUREMENT, 5.0)
assert isinstance(measurement_stat[0].__str__(), str)
assert isinstance(measurement_stat[0].__repr__(), str)
def test_get_self_cal_last_date_time(session):
last_cal = session.get_self_cal_last_date_and_time()
assert last_cal.month == 12
assert last_cal.day == 21
assert last_cal.year == 1999
assert last_cal.hour == 0
assert last_cal.minute == 0
def test_get_ext_cal_last_date_time(session):
last_cal = session.get_ext_cal_last_date_and_time()
assert last_cal.month == 12
assert last_cal.day == 21
assert last_cal.year == 1999
assert last_cal.hour == 0
assert last_cal.minute == 0
def test_get_self_cal_last_temperature(session):
last_cal_temp = session.get_self_cal_last_temp()
assert last_cal_temp == 25
def test_get_ext_cal_last_temperature(session):
last_cal_temp = session.get_ext_cal_last_temp()
assert last_cal_temp == 25
def test_self_test(session):
# We should not get an assert if self_test passes
session.self_test()
def test_reset(session):
deault_fetch_relative_to = session._fetch_relative_to
assert deault_fetch_relative_to == niscope.FetchRelativeTo.PRETRIGGER
session._fetch_relative_to = niscope.FetchRelativeTo.READ_POINTER
non_default_acqusition_type = session._fetch_relative_to
assert non_default_acqusition_type == niscope.FetchRelativeTo.READ_POINTER
session.reset()
assert session._fetch_relative_to == niscope.FetchRelativeTo.PRETRIGGER
def test_reset_device(session):
deault_meas_time_histogram_high_time = session.meas_time_histogram_high_time
assert deault_meas_time_histogram_high_time == hightime.timedelta(microseconds=500)
session.meas_time_histogram_high_time = hightime.timedelta(microseconds=1000)
non_default_meas_time_histogram_high_time = session.meas_time_histogram_high_time
assert non_default_meas_time_histogram_high_time == hightime.timedelta(microseconds=1000)
session.reset_device()
assert session.meas_time_histogram_high_time == hightime.timedelta(microseconds=500)
def test_reset_with_defaults(session):
deault_meas_time_histogram_high_time = session.meas_time_histogram_high_time
assert deault_meas_time_histogram_high_time == hightime.timedelta(microseconds=500)
session.meas_time_histogram_high_time = hightime.timedelta(microseconds=1000)
non_default_meas_time_histogram_high_time = session.meas_time_histogram_high_time
assert non_default_meas_time_histogram_high_time == hightime.timedelta(microseconds=1000)
    session.reset_with_defaults()
assert session.meas_time_histogram_high_time == hightime.timedelta(microseconds=500)
def test_error_message():
try:
# We pass in an invalid model name to force going to error_message
with niscope.Session('FakeDevice', False, True, 'Simulate=1, DriverSetup=Model:invalid_model; BoardType:PXIe'):
assert False
except niscope.Error as e:
assert e.code == -1074118609
assert e.description.find('Simulation does not support the selected model and board type.') != -1
def test_get_error(session):
try:
session.instrument_model = ''
assert False
except niscope.Error as e:
assert e.code == -1074135027 # Error : Attribute is read-only.
assert e.description.find('Attribute is read-only.') != -1
def test_acquisition_status(session):
assert session.acquisition_status() == niscope.AcquisitionStatus.COMPLETE
def test_self_cal(session):
session.self_cal(niscope.Option.SELF_CALIBRATE_ALL_CHANNELS)
def test_probe_compensation_signal(session):
session.probe_compensation_signal_start()
session.probe_compensation_signal_stop()
def test_configure_horizontal_timing(session):
session.configure_vertical(5.0, niscope.VerticalCoupling.DC)
session.auto_setup()
session.configure_horizontal_timing(10000000, 1000, 50.0, 1, True)
session.trigger_modifier = niscope.TriggerModifier.AUTO
session.configure_trigger_immediate()
session.horz_record_length == 1000
session.horz_sample_rate == 10000000
def test_configure_chan_characteristics(session):
session.vertical_range = 4.0
session.configure_chan_characteristics(50, 0)
assert 50.0 == session.input_impedance
def test_filter_coefficients(session_5142):
assert [1.0] + [0.0] * 34 == session_5142.get_equalization_filter_coefficients() # coefficients list should have 35 items
try:
filter_coefficients = [1.0, 0.0, 0.0]
session_5142.configure_equalization_filter_coefficients(filter_coefficients)
except niscope.Error as e:
assert "Incorrect number of filter coefficients." in e.description
assert e.code == -1074135024
filter_coefficients = [0.01] * 35
session_5142.configure_equalization_filter_coefficients(filter_coefficients)
assert filter_coefficients == session_5142.get_equalization_filter_coefficients()
def test_send_software_trigger_edge(session):
session.send_software_trigger_edge(niscope.WhichTrigger.ARM_REFERENCE)
def test_disable(session):
assert session.allow_more_records_than_memory is False
session.allow_more_records_than_memory = True
session.disable()
assert session.allow_more_records_than_memory is False
# Basic configuration tests
def test_configure_ref_levels(session):
session._configure_ref_levels()
assert 90.0 == session.meas_chan_high_ref_level
def test_configure_trigger_digital(session):
session.configure_trigger_digital('VAL_RTSI_0')
session.vertical_range = 5
assert 'VAL_RTSI_0' == session.trigger_source
def test_configure_trigger_edge(session):
assert niscope.TriggerSlope.POSITIVE == session.trigger_slope
session.configure_trigger_edge('0', 0.0, niscope.TriggerCoupling.DC)
session.commit()
assert '0' == session.trigger_source
assert niscope.TriggerCoupling.DC == session.trigger_coupling
def test_configure_trigger_hysteresis(session):
session.configure_trigger_hysteresis('1', 0.0, 0.05, niscope.TriggerCoupling.DC)
assert '1' == session.trigger_source
assert niscope.TriggerCoupling.DC == session.trigger_coupling
def test_import_export_buffer(session):
test_value_1 = 1
test_value_2 = 5
session.vertical_range = test_value_1
assert session.vertical_range == test_value_1
buffer = session.export_attribute_configuration_buffer()
session.vertical_range = test_value_2
assert session.vertical_range == test_value_2
session.import_attribute_configuration_buffer(buffer)
assert session.vertical_range == test_value_1
def test_import_export_file(session):
test_value_1 = 1
test_value_2 = 5
temp_file = tempfile.NamedTemporaryFile(suffix='.txt', delete=False)
# NamedTemporaryFile() returns the file already opened, so we need to close it before we can use it
temp_file.close()
path = temp_file.name
session.vertical_range = test_value_1
assert session.vertical_range == test_value_1
session.export_attribute_configuration_file(path)
session.vertical_range = test_value_2
assert session.vertical_range == test_value_2
session.import_attribute_configuration_file(path)
assert session.vertical_range == test_value_1
os.remove(path)
def test_configure_trigger_software(session):
session.configure_trigger_software()
def test_configure_trigger_video(session_5124):
session_5124.configure_trigger_video('0', niscope.VideoSignalFormat.PAL, niscope.VideoTriggerEvent.FIELD1, niscope.VideoPolarity.POSITIVE, niscope.TriggerCoupling.DC)
assert niscope.VideoSignalFormat.PAL == session_5124.tv_trigger_signal_format
assert niscope.VideoTriggerEvent.FIELD1 == session_5124.tv_trigger_event
assert niscope.VideoPolarity.POSITIVE == session_5124.tv_trigger_polarity
assert niscope.TriggerCoupling.DC == session_5124.trigger_coupling
def test_configure_trigger_window(session):
session.configure_trigger_window('1', 0, 5, niscope.TriggerWindowMode.ENTERING, niscope.TriggerCoupling.DC)
assert '1' == session.trigger_source
assert niscope.TriggerWindowMode.ENTERING == session.trigger_window_mode
|
the-stack_0_18907 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.data.Dataset builder.
Creates data sources for DetectionModels from an InputReader config. See
input_reader.proto for options.
Note: If users wish to also use their own InputReaders with the Object
Detection configuration framework, they should define their own builder function
that wraps the build function.
"""
import functools
import tensorflow as tf
from object_detection.core import standard_fields as fields
from object_detection.data_decoders import tf_example_decoder
from object_detection.protos import input_reader_pb2
from object_detection.utils import dataset_util
def _get_padding_shapes(dataset, max_num_boxes=None, num_classes=None,
spatial_image_shape=None):
"""Returns shapes to pad dataset tensors to before batching.
Args:
dataset: tf.data.Dataset object.
max_num_boxes: Max number of groundtruth boxes needed to computes shapes for
padding.
num_classes: Number of classes in the dataset needed to compute shapes for
padding.
spatial_image_shape: A list of two integers of the form [height, width]
containing expected spatial shape of the image.
Returns:
A dictionary keyed by fields.InputDataFields containing padding shapes for
tensors in the dataset.
Raises:
ValueError: If groundtruth classes is neither rank 1 nor rank 2.
"""
if not spatial_image_shape or spatial_image_shape == [-1, -1]:
height, width = None, None
else:
height, width = spatial_image_shape # pylint: disable=unpacking-non-sequence
padding_shapes = {
fields.InputDataFields.image: [height, width, 3],
fields.InputDataFields.source_id: [],
fields.InputDataFields.filename: [],
fields.InputDataFields.key: [],
fields.InputDataFields.groundtruth_difficult: [max_num_boxes],
fields.InputDataFields.groundtruth_boxes: [max_num_boxes, 4],
fields.InputDataFields.groundtruth_instance_masks: [max_num_boxes, height,
width],
fields.InputDataFields.groundtruth_is_crowd: [max_num_boxes],
fields.InputDataFields.groundtruth_group_of: [max_num_boxes],
fields.InputDataFields.groundtruth_area: [max_num_boxes],
fields.InputDataFields.groundtruth_weights: [max_num_boxes],
fields.InputDataFields.num_groundtruth_boxes: [],
fields.InputDataFields.groundtruth_label_types: [max_num_boxes],
fields.InputDataFields.groundtruth_label_scores: [max_num_boxes],
fields.InputDataFields.true_image_shape: [3],
fields.InputDataFields.multiclass_scores: [
max_num_boxes, num_classes + 1 if num_classes is not None else None],
}
# Determine whether groundtruth_classes are integers or one-hot encodings, and
# apply batching appropriately.
classes_shape = dataset.output_shapes[
fields.InputDataFields.groundtruth_classes]
if len(classes_shape) == 1: # Class integers.
padding_shapes[fields.InputDataFields.groundtruth_classes] = [max_num_boxes]
elif len(classes_shape) == 2: # One-hot or k-hot encoding.
padding_shapes[fields.InputDataFields.groundtruth_classes] = [
max_num_boxes, num_classes]
else:
raise ValueError('Groundtruth classes must be a rank 1 tensor (classes) or '
'rank 2 tensor (one-hot encodings)')
if fields.InputDataFields.original_image in dataset.output_shapes:
padding_shapes[fields.InputDataFields.original_image] = [None, None, 3]
if fields.InputDataFields.groundtruth_keypoints in dataset.output_shapes:
tensor_shape = dataset.output_shapes[fields.InputDataFields.
groundtruth_keypoints]
padding_shape = [max_num_boxes, tensor_shape[1].value,
tensor_shape[2].value]
padding_shapes[fields.InputDataFields.groundtruth_keypoints] = padding_shape
if (fields.InputDataFields.groundtruth_keypoint_visibilities
in dataset.output_shapes):
tensor_shape = dataset.output_shapes[fields.InputDataFields.
groundtruth_keypoint_visibilities]
padding_shape = [max_num_boxes, tensor_shape[1].value]
padding_shapes[fields.InputDataFields.
groundtruth_keypoint_visibilities] = padding_shape
return {tensor_key: padding_shapes[tensor_key]
for tensor_key, _ in dataset.output_shapes.items()}
def build(input_reader_config, transform_input_data_fn=None,
batch_size=None, max_num_boxes=None, num_classes=None,
spatial_image_shape=None):
"""Builds a tf.data.Dataset.
Builds a tf.data.Dataset by applying the `transform_input_data_fn` on all
records. Applies a padded batch to the resulting dataset.
Args:
input_reader_config: A input_reader_pb2.InputReader object.
transform_input_data_fn: Function to apply to all records, or None if
no extra decoding is required.
batch_size: Batch size. If None, batching is not performed.
max_num_boxes: Max number of groundtruth boxes needed to compute shapes for
padding. If None, will use a dynamic shape.
num_classes: Number of classes in the dataset needed to compute shapes for
padding. If None, will use a dynamic shape.
spatial_image_shape: A list of two integers of the form [height, width]
containing expected spatial shape of the image after applying
transform_input_data_fn. If None, will use dynamic shapes.
Returns:
A tf.data.Dataset based on the input_reader_config.
Raises:
ValueError: On invalid input reader proto.
ValueError: If no input paths are specified.
"""
if not isinstance(input_reader_config, input_reader_pb2.InputReader):
raise ValueError('input_reader_config not of type '
'input_reader_pb2.InputReader.')
if input_reader_config.WhichOneof('input_reader') == 'tf_record_input_reader':
config = input_reader_config.tf_record_input_reader
if not config.input_path:
raise ValueError('At least one input path must be specified in '
'`input_reader_config`.')
label_map_proto_file = None
if input_reader_config.HasField('label_map_path'):
label_map_proto_file = input_reader_config.label_map_path
decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=input_reader_config.load_instance_masks,
instance_mask_type=input_reader_config.mask_type,
label_map_proto_file=label_map_proto_file)
def process_fn(value):
processed = decoder.decode(value)
if transform_input_data_fn is not None:
return transform_input_data_fn(processed)
return processed
dataset = dataset_util.read_dataset(
functools.partial(tf.data.TFRecordDataset, buffer_size=8 * 1000 * 1000),
process_fn, config.input_path[:], input_reader_config)
if batch_size:
padding_shapes = _get_padding_shapes(dataset, max_num_boxes, num_classes,
spatial_image_shape)
dataset = dataset.apply(
tf.contrib.data.padded_batch_and_drop_remainder(batch_size,
padding_shapes))
return dataset
raise ValueError('Unsupported input_reader_config.')
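# Hypothetical usage sketch (the proto text and file paths below are
# placeholders, not files shipped with this module):
#
#   from google.protobuf import text_format
#   config = text_format.Parse("""
#       tf_record_input_reader { input_path: "/path/to/train.record" }
#       label_map_path: "/path/to/label_map.pbtxt"
#   """, input_reader_pb2.InputReader())
#   dataset = build(config, batch_size=8, max_num_boxes=100, num_classes=90,
#                   spatial_image_shape=[300, 300])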
|