metadata | text
---|---|
{
"source": "jefvantongerloo/scrapli_community",
"score": 2
}
|
#### File: scrapli/genericdriver/sync.py
```python
import time
from scrapli.driver import NetworkDriver
def default_sync_on_open(conn: NetworkDriver) -> None:
"""
scrapli_genericdriver default on_open callable
    This has been tested with a Cisco WLC using auth_bypass, so we have to send credentials during on_open
Args:
conn: NetworkDriver object
Returns:
N/A # noqa: DAR202
Raises:
N/A
"""
time.sleep(0.25)
conn.channel.write(channel_input=conn.transport.auth_username)
conn.channel.send_return()
time.sleep(0.25)
conn.channel.write(channel_input=conn.transport.auth_password)
conn.channel.send_return()
def default_sync_on_close(conn: NetworkDriver) -> None:
"""
scrapli_genericdriver default on_close callable
Args:
conn: NetworkDriver object
Returns:
N/A # noqa: DAR202
Raises:
N/A
"""
conn.channel.write(channel_input="logout")
conn.channel.send_return()
```
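A minimal usage sketch (not from the repository) showing how these callables might be attached to a connection, assuming scrapli's `GenericDriver` accepts the standard `auth_bypass`/`on_open`/`on_close` keyword arguments; the host name and credentials are placeholders.
```python
from scrapli.driver import GenericDriver

# default_sync_on_open / default_sync_on_close are the callables defined above.
# auth_bypass skips scrapli's own authentication so the on_open callable can
# push the username/password to the WLC prompt instead.
conn = GenericDriver(
    host="wlc.example.com",    # placeholder host
    auth_username="admin",     # placeholder credentials
    auth_password="password",
    auth_bypass=True,
    on_open=default_sync_on_open,
    on_close=default_sync_on_close,
)
conn.open()
response = conn.send_command("show sysinfo")
print(response.result)
conn.close()
```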
|
{
"source": "jegabe/ColorMyConsole",
"score": 2
}
|
#### File: jegabe/ColorMyConsole/conanfile.py
```python
from conans import ConanFile, CMake, tools
import os
class ColorMyConsole(ConanFile):
name = "colmc"
version_major = 1
version_minor = 0
version_bugfix = 0
version = str(version_major) + "." + str(version_minor) + "." + str(version_bugfix)
url = "TODO"
license = "MIT"
description = "TODO"
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
exports_sources = "CMakeLists.txt", "include*", "src*", "tests*", "LICENSE.txt"
no_copy_source=True
def configure_cmake(self):
cmake = CMake(self)
cmake.configure()
return cmake
def version_file(self):
return os.path.join(self.build_folder, "include", "colmc", "version.h")
def build(self):
version_file_content = ""
try:
version_file_content = tools.load(self.version_file())
        except:
            # tolerate a missing version.h (e.g. first build); it is regenerated below
            pass
new_version_file_content = """// (c) 2021 <NAME>. Licensed under the MIT license.
#ifndef colmc_version_h_INCLUDED
#define colmc_version_h_INCLUDED
// Don't modify this file; it has been generated by conanfile.py
#include <cstdint>
#include <colmc/push_warnings.h>
namespace colmc {{
constexpr std::uint8_t version_major = {m};
constexpr std::uint8_t version_minor = {n};
constexpr std::uint8_t version_bugfix = {b};
constexpr char version_str[] = "{m}.{n}.{b}";
}}
#include <colmc/pop_warnings.h>
#endif
""".format(m=self.version_major, n=self.version_minor, b=self.version_bugfix)
if new_version_file_content != version_file_content:
tools.save(self.version_file(), new_version_file_content)
cmake = self.configure_cmake()
args = None
if self.settings.compiler == "Visual Studio":
args = ["Thost=x64"] # Use 64 bit compiler/liker host
cmake.configure(args=args)
cmake.build()
def package(self):
self.copy("*.lib", dst="lib", src="lib")
self.copy("*.a", dst="lib", src="lib")
self.copy("*.h", dst="include", src="include")
self.copy("*.h", dst="include", src=os.path.join(self.build_folder, "include"))
self.copy("LICENSE.txt", dst="license", src="")
def package_info(self):
        self.cpp_info.libs = ["colmc"]
```
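A small standalone illustration (not part of the recipe) of the brace escaping used in `build()` above: doubled braces survive `str.format()` as literal `{`/`}` for the C++ namespace, while `{m}`/`{n}`/`{b}` are substituted with the version components.
```python
# Doubled braces become literal braces; {m}/{n}/{b} are replaced by format().
template = 'namespace colmc {{\nconstexpr char version_str[] = "{m}.{n}.{b}";\n}}'
print(template.format(m=1, n=0, b=0))
# namespace colmc {
# constexpr char version_str[] = "1.0.0";
# }
```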
|
{
"source": "jegalyardt/DeMin",
"score": 2
}
|
#### File: jegalyardt/DeMin/DeDriver.py
```python
import sys, os, shutil, popen2, cPickle, shelve, math, time, gzip
import random, copy, re, urllib, xml.parsers.expat
from optparse import OptionParser
__version__ = '2.4.4'
farmdict = {}
curFarm = ''
def Pause(lockFName='lockme.loc'):
""" Pause the algorithm by creating a lock file.
"""
loc = open(lockFName, 'w')
loc.close()
def SendMail(address, subject, message):
""" Send mail to 'address'.
"""
child = popen2.Popen4('mail -s \"%s\" %s' % (subject, address))
child.tochild.write(message)
child.tochild.close()
retVal = child.wait()
child.fromchild.close()
return retVal
def WriteLog(fname, message):
""" Write 'message' to file 'fname'.
"""
try:
log = open(fname, 'a')
log.write(message)
log.close()
except:
print message
def XmlStartElement(attrName, attrs):
""" Start element handler for the Expat XML parser. Used in parsing
the output of the XML-based CAF best submit CGI script. We explicitly
exclude the RedHat 7.3 account group.
"""
global farmdict
global curFarm
if attrName == 'farm':
curFarm = attrs['name']
# clear this farm's list of account groups
farmdict[curFarm] = []
elif (attrName == 'acctgroup' and
attrs['name'] != 'group_rh73'):
farmdict[curFarm].append(attrs)
def StdDevScaleFac(F, cr, np):
""" Calculate the theoretical scale factor which acts upon the
mean standard deviation of trial solution elements in the current
generation to give the mean std dev. of trial solution elements
for the next generation (this std. dev. is an average over the
population and the trial vector elements).
See Zaharie, D., 'Critical Values for the Control Parameters of
Differential Evolution Algorithms.'
"""
return math.sqrt(2. * F**2 * cr + (cr**2 - 2. * cr) / np + 1.0)
def OptMutationScaleFac(stdDevScale, cr, np):
""" Calculate the theoretical mutation scale factor ('F') for the
DE rand/1/bin algorithm, given a desired std. dev. scale factor.
See Zaharie, D., 'Critical Values for the Control Parameters of
Differential Evolution Algorithms.'
"""
return math.sqrt(0.5*((stdDevScale**2 - 1.) / cr + (2. - cr) / np ))
def OptCrossProb(stdDevScale, F, np):
""" Calculate the theoretically optimal crossover probability
('Cr') for the DE rand/1/bin algorithm, given a desired
std. dev. scale factor.
See Zaharie, D., 'Critical Values for the Control Parameters of
Differential Evolution Algorithms.'
"""
try:
cr = -(np*F**2 - 1) + math.sqrt((np*F**2 - 1)**2 - np*(1 - stdDevScale**2))
except ValueError, data:
print data
cr = 0.2
print 'Setting cross prob to the eminently reasonable value of %f' % cr
return cr
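# --- Illustrative addition (not part of the original DeDriver.py) -----------
# The three Zaharie helpers above are mutually consistent: feeding the scale
# factor returned by StdDevScaleFac() back through OptMutationScaleFac()
# recovers the original F.  A quick sanity-check sketch (parameter values are
# arbitrary examples):
def _zaharieRoundTripExample(F=0.8, cr=0.9, np=20):
    """ Sketch: StdDevScaleFac() and OptMutationScaleFac() are inverses. """
    s = StdDevScaleFac(F, cr, np)
    assert abs(OptMutationScaleFac(s, cr, np) - F) < 1.0E-12
    return s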
class Member:
"""Population member; holds one trial vector + (tuple of) fcn eval(s)"""
_LARGEVAL = 1.0E20
_defLoBnd = 0.0
_defHiBnd = 1.0E3
_constrEvalMax = 500
_constrCallMax = 10
def __init__(self, dim=1, fdim=1, gen=-1, ipop=-1):
self.nDim = dim
self.fDim = fdim
self.generation = gen # Negative gen --> trial vect
self.popIndex = ipop # index in the population (< 0 --> trial)
self.x = [0.0] * self.nDim
self.y = [Member._LARGEVAL]*self.fDim
self.parmBounds = []
self.deltaConstr = 0.0
self.isValid = True
for i in range(self.nDim):
self.parmBounds.append((Member._defLoBnd, Member._defHiBnd))
def __repr__(self):
sep = '\n'
a = ' Generation: %s \t Population Index: %s' \
% (self.generation,self.popIndex)
b = ' Cost: %s \t\t Vector: %s' \
% (self.y, self.x)
return sep.join((a,b))
def isTrial(self):
return (self.generation < 0 or self.popIndex < 0)
def getKey(self):
# generate a string object from self.generation and
# self.popIndex which can be used as a
# dictionary key.
return self.popIndex
def __getitem__(self, key):
return self.x[key]
def __setitem__(self, key, value):
if type(value) != type(1.0) and type(value) != type(1):
raise TypeError, 'Only floats and ints can be stored in Member.x[].'
self.x[key] = value
def __len__(self):
return self.x.__len__()
def __cmp__(self, other):
# Overload the comparison function -- useful for sorting
# Member objects
# May want to define the rich comparison operators
# (e.g. __lt__() for '<') instead of __cmp__().
if not isinstance(other, Member):
raise TypeError, ('Comparison with type %s not supported!'
% (other.__class__))
if len(self.y) > 1:
raise ValueError, 'Pareto DE not yet implemented.'
else:
if self.y[0] > other.y[0]:
return 1
elif self.y[0] == other.y[0]:
return 0
else:
return -1
def makeTrial(self):
# Make a copy of self, but tag it as a trial;
# return the copy
trialMember = copy.deepcopy(self)
trialMember.generation = -1
trialMember.popIndex = -1
trialMember.y = [Member._LARGEVAL]*self.fDim
return trialMember
def __add__(self, other):
""" Overload the '+' operator; add the parameter values for
each index rather than concatenating the underlying list objects. """
## if not isinstance(other, Member):
if not isinstance(other, self.__class__):
raise TypeError, (
'Right-hand operand (type %s) must be a %s object!'
% (other.__class__, self.__class__))
if other.nDim != self.nDim:
raise ValueError, (
'Member dimension mismatch! left.nDim = %s, right.nDim = %s'
                % (self.nDim, other.nDim))
trialMember = self.makeTrial()
trialMember.x = map((lambda a,b: a+b), trialMember.x, other.x)
return trialMember
def __iadd__(self, other):
""" Overload the '+=' operator; """
## if not isinstance(other, Member):
if not isinstance(other, self.__class__):
raise TypeError, (
'Right-hand operand (type %s) must be a %s object!'
% (other.__class__, self.__class__))
if other.nDim != self.nDim:
raise ValueError, (
'Member dimension mismatch! left.nDim = %s, right.nDim = %s'
% (self.nDim, other.nDim))
self.x = map((lambda a,b: a+b), self.x, other.x)
return self
def __sub__(self, other):
""" Overload the '-' operator; subtract the parameter values for
each index. """
## if not isinstance(other, Member):
if not isinstance(other, self.__class__):
raise TypeError, (
'Right-hand operand (type %s) must be a %s object!'
% (other.__class__, self.__class__))
if other.nDim != self.nDim:
raise ValueError, (
'Member dimension mismatch! left.nDim = %s, right.nDim = %s'
                % (self.nDim, other.nDim))
trialMember = self.makeTrial()
trialMember.x = map((lambda a,b: a-b), trialMember.x, other.x)
return trialMember
def __isub__(self,other):
""" Overload the '-=' operator; change self.x in place. """
## if not isinstance(other, Member):
if not isinstance(other, self.__class__):
raise TypeError, (
'Right-hand operand (type %s) must be a %s object!'
% (other.__class__, self.__class__))
if other.nDim != self.nDim:
raise ValueError, (
'Member dimension mismatch! left.nDim = %s, right.nDim = %s'
% (self.nDim, other.nDim))
self.x = map((lambda a,b: a-b), self.x, other.x)
return self
def __mul__(self, scale):
""" Overload the '*' operator; scales each
element of self.x by factor 'scale'. """
if type(scale) != type(1.0) and type(scale) != type(1):
raise TypeError, (
'Right-hand operand (type %s) must be an int or float!'
% type(scale))
trialMember = self.makeTrial()
trialMember.x = map((lambda a,b=scale: a*b), trialMember.x)
return trialMember
def __imul__(self, scale):
""" Overload the '*=' operator; change self.x in place. """
if type(scale) != type(1.0) and type(scale) != type(1):
raise TypeError, (
'Right-hand operand (type %s) must be an int or float!'
% type(scale))
self.x = map((lambda a,b=scale: a*b), self.x)
return self
def __rmul__(self, scale):
""" Overload the '*' operator for the case when 'scale'
is the left-hand operand. """
if type(scale) != type(1.0) and type(scale) != type(1):
raise TypeError, (
'Left-hand operand (type %s) must be an int or float!'
% type(scale))
trialMember = self.makeTrial()
trialMember.x = map((lambda a,b=scale: a*b), trialMember.x)
return trialMember
def __div__(self, scale):
""" Overload the '/' operator. """
if type(scale) != type(1.0) and type(scale) != type(1):
raise TypeError, (
'Right-hand operand (type %s) must be an int or float!'
% type(scale))
if scale == 0:
raise ZeroDivisionError
trialMember = self.makeTrial()
trialMember.x = map((lambda a,b=scale: a/b), trialMember.x)
return trialMember
def __idiv__(self, scale):
""" Overload the '/=' operator; change self.x in place """
if type(scale) != type(1.0) and type(scale) != type(1):
raise TypeError, (
'Right-hand operand (type %s) must be an int or float!'
% type(scale))
if scale == 0:
raise ZeroDivisionError
self.x = map((lambda a,b=scale: a/b), self.x)
return self
def setParmBounds(self, i, loBnd=0.0, hiBnd=1.0E2):
""" Set the lower and upper bounds for the ith parameter in this
Member object.
"""
if type(i) != type(1) or (type(loBnd) != type(1)
and type(loBnd) != type(1.0)) or \
(type(hiBnd) != type(1) and
type(hiBnd) != type(1.0)):
raise TypeError
if i < 0 or i >= self.nDim:
raise IndexError, 'Invalid index, %s.' % i
self.parmBounds[i] = (loBnd, hiBnd)
def crossOver(self, crossProb, mutant):
""" Perform the crossover operation with 'self' as the parent
Member object using the binomial distribution; return a new Member object.
"""
if type(crossProb) != type(1.0):
raise TypeError
if crossProb < 0.0 or crossProb > 1.0:
            raise ValueError, 'crossProb should be in [0.0, 1.0].'
## if not isinstance(mutant, Member):
if not isinstance(mutant, self.__class__):
raise TypeError, 'crossOver is only defined for two Member objects.'
trialMember = self.makeTrial()
trialMember.popIndex = self.popIndex
# Randomly select one index for crossover
irand = random.randrange(0, self.nDim)
for i in range(self.nDim):
if (i == irand or
random.random() < crossProb):
trialMember[i] = mutant[i]
return trialMember
def repairHardConstr(self, hardConstraints, callCtr=0):
""" Repair rule for hard constraint violation.
"""
evalCtr = 0
# hardConstraints should be a sequence of strings
# each string should be of the form 'x[0] - 10.0'
x = copy.deepcopy(self.x)
# Create a compiled RE object
        patt = re.compile(r"\[([0-9]+)\]")
for constr in hardConstraints:
# re.compile('pattern').findall('string') returns a list of
# matching substrings
delta = 0.0
j = 0
indexlist = []
strlist = patt.findall(constr)
if len(strlist) == 0:
# Malformed constraint! There are no substrings like 'x[0]'.
# Skip this constraint.
continue
for i in strlist:
# Convert the list indices found to integers
indexlist.append(int(i))
# Set j to be the first list index found in the constraint string
i = 0
j = indexlist[i]
try:
delta = eval(constr)
except(ZeroDivisionError, ValueError, AssertionError,
FloatingPointError, OverflowError):
delta += Member._LARGEVAL
except (IndexError, KeyError, NameError, TypeError,
SyntaxError, AttributeError):
# Malformed constraint; skip it. (KeyError will probably
# only be raised if the underlying parameter container
# for the Member class is changed to a dictionary)
continue
except:
continue
while delta > 0.0:
## if evalCtr < Member._constrEvalMax:
if evalCtr < self._constrEvalMax:
evalCtr += 1
else:
break
try:
# Throw a random number within the bounds of
# parameter j.
x[j] = random.uniform(self.parmBounds[j][0],
self.parmBounds[j][1])
# evaluate the string constr as a Python expression
delta = eval(constr)
except (ZeroDivisionError, ValueError, AssertionError,
FloatingPointError, OverflowError):
delta += Member._LARGEVAL
# this should not be an infinite loop, given
# that we've already successfully eval'd the
# constraint once.
continue
except (IndexError, KeyError):
# Fixup j and continue?
break
# Cycle through the list indices found in this
# constraint -- attempt to systematically vary
# parameters when constraints involve more than
# one parameter. For example, if the constraint
# string is 'x[0] + 2.0 * x[1]', and it evaluates
# to a positive definite value (i.e. constraint is
# violated), first throw a random number for x[0]
# and test the constraint again; if it is still violated,
# throw a random number for x[1] and re-eval.
######
# There's probably a better way to code this, e.g. using
# iterators, but this should work for now...
if i in range(len(indexlist)-1):
i += 1
# max value of i is (len(indexlist) - 1)
else:
i = 0
j = indexlist[i]
if evalCtr > 0:
# At least one constraint was violated at least once
## if callCtr < Member._constrCallMax:
## if evalCtr < Member._constrEvalMax:
if callCtr < self._constrCallMax:
if evalCtr < self._constrEvalMax:
# update the parm vector
self.x = x
# Call this function recursively to verify that
# the new parm vector satisfies the constraints (this
# is necessary for a set of constraints which is
# interwoven -- e.g. x[0] occurs in more than one
# constraint.
self.repairHardConstr(hardConstraints, callCtr+1)
return
else:
# Having trouble getting a viable parm vector,
# so try starting over.
self.repairHardConstr(hardConstraints, callCtr+1)
return
else:
# We've reached the recursive call limit; give up on
# this Member object and ensure that it will not be
# passed to the objective function.
self.isValid = False
return
else:
# Only evaluated each constraint once (exactly), and all constraints
# were satisfied. We have a viable parm vector, so return.
self.isValid = True
return
def repairSoftConstr(self, softConstraints):
""" Repair rule for soft constraint violation.
"""
if self.isValid:
# softConstraints should be a sequence of strings,
x = copy.deepcopy(self.x)
sumDelta = 0.0
# sum up the total magnitude of the constraint violation
for constr in softConstraints:
delta = 0.0
try:
delta = eval(constr)
except (ZeroDivisionError, ValueError, AssertionError,
FloatingPointError, OverflowError):
sumDelta += Member._LARGEVAL
continue
except (IndexError, KeyError, NameError, TypeError,
SyntaxError, AttributeError):
# This constraint seems to be malformed;
# ignore it.
continue
if delta > 0.0:
sumDelta += delta
self.deltaConstr = sumDelta
def dist(self, other, xmin, xmax):
""" Calculate the normalized Euclidean distance between
vectors self.x and other.x. Note: normalization is over the
parameter x_min[i] and x_max[j] as derived from the current
population (passed to this method as args).
"""
# SADE
# make sure to check for constraint violation before using
# this distance in an objective fcn.
# Should have no ZeroDivision problems, as the parmBounds
# are not user-settable.
if not isinstance(other, Member):
raise TypeError, 'Member.dist() only valid btwn two Member objects.'
d = 0.0
for i in xrange(self.nDim):
## d += ((self.x[i] - other.x[i]) / (self.parmBounds[i][1] -
## self.parmBounds[i][0]))**2
delta = xmax[i] - xmin[i] + 1.0E-6
d += ((self.x[i] - other.x[i]) / delta)**2
return math.sqrt(d)
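# --- Illustrative addition (not part of the original DeDriver.py) -----------
# The operator overloads above let DE mutation read like the textbook formula
# v = x_r0 + F * (x_r1 - x_r2), and crossOver() then mixes the mutant into a
# parent with binomial crossover.  A small sketch with hand-built Members:
def _memberMutationExample():
    """ Sketch: build three 2-D Members and form a DE/rand/1 trial vector. """
    r0, r1, r2 = Member(2), Member(2), Member(2)
    r0.x = [1.0, 2.0]
    r1.x = [3.0, 4.0]
    r2.x = [2.5, 1.0]
    mutant = r0 + 0.5 * (r1 - r2)          # mutant.x == [1.25, 3.5]
    parent = Member(2, gen=0, ipop=0)
    parent.x = [0.0, 0.0]
    trial = parent.crossOver(0.9, mutant)  # trial keeps parent's popIndex
    return trial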
#--------------------------------------------------------------------------
class SAMember(Member):
""" A subclass of Member which implements a history of function values,
with the average over the history taken as the reported function value.
"""
_constrEvalMax = 500
_constrCallMax = 10
_TINY = 1.0E-15
def __init__(self, dim=1, fdim=1, gen=-1, ipop=-1, histlen=10):
self.nDim = dim
self.fDim = fdim
self.histLen = histlen
self.generation = gen # Negative gen --> trial vect
self.popIndex = ipop # index in the population (< 0 --> trial)
self.x = [0.0] * self.nDim
## self.yHist = [[Member._LARGEVAL]*self.fDim for i in range(self.histLen)]
self.yHist = [[0.0]*self.fDim for i in range(self.histLen)]
self.y = [Member._LARGEVAL]*self.fDim
self.parmBounds = []
self.deltaConstr = 0.0
self.isValid = True
for i in range(self.nDim):
self.parmBounds.append((Member._defLoBnd, Member._defHiBnd))
def dominates(self, other):
""" Return values:
-1 if self dominates other
1 if other dominates self
0 if both self and other are non-dominated
"""
flag1 = False
flag2 = False
for i in range(self.fDim):
if self.y[i] < other.y[i]:
flag1 = True
elif self.y[i] > other.y[i]:
flag2 = True
if flag1 and not flag2:
return -1
elif not flag1 and flag2:
return 1
else:
return 0
def __cmp__(self, other):
# Overload the comparison function -- useful for sorting
# Member objects
# May want to define the rich comparison operators
# (e.g. __lt__() for '<') instead of __cmp__().
if not isinstance(other, SAMember):
raise TypeError, ('Comparison with type %s not supported!'
% (other.__class__))
# SADE
return self.dominates(other)
def isUnique(self, other):
""" Determine whether this is a unique individual based upon
decision vector.
"""
# calc the infinity-norm of the vector
delta = self - other
for i in xrange(len(delta)):
delta[i] = abs(delta[i])
norm = max(delta)
if norm > self._TINY:
return True
else:
return False
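# --- Illustrative addition (not part of the original DeDriver.py) -----------
# dominates() implements standard Pareto dominance on the objective vector y:
# it returns -1 when self is no worse in every objective and strictly better
# in at least one, +1 for the reverse, and 0 when neither dominates.
def _dominanceExample():
    """ Sketch: compare two 2-objective SAMembers. """
    a = SAMember(dim=2, fdim=2)
    b = SAMember(dim=2, fdim=2)
    a.y = [1.0, 2.0]
    b.y = [1.5, 2.0]
    return a.dominates(b)   # -> -1, i.e. a dominates b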
#--------------------------------------------------------------------------
class Population:
"""An indexed collection of Member objects"""
_LARGEVAL = 1.0E20
_TINY = 1.0E-12
_defInitRange = (-100.0, 100.0)
_defGaussParms = (0., 50.) # (mu, sigma)
# _cmpMetrics = ('stdDev', 'range', 'RMS', 'fractol', 'chisq')
_strategies = (('rand',1), ('best',1), ('best',2), ('rand-sort',1),
('rand-trig',1), ('best-trig',1))
def __init__(self, dim=1, fdim=1, popSize=4, gen=-1, prob=0.1, f=0.5,
mt=0.05, initGauss=False):
self.nDim = dim
self.fDim = fdim
self.generation = gen ## May want to take 'gen' out of constructor arglist
self.np = popSize
self.crossProb = prob
self.F = f
self.mt = mt ## only used for rand-trig, best-trig strategies
self.ibest = ''
self.iworst = ''
#self.cmpMetric = Population._cmpMetrics[0]
self.strategy = Population._strategies[0]
self.hardConstraints = ()
self.softConstraints = ()
self.initRange = [(Population._defInitRange[0],
Population._defInitRange[1])]*self.nDim
self.initGauss = initGauss
self.initGaussParms = {}
self.memberColl = {}
for i in range(self.np):
tmp = Member(self.nDim, self.fDim, self.generation, i)
tmpkey = tmp.getKey()
for j in range(tmp.nDim):
#tmp[j] = tmp.getRandom(j, lo, hi)
tmp[j] = 0.0
self.memberColl[tmpkey] = tmp
if i == 0:
self.ibest = tmpkey
self.iworst = tmpkey
def __getitem__(self, key):
# SHOULD RAISE TypeError IF key IS AN INVALID TYPE.
# SHOULD RAISE KeyError IF key IS NOT IN THIS COLLECTION.
# No. Let the underlying container class do it.
return self.memberColl[key]
def __setitem__(self, key, value):
# SHOULD *NOT* RAISE KeyError IF key IS NOT IN THIS COLLECTION.
## if not isinstance(value, Member):
## if not isinstance(value, self.memberColl[0].__class__):
## raise TypeError, (
## 'Objects of type %s cannot be stored by Population instances.'
## % (value.__class__))
if value.isTrial():
msg = 'Please set the generation and popIndex of value;'
msg += (' found (gen, popIdx) = (%s, %s).'
% (value.generation, value.popIndex))
raise ValueError, msg
self.memberColl[key] = value
def __contains__(self, key):
return self.memberColl.has_key(key)
def __len__(self):
return len(self.memberColl)
def keys(self):
return self.memberColl.keys()
def setGeneration(self, gen):
""" Set the Population generation, pass this on to all the Member objects.
"""
self.generation = gen
for key in self.keys():
self[key].generation = gen
def setPopParmBounds(self):
""" Set the low and high bounds for each parameter of each Member
object in this Population using the available hard constraints.
Should be called before Population::initialize().
"""
# Form a compiled regular expression to capture the parm indices
# in each constraint (e.g. capture '0' in 'x[0] - 1')
        idx = r'\[([0-9]+)\]'
        indexpatt = re.compile(idx)
        #
        pm = r'([-+])?'
        oppMult = r'(?:\s*\*\s*)?'
        oppAdd = r'\s*' + pm + r'\s*'
        absnum = r'((?:\d+(?:\.\d*)?|\d*\.\d+)(?:[eE][-+]?\d+)?)?'
bndpatt = re.compile(pm + absnum + oppMult + 'x' + idx + oppAdd + absnum)
# Loop through hard constraints, look for parameter bounds
bounds = {}
for constr in self.hardConstraints:
j = -1
strlist = indexpatt.findall(constr)
if len(strlist) == 1:
# This constraint only involves one parameter,
# so it is a boundary candidate.
j = int(strlist[0])
if j not in bounds.keys():
bounds[j] = [Member._defLoBnd, Member._defHiBnd]
if j < self.nDim and j >= 0:
# Determine the low and high bounds
match = bndpatt.search(constr)
if (len(match.string[match.start():match.end()]) <
len(match.string)):
# The length of the matched substring is less than
# the full string --> there are characters left
# over!
continue
mgroups = match.groups()
if mgroups:
# constraint is of the form 's0 * c0 * x[j] + s1 * c1 <= 0'
# where the '<= 0' is implicit and c0 > 0, c1 >= 0,
# s0, s1 = +- 1.0
# --> s0 * x[i] <= - s1 * c1 / c0
#
# >>> m = bndpatt.search('-55e-2x[0]+3.2')
# >>> m.groups()
# ('-', '55e-2', '0', '+', '3.2')
#
c0 = 1.0
s1 = 1.0 # sign of c1
c1 = 0.0 # c1 >= 0
if mgroups[1]:
c0 = float(mgroups[1])
if mgroups[3] == '-':
s1 = -1.0
if mgroups[4]:
c1 = float(mgroups[4])
if mgroups[0] == '+' or mgroups[0] == None:
# s0 > 0
# we have an upper bound
bounds[j][1] = -s1 * c1 / c0
elif mgroups[0] == '-':
# s0 < 0
# we have a lower bound
bounds[j][0] = s1 * c1 / c0
for key in self.keys():
for j in bounds.keys():
self[key].setParmBounds(j, bounds[j][0], bounds[j][1])
def initialize(self):
""" Initialize the population with random trial vectors;
should only be done for generation -1!"""
# Set the parameter upper and lower bounds from the hard constraints
# given by the user.
self.setPopParmBounds()
for key in self.keys():
# Initialize the Member objects' parameter vectors
for i in range(self[key].nDim):
if not self.initGauss:
self[key][i] = random.uniform(self.initRange[i][0],
self.initRange[i][1])
else:
if i in self.initGaussParms.keys():
self[key][i] = random.gauss(self.initGaussParms[i][0],
self.initGaussParms[i][1])
else:
self[key][i] = random.uniform(self.initRange[i][0],
self.initRange[i][1])
# Repair the hard and soft constraints
self[key].repairHardConstr(self.hardConstraints)
self[key].repairSoftConstr(self.softConstraints)
self[key].y = [Member._LARGEVAL]*self.fDim
self.setGeneration(0)
def getKey(self):
""" Get a key for this Population instance for use with the
shelve module. """
sep1 = ":"
sep2 = "::"
key1 = sep1.join(("gen", `self.generation`))
key2 = sep1.join(("np", `self.np`))
key3 = sep1.join(("nDim", `self.nDim`))
return sep2.join((key1, key2, key3))
def setStrategy(self, strat):
if strat not in Population._strategies:
raise ValueError, ('%s is not an implemented DE strategy.'
% strat)
self.strategy = strat
def printAvailStrategies(self):
print "Available DE strategies:\n%s" % Population._strategies
def getSortedKeys(self):
""" Sort the Member objects in this population by increasing cost
value.
"""
mbrkeys = self.keys()
# sort the list of keys in place according to the cost value
mbrkeys.sort(lambda a,b: Member.__cmp__(self[a], self[b]))
return mbrkeys
def getNpTrunc(self, truncFrac=0.15):
""" Get the truncated number of individuals for use in convergence tests.
"""
return max(int(math.ceil(truncFrac*self.np)), self.nDim + 1, 3)
def getCovMatrix(self, n=2):
""" Get the covariance matrix of the parameters for truncated
population defined by truncFrac. Return value is a tuple:
(list of mean values, covariance matrix)
"""
covMat = [[Population._LARGEVAL]*self.nDim]
for i in range(self.nDim-1):
covMat.append(copy.deepcopy(covMat[0]))
if n <= 0:
return ([Population._LARGEVAL]*self.nDim,
covMat)
sortedKeys = self.getSortedKeys()
truncKeys = sortedKeys[:n]
# Calc the mean values
sum = 0.0
mu = []
for iparm in range(self.nDim):
sum = 0.0
for key in truncKeys:
sum += self[key].x[iparm]
mu.append(sum / float(n))
# Now calc the covariance matrix
sum = 0.0
for iparm in range(self.nDim):
# Cov matrix should be symmetric, so only calc the
# upper triangle.
for jparm in range(iparm, self.nDim):
sum = 0.0
for key in truncKeys:
sum += ((self[key].x[iparm] - mu[iparm]) *
(self[key].x[jparm] - mu[jparm]))
covMat[iparm][jparm] = sum / float(n)
# Now set the lower triangle...
for iparm in range(1, self.nDim):
for jparm in range(iparm):
covMat[iparm][jparm] = copy.deepcopy(covMat[jparm][iparm])
return (mu, covMat)
def getCovMatRepr(self, covMat):
""" Print the covariance matrix in a pretty way.
"""
str = ''
for i in range(len(covMat)):
for j in range(len(covMat[i])):
str += '%.6E ' % covMat[i][j]
str += '\n'
return str
def getStats(self, truncFrac=0.15):
""" Get the stats for this population.
"""
# Return a tuple of the form,
# (bestCost, worstCost, meanCost, stdDev, fracTol, chisq, ndf)
orderedKeys = self.getSortedKeys()
# Get the truncated list of keys
# The number of keys in the truncated list must be at least (nDim+1)
# so that ndf >= 1 (must have at least one degree of freedom)
npTrunc = self.getNpTrunc(truncFrac)
ndf = npTrunc - self.nDim
truncKeys = []
truncSuccess = True
for key in orderedKeys:
if (self[key].isValid and
self[key].deltaConstr == 0.0):
truncKeys.append(key)
truncKeys = truncKeys[:npTrunc]
if len(truncKeys) < npTrunc:
truncSuccess = False
# Get the best and worst Member objects
# Ensure that they are viable
        # -- Actually, the viability check for the best Member object
# should not be necessary, since all nonviable objects are
# automatically assigned a cost value of Population._LARGEVAL
i = 0
while (i < len(orderedKeys) and
(not self[orderedKeys[i]].isValid or
self[orderedKeys[i]].deltaConstr > 0)):
i += 1
if i < len(orderedKeys):
self.ibest = orderedKeys[i]
else:
# We've traversed the entire list of keys and not found
# a viable Member object, so choose randomly
self.ibest = random.choice(orderedKeys)
i = -1
while (i >= -len(orderedKeys) and
(not self[orderedKeys[i]].isValid or
self[orderedKeys[i]].deltaConstr > 0)):
i -= 1
if i >= -len(orderedKeys):
self.iworst = orderedKeys[i]
else:
# we've traversed the entire list of keys and not found
# a viable Member object
## self.iworst = self.ibest
self.iworst = random.choice(orderedKeys)
#### SADE
bestCost = self[self.ibest].y[0]
worstCost = self[self.iworst].y[0]
if self.ibest == self.iworst:
# We've got problems -- not enough viable Member objects!
# Returning Population._LARGEVAL for most stats will ensure that we
# do not converge early.
(muParms, covMat) = self.getCovMatrix(npTrunc)
return (bestCost, worstCost, Population._LARGEVAL,
Population._LARGEVAL,
Population._LARGEVAL,
Population._LARGEVAL,
ndf, muParms, covMat)
# Find the mean cost
sum = 0.0
sumsq = 0.0
sumTrunc = 0.0
sumsqTrunc = 0.0
mu = Population._LARGEVAL
musq = Population._LARGEVAL
stdDev = Population._LARGEVAL
varTrunc = Population._TINY
chisq = Population._LARGEVAL
fracTol = Population._LARGEVAL
# Calc the mean and standard deviation together
n = 0
for key in self.keys():
# Only include valid Population Members in the costs;
# we're only going to use this list of cost values for
# statistical & convergence purposes, so we don't want to
# skew the results with non-viable Members.
if (self[key].isValid and
self[key].deltaConstr == 0.0):
sum += self[key].y[0]
sumsq += self[key].y[0]**2
n += 1
if n > 0:
mu = sum / float(n)
musq = sumsq / float(n)
diff = musq - mu**2
if diff > 0.0 and n > 1:
# Calc the standard deviation for the entire population
stdDev = math.sqrt(n * diff / float(n - 1))
# Loop through the sorted, truncated list of keys,
# excluding the best individual
if truncSuccess:
#for key in truncKeys[1:]:
for key in truncKeys:
# We've already checked every Member key in truncKeys[] for
# viability and discarded the unviable ones
sumTrunc += self[key].y[0]
sumsqTrunc += self[key].y[0]**2
## muTrunc = sumTrunc / float(npTrunc - 1)
## musqTrunc = sumsqTrunc / float(npTrunc - 1)
muTrunc = sumTrunc / float(npTrunc)
musqTrunc = sumsqTrunc / float(npTrunc)
diffTrunc = musqTrunc - muTrunc**2
if diffTrunc > 0:
## varTrunc = (npTrunc - 1) * diffTrunc / float(npTrunc - 2)
varTrunc = npTrunc * diffTrunc / float(npTrunc - 1)
chisq = 0.0
for key in truncKeys[1:]:
chisq += (self[key].y[0] - bestCost)**2
#chisq /= (varTrunc + Population._TINY)
#chisq /= (varTrunc)
if abs(musqTrunc) > Population._TINY:
chisq /= (musqTrunc)
else:
chisq = Population._LARGEVAL
#
######chisq = (bestCost - muTrunc)**2 / varTrunc
range = self[truncKeys[-1]].y[0] - bestCost
fracTol = 2.0 * abs(range) / (abs(self[truncKeys[-1]].y[0]) +
abs(bestCost) + Population._TINY)
else:
# Calculate the fractional tolerance
range = worstCost - bestCost
fracTol = 2.0 * abs(range) / (abs(worstCost) + abs(bestCost) +
Population._TINY)
(muParms, covMat) = self.getCovMatrix(npTrunc)
return (bestCost, worstCost, mu, stdDev, fracTol, chisq, ndf,
muParms, covMat)
def getRndMembers(self, nMembers, *targets):
""" Randomly select 'nMembers' from the Population instance.
They must all be different from each other and from 'targets'.
Returns a tuple with the selected Member objects' keys. """
# This is much like the random.sample(population,k) utility,
# but we need to exclude 'targets' as well.
if nMembers >= self.np:
raise ValueError, 'Requested more random members than are in the population!'
rndMemberKeys = []
keys = self.keys()
for target in targets:
rndMemberKeys.append(target)
for i in range(nMembers):
tmp = random.choice(keys)
while tmp in rndMemberKeys:
tmp = random.choice(keys)
rndMemberKeys.append(tmp)
for target in targets:
rndMemberKeys.remove(target)
return tuple(rndMemberKeys)
def getMutant(self, parentKey):
""" Generate a mutant Member object according to the current
Differential Evolution strategy. Accepts a parent key as input
(not a Member object), and returns a new Member object. """
if parentKey not in self.keys():
raise KeyError, ('Key %s is not in this Population!'
% parentKey)
mutant = self[parentKey].makeTrial()
rndKeys = ()
if self.strategy == ('best', 1):
rndKeys = self.getRndMembers(2, parentKey, self.ibest)
mutant = self[self.ibest] + self.F * (self[rndKeys[0]] -
self[rndKeys[1]])
elif self.strategy == ('best', 2):
rndKeys = self.getRndMembers(4, parentKey, self.ibest)
mutant = self[self.ibest] + self.F * (self[rndKeys[0]] +
self[rndKeys[1]] -
self[rndKeys[2]] -
self[rndKeys[3]])
elif self.strategy == ('best-trig',1):
#### SADE
rndKeys = self.getRndMembers(2, parentKey, self.ibest)
pprime = (abs(self[rndKeys[0]].y[0]) + abs(self[rndKeys[1]].y[0]) +
abs(self[self.ibest].y[0]))
if random.random() <= self.mt and pprime > Population._TINY:
# 'mutant' is biased toward the region of lower function
# value in the available parameter space (defined by
# the 'rndKeys' list)
p = [abs(self[self.ibest].y[0]) / pprime,
abs(self[rndKeys[0]].y[0]) / pprime,
abs(self[rndKeys[1]].y[0]) / pprime]
mutant = ((self[self.ibest] + self[rndKeys[0]] +
self[rndKeys[1]]) / 3.0 +
(p[1] - p[0]) * (self[self.ibest] - self[rndKeys[0]]) +
(p[2] - p[1]) * (self[rndKeys[0]] - self[rndKeys[1]]) +
(p[0] - p[2]) * (self[rndKeys[1]] - self[self.ibest]))
else:
mutant = self[self.ibest] + self.F * (self[rndKeys[0]] -
self[rndKeys[1]])
## elif self.strategy == ('rand-to-best', 1):
## rndKeys = self.getRndMembers(3, parentKey, self.ibest)
## mutant = (self[rndKeys[0]] + self.lmbda * (self[self.ibest] -
## self[rndKeys[0]]) +
## self.F * (self[rndKeys[1]] - self[rndKeys[2]]))
elif self.strategy == ('rand-sort', 1):
rndKeys = list(self.getRndMembers(3, parentKey))
rndKeys.sort(lambda a,b: Member.__cmp__(self[a], self[b]))
shuffledKeys = rndKeys[1:]
random.shuffle(shuffledKeys)
mutant = self[rndKeys[0]] + self.F * (self[shuffledKeys[0]] -
self[shuffledKeys[1]])
elif self.strategy == ('rand', 1):
# assume self.strategy == ('rand', 1):
rndKeys = self.getRndMembers(3, parentKey)
mutant = self[rndKeys[0]] + self.F * (self[rndKeys[1]] -
self[rndKeys[2]])
elif self.strategy == ('rand-trig',1):
rndKeys = self.getRndMembers(3, parentKey)
pprime = (abs(self[rndKeys[0]].y[0]) + abs(self[rndKeys[1]].y[0]) +
abs(self[rndKeys[2]].y[0]))
if random.random() <= self.mt and pprime > Population._TINY:
# 'mutant' is biased toward the region of lower function
# value in the available parameter space (defined by
# the 'rndKeys' list)
p = [abs(self[rndKeys[0]].y[0]) / pprime,
abs(self[rndKeys[1]].y[0]) / pprime,
abs(self[rndKeys[2]].y[0]) / pprime]
mutant = ((self[rndKeys[0]] + self[rndKeys[1]] +
self[rndKeys[2]]) / 3.0 +
(p[1] - p[0]) * (self[rndKeys[0]] - self[rndKeys[1]]) +
(p[2] - p[1]) * (self[rndKeys[1]] - self[rndKeys[2]]) +
(p[0] - p[2]) * (self[rndKeys[2]] - self[rndKeys[0]]))
else:
# use standard DE/rand/1 strategy
rndKeys = self.getRndMembers(3, parentKey)
mutant = self[rndKeys[0]] + self.F * (self[rndKeys[1]] -
self[rndKeys[2]])
else:
raise ValueError
return mutant
def __repr__(self):
stats = self.getStats()
str = """Object type: class Population
Generation: %s \t Population Size: %s \t Depth: %s """ % (
self.generation, self.np, self.nDim)
str += """\nStats:
Best cost: %s \t Worst cost: %s
Mean cost: %s \t Standard Deviation: %s
Fractional Tolerance: %s \t Chi-Square Tolerance: %s \t NDF: %s
Mean parameter values: %s
Covariance Matrix: %s
""" % stats
return str
def isComplete(self):
""" Test to see whether all Member objects in this population
have the same generation; returns the number of Member objects
which have gen index less than the population gen index."""
# *** should change the name, since 'is' in the name implies
# *** that the function will return a Boolean!
if len(self) < self.np:
print """Ack! Population should have %s Member objects, but
only found %s!""" % (self.np, len(self))
count = 0
for key in self.keys():
if self[key].generation < self.generation:
count += 1
elif self[key].generation > self.generation:
# found a Member object with a larger generation index;
# can either raise an error or take on the new gen index.
# For now, print a message; if we do see a higher gen index
# we should call isComplete() recursively, since count needs
# to be updated.
print "Ack! Member object with key %s has a higher gen index!" \
% key
if count == 0:
orderedKeys = self.getSortedKeys()
self.ibest = orderedKeys[0]
self.iworst = orderedKeys[-1]
return count
def getTrialMember(self, parentKey):
""" Generate a new trial vector to possibly replace the ith member
from members of the current population."""
## if not isinstance(parent, Member):
## raise TypeError, ('Member.getTrialMember(parent): parent must be a %s instance.'
## % self.__class__)
# trial members get a negative generation number
mutant = self.getMutant(parentKey)
# Perform the crossover operation -- trialMember will get the
# popIndex of the parent
trialMember = self[parentKey].crossOver(self.crossProb, mutant)
# Enforce Hard constraints
trialMember.repairHardConstr(self.hardConstraints)
# Check Soft constraints
trialMember.repairSoftConstr(self.softConstraints)
return trialMember
def saveMe(self, filename='popDB.dbm'):
""" Append the current Population instance to a DBM file with the
Python shelve module (filename can refer to a new or existing file). """
popDB = shelve.open(filename)
popDB[self.getKey()] = self
popDB.close()
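# --- Illustrative addition (not part of the original DeDriver.py) -----------
# Sketch of the generational loop the Population class supports, with a toy
# sphere objective standing in for the external (CAF-evaluated) cost function.
# It also shows the string form of hard constraints ('expr <= 0' is implicit),
# which setPopParmBounds() parses into per-parameter bounds, and the usual DE
# acceptance rule: a trial replaces its parent only if its cost is no worse.
def _populationLoopExample(nGen=5):
    """ Sketch: run a few DE generations on f(x) = sum(x_i**2). """
    pop = Population(dim=2, fdim=1, popSize=8)
    pop.hardConstraints = ('x[0] - 50.0', '-x[0] - 50.0')   # -50 <= x[0] <= 50
    pop.initialize()
    for key in pop.keys():
        pop[key].y = [sum(xi**2 for xi in pop[key].x)]
    for gen in range(nGen):
        for key in pop.keys():
            trial = pop.getTrialMember(key)
            if not trial.isValid:
                continue
            trial.y = [sum(xi**2 for xi in trial.x)]
            if trial.y[0] <= pop[key].y[0]:
                trial.generation = pop.generation + 1
                pop[key] = trial
        pop.setGeneration(pop.generation + 1)
        pop.isComplete()   # refreshes ibest/iworst
    return pop[pop.ibest]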
#--------------------------------------------------------------------------
class SAPopulation(Population):
""" Subclass of Population class for use with self-adaptive DE.
x[0] -> F
x[1] -> Cr
y[0] -> density objective
y[1] -> gain objective
"""
_defInitRange = (0.01,0.99)
def __init__(self, dim=1, fdim=1, popSize=4, gen=-1, prob=0.1,
f=0.5, mt=0.05, histlen=5, lam=0.2, initGauss=False):
self.nDim = dim
self.fDim = fdim
self.histLen = histlen
self.lam = lam
self.fi = 0
self.generation = gen ## May want to take 'gen' out of constructor arglist
self.np = popSize
self.crossProb = prob
self.F = f
self.mt = mt ## only used for rand-trig, best-trig strategies
self.initGauss = initGauss
self.initGaussParms = {}
self.ibest = ''
self.iworst = ''
#self.cmpMetric = Population._cmpMetrics[0]
self.strategy = ('rand-trig',1)
self.hardConstraints = ()
self.softConstraints = ()
self.initRange = [(SAPopulation._defInitRange[0],
SAPopulation._defInitRange[1])]*self.nDim
self.memberColl = {}
for i in range(self.np):
tmp = SAMember(self.nDim, self.fDim, self.generation, i, self.histLen)
tmpkey = tmp.getKey()
for j in range(tmp.nDim):
#tmp[j] = tmp.getRandom(j, lo, hi)
tmp[j] = 0.0
self.memberColl[tmpkey] = tmp
if i == 0:
self.ibest = tmpkey
self.iworst = tmpkey
def getMutant(self, parentKey):
""" Generate a mutant Member object according to the modified
DE/rand/1 scheme. Accepts a parent key as input and returns a new
Member object.
"""
if parentKey not in self.keys():
raise KeyError, ('Key %s is not in this Population!'
% parentKey)
rndKeys = ()
mutant = self[parentKey].makeTrial()
mu = 0.5
#sigma = 0.1
#sigma = 0.3
sigma = 0.4
## scale = random.gauss(mu,sigma)
# Breit-Wigner / Cauchy pdf
# scale = mu + sigma * math.tan(math.pi * (random.random() - 0.5))
scale = random.uniform(0.0,1.0)
if self.strategy == ('rand',1):
rndKeys = self.getRndMembers(3, parentKey)
mutant = self[rndKeys[0]] + scale * (self[rndKeys[1]] -
self[rndKeys[2]])
elif self.strategy == ('rand-trig',1):
rndKeys = self.getRndMembers(3, parentKey)
if random.random() <= self.mt:
# 'mutant' is biased toward the region of lower function
# value in the available parameter space (defined by
# the 'rndKeys' list)
pprime = (abs(self[rndKeys[0]].y[self.fi]) +
abs(self[rndKeys[1]].y[self.fi]) +
abs(self[rndKeys[2]].y[self.fi]))
p = [abs(self[rndKeys[0]].y[self.fi]) / pprime,
abs(self[rndKeys[1]].y[self.fi]) / pprime,
abs(self[rndKeys[2]].y[self.fi]) / pprime]
mutant = ((self[rndKeys[0]] + self[rndKeys[1]] +
self[rndKeys[2]]) / 3.0 +
(p[1] - p[0]) * (self[rndKeys[0]] - self[rndKeys[1]]) +
(p[2] - p[1]) * (self[rndKeys[1]] - self[rndKeys[2]]) +
(p[0] - p[2]) * (self[rndKeys[2]] - self[rndKeys[0]]))
else:
# use standard DE/rand/1 strategy
rndKeys = self.getRndMembers(3, parentKey)
mutant = self[rndKeys[0]] + scale * (self[rndKeys[1]] -
self[rndKeys[2]])
return mutant
def front(self, SubPopKeys):
""" Recursive function to identify the non-dominated
individuals in the population.
"""
if len(SubPopKeys) == 1:
return SubPopKeys
else:
halfkey = int(math.floor(len(SubPopKeys)/2.))
# "Top half" of sub-population
TKeys = self.front(SubPopKeys[:halfkey])
# "Bottom half" of sub-population
BKeys = self.front(SubPopKeys[halfkey:])
# "Merged" sub-population
MKeys = copy.deepcopy(TKeys)
# Create the merged set: M := T Union {B_i},
# where {B_i} is the set of Members in B which
# are non-dominated w.r.t. all Members in T
nondomflag = True
for bkey in BKeys:
nondomflag = True
for tkey in TKeys:
if self[tkey] < self[bkey]:
# tkey dominates bkey, so bkey does not
# get added to the merged population.
nondomflag = False
break
if nondomflag:
MKeys.append(bkey)
return MKeys
def nonDomSet(self):
""" Kung et al.'s Efficient Method for sorting a popultion to
identify the non-dominated individuals (Ref: Kung et
al. 1975).
"""
# First, sort the population by increasing value of the first
# objective function value
#
# There's got to be a better way to do this than making a copy of
# the underlying dictionary...
PKeyVals = sorted(self.memberColl.items(),
lambda a,b: cmp(a[1].y[0],
b[1].y[0]))
PKeys = []
for kv in PKeyVals:
PKeys.append(kv[0])
# Now recursively sort the set
return self.front(PKeys)
## def update(self):
## """ Update the population; remove all dominated individuals.
## """
## if self.generation % 2 == 0:
## nonDomKeys = self.nonDomSet()
## mbrkeys = self.keys()
## for key in mbrkeys:
## if key not in nonDomKeys:
## mbrkeys.remove(key)
## while len(mbrkeys) < 4:
## # need to make sure we have enough members to
## # perform a mutation for the control parms.
## mbrkeys.extend(self.getRndMembers(1,*mbrkeys))
## for key in self.keys():
## if key not in mbrkeys:
## del self.memberColl[key]
# def update(self):
# """ Update the population; remove all dominated individuals.
# """
# if self.generation % 2 == 0:
# nonDomKeys = self.nonDomSet()
# ## while len(nonDomKeys) < 4:
# #minkeys = max(int(math.ceil(0.10*self.np)), 4)
# minkeys = max(int(math.ceil(0.30*self.np)), 8)
# maxtrials = len(self.memberColl)
# ntrials = 0
# while (len(nonDomKeys) < minkeys and
# ntrials < maxtrials):
# # need to make sure we have enough members to
# # perform a mutation for the control parms.
# tmp = self.getRndMembers(1,*nonDomKeys)
# uniq = True
# for key in nonDomKeys:
# if not self.memberColl[tmp[0]].isUnique(self.memberColl[key]):
# uniq = False
# if uniq:
# nonDomKeys.extend(tmp)
# ntrials += 1
# for key in self.keys():
# if key not in nonDomKeys:
# del self.memberColl[key]
def update(self):
""" Update the population; remove all dominated individuals.
"""
if self.generation % 2 == 0:
nonDomKeys = self.nonDomSet()
## while len(nonDomKeys) < 4:
# minkeys = max(int(math.ceil(0.10*self.np)), 4)
#minkeys = max(int(math.ceil(0.10*self.np)), 10)
minkeys = max(int(math.ceil(0.20*self.np)), 8) # min(20,self.np)
tmpkeys = copy.deepcopy(self.keys())
random.shuffle(tmpkeys)
if len(tmpkeys) > minkeys:
for key in tmpkeys:
if key not in nonDomKeys and len(self.keys()) > minkeys:
del self.memberColl[key]
def densityFcn(self, mainPop, child):
""" The distance objective function for use in self-adapting
the DE control parms. The largest hypersphere in which
        'child' is the only individual is a measure of the
        crowdedness around individual 'child' (NSGA-II's density
        function). The nearest neighbor will reside on the surface of
        this hypersphere.
        Could use niche count instead (as in NPGA); such kernel-based density
        estimation techniques have been shown to perform better than
        the 'nearest neighbor' technique implemented here (also in
        NSGA-II). Reference:
        M. Laumanns, E. Zitzler, L. Thiele, On the Effects of Archiving,
        Elitism, and Density Based Selection in Evolutionary
Multi-objective Optimization. In Proceedings of EMO 2001,
pp. 181-196, 2001.
"""
# NB! This is a minimization objective for SADE
d = []
x = [0.0]*mainPop.nDim
xmin = [0.0]*mainPop.nDim
xmax = [0.0]*mainPop.nDim
for i in xrange(mainPop.nDim):
for key in mainPop.keys():
x[i] = mainPop[key][i]
xmin[i] = min(x)
xmax[i] = max(x)
for key in mainPop.keys():
if key != child.getKey():
d.append(child.dist(mainPop[key], xmin, xmax))
return -min(d)
def gainFcn(self, mainPop, parent, child):
""" The norm'd distance traveled 'downward' in function space
for use in self-adapting the DE control parms. NB! The
normalization is w.r.t. the f_min and f_max found in the
current gen's population.
INPUT: parent -- parent Member object
in the current gen's population
child -- child Member object which is
challenging parent (NB! needs to be
a full Member object!)
"""
# Implementation for arbitrary number of objective functions:
# NB! Minimization objective! (reflected in sign of 'gain')
fmin, fmax, gain = 0.0, 0.0, 0.0
flist = []
# Collect all the function values for the main population
# so we can normalize the gain between parent and child.
for i in xrange(mainPop.fDim):
flist = []
flist.append(child.y[i])
for key in mainPop.keys():
flist.append(mainPop[key].y[i])
fmin = min(flist)
fmax = max(flist)
delta = fmax - fmin + 1.0E-6
gain += (child.y[i] - parent.y[i]) / delta
return gain
def sortedFront(self, fi=0):
""" Sort the population according to the given objective
function index 'fi'. Returns an ordered list of Member keys.
"""
sort = sorted(self.memberColl.items(),
lambda a,b: cmp(a[1].y[fi],
b[1].y[fi]))
keys = []
for item in sort:
keys.append(item[0])
return keys
def getPopFrac(self, zeta=0.1, fi=0):
""" Utility function; returns a list of keys (from the current
population) which represent the first 'zeta' fraction of the
population, sorted according to objective 'fi'.
"""
sfKeys = self.sortedFront(fi)
if zeta > 1.0 or zeta < 0.:
raise ValueError, 'zeta required to be in [0,1].'
uindex = int(zeta*len(sfKeys))
if uindex < 1:
uindex = 1
return sfKeys[:uindex]
def selCtrlMember(self, zeta=0.1, fi=0):
""" Select the DE control parameters to use from the Pareto
front according to parameter 'zeta'. Returns a SAMember object.
"""
return copy.deepcopy(self.memberColl[random.choice(self.getPopFrac(zeta,fi))])
def evalCtrl(self, mainPop, parent, child, ctrlChild):
""" Evaluate a set of DE control parameters according to the
parent and child of the main population.
OUTPUT: y[0] -> density objective
y[1] -> gain objective
"""
del ctrlChild.yHist[0:1]
ctrlChild.yHist.append([self.densityFcn(mainPop, child),
self.gainFcn(mainPop, parent, child)])
ysum = [0.0,0.0]
n = range(self.histLen)
w = []
for i in n:
w.append(math.e**(-self.lam*i))
ysum[0] += ctrlChild.yHist[i][0] * w[i]
ysum[1] += ctrlChild.yHist[i][1] * w[i]
return ysum
def __repr__(self):
sortedKeysA = self.sortedFront(0)
sortedKeysB = self.sortedFront(1)
str = 'Most exploratory SAMember:\n'
str += `self[sortedKeysA[0]]`
sortedKeys = self.sortedFront(1)
str += '\nMost exploitative SAMember:\n'
str += `self[sortedKeysB[0]]`
str += '\n'
return str
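# --- Illustrative addition (not part of the original DeDriver.py) -----------
# evalCtrl() above aggregates a control-parameter individual's (density, gain)
# history with geometric weights w_i = exp(-lam * i) over the history index i
# (index 0 is the oldest entry after the rolling update).  The same weighting
# in isolation:
def _weightedHistoryExample(yHist, lam=0.2):
    """ Sketch: reproduce evalCtrl()'s weighted objective sums for a history. """
    ysum = [0.0, 0.0]
    for i in range(len(yHist)):
        w = math.e**(-lam * i)
        ysum[0] += yHist[i][0] * w
        ysum[1] += yHist[i][1] * w
    return ysum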
#--------------------------------------------------------------------------
class DeDriver:
""" Driver class for the parellelized Differential Evolution algorithm.
"""
# Dictionary of available farms and their crucial info:
# entries are like this: (group, available queues)
# Standard queues and their limits (for now just realtime limit, in seconds)
_stdQueueData = {}
_stdQueueData['short'] = 6 * 3600.0
_stdQueueData['medium'] = 30 * 3600.0
_stdQueueData['long'] = 72 * 3600.0
_stdQueues = _stdQueueData.keys()
# Only dCAFs running Scientific Linux, for now
# _availFarms dictionary keys are the strings used to specify the
# farm at submission time; the last element of the value tuple is
# the farm's 'official' name (really only needed for cnaf!)
_availFarms = {}
_availFarms['local'] = ('common', _stdQueues, 'local')
## _availFarms['ASCAF'] = ('cdf', _stdQueues, 'ascaf')
_availFarms['BCNCAF'] = ('common', _stdQueues, 'bcncaf')
_availFarms['RUTCAF'] = ('common', _stdQueues, 'rutcaf')
## _availFarms['KORCAF'] = ('common', _stdQueues, 'korcaf')
_availFarms['GroupCAF'] = ('common', _stdQueues, 'caf')
_availFarms['Fermigrid'] = ('common', _stdQueues, 'fermigrid')
_availFarms['NAmCAF'] = ('common', _stdQueues, 'namcaf')
## _availFarms['LCGCAF'] = ('common', _stdQueues, 'lcgcaf')
## _availFarms['CNAFCAF'] = ('common', _stdQueues, 'cnaf')
## _availFarms['LyonCAF'] = ('common', ('short', 'medium'), 'lyoncaf')
## _availFarms['SDSCCAF'] = ('common', _stdQueues, 'sdsccaf')
## _availFarms['TORCAF'] = ('common', _stdQueues, 'torcaf')
#### STOP STOP STOP
#### WHAT ABOUT OTHER GROUPS? IF THE USER SPECIFIES 'MCprod', WE SHOULD
#### BE ABLE TO SUBMIT JOBS USING 'MCprod' OR 'common'!!
# Define the available convergence strategies
_availConvgStrats = ('chisq', 'fractol')
#_safetyFac = 3600.0
# Define an additive safety factor, used in determining whether we should
# worry that the last job submitted has not completed. Make it rather
# long to account for busy dCAFs.
#_safetyFac = 0.75 * _stdQueueData['short']
_safetyFac = 0.85 * _stdQueueData['short']
def __init__(self, dim=1, fdim=1, np=4, gen=-1, fname='de_state.dat.gz',
email='', costFcn=None, costArgs=None):
""" Constructor for the DeDriver class.
"""
# Cost function (python function) for local running:
self.costFcn = costFcn
self.costArgs = costArgs
self.sade = False
# Create the population(s)
self.population = Population(dim, fdim, np, gen)
# Create the control parm population
## self.ctrlPop = SAPopulation(2, 2, int(np/2.0), gen)
self.ctrlPop = SAPopulation(2, 2, np, gen)
# Set the control parm population's constraints
## # The radical of D.Zaharie's critical scale factor should be positive:
## rad = ('2.*x[0]**2*x[1]+(x[1]**2-2.*x[1])/'
## + `self.ctrlPop.np` + '1.0')
## # The actual critical variance scale factor:
## critSF = '-math.sqrt(' + rad + ')+1.0'
self.ctrlEps = 3E-2
critSF = '-2.0*%d*x[0]**2+2.0-x[1]+2*%.1E*%d/x[1]' % (self.population.np,
self.ctrlEps,
self.population.np)
self.ctrlPop.hardConstraints = ('-x[0]+0.05','x[0]-0.99',
'-x[1]+0.0', 'x[1]-0.99', critSF)
## self.ctrlPop.softConstraints = (critSF,)
## self.ctrlPop.hardConstraints = ('-x[0]+0.05','x[0]-0.99',
## '-x[1]+0.0', 'x[1]-0.5')
self.ctrlPop.initRange[0] = (0.3,0.99)
self.ctrlPop.initRange[1] = (0.0,0.3)
#
self.stateFName = fname
# load the population from a file?
self.initFromFile = False
self.saveInitPop = False
self.popFile = ''
# Differential Evolution Parameters
self.pareto = False
self.xi = 0.25 # phase 1 strategy used for first (xi*Gmax) generations
self.dimThresh = 6
self.zeta = 0.4
self.fi = 0
# Phase 1 (Exploration) parameters
self.phase1LoDimCr = 0.5
self.phase1HiDimCr = 0.7
self.phase1F = 0.8
self.phase1Strategy = ('rand', 1) # 'DE/rand/1/bin'
# Phase 2 (Elitism) parameters
self.phase2LoDimCr = 0.3
self.phase2HiDimCr = 0.4
self.phase2F = 0.6
self.phase2Strategy = ('best', 2) # 'DE/best/2/bin'
# number of previous generations to look at for convergence
self.m = 5
# List to hold the statistics of the previous 'm' generations
# Use integer keys, value should be a tuple:
# (bestCost, worstCost, meanCost, stdDev, fracTol, chisq, ndf)
# For multi-objective optimization, each tuple entry should
# be a tuple whose elements correspond to the various
# functions.
self.prevMGenStats = [(Population._LARGEVAL,
Population._LARGEVAL,
Population._LARGEVAL,
Population._LARGEVAL,
Population._LARGEVAL,
Population._LARGEVAL,
Population._LARGEVAL)] * self.m
# Construct a list which will hold the max
# delta(Stat(i,gen_j), Stat(i,gen_j-1)) for the prev m generations
self.maxDeltaStats = [Population._LARGEVAL] * len(self.prevMGenStats[0])
# CONVERGENCE CRITERIA
self.Gmin = 10 # run 10 generations, minimum
self.Gmax = 50
self.convgStrategy = DeDriver._availConvgStrats[1]
# Use 'tol' to define the tolerance for the convergence strategy,
# use 'deltaTol' to define the tolerance for the time-stability of the
# convergence.
self.tol = 1.0E-3
self.deltaTol = 1.0E-4
self.truncFrac = 0.15
# logistical files
self.lockFName = 'dedriver.loc'
self.fcnEvalFName = 'de_fcn.dat'
# CAF job parameters
if email != '':
self.emailAddr = email
else:
self.emailAddr = os.environ['USER']+'@fnal.gov'
self.cafFarm = random.choice(DeDriver._availFarms.keys())
# Get the absolute path of the working directory;
# NOTE: DO NOT USE THIS ON THE dCAFs!! THE GLIDE-CAFs ESPECIALLY USE
# SUPER-LONG PATHNAMES WHICH ARE LIABLE TO SCREW STUFF UP (e.g. Fortran)
self.workDir = os.getcwd()
self.cafBuildDir = os.path.join(self.workDir, 'build_de')
self.scriptsDir = os.path.join(self.workDir, 'scripts')
self.cafQueue = 'short'
self.cafGroup = 'common'
self.cafOutLocale = '' # e.g. '<EMAIL>:~/'
self.cafDataSrc = 'None' # should only be changed for SAM access
self.cafNEvalsPerSeg = 1
self.cafCmd = ''
# This is only needed for monolithic tarball submission:
self.cafFcnTarball = ''
# Split tarball submission:
self.cafFcnTarballUrl = ''
# This is the submission tarball in the split framework; should contain
# the master script and a nested 'user' tarball (self.cafNestedTarball)
self.cafSubTarball = 'deDriveCaf.tgz'
        # This is the 'user' tarball in the split tarball framework; should contain
# a pickled DeDriver instance (the state file), plus
# any other scripts required by DeDriver
self.cafNestedTarball = 'deFiles.tgz'
self.cafSubmitScript = os.path.join(self.scriptsDir, 'submit_DE')
self.cafBuildTarScript = os.path.join(self.scriptsDir, 'build_user_tarball')
self.cafMasterScript = 'deCaf.sh'
self.cafSegmentMap = {}
self.cafSegment = 0
self.cafOutFName = ''
self.cafSubmitTime = 0.0 # in seconds since the epoch (a float)
self.cafJobCompltFrac = 0.2
self.local = False
self.runLocalDir = 'de_local'
# Web Monitoring Parameters
## self.monitorNode = ''
## self.monitorDir = ''
self.monitorLoc = '' # e.g. '<EMAIL>:~/'
self.monitorUrl = ''
self.verbose = False
self.debug = False
self.logFName = 'de.log'
def __repr__(self):
""" Overload __repr__() so that `deDriver` will print the algorithm
state to STDOUT.
"""
stats = self.population.getStats(self.truncFrac)
outStr = ("""DeDriver state:
Generation: %s
best trial solution's cost value: %s
best trial solution: %s\n
worst trial solution's cost value: %s
worst trial solution: %s\n
mean cost value: %s
standard deviation of cost values: %s
fractional tolerance: %s
chi-square: %s
ndf: %s
np_trunc: %s\n
Parameter mean values:\n%s\n
Covariance Matrix for np_trunc members:\n%s
Name of dCAF last used: %s
Time of last CAF job submission: %s""" %
(self.population.generation,
stats[0],
self.population[self.population.ibest].x,
stats[1],
self.population[self.population.iworst].x,
stats[2],
stats[3],
stats[4],
stats[5],
stats[6],
self.population.getNpTrunc(self.truncFrac),
stats[7],
self.population.getCovMatRepr(stats[8]),
self.cafFarm,
time.ctime(self.cafSubmitTime)))
return outStr
def printFull(self):
""" Return a string with the full instance state.
"""
ph1Cr = 0.0
ph2Cr = 0.0
if self.population.nDim < self.dimThresh:
ph1Cr = self.phase1LoDimCr
ph2Cr = self.phase2LoDimCr
else:
ph1Cr = self.phase1HiDimCr
ph2Cr = self.phase2HiDimCr
outStr = """DeDriver Algorithmic Parameters:
--------------------------------
Pareto: %s
Generation: %s
Population size (Np): %s
Population Depth (D): %s
Hard Constraints: %s \n
Soft Constraints: %s \n
Initial Ranges: %s \n
Save Initial Population: %s
Load Initial Population: %s
Phase 1 Cr: %s
Phase 1 F: %s
Phase 1 Strategy: %s
Phase 2 Cr: %s
Phase 2 F: %s
Phase 2 Strategy: %s
Xi: %s
trig-prob(m_t): %s
DeDriver Convergence Parameters:
--------------------------------
Gmin: %s
Gmax: %s
Convergence strategy: %s
m: %s
Tolerance: %s
Max tolerance for last 'm' generations: %s
ChiSquare truncation fraction: %s
DeDriver CAF Parameters:
------------------------
Objective Function Command: %s
Objective Function Tarball URL: %s
Queue: %s
Group: %s
Output location: %s
Number of function evaluations per CAF segment: %s
Minimum fraction of completed CAF segments: %s
DeDriver Misc Logistical Parameters:
------------------------------------
Email: %s
Work dir: %s
State file: %s
Initial Population file: %s
Lock file: %s
Objective function output file: %s\n
""" % (self.pareto, self.population.generation, self.population.np,
self.population.nDim, ', '.join(self.population.hardConstraints),
', '.join(self.population.softConstraints),
`self.population.initRange`, self.saveInitPop, self.initFromFile,
ph1Cr, self.phase1F, self.phase1Strategy,
ph2Cr, self.phase2F, self.phase2Strategy, self.xi, self.population.mt,
self.Gmin, self.Gmax, self.convgStrategy, self.m, self.tol,
self.deltaTol, self.truncFrac,
self.cafCmd, self.cafFcnTarballUrl, self.cafQueue, self.cafGroup,
self.cafOutLocale, self.cafNEvalsPerSeg, self.cafJobCompltFrac,
self.emailAddr, self.workDir, self.stateFName, self.popFile,
self.lockFName, self.fcnEvalFName)
return outStr
def getKey(self):
""" Return a string which can be used as a dictionary key for the
current DeDriver instance. Form: 'de_state:5'
"""
return ':'.join((self.stateFName.split('.')[0],
`self.population.generation`))
def saveState(self):
""" Save the algorithm state to a gzipped file.
"""
try:
gfile = gzip.open(self.stateFName, 'wb')
cPickle.dump(self, gfile)
gfile.close()
except(IOError, gzip.zlib.error, cPickle.PicklingError):
return False
else:
return True
def shelveState(self):
""" Save the algorithm state to a gzipped shelve file;
this state can be retrieved later by keyed DB lookup (see
Python's shelve module). Currently, the DBM file is not
compressed.
"""
fname = self.stateFName.split('.')[0] + '_history.dbm'
try:
stateDB = shelve.open(fname)
stateDB[self.getKey()] = self
stateDB.close()
except:
return False
else:
return True
def pause(self):
""" Create the lock file, pausing the algorithm.
"""
Pause(self.lockFName)
def sendMail(self, subject, message):
""" Send mail to the user [wrapper for DeDriver.SendMail()]
"""
msg = ("DeDriver State File: %s\nDeDriver Function Command: %s\n\n"
% (self.stateFName, self.cafCmd))
msg += '-' * 30 + '\n\n' + message + '\n\n' + '-' * 30 + '\n\n' + `self`
return SendMail(self.emailAddr, subject, msg)
def writeLog(self, message):
""" Write a message to the log file.
"""
#msg = '\n\n' + '-' * 80 + '\n\n'
msg = '\n\n'
msg += '<<' + time.ctime(time.time()) + '>>' + '\n'
#msg += message + `self`
msg += message
WriteLog(self.logFName, msg)
def getCafOutFileName(self, generation, segment):
""" Return a string var containing the CAF output filename for the
given segment; assume that this file is gzipped.
"""
return ('gen' + `generation` +
'_seg' + `segment` + '.dat.gz')
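        # For example (illustrative values only): getCafOutFileName(3, 7)
        # returns 'gen3_seg7.dat.gz'.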
def getCafStateFileName(self):
""" Return a string defining the state file name to be used
for CAF jobs. Hopefully, this will save the user from
inadvertently overwriting the current state file if they
should happen to expand a CAF output tarball in the DeDriver
working directory.
"""
return ('gen' + `self.population.generation` + '_' + self.stateFName)
def setCafOutFileName(self):
""" Set the CAF output filename for the current CAF segment.
"""
self.cafOutFName = self.getCafOutFileName(self.population.generation+1,
self.cafSegment)
def setDEStrategy(self, xi=0.7, dimThresh=6,
ph1LDCr=0.5, ph1HDCr=0.7, ph1F=0.8,
ph2LDCr=0.1, ph2HDCr=0.2, ph2F=0.6, mt=0.05,
ph1StratStr='rand-trig1', ph2StratStr='best1',
sade=False, zeta=0.5):
## ph1Strat=('rand', 1), ph2Strat=('best', 2)):
""" Set the parameters for the DE strategy.
"""
# Ugly implementation of DE strategy
ph1Strat = Population._strategies[-2]
ph2Strat = Population._strategies[1]
if ph1StratStr == 'rand-trig1':
ph1Strat = ('rand-trig',1)
elif ph1StratStr == 'rand1':
ph1Strat = ('rand',1)
elif ph1StratStr == 'best1':
ph1Strat = ('best',1)
elif ph1StratStr == 'best2':
ph1Strat = ('best',2)
elif ph1StratStr == 'rand-sort1':
ph1Strat = ('rand-sort',1)
elif ph1StratStr == 'best-trig1':
ph1Strat = ('best-trig',1)
#
if ph2StratStr == 'rand-trig1':
ph2Strat = ('rand-trig',1)
elif ph2StratStr == 'rand1':
ph2Strat = ('rand',1)
elif ph2StratStr == 'best1':
ph2Strat = ('best',1)
elif ph2StratStr == 'best2':
ph2Strat = ('best',2)
elif ph2StratStr == 'rand-sort1':
ph2Strat = ('rand-sort',1)
elif ph2StratStr == 'best-trig1':
ph2Strat = ('best-trig',1)
#
self.xi = xi
self.dimThresh = dimThresh
# Set Phase 1 (Exploration) parameters
if ph1Strat in Population._strategies:
self.phase1Strategy = ph1Strat
else:
self.phase1Strategy = ('rand', 1)
self.phase1LoDimCr = ph1LDCr
self.phase1HiDimCr = ph1HDCr
self.phase1F = ph1F
if self.population.nDim < dimThresh:
self.population.crossProb = self.phase1LoDimCr
else:
self.population.crossProb = self.phase1HiDimCr
self.population.strategy = self.phase1Strategy
self.population.F = self.phase1F
self.population.mt = mt
# Set Phase 2 (convergence) parameters
if ph2Strat in Population._strategies:
self.phase2Strategy = ph2Strat
else:
self.phase2Strategy = ('best', 2)
self.phase2LoDimCr = ph2LDCr
self.phase2HiDimCr = ph2HDCr
self.phase2F = ph2F
self.sade = sade
self.zeta = zeta
def setConstraints(self, softConstr=[], hardConstr=[]):
""" Set the constraints. Assume they are well-formed.
"""
self.population.hardConstraints = tuple(hardConstr)
self.population.softConstraints = tuple(softConstr)
def setConvergenceStrategy(self, strat, gmin=10, gmax=50,
tol=1.0E-3, dtol=1.0E-4, m=5, truncFrac=0.15):
""" Set the convergence strategy for the DE algorithm.
"""
if gmax < gmin and gmin > 0:
self.Gmax = gmin
elif gmax > 0:
self.Gmax = gmax
self.Gmin = gmin
if strat not in DeDriver._availConvgStrats:
self.convgStrategy = DeDriver._availConvgStrats[1]
else:
self.convgStrategy = strat
if tol > 0:
self.tol = tol
if dtol > 0:
self.deltaTol = dtol
if m < self.Gmax and m > 0:
self.m = m
else:
# setting self.m = self.GMax will effectively
# ensure that the algorithm will not converge before
# reaching Gmax generations.
self.m = self.Gmax
if truncFrac > 0.0 and truncFrac <= 1.0:
self.truncFrac = truncFrac
self.prevMGenStats = [(Population._LARGEVAL,
Population._LARGEVAL,
Population._LARGEVAL,
Population._LARGEVAL,
Population._LARGEVAL,
Population._LARGEVAL,
Population._LARGEVAL)] * self.m
self.maxDeltaStats = [Population._LARGEVAL] * len(self.prevMGenStats[0])
def setCafParms(self, cmd, neval=1, url='', tarfile='', queue='short',
group='common', outloc='', dataSrc='None', frac=0.2,
local=False, runLocalDir=''):
""" Set CAF parameters.
"""
## ms='./scripts/deCaf.sh',
## ss='./scripts/submit_DE',
## bs='./scripts/build_user_tarball',
# Error handling!
if not cmd:
raise ValueError, 'CAF Command not set!'
if not outloc:
# Construct the output location from the current nodename, username,
# and working directory
# Should probably check that fcpd is running on the local host...
self.cafOutLocale = (os.environ['USER'] + '@' + os.environ['HOST'] +
':' + self.workDir)
else:
self.cafOutLocale = outloc
self.cafFcnTarball = tarfile
self.cafFcnTarballUrl = url
self.cafCmd = cmd
self.cafNEvalsPerSeg = neval
self.cafQueue = queue
self.cafGroup = group
self.cafDataSrc = dataSrc
## self.cafMasterScript = ms
## self.cafSubmitScript = ss
## self.cafBuildTarScript = bs
self.cafJobCompltFrac = frac
self.local = local
if self.local:
self.cafFarm = 'local'
self.runLocalDir = runLocalDir
# now build self.cafSegmentMap, which need only be done once
# per minimization run.
nSeg = int(math.ceil(float(self.population.np) / float(self.cafNEvalsPerSeg)))
mPtr = 0
upper = 0
for i in range(1,nSeg+1):
if mPtr+self.cafNEvalsPerSeg < len(self.population):
upper = mPtr+self.cafNEvalsPerSeg
else:
upper = len(self.population)
self.cafSegmentMap[i] = tuple(range(mPtr, upper))
mPtr += self.cafNEvalsPerSeg
if self.debug:
print '\nCaf Segment Map: %s\n' % `self.cafSegmentMap`
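        # Worked example of the mapping built above (illustrative numbers):
        # with np = 10 population members and nEvalsPerSeg = 3, nSeg = 4 and
        #     cafSegmentMap = {1: (0, 1, 2), 2: (3, 4, 5),
        #                      3: (6, 7, 8), 4: (9,)}
        # i.e. 1-based CAF segment numbers map to tuples of the 0-based
        # population member indices evaluated by that segment.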
def initialize(self, ir=[], gaussParms=[]):
""" Initialize the Population.
"""
# Expect initRange to be a list of tuples, each tuple
# of the form (index, lobnd, hibnd); assume that
# the tuple values are valid.
# NOTE: if len(ir) < self.population.nDim, the remainder
# of the parameters will take their initial ranges from
# the default range, defined in Population.__init__()
#if len(self.popFile) == 0:
#
        # SADE --> INITIALIZE CONTROL PARM POPULATION!!
#
# NOTE: Gaussian parms will be used only for those
# decision vector elements for which they are
# defined; other elements will use a random uniform
# distribution using the bounds set in 'ir'.
#
if len(gaussParms) > 0:
self.population.initGauss = True
if not self.initFromFile:
for item in ir:
self.population.initRange[item[0]] = tuple(item[1:])
for item in gaussParms:
self.population.initGaussParms[item[0]] = tuple(item[1:])
self.population.initialize()
self.ctrlPop.initialize()
if self.saveInitPop:
try:
pfile = gzip.open(self.popFile, 'wb')
cPickle.dump(self.population, pfile)
cPickle.dump(self.ctrlPop, pfile)
pfile.close()
except:
print """%s: Warning: Failed to save initial population to file '%s'.
Continuing... """
else:
# load the population from a file
# population should be in a gzipped pickle file, not a DBM (shelve)
try:
pfile = gzip.open(self.popFile, 'rb')
# Weak design: order of objects in file should not be
# important!
tmp = cPickle.load(pfile)
tmpCtrlPop = cPickle.load(pfile)
pfile.close()
if not isinstance(tmp, Population):
                    raise TypeError, ('Expected a pickled instance of class Population,'
                                      + ' found a pickled object of type %s' % tmp.__class__)
if not isinstance(tmpCtrlPop, SAPopulation):
                    raise TypeError, ('Expected a pickled instance of class '
                                      + 'SAPopulation, found an object of type %s'
                                      % tmpCtrlPop.__class__)
if tmp.nDim != self.population.nDim:
raise ValueError, 'Expected D=%s, found D=%s' % (
self.population.nDim, tmp.nDim)
if tmp.np != self.population.np:
raise ValueError, 'Expected Np=%s, found Np=%s' % (
self.population.np, tmp.np)
if tmpCtrlPop.np < 1:
                    raise ValueError, ('Need at least ONE member in the '
                                       + 'control parm population.')
# If we've gotten this far, it should be safe to use the
# loaded Population object.
self.population = tmp
self.ctrlPop = tmpCtrlPop
            except (IOError, gzip.zlib.error,
                    cPickle.UnpicklingError, TypeError, ValueError), data:
print """%s: ERROR: %s.\nFailed to load initial population from file %s.
Exiting...
""" % (sys.argv[0], data, self.popFile)
sys.exit(1)
def updateMonitor(self):
""" Update the web-based monitor's data.
"""
msg = '\n' + `self` + '\n'
self.writeLog(msg)
# print " Monitor URL for this job: %s" % deDriver.monitorUrl
def getBestSubmit(self):
""" Get the best-submit dCAF for the current username.
"""
if self.local:
self.cafFarm = 'local'
return
global farmdict
try:
            # NOTE: best-submit lookup is currently disabled; the
            # unconditional raise below forces the random farm choice
            # in the except block.
raise ValueError
farmdict = {}
bestfarm = {'name':'', 'group':'', 'score':-500.0, 'free_vms':0}
# create the XML parser object
xp = xml.parsers.expat.ParserCreate()
xp.returns_unicode = False
xp.StartElementHandler = XmlStartElement
# Open the URL for the XML-based best-submit CGI script.
## urlstream = urllib.urlopen(
## 'http://dcafmon.fnal.gov/cgi-bin/dcafmon/xml/best_submit.py?user=%s'
## % os.environ['USER'])
urlstream = urllib.urlopen(
'http://cdfcaf.fnal.gov/cgi-bin/dcafmon/xml/best_submit.py?user=%s'
% os.environ['USER'])
# Get the actual data.
bsxml = urlstream.read()
xp.Parse(bsxml, True)
#
groups = ['common', 'group_CDFgeneric']
if self.cafGroup == 'MCprod':
groups.append('group_MCprod')
# Loop through the farms
for farm in farmdict.keys():
# Consider only those farms which are 'available' (ie. they
# are dCAFs and the desired queue is available on each farm)
if (farm in DeDriver._availFarms.keys() and
self.cafQueue in DeDriver._availFarms[farm][1]):
for grp in farmdict[farm]:
# farmdict[farm] is a list of dictionaries.
# grp will be a dictionary of attributes for the
# current account group for farm 'farm'
# --> grp['name'] is the account group name
if (grp['name'] in groups and
float(grp['score']) > bestfarm['score'] and
int(grp['free_vms']) >= bestfarm['free_vms']):
bestfarm['name'] = farm
bestfarm['group'] = grp['name']
bestfarm['score'] = float(grp['score'])
bestfarm['free_vms'] = int(grp['free_vms'])
self.cafFarm = bestfarm['name']
except:
# Problems with the best submit script, so choose
# the farm randomly
farm = ''
farmKeys = DeDriver._availFarms.keys()
if len(farmKeys) == 2:
# only one choice; choose the one that's not 'local'
for f in farmKeys:
if f != 'local':
farm = f
break
elif len(farmKeys) > 2:
# we have choices!
while True:
farm = random.choice(farmKeys)
if (farm != self.cafFarm and farm != 'local' and
self.cafQueue in DeDriver._availFarms[farm][1]):
                        # This farm differs from the one used for the last
                        # job and offers the requested queue, so accept it;
                        # otherwise keep looping and pick again.
break
## else:
## raise ValueError, 'No valid farms to choose from!'
if farm == '' or farm == 'local':
raise ValueError, 'No valid farms to choose from!'
self.cafFarm = farm
# OTHER ERROR HANDLING? .k5login checking?
def setupCafJob(self):
""" Setup the CAF job.
"""
self.getBestSubmit()
self.cafSubTarball = 'deDriveCaf_gen%s.tgz' % self.population.generation
self.cafNestedTarball = 'deFiles_gen%s.tgz' % self.population.generation
# Build the job submission tarball.
# MONOLITHIC Tarball submission:
# The submission tarball should contain both the
# user's tarball (which contains everything necessary
# for the objective function eval) and the DeDriver files
# (the pickled DeDriver instance + necessary scripts)
# SPLIT TARBALL FRAMEWORK:
# The submission tarball should *only* contain the
# DeDriver files (state file + scripts) + the master
# script which handles downloading and expanding the
# user's tarball (analogous to mcProduction/scripts/cafmc.sh).
# ** The tarball which contains all the files for function evaluation
# ** should be housed on a public web server (URL provided by user in
# ** DeDriver setup mode).
#--
if self.cafFcnTarballUrl != '':
            # Assume the user wants to use the split tarball framework.
# Copy the files needed for the CAF job to the build directory
if os.path.isdir(self.cafBuildDir):
# build directory exists; remove it, ignoring errors
try:
shutil.rmtree(self.cafBuildDir, True)
except:
pass
os.mkdir(self.cafBuildDir)
# Copy the Python distribution to the build dir
# REALLY need a check on the Python version available
# from the system here!!
try:
shutil.copytree(os.path.join(self.workDir, 'bin'),
os.path.join(self.cafBuildDir, 'bin'))
shutil.copytree(os.path.join(self.workDir, 'lib'),
os.path.join(self.cafBuildDir, 'lib'))
shutil.copytree(os.path.join(self.workDir, 'include'),
os.path.join(self.cafBuildDir, 'include'))
except:
# STOP STOP STOP
msg = """DeDriver::setupCafJob() Failed to find the proper
Python distribution; job submission is impossible. The
DeDriver algorithm will be paused (lock file: %s.)\n""" % (
self.lockFName)
if self.debug:
print msg
else:
self.writeLog('ERROR -- DeDriver::setupCafJob()\n'+msg)
self.sendMail('DeDriver::setupCafJob() ERROR', msg)
self.pause()
return False
return True
# May want to be more flexible about the path to the scripts...
deScript = os.path.basename(sys.argv[0])
try:
shutil.copy(os.path.join(self.workDir, deScript),
self.cafBuildDir)
except:
msg = """DeDriver.setupCafJob() failed to find the
necessary shell scripts; job submission is impossible.
The DeDriver algorithm will be paused."""
if self.debug:
print msg
else:
self.writeLog('ERROR -- DeDriver.setupCafJob()\n'+msg)
self.sendMail('DeDriver::setupCafJob() ERROR', msg)
self.pause()
return False
try:
shutil.copy(os.path.join(self.workDir, self.stateFName),
os.path.join(self.cafBuildDir,
self.getCafStateFileName()))
except:
msg = """DeDriver.setupCafJob() failed to find the DE
state file %s; job submission is impossible.
The DeDriver algorithm will be paused (lock file: %s)""" % (
self.stateFName, self.lockFName)
if self.debug:
print msg
else:
self.writeLog('ERROR -- DeDriver.setupCafJob()\n' + msg)
self.sendMail('DeDriver::setupCafJob() ERROR', msg)
self.pause()
return False
# Copy the fcp stuff to the build directory...
try:
shutil.copy(os.path.join(self.scriptsDir, 'setup_fcp.sh'),
self.cafBuildDir)
shutil.copytree(os.path.join(self.workDir, 'fcp'),
os.path.join(self.cafBuildDir, 'fcp'))
shutil.copytree(os.path.join(self.workDir, 'fcslib'),
os.path.join(self.cafBuildDir, 'fcslib'))
except:
msg = """DeDriver.setupCafJob() failed to copy the fcp-
related files to the build directory."""
if self.debug:
print msg
else:
self.writeLog('ERROR --DeDriver.setupCafJob()\n' + msg)
self.sendMail('DeDriver.setupCafJob() ERROR', msg)
self.pause()
return False
# any other scripts to copy to the build dir?
# (The master logistical script run on the CAF, deCaf.sh, will be copied
# automagically by build_user_tarball).
buildCmd = ('%s -b %s -s %s -t %s -u %s' %
(self.cafBuildTarScript,
self.cafBuildDir,
os.path.join(self.scriptsDir, self.cafMasterScript),
self.cafSubTarball,
self.cafNestedTarball))
if self.debug:
print 'DeDriver::setupCafJob(): buildCmd = %s' % buildCmd
child = popen2.Popen4(buildCmd)
retVal = child.wait()
if retVal != 0:
# send mail to user with the error message
subject = 'DeDriver Job Submission Error'
message = """DeDriver Job Submission Error:
Problems with the command\n\n%s\n\nThe error:\n\n%s""" % (
buildCmd, child.fromchild.read())
child.tochild.close()
child.fromchild.close()
                self.writeLog(subject+'\n\n'+message)
self.sendMail(subject, message)
# Write the lock file
self.pause()
return False
else:
# Remove the nested tarball build directory, ignoring errors
child.tochild.close()
child.fromchild.close()
shutil.rmtree(self.cafBuildDir, True)
return True
else:
# use the Monolithic tarball framework
# Disable, for now
return False
def submitCafJob(self):
""" Submit the CAF job.
"""
# Check that the proper environment is setup
# Build the tarball, etc.
if not self.setupCafJob():
return False
else:
# Set the CAF group to use (could be 'common' or 'MCProd')
group = self.cafGroup
# Is this bit necessary? or will 'common' be translated
# correctly by the dCAFs which have a different name for
# that group?
if self.cafGroup == 'common':
group = DeDriver._availFarms[self.cafFarm][0]
#
# Construct the DeDriver command for caf mode; note that
# we have to use a raw string with the escaped '$' in order
# to make sure the CAF segment number gets passed.
deScript = os.path.basename(sys.argv[0])
#deCmd = r'./%s --state-file %s -j $ caf' % (deScript, self.stateFName)
deCmd = r'./%s --state-file %s -j $ caf' % (deScript,
self.getCafStateFileName())
segments = '1:%s' % len(self.cafSegmentMap)
# Construct the submission command
submitCmd = '%s' % self.cafSubmitScript
if self.debug:
submitCmd += ' -v debug_only'
if self.verbose:
submitCmd += ' -v yes'
submitCmd += r' -n -c %s -f %s -g %s -q %s -S %s -s %s -t %s -w %s -o %s' % (
self.cafNestedTarball, DeDriver._availFarms[self.cafFarm][2],
group, self.cafQueue,
self.cafMasterScript, segments, self.cafSubTarball,
self.cafFcnTarballUrl, self.cafOutLocale)
if self.local:
submitCmd += r' -R %s' % (self.runLocalDir)
submitCmd += r' %s' % (deCmd)
if self.debug or self.verbose:
self.writeLog('CAF Job Submission command:\n\n%s\n\n' % submitCmd)
#self.cafSubmitTime = time.time()
# NOTE: submission tarball is *not* removed.
#return True
# Submit the job
child = popen2.Popen4(submitCmd)
# Do we need / want to capture the JOB ID?
# Close the child's standard input
child.tochild.close()
retVal = child.wait()
# Trap the output of CafSubmit, send it to the user
# if CafSubmit terminates with an error condition.
if retVal != 0:
msg = child.fromchild.read()
subj = 'DeDriver Job Submission Error'
self.writeLog(subj+'\n\n'+msg)
self.sendMail(subj, msg)
# Write the lock file
self.pause()
return False
if self.debug:
self.writeLog('\nOutput of Job submission cmd:\n%s'
% child.fromchild.read())
child.fromchild.close()
# Remove the submission tarball...
if not self.debug:
try:
os.remove(self.cafSubTarball)
except:
pass
# Lastly, record the submission time in seconds since the epoch
self.cafSubmitTime = time.time()
return True
def saveStatistics(self):
""" Save the stats of the current population.
"""
# Find the best and worst population members
# Best and worst members are found in population.getStats(),
# which is called below -- should happen for every generation.
# Shift the current values back one index
#for i in range(self.m-1):
del self.prevMGenStats[0:1]
self.prevMGenStats.append(self.population.getStats(self.truncFrac)[:7])
# Loop over the statistics defined in Population::getStats()
for j in range(len(self.prevMGenStats[0])):
maxDelta = -Population._LARGEVAL
delta = -Population._LARGEVAL
#for i in range(self.m-1):
for i in range(len(self.prevMGenStats)-1):
# Take the abs of the difference in statistic j
# for generation i and generation i+1
delta = abs(self.prevMGenStats[i][j] -
self.prevMGenStats[i+1][j])
if delta > maxDelta:
maxDelta = delta
self.maxDeltaStats[j] = maxDelta
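        # Illustration with hypothetical numbers: if m = 3 and the
        # fractional-tolerance statistic (index 4) stored in
        # prevMGenStats took the values 0.08, 0.05, 0.04 over the last
        # three generations, the successive differences are 0.03 and
        # 0.01, so maxDeltaStats[4] = 0.03; converged() then compares
        # this against self.deltaTol.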
def converged(self):
""" Check for convergence of the DE algorithm.
"""
viCtr = 0
npTrunc = self.population.getNpTrunc(self.truncFrac)
chi2ndf = self.prevMGenStats[-1][5] / float(self.prevMGenStats[-1][6])
maxChi2ndf = self.maxDeltaStats[5] / float(self.prevMGenStats[-1][6])
# Count the viable Member objects
for key in self.population.keys():
if (self.population[key].isValid == True and
self.population[key].deltaConstr == 0.0):
viCtr += 1
# Check the fractional tolerance of the current generation,
# as well as the fractional tolerance of the last m generations
if self.population.generation < self.Gmin:
return False
elif self.population.generation >= self.Gmax:
return True
elif (self.convgStrategy == 'fractol' and
viCtr >= 2 and
self.prevMGenStats[-1][4] < self.tol and
self.maxDeltaStats[4] < self.deltaTol):
return True
elif (self.convgStrategy == 'chisq' and
viCtr >= npTrunc and
chi2ndf < (1.0 + self.tol) and
maxChi2ndf < self.deltaTol):
return True
else:
return False
def loadMembers(self, filename, gen):
""" Load Member objects from a file without a priori
knowledge of how many Member objects will be in the file. Expect the
Member objects to be from generation 'gen'.
"""
mainSubPop = {}
ctrlSubPop = {}
uniq = True
try:
gfile = gzip.open(filename, 'rb')
except IOError:
## return False
return (mainSubPop, ctrlSubPop)
## mbrCtr = 0
while True:
uniq = True
try:
tmpObj = cPickle.load(gfile)
# Check the class name of the object just retrieved
if isinstance(tmpObj, SAMember):
if (tmpObj.generation != gen or
tmpObj.nDim != self.ctrlPop.nDim):
raise ValueError
for key in ctrlSubPop.keys():
if not tmpObj.isUnique(ctrlSubPop[key]):
# we've loaded a non-unique individual (w.r.t. decision
# vector), but this individual may have performed better
# than it's twin(s).
uniq = False
if tmpObj < ctrlSubPop[key]:
# the newer individual dominates the old (strictly), so
# replace the old with the new
if tmpObj.getKey() != key:
del ctrlSubPop[key]
uniq = True
break
if uniq:
ctrlSubPop[tmpObj.getKey()] = tmpObj
elif isinstance(tmpObj, Member):
if (tmpObj.generation != gen or
tmpObj.nDim != self.population.nDim):
raise ValueError
## mbrCtr += 1
mainSubPop[tmpObj.getKey()] = tmpObj
else:
raise TypeError, 'Attempted to load an object of type other than Member or SAMember'
except EOFError:
break
except ValueError:
continue
except TypeError:
# Found an object which is not of type Member;
# can either close the file, or look for more
# Member objects...
continue
except cPickle.UnpicklingError:
continue
gfile.close()
return (mainSubPop, ctrlSubPop)
def loadNextGen(self, nseg):
""" Load the members of the next generation from the output files of
the last CAF job. Expect at least nseg segments to have returned.
"""
nextGen = self.population.generation + 1
nextGenPop = copy.deepcopy(self.population)
nextGenPop.setGeneration(nextGen)
tmpCtrlPop = {}
# Get the Member objects for this CAF segment;
# the file will be gzipped
success = False
mbrCtr = 0
segCtr = 0
loadedFiles = []
lastGenFiles = []
for seg in self.cafSegmentMap.keys():
fname = self.getCafOutFileName(nextGen, seg)
if nextGenPop.generation > 1:
lastGenFiles.append(self.getCafOutFileName(nextGen-1, seg))
mainSubPop, ctrlSubPop = self.loadMembers(fname, nextGen)
n = len(mainSubPop)
if n > 0:
loadedFiles.append(fname)
segCtr += 1
mbrCtr += n
nextGenPop.memberColl.update(mainSubPop)
if self.sade:
for mkey in ctrlSubPop.keys():
if mkey in tmpCtrlPop.keys():
                            # This key has already been loaded (possible
                            # because we sample from the Pareto front in a
                            # stochastic fashion).
uniq = ctrlSubPop[mkey].isUnique(tmpCtrlPop[mkey])
if ctrlSubPop[mkey] < tmpCtrlPop[mkey]:
# The new SAMember object dominates the old
# object with the same key, so replace
# the old with the new (regardless of whether
# the new is actually unique w.r.t. the old).
tmpCtrlPop[mkey] = ctrlSubPop[mkey]
elif uniq and ctrlSubPop[mkey] == tmpCtrlPop[mkey]:
# we have two non-dominated individuals,
# create a new dict key for the SAMember
# just loaded if the new guy is unique.
keylist = copy.deepcopy(self.ctrlPop.keys())
keylist.extend(tmpCtrlPop.keys())
maxkey = max(keylist)
ctrlSubPop[mkey].popIndex = maxkey+1
tmpCtrlPop[maxkey+1] = ctrlSubPop[mkey]
else:
# check that all members of ctrlSubPop are unique
# w.r.t. tmpCtrlPop
uniq = True
for tkey in tmpCtrlPop.keys():
if not ctrlSubPop[mkey].isUnique(tmpCtrlPop[tkey]):
uniq = False
if ctrlSubPop[mkey] < tmpCtrlPop[tkey]:
# the newer individual dominates the old (strictly), so
# replace the old with the new
if mkey != tkey:
del tmpCtrlPop[tkey]
uniq = True
break
if uniq:
tmpCtrlPop[mkey] = ctrlSubPop[mkey]
if segCtr >= nseg:
self.population = nextGenPop
if self.sade:
self.ctrlPop.memberColl.update(tmpCtrlPop)
self.ctrlPop.setGeneration(nextGen)
# remove the files from the last generation
if not self.debug:
for fname in lastGenFiles:
try:
os.remove(fname)
except:
pass
return mbrCtr
def cafJobComplete(self):
""" Check whether the last CAF Job submitted has completed
"""
# deDriver.cafJobComplete() should return True if we have all
# output files from the CAF job or we have exceeded the real-time
# limit for the queue used (in this case, send an error report to
# the user; proceed with the next generation's CAF job only if at
# least one CAF job segment came back okay).
nextGen = self.population.generation + 1
elapsedTime = time.time() - self.cafSubmitTime
# The safety factor is additive because we're trying to account
# for variations in file transfer times; we want the same safety
# factor no matter the length of the queue real time limit.
queueTime = DeDriver._safetyFac + DeDriver._stdQueueData[self.cafQueue]
fileCtr = 0
nseg = len(self.cafSegmentMap)
for seg in self.cafSegmentMap.keys():
fname = self.getCafOutFileName(nextGen, seg)
if os.path.exists(fname):
fileCtr += 1
if fileCtr == nseg:
return True
elif (fileCtr >= self.cafJobCompltFrac * nseg and
elapsedTime > queueTime):
# Some fraction of our total number of segments have returned,
# but there should have been plenty of time for the remaining
# jobs to transfer their output.
# Assume a fraction of the jobs crashed and will not give
# any output -- continue with the minization alg.
return True
elif (fileCtr < self.cafJobCompltFrac * nseg and
elapsedTime > queueTime):
# No files have returned yet and
# too much time has elapsed since job submission,
# create the lockfile and send email to user.
self.pause()
## bakfname = self.stateFName + '.bak'
## msg = """DeDriver Error: \t Low output file count for elapsed time
## greater than the queue real-time limit plus a safety factor
## (%s seconds).\n
## The Current algorithmic state will be saved to %s and what files
## did return from the CAF job will be loaded to %s and used when the
## algorithm is resumed by the removal of the lock file (%s).""" % (
## DeDriver._safetyFac, bakfname, self.stateFName, self.lockFName)
msg = """DeDriver Error: \t Low output file count for elapsed time
greater than the queue real-time limit plus a safety factor
(%s seconds). The algorithm may be resumed by removing the lock
file (%s)""" % (DeDriver._safetyFac, self.lockFName)
self.writeLog('ERROR -- DeDriver.py CAF Job Incomplete\n'+msg)
self.sendMail('ERROR -- DeDriver.py CAF Job Incomplete', msg)
# Backup the last generation's state
## try:
## shutil.copy(os.path.join(self.workDir, self.stateFName),
## os.path.join(self.workDir, bakfname))
## except:
## pass
## # Load what new Member objects we can find
## nloaded = self.loadNextGen(1)
## self.writeLog('\nLoaded a total of %s CAF output files of generation %s.\n' % (
## nloaded, self.population.generation))
## # Save the state of the algorithm
## if not self.saveState():
## msg = """DeDriver ERROR: failed to save state (state file: %s).
## Algorithm is paused (lock file: %s)""" % (self.stateFName,
## self.lockFName)
## self.writeLog('ERROR -- DeDriver::cafJobComplete()\n'+msg)
## self.sendMail("DeDriver ERROR", msg)
## self.pause()
return False
else:
# Not enough time has passed since the submission of the job,
# so we should not expect to have a complete set of output files.
return False
def updateStrategy(self):
""" Update the current DE strategy based upon the fraction of
the maximum number of generations completed so far.
"""
if self.population.generation < self.xi * self.Gmax:
self.fi = 0
self.ctrlPop.fi = 0
self.population.strategy = self.phase1Strategy
self.population.F = self.phase1F
if self.population.nDim < self.dimThresh:
self.population.crossProb = self.phase1LoDimCr
else:
self.population.crossProb = self.phase1HiDimCr
else:
self.fi = 1
self.ctrlPop.fi = 1
self.population.strategy = self.phase2Strategy
self.population.F = self.phase2F
if self.population.nDim < self.dimThresh:
self.population.crossProb = self.phase2LoDimCr
else:
self.population.crossProb = self.phase2HiDimCr
def getParentList(self):
""" Return the list of population member indices which are up
for replacement in the current CAF segment.
"""
return self.cafSegmentMap[self.cafSegment]
def getCtrlParentKeys(self, zeta):
""" Returns a list of keys for the ctrlPop member indices
which are up for replacement in the current CAF segment.
"""
allKeys = self.ctrlPop.getPopFrac(zeta, self.fi)
segKeyMap = {}
mptr = 0
nseg = len(self.cafSegmentMap)
nCtrlParentsPerSeg = int(math.ceil(float(len(allKeys)) / float(nseg)))
for iseg in range(1,nseg+1):
if mptr+nCtrlParentsPerSeg < len(allKeys):
upper = mptr + nCtrlParentsPerSeg
else:
upper = len(allKeys)
segKeyMap[iseg] = allKeys[mptr:upper]
mptr += nCtrlParentsPerSeg
if mptr >= len(allKeys):
mptr = 0
return segKeyMap[self.cafSegment]
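        # Illustration (hypothetical numbers): if getPopFrac() returns 6
        # control-parm keys and there are 4 CAF segments, each segment is
        # assigned ceil(6/4) = 2 keys: segKeyMap = {1: keys[0:2],
        # 2: keys[2:4], 3: keys[4:6], 4: keys[0:2]} (the pointer wraps
        # around once the key list is exhausted), and the slice for
        # self.cafSegment is returned.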
def evalFcn(self, trial):
""" Evaluate the objective function for Member object 'trial'.
"""
result = [Population._LARGEVAL] * self.population.fDim
if self.local:
# DO WE NEED TO HAVE BOTH PYTHON CALLABLE FUNCTIONS
# AND COMPLETELY EXTERNAL FUNCTIONS? IF SO, NEED A
# WAY TO DISTINGUISH BETWEEN THESE TWO MODES.
#
# NEED A WAY TO PASS EXTRA ARGS TO self.costFcn!!!
try:
result = self.costFcn(trial.x, self.costArgs)
            except (NameError, TypeError):
print 'Please set the cost function.'
raise TypeError
except:
pass
else:
# Create the actual command to execute
# First, convert the parameters to strings
args = ''
for arg in trial.x:
args += ' %s' % arg
# Now append the argument string to the command supplied by the user
cmd = self.cafCmd + args
# Spawn a child process and block while it executes
child = popen2.Popen4(cmd)
# close child's STDIN
child.tochild.close()
# close child's STDOUT + STDERR -- don't care about log info
child.fromchild.close()
# Block while the child process is running, capture the return value
retval = child.wait()
if retval == 0:
                # Read the function values from the expected file.
# What about child processes which return very
# quickly with a non-zero value? Do we generate a different
# parameter vector and try again? (up to some max # of tries)
try:
file = open(self.fcnEvalFName, 'r')
except IOError:
# Problems opening the file with the function values,
# so return Population._LARGEVAL for each function
return result
## if self.pareto:
## # Get a list object with each element corresponding to
## # a line from the file (including it's newline char)
## tmp = file.readlines()
## for i in range(len(tmp)):
## # Trim off the trailing newline character
## result[i] = float(tmp[i].rstrip('\n'))
## else:
## tmp = file.readline()
## result = float(tmp.rstrip('\n'))
# Get a list object with each element corresponding to
# a line from the file (including it's newline char)
tmp = file.readlines()
for i in range(len(tmp)):
# Trim off the trailing newline character
result[i] = float(tmp[i].rstrip('\n'))
file.close()
# Remove the function eval results file to avoid
# collisions.
try:
os.remove(self.fcnEvalFName)
except:
pass
if len(result) < self.population.fDim:
msg = 'Cost function output length < %s.' % self.population.fDim
raise TypeError, msg
return result
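        # A minimal sketch of an external cost command obeying the contract
        # assumed above (the script name 'my_cost.py' and the quadratic toy
        # objective are hypothetical): the trial parameters arrive as
        # positional command-line arguments and one cost value per line is
        # written to the function-eval file ('de_fcn.dat' by default):
        #
        #     #!/usr/bin/env python
        #     import sys
        #     x = [float(a) for a in sys.argv[1:]]   # trial parameter vector
        #     cost = sum(xi * xi for xi in x)        # toy objective value
        #     f = open('de_fcn.dat', 'w')
        #     f.write('%s\n' % cost)                 # one value per line
        #     f.close()
        #
        # The driver would then be configured with something like
        # setCafParms(cmd='python my_cost.py', ...); evalFcn() appends the
        # trial vector to that command before spawning it.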
def driveCron(self):
""" Drive the algorithm while in cron mode.
"""
if self.cafJobComplete():
logsep = '='*50
genmsg = 'DeDriver (cron mode) Generation %s' % (
self.population.generation+1)
msg = logsep.center(80) + '\n\n' + genmsg.center(80) + '\n'
self.writeLog(msg)
# Load the Member objects of the next generation
nloaded = self.loadNextGen(int(round(self.cafJobCompltFrac *
len(self.cafSegmentMap))))
#########
if self.population.generation == 1:
# Just loaded the initialized population, so check the
# cost functions
ctr = 0
for key in self.population.keys():
if self.population[key].y == ([Population._LARGEVAL]*
self.population.fDim):
ctr += 1
if ctr == nloaded:
self.pause()
msg = 'ERROR: The population initialization failed!'
msg += (' All loaded members had cost values of %s!'
% Population._LARGEVAL)
self.writeLog(msg)
self.sendMail('ERROR: Population Init Failed!', msg)
sys.exit(1)
#########
self.writeLog('\nLoaded a total of %s Members of generation %s.\n' % (
nloaded, self.population.generation))
# Update the DE strategy based upon the current fraction of Gmax
# generations we have already completed.
self.updateStrategy()
if self.sade:
if self.ctrlPop.generation > 2:
# Update the control parm population
# (remove dominated members)
self.ctrlPop.update()
# Save the statistics for the current generation
self.saveStatistics()
# Backup the current state file -- guaranteed to be the state
# file of the previous generation
bakfname = self.stateFName + '.bak'
try:
shutil.copy(self.stateFName, bakfname)
except:
pass
if self.converged():
# SEND MAIL stating that we have converged!
msg = """DeDriver has converged!
\nMax Delta(stat) for the last %s generations:
Max Delta(best cost value): %s
Max Delta(worst cost value): %s
Max Delta(mean cost value): %s
Max Delta(standard deviation of cost): %s
Max Delta(fractional tolerance): %s
Max Delta(chi square): %s\n""" % (
self.m,
self.maxDeltaStats[0],
self.maxDeltaStats[1],
self.maxDeltaStats[2],
self.maxDeltaStats[3],
self.maxDeltaStats[4],
self.maxDeltaStats[5])
self.writeLog('DeDriver Converged!\n'+msg)
self.sendMail('DeDriver Converged!', msg)
# Set the lock file so that no more CAF jobs
# are submitted, in case the user forgets to
# remove the cronjob; also dump the algorithm state
self.pause()
else:
# save the state prior to job submission
if not self.saveState():
msg = """DeDriver ERROR: failed to save state (state file: %s).
Algorithm is paused (lock file: %s)""" % (self.stateFName,
self.lockFName)
self.writeLog('ERROR -- DeDriver.driveCron()\n'+msg)
self.sendMail("DeDriver ERROR", msg)
self.pause()
sys.exit(1)
# Submit a CAF job
if not self.submitCafJob():
# mail is sent and alg is paused within submitCafJob()
sys.exit(1)
# Save the state of the algorithm after the CAF job has been submitted
# or we have converged.
if not self.saveState():
msg = """DeDriver ERROR: failed to save state (state file: %s).
Algorithm is paused (lock file: %s)""" % (self.stateFName,
self.lockFName)
self.writeLog("ERROR -- DeDriver.driveCron()\n"+msg)
self.sendMail("DeDriver ERROR", msg)
self.pause()
sys.exit(1)
if not self.shelveState():
msg = """Failed to shelve state to DBM file. Execution continues...
"""
self.writeLog("WARNING -- DeDriver.driveCron()\n"+msg)
# Update the web-monitor files
self.updateMonitor()
def getCtrlMember(self, parentKey, ctrlParentKey):
""" Get the DE control parameters to use for a given pop
index.
"""
# SADE
## if parentKey % 2 == 0:
if self.population.generation % 2 == 0:
# Generate a new control parameter individual
ctrlChild = self.ctrlPop.getTrialMember(ctrlParentKey)
while not ctrlChild.isValid or ctrlChild.deltaConstr != 0:
ctrlChild = self.ctrlPop.getTrialMember(ctrlParentKey)
else:
# Select a control parameter individual from the
# corresponding Pareto front.
ctrlChild = self.ctrlPop.selCtrlMember(self.zeta, self.fi)
return ctrlChild
def select(self, parent, child=None, ctrlParent=None,
ctrlChild=None, toFile=True):
""" Between two Members, select which survives to the next population.
"""
nextGen = self.population.generation + 1
selMbr = None
selCtrlMbr = None
if (isinstance(ctrlParent,SAMember)):
sade = True
selCtrlMbr = ctrlParent
else:
sade = False
if not isinstance(child,Member):
# This must be the initialization generation,
# so just accept the parent.
try:
parent.y = self.evalFcn(parent)
except:
pass
parent.generation = nextGen
if toFile:
try:
gfile = gzip.open(self.cafOutFName, 'ab')
cPickle.dump(parent, gfile)
gfile.close()
except:
pass
else:
self.population[parent.getKey()] = parent
return
if child.isValid:
if child.deltaConstr == 0:
# no soft constraint violations, so evaluate the cost
# function
try:
child.y = self.evalFcn(child)
# Need to wait until *after* the function eval is done
# to open the output file!
if child <= parent:
# Save the child ; it will survive to the next
# generation of the population.
# Use '<=' rather than '<' for the selection
# test to avoid stagnation of the algorithm.
selMbr = child
else:
# Parent is better, so parent survives to the
# next generation.
selMbr = parent
except:
selMbr = parent
if sade:
if isinstance(ctrlChild, SAMember):
try:
# Evaluate the DE control parameters used
ctrlChild.y = self.ctrlPop.evalCtrl(self.population,
parent, child, ctrlChild)
except:
ctrlChild.isValid = False
if ctrlChild.isValid:
if ctrlChild < ctrlParent:
# Place the child into the current ctrl parm
# population, replacing the parent
selCtrlMbr = ctrlChild
elif ctrlChild == ctrlParent:
# child and parent are non-dominated w.r.t.
# each other
if ctrlChild.isUnique(ctrlParent):
# Child is unique w.r.t. parent, so accept both.
ctrlChild.popIndex = max(self.ctrlPop.keys()) + 1
selCtrlMbr = ctrlChild
else:
# child is not unique, so select the member
# which has the best perfomance on the current
# objective. NOTE: child should have parent's
# population index at this point.
if ctrlChild.y[self.fi] <= ctrlParent.y[self.fi]:
selCtrlMbr = ctrlChild
else:
try:
ctrlParent.y = self.ctrlPop.evalCtrl(self.population,
parent, child, ctrlParent)
except:
# We have no ctrl child, so we accept the ctrl parent,
# even though it's not valid.
ctrlParent.isValid = False
else:
# 'child' violates one or more soft constraints
# 'child' will have a cost value of Member._LARGEVAL
if child.deltaConstr < parent.deltaConstr:
# child violates the soft constraints less
# than the parent, so child survives.
# child.generation = parent.generation + 1
selMbr = child
else:
# parent is either viable or more viable (violates
# constraints less) than the child, so parent survives
# parent.generation += 1
selMbr = parent
## if sade:
## # Since the child was not evaluated, ctrlChild
## # cannot be evaluated; ctrlParent survives.
## selCtrlMbr = ctrlParent
else:
# child is not viable w.r.t the hard constraints, so parent
# survives.
selMbr = parent
## if sade:
## # Since the child was not evaluated, ctrlChild
## # cannot be evaluated; ctrlParent survives.
## selCtrlMbr = ctrlParent
selMbr.generation = nextGen
if sade:
selCtrlMbr.generation = nextGen
if toFile:
try:
gfile = gzip.open(self.cafOutFName, 'ab')
cPickle.dump(selMbr, gfile)
if sade and selCtrlMbr is not None:
cPickle.dump(selCtrlMbr, gfile)
gfile.close()
except:
pass
else:
self.population[selMbr.getKey()] = selMbr
if sade and selCtrlMbr is not None:
self.ctrlPop[selCtrlMbr.getKey()] = selCtrlMbr
def driveCaf(self):
""" Drive the algorithm while in CAF mode.
"""
ctrlParent = None
ctrlChild = None
self.setCafOutFileName()
nextGen = self.population.generation + 1
if self.sade:
zeta = self.zeta
if self.population.generation < 2:
# Eval all the ctrlPop members
zeta = 1.0
## ctrlParentKeys = self.ctrlPop.getPopFrac(zeta, self.fi)
ctrlParentKeys = self.getCtrlParentKeys(zeta)
ctrlParentIndex = -1
# Loop over the parent Member objects associated
# with the current CAF segment.
for parentIndex in self.getParentList():
parent = copy.deepcopy(self.population[parentIndex])
if self.sade:
ctrlParentIndex += 1
if ctrlParentIndex >= len(ctrlParentKeys):
ctrlParentIndex = 0
if nextGen == 1:
child = None
else:
if self.sade:
# Get the DE control parameters
ctrlParent = self.ctrlPop[ctrlParentKeys[ctrlParentIndex]]
if nextGen >= 2:
# The following call is correct in using parent.getKey();
# ctrlChild is either generated or sampled, depending
# on parent.getKey().
ctrlChild = self.getCtrlMember(parent.getKey(),
ctrlParent.getKey())
self.population.F = ctrlChild[0]
self.population.crossProb = ctrlChild[1]
else:
self.population.F = ctrlParent[0]
self.population.crossProb = ctrlParent[1]
child = self.population.getTrialMember(parent.getKey())
self.select(parent, child, ctrlParent, ctrlChild, True)
def minimize(self):
""" A complete minimization driver; for use in local running only!
Assumes all parameters (including self.costFcn) have been set.
"""
ctrlParent = None
ctrlChild = None
pkeys = self.population.keys()
pkeys.sort()
while not self.converged():
self.updateStrategy()
nextGen = self.population.generation + 1
if self.sade:
zeta = self.zeta
if self.population.generation < 2:
# Eval all the ctrlPop members
zeta = 1.0
ctrlParentKeys = self.ctrlPop.getPopFrac(zeta, self.fi)
ctrlParentIndex = -1
for parentIndex in pkeys:
parent = copy.deepcopy(self.population[parentIndex])
if self.sade:
ctrlParentIndex += 1
if ctrlParentIndex >= len(ctrlParentKeys):
ctrlParentIndex = 0
if nextGen == 1:
child = None
else:
if self.sade:
# Get the DE control parameters
ctrlParent = self.ctrlPop[ctrlParentKeys[ctrlParentIndex]]
if nextGen >= 2:
# The following call is correct in using parent.getKey();
# ctrlChild is either generated or sampled, depending
# on parent.getKey().
ctrlChild = self.getCtrlMember(parent.getKey(),
ctrlParent.getKey())
self.population.F = ctrlChild[0]
self.population.crossProb = ctrlChild[1]
else:
self.population.F = ctrlParent[0]
self.population.crossProb = ctrlParent[1]
child = self.population.getTrialMember(parent.getKey())
self.select(parent, child, ctrlParent, ctrlChild, False)
self.population.setGeneration(nextGen)
self.saveStatistics()
if self.sade:
self.ctrlPop.setGeneration(nextGen)
if nextGen > 1:
# Update the control parm population
# (remove dominated members)
print 'About to update ctrlPop... np = %s' % len(self.ctrlPop.keys())
self.ctrlPop.update()
print 'CtrlPop updated. np = %s' % len(self.ctrlPop.keys())
## if nextGen % 10 == 0:
if nextGen > 0:
logsep = '='*50
                genmsg = 'DeDriver (local mode) Generation %s' % (
self.population.generation)
msg = logsep.center(80) + '\n\n' + genmsg.center(80) + '\n'
msg += `self`
print msg
print '\nControl Parm Population Summary:'
print `self.ctrlPop`
sys.stdout.flush()
## self.writeLog(msg)
# We've converged, so return the best cost value
return self.population[self.population.ibest].y
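        # A hedged usage sketch for local running (the sphere objective and
        # all numeric values are illustrative; the positional constructor
        # call mirrors the one in setupMode() below, and costFcn/costArgs
        # are the attributes read by evalFcn()):
        #
        #     def sphere(x, args):
        #         # must return a list of cost values, one per objective
        #         return [sum(xi * xi for xi in x)]
        #
        #     drv = DeDriver(3, 1, 30, -1, 'de_state.dat.gz',
        #                    'user@example.com')
        #     drv.local = True
        #     drv.costFcn = sphere
        #     drv.costArgs = None
        #     drv.setDEStrategy()
        #     drv.setConvergenceStrategy('fractol', gmin=10, gmax=50)
        #     drv.initialize([(i, -5.0, 5.0) for i in range(3)])
        #     best = drv.minimize()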
#--------------------------------------------------------------------------
def monitorMode(parser, options):
""" Monitor the progress of a DeDriver run.
"""
try:
stateFile = gzip.open(options.stateFile, 'rb')
deDriver = cPickle.load(stateFile)
except (IOError, gzip.zlib.error, EOFError, cPickle.UnpicklingError):
# handle the IO error
# Exit with an error condition
print 'DeDriver Error: Failed to open DeDriver state file %s' % options.stateFile
sys.exit(1)
stateFile.close()
print '\n'
print `deDriver`
def cronMode(parser, options):
""" Operate the DeDriver class in cronjob mode.
"""
# Check for the presence of a lock file (created by the user);
# if it exists, do nothing (i.e. do *not* execute any part of the
# optimization algorithm -- no CAF jobs will be submitted!).
########## WHAT ABOUT RESTARTING THE ALG FROM WHERE WE LEFT OFF? ########
### Will need to set the caf submit time so that DeDriver::cafJobComplete()
### does not set the lock file again
### self.cafSubmitTime = time.time()
lockFName = './dedriver.loc' # should this be a user option?
if not os.path.exists(lockFName):
try:
# This should probably be split into two separate try statements...
stateFile = gzip.open(options.stateFile, 'rb')
deDriver = cPickle.load(stateFile)
except (IOError, gzip.zlib.error, EOFError, cPickle.UnpicklingError):
# handle the IO error -- What about error message from Python?
# set the lock file
Pause(lockFName)
# SEND MAIL
            addr = '%s@fnal.gov' % os.environ['USER']
subj = 'DeDriver StateFile error'
msg = 'Failed to open the DeDriver state file, %s' % options.stateFile
msg += """\n\nThe DE algorithm will be paused until the user removes
the lock file, %s""" % lockFName
WriteLog(options.logFile, 'ERROR -- DeDriver State file error\n'+msg)
SendMail(addr, subj, msg)
sys.exit(1)
stateFile.close()
deDriver.driveCron()
def cafMode(parser, options):
""" Operate the DeDriver class in CAF mode.
"""
if options.segment < 0:
parser.error("Invalid CAF segment number.")
try:
stateFile = gzip.open(options.stateFile, 'rb')
deDriver = cPickle.load(stateFile)
except (IOError, gzip.zlib.error, EOFError, cPickle.UnpicklingError):
# handle the IO error
print 'DeDriver (CAF mode) ERROR: Failed to open the state file, %s' % options.stateFile
sys.exit(1)
stateFile.close()
deDriver.cafSegment = options.segment
deDriver.driveCaf()
def setupMode(parser, options):
""" Operate the DeDriver class in setup mode.
"""
# Regular expression used for checking for the absence of
# a required argument to an option (e.g. user inputs
# '-f -F blah.dat', in which case the argument of
# the '-f' option is '-F', rather than a valid filename)
# This 'manual' check is only necessary for options
# expecting string type arguments; other types will
# be checked by the optparse module.
if options.debug:
print '%s: Entering setup mode...' % sys.argv[0]
spatt = re.compile(r"^-")
# CAF Parameters
if options.debug:
print '%s: Testing options.cmd ...' % sys.argv[0]
if not options.cmd or spatt.search(options.cmd):
parser.error("Please specify a command for function evaluation")
if options.nDim < 1:
parser.error("Parameter vector length must be > 0")
if options.dtol < 0.0:
parser.error("delta-tol must be >= 0")
# Check for mal-formed email address
if options.debug:
print '%s: Testing options.email ...' % sys.argv[0]
epatt = re.compile(r"^\S+@(?:\S+|\.)+")
if not epatt.search(options.email):
parser.error("Malformed email address: %s" % options.email)
if options.debug:
print '%s: Testing options.stateFile ...' % sys.argv[0]
gzpatt = re.compile(r"\.gz$")
if not gzpatt.search(options.stateFile):
print """DeDriver Warning: State file will be gzipped; appending
'.gz' extension to %s""" % options.stateFile
options.stateFile += '.gz'
if options.debug:
print '%s: Testing options.fcnEvalFile ...' % sys.argv[0]
if spatt.search(options.fcnEvalFile):
parser.error("""Please specify a file to use for function value
passing""")
if options.debug:
print '%s: Testing options.group ...' % sys.argv[0]
if spatt.search(options.group):
parser.error("Malformed CAF group name %s" % options.group)
if options.Gmax < 0:
parser.error("Gmax must be > 0")
### Test the hard and soft constraints together
if options.debug:
print '%s: Testing options.hardConstr ...' % sys.argv[0]
relOps = re.compile("[<>=]")
parmPatt = re.compile("x\[\d\]")
for constr in (options.hardConstr+options.softConstr):
if relOps.search(constr):
parser.error("Constraint %s contains relational operators" % constr)
if not parmPatt.search(constr):
parser.error("Constraint %s contains no parameter variables like x[0]"
% constr)
if spatt.search(options.logFile):
parser.error("Malformed log file %s" % options.logFile)
if options.m < 1 or options.m > 10:
parser.error("""Number of previous generations to consider for
convergence should be in the domain [1, 10]""")
if options.nEvals < 1:
parser.error("Must have at least one function eval per job segment")
# Check the basic format of the output location
if options.debug:
print '%s: Testing options.outLoc ...' % sys.argv[0]
print '\toptions.outLoc = %s' % options.outLoc
olpatt = re.compile(r"^\S+@(?:\S+|\.)+:(?:/|~)")
## if not olpatt.search(options.outLoc):
## parser.error("Please specify a directory for job output in the form <EMAIL>:/path")
if options.debug:
print '%s: Testing options.queue ...' % sys.argv[0]
if options.queue not in DeDriver._stdQueueData.keys():
parser.error("Invalid CAF queue type %s" % options.queue)
# Check the format of the initial range list
# should be of the form 'int:float:float'
if options.debug:
print '%s: Testing options.initRange ...' % sys.argv[0]
irdata = []
irctr = 0
for item in options.initRange:
irlist = item.split(':', 2)
index = -1
lo = Member._defLoBnd
hi = Member._defHiBnd
if len(irlist) != 3:
parser.error("Malformed initial range string %s" % item)
try:
index = int(irlist[0])
lo = float(irlist[1])
hi = float(irlist[2])
except (ValueError, TypeError):
parser.error("Malformed initial range string %s" % item)
else:
if irctr <= options.nDim:
irctr += 1
irdata.append((index, lo, hi))
irctr = 0
gaussParms = []
for item in options.gaussParms:
gaussList = item.split(':',2)
index = -1
mu = Population._defGaussParms[0]
sigma = Population._defGaussParms[1]
if len(gaussList) != 3:
parser.error("Malformed gaussian parameter string %s" % item)
try:
index = int(gaussList[0])
            mu = float(gaussList[1])
            sigma = float(gaussList[2])
except:
parser.error("Malformed gaussian parameter string %s" % item)
else:
if irctr <= options.nDim:
irctr += 1
gaussParms.append((index, mu, sigma))
if options.popSize < 5:
parser.error("Population size must be >= 4")
if options.popSize < options.nDim:
parser.error("Population size must be >= dimension of parameter vector")
if options.debug:
print '%s: Testing options. ...' % sys.argv[0]
if options.tarfile:
parser.error("""%s: Monolithic tarball submission is not currently
supported. Exiting...""" % sys.argv[0])
## if options.debug:
## print '%s: Testing options.tarfile ...' % sys.argv[0]
## if spatt.search(options.tarfile):
## parser.error("Please specify a function evaluation tarball.")
## if not os.path.exists(options.tarfile):
## parser.error("Tarball for function evaluation %s does not exist!"
## % options.tarfile)
if options.tol < 0.0:
parser.error("tol must be >= 0")
# Check the basic format of the function tarball URL
if options.debug:
print '%s: Testing options.url ...' % sys.argv[0]
urlpatt = re.compile(r"(?:^http://)|(?:^ftp://)")
if not urlpatt.search(options.url):
parser.error("Malformed objective function tarball URL.")
if options.url and options.tarfile:
print """DeDriver Warning: Split tarball framework and monolithic
tarball framework are mutually exclusive. Will use split framework."""
options.tarfile = None
elif not options.url and not options.tarfile:
parser.error("""Please specify either a URL for the function eval
tarball (for split tarball framework) or a filename for monolithic
tarball framework.""")
if options.xi < 0 or options.xi > 1.0:
parser.error("Exploratory strategy fraction xi must be in [0.0, 1.0]")
if options.cmpltSegFrac < 0.0 or options.cmpltSegFrac > 1.0:
parser.error("""Fraction of complete segments returned for each job
should be in [0.0, 1.0].""")
if spatt.search(options.ph1Strat):
parser.error("Please specify a valid phase 1 DE strategy.")
if options.ph1LDCr < 0.0 or options.ph1LDCr > 1.0:
parser.error("""Phase 1 low-dim crossover probabilty should
be in [0.0, 1.0].""")
if options.ph1HDCr < 0.0 or options.ph1HDCr > 1.0:
parser.error("""Phase 1 high-dim crossover probabilty should
be in [0.0, 1.0].""")
if options.ph1F < 0.0 or options.ph1F > 2.0:
parser.error("""Phase 1 DE scale factor F should be in
[0.0, 2.0].""")
if spatt.search(options.ph2Strat):
parser.error("Please specify a valid phase 2 DE strategy.")
if options.ph2LDCr < 0.0 or options.ph2LDCr > 1.0:
parser.error("""Phase 2 low-dim crossover probabilty should
be in [0.0, 1.0].""")
if options.ph2HDCr < 0.0 or options.ph2HDCr > 1.0:
parser.error("""Phase 2 high-dim crossover probabilty should
be in [0.0, 1.0].""")
if options.ph2F < 0.0 or options.ph2F > 2.0:
parser.error("""Phase 2 DE scale factor F should be in
[0.0, 2.0].""")
if options.debug:
print '%s: Testing options.dataSrc ...' % sys.argv[0]
if spatt.search(options.dataSrc):
parser.error("Please specify a valid CAF data source.")
if options.debug:
print '%s: Testing options.convgStrat ...' % sys.argv[0]
if options.convgStrat not in DeDriver._availConvgStrats:
parser.error("Invalid convergence strategy %s" % options.convgStrat)
if options.Gmin < 0:
parser.error("Gmin must be > 0")
if options.Gmax < options.Gmin:
#parser.error("Gmax must be greater than Gmin")
print "DeDriver Warning: Gmax < Gmin --> Gmax = Gmin"
if options.dimThresh < 0 or options.dimThresh > 10:
parser.error("Invalid dimensional threshold for DE crossover parameter.")
if options.truncFrac < 0 or options.truncFrac > 1.0:
parser.error("Truncation fraction for chi-square must be in [0,1].")
## if spatt.search(options.lockFile):
## parser.error("Invalid lock file %s" % options.lockFile)
if options.mt < 0 or options.mt > 1.0:
parser.error("Trigonometric mutation probability must be in [0,1].")
if spatt.search(options.buildDir):
parser.error("Please specify a build directory for CAF job submission.")
if spatt.search(options.runLocalDir):
parser.error("Please specify a working directory for local function eval.")
## if options.monitorLoc == '':
## print "Warning: No monitor location specified, so web-based monitoring will not be available."
#parser.error("Please specify a monitor location which is served to the web")
### Create a DeDriver instance
if options.debug:
print '%s: Creating the DeDriver instance...' % sys.argv[0]
fdim = 1
deDriver = DeDriver(options.nDim, fdim, options.popSize, -1,
options.stateFile, options.email)
deDriver.verbose = options.verbose
deDriver.debug = options.debug
deDriver.logFName = options.logFile
if options.saveInitPopFile != '':
deDriver.saveInitPop = True
deDriver.popFile = options.saveInitPopFile
if options.loadInitPopFile != '':
deDriver.initFromFile = True
deDriver.popFile = options.loadInitPopFile
# Set the hard and soft constraints
if options.debug:
print '%s: Setting DeDriver constraints...' % sys.argv[0]
deDriver.setConstraints(options.softConstr, options.hardConstr)
# Set phase1 / phase2 DE parms.
# NOTE: currently, we use the default phase1/2 strategies
if options.debug:
print '%s: Setting DeDriver DE strategy...' % sys.argv[0]
deDriver.setDEStrategy(options.xi, options.dimThresh, options.ph1LDCr,
options.ph1HDCr, options.ph1F, options.ph2LDCr,
options.ph2HDCr, options.ph2F, options.mt,
options.ph1Strat, options.ph2Strat,
options.sade, options.zeta)
## ('rand-trig',1), ('best',1))
## ('rand-trig',1), ('best-trig',1))
## ('rand-sort',1), ('best',1))
## ('rand-sort',1), ('best',2))
# Set the convergence strategy parameters
if options.debug:
print '%s: Setting DeDriver convergence strategy...' % sys.argv[0]
deDriver.setConvergenceStrategy(options.convgStrat, options.Gmin,
options.Gmax, options.tol, options.dtol,
options.m, options.truncFrac)
# Set the CAF parameters
if options.debug:
print '%s: Setting DeDriver CAF parameters...' % sys.argv[0]
local = False
if options.runLocalDir != '':
local = True
deDriver.setCafParms(options.cmd, options.nEvals, options.url, options.tarfile,
options.queue, options.group, options.outLoc,
options.dataSrc, options.cmpltSegFrac,
local, options.runLocalDir)
if options.debug:
print '%s: Initializing DeDriver instance...' % sys.argv[0]
deDriver.initialize(irdata, gaussParms)
msg = (('='*50).center(80) + '\n\n' + ('DeDriver Initialized').center(80)
+ '\n\n' + deDriver.printFull() + '\n')
deDriver.writeLog(msg)
print 'DeDriver initialized!'
    print '** Please set up a cronjob to run %s in cron mode.\n' % sys.argv[0]
print `deDriver`
if options.debug:
print '%s: Saving the DeDriver state...' % sys.argv[0]
if not deDriver.saveState():
print """DeDriver ERROR: failed to save state. Algorithm is paused
(lock file is set)."""
deDriver.pause()
sys.exit(1)
else:
if deDriver.debug:
print '%s: state saved to %s' % (sys.argv[0], deDriver.stateFName)
print '%s: Submitting a CAF job...' % sys.argv[0]
if deDriver.submitCafJob():
if not deDriver.saveState():
print """%s: ERROR: failed to save state after CAF job
submission. Algorithm is paused (lock file is set).""" % (
sys.argv[0])
deDriver.pause()
sys.exit(1)
if not deDriver.shelveState():
print """%s: WARNING: failed to shelve state to DBM file.
Execution continues... """ % (sys.argv[0])
else:
print """%s: ERROR Submitting CAF job (check the log file).
Exiting...""" % sys.argv[0]
sys.exit(1)
if options.debug:
print '%s: Updating the monitor info...' % sys.argv[0]
deDriver.updateMonitor()
if __name__ == "__main__":
# Set the random seed from the system clock or /dev/urandom
random.seed()
#farmdict = []
# determine the operational mode from the command-line args,
# do the appropriate setup and run the DE algorithm.
usage = "usage: %prog [options] MODE"
vers = '%prog v' + '%s' % (__version__)
parser = OptionParser(usage, version=vers)
# mode is a *required* argument, so process it as a positional arg.
##### Use 'metavar="FILE"' to change the metavariable listed in
##### the optparse help message.
## parser.add_option("-a", "--adaptive-search", action="store_true",
## default=False, help="Use adaptive search parameters.")
parser.add_option("-c", "--cmd", action="store", type="string",
dest="cmd", help="Add a command for function"+
" evaluation; should take the trial parameter vector as"+
" positional args and write the cost values to a file"+
" (de_fcn.dat). Do not include the positional args in the"+
" command.")
parser.add_option("-d", "--dim", action="store", type="int",
dest="nDim", default=1,
help="Parameter vector dimensionality [default: %default].")
parser.add_option("-D", "--delta-tol", action="store", type="float",
dest="dtol", metavar="DTOL", default=0.5E-4,
help="Maximum change in convergence measure over"+
" the previous M generations [default: %default].")
parser.add_option("-e", "--email", action="store", type="string",
dest="email", default=os.environ['USER']+'@fnal.gov',
help="Email address to use for error reports"+
" [default: %default].")
parser.add_option("-f", "--state-file", action="store", type="string",
dest="stateFile", metavar="FILE", default="de_state.dat.gz",
help="Differential Evloution driver state filename"+
" [default: %default].")
parser.add_option("-F", "--fcn-eval-file", action="store", type="string",
dest="fcnEvalFile", metavar="FILE", default="de_fcn.dat",
help="Use FILE for communicating function"+
" evaluation outputs to the DeDriver instance"+
" [default: %default]")
parser.add_option("-g", "--group", action="store", type="string",
dest="group", default="common",
help="CAF group [default: %default].")
parser.add_option("-G", "--gen-max", action="store", type="int",
dest="Gmax", metavar="GMAX", default=100,
help="Maximum number of generations [default: %default].")
parser.add_option("-H", "--add-hard-constr", action="append",
type="string", dest="hardConstr", metavar="CONSTR",
default=[],
help="Add a *hard* constraint g(x[]) (e.g."+
" \"-[0] + 10\"""), where it is assumed that g(x[]) <= 0."+
" Constraints can be linear or nonlinear. Multiple"+
" constraints are possible via passing this option"+
" multiple times. In particular, lower and upper bounds"+
" are taken from the list of hard constraints. Hard"+
" constraints will be *strictly* enforced; see the docs"+
" for the repair rule.")
parser.add_option("-i", "--add-gauss-init", action="append",
type="string", dest="gaussParms", metavar="GAUSSPARMS",
default=[], help="Add a set of gaussian parameters for"+
"initialization in the form 'i:mu:sigma', where 'i'"+
"is the decision vector index, and mu and sigma are the"+
"gaussian mean and width, respectively.")
parser.add_option("-j", "--segment", action="store", type="int",
dest="segment", default=-1,
help="CAF job segment number [default: %default].")
parser.add_option("-l", "--log-file", action="store", type="string",
dest="logFile", default="de.log", metavar="LOGFILE",
help="Log output to file LOGFILE [default: %default].")
## parser.add_option("-L", "--local", action="store_true", dest="local",
## default=False, help="Run minimization locally [default: %default].")
## parser.add_option("-L", "--local", action="store", type="string",
## dest="runLocalDir", default="./de_local", metavar="DIR"
## help="""Run minimization locally, using DIR as the
## working directory for function eval [default: %default].""")
parser.add_option("-m", "--convg-history", action="store", type="int",
dest="m", default=5, help="Number of previous"+
" generations to look at for convergence criteria"+
" [default: %default].")
parser.add_option("-n", "--num-evals-per-seg", action="store", type="int",
dest="nEvals", metavar="NEVALS", default=1,
help="Number of function evaluations per CAF segment"+
" [default: %default].")
parser.add_option("-o", "--out-location", action="store", type="string",
dest="outLoc", metavar="OUTLOC",
default=os.environ['USER']+'@'+os.uname()[1]+
':'+os.getcwd(),
help="CAF Output location [default: %default]")
parser.add_option("-p", "--pareto", action="store_true", dest="pareto",
default=False, help="Use a Pareto algorithm to"+
" minimize multiple objective functions [default: %default].")
parser.add_option("-q", "--queue", action="store", type="string",
dest="queue", default="short",
help="CAF queue [default: %default].")
parser.add_option("-R", "--run-local-dir", action="store", type="string",
dest="runLocalDir", default="", metavar="DIR",
help="Use DIR as the working directory for"+
" local function evaluation [default: %default].")
parser.add_option("-r", "--add-init-range", action="append", type="string",
dest="initRange", metavar="RANGESTR", default=[],
help="Add an initialization range"+
" for parameter i of the form, 'i:lo:hi', where 'lo' and"+
" 'hi' are the low and high bounds (floats), respectively."+
" These bounds will be used solely for initialization.")
parser.add_option("-s", "--pop-size", action="store", type="int",
dest="popSize", metavar="NP", default=5,
help="Population size (>= 5) [default: %default].")
parser.add_option("-S", "--add-soft-constr", action="append",
type="string", dest="softConstr", metavar="CONSTR",
default=[],
help="Add a *soft* constraint g(x[]) (e.g. \"-x[0] +"+
" 1.0E-1 * x[1]\"), where it is assumed that g(x[]) <= 0."+
" Constraints can be linear or nonlinear. Multiple"+
" constraints are possible via passing this option"+
" multiple times. Soft constraints may be violated, but"+
" these trial solutions will be de-weighted; in particular,"+
" non-viable trial solutions will not participate in the"+
" convergence test. See the docs for the exact algorithm.")
parser.add_option("-t", "--tarfile", action="store", type="string",
dest="tarfile", metavar="FILE",
help="The tarball to send with the job (monolithic CAF"+
" job submission mode)")
parser.add_option("-T", "--tol", action="store", type="float",
dest="tol", default=1.0E-3,
help="Fractional tolerance for determining convergence"+
" [default: %default].")
parser.add_option("-u","--tarball-url", action="store", type="string",
dest="url", metavar="URL",
help="URL for the objective function's"+
" tarball (split tarball CAF job submission mode).")
parser.add_option("-v", "--verbose", action="store_true",
dest="verbose", default=False, help="Be verbose"+
" [default: %default].")
parser.add_option("-x", "--explr-frac", action="store", type="float",
dest="xi", metavar="XI", default=0.75,
help="Fraction xi of Gmax"+
" generations which use an exploratory strategy; (1-xi)"+
" *Gmax generations use Elitist strategy [default: %default].")
parser.add_option("", "--complt-seg-frac", action="store", type="float",
dest="cmpltSegFrac", metavar="FRAC", default=0.2,
help="Fraction of complete job segments (for each"+
" generation) required for algorithm to continue after"+
" elapsed job time exceeds queue real-time limit"+
" [default: %default].")
parser.add_option("", "--phase1DEStrategy", action="store", type="string",
dest="ph1Strat", metavar="STRAT", default="rand-trig1",
help="Set the phase 1 DE strategy to STRAT [default = %default].")
parser.add_option("", "--phase1LoDimCr", action="store", type="float",
dest="ph1LDCr", metavar="CR", default=0.5,
help="Phase 1 crossover"+
" probability for a low-dim parameter space"+
" [default: %default].")
parser.add_option("", "--phase1HiDimCr", action="store", type="float",
dest="ph1HDCr", metavar="CR", default=0.7,
help="Phase 1 crossover"+
" probability for a high-dim parameter space"+
" [default: %default].")
parser.add_option("", "--phase1F", action="store", type="float",
dest="ph1F", metavar="F", default=0.8,
help="Phase 1 scale factor F"+
" [default: %default].")
parser.add_option("", "--phase2DEStrategy", action="store", type="string",
dest="ph2Strat", metavar="STRAT", default="best1",
help="Set the phase 2 DE strategy to STRAT [default = %default].")
parser.add_option("", "--phase2LoDimCr", action="store", type="float",
dest="ph2LDCr", metavar="CR", default=0.0,
help="Phase 2 crossover"+
" probability for a low-dim parameter space"+
" [default: %default].")
parser.add_option("", "--phase2HiDimCr", action="store", type="float",
dest="ph2HDCr", metavar="CR", default=0.1,
help="Phase 2 crossover"+
" probability for a high-dim parameter space"+
" [default: %default].")
parser.add_option("", "--phase2F", action="store", type="float",
dest="ph2F", metavar="F", default=0.5,
help="Phase 2 scale factor F"+
" [default: %default].")
parser.add_option("","--sade", action="store_true", dest="sade",
default=False, help="Self-Adaptive DE: adapt the"+
"DE control parameters (F, Cr) during the run.")
parser.add_option("","--zeta", action="store", type="float", dest="zeta",
default=0.5, help="Percentage of the Pareto front to"+
"use when selecting a DE control parameter vector (F, Cr)."+
" SADE parameter.")
parser.add_option("", "--dataSrc", action="store", type="string",
dest="dataSrc", default="None",
help="Data source for the CAF [default: %default].")
parser.add_option("", "--convg-strategy", action="store", type="string",
dest="convgStrat", metavar="STRAT", default="fractol",
help="Set the convergence strategy for the DE algorithm"+
" (either 'fractol' or 'chisq') [default: %default].")
parser.add_option("", "--Gmin", action="store", type="int",
dest="Gmin", default=10,
help="Set the minimum number of generations to complete"+
" [default: %default].")
parser.add_option("", "--dim-thresh", action="store", type="int",
dest="dimThresh", metavar="THRESH", default=6,
help="Set the dimensional threshold for the DE crossover"+
" parameter [default: %default].")
parser.add_option("", "--trunc-frac", action="store", type="float",
dest="truncFrac", metavar="FRAC", default=0.15,
help="Set the truncation fraction for chi-square"+
" computation to FRAC [default: %default].")
parser.add_option("", "--trig-prob", action="store", type="float",
dest="mt", metavar="MT", default=0.05,
help="Set the probability for trigonometric mutation"+
"to MT; the probability for standard DE mutation is then"+
"(1-MT). NOTE: This option is only applicable to the"+
"'rand-trig' and 'best-trig' strategies.")
parser.add_option("", "--save-init-pop-file", action="store", type="string",
dest="saveInitPopFile", metavar="FILE", default="",
help="Save the initial population of to file FILE."+
" Should have a '.gz' extension.")
parser.add_option("", "--load-init-pop-file", action="store", type="string",
dest="loadInitPopFile", metavar="FILE", default="",
help="Load the initial population from file FILE."+
" Should have a '.gz' extension.")
# DO NOT SET THE LOCK FILE NAME MANUALLY! THERE'S NO WAY FOR THE cronMode()
# FUNCTION TO KNOW WHAT IT IS!
## parser.add_option("", "--lock-file", action="store", type="string",
## dest="lockFile", metavar="FILE", default="dedriver.loc",
## help="Use LOCKFILE as the lock file for pausing"+
## " the algorithm [default: %default].")
parser.add_option("", "--build-dir", action="store", type="string",
dest="buildDir", metavar="DIR", default="./build_de",
help="Use BUILDDIR as the tarball build directory"+
" for CAF job submission [default: %default].")
parser.add_option("","--debug", action="store_true", dest="debug",
default=False, help="Debug flag.")
## parser.add_option("", "--monitor-url", action="store", type="string",
## dest="monUrl", help="""Set the base URL (node, port, path) of the monitor.""")
## parser.add_option("-M", "--monitor-location", action="store", type="string",
## dest="monitorLoc",
## help="""Location of intermediate results for web monitoring (e.g. <EMAIL>:/path""")
(options, args) = parser.parse_args()
# Error handling for options
availModes = ('setup', 'cron', 'caf', 'monitor')
sep = ', '
if len(args) != 1:
parser.error("Invalid mode; available modes: %s" % sep.join(availModes))
if args[0] == availModes[0]:
setupMode(parser, options)
elif args[0] == availModes[1]:
cronMode(parser, options)
elif args[0] == availModes[2]:
cafMode(parser, options)
elif args[0] == availModes[3]:
monitorMode(parser, options)
else:
parser.error("Invalid mode; available modes: %s" % sep.join(availModes))
```
#### File: DeMin/test/Griewangk.py
```python
import sys, math
from DeDriver import *
class griewangk:
_LARGEVAL=1e20
def __init__(self, D=100):
self.D = D
self.x = [griewangk._LARGEVAL]*self.D
self.f = [griewangk._LARGEVAL]
def calc(self, x, args=None):
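        # Griewangk's function: f(x) = 1 + sum_j x_j^2/4000 - prod_j cos(x_j/sqrt(j+1));
        # highly multimodal, with a global minimum of f = 0 at x = 0.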
self.x = x
self.f = [griewangk._LARGEVAL]
try:
summ = 0.0
prod = 1.0
for j in xrange(self.D):
summ += self.x[j]**2/4000.0
## prod *= math.cos(self.x[j]/math.sqrt(j+1.0)) + 1.0
prod *= math.cos(self.x[j]/math.sqrt(j+1.0))
## self.f = summ - prod
self.f = [summ - prod + 1.0]
except:
pass
return self.f
def driveDeMin(D):
g = griewangk(D)
dim = D
# np = dim * 4
np = dim * 3
# np = dim * 6
## lo = -600.0
## hi = 600.0
lo = -400.0
hi = 400.0
ir = []
softConstr = []
hardConstr = []
for i in range(dim):
ir.append((i, lo, hi))
hardConstr.append('-x['+`i`+']+'+`lo`)
hardConstr.append('x['+`i`+']-'+`hi`)
# cxi = 0.1
cxi = 0.15
# cxi = 0.2
## cxi = 0.3
gmin = 5
gmax = 1000
tol = 1e-7
dtol = 5e-8
m = 5
truncFrac = 0.25
## de = DeMin(g.calc, dim, np, -1, ir)
fdim = 1
de = DeDriver(dim, fdim, np)
de.local = True
de.costFcn = g.calc
de.setConstraints(softConstr, hardConstr)
## de.setDEStrategy(xi=cxi, mt=0.05, ph2HDCr=0.5, ph2F=0.6)
## de.setDEStrategy(xi=cxi, mt=0.1, ph1HDCr=0.1, ph1F=0.5, ph2HDCr=0.1, ph2F=0.5)
de.setDEStrategy(xi=cxi, mt=0.05, ph2StratStr='best-trig1')
de.setConvergenceStrategy('fractol', gmin, gmax, tol, dtol, m, truncFrac)
de.sade = True
de.ctrlPop.mt = 0.05
## de.sade = False
# de.zeta = 0.2
de.ctrlEps = 5E-2
# de.zeta = 0.3
de.zeta = 0.1
## de.zeta = 0.5
de.initialize(ir)
print `de`
print
minCost = de.minimize()
print 'maxDeltaFracTol = %.6E' % de.maxDeltaStats[4]
print
print `de`
if __name__ == '__main__':
if len(sys.argv) != 2:
print 'Usage: Griewangk.py <D>'
sys.exit(1)
D = int(sys.argv[1])
if D <= 0:
sys.exit(1)
driveDeMin(D)
"""
D = 10
SADE:
>>> gSA
[495, 640, 403, 758]
>>> sigmaSA
[3.7187044869299999e-11, 1.03508919563e-16, 1.09553641431e-16, 7.8252601022399998e-17]
>>> ftolSA
[7.7907509016499994e-08, 0.0, 0.0, 0.0]
>>> saTrials
10
Notes: saTrials := number of trials attempted. The difference between
saTrials and len(gSA) is the number of trials which failed to converge
to the global minimum.
Standard DeDriver:
>>> g
[393]
>>> sigma
[7.5372405479399995e-17]
>>> ftol
[0.0]
>>> trials
10
SADE with ctrlPop scale factor randomly generated from a cauchy pdf (peak: 0.5, fwhm: 0.1)
>>> g
[433, 376, 641, 669, 632, 488]
>>> mean(g)
539.83333333333337
>>> std(g)
112.80871223250249
>>> sigma
[8.3337864601700004e-17, 1.60886601221e-16, 0.0035763011385000001, 8.2957758334900004e-17, 8.2623730916499998e-17, 6.5561273426600005e-17]
>>> mean(sigma)
0.00059605018975007924
>>> ftol
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
"""
```
#### File: DeMin/test/Katsuura.py
```python
import sys, math
from DeDriver import *
#from DeMin import *
class katsuura:
_LARGEVAL = 1.0E20
def __init__(self, D=10, beta=32, fname='out.dat'):
""" Setup the parameters for the function.
"""
self.D = D
self.beta = beta
self.x = [katsuura._LARGEVAL]*self.D
self.outfname = fname
self.f = katsuura._LARGEVAL
def setVect(self, x):
""" Set the parameter vector from the command-line args.
"""
if len(x) != self.D:
return False
for j in range(self.D):
self.x[j] = float(x[j])
return True
def calc(self, x, args=None):
""" Calculate Katsuura's function.
"""
self.f = katsuura._LARGEVAL
self.x = x
try:
f = 1.0
for j in range(self.D):
sum = 0.0
for k in range(self.beta+1):
sum += (abs(2**k * self.x[j] - round(2.0**k * self.x[j])) *
2**(-k))
f *= 1.0 + (j + 1.0) * sum
self.f = f
except:
pass
return self.f
def saveOutput(self):
""" Save the output to a file.
"""
try:
file = open(self.outfname, 'w')
except:
return False
else:
file.write(`self.f`)
return True
def driveDeMin(D=10, beta=32):
k = katsuura(D, beta)
dim = D
np = dim * 6
lo = -10.0
hi = 10.0
ir = []
softConstr = []
hardConstr = []
for i in range(dim):
ir.append((i, lo, hi))
hardConstr.append('-x['+`i`+']+'+`lo`)
hardConstr.append('x['+`i`+']-'+`hi`)
xi = 0.6
gmin = 5
gmax = 10000
tol = 1e-6
dtol = 5e-7
m = 5
#truncFrac = 0.4
truncFrac = 0.2
# de = DeMin(k.calc, dim, np, -1, ir)
fdim = 1
de = DeDriver(dim, fdim, np)
de.local = True
de.sade = True
de.costFcn = k.calc
de.zeta = 0.5
# de.zeta = 0.2
# de.zeta = 0.7
de.setConstraints(softConstr, hardConstr)
de.setDEStrategy(xi)
de.setConvergenceStrategy('fractol', gmin, gmax, tol, dtol, m, truncFrac)
de.ctrlPop.mt = 0.05
#de.initialize()
de.initialize(ir)
print `de`
print
minCost = de.minimize()
print 'prevMGenStats:'
for i in range(len(de.prevMGenStats)):
print
for j in range(len(de.prevMGenStats[0])):
print '%.6E ' % de.prevMGenStats[i][j],
print 'maxDeltaFracTol = %.6E' % de.maxDeltaStats[4]
print
print `de`
if __name__ == "__main__":
# kat.D should be the dimensionality of the DE vectors
# (Member objects)
if len(sys.argv) != 3:
        print 'Usage: Katsuura.py <D> <beta>'
sys.exit(1)
D = int(sys.argv[1])
if D < 0:
sys.exit(1)
beta = int(sys.argv[2])
if beta < 0:
sys.exit(1)
## kat = katsuura(D, beta, 'de_fcn.dat')
## if not kat.setVect(sys.argv[3:]):
## sys.exit(1)
## kat.calc()
## if not kat.saveOutput():
## sys.exit(1)
print 'Katsuura.py: using D = %d, beta = %d\n' % (D, beta)
driveDeMin(D, beta)
```
|
{
"source": "JeganS948/WEB_SCRAPING_CHALLENGE_SJ",
"score": 3
}
|
#### File: WEB_SCRAPING_CHALLENGE_SJ/2_Flask_app/app.py
```python
from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo
from scrape_mars import scrape_mars_info
# Flask Setup
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/marsdb"
mongo = PyMongo(app)
# Flask Routes
@app.route("/")
def index():
mars = mongo.db.mars_coll.find_one()
return render_template("index.html", mars=mars)
# Scrape route: import scrape_mars.py and run its scrape function
@app.route("/scrape")
def scrape():
# Define collection
mars_record = mongo.db.mars_coll
# Run scrape f"
x = scrape_mars_info()
    # Update the MongoDB document using update() with upsert=True
mars_record.update({}, x, upsert=True)
# Redirect back to homepage
return redirect("/")
# Define Main behaviour
if __name__=="__main__":
app.run(debug=True)
```
|
{
"source": "Jegarde/RecNet-Login",
"score": 3
}
|
#### File: RecNet-Login/examples/async_example.py
```python
import asyncio
import sys
import aiohttp
from recnetlogin import RecNetLoginAsync # Include the async client
# Insert your Rec Room account credentials
USERNAME: str = ""
PASSWORD: str = ""
async def main() -> None:
session = aiohttp.ClientSession()
rnl = RecNetLoginAsync(
username=USERNAME,
password=PASSWORD,
prompt_2fa=True,
session=session
)
async with session as session:
token = await rnl.get_token(include_bearer=True)
decoded_token = await rnl.get_decoded_token()
print(f"{token=}\n{decoded_token=}")
if __name__ == "__main__":
if sys.version_info[0] == 3 and sys.version_info[1] >= 8 and sys.platform.startswith('win'): # fix "RuntimeError: Event Loop is closed" exception with asyncio on Windows
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
asyncio.run(main())
```
|
{
"source": "Jegathish-16/Experiment_Basic_HTML_Tag",
"score": 3
}
|
#### File: Jegathish-16/Experiment_Basic_HTML_Tag/from http.server import HTTPServer, Base.py
```python
from http.server import HTTPServer, BaseHTTPRequestHandler
content = """
<!DOCTYPE html>
<html>
<head>
<title>My webserver</title>
</head>
<body>
<h1>name:<NAME></h1>
<h2>reference no: 21005410</h2>
<h3>email:<EMAIL></h3>
</body>
</html>
"""
class myhandler(BaseHTTPRequestHandler):
def do_GET(self):
print("request received")
self.send_response(200)
self.send_header('content-type', 'text/html; charset=utf-8')
self.end_headers()
self.wfile.write(content.encode())
server_address = ('',8080)
httpd = HTTPServer(server_address,myhandler)
print("my webserver is running...")
httpd.serve_forever()
```
|
{
"source": "jegentile/IVML",
"score": 3
}
|
#### File: IVML/Documentation_generator/ivml_documentation_from_file.py
```python
__author__ = 'jgentile'
class IVMLDocumentationFromFile:
def __init__(self,file_name):
self.__file_name = file_name
self.__file = open(file_name)
print ' Document generator looking at ',file_name
self.__ivml_object_documentation = '\0'
self.__type = ''
self.__ivml_non_required_attributes = {}
self.__ivml_required_attributes = {}
self.__svg_attributes = {}
self.__events = {}
self.__name = ""
self.__description = ""
def change_from_camel_case(self,string):
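        # Convert camelCase to dash-case (e.g. 'barChart' -> 'bar-chart') by replacing
        # each ASCII capital A-Z with '-' followed by its lowercase form.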
str = string
for i in range(65,91):
str = str.replace(chr(i),'-'+chr(i+32))
return str
def parse(self):
on_line = 0
for line in self.__file:
#check to see if the first line annotates an IVML object
if on_line ==0:
stripped_line = line.replace(' ','').replace('\t','').rstrip()
array = stripped_line.split(':')
#if the first line in the JavaScript file is not annotated for the documentaion, return
if not array[0] == '//@defining' or not len(array) ==3:
print " WARNING: First line of",self.__file_name," file does not have proper annotation. Skipping."
return
print array
if array[1] == 'ivml.chart':
self.__type = 'chart'
if array[1] == 'ivml.visualElement':
self.__type = 'visual_element'
if array[1] == 'ivml.group':
self.__type = 'group'
print 'Name: ', array[2]," --- type:",self.__type
self.__name = self.change_from_camel_case( array[2])
if on_line > 0 and self.__type:
if '//@' in line:
struct = 0
offset = 1
if '//@i' in line:
struct = self.__ivml_non_required_attributes
if '//@ir' in line:
struct = self.__ivml_required_attributes
offset = 2
if '//@s' in line:
struct = self.__svg_attributes
if '//@e' in line:
struct = self.__events
if '//@description' in line:
self.__description = line.split('//@description')[1].rstrip()
if not struct == 0:
attribute = line.strip().replace(' ','').replace('\t','').split(':')[0]
struct[self.change_from_camel_case(attribute)] = line.split('//@')[len(line.split('//@'))-1][offset:].strip().rstrip().replace('#','\#').replace('_','\_')
on_line+=1
'''
print 'ivml',self.__ivml_attributes
print 'svg',self.__svg_attributes
print 'events',self.__events
'''
def get_type(self):
if self.__type:
return self.__type
def get_ivml_required_attributes(self):
return self.__ivml_required_attributes
def get_ivml_non_required_attributes(self):
return self.__ivml_non_required_attributes
def get_svg_attributes(self):
return self.__svg_attributes
def get_event_attributes(self):
return self.__events
def get_name(self):
return self.__name
def get_description(self):
return self.__description
```
|
{
"source": "jegerjensen/sympy",
"score": 4
}
|
#### File: sympy/assumptions/refine.py
```python
from sympy.core import S, Add
from sympy.assumptions import Q, ask
from sympy.logic.boolalg import fuzzy_not
def refine(expr, assumptions=True):
"""
Simplify an expression using assumptions.
Gives the form of expr that would be obtained if symbols
in it were replaced by explicit numerical expressions satisfying
the assumptions.
Examples::
>>> from sympy import refine, sqrt, Q
>>> from sympy.abc import x
>>> refine(sqrt(x**2), Q.real(x))
Abs(x)
>>> refine(sqrt(x**2), Q.positive(x))
x
"""
if not expr.is_Atom:
args = [refine(arg, assumptions) for arg in expr.args]
# TODO: this will probably not work with Integral or Polynomial
expr = type(expr)(*args)
name = expr.__class__.__name__
handler = handlers_dict.get(name, None)
if handler is None: return expr
new_expr = handler(expr, assumptions)
if (new_expr is None) or (expr == new_expr):
return expr
return refine(new_expr, assumptions)
def refine_abs(expr, assumptions):
"""
Handler for the absolute value.
Examples::
>>> from sympy import Symbol, Q, refine, Abs
>>> from sympy.assumptions.refine import refine_abs
>>> from sympy.abc import x
>>> refine_abs(Abs(x), Q.real(x))
>>> refine_abs(Abs(x), Q.positive(x))
x
>>> refine_abs(Abs(x), Q.negative(x))
-x
"""
arg = expr.args[0]
if ask(Q.real(arg), assumptions) and \
fuzzy_not(ask(Q.negative(arg), assumptions)):
# if it's nonnegative
return arg
if ask(Q.negative(arg), assumptions):
return -arg
def refine_Pow(expr, assumptions):
"""
Handler for instances of Pow.
>>> from sympy import Symbol, Q
>>> from sympy.assumptions.refine import refine_Pow
>>> from sympy.abc import x,y,z
>>> refine_Pow((-1)**x, Q.real(x))
>>> refine_Pow((-1)**x, Q.even(x))
1
>>> refine_Pow((-1)**x, Q.odd(x))
-1
For powers of -1, even parts of the exponent can be simplified:
>>> refine_Pow((-1)**(x+y), Q.even(x))
(-1)**y
>>> refine_Pow((-1)**(x+y+z), Q.odd(x) & Q.odd(z))
(-1)**y
>>> refine_Pow((-1)**(x+y+2), Q.odd(x))
(-1)**(y + 1)
>>> refine_Pow((-1)**(x+3), True)
(-1)**(x + 1)
"""
from sympy.core import Pow, Rational
from sympy.functions import sign
if ask(Q.real(expr.base), assumptions):
if expr.base.is_number:
if ask(Q.even(expr.exp), assumptions):
return abs(expr.base) ** expr.exp
if ask(Q.odd(expr.exp), assumptions):
return sign(expr.base) * abs(expr.base) ** expr.exp
if isinstance(expr.exp, Rational):
if type(expr.base) is Pow:
return abs(expr.base.base) ** (expr.base.exp * expr.exp)
if expr.base is S.NegativeOne:
if expr.exp.is_Add:
# For powers of (-1) we can remove
# - even terms
# - pairs of odd terms
# - a single odd term + 1
# - A numerical constant N can be replaced with mod(N,2)
coeff, terms = expr.exp.as_coeff_add()
terms = set(terms)
even_terms = set([])
odd_terms = set([])
initial_number_of_terms = len(terms)
for t in terms:
if ask(Q.even(t), assumptions):
even_terms.add(t)
elif ask(Q.odd(t), assumptions):
odd_terms.add(t)
terms -= even_terms
if len(odd_terms)%2:
terms -= odd_terms
new_coeff = (coeff + S.One) % 2
else:
terms -= odd_terms
new_coeff = coeff % 2
if new_coeff != coeff or len(terms) < initial_number_of_terms:
terms.add(new_coeff)
return expr.base**(Add(*terms))
def refine_exp(expr, assumptions):
"""
Handler for exponential function.
>>> from sympy import Symbol, Q, exp, I, pi
>>> from sympy.assumptions.refine import refine_exp
>>> from sympy.abc import x
>>> refine_exp(exp(pi*I*2*x), Q.real(x))
>>> refine_exp(exp(pi*I*2*x), Q.integer(x))
1
"""
arg = expr.args[0]
if arg.is_Mul:
coeff = arg.as_coefficient(S.Pi*S.ImaginaryUnit)
if coeff:
if ask(Q.integer(2*coeff), assumptions):
if ask(Q.even(coeff), assumptions):
return S.One
elif ask(Q.odd(coeff), assumptions):
return S.NegativeOne
elif ask(Q.even(coeff + S.Half), assumptions):
return -S.ImaginaryUnit
elif ask(Q.odd(coeff + S.Half), assumptions):
return S.ImaginaryUnit
handlers_dict = {
'Abs' : refine_abs,
'Pow' : refine_Pow,
'exp' : refine_exp,
}
```
#### File: integrals/tests/test_integrals.py
```python
from sympy import (S, symbols, integrate, Integral, Derivative, exp, erf, oo, Symbol,
Function, Rational, log, sin, cos, pi, E, I, Poly, LambertW, diff,
Matrix, sympify, sqrt, atan, asin, acos, asinh, acosh, DiracDelta, Heaviside,
Lambda, sstr, Add, Tuple, Eq, Interval, Sum, factor, trigsimp)
from sympy.utilities.pytest import XFAIL, skip, raises
from sympy.physics.units import m, s
x,y,a,t = symbols('x,y,a,t')
n = Symbol('n', integer=True)
f = Function('f')
def diff_test(i):
"""Return the set of symbols, s, which were used in testing that
i.diff(s) agrees with i.doit().diff(s). If there is an error then
the assertion will fail, causing the test to fail."""
syms = i.free_symbols
for s in syms:
assert (i.diff(s).doit() - i.doit().diff(s)).expand() == 0
return syms
def test_improper_integral():
assert integrate(log(x), (x, 0, 1)) == -1
assert integrate(x**(-2), (x, 1, oo)) == 1
def test_constructor():
# this is shared by Sum, so testing Integral's constructor
# is equivalent to testing Sum's
s1 = Integral(n, n)
assert s1.limits == (Tuple(n),)
s2 = Integral(n, (n,))
assert s2.limits == (Tuple(n),)
s3 = Integral(Sum(x, (x, 1, y)))
assert s3.limits == (Tuple(y),)
s4 = Integral(n, Tuple(n,))
assert s4.limits == (Tuple(n),)
s5 = Integral(n, (n, Interval(1, 2)))
assert s5.limits == (Tuple(n, 1, 2),)
def test_basics():
assert Integral(0, x) != 0
assert Integral(x, (x, 1, 1)) != 0
assert Integral(oo, x) != oo
assert Integral(S.NaN, x) == S.NaN
assert diff(Integral(y, y), x) == 0
assert diff(Integral(x, (x,0,1)), x) == 0
assert diff(Integral(x, x), x) == x
assert diff(Integral(t, (t,0,x)), x) == x + Integral(0, (t, 0, x))
e=(t+1)**2
assert diff(integrate(e, (t,0,x)), x) == \
diff(Integral(e, (t, 0, x)), x).doit().expand() == \
((1+x)**2).expand()
assert diff(integrate(e, (t,0,x)), t) == \
diff(Integral(e, (t,0,x)), t) == 0
assert diff(integrate(e, (t,0,x)), a) == \
diff(Integral(e, (t, 0, x)), a) == 0
assert diff(integrate(e, t), a) == diff(Integral(e, t), a) == 0
assert integrate(e, (t,a,x)).diff(x) == \
Integral(e, (t, a, x)).diff(x).doit().expand()
assert Integral(e, (t, a, x)).diff(x).doit() == ((1+x)**2)
assert integrate(e, (t,x,a)).diff(x).doit() == (-(1+x)**2).expand()
assert integrate(t**2, (t,x,2*x)).diff(x) == 7*x**2
assert Integral(x, x).atoms() == set([x])
assert Integral(f(x), (x, 0, 1)).atoms() == set([S(0), S(1), x])
assert diff_test(Integral(x, (x, 3*y))) == set([y])
assert diff_test(Integral(x, (a, 3*y))) == set([x, y])
# sum integral of terms
assert integrate(y + x + exp(x), x) == x*y + x**2/2 + exp(x)
assert Integral(x).is_commutative
n = Symbol('n', commutative=False)
assert Integral(x, (x, n)).is_commutative is False
assert Integral(n + x, x).is_commutative is False
def test_basics_multiple():
assert diff_test(Integral(x, (x, 3*x, 5*y), (y, x, 2*x))) == set([x])
assert diff_test(Integral(x, (x, 5*y), (y, x, 2*x))) == set([x])
assert diff_test(Integral(x, (x, 5*y), (y, y, 2*x))) == set([x, y])
assert diff_test(Integral(y, y, x)) == set([x, y])
assert diff_test(Integral(y*x, x, y)) == set([x, y])
assert diff_test(Integral(x + y, y, (y, 1, x))) == set([x])
assert diff_test(Integral(x + y, (x, x, y), (y, y, x))) == set([x, y])
def test_integration():
assert integrate(0, (t,0,x)) == 0
assert integrate(3, (t,0,x)) == 3*x
assert integrate(t, (t,0,x)) == x**2/2
assert integrate(3*t, (t,0,x))== 3*x**2/2
assert integrate(3*t**2, (t,0,x)) == x**3
assert integrate(1/t, (t,1,x)) == log(x)
assert integrate(-1/t**2, (t,1,x)) == 1/x-1
assert integrate(t**2+5*t-8, (t,0,x)) == x**3/3+5*x**2/2-8*x
assert integrate(x**2, x) == x**3/3
assert integrate((3*t*x)**5, x) == (3*t)**5 * x**6 / 6
b = Symbol("b")
c = Symbol("c")
assert integrate(a*t, (t,0,x))==a*x**2/2
assert integrate(a*t**4, (t,0,x))==a*x**5/5
assert integrate(a*t**2+b*t+c, (t,0,x))==a*x**3/3+b*x**2/2+c*x
def test_multiple_integration():
assert integrate((x**2)*(y**2), (x,0,1), (y,-1,2)) == Rational(1)
assert integrate((y**2)*(x**2), x, y) == Rational(1,9)*(x**3)*(y**3)
assert integrate(1/(x+3)/(1+x)**3, x) == -S(1)/8*log(3 + x) + S(1)/8*log(1 + x) + x/(4 + 8*x + 4*x**2)
def test_issue433():
assert integrate(exp(-x), (x,0,oo)) == 1
def test_issue461():
assert integrate(x**Rational(3,2), x) == 2*x**Rational(5,2)/5
assert integrate(x**Rational(1,2), x) == 2*x**Rational(3,2)/3
assert integrate(x**Rational(-3,2), x) == -2*x**Rational(-1,2)
def test_integrate_poly():
p = Poly(x + x**2*y + y**3, x, y)
qx = integrate(p, x)
qy = integrate(p, y)
assert isinstance(qx, Poly) == True
assert isinstance(qy, Poly) == True
assert qx.gens == (x, y)
assert qy.gens == (x, y)
assert qx.as_expr() == x**2/2 + x**3*y/3 + x*y**3
assert qy.as_expr() == x*y + x**2*y**2/2 + y**4/4
def test_integrate_poly_defined():
p = Poly(x + x**2*y + y**3, x, y)
Qx = integrate(p, (x, 0, 1))
Qy = integrate(p, (y, 0, pi))
assert isinstance(Qx, Poly) == True
assert isinstance(Qy, Poly) == True
assert Qx.gens == (y,)
assert Qy.gens == (x,)
assert Qx.as_expr() == Rational(1,2) + y/3 + y**3
assert Qy.as_expr() == pi**4/4 + pi*x + pi**2*x**2/2
def test_integrate_omit_var():
y = Symbol('y')
assert integrate(x) == x**2/2
raises(ValueError, "integrate(2)")
raises(ValueError, "integrate(x*y)")
def test_integrate_poly_accurately():
y = Symbol('y')
assert integrate(x*sin(y), x) == x**2*sin(y)/2
# when passed to risch_norman, this will be a CPU hog, so this really
    # checks that the integrated function is recognized as a polynomial
assert integrate(x**1000*sin(y), x) == x**1001*sin(y)/1001
def test_issue536():
y = Symbol('y')
assert integrate(x**2, y) == x**2*y
assert integrate(x**2, (y, -1, 1)) == 2*x**2
# works in sympy and py.test but hangs in `setup.py test`
def test_integrate_linearterm_pow():
# check integrate((a*x+b)^c, x) -- #400
y = Symbol('y')
assert integrate(x**y, x) == x**(y+1)/(y+1)
assert integrate((exp(y)*x + 1/y)**(1+sin(y)), x) == exp(-y)*(exp(y)*x + 1/y)**(2+sin(y)) / (2+sin(y))
def test_issue519():
assert integrate(pi*x**Rational(1,2),x) == 2*pi*x**Rational(3,2)/3
assert integrate(pi*x**Rational(1,2) + E*x**Rational(3,2),x) == \
2*pi*x**Rational(3,2)/3 + \
2*E *x**Rational(5,2)/5
def test_issue524():
assert integrate(cos((n+1) * x), x) == sin(x*(n+1)) / (n+1)
assert integrate(cos((n-1) * x), x) == sin(x*(n-1)) / (n-1)
assert integrate(cos((n+1) * x) + cos((n-1) * x), x) == \
sin(x*(n+1)) / (n+1) + \
sin(x*(n-1)) / (n-1)
def test_issue565():
assert integrate(-1./2 * x * sin(n * pi * x/2), [x, -2, 0]) == 2*cos(pi*n)/(pi*n)
assert integrate(-Rational(1)/2 * x * sin(n * pi * x/2), [x, -2, 0]) \
== 2*cos(pi*n)/(pi*n)
def test_issue580():
# definite integration of rational functions gives wrong answers
assert NS(Integral(1/(x**2-8*x+17), (x, 2, 4))) == '1.10714871779409'
def test_issue587(): # remove this when fresnel integrals are implemented
assert integrate(sin(x**2), x) == Integral(sin(x**2), x)
def test_integrate_units():
assert integrate(x * m/s, (x, 1*s, 5*s)) == 12*m*s
def test_transcendental_functions():
assert integrate(LambertW(2*x), x) == -x + x*LambertW(2*x) + x/LambertW(2*x)
def test_issue641():
f=4*log(x)-2*log(x)**2
fid=diff(integrate(f,x),x)
assert abs(f.subs(x,42).evalf() - fid.subs(x,42).evalf()) < 1e-10
def test_issue689():
assert integrate(1/(1+x**2), x) == atan(x)
def test_issue853():
f = sin(x)
assert integrate(f, x) == -cos(x)
raises(ValueError, "integrate(f, 2*x)")
def test_issue1417():
assert integrate(2**x - 2*x, x) == 2**x/log(2) - x**2
def test_matrices():
M = Matrix(2, 2, lambda i, j: (i+j+1)*sin((i+j+1)*x))
assert integrate(M, x) == Matrix([
[-cos(x), -cos(2*x)],
[-cos(2*x), -cos(3*x)],
])
# issue1012
def test_integrate_functions():
assert integrate(f(x), x) == Integral(f(x), x)
assert integrate(f(x), (x,0,1)) == Integral(f(x), (x,0,1))
assert integrate(f(x)*diff(f(x), x), x) == f(x)**2/2
assert integrate(diff(f(x),x) / f(x),x) == log(f(x))
def test_integrate_derivatives():
assert integrate(Derivative(f(x), x), x) == f(x)
assert integrate(Derivative(f(y), y), x) == x*Derivative(f(y), y)
def test_transform():
a = Integral(x**2+1, (x, -1, 2))
assert a.doit() == a.transform(x, 3*x+1).doit()
assert a.transform(x, 3*x+1).transform(x, 3*x+1, inverse=True) == a
assert a.transform(x, 3*x+1, inverse=True).transform(x, 3*x+1) == a
a = Integral(sin(1/x), (x, 0, 1))
assert a.transform(x, 1/x) == Integral(sin(x)/x**2, (x, 1, oo))
assert a.transform(x, 1/x).transform(x, 1/x) == a
a = Integral(exp(-x**2), (x, -oo, oo))
assert a.transform(x, 2*x) == Integral(2*exp(-4*x**2), (x, -oo, oo))
# < 3 arg limit handled properly
assert Integral(x, x).transform(x, a*x) == Integral(x*a**2, x)
raises(ValueError, "a.transform(x, 1/x)")
raises(ValueError, "a.transform(x, 1/x)")
_3 = S(3)
assert Integral(x, (x, 0, -_3)).transform(x, 1/x) == \
Integral(-1/x**3, (x, -oo, -1/_3))
assert Integral(x, (x, 0, _3)).transform(x, 1/x) == \
Integral(x**(-3), (x, 1/_3, oo))
def test_issue953():
f = S(1)/2*asin(x) + x*(1 - x**2)**(S(1)/2)/2
assert integrate(cos(asin(x)), x) == f
assert integrate(sin(acos(x)), x) == f
def NS(e, n=15, **options):
return sstr(sympify(e).evalf(n, **options), full_prec=True)
def test_evalf_integrals():
assert NS(Integral(x, (x, 2, 5)), 15) == '10.5000000000000'
gauss = Integral(exp(-x**2), (x, -oo, oo))
assert NS(gauss, 15) == '1.77245385090552'
assert NS(gauss**2 - pi + E*Rational(1,10**20), 15) in ('2.71828182845904e-20', '2.71828182845905e-20')
# A monster of an integral from http://mathworld.wolfram.com/DefiniteIntegral.html
t = Symbol('t')
a = 8*sqrt(3)/(1+3*t**2)
b = 16*sqrt(2)*(3*t+1)*(4*t**2+t+1)**Rational(3,2)
c = (3*t**2+1)*(11*t**2+2*t+3)**2
d = sqrt(2)*(249*t**2+54*t+65)/(11*t**2+2*t+3)**2
f = a - b/c - d
assert NS(Integral(f, (t, 0, 1)), 50) == NS((3*sqrt(2)-49*pi+162*atan(sqrt(2)))/12,50)
# http://mathworld.wolfram.com/VardisIntegral.html
assert NS(Integral(log(log(1/x))/(1+x+x**2), (x, 0, 1)), 15) == NS('pi/sqrt(3) * log(2*pi**(5/6) / gamma(1/6))', 15)
# http://mathworld.wolfram.com/AhmedsIntegral.html
assert NS(Integral(atan(sqrt(x**2+2))/(sqrt(x**2+2)*(x**2+1)), (x, 0, 1)), 15) == NS(5*pi**2/96, 15)
# http://mathworld.wolfram.com/AbelsIntegral.html
assert NS(Integral(x/((exp(pi*x)-exp(-pi*x))*(x**2+1)), (x, 0, oo)), 15) == NS('log(2)/2-1/4',15)
# Complex part trimming
# http://mathworld.wolfram.com/VardisIntegral.html
assert NS(Integral(log(log(sin(x)/cos(x))), (x, pi/4, pi/2)), 15, chop=True) == \
NS('pi/4*log(4*pi**3/gamma(1/4)**4)', 15)
#
# Endpoints causing trouble (rounding error in integration points -> complex log)
assert NS(2+Integral(log(2*cos(x/2)), (x, -pi, pi)), 17, chop=True) == NS(2, 17)
assert NS(2+Integral(log(2*cos(x/2)), (x, -pi, pi)), 20, chop=True) == NS(2, 20)
assert NS(2+Integral(log(2*cos(x/2)), (x, -pi, pi)), 22, chop=True) == NS(2, 22)
# Needs zero handling
assert NS(pi - 4*Integral('sqrt(1-x**2)', (x, 0, 1)), 15, maxn=30, chop=True) in ('0.0', '0')
# Oscillatory quadrature
a = Integral(sin(x)/x**2, (x, 1, oo)).evalf(maxn=15)
assert 0.49 < a < 0.51
assert NS(Integral(sin(x)/x**2, (x, 1, oo)), quad='osc') == '0.504067061906928'
assert NS(Integral(cos(pi*x+1)/x, (x, -oo, -1)), quad='osc') == '0.276374705640365'
@XFAIL
def test_evalf_issue_939():
# http://code.google.com/p/sympy/issues/detail?id=939
# The output form of an integral may differ by a step function between
# revisions, making this test a bit useless. This can't be said about
# other two tests. For now, all values of this evaluation are used here,
# but in future this should be reconsidered.
assert NS(integrate(1/(x**5+1), x).subs(x, 4), chop=True) in \
['-0.000976138910649103', '0.965906660135753', '1.93278945918216']
assert NS(Integral(1/(x**5+1), (x, 2, 4))) == '0.0144361088886740'
assert NS(integrate(1/(x**5+1), (x, 2, 4)), chop=True) == '0.0144361088886740'
def xtest_failing_integrals():
#---
# Double integrals not implemented
assert NS(Integral(sqrt(x)+x*y, (x, 1, 2), (y, -1, 1)), 15) == '2.43790283299492'
# double integral + zero detection
assert NS(Integral(sin(x+x*y), (x, -1, 1), (y, -1, 1)), 15) == '0.0'
def test_integrate_DiracDelta():
assert integrate(DiracDelta(x),x) == Heaviside(x)
assert integrate(DiracDelta(x) * f(x),x) == f(0) * Heaviside(x)
assert integrate(DiracDelta(x) * f(x),(x,-oo,oo)) == f(0)
assert integrate(DiracDelta(x) * f(x),(x,0,oo)) == f(0)/2
assert integrate(DiracDelta(x**2+x-2),x) - \
(Heaviside(-1 + x)/3 + Heaviside(2 + x)/3) == 0
assert integrate(cos(x)*(DiracDelta(x)+DiracDelta(x**2-1))*sin(x)*(x-pi),x) - \
(-pi*(cos(1)*Heaviside(-1 + x)*sin(1)/2 - cos(1)*Heaviside(1 + x)*sin(1)/2) + \
cos(1)*Heaviside(1 + x)*sin(1)/2 + cos(1)*Heaviside(-1 + x)*sin(1)/2) == 0
def test_subs1():
e = Integral(exp(x-y), x)
assert e.subs(y, 3) == Integral(exp(x-3), x)
e = Integral(exp(x-y), (x, 0, 1))
assert e.subs(y, 3) == Integral(exp(x-3), (x, 0, 1))
f = Lambda(x, exp(-x**2))
conv = Integral(f(x-y)*f(y), (y, -oo, oo))
assert conv.subs({x:0}) == Integral(exp(-2*y**2), (y, -oo, oo))
def test_subs2():
e = Integral(exp(x-y), x, t)
assert e.subs(y, 3) == Integral(exp(x-3), x, t)
e = Integral(exp(x-y), (x, 0, 1), (t, 0, 1))
assert e.subs(y, 3) == Integral(exp(x-3), (x, 0, 1), (t, 0, 1))
f = Lambda(x, exp(-x**2))
conv = Integral(f(x-y)*f(y), (y, -oo, oo), (t, 0, 1))
assert conv.subs({x:0}) == Integral(exp(-2*y**2), (y, -oo, oo), (t, 0, 1))
def test_subs3():
e = Integral(exp(x-y), (x, 0, y), (t, y, 1))
assert e.subs(y, 3) == Integral(exp(x-3), (x, 0, 3), (t, 3, 1))
f = Lambda(x, exp(-x**2))
conv = Integral(f(x-y)*f(y), (y, -oo, oo), (t, x, 1))
assert conv.subs({x:0}) == Integral(exp(-2*y**2), (y, -oo, oo), (t, 0, 1))
def test_subs4():
e = Integral(exp(x), (x, 0, y), (t, y, 1))
assert e.subs(y, 3) == Integral(exp(x), (x, 0, 3), (t, 3, 1))
f = Lambda(x, exp(-x**2))
conv = Integral(f(y)*f(y), (y, -oo, oo), (t, x, 1))
assert conv.subs({x:0}) == Integral(exp(-2*y**2), (y, -oo, oo), (t, 0, 1))
def test_subs5():
e = Integral(exp(-x**2), x)
assert e.subs(x, 5) == Integral(exp(-x**2), (x, 5))
e = Integral(exp(-x**2), (x, -oo, oo))
assert e.subs(x, 5) == e
e = Integral(exp(-x**2+y), x)
assert e.subs(x, 5) == Integral(exp(y - x**2), (x, 5))
assert e.subs(y, 5) == Integral(exp(-x**2+5), x)
e = Integral(exp(-x**2+y), (y, -oo, oo), (x, -oo, oo))
assert e.subs(x, 5) == e
assert e.subs(y, 5) == e
def test_subs6():
a, b = symbols('a b')
e = Integral(x*y, (x, f(x), f(y)))
assert e.subs(x, 1) == Integral(x*y, (x, f(1), f(y)))
assert e.subs(y, 1) == Integral(x, (x, f(x), f(1)))
e = Integral(x*y, (x, f(x), f(y)), (y, f(x), f(y)))
assert e.subs(x, 1) == Integral(x*y, (x, f(1), f(y)), (y, f(1), f(y)))
assert e.subs(y, 1) == Integral(x*y, (x, f(x), f(y)), (y, f(x), f(1)))
e = Integral(x*y, (x, f(x), f(a)), (y, f(x), f(a)))
assert e.subs(a, 1) == Integral(x*y, (x, f(x), f(1)), (y, f(x), f(1)))
def test_subs7():
e = Integral(x, (x, 1, y), (y, 1, 2))
assert e.subs({x:1, y:2}) == e
e = Integral(sin(x) + sin(y), (x, sin(x), sin(y)),
(y, 1, 2))
assert e._eval_subs(sin(y), 1) == e
assert e._eval_subs(sin(x), 1) == Integral(sin(x) + sin(y), (x, 1, sin(y)),
(y, 1, 2))
def test_integration_variable():
raises(ValueError, "Integral(exp(-x**2), 3)")
raises(ValueError, "Integral(exp(-x**2), (3, -oo, oo))")
def test_expand_integral():
assert Integral(cos(x**2)*(sin(x**2)+1),(x, 0, 1)).expand() == Integral(cos(x**2)*sin(x**2) + cos(x**2), (x, 0, 1))
assert Integral(cos(x**2)*(sin(x**2)+1),x).expand() == Integral(cos(x**2)*sin(x**2) + cos(x**2), x)
def test_as_sum_midpoint1():
e = Integral(sqrt(x**3+1), (x, 2, 10))
assert e.as_sum(1, method="midpoint") == 8*217**(S(1)/2)
assert e.as_sum(2, method="midpoint") == 4*65**(S(1)/2) + 12*57**(S(1)/2)
assert e.as_sum(3, method="midpoint") == 8*217**(S(1)/2)/3 + \
8*3081**(S(1)/2)/27 + 8*52809**(S(1)/2)/27
assert e.as_sum(4, method="midpoint") == 2*730**(S(1)/2) + \
4*7**(S(1)/2) + 4*86**(S(1)/2) + 6*14**(S(1)/2)
assert abs(e.as_sum(4, method="midpoint").n() - e.n()) < 0.5
e = Integral(sqrt(x**3+y**3), (x, 2, 10), (y, 0, 10))
raises(NotImplementedError, "e.as_sum(4)")
def test_as_sum_midpoint2():
e = Integral((x+y)**2, (x, 0, 1))
assert e.as_sum(1, method="midpoint").expand() == S(1)/4 + y + y**2
assert e.as_sum(2, method="midpoint").expand() == S(5)/16 + y + y**2
assert e.as_sum(3, method="midpoint").expand() == S(35)/108 + y + y**2
assert e.as_sum(4, method="midpoint").expand() == S(21)/64 + y + y**2
def test_as_sum_left():
e = Integral((x+y)**2, (x, 0, 1))
assert e.as_sum(1, method="left").expand() == y**2
assert e.as_sum(2, method="left").expand() == S(1)/8 + y/2 + y**2
assert e.as_sum(3, method="left").expand() == S(5)/27 + 2*y/3 + y**2
assert e.as_sum(4, method="left").expand() == S(7)/32 + 3*y/4 + y**2
def test_as_sum_right():
e = Integral((x+y)**2, (x, 0, 1))
assert e.as_sum(1, method="right").expand() == 1 + 2*y + y**2
assert e.as_sum(2, method="right").expand() == S(5)/8 + 3*y/2 + y**2
assert e.as_sum(3, method="right").expand() == S(14)/27 + 4*y/3 + y**2
assert e.as_sum(4, method="right").expand() == S(15)/32 + 5*y/4 + y**2
def test_as_sum_raises():
e = Integral((x+y)**2, (x, 0, 1))
raises(ValueError, "e.as_sum(-1)")
raises(ValueError, "e.as_sum(0)")
raises(NotImplementedError, "e.as_sum(oo)")
raises(NotImplementedError, "e.as_sum(3, method='xxxx2')")
def test_nested_doit():
e = Integral(Integral(x, x), x)
f = Integral(x, x, x)
assert e.doit() == f.doit()
def test_issue1566():
# Allow only upper or lower limit evaluation
e = Integral(x**2, (x, None, 1))
f = Integral(x**2, (x, 1, None))
assert e.doit() == Rational(1, 3)
assert f.doit() == Rational(-1, 3)
assert Integral(x*y, (x, None, y)).subs(y, t) == Integral(x*t, (x, None, t))
assert Integral(x*y, (x, y, None)).subs(y, t) == Integral(x*t, (x, t, None))
assert integrate(x**2, (x, None, 1)) == Rational(1, 3)
assert integrate(x**2, (x, 1, None)) == Rational(-1, 3)
assert integrate("x**2", ("x", "1", None)) == Rational(-1, 3)
def test_integral_reconstruct():
e = Integral(x**2, (x, -1, 1))
assert e == Integral(*e.args)
def test_doit():
e = Integral(Integral(2*x), (x, 0, 1))
assert e.doit() == Rational(1, 3)
assert e.doit(deep=False) == Rational(1, 3)
f = Function('f')
# doesn't matter if the integral can't be performed
assert Integral(f(x), (x, 1, 1)).doit() == 0
# doesn't matter if the limits can't be evaluated
assert Integral(0, (x, 1, Integral(f(x), x))).doit() == 0
def test_issue_1785():
assert integrate(sqrt(x)*(1+x)) == 2*x**Rational(3, 2)/3 + 2*x**Rational(5, 2)/5
assert integrate(x**x*(1+log(x))) == x**x
def test_is_number():
from sympy.abc import x, y, z
from sympy import cos, sin
assert Integral(x).is_number == False
assert Integral(1, x).is_number == False
assert Integral(1, (x, 1)).is_number == True
assert Integral(1, (x, 1, 2)).is_number == True
assert Integral(1, (x, 1, y)).is_number == False
assert Integral(x, y).is_number == False
assert Integral(x, (y, 1, x)).is_number == False
assert Integral(x, (y, 1, 2)).is_number == False
assert Integral(x, (x, 1, 2)).is_number == True
assert Integral(x, (y, 1, 1)).is_number == True
assert Integral(x*y, (x, 1, 2), (y, 1, 3)).is_number == True
assert Integral(x*y, (x, 1, 2), (y, 1, z)).is_number == False
assert Integral(x, (x, 1)).is_number == True
assert Integral(x, (x, 1, Integral(y, (y, 1, 2)))).is_number == True
# it is possible to get a false negative if the integrand is
# actually an unsimplified zero, but this is true of is_number in general.
assert Integral(sin(x)**2 + cos(x)**2 - 1, x).is_number == False
def test_symbols():
from sympy.abc import x, y, z
assert Integral(0, x).free_symbols == set()
assert Integral(x).free_symbols == set([x])
assert Integral(x, (x, None, y)).free_symbols == set([y])
assert Integral(x, (x, y, None)).free_symbols == set([y])
assert Integral(x, (x, 1, y)).free_symbols == set([y])
assert Integral(x, (x, y, 1)).free_symbols == set([y])
assert Integral(x, (x, x, y)).free_symbols == set([x, y])
assert Integral(x, x, y).free_symbols == set([x, y])
assert Integral(x, (x, 1, 2)).free_symbols == set()
assert Integral(x, (y, 1, 2)).free_symbols == set([x])
assert Integral(x, (y, z, z)).free_symbols == set()
assert Integral(x, (y, 1, 2), (y, None, None)).free_symbols == set([x, y])
assert Integral(x, (y, 1, 2), (x, 1, y)).free_symbols == set([y])
assert Integral(2, (y, 1, 2), (y, 1, x), (x, 1, 2)).free_symbols == set()
assert Integral(2, (y, x, 2), (y, 1, x), (x, 1, 2)).free_symbols == set()
assert Integral(2, (x, 1, 2), (y, x, 2), (y, 1, 2)).free_symbols == set([x])
def test_is_zero():
from sympy.abc import x, m, n
assert Integral(0, (x, 1, x)).is_zero
assert Integral(1, (x, 1, 1)).is_zero
assert Integral(1, (x, 1, 2)).is_zero is False
assert Integral(sin(m*x)*cos(n*x), (x, 0, 2*pi)).is_zero is None
def test_series():
from sympy.abc import x
i = Integral(cos(x))
e = i.lseries(x)
assert i.nseries(x, n=8).removeO() == Add(*[e.next() for j in range(4)])
def test_issue_1304():
z = Symbol('z', positive=True)
assert integrate(sqrt(x**2 + z**2),x) == z**2*asinh(x/z)/2 + x*(x**2 + z**2)**(S(1)/2)/2
assert integrate(sqrt(x**2 - z**2),x) == -z**2*acosh(x/z)/2 + x*(x**2 - z**2)**(S(1)/2)/2
@XFAIL
def test_issue_1304_2():
assert integrate(sqrt(-x**2 - 4), x) == -2*atan(x/(-4 - x**2)**(S(1)/2)) + x*(-4 - x**2)**(S(1)/2)/2
def test_issue_1001():
R = Symbol('R', positive=True)
assert integrate(sqrt(R**2 - x**2), (x, 0, R)) == pi*R**2/4
def test_issue2068():
from sympy.abc import w, x, y, z
f = Function('f')
assert Integral(Integral(f(x), x), x) == Integral(f(x), x, x)
assert Integral(f(x)).args == (f(x), Tuple(x))
assert Integral(Integral(f(x))).args == (f(x), Tuple(x), Tuple(x))
assert Integral(Integral(f(x)), y).args == (f(x), Tuple(x), Tuple(y))
assert Integral(Integral(f(x), z), y).args == (f(x), Tuple(z), Tuple(y))
assert Integral(Integral(Integral(f(x), x), y), z).args == \
(f(x), Tuple(x), Tuple(y), Tuple(z))
assert integrate(Integral(f(x), x), x) == Integral(f(x), x, x)
assert integrate(Integral(f(x), y), x) == Integral(y*f(x), x)
assert integrate(Integral(f(x), x), y) == Integral(y*f(x), x)
assert integrate(Integral(2, x), x) == x**2
assert integrate(Integral(2, x), y) == 2*x*y
# don't re-order given limits
assert Integral(1, x, y).args != Integral(1, y, x).args
    # do as many as possible
assert Integral(f(x), y, x, y, x).doit() == Integral(y**2*f(x)/2, x, x)
assert Integral(f(x), (x, 1, 2), (w, 1, x), (z, 1, y)).doit() == \
Integral(-f(x) + y*f(x), (x, 1, 2), (w, 1, x))
def test_issue_1791():
z = Symbol('z', positive=True)
assert integrate(exp(-log(x)**2),x) == pi**(S(1)/2)*erf(-S(1)/2 + log(x))*exp(S(1)/4)/2
assert integrate(exp(log(x)**2),x) == -I*pi**(S(1)/2)*erf(I*log(x) + I/2)*exp(-S(1)/4)/2
assert integrate(exp(-z*log(x)**2),x) == \
pi**(S(1)/2)*erf(z**(S(1)/2)*log(x) - 1/(2*z**(S(1)/2)))*exp(S(1)/(4*z))/(2*z**(S(1)/2))
def test_issue_1277():
from sympy import simplify
assert (simplify(integrate(n*(x**(1/n)-1), (x, 0, S.Half))) ==
simplify((n**2 - 2**(S(1)/n)*n**2 - n*2**(S(1)/n)) /
(2**(1 + S(1)/n) + n*2**(1 + S(1)/n))))
def test_issue_1418():
assert integrate((x**Rational(1,2) - x**3)/x**Rational(1,3), x) == \
6*x**(Rational(7,6))/7 - 3*x**(Rational(11,3))/11
def test_issue_1100():
assert integrate(exp(-I*2*pi*y*x)*x, (x, -oo, oo)) is S.NaN
def test_issue_841():
from sympy import simplify
a = Symbol('a', positive = True)
b = Symbol('b')
c = Symbol('c')
d = Symbol('d', positive = True)
assert integrate(exp(-x**2 + I*c*x), x) == pi**(S(1)/2)*erf(x - I*c/2)*exp(-c**S(2)/4)/2
assert integrate(exp(a*x**2 + b*x + c), x) == \
I*pi**(S(1)/2)*erf(-I*x*a**(S(1)/2) - I*b/(2*a**(S(1)/2)))*exp(c)*exp(-b**2/(4*a))/(2*a**(S(1)/2))
assert simplify(integrate(exp(-a*x**2 + 2*d*x), (x, -oo, oo))) == pi**(S(1)/2)*(1 + erf(oo - d/a**(S(1)/2))) \
*exp(d**2/a)/(2*a**(S(1)/2))
def test_issue_2314():
    # Note that this is not the same as testing ratint() because integrate()
# pulls out the coefficient.
a = Symbol('a')
assert integrate(-a/(a**2+x**2), x) == \
-a*(sqrt(-1/a**2)*log(x + a**2*sqrt(-1/a**2))/2 - sqrt(-1/a**2)*log(x -
a**2*sqrt(-1/a**2))/2)
def test_issue_1793a():
A, z, c = symbols('A z c')
P1 = -A*exp(-z)
P2 = -A/(c*t)*(sin(x)**2 + cos(y)**2)
assert integrate(c*(P2 - P1), t) == \
c*(A*(-cos(y)**2 - sin(x)**2)*log(c*t)/c + A*t*exp(-z))
def test_issue_1793b():
# Issues relating to issue 1497 are making the actual result of this hard
# to test. The answer should be something like
#
# (-sin(y) + sqrt(-72 + 48*cos(y) - 8*cos(y)**2)/2)*log(x + sqrt(-72 +
# 48*cos(y) - 8*cos(y)**2)/(2*(3 - cos(y)))) + (-sin(y) - sqrt(-72 +
# 48*cos(y) - 8*cos(y)**2)/2)*log(x - sqrt(-72 + 48*cos(y) -
# 8*cos(y)**2)/(2*(3 - cos(y)))) + x**2*sin(y)/2 + 2*x*cos(y)
expr = (sin(y)*x**3 + 2*cos(y)*x**2 + 12)/(x**2 + 2)
assert trigsimp(factor(integrate(expr, x).diff(x) - expr)) == 0
```
|
{
"source": "JegernOUTT/mmdetection",
"score": 2
}
|
#### File: models/backbones/vovnet.py
```python
from collections import OrderedDict
from typing import Optional, Sequence
import torch
import torch.nn as nn
from ..registry import BACKBONES
from mmdet.models.backbones.base_backbone import BaseBackbone, filter_by_out_idices
__all__ = ['VoVNet27Slim', 'VoVNet39', 'VoVNet57']
model_urls = {
'vovnet39': 'https://dl.dropbox.com/s/1lnzsgnixd8gjra/vovnet39_torchvision.pth?dl=1',
'vovnet57': 'https://dl.dropbox.com/s/6bfu9gstbwfw31m/vovnet57_torchvision.pth?dl=1'
}
def conv3x3(in_channels, out_channels, module_name, postfix,
stride=1, groups=1, kernel_size=3, padding=1):
"""3x3 convolution with padding"""
return [
('{}_{}/conv'.format(module_name, postfix),
nn.Conv2d(in_channels, out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
groups=groups,
bias=False)),
('{}_{}/norm'.format(module_name, postfix),
nn.BatchNorm2d(out_channels)),
('{}_{}/relu'.format(module_name, postfix),
nn.ReLU(inplace=True)),
]
def conv1x1(in_channels, out_channels, module_name, postfix,
stride=1, groups=1, kernel_size=1, padding=0):
"""1x1 convolution"""
return [
('{}_{}/conv'.format(module_name, postfix),
nn.Conv2d(in_channels, out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
groups=groups,
bias=False)),
('{}_{}/norm'.format(module_name, postfix),
nn.BatchNorm2d(out_channels)),
('{}_{}/relu'.format(module_name, postfix),
nn.ReLU(inplace=True)),
]
class _OSA_module(nn.Module):
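    """One-Shot Aggregation (OSA) block: a chain of 3x3 convolutions whose input
    and every intermediate output are concatenated once at the end and fused by a
    1x1 convolution (optionally with an identity shortcut)."""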
def __init__(self,
in_ch,
stage_ch,
concat_ch,
layer_per_block,
module_name,
identity=False):
super(_OSA_module, self).__init__()
self.identity = identity
self.layers = nn.ModuleList()
in_channel = in_ch
for i in range(layer_per_block):
self.layers.append(nn.Sequential(
OrderedDict(conv3x3(in_channel, stage_ch, module_name, i))))
in_channel = stage_ch
# feature aggregation
in_channel = in_ch + layer_per_block * stage_ch
self.concat = nn.Sequential(
OrderedDict(conv1x1(in_channel, concat_ch, module_name, 'concat')))
def forward(self, x):
identity_feat = x
output = []
output.append(x)
for layer in self.layers:
x = layer(x)
output.append(x)
x = torch.cat(output, dim=1)
xt = self.concat(x)
if self.identity:
xt = xt + identity_feat
return xt
class _OSA_stage(nn.Sequential):
def __init__(self,
in_ch,
stage_ch,
concat_ch,
block_per_stage,
layer_per_block,
stage_num):
super(_OSA_stage, self).__init__()
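        # Stage 2 follows the stem directly; every later stage begins with a stride-2 max pool for downsampling.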
if not stage_num == 2:
self.add_module('Pooling',
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True))
module_name = f'OSA{stage_num}_1'
self.add_module(module_name,
_OSA_module(in_ch,
stage_ch,
concat_ch,
layer_per_block,
module_name))
for i in range(block_per_stage - 1):
module_name = f'OSA{stage_num}_{i + 2}'
self.add_module(module_name,
_OSA_module(concat_ch,
stage_ch,
concat_ch,
layer_per_block,
module_name,
identity=True))
class VoVNetBase(BaseBackbone):
def __init__(self,
arch,
config_stage_ch,
config_concat_ch,
block_per_stage,
layer_per_block,
pretrained: bool = True,
progress: bool = True,
out_indices: Optional[Sequence[int]] = (1, 2, 3, 4)):
super(VoVNetBase, self).__init__(out_indices)
# Stem module
stem = conv3x3(3, 64, 'stem', '1', 2)
stem += conv3x3(64, 64, 'stem', '2', 1)
stem += conv3x3(64, 128, 'stem', '3', 2)
self.add_module('stem', nn.Sequential(OrderedDict(stem)))
stem_out_ch = [128]
in_ch_list = stem_out_ch + config_concat_ch[:-1]
self.stage_names = []
for i in range(4): # num_stages
name = f'stage{i + 2}'
self.stage_names.append(name)
self.add_module(name,
_OSA_stage(in_ch_list[i],
config_stage_ch[i],
config_concat_ch[i],
block_per_stage[i],
layer_per_block,
i + 2))
        self._initialize_weights()
        # Load the pretrained ImageNet weights only after every module has been
        # constructed; strict=False tolerates checkpoint entries (e.g. a
        # classifier head) that have no counterpart in this detection backbone.
        if pretrained:
            from torch.hub import load_state_dict_from_url
            state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
            self.load_state_dict(state_dict, strict=False)
@filter_by_out_idices
def forward(self, x):
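        # Collect the stem output plus each OSA stage output; the filter_by_out_idices
        # decorator then keeps only the skips selected by out_indices (assumed from its
        # name and its use in BaseBackbone).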
skips = []
x = self.stem(x)
skips.append(x)
for name in self.stage_names:
x = getattr(self, name)(x)
skips.append(x)
return skips
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
@BACKBONES.register_module
class VoVNet57(VoVNetBase):
r"""Constructs a VoVNet-57 model as described in
`"An Energy and GPU-Computation Efficient Backbone Networks"
<https://arxiv.org/abs/1904.09730>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
def __init__(self, pretrained: bool = True, progress: bool = True,
out_indices: Optional[Sequence[int]] = (1, 2, 3, 4)):
super().__init__(
arch='vovnet57',
config_stage_ch=[128, 160, 192, 224],
config_concat_ch=[256, 512, 768, 1024],
block_per_stage=[1, 1, 4, 3],
layer_per_block=5,
pretrained=pretrained,
progress=progress,
out_indices=out_indices)
@BACKBONES.register_module
class VoVNet39(VoVNetBase):
r"""Constructs a VoVNet-39 model as described in
`"An Energy and GPU-Computation Efficient Backbone Networks"
<https://arxiv.org/abs/1904.09730>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
def __init__(self, pretrained: bool = True, progress: bool = True,
out_indices: Optional[Sequence[int]] = (1, 2, 3, 4)):
super().__init__(
arch='vovnet39',
config_stage_ch=[128, 160, 192, 224],
config_concat_ch=[256, 512, 768, 1024],
block_per_stage=[1, 1, 2, 2],
layer_per_block=5,
pretrained=pretrained,
progress=progress,
out_indices=out_indices)
@BACKBONES.register_module
class VoVNet27Slim(VoVNetBase):
r"""Constructs a VoVNet-39 model as described in
`"An Energy and GPU-Computation Efficient Backbone Networks"
<https://arxiv.org/abs/1904.09730>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
def __init__(self, pretrained: bool = False, progress: bool = True,
out_indices: Optional[Sequence[int]] = (1, 2, 3, 4)):
super().__init__(
arch='vovnet27_slim',
config_stage_ch=[64, 80, 96, 112],
config_concat_ch=[128, 256, 384, 512],
block_per_stage=[1, 1, 1, 1],
layer_per_block=5,
pretrained=pretrained,
progress=progress,
out_indices=out_indices)
```
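The `_OSA_module` / `_OSA_stage` blocks above implement one-shot aggregation: every intermediate 3x3 conv output is kept and concatenated a single time at the end of the block, then reduced by a 1x1 conv. A minimal self-contained sketch of that aggregation pattern (the `TinyOSA` name and channel numbers are illustrative, not part of the repository):
```python
import torch
import torch.nn as nn
class TinyOSA(nn.Module):
    """ One-shot aggregation: keep every 3x3 conv output, concat once, reduce with a 1x1 conv. """
    def __init__(self, in_ch=128, stage_ch=64, concat_ch=256, layers=5):
        super().__init__()
        self.layers = nn.ModuleList()
        ch = in_ch
        for _ in range(layers):
            self.layers.append(nn.Sequential(
                nn.Conv2d(ch, stage_ch, kernel_size=3, padding=1, bias=False),
                nn.BatchNorm2d(stage_ch), nn.ReLU(inplace=True)))
            ch = stage_ch
        self.concat = nn.Sequential(
            nn.Conv2d(in_ch + layers * stage_ch, concat_ch, kernel_size=1, bias=False),
            nn.BatchNorm2d(concat_ch), nn.ReLU(inplace=True))
    def forward(self, x):
        outputs = [x]
        for layer in self.layers:
            x = layer(x)
            outputs.append(x)
        return self.concat(torch.cat(outputs, dim=1))
# Sanity check: a 128-channel map comes out with concat_ch channels, same spatial size.
assert TinyOSA()(torch.randn(1, 128, 56, 56)).shape == (1, 256, 56, 56)
```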
#### File: models/detectors/augmix_detector.py
```python
from abc import abstractmethod, ABC
import torch
import torch.nn.functional as F
from .single_stage import SingleStageDetector
from ..registry import DETECTORS
__all__ = ['AbstractAugmixDetector']
@DETECTORS.register_module
class AbstractAugmixDetector(SingleStageDetector, ABC):
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(AbstractAugmixDetector, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained)
self._js_loss_coeff = train_cfg.js_loss_coeff if hasattr(train_cfg, 'js_loss_coeff') else 1.
@abstractmethod
def get_objectness_tensor_by_bboxhead_output(self, x):
pass
# noinspection PyMethodOverriding
def forward_train(self,
img,
img_metas,
img_augmix_0,
img_augmix_1,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None):
objectness_outs = []
def _calc_losses(img):
if 'debug' in self.train_cfg and self.train_cfg['debug']:
self._debug_data_pipeline(img, img_metas, gt_bboxes, gt_labels)
x = self.extract_feat(img)
outs = self.bbox_head(x)
objectness_outs.append(self.get_objectness_tensor_by_bboxhead_output(outs))
loss_inputs = outs + (gt_bboxes, gt_labels, img_metas, self.train_cfg)
losses = self.bbox_head.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
return losses
losses = _calc_losses(img)
_calc_losses(img_augmix_0)
_calc_losses(img_augmix_1)
losses['js_loss'] = self.js_loss(*objectness_outs)
return losses
def js_loss(self, logits_clean: torch.Tensor, logits_aug1: torch.Tensor, logits_aug2: torch.Tensor):
# Clamp mixture distribution to avoid exploding KL divergence
p_mixture = torch.clamp((logits_clean + logits_aug1 + logits_aug2) / 3., 1e-7, 1).log()
return self._js_loss_coeff * (F.kl_div(p_mixture, logits_clean, reduction='batchmean') +
F.kl_div(p_mixture, logits_aug1, reduction='batchmean') +
F.kl_div(p_mixture, logits_aug2, reduction='batchmean')) / 3.
```
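The `js_loss` method above is an AugMix-style Jensen-Shannon consistency term between the clean prediction and the two augmented ones. A hedged standalone sketch of the same computation on plain probability tensors (the function name and shapes are illustrative):
```python
import torch
import torch.nn.functional as F
def js_consistency(p_clean, p_aug1, p_aug2, coeff=1.0):
    # Mixture of the three distributions, clamped so log() and the KL terms stay finite
    p_mix = torch.clamp((p_clean + p_aug1 + p_aug2) / 3., 1e-7, 1).log()
    return coeff * (F.kl_div(p_mix, p_clean, reduction='batchmean') +
                    F.kl_div(p_mix, p_aug1, reduction='batchmean') +
                    F.kl_div(p_mix, p_aug2, reduction='batchmean')) / 3.
# Identical distributions give a (numerically) zero loss; diverging ones increase it.
p = torch.softmax(torch.randn(4, 10), dim=-1)
assert js_consistency(p, p, p).item() < 1e-5
```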
|
{
"source": "Jegeva/BruCON_2021",
"score": 3
}
|
#### File: paho.mqtt.python/examples/client_rpc_math.py
```python
import context # Ensures paho is in PYTHONPATH
import sys
import time
import json
import paho.mqtt.client as mqtt
from paho.mqtt.packettypes import PacketTypes
# These will be updated with the server-assigned Client ID
client_id = "mathcli"
reply_to = ""
# This correlates the outbound request with the returned reply
corr_id = b"1"
# This is set in the message callback when we get the response
reply = None
# The MQTTv5 callback takes the additional 'props' parameter.
def on_connect(mqttc, userdata, flags, rc, props):
global client_id, reply_to
print("Connected: '"+str(flags)+"', '"+str(rc)+"', '"+str(props))
if hasattr(props, 'AssignedClientIdentifier'):
client_id = props.AssignedClientIdentifier
reply_to = "replies/math/" + client_id
mqttc.subscribe(reply_to)
# An incoming message should be the reply to our request
def on_message(mqttc, userdata, msg):
global reply
print(msg.topic+" "+str(msg.payload)+" "+str(msg.properties))
props = msg.properties
    if not hasattr(props, 'CorrelationData'):
        print("No correlation ID")
        return
# Match the response to the request correlation ID.
if props.CorrelationData == corr_id:
reply = msg.payload
if len(sys.argv) < 3:
print("USAGE: client_rpc_math.py [add|mult] n1 n2 ...")
sys.exit(1)
mqttc = mqtt.Client(client_id="", protocol=mqtt.MQTTv5)
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.connect(host='localhost', clean_start=True)
mqttc.loop_start()
# Wait for connection to set `client_id`, etc.
while not mqttc.is_connected():
time.sleep(0.1)
# Properties for the request specify the ResponseTopic and CorrelationData
props = mqtt.Properties(PacketTypes.PUBLISH)
props.CorrelationData = corr_id
props.ResponseTopic = reply_to
# Uncomment to see what got set
#print("Client ID: "+client_id)
#print("Reply To: "+reply_to)
#print(props)
# The requested operation, 'add' or 'mult'
func = sys.argv[1]
# Gather the numeric parameters as an array of numbers
# These can be int's or float's
args = []
for s in sys.argv[2:]:
args.append(float(s))
# Send the request
topic = "requests/math/" + func
payload = json.dumps(args)
mqttc.publish(topic, payload, qos=1, properties=props)
# Wait for the reply
while reply is None:
time.sleep(0.1)
# Extract the response and print it.
rsp = json.loads(reply)
print("Response: "+str(rsp))
mqttc.loop_stop()
```
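The client above relies on a responder that honours the MQTTv5 request/response pattern: read `ResponseTopic` and `CorrelationData` from the incoming request and publish the result back with the same correlation data. A hedged sketch of such a responder (not part of this repository; topic names follow the client above):
```python
import json
import paho.mqtt.client as mqtt
from paho.mqtt.packettypes import PacketTypes
def on_connect(client, userdata, flags, rc, props):
    client.subscribe("requests/math/#")
def on_message(client, userdata, msg):
    func = msg.topic.split("/")[-1]            # 'add' or 'mult'
    args = json.loads(msg.payload)
    result = sum(args) if func == "add" else multiply(args)
    # Echo the request's correlation data back on its ResponseTopic
    props = mqtt.Properties(PacketTypes.PUBLISH)
    props.CorrelationData = msg.properties.CorrelationData
    client.publish(msg.properties.ResponseTopic, json.dumps(result), qos=1, properties=props)
def multiply(values):
    result = 1.0
    for v in values:
        result *= v
    return result
srv = mqtt.Client(client_id="mathsrv", protocol=mqtt.MQTTv5)
srv.on_connect = on_connect
srv.on_message = on_message
srv.connect(host='localhost', clean_start=True)
srv.loop_forever()
```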
#### File: paho.mqtt.python/examples/client_session_present.py
```python
import context # Ensures paho is in PYTHONPATH
import paho.mqtt.client as mqtt
def on_connect(mqttc, obj, flags, rc):
if obj == 0:
print("First connection:")
elif obj == 1:
print("Second connection:")
elif obj == 2:
print("Third connection (with clean session=True):")
print(" Session present: " + str(flags['session present']))
print(" Connection result: " + str(rc))
mqttc.disconnect()
def on_disconnect(mqttc, obj, rc):
mqttc.user_data_set(obj + 1)
if obj == 0:
mqttc.reconnect()
def on_log(mqttc, obj, level, string):
print(string)
mqttc = mqtt.Client(client_id="asdfj", clean_session=False)
mqttc.on_connect = on_connect
mqttc.on_disconnect = on_disconnect
# Uncomment to enable debug messages
# mqttc.on_log = on_log
mqttc.user_data_set(0)
mqttc.connect("mqtt.eclipseprojects.io", 1883, 60)
mqttc.loop_forever()
# Clear session
mqttc = mqtt.Client(client_id="asdfj", clean_session=True)
mqttc.on_connect = on_connect
mqttc.user_data_set(2)
mqttc.connect("mqtt.eclipseprojects.io", 1883, 60)
mqttc.loop_forever()
```
#### File: paho.mqtt.python/examples/loop_trio.py
```python
import socket
import uuid
import paho.mqtt.client as mqtt
import trio
client_id = 'paho-mqtt-python/issue72/' + str(uuid.uuid4())
topic = client_id
print("Using client_id / topic: " + client_id)
class TrioAsyncHelper:
def __init__(self, client):
self.client = client
self.sock = None
self._event_large_write = trio.Event()
self.client.on_socket_open = self.on_socket_open
self.client.on_socket_register_write = self.on_socket_register_write
self.client.on_socket_unregister_write = self.on_socket_unregister_write
async def read_loop(self):
while True:
await trio.hazmat.wait_readable(self.sock)
self.client.loop_read()
async def write_loop(self):
while True:
await self._event_large_write.wait()
await trio.hazmat.wait_writable(self.sock)
self.client.loop_write()
async def misc_loop(self):
print("misc_loop started")
while self.client.loop_misc() == mqtt.MQTT_ERR_SUCCESS:
await trio.sleep(1)
print("misc_loop finished")
def on_socket_open(self, client, userdata, sock):
print("Socket opened")
self.sock = sock
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 2048)
def on_socket_register_write(self, client, userdata, sock):
print('large write request')
self._event_large_write.set()
def on_socket_unregister_write(self, client, userdata, sock):
print("finished large write")
self._event_large_write = trio.Event()
class TrioAsyncMqttExample:
def on_connect(self, client, userdata, flags, rc):
print("Subscribing")
client.subscribe(topic)
def on_message(self, client, userdata, msg):
print("Got response with {} bytes".format(len(msg.payload)))
def on_disconnect(self, client, userdata, rc):
print('Disconnect result {}'.format(rc))
async def test_write(self, cancel_scope: trio.CancelScope):
for c in range(3):
await trio.sleep(5)
print("Publishing")
self.client.publish(topic, b'Hello' * 40000, qos=1)
cancel_scope.cancel()
async def main(self):
self.client = mqtt.Client(client_id=client_id)
self.client.on_connect = self.on_connect
self.client.on_message = self.on_message
self.client.on_disconnect = self.on_disconnect
trio_helper = TrioAsyncHelper(self.client)
self.client.connect('mqtt.eclipseprojects.io', 1883, 60)
async with trio.open_nursery() as nursery:
nursery.start_soon(trio_helper.read_loop)
nursery.start_soon(trio_helper.write_loop)
nursery.start_soon(trio_helper.misc_loop)
nursery.start_soon(self.test_write, nursery.cancel_scope)
self.client.disconnect()
print("Disconnected")
print("Starting")
trio.run(TrioAsyncMqttExample().main)
print("Finished")
```
#### File: test/lib/context.py
```python
import sys
import os
import subprocess
try:
import ssl
except ImportError:
ssl = None
# Ensure can import paho_test package
try:
import paho_test
except ImportError:
# This part is only required when paho_test module is not on Python path
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
import inspect
cmd_subfolder = os.path.realpath(
os.path.abspath(
os.path.join(
os.path.split(
inspect.getfile(inspect.currentframe())
)[0],
"..",
)
)
)
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import paho_test
env = dict(os.environ)
pp = env.get('PYTHONPATH', '')
env['PYTHONPATH'] = '../../src' + os.pathsep + pp
def start_client():
args = [sys.executable, ] + sys.argv[1:]
client = subprocess.Popen(args, env=env)
return client
def check_ssl():
if ssl is None:
print("WARNING: SSL not available in current environment")
exit(0)
if not hasattr(ssl, 'SSLContext'):
print("WARNING: SSL without SSLContext is not supported")
exit(0)
```
|
{
"source": "jegger/kivy-lightdm-greeter",
"score": 2
}
|
#### File: jegger/kivy-lightdm-greeter/kivy-greeter.py
```python
import sys
from kivy.app import App
from kivy.support import install_gobject_iteration
from kivy.lang import Builder
from kivy.core.window import Window
from kivy.config import Config
from gi.repository import LightDM
kv = '''
FloatLayout:
username_spinner: username_spinner
session_spinner: session_spinner
info_label: info_label
AnchorLayout:
BoxLayout:
size_hint: None, None
size: 800, 280
info_label: info_label
orientation: 'vertical'
GridLayout:
cols: 2
spacing: 5
Label:
text: "Session"
                    halign: 'left'
                    valign: 'middle'
text_size: self.size
font_size: 40
size_hint_x: 0.4
Spinner:
id: session_spinner
font_size: 40
text: self.values[0] if self.values else ""
Label:
text: "Username"
haling: 'middle'
valing: 'left'
text_size: self.size
font_size: 40
size_hint_x: 0.4
Spinner:
id: username_spinner
font_size: 40
text: self.values[0] if self.values else ""
Label:
text: "Password"
                    halign: 'left'
                    valign: 'middle'
text_size: self.size
font_size: 40
size_hint_x: 0.4
TextInput:
id: password_input
text: ""
                    password: True
font_size: 40
multiline: False
background_normal: 'images/textinput.png'
background_active: 'images/textinput-active.png'
on_text_validate:
login_button.trigger_action()
Label:
id: info_label
size_hint_y: None
height: 30
color: 1,0,0,1
Button:
id: login_button
text: "Login"
size_hint_y: 0.3
on_press: app.login(username_spinner.text, password_input.text, session_spinner.text)
Image:
source: 'images/kivy_logo.png'
size: 183,120
pos: (self.parent.width-self.width)/2, 50
size_hint: None, None
'''
class GreeterApp(App):
def __init__(self, **kwargs):
super(GreeterApp, self).__init__(**kwargs)
self.password = ""
self.session = ""
# Connect to lightDM
install_gobject_iteration()
self.greeter = LightDM.Greeter()
self.greeter.connect("authentication-complete", self.authentication_complete_cb)
self.greeter.connect("show-prompt", self.show_prompt_cb)
self.greeter.connect_sync()
# Get all available sessions
available_sessions = []
for sess in LightDM.get_sessions():
available_sessions.append(LightDM.Session.get_key(sess))
# Get all available users
available_users = []
inst = LightDM.UserList.get_instance()
for user in LightDM.UserList.get_users(inst):
user_name = LightDM.User.get_name(user)
available_users.append(user_name)
self.root_widget = Builder.load_string(kv)
self.root_widget.username_spinner.values = available_users
self.root_widget.session_spinner.values = available_sessions
def build(self):
return self.root_widget
def login(self, username, password, session):
self.password = password
self.session = session
print >> sys.stderr, "Initial entry of username, send it to LightDM"
self.greeter.authenticate(username)
def show_prompt_cb(self, greeter, text, promptType):
print >> sys.stderr, "prompt type: " + str(promptType) + str(text)
if greeter.get_in_authentication():
greeter.respond(self.password)
def authentication_complete_cb(self, greeter):
if greeter.get_is_authenticated():
if not greeter.start_session_sync(self.session):
self.root_widget.info_label.text = "Error while starting session %s" % self.session
else:
print >> sys.stderr, "AUTH COMPLETED"
self.root_widget.info_label.text = ":-)"
self.stop()
else:
print >> sys.stderr, "Login failed"
self.root_widget.info_label.text = "Wrong credentials :-("
if __name__ == '__main__':
# set keyboard to onscreen
Config.set('kivy', 'keyboard_mode', 'systemandmulti')
Config.write()
Window.clearcolor = (0.4274509804, 0.4274509804, 0.4274509804, 1)
GreeterApp().run()
```
|
{
"source": "Jeglerjeg/pcbot",
"score": 3
}
|
#### File: pcbot/plugins/games.py
```python
import asyncio
import random
from datetime import datetime
from difflib import SequenceMatcher
from random import randint, choice
from threading import Timer
import discord
import bot
import plugins
client = plugins.client # type: bot.Client
# List containing all channels playing a game
started = []
def format_join_message(players: int, participants: list):
participant_list = "\n".join(participant.mention for participant in participants)
return f"To participate, press the join button! {len(participants)}/{players} joined!\n" \
f"Participants:\n{participant_list}"
class Game:
name = "Unnamed Game"
minimum_participants = 1
def __init__(self, message: discord.Message, num: int):
self.message = message
self.channel = message.channel
self.member = message.guild.me
self.num = num if num >= self.minimum_participants else self.minimum_participants
self.participants = []
async def on_start(self):
""" Notify the channel that the game has been initialized. """
embed = discord.Embed(title=f"**A game of {self.name} has started!**\n",
colour=discord.Colour.green(),
description=format_join_message(self.num, self.participants))
view = Join(game=self, game_name=self.name, players=self.num, embed=embed)
original_message = await self.channel.send(embed=embed, view=view)
await view.wait()
if len(self.participants) < self.num:
view.embed.colour = discord.Colour.red()
view.embed.description += f"\n\n**The {self.name} game failed to gather {self.num} participants.**"
started.pop(started.index(self.channel.id))
await original_message.edit(embed=view.embed, view=None)
async def prepare(self):
""" Prepare anything needed before starting the game. """
async def game(self):
""" Start playing the game. """
async def start(self):
""" Run the entire game's cycle. """
await self.on_start()
if len(self.participants) >= self.num:
await asyncio.sleep(1)
await self.prepare()
await self.game()
del started[started.index(self.channel.id)]
class Join(discord.ui.View):
def __init__(self, game: Game, game_name: str, players: int, embed: discord.Embed):
super().__init__()
self.participants = []
self.embed = embed
self.game_name = game_name
self.game = game
self.players = players
self.timeout = 30
@discord.ui.button(label='Join', style=discord.ButtonStyle.green)
async def join(self, interaction: discord.Interaction, button: discord.ui.Button):
if interaction.user not in self.participants:
self.participants.append(interaction.user)
self.game.participants.append(interaction.user)
await interaction.response.defer()
self.embed.description = format_join_message(self.players, self.participants)
await interaction.message.edit(embed=self.embed)
if len(self.participants) >= self.players:
self.stop()
else:
await interaction.response.send_message("You've already joined the game.", ephemeral=True)
class Roulette(Game):
""" A game of Roulette. """
name = "Russian Roulette"
def __init__(self, message: discord.Message, num: int):
super().__init__(message, num)
self.bullets = []
async def prepare(self):
""" Shuffle the bullets. """
self.bullets = [0] * len(self.participants)
self.bullets[randint(0, len(self.participants) - 1)] = 1
async def game(self):
""" Start playing. """
random.shuffle(self.participants)
for i, member in enumerate(self.participants):
await client.send_message(self.channel, f"{member.mention} is up next! Say `go` whenever you are ready.")
def check(m):
return m.channel == self.channel and m.author == member and "go" in m.content.lower()
try:
reply = await client.wait_for_message(timeout=15, check=check)
except asyncio.TimeoutError:
reply = None
hit = ":dash:"
if self.bullets[i] == 1 or reply is None:
hit = ":boom:"
if reply is None:
await client.send_message(self.channel, "*fuck you*")
await client.send_message(self.channel, f"{member.mention} {hit} :gun: ")
if self.bullets[i] == 1:
break
await client.send_message(self.channel, "**GAME OVER**")
class HotPotato(Game):
name = "Hot Potato"
minimum_participants = 3
def __init__(self, message: discord.Message, num: int):
super().__init__(message, num)
self.time_remaining = 0
def timer(self):
""" I honestly don't remember how this function works. """
self.time_remaining -= 1
if self.time_remaining > 0:
Timer(1, self.timer).start()
async def game(self):
""" Start the game. No comments because I was stupid and now I'm too
lazy to comment everything in. """
self.time_remaining = randint(
int(pow(14 * len(self.participants), 0.8)),
int(pow(30 * len(self.participants), 0.8))
)
member = choice(self.participants)
Timer(1, self.timer).start()
reply = True
pass_to = []
notify = randint(2, int(self.time_remaining / 2))
while self.time_remaining > 0:
if not pass_to:
pass_from = list(self.participants)
pass_from.pop(pass_from.index(member))
pass_to = [choice(pass_from)]
pass_from.pop(pass_from.index(pass_to[0]))
pass_to.append(choice(pass_from))
if reply is not None:
await client.send_message(self.channel,
f"{member.mention} :bomb: got the bomb! "
f"Pass it to either {pass_to[0].mention} or {pass_to[1].mention}!")
def check(m):
return m.channel == self.channel and m.author == member and m.mentions and m.mentions[0] in pass_to
wait = (self.time_remaining - notify) if (self.time_remaining >= notify) else self.time_remaining
try:
reply = await client.wait_for_message(timeout=wait, check=check)
except asyncio.TimeoutError:
reply = None
if reply:
member = reply.mentions[0]
pass_to = []
if self.channel.permissions_for(self.member).manage_messages:
asyncio.ensure_future(client.delete_message(reply))
elif self.time_remaining == notify:
asyncio.ensure_future(client.send_message(self.channel, ":bomb: :fire: **IT'S GONNA BLOW!**"))
self.time_remaining -= 1
await client.send_message(self.channel, f"{member.mention} :fire: :boom: :boom: :fire:")
await client.send_message(self.channel, "**GAME OVER**")
class Typing(Game):
name = "Typing"
sentences = ["GID A ragte omg"]
reply = "{member.mention} finished in **{time:.0f} seconds** / **{wpm:.0f}wpm** / **{accuracy:.02%}**"
minimum_wpm = 40
def __init__(self, message: discord.Message, num: int):
super().__init__(message, num)
self.sentence = ""
async def prepare(self):
""" Get the sentence to send. """
self.sentence = choice(self.sentences)
async def send_sentence(self):
""" Generate the function for sending the sentence. """
await client.send_message(self.channel, "**Type**: " + self.sentence)
def total_estimated_words(self):
""" Return the estimated words in our sentence. """
return len(self.sentence) / 5
def calculate_accuracy(self, content: str):
""" Calculate the accuracy. """
return SequenceMatcher(a=self.sentence, b=content).ratio()
def calculate_wpm(self, delta_seconds: int):
""" Calculate the gross WPM from the given timedelta. """
minutes = delta_seconds / 60
return self.total_estimated_words() / minutes
def calculate_timeout(self):
""" Calculate the timeout for this game. This is the same as calculate_wpm,
however it uses the same formula to calculate the time needed. """
return self.total_estimated_words() / self.minimum_wpm * 60
def is_participant(self, message: discord.Message):
""" Check when waiting for a message and remove them from our list. """
if message.author in self.participants:
self.participants.remove(message.author)
return True
return False
async def game(self):
""" Run the game. """
await self.send_sentence()
checkpoint = time_started = datetime.now()
timeout = self.calculate_timeout()
# We'll wait for a message from all of our participants
for i in range(len(self.participants)):
def check(message):
return message.channel == self.channel and self.is_participant(message) is True
try:
reply = await client.wait_for_message(timeout=timeout, check=check)
except asyncio.TimeoutError:
await client.send_message(self.channel, "**Time is up.**")
return
# Delete the member's reply in order to avoid cheating
asyncio.ensure_future(client.delete_message(reply))
now = datetime.now()
# Calculate the time elapsed since the game started
time_elapsed = (now - time_started).total_seconds()
# Calculate the accuracy, wpm and send the message
accuracy = self.calculate_accuracy(reply.clean_content)
wpm = self.calculate_wpm(int(time_elapsed))
m = self.reply.format(member=reply.author, time=time_elapsed, wpm=wpm, accuracy=accuracy)
asyncio.ensure_future(client.send_message(self.channel, m))
# Reduce the timeout by the current time elapsed and create a checkpoint for the next timeout calculation
timeout -= int((now - checkpoint).total_seconds())
checkpoint = now
await asyncio.sleep(1)
await client.send_message(self.channel, "**Everyone finished!**")
desc_template = "Starts a game of {game.name}. To participate, say `I` in the chat.\n\n" \
"The optional `participants` argument sets a custom number of participants, where " \
"`{game.minimum_participants}` is the minimum."
async def init_game(message: discord.Message, game, num: int):
""" Initialize a game.
:param game: A Game object.
:param num: The specified participants
"""
if num > message.guild.member_count:
num = sum(1 for m in message.guild.members if not m.bot and m.status is not discord.Status.offline)
# The channel should not be playing two games at once
assert message.channel.id not in started, "**This channel is already playing.**"
# Start the game
started.append(message.channel.id)
await game(message, num).start()
@plugins.command(description=desc_template.format(game=Roulette))
async def roulette(message: discord.Message, participants: int = 6):
""" The roulette command. Description is defined using a template. """
await init_game(message, Roulette, participants)
@plugins.command(description=desc_template.format(game=HotPotato))
async def hotpotato(message: discord.Message, participants: int = 4):
""" The hotpotato command. Description is defined using a template. """
await init_game(message, HotPotato, participants)
@plugins.command(description=desc_template.format(game=Typing))
async def typing(message: discord.Message, participants: int = 2):
""" The typing command. Description is defined using a template. """
await init_game(message, Typing, participants)
async def on_reload(name: str):
""" Keep the list of current games when reloading. """
global started
local_started = started
await plugins.reload(name)
started = local_started
```
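The `Typing` game above derives both the score and its timeout from the same gross-WPM formula (five characters count as one word). A small standalone check of that arithmetic, with illustrative numbers:
```python
def gross_wpm(sentence: str, seconds: float) -> float:
    # estimated words = characters / 5, WPM = words per minute
    return (len(sentence) / 5) / (seconds / 60)
# A 150-character sentence typed in 45 seconds: 30 words / 0.75 min = 40 wpm,
# which is exactly the minimum_wpm the game uses to derive its timeout.
assert abs(gross_wpm("x" * 150, 45) - 40.0) < 1e-9
```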
#### File: plugins/osulib/caching.py
```python
import json
import os
from datetime import datetime
mapcache_path = "plugins/osulib/mapdatacache"
setcache_path = "plugins/osulib/setdatacache"
def cache_beatmapset(beatmap: dict, map_id: int):
""" Saves beatmapsets to cache. """
beatmapset_path = os.path.join(setcache_path, str(map_id) + ".json")
if not os.path.exists(setcache_path):
os.makedirs(setcache_path)
if not os.path.exists(mapcache_path):
os.makedirs(mapcache_path)
beatmapset = beatmap.copy()
beatmap["time_cached"] = datetime.utcnow().isoformat()
with open(beatmapset_path, "w", encoding="utf-8") as file:
json.dump(beatmap, file)
del beatmapset["beatmaps"]
del beatmapset["converts"]
for diff in beatmap["beatmaps"]:
beatmap_path = os.path.join(mapcache_path, str(diff["id"]) + "-" + str(diff["mode"]) + ".json")
diff["time_cached"] = datetime.utcnow().isoformat()
diff["beatmapset"] = beatmapset
with open(beatmap_path, "w", encoding="utf-8") as f:
json.dump(diff, f)
if beatmap["converts"]:
for convert in beatmap["converts"]:
convert_path = os.path.join(mapcache_path, str(convert["id"]) + "-" + str(convert["mode"]) + ".json")
convert["time_cached"] = datetime.utcnow().isoformat()
convert["beatmapset"] = beatmapset
with open(convert_path, "w", encoding="utf-8") as fp:
json.dump(convert, fp)
def retrieve_cache(map_id: int, map_type: str, mode: str = None):
""" Retrieves beatmap or beatmapset cache from memory or file if it exists """
# Check if cache should be validated for beatmap or beatmapset
result = None
if map_type == "set":
if not os.path.exists(setcache_path):
os.makedirs(setcache_path)
beatmap_path = os.path.join(setcache_path, str(map_id) + ".json")
else:
if not os.path.exists(mapcache_path):
os.makedirs(mapcache_path)
beatmap_path = os.path.join(mapcache_path, str(map_id) + "-" + mode + ".json")
if os.path.isfile(beatmap_path):
with open(beatmap_path, encoding="utf-8") as fp:
result = json.load(fp)
return result
def validate_cache(beatmap: dict):
""" Check if the map cache is still valid. """
if beatmap is None:
return False
valid_result = True
cached_time = datetime.fromisoformat(beatmap["time_cached"])
time_now = datetime.utcnow()
previous_sr_update = datetime(2021, 8, 5)
diff = time_now - cached_time
if cached_time < previous_sr_update:
valid_result = False
elif beatmap["status"] == "loved":
if diff.days > 30:
valid_result = False
elif beatmap["status"] == "pending" or beatmap["status"] == "graveyard" or beatmap["status"] == "wip" \
or beatmap["status"] == "qualified":
if diff.days > 7:
valid_result = False
return valid_result
```
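A hedged usage sketch for the helpers above: consult the on-disk cache first and signal the caller to re-fetch when the entry is missing or stale (the wrapper name and fallback behaviour are illustrative, not part of the plugin; the import assumes the plugin package is on the path):
```python
from plugins.osulib.caching import retrieve_cache, validate_cache
def get_cached_beatmap(map_id: int, mode: str):
    # Returns a cached beatmap dict, or None when the caller should hit the
    # API again and then store the fresh data with cache_beatmapset().
    cached = retrieve_cache(map_id, "map", mode)
    return cached if validate_cache(cached) else None
```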
#### File: plugins/osulib/enums.py
```python
from enum import Enum
from plugins.osulib.constants import mode_names
class UpdateModes(Enum):
""" Enums for the various notification update modes.
Values are valid names in a tuple. """
Full = ("full", "on", "enabled", "f", "e")
No_Mention = ("no_mention", "nomention", "silent")
Minimal = ("minimal", "quiet", "m")
PP = ("pp", "diff", "p")
Disabled = ("none", "off", "disabled", "n", "d")
@classmethod
def get_mode(cls, mode: str):
""" Return the mode with the specified name. """
for enum in cls:
if mode.lower() in enum.value:
return enum
return None
class Mods(Enum):
""" Enum for displaying mods. """
NF = 0
EZ = 1
TD = 2
HD = 3
HR = 4
SD = 5
DT = 6
RX = 7
HT = 8
NC = 9
FL = 10
AU = 11
SO = 12
AP = 13
PF = 14
Key4 = 15
Key5 = 16
Key6 = 17
Key7 = 18
Key8 = 19
FI = 20
RD = 21
Cinema = 22
Key9 = 24
KeyCoop = 25
Key1 = 26
Key3 = 27
Key2 = 28
ScoreV2 = 29
LastMod = 30
KeyMod = Key4 | Key5 | Key6 | Key7 | Key8
FreeModAllowed = NF | EZ | HD | HR | SD | FL | FI | RX | AP | SO | KeyMod # ¯\_(ツ)_/¯
ScoreIncreaseMods = HD | HR | DT | FL | FI
def __new__(cls, num):
""" Convert the given value to 2^num. """
obj = object.__new__(cls)
obj._value_ = 2 ** num
return obj
@classmethod
def list_mods(cls, bitwise: int):
""" Return a list of mod enums from the given bitwise (enabled_mods in the osu! API) """
bin_str = str(bin(bitwise))[2:]
bin_list = [int(d) for d in bin_str[::-1]]
mods_bin = (pow(2, i) for i, d in enumerate(bin_list) if d == 1)
mods = [cls(mod) for mod in mods_bin]
# Manual checks for multiples
if Mods.DT in mods and Mods.NC in mods:
mods.remove(Mods.DT)
return mods
@classmethod
def format_mods(cls, mods):
""" Return a string with the mods in a sorted format, such as DTHD.
mods is either a bitwise or a list of mod enums.
"""
if isinstance(mods, int):
mods = cls.list_mods(mods)
assert isinstance(mods, list)
return "".join((mod for mod in mods) if mods else ["Nomod"])
class GameMode(Enum):
""" Enum for gamemodes. """
osu = 0
taiko = 1
fruits = 2
mania = 3
@classmethod
def get_mode(cls, mode: str):
""" Return the mode with the specified string. """
for mode_name, names in mode_names.items():
for name in names:
if name.lower().startswith(mode.lower()):
return GameMode.__members__[mode_name]
return None
```
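The `Mods` enum above maps each mod to a power of two so the API's `enabled_mods` bitmask can be decoded by `list_mods`. A standalone check of that bit arithmetic (the value 72 is illustrative):
```python
bitwise = 72                                   # 2**3 (HD) + 2**6 (DT)
set_bits = [i for i in range(32) if bitwise & (1 << i)]
assert set_bits == [3, 6]                      # bit 3 -> HD, bit 6 -> DT, i.e. "HDDT"
```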
#### File: osulib/formatting/embed_format.py
```python
import discord
from plugins.osulib import enums, pp, api
from plugins.osulib.formatting import score_format
from plugins.osulib.utils import user_utils
def get_embed_from_template(description: str, color: discord.Colour, author_text: str, author_url: str,
author_icon: str, thumbnail_url: str = "", time: str = "", potential_string: str = "",
completion_rate: str = ""):
embed = discord.Embed(color=color)
embed.description = description
embed.set_author(name=author_text, url=author_url, icon_url=author_icon)
if thumbnail_url:
embed.set_thumbnail(url=thumbnail_url)
footer = []
if potential_string:
footer.append(potential_string)
if completion_rate:
footer.append(completion_rate)
if time:
footer.append(time)
embed.set_footer(text="\n".join(footer))
return embed
async def create_score_embed_with_pp(member: discord.Member, osu_score: dict, beatmap: dict,
mode: enums.GameMode, osu_tracking: dict, scoreboard_rank: bool = False,
twitch_link: bool = False, time: bool = False):
""" Returns a score embed for use outside of automatic score notifications. """
score_pp = await pp.get_score_pp(osu_score, mode, beatmap)
mods = enums.Mods.format_mods(osu_score["mods"])
if score_pp is not None and osu_score["pp"] is None:
osu_score["pp"] = score_pp.pp
elif osu_score["pp"] is None:
osu_score["pp"] = 0
if score_pp is not None:
beatmap["difficulty_rating"] = pp.get_beatmap_sr(score_pp, beatmap, mods)
if ("max_combo" not in beatmap or not beatmap["max_combo"]) and score_pp and score_pp.max_combo:
beatmap["max_combo"] = score_pp.max_combo
# There might not be any events
if scoreboard_rank is False and str(member.id) in osu_tracking and "new" in osu_tracking[str(member.id)] \
and osu_tracking[str(member.id)]["new"]["events"]:
scoreboard_rank = api.rank_from_events(osu_tracking[str(member.id)]["new"]["events"],
str(osu_score["beatmap"]["id"]), osu_score)
time_string = ""
if time:
time_string = score_format.get_formatted_score_time(osu_score)
    # Hide the potential pp / completion rate footer for perfect, passed scores
    potential_pp = score_pp if score_pp is not None and not bool(osu_score["perfect"]
                                                                 and osu_score["passed"]) else None
    thumbnail_url = osu_score["beatmapset"]["covers"]["list@2x"] if "beatmapset" in osu_score \
        else beatmap["beatmapset"]["covers"]["list@2x"]
    description = await score_format.format_new_score(mode, osu_score, beatmap, scoreboard_rank,
                                                      member if twitch_link else None)
    embed = get_embed_from_template(description, member.color, osu_score["user"]["username"],
                                    user_utils.get_user_url(str(member.id)),
                                    osu_score["user"]["avatar_url"], thumbnail_url, time=time_string,
                                    potential_string=score_format.format_potential_pp(potential_pp, osu_score),
                                    completion_rate=score_format.format_completion_rate(osu_score, potential_pp))
    return embed
```
#### File: osulib/formatting/misc_format.py
```python
import logging
import traceback
from datetime import datetime
import discord
from pcbot import utils
from plugins.osulib import enums
from plugins.osulib.constants import host
from plugins.osulib.utils import misc_utils
from plugins.twitchlib import twitch
def format_mode_name(mode: enums.GameMode, short_name: bool = False, abbreviation: bool = False):
""" Return formatted mode name for user facing modes. """
name = ""
if mode is enums.GameMode.osu:
if not short_name:
name = "osu!"
elif short_name:
name = "S"
elif mode is enums.GameMode.mania:
if not short_name and not abbreviation:
name = "osu!mania"
elif short_name:
name = "M"
elif abbreviation:
name = "o!m"
elif mode is enums.GameMode.taiko:
if not short_name and not abbreviation:
name = "osu!taiko"
elif short_name:
name = "T"
elif abbreviation:
name = "o!t"
elif mode is enums.GameMode.fruits:
if not short_name and not abbreviation:
name = "osu!catch"
elif short_name:
name = "C"
elif abbreviation:
name = "o!c"
return name
def format_user_diff(mode: enums.GameMode, data_old: dict, data_new: dict):
""" Get a bunch of differences and return a formatted string to send.
iso is the country code. """
pp_rank = int(data_new["statistics"]["global_rank"]) if data_new["statistics"]["global_rank"] else 0
pp_country_rank = int(data_new["statistics"]["country_rank"]) if data_new["statistics"]["country_rank"] else 0
iso = data_new["country"]["code"]
rank = -int(misc_utils.get_diff(data_old, data_new, "global_rank", statistics=True))
country_rank = -int(misc_utils.get_diff(data_old, data_new, "country_rank", statistics=True))
accuracy = misc_utils.get_diff(data_old, data_new, "hit_accuracy", statistics=True)
pp_diff = misc_utils.get_diff(data_old, data_new, "pp", statistics=True)
ranked_score = misc_utils.get_diff(data_old, data_new, "ranked_score", statistics=True)
rankings_url = f"{host}rankings/osu/performance"
# Find the performance page number of the respective ranks
formatted = [f"\u2139`{format_mode_name(mode, abbreviation=True)} "
f"{utils.format_number(data_new['statistics']['pp'], 2)}pp "
f"{utils.format_number(pp_diff, 2):+}pp`",
f" [\U0001f30d]({rankings_url}?page="
f"{pp_rank // 50 + 1})`#{pp_rank:,}{'' if int(rank) == 0 else f' {int(rank):+}'}`",
f" [{utils.text_to_emoji(iso)}]({rankings_url}?country={iso}&page="
f"{pp_country_rank // 50 + 1})`"
f"#{pp_country_rank:,}{'' if int(country_rank) == 0 else f' {int(country_rank):+}'}`"]
rounded_acc = utils.format_number(accuracy, 3)
if rounded_acc > 0:
formatted.append("\n\U0001f4c8") # Graph with upwards trend
elif rounded_acc < 0:
formatted.append("\n\U0001f4c9") # Graph with downwards trend
else:
formatted.append("\n\U0001f3af") # Dart
formatted.append(f"`{utils.format_number(data_new['statistics']['hit_accuracy'], 3)}%"
f"{'' if rounded_acc == 0 else f' {rounded_acc:+}%'}`")
formatted.append(f' \U0001f522`{data_new["statistics"]["ranked_score"]:,}'
f'{"" if ranked_score == 0 else f" {int(ranked_score):+,}"}`')
return "".join(formatted)
async def format_stream(member: discord.Member, osu_score: dict, beatmap: dict):
""" Format the stream url and a VOD button when possible. """
stream_url = None
for activity in member.activities:
if activity and activity.type == discord.ActivityType.streaming and hasattr(activity, "platform") \
and activity.platform.lower() == "twitch":
stream_url = activity.url
if not stream_url:
return ""
# Add the stream url and return immediately if twitch is not setup
text = [f"**[Watch live]({stream_url})**"]
if not twitch.twitch_client:
text.append("\n")
return "".join(text)
# Try getting the vod information of the current stream
try:
twitch_id = await twitch.get_id(member)
vod_request = await twitch.get_videos(twitch_id)
assert len(vod_request) >= 1
except Exception:
logging.error(traceback.format_exc())
text.append("\n")
return "".join(text)
vod = vod_request[0]
# Find the timestamp of where the play would have started without pausing the game
score_created = datetime.fromisoformat(osu_score["created_at"])
vod_created = vod.created_at
beatmap_length = int(beatmap["hit_length"])
# Return if the stream was started after the score was set
if vod_created > score_created:
text.append("\n")
return "".join(text)
# Convert beatmap length when speed mods are enabled
mods = osu_score["mods"]
if "DT" in mods or "NC" in mods:
beatmap_length /= 1.5
elif "HT" in mods:
beatmap_length /= 0.75
# Get the timestamp in the VOD when the score was created
timestamp_score_created = (score_created - vod_created).total_seconds()
timestamp_play_started = timestamp_score_created - beatmap_length
# Add the vod url with timestamp to the formatted text
text.append(f" | **[`Video of this play`]({vod.url}?t={int(timestamp_play_started)}s)**\n")
return "".join(text)
```
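A worked sketch of the VOD timestamp arithmetic in `format_stream` above, with illustrative times: the stream started at 12:00:00, the score arrived at 12:10:00, and a 300 s map was played with DT (1.5x speed):
```python
from datetime import datetime
vod_created = datetime(2022, 1, 1, 12, 0, 0)
score_created = datetime(2022, 1, 1, 12, 10, 0)
beatmap_length = 300 / 1.5                     # DT shortens the map to 200 s
timestamp_play_started = (score_created - vod_created).total_seconds() - beatmap_length
assert timestamp_play_started == 400.0         # the VOD link would end in ?t=400s
```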
#### File: pcbot/plugins/prank.py
```python
from io import BytesIO
import discord
from PIL import Image, ImageDraw, ImageFont
import bot
import plugins
from pcbot import Annotate
client = plugins.client # type: bot.Client
prank_path = "plugins/pranklib/"
__commands = []
image_base = Image.open(prank_path + "discord_prank.png").convert("RGBA")
image_width, image_height = image_base.size
@plugins.command()
async def prank(message: discord.Message, phrase: Annotate.CleanContent = "IT'S A"):
""" Prank! """
phrase = phrase.upper()
# Initialize the image and font
image_text = Image.new("RGBA", image_base.size, (255, 255, 255, 0))
image_font = ImageFont.truetype(prank_path + "American Captain.ttf", 50)
image_context = ImageDraw.Draw(image_text)
# Set width and height and scale down when necessary
width, height = image_context.textsize(phrase, image_font)
font_size = 50
if width > image_width:
scaled_font = None
while width > image_width:
scaled_font = ImageFont.truetype(prank_path + "American Captain.ttf", font_size)
width, height = image_context.textsize(phrase, scaled_font)
font_size -= 1
image_font = scaled_font
# Set x and y coordinates for centered text
x = (image_width - width) / 2
y = (image_height - height / 2) - image_height / 1.3
# Draw border
shadow_offset = font_size // 25
image_context.text((x - shadow_offset, y), phrase, font=image_font, fill=(0, 0, 0, 255))
image_context.text((x + shadow_offset, y), phrase, font=image_font, fill=(0, 0, 0, 255))
image_context.text((x, y - shadow_offset), phrase, font=image_font, fill=(0, 0, 0, 255))
image_context.text((x, y + shadow_offset), phrase, font=image_font, fill=(0, 0, 0, 255))
# Draw text
image_context.text((x, y), phrase, font=image_font, fill=(255, 255, 255, 255))
# Combine the base image with the font image
image = Image.alpha_composite(image_base, image_text)
# Upload the image
buffer = BytesIO()
image.save(buffer, "PNG")
buffer.seek(0)
await client.send_file(message.channel, buffer, filename="pranked.png")
```
|
{
"source": "Jeglet/pcbot",
"score": 3
}
|
#### File: Jeglet/pcbot/bot.py
```python
import asyncio
import inspect
import logging
import sys
import traceback
from argparse import ArgumentParser
from copy import copy
from datetime import datetime
import discord
import plugins
from pcbot import utils, config
# Sets the version to enable accessibility for other modules
__version__ = config.set_version("PCBOT V3")
class Client(discord.Client):
""" Custom Client class to hold the event dispatch override and
some helper functions. """
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.time_started = datetime.utcnow()
self.last_deleted_messages = []
async def _handle_event(self, func, event, *args, **kwargs):
""" Handle the event dispatched. """
try:
result = await func(*args, **kwargs)
except AssertionError as e:
if event == "message": # Find the message object and send the proper feedback
message = args[0]
await self.send_message(message.channel, str(e))
else:
logging.error(traceback.format_exc())
await self.on_error(event, *args, **kwargs)
except:
logging.error(traceback.format_exc())
await self.on_error(event, *args, **kwargs)
else:
if result is True and event == "message":
log_message(args[0], prefix="... ")
def dispatch(self, event, *args, **kwargs):
""" Override event dispatch to handle plugin events. """
# Exclude blank messages
if event == "message":
message = args[0]
if not message.content and not message.attachments:
return
# Find every event that has a discord.Member argument, and filter out bots and self
member = None
for arg in list(args) + list(kwargs.values()):
if isinstance(arg, discord.User):
member = arg
break
if isinstance(arg, discord.Message):
member = arg.author
break
super().dispatch(event, *args, **kwargs)
# We get the method name and look through our plugins' event listeners
method = "on_" + event
if method in plugins.events:
for func in plugins.events[method]:
# We'll only ignore bot messages if the event has disabled for bots
if member and member.bot and not func.bot:
continue
# Same goes for messages sent by ourselves. Naturally this requires func.bot == True
if member and member == client.user and not func.self:
continue
client.loop.create_task(self._handle_event(func, event, *args, **kwargs))
@staticmethod
async def send_message(destination, content=None, *args, **kwargs):
""" Override to check if content is str and replace mass user mentions. """
        # Convert content to str, but also log this since it shouldn't happen
if content is not None:
if not isinstance(content, str):
# Log the traceback too when the content is an exception (it was probably meant to be
# converted to string) as to make debugging easier
tb = ""
if isinstance(content, Exception):
tb = "\n" + "\n".join(traceback.format_exception(type(content), content, content.__traceback__))
logging.warning("type '%s' was passed to client.send_message: %s%s", type(content), content, tb)
content = str(content)
# Replace any @here and @everyone to avoid using them
if not kwargs.pop("allow_everyone", None):
content = content.replace("@everyone", "@ everyone").replace("@here", "@ here")
return await destination.send(content, *args, **kwargs)
async def send_file(self, destination, fp, *, filename=None, content=None, tts=False):
""" Override send_file to notify the guild when an attachment could not be sent. """
try:
return await destination.send(content=content, tts=tts,
file=discord.File(fp, filename=filename))
except discord.errors.Forbidden:
return await self.send_message(destination, "**I don't have the permissions to send my attachment.**")
async def delete_message(self, message):
""" Override to add info on the last deleted message. """
self.last_deleted_messages = [message]
await message.delete()
async def delete_messages(self, channel, messages):
""" Override to add info on the last deleted messages. """
self.last_deleted_messages = list(messages)
await channel.delete_messages(messages=messages)
async def wait_for_message(self, timeout=None, *, check=None, bot=False):
""" Override the check with the bot keyword: if bot=False, the function
won't accept messages from bot accounts, where if bot=True it doesn't care. """
def new_check(m):
return (
check(m) and (True if bot else not m.author.bot)
)
return await super().wait_for("message", check=new_check, timeout=timeout)
@staticmethod
async def say(message: discord.Message, content: str):
""" Equivalent to client.send_message(message.channel, content) """
msg = await client.send_message(message.channel, content)
return msg
def parse_arguments():
""" Parse startup arguments """
parser = ArgumentParser(description="Run PCBOT.")
parser.add_argument("--version", "-V", help="Return the current version.",
action="version", version=__version__)
# Setup a login group for handling only token or email, but not both
login_group = parser.add_mutually_exclusive_group()
login_group.add_argument("--token", "-t", help="The token to login with. Prompts if omitted.")
shard_group = parser.add_argument_group(title="Sharding",
description="Arguments for sharding for bots on 2500+ guilds")
shard_group.add_argument("--shard-id", help="Shard id. --shard-total must also be specified when used.", type=int,
default=None)
shard_group.add_argument("--shard-total", help="Total number of shards.", type=int, default=None)
parser.add_argument("--new-pass", "-n", help="Always prompts for password.", action="store_true")
parser.add_argument("--log-level", "-l",
help="Use the specified logging level (see the docs on logging for values).",
type=lambda s: getattr(logging, s.upper()), default=logging.INFO, metavar="LEVEL")
parser.add_argument("--enable-protocol-logging", "-p", help="Enables logging protocol events. THESE SPAM THE LOG.",
action="store_true")
parser.add_argument("--log-file", "-o", help="File to log to. Prints to terminal if omitted.")
parsed_args = parser.parse_args()
return parsed_args
start_args = parse_arguments()
# Setup our client
if start_args.shard_id is not None:
if start_args.shard_total is None:
raise ValueError("--shard-total must be specified")
client = Client(intents=discord.Intents.all(), shard_id=start_args.shard_id, shard_count=start_args.shard_total,
loop=asyncio.ProactorEventLoop() if sys.platform == "win32" else None)
else:
client = Client(intents=discord.Intents.all(),
loop=asyncio.ProactorEventLoop() if sys.platform == "win32" else None)
autosave_interval = 60 * 30
# Migrate deprecated values to updated values
config.migrate()
async def autosave():
""" Sleep for set time (default 30 minutes) before saving. """
    while not client.is_closed():
await asyncio.sleep(autosave_interval)
await plugins.save_plugins()
logging.debug("Plugins saved")
def log_message(message: discord.Message, prefix: str = ""):
""" Logs a command/message. """
logging.info("%s@%s%s -> %s", prefix, message.author,
" ({})".format(message.guild.name) if not isinstance(message.channel,
discord.abc.PrivateChannel) else "",
message.content.split("\n")[0])
async def execute_command(command: plugins.Command, message: discord.Message, *args, **kwargs):
""" Execute a command and send any AttributeError exceptions. """
app_info = await client.application_info()
try:
await command.function(message, *args, **kwargs)
except AssertionError as e:
await client.say(message, str(e) or command.error or plugins.format_help(command, message.guild, message))
except:
logging.error(traceback.format_exc())
if plugins.is_owner(message.author) and config.owner_error:
await client.say(message, utils.format_code(traceback.format_exc()))
else:
await client.say(message, "An error occurred while executing this command. If the error persists, "
"please send a PM to {}.".format(app_info.owner))
def default_self(anno, default, message: discord.Message):
""" A silly function to make Annotate.Self work. """
if default is utils.Annotate.Self:
if anno is utils.Annotate.Member:
return message.author
if anno is utils.Annotate.Channel:
return message.channel
return default
def override_annotation(anno):
""" Returns an annotation of a discord object as an Annotate object. """
if anno is discord.Member:
return utils.Annotate.Member
if anno is discord.TextChannel:
return utils.Annotate.Channel
return anno
async def parse_annotation(param: inspect.Parameter, default, arg: str, index: int, message: discord.Message):
""" Parse annotations and return the command to use.
index is basically the arg's index in shelx.split(message.content) """
if default is param.empty:
default = None
if param.annotation is not param.empty: # Any annotation is a function or Annotation enum
anno = override_annotation(param.annotation)
def content(s):
return utils.split(s, maxsplit=index)[-1].strip("\" ")
# Valid enum checks
if isinstance(anno, utils.Annotate):
annotate = None
if anno is utils.Annotate.Content: # Split and get raw content from this point
annotate = content(message.content) or default
elif anno is utils.Annotate.LowerContent: # Lowercase of above check
annotate = content(message.content).lower() or default
elif anno is utils.Annotate.CleanContent: # Split and get clean raw content from this point
annotate = content(message.clean_content) or default
elif anno is utils.Annotate.LowerCleanContent: # Lowercase of above check
annotate = content(message.clean_content).lower() or default
elif anno is utils.Annotate.Member: # Checks member names or mentions
annotate = utils.find_member(message.guild, arg) or default_self(anno, default, message)
elif anno is utils.Annotate.Channel: # Checks text channel names or mentions
annotate = utils.find_channel(message.guild, arg) or default_self(anno, default, message)
elif anno is utils.Annotate.VoiceChannel: # Checks voice channel names or mentions
annotate = utils.find_channel(message.guild, arg, channel_type="voice")
elif anno is utils.Annotate.Code: # Works like Content but extracts code
annotate = utils.get_formatted_code(utils.split(message.content, maxsplit=index)[-1]) or default
return annotate
try: # Try running as a method
if getattr(anno, "allow_spaces", False):
arg = content(message.content)
# Pass the message if the argument has this specified
if getattr(anno, "pass_message", False):
result = anno(message, arg)
else:
result = anno(arg)
# The function can be a coroutine
if inspect.isawaitable(result):
result = await result
return result if result is not None else default
except TypeError as e:
raise TypeError(
"Command parameter annotation must be either pcbot.utils.Annotate, a callable or a coroutine") from e
except AssertionError as e: # raise the error in order to catch it at a lower level
raise AssertionError from e
except: # On error, eg when annotation is int and given argument is str
return None
return str(arg) or default # Return str of arg if there was no annotation
async def parse_command_args(command: plugins.Command, cmd_args: list, message: discord.Message):
""" Parse commands from chat and return args and kwargs to pass into the
command's function. """
signature = inspect.signature(command.function)
args, kwargs = [], {}
index = -1
start_index = command.depth # The index would be the position in the group
num_kwargs = sum(1 for param in signature.parameters.values() if param.kind is param.KEYWORD_ONLY)
num_required_kwargs = sum(1 for param in signature.parameters.values()
if param.kind is param.KEYWORD_ONLY and param.default is param.empty)
pos_param = None
num_given_kwargs = 0
has_pos = any(param.kind is param.VAR_POSITIONAL for param in signature.parameters.values())
num_pos_args = 0
# Parse all arguments
for param in signature.parameters.values():
index += 1
# Skip the first argument, as this is a message.
if index == 0:
continue
# Any argument to fetch
if index + 1 <= len(cmd_args): # If there is an argument passed
cmd_arg = cmd_args[index]
else:
if param.default is not param.empty:
anno = override_annotation(param.annotation)
if param.kind is param.POSITIONAL_OR_KEYWORD:
args.append(default_self(anno, param.default, message))
elif param.kind is param.KEYWORD_ONLY:
kwargs[param.name] = default_self(anno, param.default, message)
if not isinstance(command.pos_check, bool):
index -= 1
continue # Move onwards once we find a default
if num_pos_args == 0:
index -= 1
break # We're done when there is no default argument and none passed
if param.kind is param.POSITIONAL_OR_KEYWORD: # Parse the regular argument
tmp_arg = await parse_annotation(param, param.default, cmd_arg, index + start_index, message)
if tmp_arg is not None:
args.append(tmp_arg)
else:
return args, kwargs, False # Force quit
elif param.kind is param.KEYWORD_ONLY: # Parse a regular arg as a kwarg
# We want to override the default, as this is often handled by python itself.
# It also seems to break some flexibility when parsing commands with positional arguments
# followed by a keyword argument with it's default being anything but None.
default = param.default if isinstance(param.default, utils.Annotate) else None
tmp_arg = await parse_annotation(param, default, cmd_arg, index + start_index, message)
if tmp_arg is not None:
kwargs[param.name] = tmp_arg
num_given_kwargs += 1
else: # It didn't work, so let's try parsing it as an optional argument
if isinstance(command.pos_check, bool) and pos_param:
tmp_arg = await parse_annotation(pos_param, None, cmd_arg, index + start_index, message)
if tmp_arg is not None:
args.append(tmp_arg)
num_pos_args += 1
continue
return args, kwargs, False # Force quit
elif param.kind is param.VAR_POSITIONAL: # Parse all positional arguments
if num_kwargs == 0 or not isinstance(command.pos_check, bool):
end_search = None
else:
end_search = -num_kwargs
pos_param = param
for cmd_arg in cmd_args[index:end_search]:
# Do not register the positional argument if it does not meet the optional criteria
if not isinstance(command.pos_check, bool):
if not command.pos_check(cmd_arg):
break
tmp_arg = await parse_annotation(param, None, cmd_arg, index + start_index, message)
# Add an option if it's not None. Since positional arguments are optional,
# it will not matter that we don't pass it.
if tmp_arg is not None:
args.append(tmp_arg)
num_pos_args += 1
# Update the new index
index += (num_pos_args - 1) if num_pos_args else -1
# Number of required arguments are: signature variables - client and message
# If there are no positional arguments, subtract one from the required arguments
num_args = len(signature.parameters.items()) - 1
if not num_required_kwargs:
num_args -= (num_kwargs - num_given_kwargs)
if has_pos:
num_args -= int(not bool(num_pos_args))
num_given = index # Arguments parsed
if has_pos:
num_given -= (num_pos_args - 1) if not num_pos_args == 0 else 0
complete = (num_given == num_args)
# The command is incomplete if positional arguments are forced
if complete and command.pos_check is True and num_pos_args == 0:
complete = False
# print(num_given, num_args)
# print(args, kwargs)
return args, kwargs, complete
async def parse_command(command: plugins.Command, cmd_args: list, message: discord.Message):
""" Try finding a command """
cmd_args = cmd_args[command.depth:]
send_help = False
# If the last argument ends with the help argument, skip parsing and display help
if len(cmd_args) > 1 and cmd_args[-1] in config.help_arg or (
command.disabled_pm and isinstance(message.channel, discord.abc.PrivateChannel)):
complete = False
args, kwargs = [], {}
send_help = True
else:
# Parse the command and return the parsed arguments
args, kwargs, complete = await parse_command_args(command, cmd_args, message)
# If command parsing failed, display help for the command or the error message
if not complete:
log_message(message) # Log the command
if command.disabled_pm and isinstance(message.channel, discord.abc.PrivateChannel):
await client.say(message, "This command can not be executed in a private message.")
else:
if command.error and len(cmd_args) > 1 and not send_help:
await client.say(message, command.error)
else:
if len(cmd_args) == 1:
send_help = True
embed = discord.Embed(color=message.author.color)
embed.description = plugins.format_help(command, message.guild, message,
no_subcommand=not send_help)
await client.send_message(message.channel, embed=embed)
command = None
return command, args, kwargs
@client.event
async def on_ready():
""" Log user and user ID after bot has logged in. """
logging.info("Logged in as\n{%s} ({%s})\n%s", client.user, client.user.id, "-" * len(str(client.user.id)))
@client.event
async def on_message(message: discord.Message):
""" What to do on any message received.
The bot will handle all commands in plugins and send on_message to plugins using it. """
# Make sure the client is ready before processing commands
await client.wait_until_ready()
start_time = datetime.utcnow()
# Make a local copy of the message since some attributes are changed and they shouldn't be overridden
# in plugin based on_message events
original_message = message
message = copy(message)
# We don't care about channels we can't write in as the bot usually sends feedback
if message.guild and message.guild.owner and not message.channel.permissions_for(message.guild.me).send_messages:
return
# Don't accept commands from bot accounts
if message.author.bot:
return
# Find guild specific settings
command_prefix = config.guild_command_prefix(message.guild)
case_sensitive = config.guild_case_sensitive_commands(message.guild)
# Check that the message is a command
if not message.content.startswith(command_prefix):
return
# Remove the prefix and make sure that a command was actually specified
message.content = message.content[len(command_prefix):]
if not message.content or message.content.startswith(" "):
return
# Split content into arguments by space (surround with quotes for spaces)
cmd_args = utils.split(message.content)
# Try finding a command object using the command name (first argument)
command = plugins.get_command(cmd_args[0], case_sensitive=case_sensitive)
if not command:
return
try:
# Find the subcommand if there is one
command = plugins.get_sub_command(command, *cmd_args[1:], case_sensitive=case_sensitive)
# Check that the author is allowed to use the command
if not plugins.can_use_command(command, message.author, message.channel):
await client.send_message(message.channel, "You don't have permission to use this command.")
return
# Parse the command with the user's arguments
parsed_command, args, kwargs = await parse_command(command, cmd_args, message)
except AssertionError as e: # Return any feedback given from the command via AssertionError, or the command help
await client.send_message(message.channel,
str(e) or plugins.format_help(command, message.guild, message, no_subcommand=True))
log_message(message)
return
if not parsed_command:
return
# Log the command executed and execute said command
log_message(original_message)
client.loop.create_task(execute_command(parsed_command, original_message, *args, **kwargs))
# Manually dispatch an event for when commands are requested
client.dispatch("command_requested", message, parsed_command, *args, **kwargs)
# Log time spent parsing the command
stop_time = datetime.utcnow()
time_elapsed = (stop_time - start_time).total_seconds() * 1000
logging.debug("Time spent parsing command: {elapsed:.6f}ms".format(elapsed=time_elapsed))
async def add_tasks():
""" Create any tasks for plugins' on_ready() coroutine and create task
for autosaving. """
await client.wait_until_ready()
logging.info("Setting up background tasks.")
# Call any on_ready function in plugins
for plugin in plugins.all_values():
if hasattr(plugin, "on_ready"):
client.loop.create_task(plugin.on_ready())
client.loop.create_task(autosave())
def main():
""" The main function. Parses command line arguments, sets up logging,
gets the user's login info, sets up any background task and starts the bot. """
# Setup logger with level specified in start_args or logging.INFO
logging.basicConfig(filename=start_args.log_file, level=start_args.log_level,
format="%(levelname)s %(asctime)s [%(module)s / %(name)s]: %(message)s")
# Always keep the websockets.protocol logger at INFO as a minimum unless --enable-protocol-logging is set
if not start_args.enable_protocol_logging:
discord_logger = logging.getLogger("websockets.protocol")
discord_logger.setLevel(start_args.log_level if start_args.log_level >= logging.INFO else logging.INFO)
# Setup some config for more customization
bot_meta = config.Config("bot_meta", pretty=True, data=dict(
name="PCBOT",
command_prefix=config.default_command_prefix,
case_sensitive_commands=config.default_case_sensitive_commands,
github_repo="pckv/pcbot/",
display_owner_error_in_chat=False
))
config.name = bot_meta.data["name"]
config.github_repo = bot_meta.data["github_repo"]
config.default_command_prefix = bot_meta.data["command_prefix"]
config.default_case_sensitive_commands = bot_meta.data["case_sensitive_commands"]
config.owner_error = bot_meta.data["display_owner_error_in_chat"]
# Set the client for the plugins to use
plugins.set_client(client)
utils.set_client(client)
# Load plugin for builtin commands
plugins.load_plugin("builtin", "pcbot")
# Load all dynamic plugins
plugins.load_plugins()
# Login with the specified token if specified
token = start_args.token or input("Token: ")
login = [token]
# Setup background tasks
client.loop.create_task(add_tasks())
try:
client.run(*login)
except discord.errors.LoginFailure as e:
logging.error(utils.format_exception(e))
if __name__ == "__main__":
main()
```
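The on_message handler above strips the guild's command prefix and splits the remaining content into arguments before any command lookup happens. Below is a minimal standalone sketch of that step, using the standard library's shlex as a stand-in for pcbot's utils.split (an assumption; the real splitter may treat quotes and escapes differently).
```python
# Standalone sketch of the prefix-strip and argument-split step from on_message.
# shlex stands in for pcbot's utils.split (an assumption).
import shlex

def extract_command_args(content: str, command_prefix: str = "!"):
    """ Return the list of command arguments, or None if this is not a command. """
    if not content.startswith(command_prefix):
        return None
    content = content[len(command_prefix):]
    if not content or content.startswith(" "):
        return None
    # Quotes group words into one argument, mirroring "surround with quotes for spaces"
    return shlex.split(content)

print(extract_command_args('!pokedex "mr. mime"'))  # ['pokedex', 'mr. mime']
print(extract_command_args('not a command'))        # None
```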
#### File: plugins/osulib/api.py
```python
import asyncio
from datetime import datetime
import json
import logging
import os
import re
from collections import namedtuple
from enum import Enum
from pcbot import utils
api_url = "https://osu.ppy.sh/api/v2/"
access_token = ""
requests_sent = 0
mapcache_path = "plugins/osulib/mapdatacache"
setcache_path = "plugins/osulib/setdatacache"
replay_path = os.path.join("plugins/osulib/", "replay.osr")
mode_names = {
"osu": ["standard", "osu", "std", "osu!"],
"taiko": ["taiko", "osu!taiko"],
"fruits": ["catch", "ctb", "fruits", "osu!catch"],
"mania": ["mania", "keys", "osu!mania"]
}
async def set_oauth_client(b: str, s: str):
""" Set the osu! API key. This simplifies every API function as they
can exclude the "k" parameter.
"""
client_id = b
client_secret = s
await get_access_token(client_id, client_secret)
async def get_access_token(client_id, client_secret):
""" Retrieves access token from API and refreshes token after it expires. """
params = {
"grant_type": "client_credentials",
"client_id": int(client_id),
"client_secret": client_secret,
"scope": "public"
}
result = await utils.post_request("https://osu.ppy.sh/oauth/token", call=utils.convert_to_json, data=params)
global requests_sent
requests_sent += 1
global access_token
access_token = result["access_token"]
await asyncio.sleep(result["expires_in"])
await get_access_token(client_id, client_secret)
class GameMode(Enum):
""" Enum for gamemodes. """
osu = 0
taiko = 1
fruits = 2
mania = 3
@classmethod
def get_mode(cls, mode: str):
""" Return the mode with the specified string. """
for mode_name, names in mode_names.items():
for name in names:
if name.lower().startswith(mode.lower()):
return GameMode.__members__[mode_name]
return None
class Mods(Enum):
""" Enum for displaying mods. """
NF = 0
EZ = 1
TD = 2
HD = 3
HR = 4
SD = 5
DT = 6
RX = 7
HT = 8
NC = 9
FL = 10
AU = 11
SO = 12
AP = 13
PF = 14
Key4 = 15
Key5 = 16
Key6 = 17
Key7 = 18
Key8 = 19
FI = 20
RD = 21
Cinema = 22
Key9 = 24
KeyCoop = 25
Key1 = 26
Key3 = 27
Key2 = 28
ScoreV2 = 29
LastMod = 30
KeyMod = Key4 | Key5 | Key6 | Key7 | Key8
FreeModAllowed = NF | EZ | HD | HR | SD | FL | FI | RX | AP | SO | KeyMod # ¯\_(ツ)_/¯
ScoreIncreaseMods = HD | HR | DT | FL | FI
def __new__(cls, num):
""" Convert the given value to 2^num. """
obj = object.__new__(cls)
obj._value_ = 2 ** num
return obj
@classmethod
def list_mods(cls, bitwise: int):
""" Return a list of mod enums from the given bitwise (enabled_mods in the osu! API) """
bin_str = str(bin(bitwise))[2:]
bin_list = [int(d) for d in bin_str[::-1]]
mods_bin = (pow(2, i) for i, d in enumerate(bin_list) if d == 1)
mods = [cls(mod) for mod in mods_bin]
# Manual checks for multiples
if Mods.DT in mods and Mods.NC in mods:
mods.remove(Mods.DT)
return mods
@classmethod
def format_mods(cls, mods):
""" Return a string with the mods in a sorted format, such as DTHD.
mods is either a bitwise or a list of mod enums.
"""
if isinstance(mods, int):
mods = cls.list_mods(mods)
assert isinstance(mods, list)
return "".join((mod for mod in mods) if mods else ["Nomod"])
def def_section(api_name: str, first_element: bool = False):
""" Add a section using a template to simplify adding API functions. """
async def template(url=api_url, request_tries: int = 1, **params):
global requests_sent
# Add the API key
headers = {
"Authorization": "Bearer " + access_token
}
# Download using a URL of the given API function name
for i in range(request_tries):
try:
response = await utils.download_json(url + api_name, headers=headers, **params)
except ValueError as e:
logging.warning("ValueError Calling %s: %s", url + api_name, e)
else:
requests_sent += 1
if response is not None:
break
else:
return None
# Unless we want to extract the first element, return the entire object (usually a list)
if not first_element:
return response
# If the returned value should be the first element, see if we can cut it
return response[0] if len(response) > 0 else None
# Set the correct name of the function and add simple docstring
template.__name__ = api_name
template.__doc__ = "Get " + ("list" if not first_element else "dict") + " using " + api_url + api_name
return template
def cache_beatmapset(beatmap: dict, map_id: int):
""" Saves beatmapsets to cache. """
beatmapset_path = os.path.join(setcache_path, str(map_id) + ".json")
if not os.path.exists(setcache_path):
os.makedirs(setcache_path)
if not os.path.exists(mapcache_path):
os.makedirs(mapcache_path)
beatmapset = beatmap.copy()
beatmap["time_cached"] = datetime.utcnow().isoformat()
with open(beatmapset_path, "w", encoding="utf-8") as file:
json.dump(beatmap, file)
del beatmapset["beatmaps"]
del beatmapset["converts"]
for diff in beatmap["beatmaps"]:
beatmap_path = os.path.join(mapcache_path, str(diff["id"]) + "-" + str(diff["mode"]) + ".json")
diff["time_cached"] = datetime.utcnow().isoformat()
diff["beatmapset"] = beatmapset
with open(beatmap_path, "w", encoding="utf-8") as f:
json.dump(diff, f)
if beatmap["converts"]:
for convert in beatmap["converts"]:
convert_path = os.path.join(mapcache_path, str(convert["id"]) + "-" + str(convert["mode"]) + ".json")
convert["time_cached"] = datetime.utcnow().isoformat()
convert["beatmapset"] = beatmapset
with open(convert_path, "w", encoding="utf-8") as fp:
json.dump(convert, fp)
def retrieve_cache(map_id: int, map_type: str, mode: str = None):
""" Retrieves beatmap or beatmapset cache from memory or file if it exists """
# Check if cache should be validated for beatmap or beatmapset
result = None
if map_type == "set":
if not os.path.exists(setcache_path):
os.makedirs(setcache_path)
beatmap_path = os.path.join(setcache_path, str(map_id) + ".json")
else:
if not os.path.exists(mapcache_path):
os.makedirs(mapcache_path)
beatmap_path = os.path.join(mapcache_path, str(map_id) + "-" + mode + ".json")
if os.path.isfile(beatmap_path):
with open(beatmap_path, encoding="utf-8") as fp:
result = json.load(fp)
return result
def validate_cache(beatmap: dict):
""" Check if the map cache is still valid. """
if beatmap is None:
return False
valid_result = True
cached_time = datetime.fromisoformat(beatmap["time_cached"])
time_now = datetime.utcnow()
previous_sr_update = datetime(2021, 8, 5)
diff = time_now - cached_time
if cached_time < previous_sr_update:
valid_result = False
elif beatmap["status"] == "loved":
if diff.days > 30:
valid_result = False
elif beatmap["status"] == "pending" or beatmap["status"] == "graveyard" or beatmap["status"] == "wip" \
or beatmap["status"] == "qualified":
if diff.days > 7:
valid_result = False
return valid_result
# Define all osu! API requests using the template
async def beatmap_lookup(params, map_id, mode):
""" Looks up a beatmap unless cache exists"""
result = retrieve_cache(map_id, "map", mode)
valid_result = validate_cache(result)
if not valid_result:
await beatmapset_lookup(params=params)
result = retrieve_cache(map_id, "map", mode)
return result
async def beatmapset_lookup(params):
""" Looks up a beatmapset using a beatmap ID"""
request = def_section("beatmapsets/lookup")
result = await request(**params)
cache_beatmapset(result, result["id"])
return result
async def get_user(user, mode=None, params=None):
""" Return a user from the API"""
if mode:
request = def_section(f"users/{user}/{mode}")
else:
request = def_section(f"users/{user}")
if params:
return await request(**params)
return await request()
async def get_user_scores(user_id, score_type, params=None):
""" Returns a user's best, recent or #1 scores. """
request = def_section(f"users/{user_id}/scores/{score_type}")
if params:
return await request(**params)
return await request("")
async def get_user_beatmap_score(beatmap_id, user_id, params=None):
""" Returns a user's score on a beatmap. """
request = def_section(f"beatmaps/{beatmap_id}/scores/users/{user_id}")
if params:
result = await request(**params)
else:
result = await request()
if "{'error': None}" in str(result):
result = None
return result
async def get_beatmapset(beatmapset_id, force_redownload: bool = False):
""" Returns a beatmapset using beatmapset ID"""
result = retrieve_cache(beatmapset_id, "set")
valid_result = validate_cache(result)
if not valid_result or force_redownload:
request = def_section(f"beatmapsets/{beatmapset_id}")
result = await request()
cache_beatmapset(result, result["id"])
else:
beatmapset_path = os.path.join(setcache_path, str(beatmapset_id) + ".json")
with open(beatmapset_path, encoding="utf-8") as fp:
result = json.load(fp)
return result
async def get_user_recent_activity(user, params=None):
""" Return a user's recent activity. """
request = def_section(f"users/{user}/recent_activity")
if params:
return await request(**params)
return await request()
beatmap_url_pattern_v1 = re.compile(r"https?://(osu|old)\.ppy\.sh/(?P<type>[bs])/(?P<id>\d+)(?:\?m=(?P<mode>\d))?")
beatmapset_url_pattern_v2 = \
re.compile(r"https?://osu\.ppy\.sh/beatmapsets/(?P<beatmapset_id>\d+)/?(?:#(?P<mode>\w+)/(?P<beatmap_id>\d+))?")
beatmap_url_pattern_v2 = re.compile(r"https?://osu\.ppy\.sh/beatmaps/(?P<beatmap_id>\d+)(?:\?mode=(?P<mode>\w+))?")
BeatmapURLInfo = namedtuple("BeatmapURLInfo", "beatmapset_id beatmap_id gamemode")
def parse_beatmap_url(url: str):
""" Parse the beatmap url and return either a BeatmapURLInfo.
For V1, only one parameter of either beatmap_id or beatmapset_id will be set.
For V2, only beatmapset_id will be set, or all arguments are set.
:raise SyntaxError: The URL is neither a v1 nor a v2 osu! url.
"""
match_v1 = beatmap_url_pattern_v1.match(url)
if match_v1:
# There might be some gamemode info in the url
mode = None
if match_v1.group("mode") is not None:
mode = GameMode(int(match_v1.group("mode")))
if match_v1.group("type") == "b":
return BeatmapURLInfo(beatmapset_id=None, beatmap_id=match_v1.group("id"), gamemode=mode)
return BeatmapURLInfo(beatmapset_id=match_v1.group("id"), beatmap_id=None, gamemode=mode)
match_v2_beatmapset = beatmapset_url_pattern_v2.match(url)
if match_v2_beatmapset:
if match_v2_beatmapset.group("mode") is None:
return BeatmapURLInfo(beatmapset_id=match_v2_beatmapset.group("beatmapset_id"), beatmap_id=None,
gamemode=None)
return BeatmapURLInfo(beatmapset_id=match_v2_beatmapset.group("beatmapset_id"),
beatmap_id=match_v2_beatmapset.group("beatmap_id"),
gamemode=GameMode.get_mode(match_v2_beatmapset.group("mode")))
match_v2_beatmap = beatmap_url_pattern_v2.match(url)
if match_v2_beatmap:
if match_v2_beatmap.group("mode") is None:
return BeatmapURLInfo(beatmapset_id=None, beatmap_id=match_v2_beatmap.group("beatmap_id"), gamemode=None)
return BeatmapURLInfo(beatmapset_id=None, beatmap_id=match_v2_beatmap.group("beatmap_id"),
gamemode=GameMode.get_mode((match_v2_beatmap.group("mode"))))
raise SyntaxError("The given URL is invalid.")
async def beatmap_from_url(url: str, *, return_type: str = "beatmap"):
""" Takes a url and returns the beatmap in the specified gamemode.
If a url for a submission is given, it will find the most difficult map.
:param url: The osu! beatmap url to lookup.
:param return_type: Defaults to "beatmap". Use "id" to only return the id (spares a request for /b/ urls).
:raise SyntaxError: The URL is neither a v1 nor a v2 osu! url.
:raise LookupError: The beatmap linked in the URL was not found.
"""
beatmap_info = parse_beatmap_url(url)
# Get the beatmap specified
if beatmap_info.beatmap_id is not None:
if return_type == "id":
return beatmap_info.beatmap_id
# Only download the beatmap of the id, so that only this beatmap will be returned
if return_type == "info":
return beatmap_info
params = {
"beatmap_id": beatmap_info.beatmap_id,
}
difficulties = await beatmap_lookup(params=params, map_id=beatmap_info.beatmap_id, mode="osu")
beatmapset = False
else:
beatmapset = await get_beatmapset(beatmap_info.beatmapset_id)
difficulties = beatmapset["beatmaps"]
beatmapset = True
# If the beatmap doesn't exist, the operation was unsuccessful
if not difficulties or "{'error': None}" in str(difficulties):
raise LookupError("The beatmap with the given URL was not found.")
# Find the most difficult beatmap
beatmap = None
highest = -1
if beatmapset:
for diff in difficulties:
stars = diff["difficulty_rating"]
if stars > highest:
beatmap, highest = diff, stars
else:
beatmap = difficulties
if return_type == "id":
return beatmap["id"]
if return_type == "info":
beatmap_url = f"https://osu.ppy.sh/beatmaps/{beatmap['id']}"
return parse_beatmap_url(beatmap_url)
return beatmap
async def beatmapset_from_url(url: str, force_redownload: bool = False):
""" Takes a url and returns the beatmapset of the specified beatmap.
:param url: The osu! beatmap url to lookup.
:param force_redownload: Whether or not to force a redownload of the map
:raise SyntaxError: The URL is neither a v1 nor a v2 osu! url.
:raise LookupError: The beatmap linked in the URL was not found.
"""
beatmap_info = parse_beatmap_url(url)
# Use the beatmapset_id from the url if it has one, else find the beatmapset
if beatmap_info.beatmapset_id is not None:
beatmapset_id = beatmap_info.beatmapset_id
beatmapset = await get_beatmapset(beatmapset_id, force_redownload=force_redownload)
else:
params = {
"beatmap_id": beatmap_info.beatmap_id,
}
beatmapset = await beatmapset_lookup(params=params)
# Also make sure we get the beatmap
if not beatmapset:
raise LookupError("The beatmapset with the given URL was not found.")
return beatmapset
def lookup_beatmap(beatmaps: list, **lookup):
""" Finds and returns the first beatmap with the lookup specified.
Beatmaps is a list of beatmap dicts and could be used with beatmap_lookup().
Lookup is any key stored in a beatmap from beatmap_lookup().
"""
if not beatmaps:
return None
for beatmap in beatmaps:
match = True
for key, value in lookup.items():
if key.lower() not in beatmap:
raise KeyError(f"The list of beatmaps does not have key: {key}")
if not beatmap[key].lower() == value.lower():
match = False
if match:
return beatmap
return None
def rank_from_events(events: dict, beatmap_id: str, score):
""" Return the rank of the first score of given beatmap_id from a
list of events gathered via get_user().
"""
for event in events:
if event["type"] == "rank":
beatmap_url = "https://osu.ppy.sh" + event["beatmap"]["url"]
beatmap_info = parse_beatmap_url(beatmap_url)
time_diff = datetime.fromisoformat(score["created_at"]) - datetime.fromisoformat(event["created_at"])
if (beatmap_info.beatmap_id == beatmap_id and event["scoreRank"] == score["rank"]) and \
(time_diff.total_seconds() < 60):
return event["rank"]
return None
```
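A minimal usage sketch for the helpers defined above, assuming the module is importable as plugins.osulib.api (which requires the pcbot package on the path); the beatmap URL is an arbitrary example.
```python
# Usage sketch for parse_beatmap_url, Mods and GameMode defined above.
from plugins.osulib import api

info = api.parse_beatmap_url("https://osu.ppy.sh/beatmapsets/163112#osu/396105")
print(info.beatmapset_id, info.beatmap_id, info.gamemode)  # 163112 396105 GameMode.osu

# Mod bitmasks from the API decode into enum members; 8 + 16 = HD + HR.
mods = api.Mods.list_mods(24)
print(api.Mods.format_mods(mods))  # HDHR

print(api.GameMode.get_mode("std"))  # GameMode.osu
```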
#### File: plugins/osulib/args.py
```python
import re
from collections import namedtuple
from .api import Mods
Argument = namedtuple("Argument", "pattern kwarg_pattern type default")
mods_names = re.compile(r"\w{2}")
kwarg = r"{}=(?P<value>\S+)"
class RegexArgumentParser:
""" Create a simple orderless regex argument parser. """
def __init__(self):
self.arguments = {}
def add(self, name, pattern, arg_type, default=None):
""" Adds an argument. The pattern must have a group. """
self.arguments[name] = Argument(pattern=re.compile(pattern, flags=re.IGNORECASE),
kwarg_pattern=re.compile(kwarg.format(name)),
type=arg_type, default=default)
def parse(self, *args):
""" Parse arguments.
:raise ValueError: An argument is invalid.
"""
Namespace = namedtuple("Namespace", " ".join(self.arguments.keys()))
_namespace = {name: arg.default for name, arg in self.arguments.items()}
# Go through all arguments and find a match
for user_arg in args:
for name, arg in self.arguments.items():
# Skip any already assigned arguments
if _namespace[name] is not arg.default:
continue
# Assign the arguments on match and break the lookup
match = arg.pattern.fullmatch(user_arg)
if match:
_namespace[name] = arg.type(match.group(1))
break
# Check for kwarg patterns (e.g acc=99.32 instead of 99.32%)
match = arg.kwarg_pattern.fullmatch(user_arg)
if match:
_namespace[name] = arg.type(match.group("value"))
break
else:
raise ValueError(f"{user_arg} is an invalid argument.")
# Return the complete Namespace namedtuple
return Namespace(**_namespace)
def mods(s: str):
""" Return a list of api.Mods from the given str. """
names = mods_names.findall(s)
mod_list = []
# Find and add all identified mods
for name in names:
for mod in Mods:
# Skip duplicate mods
if mod in mod_list:
continue
if mod.name.lower() == name.lower():
mod_list.append(mod)
break
return mod_list
parser = RegexArgumentParser()
parser.add("acc", r"([0-9.]+)%", arg_type=float)
parser.add("potential_acc", r"([0-9.]+)%pot", arg_type=float)
parser.add("c300", r"(\d+)x300", arg_type=int)
parser.add("c100", r"(\d+)x100", arg_type=int, default=0)
parser.add("c50", r"(\d+)x50", arg_type=int, default=0)
parser.add("misses", r"(\d+)(?:m|xm(?:iss)?)", arg_type=int, default=0)
parser.add("combo", r"(\d+)x", arg_type=int)
parser.add("objects", r"(\d+)objects", arg_type=int)
parser.add("score", r"(\d+)score", arg_type=int)
parser.add("dropmiss", r"(\d+)dropmiss", arg_type=int)
parser.add("mods", r"\+(\w+)", arg_type=mods)
parser.add("score_version", r"(?:score)?v([12])", arg_type=int, default=1)
parser.add("ar", r"ar([0-9.]+)", arg_type=float)
parser.add("cs", r"cs([0-9.]+)", arg_type=float)
parser.add("od", r"od([0-9.]+)", arg_type=float)
parser.add("hp", r"hp([0-9.]+)", arg_type=float)
parser.add("hits", r"(\d+)hits", arg_type=int)
parser.add("pp", r"([0-9.]+)pp", arg_type=float)
def parse(*args):
""" Parse pp arguments. """
return parser.parse(*args)
```
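A usage sketch for the argument parser defined above, assuming plugins.osulib.args is importable (it pulls in the api module and therefore pcbot on import).
```python
# Usage sketch for the orderless regex argument parser defined above.
from plugins.osulib import args

ns = args.parse("99.5%", "+HDDT", "600x", "1m")
print(ns.acc)     # 99.5
print(ns.mods)    # [<Mods.HD: 8>, <Mods.DT: 64>]
print(ns.combo)   # 600
print(ns.misses)  # 1

# Keyword-style arguments are accepted as well, e.g. acc=99.5 instead of 99.5%.
print(args.parse("acc=99.5").acc)  # 99.5
```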
#### File: plugins/osulib/pp.py
```python
import logging
import os
from collections import namedtuple
from pcbot import utils
from . import api
from . import pp_bindings
from .args import parse as parse_options
host = "https://osu.ppy.sh/"
CachedBeatmap = namedtuple("CachedBeatmap", "url_or_id beatmap")
PPStats = namedtuple("PPStats", "pp stars partial_stars max_pp max_combo ar cs od hp clock_rate")
ClosestPPStats = namedtuple("ClosestPPStats", "acc pp stars")
cache_path = "plugins/osulib/mapcache"
async def is_osu_file(url: str):
""" Returns True if the url links to a .osu file. """
headers = await utils.retrieve_headers(url)
return "text/plain" in headers.get("Content-Type", "") and ".osu" in headers.get("Content-Disposition", "")
async def download_beatmap(beatmap_url_or_id, beatmap_path: str, ignore_cache: bool = False):
""" Download the .osu file of the beatmap with the given url, and save it to beatmap_path.
:param beatmap_url_or_id: beatmap_url as str or the id as int
:param beatmap_path: the path to save the beatmap in
:param ignore_cache: when True, return the downloaded bytes without writing them to the file cache
"""
# Parse the url and find the link to the .osu file
try:
if isinstance(beatmap_url_or_id, str):
beatmap_id = await api.beatmap_from_url(beatmap_url_or_id, return_type="id")
else:
beatmap_id = beatmap_url_or_id
except SyntaxError as e:
# Since the beatmap isn't an osu.ppy.sh url, we'll see if it's a .osu file
if not await is_osu_file(beatmap_url_or_id):
raise ValueError from e
file_url = beatmap_url_or_id
else:
file_url = host + "osu/" + str(beatmap_id)
# Download the beatmap using the url
beatmap_file = await utils.download_file(file_url)
if not beatmap_file:
raise ValueError("The given URL is invalid.")
if ignore_cache:
return beatmap_file
with open(beatmap_path, "wb") as f:
f.write(beatmap_file)
# one map apparently had a \ufeff (BOM) at the very beginning of the file
# https://osu.ppy.sh/b/1820921
if not beatmap_file.decode().strip("\ufeff \t").startswith("osu file format"):
logging.error("Invalid file received from %s\nCheck %s", file_url, beatmap_path)
raise ValueError("Could not download the .osu file.")
async def parse_map(beatmap_url_or_id, ignore_osu_cache: bool = False):
""" Download and parse the map with the given url or id, or return a newly parsed cached version.
:param beatmap_url_or_id: beatmap_url as str or the id as int
:param ignore_osu_cache: When true, does not download or use .osu file cache
"""
if isinstance(beatmap_url_or_id, str):
beatmap_id = await api.beatmap_from_url(beatmap_url_or_id, return_type="id")
else:
beatmap_id = beatmap_url_or_id
if not ignore_osu_cache:
beatmap_path = os.path.join(cache_path, str(beatmap_id) + ".osu")
else:
beatmap_path = os.path.join(cache_path, "temp.osu")
if not os.path.exists(cache_path):
os.makedirs(cache_path)
# Parse from cache or load the .osu and parse new
if ignore_osu_cache or not os.path.isfile(beatmap_path):
await download_beatmap(beatmap_url_or_id, beatmap_path)
return beatmap_path
async def calculate_pp(beatmap_url_or_id, *options, mode: api.GameMode, ignore_osu_cache: bool = False):
""" Return a PPStats namedtuple from this beatmap, or a ClosestPPStats namedtuple
when [pp_value]pp is given in the options.
:param beatmap_url_or_id: beatmap_url as str or the id as int
:param mode: which mode to calculate PP for
:param ignore_osu_cache: When true, does not download or use .osu file cache
"""
beatmap_path = await parse_map(beatmap_url_or_id, ignore_osu_cache=ignore_osu_cache)
args = parse_options(*options)
# Calculate the mod bitmask and apply settings if needed
if args.mods and api.Mods.NC in args.mods:
args.mods.remove(api.Mods.NC)
args.mods.append(api.Mods.DT)
mods_bitmask = sum(mod.value for mod in args.mods) if args.mods else 0
# If the pp arg is given, return using the closest pp function
if args.pp is not None and mode is api.GameMode.osu:
return await find_closest_pp(beatmap_path, mods_bitmask, args)
# Calculate the pp
max_pp = None
max_combo = None
if mode is api.GameMode.osu:
pp_info = pp_bindings.std_pp(beatmap_path, mods_bitmask, args.combo, args.acc, args.potential_acc, args.c300,
args.c100, args.c50, args.misses, args.objects)
max_pp = pp_info["max_pp"]
max_combo = pp_info["max_combo"]
elif mode is api.GameMode.taiko:
pp_info = pp_bindings.taiko_pp(beatmap_path, mods_bitmask, args.combo, args.acc, args.c300,
args.c100, args.misses, args.objects)
max_combo = pp_info["max_combo"]
elif mode is api.GameMode.mania:
pp_info = pp_bindings.mania_pp(beatmap_path, mods_bitmask, args.score, args.objects)
elif mode is api.GameMode.fruits:
pp_info = pp_bindings.catch_pp(beatmap_path, mods_bitmask, args.combo, args.c300, args.c100,
args.c50, args.dropmiss, args.misses, args.objects)
max_combo = pp_info["max_combo"]
else:
logging.info("Unknown gamemode {} passed to pp calculator".format(mode))
return
pp = pp_info["pp"]
total_stars = pp_info["total_stars"]
partial_stars = pp_info["partial_stars"]
ar = pp_info["ar"]
cs = pp_info["cs"]
od = pp_info["od"]
hp = pp_info["hp"]
clock_rate = pp_info["clock_rate"]
return PPStats(pp, total_stars, partial_stars, max_pp, max_combo, ar, cs, od, hp, clock_rate)
async def find_closest_pp(beatmap_path, mods_bitmask, args):
""" Find the accuracy required to get the given amount of pp from this map. """
# Define a partial command for easily setting the pp value by 100s count
def calc(accuracy: float):
pp_info = pp_bindings.std_pp(beatmap_path, mods_bitmask, args.combo, accuracy, args.potential_acc, args.c300,
args.c100, args.c50, args.misses, args.objects)
return pp_info
# Find the smallest possible value rosu-pp is willing to give
min_pp = calc(accuracy=0.0)
if args.pp <= min_pp["pp"]:
raise ValueError(f"The given pp value is too low (calculator gives **{min_pp['pp']:.02f}pp** as the "
"lowest possible).")
# Calculate the max pp value by using 100% acc
previous_pp = calc(accuracy=100.0)
if args.pp >= previous_pp["pp"]:
raise ValueError(f"PP value should be below **{previous_pp['pp']:.02f}pp** for this map.")
dec = .05
acc = 100.0 - dec
while True:
current_pp = calc(accuracy=acc)
# Stop when we find a pp value between the current accuracy step and the previous one
if current_pp["pp"] <= args.pp <= previous_pp["pp"]:
break
previous_pp = current_pp
acc -= dec
# Calculate the star difficulty
totalstars = current_pp["total_stars"]
# Find the closest pp of our two values, and return the corresponding accuracy
closest_pp = min([previous_pp["pp"], current_pp["pp"]], key=lambda v: abs(args.pp - v))
acc = acc if closest_pp == current_pp["pp"] else acc + dec
return ClosestPPStats(round(acc, 2), closest_pp, totalstars)
```
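A call-shape sketch for the pp calculator above. It assumes the compiled pp_bindings module is available and that the bot's utils can download the .osu file; the beatmap URL and option values are arbitrary examples.
```python
# Call-shape sketch for calculate_pp defined above.
import asyncio
from plugins.osulib import api
from plugins.osulib.pp import calculate_pp

async def example():
    stats = await calculate_pp(
        "https://osu.ppy.sh/beatmapsets/163112#osu/396105",
        "99.5%", "+HD", "1m",
        mode=api.GameMode.osu,
    )
    # PPStats fields: pp stars partial_stars max_pp max_combo ar cs od hp clock_rate
    print(f"{stats.pp:.2f}pp at {stats.stars:.2f} stars")

asyncio.run(example())
```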
#### File: pcbot/plugins/pokedex.py
```python
import json
import logging
import os
from collections import defaultdict
from difflib import get_close_matches
from io import BytesIO
from operator import itemgetter
import discord
import bot
import plugins
from pcbot import Config, Annotate, guild_command_prefix, utils
try:
from PIL import Image
except ImportError:
resize = False
logging.warning("PIL could not be loaded. The pokedex works as usual, however sprites will remain 1x scaled.")
else:
resize = True
client = plugins.client # type: bot.Client
api_path = "plugins/pokedexlib/pokedex.json"
sprites_path = "plugins/pokedexlib/sprites/"
pokedex_config = Config("pokedex", data=defaultdict(dict))
default_scale_factor = 1.0
min_scale_factor, max_scale_factor = 0.25, 4
pokemon_go_gen = [1, 2, 3, 4, 5, 6]
# Load the Pokedex API
with open(api_path) as api_file:
api = json.load(api_file)
pokedex = api["pokemon"]
# Load all our sprites into RAM (they don't take much space)
# Unlike the pokedex.json API, these use pokemon ID as keys.
# The values are the sprites in bytes.
sprites = {}
for file in os.listdir(sprites_path):
with open(os.path.join(sprites_path, file), "rb") as sprite_bytes:
sprites[int(file.split(".")[0])] = sprite_bytes.read()
def id_to_name(pokemon_id: int):
""" Convert the pokemon ID to a name. """
for name, pokemon in pokedex.items():
if pokemon["id"] == pokemon_id:
return name
return None
def egg_name(pokemon_evolution: list):
""" Return the egg name of the pokemon_evolution chain. """
# The pokemon are in their respective order, so we'll find the first one with
# a Pokemon GO generation pokemon
for names in pokemon_evolution:
for name in names:
pokemon = pokedex[name]
if pokemon["generation"] in pokemon_go_gen:
return pokemon["locale_name"]
return "Unknown"
def resize_sprite(sprite, factor: float):
""" Resize a sprite (string of bytes / rb). """
image = Image.open(BytesIO(sprite))
# Resize with the scaled proportions
width, height = image.size
width, height = int(width * factor), int(height * factor)
image = image.resize((width, height), Image.NEAREST)
# Return the byte-like object
return utils.convert_image_object(image)
def format_type(*types):
""" Format a string from a list of a pokemon's types. """
return " | ".join(t.capitalize() for t in types if t is not None)
def get_pokemon(name_or_id: str, assert_on_error: bool = True):
""" Returns a pokemon with the given name or id string. """
# Get the requested pokemon name
name = name_or_id
try:
pokemon_id = int(name_or_id)
except ValueError:
# See if there's a pokemon with the locale name formatted like the given name
for pokemon in pokedex.values():
if pokemon["locale_name"].lower() == name:
name = pokemon["name"]
break
# Correct the name if it is very close to an existing pokemon and there's only one close match
matches = get_close_matches(name, pokedex.keys(), n=2, cutoff=0.8)
if matches and len(matches) == 1:
name = matches[0]
if name not in pokedex:
assert not assert_on_error, "There is no pokémon called **{}** in my pokédex!\nPerhaps you meant: `{}`?".format(
name, ", ".join(get_close_matches(name, pokedex.keys(), cutoff=0.5)))
return None
else:
name = id_to_name(pokemon_id)
if name is None:
assert not assert_on_error, "There is no pokémon with ID **#{:03}** in my pokédex!".format(pokemon_id)
return None
return name
@plugins.command(name="pokedex", aliases="pd pokemon dex")
async def pokedex_(message: discord.Message, name_or_id: Annotate.LowerCleanContent):
""" Display some information of the given pokémon.
**Examples**: <http://imgur.com/a/lqG9c> """
# Do some quick replacements for flexible parsing
name_or_id = name_or_id.strip()
if name_or_id.startswith("#"):
name_or_id = name_or_id.replace("#", "")
if " " in name_or_id:
if "♀" in name_or_id or "♀" in name_or_id or name_or_id.endswith("f") or name_or_id.endswith("m"):
name_or_id = name_or_id.replace(" ", "-").replace("♂", "m").replace("♀", "f")
else:
name_or_id = name_or_id.replace(" ", "")
# Get the name of the specified pokemon
name = get_pokemon(name_or_id)
# Assign our pokemon
pokemon = pokedex[name]
# Send an image if the bot has the Attach Files permission or the message is a DM
if message.guild is None or message.channel.permissions_for(message.guild.me).attach_files:
# Get the guild's scale factor
if not isinstance(message.channel, discord.abc.PrivateChannel) \
and message.guild.id in pokedex_config.data and "scale-factor" in pokedex_config.data[message.guild.id]:
scale_factor = pokedex_config.data[message.guild.id]["scale-factor"]
else:
scale_factor = default_scale_factor
# Assign our pokemon
pokemon = pokedex[name]
# Assign the sprite to use
if pokemon["id"] in sprites:
sprite = sprites[pokemon["id"]]
else:
sprite = sprites[0]
# Resize (if PIL is enabled) and upload the sprite
if resize and not round(scale_factor, 2) == 1:
sprite = resize_sprite(sprite, scale_factor)
elif resize:
sprite = BytesIO(sprite)
# Format Pokemon GO specific info
pokemon_go_info = ""
if "evolution_cost" in pokemon:
pokemon_go_info += "Evolution cost: `{} {} Candy` ".format(
pokemon["evolution_cost"], egg_name(pokemon["evolution"]))
if "hatches_from" in pokemon:
if pokemon_go_info:
pokemon_go_info += "\n"
pokemon_go_info += "Hatches from: `{}km Egg` ".format(pokemon["hatches_from"])
# Format the message
formatted_message = (
"**#{id:03} {upper_name} - GEN {generation}**\n"
"**{genus}**\n"
"Weight: `{weight}kg` Height: `{height}m`\n"
"Type: `{type}`\n"
"{pokemon_go}"
"```\n{description}```"
"**EVOLUTIONS**: {formatted_evolution}"
).format(
upper_name=pokemon["locale_name"].upper(),
type=format_type(*pokemon["types"]),
formatted_evolution=" **->** ".join(" **/** ".join(pokedex[name]["locale_name"].upper() for name in names)
for names in pokemon["evolution"]),
pokemon_go=pokemon_go_info,
**pokemon
)
embed = discord.Embed(color=message.author.color)
embed.set_image(url="attachment://{}.png".format(name))
embed.description = formatted_message
await client.send_message(message.channel, file=(discord.File(sprite, filename="{}.png".format(name))), embed=embed)
@pokedex_.command()
async def egg(message: discord.Message, egg_type: Annotate.LowerCleanContent):
""" Get the pokemon hatched from the specified egg_type
(in distance, e.g. 2 or 5km) """
# Strip any km suffix (or prefix, whatever)
egg_type = egg_type.replace("km", "")
try:
distance = int(float(egg_type)) # Using float for anyone willing to type 2.0km
except ValueError:
await client.say(message, "The egg type **{}** is invalid.".format(egg_type))
return
pokemon_criteria = []
egg_types = []
# Find all pokemon with the specified distance
for pokemon in sorted(pokedex.values(), key=itemgetter("id")):
# We've exceeded the generation and no longer need to search
if pokemon["generation"] not in pokemon_go_gen:
break
if "hatches_from" not in pokemon:
continue
if pokemon["hatches_from"] not in egg_types:
egg_types.append(pokemon["hatches_from"])
if pokemon["hatches_from"] == distance:
pokemon_criteria.append(pokemon["locale_name"])
# The list might be empty
assert pokemon_criteria, "No pokemon hatch from a **{}km** egg. **Valid distances are** ```\n{}```".format(
distance, ", ".join("{}km".format(s) for s in sorted(egg_types)))
# Respond with the list of matching criteria
await client.say(message, "**The following Pokémon may hatch from a {}km egg**:```\n{}```".format(
distance, ", ".join(sorted(pokemon_criteria))))
def assert_type(slot: str, guild: discord.Guild):
""" Assert if a type does not exist, and show the valid types. """
match = get_close_matches(slot, api["types"], n=1, cutoff=0.4)
if match:
matches_string = " Perhaps you meant `{}`?".format(match[0])
else:
matches_string = " See `{}help pokedex type`.".format(guild_command_prefix(guild))
assert slot in api["types"], "**{}** is not a valid pokemon type.{}".format(
slot.capitalize(), matches_string)
types_str = "**Valid types are** ```\n{}```".format(", ".join(s.capitalize() for s in api["types"]))
def attack_method(type):
""" Iterate through the pokemon type's attack damage factor. """
for damage_type, damage in api["types"][type]["damage_factor"].items():
yield damage_type, damage
def defense_method(type):
""" Iterate through the pokemon type's defense damage factor. """
for value in api["types"].values():
yield value["name"], value["damage_factor"][type]
def resolve_damage_factor(method, type_1: str, type_2: str = None):
""" Combine the damage factors when there are two types. """
damage_factor = {k: 0 for k in api["types"].keys()}
if not type_2:
for damage_type, damage in method(type_1):
damage_factor[damage_type] = damage
else:
for damage_type_1, damage_1 in method(type_1):
for damage_type_2, damage_2 in method(type_2):
if damage_type_1 == damage_type_2:
damage_factor[damage_type_1] = damage_1 * damage_2
return damage_factor
def format_damage(method, type_1: str, type_2: str = None):
""" Formats the effective, ineffective and no effect lists with type names
based on the damage factor.
"""
damage_factor = resolve_damage_factor(method, type_1, type_2)
effective, ineffective, useless = [], [], []
for damage_type, damage in damage_factor.items():
name = damage_type.capitalize()
if damage == 4:
effective.append(name + " x2")
elif damage == 2:
effective.append(name)
elif damage == 0.5:
ineffective.append(name)
elif damage == 0.25:
ineffective.append(name + " x2")
elif damage == 0:
useless.append(name)
return effective, ineffective, useless
def format_specific_efficacy(method, type_1: str, type_2: str = None):
""" Format the efficacy string specifically for defense or attack. """
effective, ineffective, useless = format_damage(method, type_1, type_2)
type_name = format_type(type_1, type_2)
s = "**{}** \N{EN DASH} **{}**\n".format(type_name, "DEFENSE" if method is defense_method else "ATTACK")
if effective:
s += "Super effective: `{}`\n".format(", ".join(effective))
if ineffective:
s += "Not very effective: `{}`\n".format(", ".join(ineffective))
if useless:
s += "No effect: `{}`\n".format(", ".join(useless))
return s
def format_efficacy(type_1: str, type_2: str = None):
""" Format an efficacy string so that we can use this function for
multiple commands. """
efficacy = format_specific_efficacy(attack_method, type_1, type_2)
efficacy += format_specific_efficacy(defense_method, type_1, type_2)
return efficacy.strip("\n")
@pokedex_.command(name="type", description="Show pokemon with the specified types. {}".format(types_str))
async def filter_type(message: discord.Message, slot_1: str.lower, slot_2: str.lower = None):
matched_pokemon = []
assert_type(slot_1, message.guild)
# Find all pokemon with the matched criteria
if slot_2:
assert_type(slot_2, message.guild)
# If two slots are provided, search for pokemon with both types matching
for pokemon in pokedex.values():
if pokemon["types"] == [slot_1, slot_2]:
matched_pokemon.append(pokemon["locale_name"])
else:
# All pokemon have a type in their first slot, so check if these are equal
for pokemon in pokedex.values():
if pokemon["types"][0] == slot_1:
matched_pokemon.append(pokemon["locale_name"])
# There might not be any pokemon with the specified types
assert matched_pokemon, "Looks like there are no pokemon of type **{}**!".format(format_type(slot_1, slot_2))
await client.say(message, "**Pokemon with type {}**: ```\n{}```".format(
format_type(slot_1, slot_2), ", ".join(sorted(matched_pokemon))))
@pokedex_.command(aliases="e",
description="Display type efficacy (effectiveness) of the specified type or pokemon. {}".format(
types_str))
async def effect(message: discord.Message, slot_1_or_pokemon: str.lower, slot_2: str.lower = None):
name = get_pokemon(slot_1_or_pokemon, assert_on_error=False)
formatted = ""
if name:
types = pokedex[name]["types"]
slot_1 = types[0]
if len(types) > 1:
slot_2 = types[1]
formatted += "Using types of **{}**:\n\n".format(name.capitalize())
else:
slot_1 = slot_1_or_pokemon
assert_type(slot_1, message.guild)
if slot_2:
assert_type(slot_2, message.guild)
formatted += format_efficacy(slot_1, slot_2)
await client.say(message, formatted)
@pokedex_.command(disabled_pm=True, aliases="sf", permissions="manage_guild")
async def scalefactor(message: discord.Message, factor: float = default_scale_factor):
""" Set the image scaling factor for your guild. If no factor is given, the default is set. /
**This command requires the `Manage Guild` permission.**"""
assert not factor == 0, "If you wish to disable images, remove the `Attach Files` permission from this bot."
assert factor <= max_scale_factor, "The factor **{}** is too high **(max={})**.".format(factor, max_scale_factor)
assert min_scale_factor <= factor, "The factor **{}** is too low **(min={})**.".format(factor, min_scale_factor)
if message.guild.id not in pokedex_config.data:
pokedex_config.data[message.guild.id] = {}
# Handle specific scenarios
if factor == default_scale_factor:
if "scale-factor" in pokedex_config.data[message.guild.id]:
del pokedex_config.data[message.guild.id]["scale-factor"]
reply = "Pokédex image scale factor reset to default: **{factor}**."
else:
reply = "Pokédex image scale factor is **{factor}** (default)."
else:
pokedex_config.data[message.guild.id]["scale-factor"] = factor
reply = "Pokédex image scale factor set to **{factor}**."
await pokedex_config.asyncsave()
await client.say(message, reply.format(factor=factor))
```
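The efficacy helpers above combine two type slots by multiplying their per-type damage factors and then bucketing the results (4 becomes "x2" super effective, 0.25 becomes "x2" not very effective, 0 becomes no effect). The following toy sketch illustrates only that combination step; the damage factors below are made up, whereas the real values come from plugins/pokedexlib/pokedex.json.
```python
# Toy illustration of how resolve_damage_factor combines two type slots by
# multiplying per-type damage factors. The numbers are made up for the example.
toy_factors = {
    "rock":   {"fire": 2.0, "water": 1.0, "grass": 1.0},
    "ground": {"fire": 2.0, "water": 1.0, "grass": 0.5},
}

def combine(type_1: str, type_2: str):
    """ Multiply the damage factors of both type slots, as the plugin does. """
    return {target: toy_factors[type_1][target] * toy_factors[type_2][target]
            for target in toy_factors[type_1]}

print(combine("rock", "ground"))  # {'fire': 4.0, 'water': 1.0, 'grass': 0.5}
# A factor of 4.0 is shown as "Fire x2" in the formatted efficacy string.
```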
#### File: pcbot/plugins/web.py
```python
import discord
import bot
import plugins
from pcbot import Annotate, utils
client = plugins.client # type: bot.Client
# Create exchange rate cache and keep track of when we last reset it
# exchange_rate_cache = dict(reset=client.time_started)
@plugins.command(aliases="def")
async def define(message: discord.Message, term: Annotate.LowerCleanContent):
""" Defines a term using Urban Dictionary. """
json = await utils.download_json("http://api.urbandictionary.com/v0/define", term=term)
assert json["list"], "Could not define `{}`.".format(term)
definitions = json["list"]
msg = ""
# Send any valid definition (length of message < 2000 characters)
for definition in definitions:
# Format example in code if there is one
if definition.get("example"):
definition["example"] = "```{}```".format(definition["example"])
# Format definition
msg = "**{word}**:\n{definition}{example}".format(**definition)
# If this definition fits in a message, break the loop so that we can send it
if len(msg) <= 2000:
break
# Cancel if the message is too long
assert len(msg) <= 2000, "Defining this word would be a bad idea."
await client.say(message, msg)
# async def get_exchange_rate(base: str, currency: str):
# """ Returns the exchange rate between two currencies. """
# # Return the cached result unless the last reset was yesterday or longer
# if (base, currency) in exchange_rate_cache:
# if (datetime.now() - exchange_rate_cache["reset"]).days >= 1:
# exchange_rate_cache.clear()
# exchange_rate_cache["reset"] = datetime.now()
# else:
# return exchange_rate_cache[(base, currency)]
#
# data = await utils.download_json("https://api.fixer.io/latest", base=base, symbols=currency)
#
# # Raise an error when the base is invalid
# if "error" in data and data["error"].lower() == "invalid base":
# raise ValueError("{} is not a valid currency".format(base))
#
# # The API will not return errors on invalid symbols, so we check this manually
# if not data["rates"]:
# raise ValueError("{} is not a valid currency".format(currency))
#
# rate = data["rates"][currency]
# # Add both the exchange rate of the given order and the inverse to the cache
# exchange_rate_cache[(base, currency)] = rate
# exchange_rate_cache[(currency, base)] = 1 / rate
# return rate
# @plugins.command(aliases="ge currency cur") async def convert(message: discord.Message, value: float,
# currency_from: str.upper, currency_to: str.upper): """ Converts currency using http://fixer.io/ """ try: rate =
# await get_exchange_rate(currency_from, currency_to) except ValueError as e: await client.say(message,
# e) else: flag = utils.text_to_emoji(currency_to[:2]) e = discord.Embed(description="{} {:,.2f} {}".format(flag,
# value * rate, currency_to), color=message.author.color) await client.send_message(message.channel, embed=e)
# async def on_reload(name):
# """ Don't drop the cache. """
# global exchange_rate_cache
# local_cache = exchange_rate_cache
#
# await plugins.reload(name)
#
# exchange_rate_cache = local_cache
```
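A standalone sketch of the definition-picking logic in the define command above: iterate the Urban Dictionary results and keep the first one whose formatted message fits in a 2000 character Discord message. The sample entries are made up.
```python
# Sketch of the "first definition that fits" loop from the define command.
definitions = [
    {"word": "bot", "definition": "x" * 3000, "example": ""},
    {"word": "bot", "definition": "A program that automates tasks.", "example": "the bot replied"},
]

msg = ""
for definition in definitions:
    # Format example in code if there is one
    if definition.get("example"):
        definition["example"] = "```{}```".format(definition["example"])
    msg = "**{word}**:\n{definition}{example}".format(**definition)
    if len(msg) <= 2000:
        break

print(len(msg) <= 2000)  # True: the second, shorter definition was chosen
```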
#### File: pcbot/plugins/wyr.py
```python
import asyncio
import random
import re
import discord
import bot
import plugins
from pcbot import Config
client = plugins.client # type: bot.Client
db = Config("would-you-rather", data=dict(timeout=10, responses=["**{name}** would **{choice}**!"], questions=[]),
pretty=True)
command_pattern = re.compile(r"(.+)(?:\s+or|\s*,)\s+([^?]+)\?*")
sessions = set() # All running would you rather's are in this set
@plugins.argument("{open}option ...{close} or/, {open}other option ...{close}[?]", allow_spaces=True)
async def options(arg):
""" Command argument for receiving two options. """
match = command_pattern.match(arg)
assert match
assert not match.group(1).lower() == match.group(2).lower(), "**The choices cannot be the same.**"
return match.group(1), match.group(2)
def get_choice(choices: list, choice: str):
""" Get the chosen option. This accept 1 and 2 as numbers. """
if choice == "1":
return 0
if choice == "2":
return 1
choices = list(map(str.lower, choices))
words = list(map(str.split, choices))
# Go through all words in the given message, and find any words unique to a choice
for word in choice.lower().split():
if word in words[0] and word not in words[1]:
return 0
elif word in words[1] and word not in words[0]:
return 1
# Invalid choice
return None
@plugins.command(aliases="wyr rather either")
async def wouldyourather(message: discord.Message, opt: options = None):
""" Ask the bot if he would rather, or have the bot ask you.
**Examples:**
Registering a choice: `!wouldyourather lie or be lied to`
Asking the bot: `!wouldyourather`"""
# If there are no options, the bot will ask the questions (if there are any to choose from)
if opt is None:
assert message.channel.id not in sessions, "**A would you rather session is already in progress.**"
sessions.add(message.channel.id)
assert db.data["questions"], "**There are ZERO questions saved. Ask me one!**"
question = random.choice(db.data["questions"])
choices = question["choices"]
await client.say(message, "Would you rather **{}** or **{}**?".format(*choices))
timeout = db.data["timeout"]
replied = []
# Wait for replies from anyone in the channel
while True:
def check(m):
return m.channel == message.channel and m.author not in replied
try:
reply = await client.wait_for_message(timeout=timeout, check=check)
# Break on timeout
except asyncio.TimeoutError:
break
# Check if the choice is valid
choice = get_choice(choices, reply.content)
if choice is None:
continue
# Register that this author has replied
replied.append(reply.author)
# Update the answers in the DB
# We don't care about multiples, just the amount (yes it will probably be biased)
question["answers"][choice] += 1
name = reply.author.display_name
response = random.choice(db.data["responses"]).format(name=name, NAME=name.upper(),
choice=choices[choice])
await client.say(message, response)
# Say the total tallies
await client.say(message, "A total of {0} would **{2}**, while {1} would **{3}**!".format(
*question["answers"], *choices))
await db.asyncsave()
sessions.remove(message.channel.id)
# Otherwise, the member asked a question to the bot
else:
db.data["questions"].append(dict(
choices=list(opt),
answers=[0, 0]
))
await db.asyncsave()
answer = random.choice(opt)
await client.say(message, "**I would {}**!".format(answer))
@wouldyourather.command(aliases="delete", owner=True)
async def remove(message: discord.Message, opt: options):
""" Remove a wouldyourather question with the given options. """
for q in db.data["questions"]:
if q["choices"][0] == opt[0] and q["choices"][1] == opt[1]:
db.data["questions"].remove(q)
await db.asyncsave()
await client.say(message, "**Entry removed.**")
break
else:
await client.say(message, "**Could not find the question.**")
```
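A standalone re-statement of get_choice's matching rule, kept separate so it runs without the Discord client: "1"/"2" pick by number, otherwise a reply counts only if it contains a word that is unique to exactly one of the two options.
```python
# Illustration of the answer-matching rule used by get_choice above.
def pick(choices, reply):
    if reply == "1":
        return 0
    if reply == "2":
        return 1
    words = [c.lower().split() for c in choices]
    for word in reply.lower().split():
        if word in words[0] and word not in words[1]:
            return 0
        if word in words[1] and word not in words[0]:
            return 1
    return None  # No word was unique to either option

choices = ["eat pizza", "eat pasta"]
print(pick(choices, "pizza"))  # 0
print(pick(choices, "eat"))    # None ("eat" appears in both options)
print(pick(choices, "2"))      # 1
```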
|
{
"source": "JeGoi/IPa2",
"score": 3
}
|
#### File: IPa2/packages/java_file_editor.py
```python
import sys,os
import yaml
import util as u
# ===============================================
# FUNCTION create Java File Editor
# ===============================================
def create_java_editor_file(yml,armaDir):
progDir = u.define_edit_path(armaDir)
filename = progDir+""+u.get_program_name(yml)+"Editors.java"
out = open(filename, 'w')
yml['FrameDefaultVariables'] = {
"name" : "jTextField",
"rename": "jButton",
"reset" : "jButton",
"close" : "jButton",
"stop" : "jButton",
"run" : "jButton"
}
write_begin(out,yml)
write_setDefaultCloseOperation(out,yml)
write_events(out,yml)
write_functions(out,yml)
write_bottom_variables(out,yml)
write_end(out,yml)
# ===============================================
# SUB FUNCTIONS TO CREATE Java File Editor
# ===============================================
#
# Header and variables
#
def write_begin(out,yml):
write_begin_start(out,yml)
write_import_class(out,yml)
write_author_date(out,yml)
write_variables(out,yml)
write_header_variables(out,yml)
write_java_variables(out,yml,"header")
#
# Header and variables
#
def write_begin_start(out,yml):
out.write("/**\n"+
"* To change this license header, choose License Headers in Project Properties.\n"+
"* To change this template file, choose Tools | Templates\n"+
"* and open the template in the editor.\n"+
"*/\n"+
"package editors;\n"+
"\n")
def write_import_class(out,yml):
out.write("import configuration.Cluster;\n"+
"import configuration.Config;\n"+
"import configuration.Util;\n"+
"import editor.EditorInterface;\n"+
"import editor.clusterEditorProgram;\n"+
"import editor.dockerEditorProgram;\n"+
"import java.awt.Dimension;\n"+
"import java.awt.Frame;\n"+
"import java.awt.Robot;\n"+
"import java.awt.Toolkit;\n"+
"import java.awt.image.BufferedImage;\n"+
"import java.io.File;\n"+
"import javax.imageio.ImageIO;\n"+
"import javax.swing.JFileChooser;\n"+
"import java.util.ArrayList;\n"+
"import java.util.HashMap;\n"+
"import javax.swing.JCheckBox;\n"+
"import javax.swing.JComboBox;\n"+
"import javax.swing.JFileChooser;\n"+
"import javax.swing.JRadioButton;\n"+
"import javax.swing.JSpinner;\n"+
"import javax.swing.JTextField;\n"+
"import program.*;\n"+
"import workflows.armadillo_workflow;\n"+
"import workflows.workflow_properties;\n"+
"import workflows.workflow_properties_dictionnary;\n"+
"\n")
def write_author_date(out,yml):
out.write("/**\n"+
" *\n"+
" * @author : "+yml['author']+"\n"+
" * @Date : "+yml['date']+"\n"+
" */\n"+
"\n")
def write_variables(out,yml):
out.write("public class "+u.get_program_name(yml)+"Editors extends javax.swing.JDialog implements EditorInterface {\n"+
"\n"+
" /**\n"+
" * Creates new form "+u.get_program_name(yml)+"Editors\n"+
" */\n"+
" Config config=new Config();\n"+
" //ConnectorInfoBox connectorinfobox;\n"+
" workflow_properties_dictionnary dict=new workflow_properties_dictionnary();\n"+
" String selected = \"\"; // Selected properties\n"+
" Frame frame;\n"+
" workflow_properties properties;\n"+
" armadillo_workflow parent_workflow;\n"+
"\n"+
" public final String defaultNameString=\"Name\";\n"+
" static final boolean default_map=true;\n")
p = 0
for Panel in yml['Menus']:
if 'Panel' in Panel:
if Panel['isMenu']:
out.write(" public static HashMap<JCheckBox,JSpinner> DictMenuCBS"+str(p)+" = new HashMap<JCheckBox,JSpinner>();\n"+
" public static HashMap<JCheckBox,JTextField> DictMenuCBT"+str(p)+" = new HashMap<JCheckBox,JTextField>();\n"+
" public static HashMap<JCheckBox,JComboBox> DictMenuCBC"+str(p)+" = new HashMap<JCheckBox,JComboBox>();\n"+
" public static HashMap<JRadioButton,JSpinner> DictMenuRBS"+str(p)+" = new HashMap<JRadioButton,JSpinner>();\n"+
" public static HashMap<JRadioButton,JTextField> DictMenuRBT"+str(p)+" = new HashMap<JRadioButton,JTextField>();\n"+
" public static ArrayList<HashMap> listDictsMenu"+str(p)+" = new ArrayList<HashMap>();\n")
else:
out.write(" public static HashMap<JCheckBox,JSpinner> DictCBS"+str(p)+" = new HashMap<JCheckBox,JSpinner>();\n"+
" public static HashMap<JCheckBox,JTextField> DictCBT"+str(p)+" = new HashMap<JCheckBox,JTextField>();\n"+
" public static HashMap<JCheckBox,JComboBox> DictCBC"+str(p)+" = new HashMap<JCheckBox,JComboBox>();\n"+
" public static HashMap<JRadioButton,JSpinner> DictRBS"+str(p)+" = new HashMap<JRadioButton,JSpinner>();\n"+
" public static HashMap<JRadioButton,JTextField> DictRBT"+str(p)+" = new HashMap<JRadioButton,JTextField>();\n"+
" public static ArrayList<HashMap> listDicts"+str(p)+" = new ArrayList<HashMap>();\n")
p += 1
out.write("\n"+
" public "+u.get_program_name(yml)+"Editors(java.awt.Frame parent, armadillo_workflow parent_workflow){\n"+
" super(parent, false);\n"+
" this.parent_workflow=parent_workflow;\n"+
" //--Set variables and init\n"+
" frame=parent;\n")
if p>0:
for x in range(1,p):
out.write(" //listDicts"+str(x)+" = Util.createListDict(DictBoxSpinner"+str(x)+",DictBoxTextField"+str(x)+",DictBoxComboBox"+str(x)+",DictRadioButtonSpinner"+str(x)+",DictRadioButtonTextField"+str(x)+");\n")
out.write(" }\n"+
" \n"+
"\n")
def write_header_variables(out,yml):
out.write("\n"+
" /**\n"+
" * This method is called from within the constructor to initialize the form.\n"+
" * WARNING: Do NOT modify this code. The content of this method is always\n"+
" * regenerated by the Form Editor.\n"+
" */\n"+
" @SuppressWarnings(\"unchecked\")\n"+
" // <editor-fold defaultstate=\"collapsed\" desc=\"Generated Code\">//GEN-BEGIN:initComponents\n"+
" private void initComponents(){\n"+
" \n"+
" Menu_Buttons = new javax.swing.ButtonGroup();\n")
if 'Docker' in yml and yml['Docker'] is not None:
out.write(" docker_jButton = new javax.swing.JButton();\n")
if 'Cluster' in yml and yml['Cluster'] is not None:
out.write(" cluster_jButton = new javax.swing.JButton();\n")
out.write(" how_jButton = new javax.swing.JButton();\n"+
" "+u.get_program_name(yml)+"_tab = new javax.swing.JTabbedPane();\n"+
" general_jPanel1 = new javax.swing.JPanel();\n"+
" name_jLabel = new javax.swing.JLabel();\n"+
" name_jTextField = new javax.swing.JTextField();\n"+
" rename_jButton = new javax.swing.JButton();\n"+
" reset_jButton = new javax.swing.JButton();\n"+
" close_jButton = new javax.swing.JButton();\n"+
" stop_jButton = new javax.swing.JButton();\n"+
" run_jButton = new javax.swing.JButton();\n")
#
# Default Close Operation
#
def write_setDefaultCloseOperation(out,yml):
out.write("\n setDefaultCloseOperation(javax.swing.WindowConstants.DISPOSE_ON_CLOSE);\n\n")
write_boxes_buttons_values(out,yml)
write_organize_boxes_buttons_values(out,yml)
write_general_panel(out,yml)
write_program_overview(out,yml)
write_nested_tabs(out,yml)
out.write("\n"+
" pack();\n"
" }\n")
#
# Default Close Operation
#
def write_boxes_buttons_values(out,yml):
if 'Docker' in yml and yml['Docker'] is not None:
write_box_and_button(out,"docker_jButton","Docker",'JButton','Access to the docker program editor')
if 'Cluster' in yml and yml['Cluster'] is not None:
write_box_and_button(out,"cluster_jButton","Cluster",'JButton','Access to the cluster program editor')
out.write(" "+u.get_program_name(yml)+"_tab.addComponentListener(new java.awt.event.ComponentAdapter() {\n"+
" public void componentShown(java.awt.event.ComponentEvent evt) {\n"+
" "+u.get_program_name(yml)+"_tab_ComponentShown(evt);\n"+
" }\n"+
" });\n"+
"\n"+
" general_jPanel1.setName(\"general_jPanel1\");\n"+
" general_jPanel1.setPreferredSize(new java.awt.Dimension(459, 400));\n"+
"\n")
write_specific_button(out,"stop_jButton","Stop",'JButton','Stop this box','91','255, 0, 0')
write_specific_button(out,"reset_jButton","Reset",'JButton','Reset to default values','91','255, 116, 0')
write_specific_button(out,"run_jButton","Run",'JButton','Run this box','91','0, 255, 3')
write_specific_button(out,"how_jButton","?",'JButton','About this box','51','255, 0, 255')
write_specific_button(out,"close_jButton","Close",'JButton','Close this box','91','0, 0, 255')
write_specific_button(out,"ClusterProgram_jButton","Cluster Options",'JButton','Get Acccess to cluster','115','0, 0, 255')
write_box_and_button(out,"name_jLabel","(re)Name",'label','Name Box')
write_box_and_button(out,"name_jTextField","Name",'txt','Rename the box here')
for Panel in yml['Menus']:
if Panel['isMenu']:
write_menu_options(out,Panel['name'])
# Add panels and commands data
write_pgrm_box_and_button(out,yml)
def write_pgrm_box_and_button(out,yml):
for Panel in yml['Menus']:
pName = Panel['name']
pNameI = u.create_initials(pName)
if 'Panel' in Panel:
for Tab in Panel['Panel']:
tName = Tab['tab']
tNameI = u.create_initials(tName)
if not Panel['isTab']:
write_box_and_button(out,pNameI+"_"+tNameI+"_JLabel",tName,'label','Sub Items')
if 'Arguments' in Tab:
for Arguments in Tab['Arguments']:
cName = Arguments['name']
cType = Arguments['cType']
cText = u.remove_hyphen(cName)
cHelp = str(Arguments['tooltip'])
c = u.create_button_name(pName,tName,cName,cType)
v = ""
write_box_and_button(out,c,cText,cType,cHelp)
if 'values' in Arguments and \
Arguments['values'] is not None and \
Arguments['values']['vType'] is not None:
vCom = Arguments['values']
vType = vCom['vType']
v = u.create_value_name(pName,tName,cName,vType)
write_connected_value(out,v,cText,vType,cHelp,vCom)
def write_connected_value(out,v,cText,vType,cHelp,vCom):
isSpin = u.is_a_spinner(vType)
isText = u.is_a_text(vType)
isLabel = u.is_a_label(vType)
isCombo = u.is_a_combo(vType)
val = ""
if isSpin:
vDefault = vCom['vDefault']
vMin = vCom['vMin']
vMax = vCom['vMax']
vJump = vCom['vJump']
val = u.return_range_value(vType,vDefault,vMin,vMax,vJump,False)
out.write(" "+v+".setModel(new javax.swing.SpinnerNumberModel("+val+"));\n")
if isCombo:
val = "\""+'", "'.join(vCom['vValues'])+"\""
out.write(" "+v+".setModel(new javax.swing.DefaultComboBoxModel(new String[] { "+val+" }));\n")
if isText or isLabel:
val = str(vCom['vValues'])
out.write(" "+v+".setText(\""+val+"\");\n")
out.write(" "+v+".setName(\""+v+"\"); // NOI18N\n"+
" "+v+".getAccessibleContext().setAccessibleDescription(\""+cHelp+"\");\n")
if isSpin or isText or isLabel:
out.write(" "+v+".setPreferredSize(new java.awt.Dimension(")
if isSpin:
out.write("115")
if isText or isLabel:
out.write("220")
out.write(", 28));\n")
if isText:
out.write(" "+v+".addFocusListener(new java.awt.event.FocusAdapter() {\n"+
" public void focusLost(java.awt.event.FocusEvent evt) {\n"+
" "+v+"_FocusLost(evt);\n"+
" }\n"+
" });\n")
if isSpin:
out.write(" "+v+".addChangeListener(new javax.swing.event.ChangeListener() {\n"+
" public void stateChanged(javax.swing.event.ChangeEvent evt) {\n"+
" "+v+"_StateChanged(evt);\n"+
" }\n"+
" });\n")
if isCombo or isText:
out.write(" "+v+".addActionListener(new java.awt.event.ActionListener() {\n"+
" public void actionPerformed(java.awt.event.ActionEvent evt) {\n"+
" "+v+"_ActionPerformed(evt);\n"+
" }\n"+
" });\n")
out.write("\n")
def write_box_and_button(out,cName,cText,cType,cHelp):
isLabel = u.is_a_label(cType)
out.write(" "+cName+".setText(\""+cText+"\");\n"+
" "+cName+".setName(\""+cName+"\"); // NOI18N\n")
if isLabel:
out.write(" "+cName+".setFont(new java.awt.Font(\"Ubuntu\", 3, 15)); // NOI18N\n")
if cHelp != "" or cHelp is not "None":
out.write(" "+cName+".getAccessibleContext().setAccessibleDescription(\""+cHelp+"\");\n")
if not isLabel:
isText = u.is_a_text(cType)
if isText:
out.write(" "+cName+".addFocusListener(new java.awt.event.FocusAdapter() {\n"+
" public void focusLost(java.awt.event.FocusEvent evt) {\n"+
" "+cName+"_FocusLost(evt);\n"+
" }\n"+
" });\n")
out.write(" "+cName+".addActionListener(new java.awt.event.ActionListener(){\n"+
" public void actionPerformed(java.awt.event.ActionEvent evt){\n"+
" "+cName+"_ActionPerformed(evt);\n"+
" }\n"+
" });\n")
out.write("\n")
def write_specific_button(out,cName,cText,bType,bHelp,bLength,bColor):
    # TODO: consider merging this with write_box_and_button above
out.write(" "+cName+".setText(\""+cText+"\");\n"+
" "+cName+".setName(\""+cName+"\"); // NOI18N\n"+
" "+cName+".setMaximumSize(new java.awt.Dimension("+bLength+",29));\n"+
" "+cName+".setMinimumSize(new java.awt.Dimension("+bLength+",29));\n"+
" "+cName+".setPreferredSize(new java.awt.Dimension("+bLength+",29));\n"+
" "+cName+".setForeground(new java.awt.Color("+bColor+"));\n"+
" "+cName+".getAccessibleContext().setAccessibleDescription(\""+bHelp+"\");\n"+
" "+cName+".addActionListener(new java.awt.event.ActionListener(){\n"+
" public void actionPerformed(java.awt.event.ActionEvent evt){\n"+
" "+cName+"_ActionPerformed(evt);\n"+
" }\n"+
" });\n")
def write_menu_options(out,mName):
mNameS = u.name_without_space(mName)
out.write( " Menu_Buttons.add("+mNameS+");\n"+
" "+mNameS+".setText(\""+mName+"\");\n"+
" "+mNameS+".setName(\""+mNameS+"\"); // NOI18N\n"+
" "+mNameS+".addActionListener(new java.awt.event.ActionListener(){\n"+
" public void actionPerformed(java.awt.event.ActionEvent evt){\n"+
" "+mNameS+"_ActionPerformed(evt);\n"+
" }\n"+
" });\n")
def write_organize_boxes_buttons_values(out,yml):
for Panel in yml['Menus']:
pName = Panel['name']
pLen = 0
if 'Panel' in Panel:
if not Panel['isTab']:
write_jPanel(out,pName,Panel['Panel'])
if Panel['isTab']:
pLen = len(Panel['Panel'])
for Tab in Panel['Panel']:
tName = Tab['tab']
if 'Arguments' in Tab:
write_jPanel_isTab(out,pName,tName,Tab['Arguments'],pLen)
if pLen > 1 and Panel['isTab']: # Means need a tabs
pNameI = u.create_initials(pName)
tmName = pNameI+"_"+pNameI
write_tab_in_jPanel(out,pNameI,tmName)
def write_tab_in_jPanel(out,pNameI,tmName):
out.write( " javax.swing.GroupLayout "+pNameI+"_Layout = new javax.swing.GroupLayout("+pNameI+"_JPanel);\n"+
" "+pNameI+"_JPanel.setLayout("+pNameI+"_Layout);\n"+
" "+pNameI+"_Layout.setHorizontalGroup(\n"+
" "+pNameI+"_Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)\n"+
" .addComponent("+tmName+"_JTabbedPane)\n"+
" );\n"+
" "+pNameI+"_Layout.setVerticalGroup(\n"+
" "+pNameI+"_Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)\n"+
" .addGroup("+pNameI+"_Layout.createSequentialGroup()\n"+
" .addComponent("+tmName+"_JTabbedPane)\n"+
" .addContainerGap())\n"+
" );\n"+
"\n")
def write_jPanel(out,pName,Panel):
(tabBV,dictBV,infB,infV) = u.refactor_components_notTab(pName,Panel)
pNameI = u.create_initials(pName)
out.write( " javax.swing.GroupLayout "+pNameI+"_Layout = new javax.swing.GroupLayout("+pNameI+"_JPanel);\n"+
" "+pNameI+"_JPanel.setLayout("+pNameI+"_Layout);\n"+
" "+pNameI+"_Layout.setHorizontalGroup(\n"+
" "+pNameI+"_Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)\n"+
" .addGroup("+pNameI+"_Layout.createSequentialGroup()\n"+
" .addContainerGap()\n")
out.write( " .addGroup("+pNameI+"_Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)\n")
for bv in tabBV:
out.write( " .addComponent("+bv+")\n")
out.write( " )\n"+
" .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)\n"+
" .addGroup("+pNameI+"_Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)\n")
for bv in tabBV:
if dictBV[bv] != "":
out.write( " .addComponent("+dictBV[bv]+", javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)\n")
out.write( " )\n"+
" .addContainerGap() \n"+
" )\n"+
" );\n"+
" "+pNameI+"_Layout.setVerticalGroup(\n"+
" "+pNameI+"_Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)\n"+
" .addGroup("+pNameI+"_Layout.createSequentialGroup()\n"+
" .addContainerGap()\n")
x = 1
l = len(tabBV)
for bv in tabBV:
out.write( " .addGroup("+pNameI+"_Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)\n"+
" .addComponent("+bv+")\n")
if dictBV[bv] != "":
out.write( " .addComponent("+dictBV[bv]+", javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)\n")
out.write( " )\n")
if x < l :
out.write( " .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)\n")
x+=1
out.write( " .addContainerGap()\n"+
" )\n"+
" );\n"+
"\n")
def write_jPanel_isTab(out,pName,tName,Tab,pLen):
dictBV = {}
tabBV = []
(tabBV,dictBV) = u.refactor_components_Tab(pName,tName,Tab)
pNameI = u.create_initials(pName)
tNameI = pNameI+"_"+u.create_initials(tName)
out.write( " javax.swing.GroupLayout "+tNameI+"_Layout = new javax.swing.GroupLayout("+tNameI+"_JPanel);\n"+
" "+tNameI+"_JPanel.setLayout("+tNameI+"_Layout);\n"+
" "+tNameI+"_Layout.setHorizontalGroup(\n"+
" "+tNameI+"_Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)\n"+
" .addGroup("+tNameI+"_Layout.createSequentialGroup()\n"+
" .addContainerGap()\n"+
" .addGroup("+tNameI+"_Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)\n")
for bv in tabBV:
out.write( " .addComponent("+bv+")\n")
out.write( " )\n"+
" .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)\n"+
" .addGroup("+tNameI+"_Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)\n")
for bv in tabBV:
if dictBV[bv] != "":
out.write( " .addComponent("+dictBV[bv]+", javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)\n")
out.write( " )\n"+
" .addContainerGap()) \n"+#)\n"+
" );\n"+
" "+tNameI+"_Layout.setVerticalGroup(\n"+
" "+tNameI+"_Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)\n"+
" .addGroup("+tNameI+"_Layout.createSequentialGroup()\n"+
" .addContainerGap()\n")
x = 1
l = len(tabBV)
for bv in tabBV:
out.write( " .addGroup("+tNameI+"_Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)\n"+
" .addComponent("+bv+")\n")
if dictBV[bv] != "":
out.write( " .addComponent("+dictBV[bv]+", javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)\n")
out.write( " )\n")
if x < l :
out.write( " .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)\n")
x+=1
out.write( " .addContainerGap()) \n"+#)\n"+
" );\n"+
"\n")
def write_general_panel(out,yml):
out.write( " general_jPanel1.setName(\"general_jPanel1\"); // NOI18N\n"+
" general_jPanel1.setPreferredSize(new java.awt.Dimension(459, 400));\n"+
" javax.swing.GroupLayout general_jPanel1Layout = new javax.swing.GroupLayout(general_jPanel1);\n"+
" general_jPanel1.setLayout(general_jPanel1Layout);\n"+
" general_jPanel1Layout.setHorizontalGroup(\n"+
" general_jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)\n"+
" .addGroup(general_jPanel1Layout.createSequentialGroup()\n"+
" .addGroup(general_jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)\n"+
" .addComponent(reset_jButton, javax.swing.GroupLayout.PREFERRED_SIZE, 95, javax.swing.GroupLayout.PREFERRED_SIZE)\n"+
" .addGap(18, 18, 18)\n"+
" .addComponent(stop_jButton, javax.swing.GroupLayout.PREFERRED_SIZE, 95, javax.swing.GroupLayout.PREFERRED_SIZE)\n"+
" .addGap(18, 18, 18)\n"+
" .addComponent(run_jButton, javax.swing.GroupLayout.PREFERRED_SIZE, 95, javax.swing.GroupLayout.PREFERRED_SIZE))\n"+
" .addGroup(general_jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)\n"+
" .addComponent(name_jLabel, javax.swing.GroupLayout.PREFERRED_SIZE, 95, javax.swing.GroupLayout.PREFERRED_SIZE)\n"+
" .addGap(18, 18, 18)\n"+
" .addComponent(name_jTextField, javax.swing.GroupLayout.PREFERRED_SIZE, 204, javax.swing.GroupLayout.PREFERRED_SIZE))\n")
allMenu = []
for Panel in yml['Menus']:
if Panel['isMenu']:
allMenu.append(u.name_without_space(Panel['name']))
mLen = len(allMenu)
x = 0
if mLen>1:
out.write( " .addGroup(general_jPanel1Layout.createSequentialGroup()\n")
for menu in allMenu:
if mLen%2 != 0:
if mLen>1 and (mLen-x-1)%2==0 and mLen-x>1:
out.write( " .addGroup(general_jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)\n ")
if x%2 != 0 and mLen>1 :
out.write( " .addGap(18, 18, 18)\n ")
out.write( " .addComponent("+menu+")\n")
if mLen>1 and (mLen-x-1)%2==1 and mLen-x>1:
out.write( " )\n")
if mLen%2 == 0:
if x%2==0:
out.write( " .addGroup(general_jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)\n ")
out.write( " .addComponent("+menu+")\n")
if x%2 == 0 and mLen>1 :
out.write( " .addGap(18, 18, 18)\n ")
if x%2 != 0:
out.write( " )\n")
x+=1
if mLen>1:
out.write( " )\n")
out.write( " .addComponent(main_jScroll))\n"+
" );\n"+
" general_jPanel1Layout.setVerticalGroup(\n"+
" general_jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)\n"+
" .addGroup(general_jPanel1Layout.createSequentialGroup()\n"+
" .addContainerGap()\n"+
" .addGroup(general_jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)\n"+
" .addComponent(stop_jButton, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)\n"+
" .addComponent(reset_jButton)\n"+
" .addComponent(run_jButton, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))\n"+
" .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)\n"+
" .addGroup(general_jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)\n"+
" .addComponent(name_jLabel)\n"+
" .addComponent(name_jTextField, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))\n")
x = 0
for menu in allMenu:
if mLen%2 != 0:
if x%2 == 0:
out.write(" .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)\n")
if mLen>1 and (mLen-x-1)%2==0 and mLen-x>1:
out.write(" .addGroup(general_jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)\n ")
if x%2 != 0 and mLen>1 :
out.write(" ")
out.write(" .addComponent("+menu+")\n")
if mLen>1 and (mLen-x-1)%2==1 and mLen-x>1:
out.write( " )\n")
if mLen%2 == 0:
if x%2 == 0:
out.write(" .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)\n")
out.write(" .addGroup(general_jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)\n ")
if x%2 != 0 and mLen>1 :
out.write(" ")
out.write(" .addComponent("+menu+")\n")
if x%2 != 0:
out.write( " )\n")
x+=1
out.write( " .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)\n"+
" .addComponent(main_jScroll, javax.swing.GroupLayout.PREFERRED_SIZE, 266, javax.swing.GroupLayout.PREFERRED_SIZE)\n"+
" .addContainerGap())\n"+
" );\n"+
"\n")
def write_program_overview(out,yml):
out.write(" javax.swing.GroupLayout layout = new javax.swing.GroupLayout(getContentPane());\n"+
" getContentPane().setLayout(layout);\n"+
" layout.setHorizontalGroup(\n"+
" layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)\n"+
" .addGroup(layout.createSequentialGroup()\n"+
" .addComponent(close_jButton, javax.swing.GroupLayout.PREFERRED_SIZE, 95, javax.swing.GroupLayout.PREFERRED_SIZE)\n"+
" .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)\n"+
" .addGap(18, 18, 18)\n"+
" .addComponent(ClusterProgramButton)\n"+
" .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)\n")
if 'Docker' in yml and yml['Docker'] is not None:
out.write(" .addComponent(docker_jButton)\n"+
" .addGap(18, 18, 18)\n")
if 'Cluster' in yml and yml['Cluster'] is not None:
out.write(" .addComponent(cluster_jButton)\n"+
" .addGap(18, 18, 18)\n")
out.write(" .addComponent(how_jButton, javax.swing.GroupLayout.PREFERRED_SIZE, 95, javax.swing.GroupLayout.PREFERRED_SIZE))\n"+
" .addComponent("+u.get_program_name(yml)+"_tab, javax.swing.GroupLayout.PREFERRED_SIZE, 308, javax.swing.GroupLayout.PREFERRED_SIZE)\n"+
" );\n"+
" layout.setVerticalGroup(\n"+
" layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)\n"+
" .addGroup(layout.createSequentialGroup()\n"+
" .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)\n"+
" .addComponent(close_jButton, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)\n"+
" .addComponent(ClusterProgramButton))\n")
if 'Docker' in yml and yml['Docker'] is not None:
out.write(" .addComponent(docker_jButton)\n")
if 'Cluster' in yml and yml['Cluster'] is not None:
out.write(" .addComponent(cluster_jButton)\n")
out.write(" .addComponent(how_jButton, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))\n"+
" .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)\n"+
" .addComponent("+u.get_program_name(yml)+"_tab, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))\n"+
" );\n"+
"\n"+
" "+u.get_program_name(yml)+"_tab.getAccessibleContext().setAccessibleName(\""+u.get_program_name(yml)+"\");\n"+
" "+u.get_program_name(yml)+"_tab.addTab(\""+u.get_program_name(yml)+"\", general_jPanel1);\n"+
" "+u.get_program_name(yml)+"_tab.addTab(\""+u.get_program_name(yml)+"\", general_jPanel1);\n"+
" "+u.get_program_name(yml)+"_tab.addComponentListener(new java.awt.event.ComponentAdapter() {\n"+
" public void componentShown(java.awt.event.ComponentEvent evt) {\n"+
" "+u.get_program_name(yml)+"_tab_ComponentShown(evt);\n"+
" }\n"+
" });\n"+
" main_jScroll.setViewportView(options_tab_panel);\n")
def write_nested_tabs(out,yml):
for Panel in yml['Menus']:
pNameI = u.create_initials(Panel['name'])
if 'Panel' in Panel:
out.write(" options_tab_panel.addTab(\""+pNameI+"\","+pNameI+"_JPanel);\n")
for Tab in Panel['Panel']:
if 'Arguments' in Tab:
pLen = len(Panel['Panel'])
if pLen> 1 and Panel['isTab']: # Means need a tabs
# Create a panel to insert Arguments
tmName = pNameI+"_"+pNameI
tName = pNameI+"_"+u.create_initials(Tab['tab'])
out.write(" "+tmName+"_JTabbedPane.addTab(\""+Tab['tab']+"\","+tName+"_JPanel);\n")
#
# Events part
#
def write_events(out,yml):
write_events_start(out,yml)
write_event_menu(out,yml)
write_event_commands(out,yml)
#
# Events part
#
def write_events_start(out,yml):
out.write( " // </editor-fold>//GEN-END:initComponents\n"+
"\n"+
" private void "+u.get_program_name(yml)+"_tab_ComponentShown(java.awt.event.ComponentEvent evt){//GEN-FIRST:event_"+u.get_program_name(yml)+"_tab_ComponentShown\n"+
" // TODO add your handling code here:\n"+
" }//GEN-LAST:event_"+u.get_program_name(yml)+"_tab_ComponentShown\n"+
" \n"+
" private void how_jButton_ActionPerformed(java.awt.event.ActionEvent evt){//GEN-FIRST:event_how_jButton_ActionPerformed\n"+
" // TODO add your handling code here:\n"+
" HelpEditor help = new HelpEditor(this.frame, false, properties);\n"+
" help.setVisible(true);\n"+
" }//GEN-LAST:event_how_jButton_ActionPerformed\n"+
"\n"+
" private void close_jButton_ActionPerformed(java.awt.event.ActionEvent evt){//GEN-FIRST:event_close_jButton_ActionPerformed\n"+
" // TODO add your handling code here:\n"+
" this.setVisible(false);\n"+
" }//GEN-LAST:event_close_jButton_ActionPerformed\n"+
"\n"+
" private void run_jButton_ActionPerformed(java.awt.event.ActionEvent evt){//GEN-FIRST:event_run_jButton_ActionPerformed\n"+
" // TODO add your handling code here:\n"+
" if (this.properties.isSet(\"ClassName\")){\n"+
" this.parent_workflow.workflow.updateDependance();\n"+
" programs prog=new programs(parent_workflow.workbox.getCurrentWorkflows());\n"+
" prog.Run(properties);\n"+
" }\n"+
" }//GEN-LAST:event_run_jButton_ActionPerformed\n"+
"\n"+
" private void stop_jButton_ActionPerformed(java.awt.event.ActionEvent evt){//GEN-FIRST:event_stop_jButton_ActionPerformed\n"+
" // TODO add your handling code here:\n"+
" properties.put(\"Status\", Config.status_nothing);\n"+
" properties.killThread();\n"+
" }//GEN-LAST:event_stop_jButton_ActionPerformed\n"+
"\n"+
" private void reset_jButton_ActionPerformed(java.awt.event.ActionEvent evt){//GEN-FIRST:event_reset_jButton_ActionPerformed\n"+
" // TODO add your handling code here:\n"+
" properties.load(); //--reload current properties from file\n"+
" this.setProperties(properties);//--Update current field\n"+
" //this.display(properties);\n"+
" this.setVisible(false);\n"+
" }//GEN-LAST:event_reset_jButton_ActionPerformed\n"+
"\n"+
" private void name_jTextField_ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_name_jTextField_ActionPerformed\n"+
" // TODO add your handling code here:\n"+
" properties.put(\"Name\", name_jTextField.getText());\n"+
" }//GEN-LAST:event_name_jTextField_ActionPerformed\n"+
"\n"+
" private void name_jTextField_FocusLost(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_name_jTextField_ActionPerformed\n"+
" // TODO add your handling code here:\n"+
" properties.put(\"Name\", name_jTextField.getText());\n"+
" }//GEN-LAST:event_name_jTextField_ActionPerformed\n"+
"\n"+
" private void ClusterProgram_jButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_ClusterProgram_jButton_ActionPerformed\n"+
" // TODO add your handling code here:\n"+
" clusterEditorProgram clus = new clusterEditorProgram(this.frame, false, properties);\n"+
" clus.setVisible(true);\n"+
" }//GEN-LAST:event_ClusterProgram_jButtonActionPerformed\n"+
"\n")
if 'Docker' in yml and yml['Docker'] is not None:
out.write(" private void docker_jButton_ActionPerformed(java.awt.event.ActionEvent evt){//GEN-FIRST:event_docker_jButton_ActionPerformed\n"+
" // TODO add your handling code here:\n"+
" dockerEditorProgram dock = new dockerEditorProgram(this.frame, false, properties);\n"+
" dock.setVisible(true);\n"+
" }//GEN-LAST:event_docker_jButton_ActionPerformed\n"+
" \n")
if 'Cluster' in yml and yml['Cluster'] is not None:
out.write(" private void cluster_jButton_ActionPerformed(java.awt.event.ActionEvent evt){//GEN-FIRST:event_cluster_jButton_ActionPerformed\n"+
" // TODO add your handling code here:\n"+
" clusterEditorProgram clus = new clusterEditorProgram(this.frame, false, properties);\n"+
" clus.setVisible(true);\n"+
" }//GEN-LAST:event_cluster_jButton_ActionPerformed\n"+
" \n")
def write_event_menu(out,yml):
allMenu = []
for Panel in yml['Menus']:
if Panel['isMenu']:
allMenu.append(u.name_without_space(Panel['name']))
mLen = len(allMenu)
for menu in allMenu:
out.write( " private void "+menu+"_ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_"+menu+"_ActionPerformed\n"+
" // TODO add your handling code here:\n")
if mLen > 1:
p = 0
out.write( " if (")
for menu2 in allMenu:
if menu != menu2:
if p == 0:
out.write( "properties.isSet("+menu2+".getName())")
else :
out.write( " &&\n properties.isSet("+menu2+".getName())")
p+=1
out.write( "){\n")
for menu2 in allMenu:
if menu != menu2:
out.write( " properties.remove("+menu2+".getName());\n")
out.write( " }\n"+
" Util.buttonEventSpinner(properties,"+menu+",null);\n"+
" menuFields(properties);\n")
out.write( " }//GEN-LAST:event_"+menu+"_ActionPerformed\n"+
" \n")
def write_event_commands(out,yml):
for Panel in yml['Menus']:
pName = Panel['name']
if 'Panel' in Panel:
for Tab in Panel['Panel']:
tName = Tab['tab']
if 'Arguments' in Tab:
for Arguments in Tab['Arguments']:
cName = Arguments['name']
cType = Arguments['cType']
childrens = Arguments['parentOf']
opposites = Arguments['oppositeTo']
c = u.create_button_name(pName,tName,cName,cType)
v = ""
vType = ""
if 'values' in Arguments and \
Arguments['values'] is not None and \
Arguments['values']['vType'] is not None:
vCom = Arguments['values']
vType = vCom['vType']
v = u.create_value_name(pName,tName,cName,vType)
write_event_command_value(out,c,cType,v,vType)
write_event_command(out,c,cType,v,vType,childrens,opposites)
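# The generated handler below dispatches on the widget kind: check-box arguments call
# Util.boxEvent*(...) and button arguments call Util.buttonEvent*(...), with the suffix
# chosen by the value type (Spinner/TextField/ComboBox).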
def write_event_command(out,c,cType,v,vType,childrens,opposites):
out.write( " private void "+c+"_ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_"+c+"_ActionPerformed\n"+
" // TODO add your handling code here:\n")
if opposites != None :
write_event_command_is_opposite_to(out,opposites)
if vType != '':
vType = u.get_value_java_type(vType)
isCheckBox = u.is_a_box(cType)
isButton = u.is_a_button(cType)
if v == "":
v = 'null'
if isCheckBox:
if vType == "" or 'Spinner' in vType:
out.write(" Util.boxEventSpinner(properties,"+c+","+v+");\n")
if 'TextField' in vType:
out.write(" Util.boxEventText(properties,"+c+","+v+");\n")
if 'ComboBox' in vType:
out.write(" Util.boxEventComboBox(properties,"+c+","+v+");\n")
if 'Dir' in vType:
write_event_command_dir(out,c,cType,v,vType)
if isButton:
if vType == "" or 'Spinner' in vType:
out.write(" Util.buttonEventSpinner(properties,"+c+","+v+");\n")
if 'TextField' in vType:
out.write(" Util.buttonEventText(properties,"+c+","+v+");\n")
if childrens != None :
write_event_command_is_a_parent(out,c,childrens)
out.write(" }//GEN-LAST:event_"+c+"_ActionPerformed\n")
def write_event_command_is_a_parent(out,c,childrens):
out.write( " if (properties.isSet("+c+".getName())) {\n")
for child in childrens:
out.write(" "+child[0]+".setEnabled(true);\n")
if child[1] != None and child[1] != '':
out.write( " if (!properties.isSet("+child[0]+".getName())) {\n"+
" "+child[1]+".setEnabled(false);\n"+
" }\n")
out.write( " } else {\n")
for child in childrens:
out.write(" "+child[0]+".setEnabled(false);\n"+
" "+child[0]+".setSelected(false);\n"+
" properties.remove("+child[0]+".getName());\n");
if child[1] != None and child[1] != '':
out.write( " "+child[1]+".setEnabled(false);\n")
out.write( " }\n"+
"\n")
def write_event_command_is_opposite_to(out,opposites):
x=0
oLen = len(opposites)
out.write( " if (\n")
for o in opposites:
out.write( " properties.isSet("+o[0]+".getName())")
if x < (oLen-1) and oLen > 1:
out.write( " && \n")
x+=1
out.write( "\n ){\n")
for o in opposites:
out.write( " properties.remove("+o[0]+".getName());\n"+
" "+o[0]+".setSelected(false);\n")
if o[1] != "":
out.write( " "+o[1]+".setEnabled(false);\n")
out.write( " }\n"+
"\n")
def write_event_command_dir(out,c,cType,v,vType):
    print('is a directory. Need to be done')
def write_event_command_value(out,c,cType,v,vType):
d = u.get_java_eventHandler_simple(vType)
vType = u.get_value_java_type(vType)
isCheckBox = u.is_a_box(cType)
isButton = u.is_a_button(cType)
action = "ActionPerformed"
if vType == "" or 'Spinner' in vType:
action = "StateChanged"
if 'TextField' in vType:
action = "FocusLost"
if 'ComboBox' in vType:
action = "ActionPerformed"
out.write( " private void "+v+"_"+d[3]+"("+d[4]+""+d[2]+" evt) {//GEN-FIRST:event_"+v+"_"+d[0]+"\n"+
" // TODO add your handling code here:\n")
if isCheckBox:
if vType == "" or 'Spinner' in vType:
out.write(" Util.boxEventSpinner(properties,"+c+","+v+");\n")
if 'TextField' in vType:
out.write(" Util.boxEventText(properties,"+c+","+v+");\n")
if 'ComboBox' in vType:
out.write(" Util.boxEventComboBox(properties,"+c+","+v+");\n")
if 'Dir' in vType:
write_event_command_dir_value(out,c,cType,v,vType)
if isButton:
if vType == "" or 'Spinner' in vType:
out.write(" Util.buttonEventSpinner(properties,"+c+","+v+");\n")
if 'TextField' in vType:
out.write(" Util.buttonEventText(properties,"+c+","+v+");\n")
out.write( " }//GEN-LAST:event_"+v+"_"+d[0]+"\n"+
"\n")
if 'TextField' in vType:
action = "ActionPerformed"
out.write( " private void "+v+"_ActionPerformed("+d[4]+"ActionEvent evt) {//GEN-FIRST:event_"+v+"_"+d[0]+"\n"+
" // TODO add your handling code here:\n")
if isCheckBox:
if 'TextField' in vType:
out.write(" Util.boxEventText(properties,"+c+","+v+");\n")
if isButton:
if vType == "" or 'Spinner' in vType:
out.write(" Util.buttonEventSpinner(properties,"+c+","+v+");\n")
if 'TextField' in vType:
out.write(" Util.buttonEventText(properties,"+c+","+v+");\n")
out.write( " }//GEN-LAST:event_"+v+"_"+d[0]+"\n"+
"\n")
def write_event_command_dir_value(out,c,cType,v,vType):
    print('is a directory. Need to be done')
    out.write( "            JFileChooser d;\n"+
               "            if (this."+v+".getText().isEmpty()) {\n"+
               "                d=new JFileChooser(config.getExplorerPath());\n"+
               "            } else {\n"+
               "                d=new JFileChooser(this."+v+".getText());\n"+
               "            }\n"+
               "            \n")
if v.endswith('DirRep'):
out.write(" d.setFileSelectionMode(JFileChooser.DIRECTORIES_ONLY);\n")
else:
out.write(" d.setFileSelectionMode(JFileChooser.FILES_ONLY);\n")
out.write(" d.setAcceptAllFileFilterUsed(false);\n")
if v.endswith('DirFiles'):
out.write(" d.setMultiSelectionEnabled(true);\n")
else:
out.write(" d.setMultiSelectionEnabled(false);\n")
out.write( " int result = d.showOpenDialog(this);\n"+
" \n"+
" if (result==JFileChooser.APPROVE_OPTION){\n"+
" File dir = d.getSelectedFile();\n"+
" \n"+
" // Set the text\n"+
" String s = dir.getAbsolutePath();\n"+
" "+v+".setText(s);\n"+
" properties.remove("+v+".getName());\n"+
" Util."+ctype+"EventText(properties,"+c+","+v+");\n"+
" }\n")
#
# Functions
#
def write_functions(out,yml):
write_objects_list_dictionaries(out,yml)
write_objects_dictionaries(out,yml)
write_configuration_object_properties(out,yml)
write_set_properties(out,yml)
write_default_program_values(out,yml)
write_menu_fields(out,yml)
write_usp_parent_children(out,yml)
write_save_image(out,yml)
#
# Functions
#
def write_objects_list_dictionaries(out,yml):
out.write( " /*******************************************************************\n"+
" * Perpare List Dictionaries\n"+
" ******************************************************************/\n"+
"\n"+
" /**\n"+
" * Perpare List of Dictionaries by a general reset\n"+
" * @param properties\n"+
" */\n"+
"\n"+
" public void resetDictionaries(workflow_properties properties){\n")
p = 0
for Panel in yml['Menus']:
if 'Panel' in Panel:
if Panel['isMenu']:
out.write(" Util.dictsReset(listDictsMenu"+str(p)+",DictMenuCBS"+str(p)+",DictMenuCBT"+str(p)+",DictMenuCBC"+str(p)+",DictMenuRBS"+str(p)+",DictMenuRBT"+str(p)+");\n")
else:
out.write(" Util.dictsReset(listDicts"+str(p)+",DictCBS"+str(p)+",DictCBT"+str(p)+",DictCBC"+str(p)+",DictRBS"+str(p)+",DictRBT"+str(p)+");\n")
p += 1
out.write(" }\n"+
"\n")
def write_objects_dictionaries(out,yml):
out.write( " /*******************************************************************\n"+
" * Perpare Dictionaries\n"+
" ******************************************************************/\n"+
"\n"+
" /**\n"+
" * Perpare Dictionaries by adding commands\n"+
" * @param properties\n"+
" */\n"+
"\n"+
" public void perpareDictionaries(workflow_properties properties){\n")
p = 0
for Panel in yml['Menus']:
pName = Panel['name']
if 'Panel' in Panel:
for Tab in Panel['Panel']:
tName = Tab['tab']
if 'Arguments' in Tab:
for Arguments in Tab['Arguments']:
cName = Arguments['name']
cType = Arguments['cType']
c = u.create_button_name(pName,tName,cName,cType)
vType = ""
v = "null"
if 'values' in Arguments and \
Arguments['values'] is not None and \
Arguments['values']['vType'] is not None:
vType = Arguments['values']['vType']
v = u.create_value_name(pName,tName,cName,vType)
cType = u.get_box_type(cType)
if vType != "":
vType = u.get_value_java_type(vType)
if 'CheckBox' in cType:
if vType == "" or 'Spinner' in vType:
if Panel['isMenu']:
out.write(" DictMenuCBS"+str(p)+".put("+c+","+v+");\n")
else:
out.write(" DictCBS"+str(p)+".put("+c+","+v+");\n")
if 'TextField' in vType:
if Panel['isMenu']:
out.write(" DictMenuCBT"+str(p)+".put("+c+","+v+");\n")
else:
out.write(" DictCBT"+str(p)+".put("+c+","+v+");\n")
if 'ComboBox' in vType:
if Panel['isMenu']:
out.write(" DictMenuCBC"+str(p)+".put("+c+","+v+");\n")
else:
out.write(" DictCBC"+str(p)+".put("+c+","+v+");\n")
if 'Button' in cType:
if vType == "" or 'Spinner' in vType:
if Panel['isMenu']:
out.write(" DictMenuRBS"+str(p)+".put("+c+","+v+");\n")
else:
out.write(" DictRBS"+str(p)+".put("+c+","+v+");\n")
if 'TextField' in vType:
if Panel['isMenu']:
out.write(" DictMenuRBT"+str(p)+".put("+c+","+v+");\n")
else:
out.write(" DictRBT"+str(p)+".put("+c+","+v+");\n")
p += 1
out.write(" }\n"+
"\n")
def write_configuration_object_properties(out,yml):
out.write("\n /*******************************************************************\n"+
" * Set the configuration properties for this object\n"+
" ******************************************************************/\n"+
"\n"+
" @Override\n"+
" public void display(workflow_properties properties){\n"+
" this.properties=properties;\n"+
" initComponents();\n"+
" setIconImage(Config.image);\n"+
" // Set position\n"+
" Dimension screenSize = Toolkit.getDefaultToolkit().getScreenSize();\n"+
" Dimension d = getSize();\n"+
" setLocation((screenSize.width-d.width)/2,\n"+
" (screenSize.height-d.height)/2);\n"+
" \n"+
" // Set the program properties\n"+
" this.setProperties(properties);\n"+
" \n"+
" if (Cluster.isClusterEnable(parent_workflow))\n"+
" ClusterProgram_jButton.setVisible(true);\n"+
" else\n"+
" ClusterProgram_jButton.setVisible(false);\n"+
" \n"+
" this.setAlwaysOnTop(true);\n"+
" this.setVisible(true);\n"+
" }\n"+
"\n")
def write_set_properties(out,yml):
out.write(" /*******************************************************************\n"+
" * Sets for Properties\n"+
" ******************************************************************/\n"+
"\n"+
" /**\n"+
" * Set Properties\n"+
" * @param properties\n"+
" */\n"+
"\n"+
" public void setProperties(workflow_properties properties){\n"+
" this.properties=properties;\n"+
" setTitle(properties.getName());\n"+
" //if (this.properties.isSet(\"Description\")) this.Notice.setText(properties.get(\"Description\"));\n"+
" \n"+
" // Prepare dictionaries\n"+
" this.resetDictionaries(properties);\n"+
" this.perpareDictionaries(properties);\n"+
" // Properties Default Options\n"+
" this.defaultPgrmValues(properties);\n"+
" // Update Saved Properties => usp\n")
p = 0
for Panel in yml['Menus']:
if 'Panel' in Panel:
if Panel['isMenu']:
out.write(" Util.updateSavedProperties(properties,listDictsMenu"+str(p)+",name_jTextField);\n")
else:
out.write(" Util.updateSavedProperties(properties,listDicts"+str(p)+",name_jTextField);\n"+
" properties.put(\""+u.name_without_space(Panel['name'])+"\",true);\n")
p += 1
out.write(" // Set the menu\n"+
" this.menuFields(properties);\n"+
" }\n"+
"\n"+
" public void setProperties(String filename, String path){\n"+
" workflow_properties tmp=new workflow_properties();\n"+
" tmp.load(filename, path);\n"+
" this.properties=tmp;\n"+
" setTitle(properties.getName());\n"+
" }\n"+
"\n")
def write_default_program_values(out,yml):
allMenu = []
for Panel in yml['Menus']:
if Panel['isMenu']:
allMenu.append(u.name_without_space(Panel['name']))
mLen = len(allMenu)
out.write(" /*******************************************************************\n"+
" * Set With default program values present in properties file\n"+
" ******************************************************************/\n"+
" private void defaultPgrmValues(workflow_properties properties){\n")
if mLen > 0:
p = 0
out.write(" boolean b = true;\n"+
" if (")
for menu in allMenu:
if p > 0:
out.write(" && ")
out.write("!(properties.isSet("+menu+".getName()))\n")
p+=1
out.write(" ){\n"+
" b = false;\n"+
" }\n"+
" \n"+
" Util.getDefaultPgrmValues(properties,b);\n")
else:
out.write(" //Util.getDefaultPgrmValues(properties,boolean to test the presence of a default value);\n")
out.write(" }\n"+
" \n")
def write_menu_fields(out,yml):
allMenu = []
allNotMenu = []
p = 0
for Panel in yml['Menus']:
if 'Panel' in Panel:
if Panel['isMenu']:
allMenu.append([u.name_without_space(Panel['name']),str(p)])
if not Panel['isMenu']:
allNotMenu.append([u.name_without_space(Panel['name']),str(p)])
p+=1
mLen = len(allMenu)
# Menu Fields Options setting
p = 0
for Panel in yml['Menus']:
pNameS = u.name_without_space(Panel['name'])
if 'Panel' not in Panel:
out.write(" /*******************************************************************\n"+
" * Set Menu fields\n"+
" ******************************************************************/\n"+
"\n"+
" private void menuFields(workflow_properties properties){\n"+
" if (properties.isSet("+pNameS+".getName())){\n"+
" "+pNameS+".setSelected(true);\n")
for menu in allMenu:
out.write(" Util.enabled_Advanced_Options(properties,false,listDictsMenu"+menu[1]+");\n")
out.write(" }\n")
if 'Panel' in Panel and Panel['isMenu']:
out.write(" else if (properties.isSet("+pNameS+".getName())){\n"+
" "+pNameS+".setSelected(true);\n")
for menu in allMenu:
out.write(" Util.enabled_Advanced_Options(properties,")
if pNameS != menu[0]:
out.write("false")
if pNameS == menu[0]:
out.write("true")
out.write(",listDictsMenu"+menu[1]+");\n")
out.write(" }\n")
for notMenu in allNotMenu:
out.write(" Util.enabled_Advanced_Options(properties,true,listDicts"+notMenu[1]+");\n")
out.write(" // update parents and children relation\n"+
" parentsChildrenUpdate(properties);\n"+
" }\n"+
"\n")
def write_usp_parent_children(out,yml):
out.write(" /*******************************************************************\n"+
" * Update parents children relation\n"+
" ******************************************************************/\n"+
"\n"+
" private void parentsChildrenUpdate(workflow_properties properties){\n")
for Panel in yml['Menus']:
pName = Panel['name']
if 'Panel' in Panel:
for Tab in Panel['Panel']:
tName = Tab['tab']
if 'Arguments' in Tab:
for Arguments in Tab['Arguments']:
cName = Arguments['name']
cType = Arguments['cType']
childrens = Arguments['parentOf']
c = u.create_button_name(pName,tName,cName,cType)
if childrens is not None:
write_usp_parent_children_update(out,c,childrens)
out.write(" }\n"+
"\n")
def write_usp_parent_children_update(out,c,childrens):
out.write( " \n"+
" if (properties.isSet("+c+".getName())) {\n")
for child in childrens:
out.write(" "+child[0]+".setEnabled(true);\n")
if child[1] != None and child[1] != '':
out.write( " if (!properties.isSet("+child[0]+".getName())) {\n"+
" "+child[1]+".setEnabled(false);\n"+
" }\n")
out.write( " } else {\n")
for child in childrens:
out.write(" "+child[0]+".setEnabled(false);\n"+
" "+child[0]+".setSelected(false);\n"+
" properties.remove("+child[0]+".getName());\n");
if child[1] != None and child[1] != '':
out.write( " "+child[1]+".setEnabled(false);\n")
out.write( " }\n"+
"\n")
def write_save_image(out,yml):
out.write("\n /*******************************************************************\n"+
" * Save Image\n"+
" ******************************************************************/\n"+
"\n"+
" public void saveImage(String filename){\n"+
" BufferedImage bi;\n"+
" try{\n"+
" bi = new Robot().createScreenCapture(this.getBounds());\n"+
" ImageIO.write(bi, \"png\", new File(filename));\n"+
" this.setVisible(false);\n"+
" } catch (Exception ex) {\n"+
" Config.log(\"Unable to save \"+filename+\" dialog image\");\n"+
" }\n"+
" }\n"+
"\n")
#
# Bottom variables
#
def write_bottom_variables(out,yml):
write_bottom_variables_start(out,yml)
if 'Docker' in yml and yml['Docker'] is not None:
out.write(" private javax.swing.JButton docker_jButton;\n")
if 'Cluster' in yml and yml['Cluster'] is not None:
out.write(" private javax.swing.JButton cluster_jButton;\n")
write_java_variables(out,yml,"bottom")
write_bottom_variables_end(out,yml)
#
# Bottom variables
#
def write_bottom_variables_start(out,yml):
out.write(" // Variables declaration - do not modify//GEN-BEGIN:variables\n"+
" private javax.swing.JButton how_jButton;\n"
" private javax.swing.JTabbedPane "+u.get_program_name(yml)+"_tab;\n"+
" private javax.swing.JPanel general_jPanel1;\n"+
" private javax.swing.JLabel name_jLabel;\n"+
" private javax.swing.JTextField name_jTextField;\n"+
" private javax.swing.JButton rename_jButton;\n"+
" private javax.swing.JButton reset_jButton;\n"+
" private javax.swing.JButton close_jButton;\n"+
" private javax.swing.JButton stop_jButton;\n"+
" private javax.swing.JButton ClusterProgram_jButton;\n"+
" private javax.swing.JButton run_jButton;\n"+
" private javax.swing.ButtonGroup Menu_Buttons;\n")
def write_bottom_variables_end(out,yml):
out.write(" // End of variables declaration//GEN-END:variables\n"+
" \n")
# ===============================================
# write_java_variables (header and bottom)
# ===============================================
def write_java_variables(out,yml,where):
for Panel in yml['Menus']:
if Panel['isMenu']:
pNameS = u.name_without_space(Panel['name'])
if (where == "header"):
out.write(" "+pNameS+" = new javax.swing.JRadioButton();\n")
if (where == "bottom"):
out.write(" private javax.swing.JRadioButton "+pNameS+";\n")
#
# Always add main scroll and tab panel
#
if (where == "header"):
out.write(" main_jScroll = new javax.swing.JScrollPane();\n")
out.write(" options_tab_panel = new javax.swing.JTabbedPane();\n")
if (where == "bottom"):
out.write(" private javax.swing.JScrollPane main_jScroll;\n")
out.write(" private javax.swing.JTabbedPane options_tab_panel;\n")
#
# TODO: decide whether a default option should be added here.
# For now a default option without data is needed, so extra data is added below.
#
for Panel in yml['Menus']:
pName = Panel['name']
pNameI = u.create_initials(pName)
if 'Panel' in Panel:
# Create a panel to insert Arguments
if (where == "header"):
out.write(" "+pNameI+"_JPanel = new javax.swing.JPanel();\n")
if (where == "bottom"):
out.write(" private javax.swing.JPanel "+pNameI+"_JPanel;\n")
pLen = len(Panel['Panel'])
if pLen>1 and Panel['isTab']: # Means need a tabs
tmName = pNameI+"_"+pNameI
if (where == "header"):
out.write(" "+tmName+"_JTabbedPane = new javax.swing.JTabbedPane();\n")
if (where == "bottom"):
out.write(" private javax.swing.JTabbedPane "+tmName+"_JTabbedPane;\n")
for Tab in Panel['Panel']:
if 'Arguments' in Tab:
tName = Tab['tab']
tNameI = u.create_initials(tName)
if pLen>1 and Panel['isTab']: # Means need a tabs
if (where == "header"):
out.write(" "+pNameI+"_"+tNameI+"_JPanel = new javax.swing.JPanel();\n")
if (where == "bottom"):
out.write(" private javax.swing.JPanel "+pNameI+"_"+tNameI+"_JPanel;\n")
if not Panel['isTab']: # Means need a Label instead of tabs
if (where == "header"):
out.write(" "+pNameI+"_"+tNameI+"_JLabel = new javax.swing.JLabel();\n")
if (where == "bottom"):
out.write(" private javax.swing.JLabel "+pNameI+"_"+tNameI+"_JLabel;\n")
for Arguments in Tab['Arguments']:
cName = Arguments['name']
cType = Arguments['cType']
c = u.create_button_name(pName,tName,cName,cType)
if (where == "header"):
out.write(" "+c+" = new javax.swing."+u.get_box_type(cType)+"();\n")
if (where == "bottom"):
out.write(" private javax.swing."+u.get_box_type(cType)+" "+c+";\n")
if 'values' in Arguments and \
Arguments['values'] is not None and \
Arguments['values']['vType'] is not None:
v = ""
vType = Arguments['values']['vType']
v = u.create_value_name(pName,tName,cName,vType)
if (where == "header"):
out.write(" "+v+" = new javax.swing."+u.get_value_java_type(vType)+"();\n")
if (where == "bottom"):
out.write(" private javax.swing."+u.get_value_java_type(vType)+" "+v+";\n")
#
# End of file
#
def write_end(out,yml):
out.write(" }\n"+
"\n")
#
# End of file
#
```
|
{
"source": "jegonzal/clipper",
"score": 2
}
|
#### File: containers/python/noop_container.py
```python
from __future__ import print_function
import rpc
import os
import sys
import numpy as np
class NoopContainer(rpc.ModelContainerBase):
def __init__(self):
pass
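    # Every predict_* method just aggregates its input (element sum or length) as float32,
    # so the container exercises the RPC path without needing a real model.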
def predict_ints(self, inputs):
return np.array([np.sum(x) for x in inputs], dtype='float32')
def predict_floats(self, inputs):
return np.array([np.sum(x) for x in inputs], dtype='float32')
def predict_doubles(self, inputs):
return np.array([np.sum(x) for x in inputs], dtype='float32')
def predict_bytes(self, inputs):
return np.array([len(x) for x in inputs], dtype='float32')
def predict_strings(self, inputs):
return np.array([len(x) for x in inputs], dtype='float32')
if __name__ == "__main__":
try:
model_name = os.environ["CLIPPER_MODEL_NAME"]
except KeyError:
print("ERROR: CLIPPER_MODEL_NAME environment variable must be set",
file=sys.stdout)
sys.exit(1)
try:
model_version = os.environ["CLIPPER_MODEL_VERSION"]
except KeyError:
print("ERROR: CLIPPER_MODEL_VERSION environment variable must be set",
file=sys.stdout)
sys.exit(1)
ip = "127.0.0.1"
if "CLIPPER_IP" in os.environ:
ip = os.environ["CLIPPER_IP"]
else:
print("Connecting to Clipper on localhost")
port = 7000
if "CLIPPER_PORT" in os.environ:
port = int(os.environ["CLIPPER_PORT"])
else:
print("Connecting to Clipper with default port: 7000")
input_type = "doubles"
if "CLIPPER_INPUT_TYPE" in os.environ:
input_type = os.environ["CLIPPER_INPUT_TYPE"]
else:
print("Using default input type: doubles")
model = NoopContainer()
rpc.start(model, ip, port, model_name, model_version, input_type)
```
|
{
"source": "jegor377/MyOwnVoiceAssistant",
"score": 3
}
|
#### File: jegor377/MyOwnVoiceAssistant/openers.py
```python
from opener import Opener
import webopener
from speechlogger import log
class WebsiteOpener(Opener):
def __init__(self):
self.keywords = ['stronę', 'strona', 'zakładka', 'zakładkę']
def do_job(self, target):
# if not 'www.' in target:
# system('start www.{}'.format(target))
# else:
# system('start {}'.format(target))
webopener.open_url(target)
log('Otworzono stronę {}'.format(target))
```
#### File: jegor377/MyOwnVoiceAssistant/webopener.py
```python
from os import system
def open_url(url):
if not 'www.' in url:
system('start www.{}'.format(url))
else:
system('start {}'.format(url))
```
|
{
"source": "jegor377/SimpleTCPChat",
"score": 2
}
|
#### File: jegor377/SimpleTCPChat/client.py
```python
import socket
from threading import Thread
import os
MAX_MSG_SIZE = 1024
is_on = True
KEY = 120
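# xor_crypt is a symmetric XOR obfuscation: every byte is XORed with KEY, so applying it
# twice with the same key restores the original bytes (obfuscation, not real encryption).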
def xor_crypt(data, key):
encrypted = []
for ch in data:
encrypted.append(ch ^ key)
return bytes(encrypted)
def recv_thread(client_sock):
global is_on
global server_response
while is_on:
try:
msg = str(xor_crypt(client_sock.recv(MAX_MSG_SIZE), KEY), 'utf8')
if len(msg) > 0:
print(msg)
except:
pass
def is_cmd(strr):
    return strr.startswith('/')
def send_cmd(client_sock, cmd, pars=()):
global KEY
if len(pars) > 0:
client_sock.send(xor_crypt(bytes('CMD '+cmd+(' '.join(pars)), 'utf8'), KEY))
else:
client_sock.send(xor_crypt(bytes('CMD '+cmd, 'utf8'), KEY))
def upload_thread(client_sock, file_path, file_size):
global KEY
try:
print("Uploading file \""+file_path+"\" with size "+str(file_size)+"...")
with open(file_path, 'rb') as file:
while True:
piece = file.read(1024)
if len(piece) <= 0:
break
print(str(piece, 'utf8'))
client_sock.send(xor_crypt(piece, KEY))
print("File uploaded!")
except:
print("Something went wrong!")
def upload_command(client_sock, file_path):
if os.path.exists(file_path):
file_size = os.path.getsize(file_path)
MAX_FILE_SIZE = 1024 * 1024 * 100 # 100MB
if file_size <= MAX_FILE_SIZE:
ut = Thread(target=upload_thread, args=(client_sock, file_path, file_size))
ut.daemon = True
ut.start()
else:
print("File is to damn big!")
else:
print("File doesn't exist!")
def main():
global is_on
print("SIMPLE TCP CHAT by <NAME> (CLIENT)")
print("SERVER IP: ")
ip = input()
if ip == '':
ip = '127.0.0.1'
port = 81
try:
client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_sock.connect((ip, port))
recv_t = Thread(target=recv_thread, args = (client_sock,) )
recv_t.daemon = True
recv_t.start()
except:
print("Something went wrong!")
return
print("Client connected correctly!")
while is_on:
msg = input()
try:
if is_cmd(msg):
if msg == '/help':
print('/exit - kills the program and connection.')
print('/info - prints hosts count.')
print('/upload <file path> - reads the text file and sends it to the server.')
if msg == '/exit':
is_on = False
send_cmd(client_sock, 'KILL')
client_sock.close()
elif msg == '/info':
send_cmd(client_sock, 'INFO')
elif msg.split(' ')[0] == '/upload':
file_path = ' '.join(msg.split(' ')[1:])
upload_command(client_sock, file_path)
else:
if len(msg) <= MAX_MSG_SIZE and is_on:
client_sock.send(xor_crypt(bytes(msg, 'utf8'), KEY))
except:
print("Connection interrupted!")
is_on = False
break
if __name__ == "__main__":
main()
```
|
{
"source": "jegork/tf-pose-estimation",
"score": 2
}
|
#### File: jegork/tf-pose-estimation/get_points.py
```python
import argparse
import logging
import sys
import time
from tf_pose import common
import cv2
import numpy as np
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
def main(image, model='mobilenet_thin', resize='432x368'):
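    # 'resize' is parsed into (w, h); if either is 0 the estimator falls back to the
    # default 432x368 target size.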
w, h = model_wh(resize)
if w == 0 or h == 0:
e = TfPoseEstimator(get_graph_path(model), target_size=(432, 368))
else:
e = TfPoseEstimator(get_graph_path(model), target_size=(w, h))
# estimate human poses from a single image !
image = common.read_imgfile(image, None, None)
if image is None:
sys.exit(-1)
humans = e.inference(image, resize_to_default=(w > 0 and h > 0), upsample_size=4.0)
points = TfPoseEstimator.get_points(image, humans)
return points
```
|
{
"source": "jegork/translatio",
"score": 2
}
|
#### File: translatio/translatio/translatio.py
```python
import mtranslate
import os
from tqdm import tqdm_notebook as tqdm
from concurrent.futures import ProcessPoolExecutor
import json
import pandas as pd
from glob import glob
from typing import Dict, List, Any
import re
import time
import logging
__all__ = ["Translator"]
logging.basicConfig(
format="[%(asctime)s] Translatio [%(levelname)s]: %(message)s", datefmt="%H:%M:%S"
)
class Translator:
def __init__(
self,
target_lang: str,
max_workers: int = 10,
per_request: int = 10,
checkpoint_folder: str = None,
translate_columns: List = [],
keep_columns: List = [],
):
if target_lang is None:
raise AttributeError("target_lang not set")
if not isinstance(target_lang, str):
raise AttributeError('target_lang is not a string')
if not isinstance(max_workers, int):
raise AttributeError('max_workers is not an integer')
if not isinstance(per_request, int):
raise AttributeError('per_request is not an integer')
if not isinstance(checkpoint_folder, str):
raise AttributeError("checkpoint_folder is not a string")
if not isinstance(translate_columns, list):
raise AttributeError("translate_columns is not a list")
if not isinstance(keep_columns, list):
raise AttributeError("keep_columns is not a list")
if len(translate_columns) == 0:
raise AttributeError("translate_columns is empty")
cfg = {
'target_lang': target_lang,
'max_workers': max_workers,
'per_request': per_request,
'translate_columns': translate_columns,
'keep_columns': keep_columns,
}
self.checkpoint_folder = checkpoint_folder
if os.path.exists(os.path.join(self.checkpoint_folder, "config.json")):
self.cfg = self._read_config()
else:
cfg["last_translated_batch_file"] = None
self.cfg = cfg
self._write_config()
def generate_batches(
self,
filename: str,
names: str,
sep: str = "\t",
batch_size: int = 1000,
start_at: int = 0,
rows: int = None,
):
if not isinstance(filename, str):
raise AttributeError("Provide filename!")
df = pd.read_csv(filename, names=names, sep=sep, skiprows=start_at, nrows=rows)
use_cols = []
for r in df.columns:
if r in self.cfg['translate_columns'] or r in self.cfg['keep_columns']:
use_cols.append(r)
self.use_cols = use_cols
d = df[use_cols].to_dict("records")
return [d[i : i + batch_size] for i in range(0, len(d), batch_size)]
def _read_config(self):
with open(os.path.join(self.checkpoint_folder, "config.json"), "r") as f:
cfg = f.read()
return json.loads(cfg)
def _write_config(self):
os.makedirs(self.checkpoint_folder, exist_ok=True)
cfg = json.dumps(self.cfg)
with open(os.path.join(self.checkpoint_folder, "config.json"), "w+") as f:
f.write(cfg)
def _update_config(self, **kwargs):
cfg = self._read_config()
for k, v in kwargs.items():
if k in cfg:
cfg[k] = v
else:
raise KeyError(f"No field {k} in config!")
self.cfg = cfg
self._write_config()
def _merge_files(self):
temp_files = glob(os.path.join(self.checkpoint_folder, "temp_*.tsv"))
logging.info(f"Merging {len(temp_files)} files")
translated_dfs = [pd.read_csv(f, sep="\t") for f in temp_files]
translated_dfs = pd.concat(translated_dfs, ignore_index=True)
return translated_dfs
def _cleanup_folder(self):
temp_files = glob(os.path.join(self.checkpoint_folder, "temp_*.tsv"))
logging.info(f"Cleaning up {len(temp_files)} temporary files...")
for f in temp_files:
os.remove(f)
def ready_batches(self):
return len(glob(os.path.join(self.checkpoint_folder, "temp_*.tsv")))
def translate(self, data: List[Dict]):
translate = self.cfg['translate_columns']
keep = self.cfg['keep_columns']
translated_data = {}
to_translate = {x: [d[x] for d in data] for x in translate}
to_keep = {x: [d[x] for d in data] for x in keep}
for k, v in to_translate.items():
t = []
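            # Pack per_request strings into one newline-joined request to reduce API calls;
            # the response is split back on newlines and its length is checked against the
            # original chunk below.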
for i in range(0, len(v), self.cfg["per_request"]):
result = None
data_packed = "\n".join(v[i : i + self.cfg["per_request"]])
while result is None:
try:
result = mtranslate.translate(
data_packed, self.cfg["target_lang"], "en"
)
except Exception as e:
logging.exception(f"Error: {e}, retrying...")
result = result.split("\n")
if len(result) != len((v[i : i + self.cfg["per_request"]])):
raise Exception(
"Length of original and translated data is not the same! Try decreasing per_request variable."
)
t.extend(result)
translated_data[k] = t
for k in to_keep.keys():
translated_data[k] = to_keep[k]
return translated_data
def async_translate(self, d: List[Dict]):
split = int(len(d) / self.cfg["max_workers"])
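        # Split the work across max_workers processes in equal chunks; the chunk that
        # reaches the end of the list is submitted as d[start_at:] so trailing records
        # are included.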
submits = []
results = [] # list of dicts
with ProcessPoolExecutor(max_workers=self.cfg["max_workers"]) as executor:
for i in range(self.cfg["max_workers"]):
start_at = i * split
stop_at = (i * split) + split
if stop_at >= len(d) - 1:
submits.append(executor.submit(self.translate, d[start_at:]))
time.sleep(10)
else:
submits.append(executor.submit(self.translate, d[start_at:stop_at]))
for i in range(self.cfg["max_workers"]):
results.append(submits[i].result())
outputs = {}
for k in results[0].keys():
outputs[k] = []
for r in results:
for k, v in r.items():
outputs[k].extend(v)
return outputs
def __call__(
self, batches: List[List[Dict]], output_file: str, done_batches: int = None
):
if not isinstance(output_file, str):
raise AttributeError("Provide output_file!")
if not (isinstance(done_batches, int) or done_batches is None):
raise AttributeError("done_batches should be None or int!")
if done_batches is None:
if self.cfg["last_translated_batch_file"] is None:
start_batch = 0
else:
start_batch = (
int(
re.search(
"[0-9]+", self.cfg["last_translated_batch_file"]
).group()
)
+ 1
)
else:
start_batch = done_batches
if start_batch != 0:
logging.info(f"Skipping {start_batch} batches...")
if len(batches) - start_batch != 0:
for i, batch in tqdm(
enumerate(batches),
total=(len(batches) - start_batch),
desc="Processing batches",
):
if i < start_batch:
continue
translated = self.async_translate(batch)
pd.DataFrame(translated).to_csv(
os.path.join(self.checkpoint_folder, f"temp_{i}.tsv"),
sep="\t",
index=False,
)
self._update_config(last_translated_batch_file=f"temp_{i}.tsv")
self._cleanup_folder()
time.sleep(20)
else:
logging.info("Dataset already translated")
merged_df = self._merge_files()
merged_df = merged_df.reindex(self.use_cols, axis=1)
merged_df.to_csv(output_file, sep="\t", index=False)
logging.info("Done")
```
|
{
"source": "jegorsergeev/get-the-inspo",
"score": 2
}
|
#### File: images/api/views.py
```python
from rest_framework.generics import ListAPIView, RetrieveAPIView, ListCreateAPIView
from images.models import Image
from .serializer import ImageSerializer, ImageListSerializer
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse, Http404
import json
import requests
from requests.auth import HTTPBasicAuth
class ImageListView(ListAPIView):
queryset = Image.objects.all()
serializer_class = ImageListSerializer
class ImageDetailedView(RetrieveAPIView):
queryset = Image.objects.all()
serializer_class = ImageSerializer
class ImageCreateView(ListCreateAPIView):
queryset = Image.objects.all()
serializer_class = ImageSerializer
# Better not to use csrf_exempt on a production server
@csrf_exempt
def log_write_view(request):
if request.method == 'POST':
# fields = requests.post('http://127.0.0.1:8124', 'describe logs.user_actions').text.split('\n')
# fields = list(map(lambda s: s.split('\t')[0], fields))[:-1]
data = json.loads(request.body.decode('utf-8'))
query = 'INSERT INTO logs.user_actions VALUES ({})'.format(str(data.values())[13:-2])
try:
response = requests.post('http://172.16.17.32:8124', query, auth=HTTPBasicAuth('default', 'hudozi95'))
except requests.exceptions.Timeout:
# Maybe set up for a retry, or continue in a retry loop
return HttpResponse(status=408)
except requests.exceptions.TooManyRedirects:
# Resolve URL
return HttpResponse(status=404)
        except requests.exceptions.RequestException:
            raise Http404
        return HttpResponse(status=response.status_code, content=response.content)
else:
        raise Http404
```
|
{
"source": "jegpeek/dynabooks",
"score": 3
}
|
#### File: jegpeek/dynabooks/book_maker.py
```python
import os
import jinja2
import hashlib
def make_notebook_from_params(paramdct):
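    # 'template' is popped from paramdct and resolved under templates/ (adding .ipynb if
    # needed); the remaining keys are passed to jinja2 as template variables.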
templatefn = os.path.join('templates', paramdct.pop('template'))
if not os.path.exists(templatefn):
templatefn = templatefn + '.ipynb'
with open(templatefn) as f:
templ = jinja2.Template(f.read())
result = templ.render(**paramdct)
# determine the output file name as the sha1 hash of the template name + content of the file
s = hashlib.sha1(templatefn.encode('utf-8'))
s.update(result.encode('utf-8'))
outfn = os.path.join('output_nbs', s.hexdigest() + '.ipynb')
with open(outfn, 'w') as f:
f.write(result)
return outfn
def generate_html_from_notebook():
return os.system('make html')
```
|
{
"source": "JegSnakkerTysk/MornBot",
"score": 2
}
|
#### File: MornBot/cogs/AntiSelfBot.py
```python
from discord.ext import commands
import re
from datetime import datetime
class AntiSelfBot(commands.Cog, command_attrs=dict(hidden=True)):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_member_join(self, member):
if member.guild.id != 297798952538079233:
return
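        # Self-bot heuristic: a username matching the default-looking
        # [A-Z]+[a-z]+[0-9a-f]{4} pattern, no custom avatar, and an account younger than
        # 31 days triggers adding the role fetched below.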
name_match = re.findall(r'[A-Z]+[a-z]+[0-9a-f]{4}', member.name)
creation_match = (datetime.now() - member.created_at).days
if name_match != [] and not member.avatar and creation_match < 31:
role = member.guild.get_role(588432481297104949)
await member.add_roles(role)
@commands.cooldown(1, 2, commands.BucketType.guild)
@commands.command(aliases=['e-nebz', '1nebz'])
async def enebz(self, ctx):
if ctx.author.id != 5<PASSWORD>261<PASSWORD>4:
return await ctx.send(f'{ctx.author.mention} er ikke best')
await ctx.send('Beste karen på serveren 😎')
def setup(bot):
bot.add_cog(AntiSelfBot(bot))
```
#### File: MornBot/cogs/Dagbok.py
```python
from discord.ext import commands
import discord
import pymongo
from json import load as json_load
from os import remove
from math import ceil
import json
import asyncio
from cogs.utils import Defaults
with open('config.json', 'r', encoding='utf8') as f:
config = json_load(f)
prefix = config['prefix']
mongodb_url = config['mongodb_url']
mongo = pymongo.MongoClient(mongodb_url)
database = mongo['discord']
database_col_dagbok = database['dagbok']
class Dagbok(commands.Cog):
def __init__(self, bot):
self.bot = bot
async def react(self, message):
if message.author.bot:
return
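        # Messages starting with "kjære dagbok" ("dear diary") are stored in MongoDB under
        # data.<dd-mm-YYYY>, but only for users already opted in (present in the dagbok
        # collection); the message gets a ✅ reaction on success.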
if message.content.lower().startswith('kjære dagbok:') or message.content.lower().startswith('kjære dagbok,'):
database_find = {'_id': message.author.id}
database_user = database_col_dagbok.find_one(database_find)
if database_user is None:
return
date = message.created_at.strftime('%d-%m-%Y')
database_col_dagbok.update_one(database_find,
{'$set': {f'data.{date}': message.clean_content}}, upsert=True)
try:
await message.add_reaction('✅')
except:
pass
@commands.guild_only()
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 2, commands.BucketType.guild)
@commands.group()
async def dagbok(self, ctx):
if ctx.invoked_subcommand is None:
await ctx.send_help(ctx.command)
@dagbok.command()
async def på(self, ctx):
"""Legger deg inn i databasen"""
database_find = {'_id': ctx.author.id}
database_user = database_col_dagbok.find_one(database_find)
if database_user is not None:
embed = discord.Embed(description='Du ligger allerede i databasen')
await Defaults.set_footer(ctx, embed)
return await ctx.send(embed=embed)
database_col_dagbok.insert_one({'_id': ctx.author.id})
embed = discord.Embed(description=':white_check_mark: Du er nå i lagt inn i databasen')
await Defaults.set_footer(ctx, embed)
await ctx.send(embed=embed)
@dagbok.command()
async def av(self, ctx):
"""Sletter deg fra databasen"""
database_find = {'_id': ctx.author.id}
database_user = database_col_dagbok.find_one(database_find)
if database_user is None:
embed = discord.Embed(description='Du lå ikke i databasen fra før av')
await Defaults.set_footer(ctx, embed)
return await ctx.send(embed=embed)
embed = discord.Embed(color=ctx.me.color, description='Er du sikker? Reager med ✅ innen 30s har gått for å ' +
'bekrefte\n\n**Dette vil slette alle dine data og ' +
'stoppe loggingen av fremtidige meldinger frem til du ' +
'skrur den på igjen.** Om du vil hente ut data før du ' +
f'sletter, kan du skrive `{prefix}dagbok data`')
confirmation_msg = await ctx.send(embed=embed)
await confirmation_msg.add_reaction('✅')
def comfirm(reaction, user):
return user == ctx.author and str(reaction.emoji) == '✅'
try:
reaction, user = await self.bot.wait_for('reaction_add', timeout=30.0, check=comfirm)
except asyncio.TimeoutError:
embed = discord.Embed(color=ctx.me.color, description=f'Ingen svar ble gitt innen tiden!')
await confirmation_msg.edit(embed=embed)
await confirmation_msg.remove_reaction('✅', ctx.me)
else:
database_col_dagbok.delete_one(database_user)
embed = discord.Embed(description=':white_check_mark: Dine data har blitt slettet!')
await Defaults.set_footer(ctx, embed)
await ctx.send(embed=embed)
@dagbok.command()
async def liste(self, ctx, *side: int):
"""Se hvilke dager som ligger i dagboka"""
database_find = {'_id': ctx.author.id}
database_user = database_col_dagbok.find_one(database_find)
if str(ctx.author.color) != '#000000':
color = ctx.author.color
else:
color = discord.Colour(0x99AAB5)
if database_user is None:
return await Defaults.error_warning_send(ctx, text='Du ligger ikke i databasen. ' +
f'Skriv `{prefix}dagbok på` for å legge deg inn')
try:
entries = list(database_user['data'].keys())
except KeyError:
return await Defaults.error_fatal_send(ctx, text='Fant ingen data. Sørg for at du skriver en ' +
'dagboksmelding først (start melding med `kjære dagbok,`)')
if not side:  # no page number was given
side = 1
else:
side = side[0]
if side <= 0:
side = 1
start_index = (side - 1) * 10
end_index = side * 10
pagecount = ceil(len(entries) / 10)
if side > pagecount:
return await Defaults.error_fatal_send(ctx, text='Ugyldig sidetall')
entries = '\n'.join(entries[start_index:end_index])
embed = discord.Embed(color=color, description=f'```\n{entries}\n```')
embed.set_author(name=f'{ctx.author.name}#{ctx.author.discriminator}', icon_url=ctx.author.avatar_url)
embed.set_footer(text=f'Side: {side}/{pagecount}')
await ctx.send(embed=embed)
@dagbok.command()
async def dag(self, ctx, dato: str):
"""Hent opp dagboka di fra en dato"""
database_find = {'_id': ctx.author.id}
database_user = database_col_dagbok.find_one(database_find)
if str(ctx.author.color) != '#000000':
color = ctx.author.color
else:
color = discord.Colour(0x99AAB5)
if database_user is None:
return await Defaults.error_warning_send(ctx, text='Du ligger ikke i databasen. ' +
f'Skriv `{prefix}dagbok på` for å legge deg inn')
try:
data = database_user['data'][f'{dato}']
embed = discord.Embed(color=color, description=data)
embed.set_footer(text=f'{ctx.author.name}#{ctx.author.discriminator} | {dato}',
icon_url=ctx.author.avatar_url)
return await ctx.send(embed=embed)
except:
await Defaults.error_fatal_send(ctx, text='Fant ingen data fra denne datoen. Dobbelsjekk ' +
'om du har skrevet riktig dato `DD-MM-YYYY`')
@dagbok.command()
async def data(self, ctx):
"""Sender deg dine data"""
database_find = {'_id': ctx.author.id}
database_user = database_col_dagbok.find_one(database_find)
if database_user is None:
return await Defaults.error_warning_send(ctx, text='Jeg har ingen data om deg')
try:
database_user['data']
except KeyError:
return await Defaults.error_warning_send(ctx, text='Jeg har ingen data om deg')
with open(f'./assets/{ctx.author.id}.json', 'w') as f:
json.dump(database_user, f, indent=4)
try:
await ctx.author.send(file=discord.File(f'./assets/{ctx.author.id}.json'))
embed = discord.Embed(color=0x0085ff, description=':white_check_mark: Dine data har ' +
'blitt sendt i DM!')
await ctx.send(embed=embed)
except:
await Defaults.error_fatal_send(ctx, text='Sending av data feilet! Sjekk om du har blokkert meg')
try:
remove(f'./assets/{ctx.author.id}.json')  # clean up the temporary export file
except:
pass
def setup(bot):
bot.add_listener(Dagbok(bot).react, 'on_message')
bot.add_cog(Dagbok(bot))
```
#### File: MornBot/cogs/Errors.py
```python
from discord.ext import commands
import traceback
import sys
from datetime import datetime
from cogs.utils import Defaults
class Errors(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_command(self, ctx):
status = {False: "✔", True: "❌"}
print(f'{datetime.now().strftime("%d.%m.%Y %H:%M:%S")} | ' +
f'{ctx.command} {status[ctx.command_failed]} - ' +
f'{ctx.author.name}#{ctx.author.discriminator} ({ctx.author.id}) | ' +
f'{ctx.guild.id}-{ctx.channel.id}-{ctx.message.id}')
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
self.bot.get_command(f'{ctx.command}').reset_cooldown(ctx)
if hasattr(ctx.command, 'on_error'):
return
ignored = (commands.CommandNotFound, commands.CheckFailure)
send_help = (commands.MissingRequiredArgument,
commands.TooManyArguments,
commands.BadArgument)
error = getattr(error, 'original', error)
if isinstance(error, ignored):
return
elif isinstance(error, send_help):
self.bot.get_command(f'{ctx.command}').reset_cooldown(ctx)
return await ctx.send_help(ctx.command)
elif isinstance(error, commands.BotMissingPermissions):
permissions = ', '.join(error.missing_perms)
return await Defaults.error_warning_send(ctx, text=f'Jeg mangler følgende tillatelser:\n\n' +
f'```\n{permissions}\n```')
elif isinstance(error, commands.NotOwner):
return await Defaults.error_fatal_send(ctx, text='Du er ikke båtteier')
elif isinstance(error, commands.MissingPermissions):
permissions = ', '.join(error.missing_perms)
return await Defaults.error_warning_send(ctx, text=f'Du mangler følgende tillatelser:\n\n' +
f'```\n{permissions}\n```')
elif isinstance(error, commands.CommandOnCooldown):
return await Defaults.error_warning_send(ctx, text='Kommandoen har nettopp blitt brukt. Prøv igjen om ' +
f'`{error.retry_after:.1f}` sekunder.')
elif isinstance(error, commands.NSFWChannelRequired):
return await Defaults.error_fatal_send(ctx, text='Du må være i en NSFW-Kanal')
elif isinstance(error, commands.NoPrivateMessage):
try:
return await Defaults.error_fatal_send(ctx, text=f'`{ctx.command}` kan ikke brukes i DMs')
except:
pass
print(f'Ignoring exception in command {ctx.command}:', file=sys.stderr)
traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)
def setup(bot):
bot.add_cog(Errors(bot))
```
#### File: MornBot/cogs/FunReplies.py
```python
from discord.ext import commands
import discord
import pymongo
from json import load as json_load
with open('config.json', 'r', encoding='utf8') as f:
config = json_load(f)
mongodb_url = config['mongodb_url']
mongo = pymongo.MongoClient(mongodb_url)
database = mongo['discord']
database_col_funreplies = database['funreplies']
class FunReplies(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.guild_only()
@commands.has_permissions(administrator=True)
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 2, commands.BucketType.guild)
@commands.group()
async def funreplies(self, ctx):
if ctx.invoked_subcommand is None:
await ctx.send_help(ctx.command)
@funreplies.command()
async def on(self, ctx, *, kanal: discord.TextChannel=None):
"""Skrur på funreplies for spesifisert kanal"""
if not kanal:
kanal = ctx.channel
database_find = {'_id': kanal.id}
try:
database_funreplies = database_col_funreplies.find_one(database_find)
except:
return await ctx.send(f'{ctx.author.mention} Jeg har ikke tilkobling til databasen. ' +
f'Be boteier om å fikse dette')
try:
database_funreplies[f'{kanal.id}']
except TypeError:
database_col_funreplies.insert_one({'_id': kanal.id, 'funreplies': True})
embed = discord.Embed(color=ctx.me.color, description=f'FunReplies er nå skrudd **på** for {kanal.mention}')
await ctx.send(embed=embed)
@funreplies.command()
async def off(self, ctx, *, kanal: discord.TextChannel = None):
"""Skrur av funreplies for spesifisert kanal"""
if not kanal:
kanal = ctx.channel
database_find = {'_id': kanal.id}
try:
database_funreplies = database_col_funreplies.find_one(database_find)
except:
return await ctx.send(f'{ctx.author.mention} Jeg har ikke tilkobling til databasen. ' +
'Be boteier om å fikse dette')
database_col_funreplies.delete_one(database_funreplies)
embed = discord.Embed(color=ctx.me.color, description=f'FunReplies er nå skrudd **av** for {kanal.mention}')
await ctx.send(embed=embed)
async def react(self, message):
if message.author.bot:
return
database_find = {'_id': message.channel.id}
channel = database_col_funreplies.find_one(database_find)
try:
if not channel['funreplies']:
return
except TypeError:
return
if message.content.lower() == 'morn':
await message.channel.send('Morn')
elif message.content.lower() == 'no u':
await message.channel.send(f'no u {message.author.mention}')
elif message.content.lower() == 'nei du':
await message.channel.send(f'nei du {message.author.mention}')
def setup(bot):
bot.add_listener(FunReplies(bot).react, 'on_message')
bot.add_cog(FunReplies(bot))
```
#### File: MornBot/cogs/Ordsky.py
```python
import discord
from discord.ext import commands
import pymongo
from codecs import open
from json import load as json_load
from PIL import Image
from numpy import array
from wordcloud import WordCloud
from re import sub
from os import remove
import functools
from io import BytesIO
# Fetch prefix and mongodb url
with open('config.json', 'r', encoding='utf8') as f:
config = json_load(f)
prefix = config['prefix']
mongodb_url = config['mongodb_url']
# Connect to database
mongo = pymongo.MongoClient(mongodb_url)
database = mongo['discord']
database_col_users = database['users']
async def default_db_insert(ctx):
"""Standard mongodb document"""
database_col_users.insert_one(
{'_id': ctx.author.id,
'ordsky_consent': False,})
async def error_no_data(ctx):
"""No data error message"""
embed = discord.Embed(
color=0xF1C40F,
description=':exclamation: Jeg har ingen data om deg å ' +
'sende eller så kan jeg ikke sende meldinger til deg!')
embed.set_footer(icon_url=ctx.author.avatar_url,
text=f'{ctx.author.name}#{ctx.author.discriminator}')
await ctx.send(embed=embed)
class Ordsky(commands.Cog):
def __init__(self, bot):
self.bot = bot
@staticmethod
def generate(text, mask, filtered_words):
"""Generate wordcloud"""
wc = WordCloud(max_words=4000,
mask=mask,
repeat=False,
stopwords=filtered_words)
wc.process_text(text)
wc.generate(text)
img = wc.to_image()
b = BytesIO()
img.save(b, 'png')
b.seek(0)
return b
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 2, commands.BucketType.user)
@commands.command(aliases=['consent'])
async def samtykke(self, ctx):
"""Gi samtykke til å samle meldingsdataen din"""
# Look up user in database
database_find = {'_id': ctx.author.id}
try:
database_user = database_col_users.find_one(database_find)
except:
return await ctx.send(f'{ctx.author.mention} Jeg har ikke ' +
'tilkobling til databasen. ' +
'Be boteier om å fikse dette')
# Set consent and insert missing data
if database_user is None:
await default_db_insert(ctx)
database_user = database_col_users.find_one(database_find)
database_col_users.update_one(database_find,
{'$set':
{'ordsky_consent': True}})
else:
database_col_users.update_one(database_find,
{'$set':
{'ordsky_consent': True}})
# Confirmation message
embed = discord.Embed(
color=0x0085ff,
description=':white_check_mark: Samtykke registrert!')
embed.set_footer(icon_url=ctx.author.avatar_url,
text=f'{ctx.author.name}#{ctx.author.discriminator}')
await ctx.send(embed=embed)
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 2, commands.BucketType.user)
@commands.command(aliases=['ingensamtykke', 'noconsent', 'slettdata'])
async def tabort(self, ctx):
"""Fjern samtykke og slett meldingsdata"""
# Look up user in database
database_find = {'_id': ctx.author.id}
try:
database_user = database_col_users.find_one(database_find)
except:
return await ctx.send(f'{ctx.author.mention} Jeg har ikke ' +
'tilkobling til databasen. ' +
'Be boteier om å fikse dette')
# Remove consent & data, insert missing data
database_col_users.delete_one(database_user)
# Confirmation message
embed = discord.Embed(
color=0x0085ff,
description=':white_check_mark: Meldingsdata er slettet!')
embed.set_footer(icon_url=ctx.author.avatar_url,
text=f'{ctx.author.name}#{ctx.author.discriminator}')
await ctx.send(embed=embed)
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 60, commands.BucketType.user)
@commands.command(aliases=['mydata'])
async def minedata(self, ctx):
"""Få tilsendt dine data"""
# Look up user in database
database_find = {'_id': ctx.author.id}
try:
database_user = database_col_users.find_one(database_find)
except:
return await ctx.send(f'{ctx.author.mention} Jeg har ikke ' +
'tilkobling til databasen. ' +
'Be boteier om å fikse dette')
# Return no data error, insert missing data
if database_user is None:
await default_db_insert(ctx)
return await error_no_data(ctx)
# Fetch all user data & append to string
raw_data = ''
for key, value in database_user['ordsky_data'].items():
if value is None:
continue
else:
raw_data += value
# No data error message
if raw_data == '':
return await error_no_data(ctx)
# Insert data in .txt file
with open(f'./assets/ordsky/{ctx.author.id}.txt',
'a+', encoding='utf-8') as f:
f.write(raw_data)
# DM .txt file
try:
await ctx.author.send(
file=discord.File(f'./assets/ordsky/{ctx.author.id}.txt'))
embed = discord.Embed(
color=0x0085ff,
description=':white_check_mark: Meldingsdata har ' +
'blitt sendt i DM!')
except:
embed = discord.Embed(
color=0xFF0000,
description=':x: Sending av data feilet! ' +
'Sjekk om du har blokkert meg')
embed.set_footer(
icon_url=ctx.author.avatar_url,
text=f'{ctx.author.name}#{ctx.author.discriminator}')
await ctx.send(embed=embed)
# Remove .txt file
try:
remove(f'./assets/ordsky/{ctx.author.id}.txt')
except:
pass
@commands.bot_has_permissions(
embed_links=True, read_message_history=True, attach_files=True)
@commands.cooldown(1, 150, commands.BucketType.user)
@commands.command(aliases=['wordcloud', 'wc', 'sky'])
async def ordsky(self, ctx):
"""Generer en ordsky"""
# Look up user in database
database_find = {'_id': ctx.author.id}
try:
database_user = database_col_users.find_one(database_find)
except:
return await ctx.send(f'{ctx.author.mention} Jeg har ikke ' +
'tilkobling til databasen. ' +
'Be boteier om å fikse dette')
# Insert missing data
if database_user is None:
await default_db_insert(ctx)
database_user = database_col_users.find_one(database_find)
# Refresh database
database_user = database_col_users.find_one(database_find)
# Check consent
if database_user['ordsky_consent'] is False:
embed = discord.Embed(
color=0xF1C40F,
description=':exclamation: Du må gi meg tillatelse til å ' +
'samle og beholde meldingsdataene dine.\n\n' +
f'Skriv `{prefix}samtykke` for å gjøre dette')
embed.set_footer(
icon_url=ctx.author.avatar_url,
text=f'{ctx.author.name}#{ctx.author.discriminator}')
await ctx.send(ctx.author.mention, embed=embed)
return self.bot.get_command('ordsky').reset_cooldown(ctx)
# Send status message
embed = discord.Embed(
description='**Henter meldinger:** :hourglass:\n' +
'**Generer ordsky:** -')
embed.set_footer(
icon_url=ctx.author.avatar_url,
text=f'{ctx.author.name}#{ctx.author.discriminator}')
status_msg = await ctx.send(embed=embed)
command_prefixes = ['§', '!', '.', '-', '€', '|', '$', '=', '?', '<', ':', '#', ',']
# Fetch messages
message_data = ''
try:
database_user['ordsky_data'][f'{ctx.guild.id}']
for channel in ctx.guild.text_channels:
if not channel.permissions_for(ctx.author).send_messages:
continue
try:
async for message in channel.history(limit=300):
has_prefixes = False
if message.author.id == ctx.author.id:
for prefixes in command_prefixes:
if prefixes in message.clean_content[:3]:
has_prefixes = True
if has_prefixes is False:
message_data += '[' + \
f'{str(message.created_at)[0:19]}] ' + \
f'({message.channel.id}-{message.id}) ' + \
f'{message.clean_content} '
except:
continue
except KeyError:
for channel in ctx.guild.text_channels:
if not channel.permissions_for(ctx.author).send_messages:
continue
try:
async for message in channel.history(limit=2000):
has_prefixes = False
if message.author.id == ctx.author.id:
for prefixes in command_prefixes:
if prefixes in message.clean_content[:3]:
has_prefixes = True
if has_prefixes is False:
message_data += '[' + \
f'{str(message.created_at)[0:19]}] ' + \
f'({message.channel.id}-{message.id}) ' + \
f'{message.clean_content} '
except:
continue
if message_data != '':
database_col_users.update_one(
database_find,
{'$set':
{f'ordsky_data.{ctx.guild.id}': message_data}},
upsert=True)
database_user = database_col_users.find_one(database_find)
try:
message_data = database_user['ordsky_data'][f'{ctx.guild.id}']
except KeyError:
embed = discord.Embed(
color=0xF1C40F,
description=':x: Har ikke nok meldingsdata ' +
'for å generere ordsky')
embed.set_footer(
icon_url=ctx.author.avatar_url,
text=f'{ctx.author.name}#{ctx.author.discriminator}')
return await ctx.send(ctx.author.mention, embed=embed)
database_message_data = message_data
# Update status message
embed = discord.Embed(
description='**Henter meldinger:** :white_check_mark:\n' +
'**Generer ordsky:** :hourglass:')
embed.set_footer(
icon_url=ctx.author.avatar_url,
text=f'{ctx.author.name}#{ctx.author.discriminator}')
await status_msg.edit(embed=embed)
# URL & emote filter
text = sub(r'http\S+', '', database_message_data)
text = sub(r':\S+', '', text)
text = sub(r'#\S+', '', text)
text = sub(r'@\S+', '', text)
# Fetch pre-defined word list
with open('./assets/ordsky/ordliste.txt',
'r', encoding='utf-8') as f:
filtered_words = [line.split(',') for line in f.readlines()]
filtered_words = filtered_words[0]
# Fetch mask
mask = array(Image.open('./assets/ordsky/mask/skyform.png'))
task = functools.partial(
Ordsky.generate, text,
mask, filtered_words)
b = await self.bot.loop.run_in_executor(None, task)
# Set embed color
if str(ctx.author.color) != '#000000':
color = ctx.author.color
else:
color = discord.Colour(0x99AAB5)
# Create embed & send it
final_image = discord.File(
b, filename=f'{ctx.author.id}_{ctx.guild.id}.png')
embed = discord.Embed(
color=color,
description=':cloud: Her er ordskyen din! :cloud:')
embed.set_image(url=f'attachment://{ctx.author.id}_{ctx.guild.id}.png')
embed.set_footer(
text=f'{ctx.author.name}#{ctx.author.discriminator}',
icon_url=ctx.author.avatar_url)
await ctx.send(
file=final_image,
content=ctx.author.mention,
embed=embed)
# Delete status message
await status_msg.delete()
def setup(bot):
bot.add_cog(Ordsky(bot))
```
#### File: MornBot/cogs/ServerManagement.py
```python
from discord.ext import commands
import discord
from cogs.utils import Defaults
class ServerManagement(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.bot_has_permissions(kick_members=True)
@commands.has_permissions(kick_members=True)
@commands.guild_only()
@commands.cooldown(1, 2, commands.BucketType.guild)
@commands.command(aliases=['spark'])
async def kick(self, ctx, bruker: discord.Member, *, begrunnelse: str=None):
"""Kaster ut en bruker fra serveren"""
await bruker.kick(reason=begrunnelse)
await ctx.send(f'{bruker.mention} `{bruker.name}#{bruker.discriminator}` ble kastet ut av serveren')
@commands.bot_has_permissions(ban_members=True)
@commands.has_permissions(ban_members=True)
@commands.guild_only()
@commands.cooldown(1, 2, commands.BucketType.guild)
@commands.command()
async def ban(self, ctx, bruker: discord.Member, *, begrunnelse: str=None):
"""Utesteng en bruker fra serveren"""
await bruker.ban(reason=begrunnelse)
await ctx.send(f'{bruker.mention} `{bruker.name}#{bruker.discriminator}` ble utestengt fra serveren')
@commands.bot_has_permissions(manage_messages=True)
@commands.has_permissions(manage_messages=True)
@commands.cooldown(1, 30, commands.BucketType.guild)
@commands.command(aliases=['purge', 'delete', 'slett'])
async def prune(self, ctx, antall: int):
"""Sletter de siste antall meldingene du spesifiser"""
if antall > 100:
return await Defaults.error_warning_send(ctx, text='Du kan ikke slette mer enn 100 meldinger om gangen')
await ctx.channel.purge(limit=antall+1)
await ctx.send(content=f'🗑️ Slettet `{antall}` meldinger!', delete_after=3.0)
def setup(bot):
bot.add_cog(ServerManagement(bot))
```
|
{
"source": "jeguerra/nonlinearMtnWavesSolver",
"score": 2
}
|
#### File: jeguerra/nonlinearMtnWavesSolver/computeChebyshevDerivativeMatrix.py
```python
import numpy as np
import math as mt
from HerfunChebNodesWeights import chebpolym, cheblb
def computeChebyshevDerivativeMatrix(DIMS):
# Get data from DIMS
ZH = DIMS[2]
NZ = DIMS[4]
# Initialize grid and make column vector
xi, wcp = cheblb(NZ)
# Get the Chebyshev transformation matrix
CTD = chebpolym(NZ-1, -xi)
# Make a diagonal matrix of weights
W = np.diag(wcp)
# Compute scaling for the forward transform
S = np.eye(NZ)
for ii in range(NZ - 1):
temp = W.dot(CTD[:,ii])
temp = ((CTD[:,ii]).T).dot(temp)
S[ii,ii] = temp ** (-1)
S[NZ-1,NZ-1] = 1.0 / mt.pi
# Compute the spectral derivative coefficients
SDIFF = np.zeros((NZ,NZ))
SDIFF[NZ-2,NZ-1] = 2.0 * NZ
for ii in reversed(range(NZ - 2)):
A = 2.0 * (ii + 1)
B = 1.0
if ii > 0:
c = 1.0
else:
c = 2.0
SDIFF[ii,:] = B / c * SDIFF[ii+2,:]
SDIFF[ii,ii+1] = A / c
# Chebyshev spectral transform in matrix form
temp = CTD.dot(W)
STR_C = S.dot(temp);
# Chebyshev spatial derivative based on spectral differentiation
# Domain scale factor included here
temp = (CTD).dot(SDIFF)
DDM = - (2.0 / ZH) * temp.dot(STR_C);
return DDM, STR_C
```
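A quick shape sanity check for the routine above, assuming `HerfunChebNodesWeights.py` sits in the same directory; the `DIMS` entries are placeholders, since the function only reads `ZH` at index 2 and `NZ` at index 4.
```python
# Sketch only: DIMS[2] (ZH) and DIMS[4] (NZ) are the only entries the routine reads.
from computeChebyshevDerivativeMatrix import computeChebyshevDerivativeMatrix

ZH, NZ = 10000.0, 48                 # placeholder domain height and node count
DIMS = [None, None, ZH, None, NZ]
DDM, STR_C = computeChebyshevDerivativeMatrix(DIMS)
print(DDM.shape, STR_C.shape)        # both should be (NZ, NZ)
```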
#### File: jeguerra/nonlinearMtnWavesSolver/computeDerivativeMatrix.py
```python
import numpy as np
import math as mt
from HerfunChebNodesWeights import hefuncm, hefunclb
from HerfunChebNodesWeights import chebpolym, cheblb
def computeAdjustedOperatorNBC(D2A, DOG, DD, tdex):
# D2A is the operator to adjust
# DOG is the original operator to adjust (unadjusted)
# DD is the 1st derivative operator
DOP = np.zeros(DD.shape)
# Get the column span size
NZ = DD.shape[1]
cdex = range(NZ)
cdex = np.delete(cdex, tdex)
scale = - DD[tdex,tdex]
# Loop over columns of the operator and adjust for BC some location tdex
for jj in cdex:
factor = DD[tdex,jj] / scale
v1 = (D2A[:,jj]).flatten()
v2 = (DOG[:,tdex]).flatten()
nvector = v1 + factor * v2
DOP[:,jj] = nvector
return DOP
# Computes Cubic Spline 1st derivative matrix
def computeCubicSplineDerivativeMatrix(DIMS, dom, isClamped):
# Initialize matrix blocks
N = len(dom)
A = np.zeros((N,N)) # coefficients to 2nd derivatives
B = np.zeros((N,N)) # coefficients to RHS of 2nd derivatives
C = np.zeros((N,N)) # coefficients to 1st derivatives
D = np.zeros((N,N)) # coefficients to additive part of 1st derivatives
# Loop over each interior point in the irregular grid
for ii in range(1,N-1):
hp = dom[ii+1] - dom[ii]
hm = dom[ii] - dom[ii-1]
hc = dom[ii+1] - dom[ii-1]
A[ii,ii-1] = -1.0 / 6.0 * hm
A[ii,ii] = 1.0 / 6.0 * (hp + hm) - 0.5 * hc
A[ii,ii+1] = -1.0 / 6.0 * hp
B[ii,ii-1] = -1.0 / hm
B[ii,ii] = (1.0 / hm + 1.0 / hp)
B[ii,ii+1] = -1.0 / hp
for ii in range(0,N-1):
hp = dom[ii+1] - dom[ii]
C[ii,ii] = -1.0 / 3.0 * hp
C[ii,ii+1] = -1.0 / 6.0 * hp
D[ii,ii] = -1.0 / hp
D[ii,ii+1] = 1.0 / hp
# Adjust the right end of the 1st derivative matrix
hn = dom[N-1] - dom[N-2]
C[N-1,N-2] = hn / 6.0
C[N-1,N-1] = hn / 3.0
D[N-1,N-2] = -1.0 / hn
D[N-1,N-1] = 1.0 / hn
if isClamped:
DDM_CFD = computeCompactFiniteDiffDerivativeMatrix1(DIMS, dom)
A[0,0] = C[0,0]
A[0,1] = C[0,1]
B[0,:] = DDM_CFD[0,:]
B[0,0] -= D[0,0]
B[0,1] -= D[0,1]
hn = dom[N-1] - dom[N-2]
A[N-1,N-2] = hn / 6.0
A[N-1,N-1] = hn / 3.0
B[N-1,:] = DDM_CFD[N-1,:]
B[N-1,N-2] += 1.0 / hn
B[N-1,N-1] += -1.0 / hn
if isClamped:
AIB = np.linalg.solve(A, B)
else:
AIB = np.zeros((N,N))
AIB[1:N-1,1:N-1] = np.linalg.solve(A[1:N-1,1:N-1], B[1:N-1,1:N-1])
DDM = C.dot(AIB) + D
return DDM, AIB
# Computes standard 4th order compact finite difference 1st derivative matrix
def computeCompactFiniteDiffDerivativeMatrix1(DIMS, dom):
# Initialize the left and right derivative matrices
N = len(dom)
LDM = np.zeros((N,N)) # tridiagonal
RDM = np.zeros((N,N)) # centered difference
# Loop over each interior point in the irregular grid
for ii in range(1,N-1):
# Set compact finite difference
# Get the metric weights
hp = dom[ii+1] - dom[ii]
hm = dom[ii] - dom[ii-1]
# Compute the stencil coefficients
hr = (hm / hp)
d = -0.25
c = d * hr**4
b = -1.0 / 8.0 * (5.0 + hr)
a = 1.0 / 8.0 * (hr**2 + hr**3) + 0.5 * hr**4
# Write the right equation
RDM[ii,ii-1] = -b
RDM[ii,ii] = (a + b)
RDM[ii,ii+1] = -a
# Write the left equation
LDM[ii,ii-1] = d * hm
LDM[ii,ii] = -(hp * (a + c) + hm * (d - b))
LDM[ii,ii+1] = c * hp
# Handle the left and right boundaries
LDM[0,0] = 1.0
LDM[N-1,N-1] = 1.0
# Left end (forward)
hp = dom[1] - dom[0]
hpp = hp + (dom[2] - dom[1])
lc = (hp - (hp**2 / hpp))
RDM[0,0] = -(1.0 / lc) * (1.0 - (hp / hpp)**2)
RDM[0,1] = (1.0 / lc)
RDM[0,2] = -(1.0 / lc) * (hp / hpp)**2
# Right end (backward)
hm = dom[N-2] - dom[N-1]
hmm = hm + (dom[N-3] - dom[N-2])
rc = (hm - (hm**2 / hmm))
RDM[N-1,N-1] = -(1.0 / rc) * (1.0 - (hm / hmm)**2)
RDM[N-1,N-2] = (1.0 / rc)
RDM[N-1,N-3] = -(1.0 / rc) * (hm / hmm)**2
# Get the derivative matrix
DDM1 = np.linalg.solve(LDM, RDM)
# Clean up numerical zeros
for ii in range(N):
for jj in range(N):
if abs(DDM1[ii,jj]) <= 1.0E-15:
DDM1[ii,jj] = 0.0
return DDM1
# Computes standard 4th order compact finite difference 2nd derivative matrix
def computeCompactFiniteDiffDerivativeMatrix2(DIMS, dom):
# Initialize the left and right derivative matrices
N = len(dom)
LDM = np.zeros((N,N)) # tridiagonal
RDM = np.zeros((N,N)) # centered difference
# Loop over each interior point in the irregular grid
for ii in range(1,N-1):
# Set compact finite difference
# Get the metric weights
hp = dom[ii+1] - dom[ii]
hm = dom[ii] - dom[ii-1]
# Compute the stencil coefficients
hr = (hm / hp)
d = 3.0 / 24.0 - 1.0 / 24.0 * (1.0 / hr)**3
c = 3.0 / 24.0 - 1.0 / 24.0 * hr**3
b = 1.0
a = hr
# Write the right equation
RDM[ii,ii-1] = b
RDM[ii,ii] = -(a + b)
RDM[ii,ii+1] = a
# Write the left equation
LDM[ii,ii-1] = -d * hm**2
LDM[ii,ii] = ((0.5 * a + c) * hp**2 + (0.5 * b + d) * hm**2)
LDM[ii,ii+1] = -c * hp**2
# Handle the left and right boundaries
LDM[0,0] = 1.0
LDM[N-1,N-1] = 1.0
# Left end (forward)
hp = dom[1] - dom[0]
hpp = hp + (dom[2] - dom[1])
hr = hp / hpp
cd = 0.5 * (hp * (dom[2] - dom[1]))
RDM[0,0] = (1.0 - hr) / cd
RDM[0,1] = -1.0 / cd
RDM[0,2] = hr / cd
# Right end (backward)
hm = dom[N-2] - dom[N-1]
hmm = hm + (dom[N-3] - dom[N-2])
hr = hm / hmm
cd = 0.5 * (hm * (dom[N-3] - dom[N-2]))
RDM[N-1,N-1] = (1.0 - hr) / cd
RDM[N-1,N-2] = -1.0 / cd
RDM[N-1,N-3] = hr / cd
# Get the derivative matrix
DDM2 = np.linalg.solve(LDM, RDM)
# Clean up numerical zeros
for ii in range(N):
for jj in range(N):
if abs(DDM2[ii,jj]) <= 1.0E-15:
DDM2[ii,jj] = 0.0
return DDM2
def computeHermiteFunctionDerivativeMatrix(DIMS):
# Get data from DIMS
L1 = DIMS[0]
L2 = DIMS[1]
NX = DIMS[3]
alpha, whf = hefunclb(NX)
HT = hefuncm(NX, alpha, True)
HTD = hefuncm(NX+1, alpha, True)
# Get the scale factor
b = (np.amax(alpha) - np.min(alpha)) / abs(L2 - L1)
# Make a diagonal matrix of weights
W = np.diag(whf, k=0)
# Compute the coefficients of spectral derivative in matrix form
SDIFF = np.zeros((NX+2,NX+1));
SDIFF[0,1] = mt.sqrt(0.5)
SDIFF[NX,NX-1] = -mt.sqrt(NX * 0.5);
SDIFF[NX+1,NX] = -mt.sqrt((NX + 1) * 0.5);
for rr in range(1,NX):
SDIFF[rr,rr+1] = mt.sqrt((rr + 1) * 0.5);
SDIFF[rr,rr-1] = -mt.sqrt(rr * 0.5);
# Hermite function spectral transform in matrix form
STR_H = (HT.T).dot(W)
# Hermite function spatial derivative based on spectral differentiation
temp = (HTD).dot(SDIFF)
temp = temp.dot(STR_H)
DDM = b * temp
return DDM, STR_H
def computeChebyshevDerivativeMatrix(DIMS):
# Get data from DIMS
ZH = DIMS[2]
NZ = DIMS[4]
# Initialize grid and make column vector
xi, wcp = cheblb(NZ)
# Get the Chebyshev transformation matrix
CT = chebpolym(NZ-1, -xi)
# Make a diagonal matrix of weights
W = np.diag(wcp)
# Compute scaling for the forward transform
S = np.eye(NZ)
for ii in range(NZ - 1):
temp = W.dot(CT[:,ii])
temp = ((CT[:,ii]).T).dot(temp)
S[ii,ii] = temp ** (-1)
S[NZ-1,NZ-1] = 1.0 / mt.pi
# Compute the spectral derivative coefficients
SDIFF = np.zeros((NZ,NZ))
SDIFF[NZ-2,NZ-1] = 2.0 * NZ
for ii in reversed(range(NZ - 2)):
A = 2.0 * (ii + 1)
B = 1.0
if ii > 0:
c = 1.0
else:
c = 2.0
SDIFF[ii,:] = B / c * SDIFF[ii+2,:]
SDIFF[ii,ii+1] = A / c
# Chebyshev spectral transform in matrix form
temp = CT.dot(W)
STR_C = S.dot(temp);
# Chebyshev spatial derivative based on spectral differentiation
# Domain scale factor included here
temp = (CT).dot(SDIFF)
DDM = -(2.0 / ZH) * temp.dot(STR_C)
return DDM, STR_C
def computeFourierDerivativeMatrix(DIMS):
# Get data from DIMS
L1 = DIMS[0]
L2 = DIMS[1]
NX = DIMS[3]
kxf = (2*mt.pi/abs(L2 - L1)) * np.fft.fftfreq(NX+1) * (NX+1)
KDM = np.diag(kxf, k=0)
DFT = np.fft.fft(np.eye(NX+1), axis=0)
DDM = np.fft.ifft(1j * KDM.dot(DFT), axis=0)
return DDM, DFT
def computeChebyshevDerivativeMatrix_X(DIMS):
# Get data from DIMS
#ZH = DIMS[2]
#NZ = DIMS[4]
# Get data from DIMS
L1 = DIMS[0]
L2 = DIMS[1]
NX = DIMS[3]
ZH = abs(L2 - L1)
NZ = NX + 1
# Initialize grid and make column vector
xi, wcp = cheblb(NZ)
# Get the Chebyshev transformation matrix
CTD = chebpolym(NZ-1, -xi)
# Make a diagonal matrix of weights
W = np.diag(wcp)
# Compute scaling for the forward transform
S = np.eye(NZ)
for ii in range(NZ - 1):
temp = W.dot(CTD[:,ii])
temp = ((CTD[:,ii]).T).dot(temp)
S[ii,ii] = temp ** (-1)
S[NZ-1,NZ-1] = 1.0 / mt.pi
# Compute the spectral derivative coefficients
SDIFF = np.zeros((NZ,NZ))
SDIFF[NZ-2,NZ-1] = 2.0 * NZ
for ii in reversed(range(NZ - 2)):
A = 2.0 * (ii + 1)
B = 1.0
if ii > 0:
c = 1.0
else:
c = 2.0
SDIFF[ii,:] = B / c * SDIFF[ii+2,:]
SDIFF[ii,ii+1] = A / c
# Chebyshev spectral transform in matrix form
temp = CTD.dot(W)
STR_C = S.dot(temp);
# Chebyshev spatial derivative based on spectral differentiation
# Domain scale factor included here
temp = (CTD).dot(SDIFF)
DDM = - (2.0 / ZH) * temp.dot(STR_C);
return DDM, STR_C
```
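A small exercise of the compact finite-difference operator defined above on a mildly irregular grid; the grid, the test function, and passing `None` for the unused `DIMS` argument are choices made for this sketch, and it assumes `HerfunChebNodesWeights.py` is importable alongside the module.
```python
import numpy as np
from computeDerivativeMatrix import computeCompactFiniteDiffDerivativeMatrix1

# Mildly irregular grid on [0, 2*pi]; the perturbation keeps the nodes monotone
dom = np.linspace(0.0, 2.0 * np.pi, 201)
dom += 0.1 * (dom[1] - dom[0]) * np.sin(5.0 * dom)

DDM1 = computeCompactFiniteDiffDerivativeMatrix1(None, dom)  # DIMS is never read
approx = DDM1.dot(np.sin(dom))
print('max abs error vs cos(x):', np.max(np.abs(approx - np.cos(dom))))
```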
#### File: jeguerra/nonlinearMtnWavesSolver/computeGuellrichDomain2D.py
```python
import numpy as np
import math as mt
#import matplotlib.pyplot as plt
def computeTerrainDecayFunctions(xi, ang, StaticSolve):
if StaticSolve:
# Nominal Hybrid coordinate
m = 0.2
mi = 1.0 / m
dzdh = np.sinh(mi * (1.0 - xi)) / np.sinh(mi)
d_dzdh_dxi = -mi * np.cosh(mi * (1.0 - xi)) / np.sinh(mi)
else:
# First pass [A=0.3, p=20, m=0.2]
# Second pass [A=0.4, p=10, m=0.25]
# Third pass [A=0.25, p=25, m=0.25]
A = 0.25
p = 25
# Guellrich improvement to hybrid coordinate
cosvar = np.power(np.cos(A * ang), p)
tanvard = A * mt.pi * np.tan(A * ang)
# Guellrich Hybrid coordinate
m = 0.25
mi = 1.0 / m
hybrid = np.sinh(mi * (1.0 - xi)) / np.sinh(mi)
dhybrid = -mi * np.cosh(mi * (1.0 - xi)) / np.sinh(mi)
dzdh = cosvar * np.sinh(mi * (1.0 - xi)) / np.sinh(mi)
d_dzdh_dxi = cosvar * (dhybrid - p * hybrid * tanvard)
return dzdh, d_dzdh_dxi
def computeGuellrichDomain2D(DIMS, REFS, zRay, hx, dhdx, StaticSolve):
# Get data from DIMS and REFS
ZH = DIMS[2]
NX = DIMS[3] + 1
NZ = DIMS[4]
# input REFS = [x, z, HFM, whf, CPM, wcp]
x = REFS[0]
z = REFS[1]
# Compute the flat XZ mesh (computational domain)
HTZL, dummy = np.meshgrid(hx / ZH,z)
XL, ZL = np.meshgrid(x,z)
# High Order Improved Guellrich coordinate 3 parameter function
xi = 1.0 / ZH * ZL
ang = 1.0 / 3.0 * mt.pi * xi
dzdh, d_dzdh_dxi = computeTerrainDecayFunctions(xi, ang, StaticSolve)
dxidz = 1.0 + (HTZL * d_dzdh_dxi)
sigma = np.reciprocal(dxidz)
# Make the global array of terrain height and slope features
ZTL = np.zeros((NZ,NX))
DZT = np.zeros((NZ,NX))
for rr in range(NZ):
ZTL[rr,:] = (dzdh[rr,0] * hx) + ZL[rr,:]
DZT[rr,:] = dzdh[rr,0] * dhdx
#plt.plot(z, dzdh[:,0])
# Compute the coordinate surface at edge of Rayleigh layer
xi = 1.0 / ZH * zRay
ang = 1.0 / 3.0 * mt.pi * xi
dzdh, d_dzdh_dxi = computeTerrainDecayFunctions(xi, ang, StaticSolve)
ZRL = (dzdh * hx) + zRay
# Compute the local grid lengths at each node
DXM = np.zeros((NZ,NX))
DZM = np.zeros((NZ,NX))
for ii in range(NZ):
xdiff = np.diff(XL[ii,:])
DXM[ii,:] = np.concatenate((np.expand_dims(xdiff[0],0), xdiff))
for jj in range(NX):
zdiff = np.diff(ZTL[:,jj])
DZM[:,jj] = np.concatenate((np.expand_dims(zdiff[0],0), zdiff))
return XL, ZTL, DZT, sigma, ZRL, DXM, DZM
def computeStretchedDomain2D(DIMS, REFS, zRay, hx, dhdx):
# Get data from DIMS and REFS
ZH = DIMS[2]
NX = DIMS[3] + 1
NZ = DIMS[4]
# Get REFS data
x = REFS[0]
z = REFS[1]
DDX_1D = REFS[2]
# Compute the flat XZ mesh
DZT, dummy = np.meshgrid(dhdx,z);
XL, ZL = np.meshgrid(x,z);
# Make the global array of terrain height and slope features
ZTL = np.zeros((NZ,NX))
sigma = np.ones((NZ,NX))
for cc in range(NX):
thisZH = ZH - hx[cc]
sigma[:,cc] *= (ZH / thisZH)
ZTL[:,cc] = ZL[:,cc] * thisZH / ZH
ZTL[:,cc] += hx[cc]
# Compute the terrain derivatives
for rr in range(1,NZ):
DZT[rr,:] = DDX_1D.dot(ZTL[rr,:] - z[rr])
# Compute the coordinate surface at the edge of the Rayleigh layer
ZRL = (1.0 - zRay / ZH) * hx + zRay
return XL, ZTL, DZT, sigma, ZRL
```
#### File: jeguerra/nonlinearMtnWavesSolver/computeRayleighEquations.py
```python
import math as mt
import numpy as np
import scipy.sparse as sps
from matplotlib import cm
import matplotlib.pyplot as plt
def computeRayleighField(DIMS, REFS, height, width, applyTop, applyLateral):
# Get DIMS data
L1 = DIMS[0]
L2 = DIMS[1]
ZH = DIMS[2]
NX = DIMS[3] + 1
NZ = DIMS[4]
RP = 4
GP = 2
# Get REFS data
X = REFS[4]
Z = REFS[5]
# Set the layer bounds
dLayerZ = height
dLayerR = L2 - width
dLayerL = L1 + width
depth = ZH - height
# Assemble the Rayleigh field
RL = np.zeros((NZ, NX))
RLX = np.zeros((NZ, NX))
RLZ = np.zeros((NZ, NX))
SBR = np.ones((NZ, NX))
for ii in range(0,NZ):
for jj in range(0,NX):
# Get this X location
XRL = X[ii,jj]
ZRL = Z[ii,jj]
if applyLateral:
# Left layer or right layer or not? [1 0]
if XRL >= dLayerR:
dNormX = (L2 - XRL) / width
elif XRL <= dLayerL:
dNormX = (XRL - L1) / width
else:
dNormX = 1.0
# Evaluate the Rayleigh factor
RFX = (mt.cos(0.5 * mt.pi * dNormX))**RP
else:
RFX = 0.0
if applyTop:
# In the top layer?
if ZRL >= dLayerZ[jj]:
# This maps [depth ZH] to [1 0]
dNormZ = (ZH - ZRL) / depth[jj]
else:
dNormZ = 1.0
# Evaluate the strength of the field
RFZ = (mt.cos(0.5 * mt.pi * dNormZ))**RP
else:
RFZ = 0.0
# Set the field to max(lateral, top) to handle corners
RLX[ii,jj] = RFX
RLZ[ii,jj] = RFZ
RL[ii,jj] = np.amax([RFX, RFZ])
# Set the binary matrix
if RL[ii,jj] != 0.0:
SBR[ii,jj] = 0.0
'''
plt.figure()
plt.contourf(X, Z, RL, 101, cmap=cm.seismic)
plt.colorbar()
plt.show()
input()
'''
# Assemble the Grid Matching Layer field X and Z directions
GML = np.ones((NZ, NX))
GMLX = np.ones((NZ, NX))
GMLZ = np.ones((NZ, NX))
C1 = 0.02
C2 = 10.0
isStretchGML = True # True: trigonometric GML applied to the RHS; False: direct GML applied to the state
for ii in range(0,NZ):
for jj in range(0,NX):
# Get this X location
XRL = X[ii,jj]
ZRL = Z[ii,jj]
if applyLateral:
# Left layer or right layer or not? [0 1]
if XRL >= dLayerR:
dNormX = (XRL - dLayerR) / width
elif XRL <= dLayerL:
dNormX = (dLayerL - XRL) / width
else:
dNormX = 0.0
if isStretchGML:
# Evaluate the GML factor
#RFX = (mt.tan(0.5 * mt.pi * dNormX))**GP
RFX = 2.0 * dNormX**2
else:
# Evaluate buffer layer factor
RFX = (1.0 - C1 * dNormX**2) * \
(1.0 - (1.0 - mt.exp(C2 * dNormX**2)) / (1.0 - mt.exp(C2)))
else:
RFX = 0.0
if applyTop:
# In the top layer?
if ZRL >= dLayerZ[jj]:
dNormZ = (ZRL - dLayerZ[jj]) / (ZH - height[jj])
else:
dNormZ = 0.0
if isStretchGML:
# Evaluate the strength of the field
#RFZ = (mt.tan(0.5 * mt.pi * dNormZ))**GP
RFZ = 2.0 * dNormZ**2
else:
# Evaluate buffer layer factor
RFZ = (1.0 - C1 * dNormZ**2) * \
(1.0 - (1.0 - mt.exp(C2 * dNormZ**2)) / (1.0 - mt.exp(C2)))
else:
RFZ = 0.0
if isStretchGML:
GMLX[ii,jj] = 1.0 / (1.0 + RFX)
GMLZ[ii,jj] = 1.0 / (1.0 + RFZ)
# Set the field to max(lateral, top) to handle corners
RFM = np.amax([RFX, RFZ])
GML[ii,jj] = 1.0 / (1.0 + RFM)
else:
GMLX[ii,jj] = RFX
GMLZ[ii,jj] = RFZ
# Set the field to max(lateral, top) to handle corners
GML[ii,jj] = np.amin([RFX, RFZ])
'''
plt.figure()
plt.contourf(X, Z, GMLX, 101, cmap=cm.seismic)
plt.colorbar()
plt.show()
input()
'''
return (GML, GMLX, GMLZ), RL, RLX, RLZ, SBR
def computeRayleighEquations(DIMS, REFS, depth, RLOPT, topdex, botdex):
# Get options data
width = RLOPT[1]
applyTop = RLOPT[2]
applyLateral = RLOPT[3]
mu = RLOPT[4]
# Get DIMS data
NX = DIMS[3] + 1
NZ = DIMS[4]
OPS = NX * NZ
# Set up the Rayleigh field
GML, RL, RLX, RLZ, SBR = computeRayleighField(DIMS, REFS, depth, width, \
applyTop, applyLateral)
# Compute the diagonal for full Rayleigh field
tempDiagonal = np.reshape(RL, (OPS,), order='F')
# Compute the matrix operator
RLM = sps.spdiags(tempDiagonal, 0, OPS, OPS)
'''
# Compute the diagonal for full Rayleigh field
tempDiagonal = np.reshape(RLX, (OPS,), order='F')
# Compute the matrix operator
RLXM = sps.spdiags(tempDiagonal, 0, OPS, OPS)
# Compute the diagonal for full Rayleigh field
tempDiagonal = np.reshape(RLZ, (OPS,), order='F')
# Compute the matrix operator
RLZM = sps.spdiags(tempDiagonal, 0, OPS, OPS)
'''
# Store the diagonal blocks corresponding to Rayleigh damping terms
ROPS = mu * np.array([RLM, RLM, RLM, RLM])
return ROPS, RLM, GML, SBR
```
#### File: jeguerra/nonlinearMtnWavesSolver/computeResidualViscCoeffs.py
```python
import math as mt
import numpy as np
import bottleneck as bn
# This approach blends by maximum residuals on each variable
def computeResidualViscCoeffs(RES, QM, VFLW, DX, DZ, DXD, DZD, DX2, DZ2):
# Compute a filter length...
#DL = 0.5 * (DX + DZ)
#DL = DX * DZ
DXZ = DXD * DZD
DL = mt.sqrt(DXZ)
# Compute absolute value of residuals
ARES = np.abs(RES)
# Normalize the residuals (U and W only!)
for vv in range(2):
if QM[vv] > 0.0:
ARES[:,vv] *= (1.0 / QM[vv])
else:
ARES[:,vv] *= 0.0
# Get the maximum in the residuals (unit = 1/s)
QRES_MAX = DXZ * bn.nanmax(ARES, axis=1)
# Compute flow speed plus sound speed coefficients
QMAX = DL * VFLW
#QMAX = bn.nanmax(DL * VWAV) # USE THE TOTAL MAX NORM
# Limit DynSGS to upper bound
compare = np.stack((QRES_MAX, QMAX),axis=1)
QRES_CF = bn.nanmin(compare, axis=1)
return (np.expand_dims(QRES_CF,1), np.expand_dims(QMAX,1))
def computeFlowAccelerationCoeffs(RES, DT, U, W, DX, DZ):
ARES = np.abs(RES)
QRESX = np.zeros((len(U), 4))
QRESZ = np.zeros((len(W), 4))
for vv in range(4):
# Compute the anisotropic coefficients
QRESX[:,vv] = (DX * DT) * ARES[0,vv]
QRESZ[:,vv] = (DZ * DT) * ARES[1,vv]
return (QRESX, QRESZ)
```
#### File: jeguerra/nonlinearMtnWavesSolver/HerfunChebNodesWeights.py
```python
import numpy as np
from numpy import multiply as mul
from scipy import linalg as las
import math as mt
from scipy.special import roots_hermite
from scipy.special import roots_chebyt
def hefunclb(NX):
#'''
# Compute off-diagonals of 7.84 in Spectral Methods, Springer
b = range(1,NX+1)
bd = 0.5 * np.array(b)
# Assemble the matrix
m1 = np.diag(np.sqrt(bd), k=+1)
m2 = np.diag(np.sqrt(bd), k=-1)
mm = np.add(m1,m2)
# Compute the eigenvalues of this matrix (zeros Hermite polys)
ew = las.eigvals(mm)
# Sort the eigenvalues in ascending order and store nodes
xi = np.sort(np.real(ew))
# Compute the Hermite function weights
hf = hefuncm(NX, xi, False)
w = 1.0 / (NX+1) * np.power(hf, -2.0)
#'''
'''
xi, w = roots_hermite(NX+1)
# Compute the Hermite function weights
hf = hefuncm(NX, xi, False)
w = 1.0 / (NX+1) * np.power(hf, -2.0)
'''
return xi, w
def hefuncm(NX, xi, fullMat):
# Initialize constant
cst = 1.0 / mt.pi**0.25  # Hermite-function normalization, pi**(-1/4)
ND = len(xi)
# Initialize the output matrix if needed
if fullMat:
HFM = np.zeros((NX+1,ND))
# Compute the first two modes of the recursion
wfun = np.exp(-0.5 * np.power(xi, 2.0))
poly0 = cst * wfun;
poly1 = cst * mt.sqrt(2.0) * (xi * wfun);
# Put the first two functions in the matrix or return low order functions
if fullMat:
HFM[0,:] = poly0
HFM[1,:] = poly1
elif NX == 0:
return poly0
elif NX == 1:
return poly1
for nn in range(1,NX):
polyn = mt.sqrt(2.0 / (nn+1)) * (xi * poly1)
polyn -= mt.sqrt(nn / (nn+1)) * poly0
poly0 = poly1;
poly1 = polyn;
# Put the new function in its matrix place
if fullMat:
HFM[nn+1,:] = polyn
else:
HFM = polyn
return HFM.T
def cheblb(NZ):
# Compute Chebyshev CGL nodes and weights
ep = NZ - 1
xc = np.array(range(NZ))
xi = -np.cos(mt.pi / ep * xc)
w = mt.pi / NZ * np.ones(NZ)
w[0] *= 0.5
w[ep] *= 0.5
return xi, w
def chebpolym(NM, xi):
# Compute Chebyshev pols (first kind) into a matrix transformation
# Functions need to be arranged bottom to top!
NX = len(xi)
CTM = np.zeros((NX, NM+1))
CTM[:,0] = np.ones(NX)
CTM[:,1] = xi
# 3 Term recursion for functions
for ii in range(2, NM+1):
CTM[:,ii] = 2.0 * \
mul(xi, CTM[:,ii-1]) - \
CTM[:,ii-2]
return CTM
```
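A brief check of the node/weight helpers above; the node counts are arbitrary.
```python
from HerfunChebNodesWeights import cheblb, hefunclb

NZ = 16
xi, w = cheblb(NZ)             # Chebyshev-Gauss-Lobatto nodes on [-1, 1]
print(len(xi), xi[0], xi[-1])  # NZ nodes running from -1 to +1

NX = 12
alpha, whf = hefunclb(NX)      # Hermite-function nodes and weights
print(len(alpha), len(whf))    # NX + 1 of each
```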
#### File: nonlinearMtnWavesSolver/python results/Plot_convergence.py
```python
import math as mt
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
# Read in the text file
#fname = '/media/jeg/FastDATA/linearMtnWavesSolver/python results/convergence025m_discrete.txt'
#fname = '/media/jeg/FastDATA/linearMtnWavesSolver/python results/convergence025m_classical.txt'
fname = '/media/jeg/FastDATA/linearMtnWavesSolver/python results/convergence250m_classical.txt'
con_data = np.loadtxt(fname, delimiter=', ')
# Do an exponential curve fit to the total residual
def func(x, a, b):
return -b * x + a
lp = 10
xdata = np.arange(0,lp)
ydata = np.log(con_data[0:lp,4])
popt, pcov = curve_fit(func, xdata, ydata, p0=[1.0E-3, 2.0], method='lm')
rate = popt[1]
# Make the nice paper plot
fig = plt.figure(figsize=(12.0, 4.0))
xdata = np.arange(0,con_data.shape[0])
fdata = func(xdata, *popt)
# Make the plots
plt.subplot(1,2,1)
plt.plot(xdata, con_data[:,4], 'kd-')
plt.plot(xdata, np.exp(fdata), 'r--')
plt.yscale('log')
plt.grid(b=None, which='major', axis='both', color='k', linestyle='--', linewidth=0.5)
plt.legend(('Total Residual', 'Convergence Rate = ' + '%.5f' % rate))
plt.xlabel('Newton Iteration')
plt.ylabel('L2-norm of Residual')
plt.title('Total Residual Convergence')
plt.subplot(1,2,2)
plt.plot(xdata, con_data[:,0:4])
plt.yscale('log')
plt.grid(b=None, which='major', axis='both', color='k', linestyle='--', linewidth=0.5)
plt.legend(('u', 'w', 'log-p', 'log-theta'))
plt.xlabel('Newton Iteration')
plt.title('Convergence per Variable')
plt.show()
```
#### File: nonlinearMtnWavesSolver/python results/Schar_comparison.py
```python
import shelve
import scipy.io as sio
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
# Make the parent figure
fig = plt.figure(figsize=(12.0, 4.0))
#%% Load in the classical linear solution from Matlab (approximate free-slip)
clin = sio.loadmat('../matlab backup/AnalyticalSchar.mat', squeeze_me=True)
XM = clin['X']
ZM = clin['Z']
WM = clin['w']
plt.subplot(1,3,1)
ccheck = plt.contourf(1.0E-3 * XM, 1.0E-3 * ZM, WM.T, 44, cmap=cm.seismic, vmin=-2.2, vmax=2.2)
#fig.colorbar(ccheck)
plt.xlim(-20.0, 20.0)
plt.ylim(0.0, 15.0)
plt.tick_params(axis='x', which='both', bottom=True, top=False, labelbottom=True)
plt.xlabel('X (km)')
plt.ylabel('Z (km)')
plt.title('Classical Fourier - W (m/s)')
plt.grid(b=None, which='major', axis='both', color='k', linestyle='--', linewidth=0.5)
#%% Load in the limited area reproduction with approximate free-slip
la_afs = shelve.open('/home/jeg/scratch/ScharTest_Classical_FreeSlipApprox_250m', 'r')
DIMS = la_afs['DIMS']
REFS = la_afs['REFS']
SOL = la_afs['SOLT']
# Make the equation index vectors for all DOF
numVar = 4
NX = DIMS[3]
NZ = DIMS[4]
OPS = DIMS[5]
udex = np.array(range(OPS))
wdex = np.add(udex, OPS)
pdex = np.add(wdex, OPS)
tdex = np.add(pdex, OPS)
X = REFS[4]
x1 = X[0,:]
Z = REFS[5]
W1 = np.reshape(SOL[wdex,0], (NZ, NX+1), order='F')
plt.subplot(1,3,2)
ccheck = plt.contourf(1.0E-3 * X, 1.0E-3 * Z, W1, 44, cmap=cm.seismic, vmin=-2.2, vmax=2.2)
#fig.colorbar(ccheck)
plt.xlim(-20.0, 20.0)
plt.ylim(0.0, 15.0)
plt.tick_params(axis='x', which='both', bottom=True, top=False, labelbottom=True)
plt.xlabel('X (km)')
plt.title('Approximate Free-Slip - W (m/s)')
plt.grid(b=None, which='major', axis='both', color='k', linestyle='--', linewidth=0.5)
#%% Make common colorbar
axc = plt.subplot(1,3,3)
axc.set_axis_off()
fig.colorbar(ccheck, fraction=1.0)
plt.tight_layout()
plt.savefig('ScharTestComparisonW.png')
#%% Make plot for iterative solution to 250m Schar problem
fig = plt.figure(figsize=(12.0, 4.0))
la_afs = shelve.open('/home/jeg/scratch/ScharTest_Newton10_FreeSlipApprox_250m', 'r')
DIMS = la_afs['DIMS']
REFS = la_afs['REFS']
SOL = la_afs['SOLT']
# Make the equation index vectors for all DOF
numVar = 4
NX = DIMS[3]
NZ = DIMS[4]
OPS = DIMS[5]
udex = np.array(range(OPS))
wdex = np.add(udex, OPS)
pdex = np.add(wdex, OPS)
tdex = np.add(pdex, OPS)
X = REFS[4]
x2 = X[0,:]
Z = REFS[5]
W = np.reshape(SOL[wdex,0], (NZ, NX+1), order='F')
plt.subplot(1,2,1)
ccheck = plt.contourf(1.0E-3 * X, 1.0E-3 * Z, W, 44, cmap=cm.seismic, vmin=-2.2, vmax=2.2)
#fig.colorbar(ccheck)
plt.xlim(-20.0, 20.0)
plt.ylim(0.0, 15.0)
plt.tick_params(axis='x', which='both', bottom=True, top=False, labelbottom=True)
plt.xlabel('X (km)')
plt.ylabel('Z (km)')
plt.title('Estimated Steady Solution - W (m/s)')
plt.colorbar()
plt.grid(b=None, which='major', axis='both', color='k', linestyle='--', linewidth=0.5)
fname = '/media/jeg/FastDATA/linearMtnWavesSolver/python results/convergence250m_classical.txt'
from scipy.optimize import curve_fit
con_data = np.loadtxt(fname, delimiter=', ')
# Do an exponential curve fit to the total residual
def func(x, a, b):
return -b * x + a
lp = 10
xdata = np.arange(0,lp)
ydata = np.log(con_data[0:lp,4])
popt, pcov = curve_fit(func, xdata, ydata, p0=[1.0E-3, 2.0], method='lm')
rate = popt[1]
# Make the nice paper plot
xdata = np.arange(0,con_data.shape[0])
fdata = func(xdata, *popt)
# Make the plots
plt.subplot(1,2,2)
plt.plot(xdata, con_data[:,4], 'kd-')
plt.plot(xdata, np.exp(fdata), 'r--')
plt.yscale('log')
plt.grid(b=None, which='major', axis='both', color='k', linestyle='--', linewidth=0.5)
plt.legend(('Total Residual', 'Convergence Rate = ' + '%.5f' % rate))
plt.xlabel('Newton Iteration')
plt.ylabel('L2-norm of Residual')
plt.title('Total Residual Convergence')
plt.tight_layout()
plt.savefig('ScharTestNewtonW.png')
```
|
{
"source": "jeguster/Peruskoodaus",
"score": 3
}
|
#### File: jeguster/Peruskoodaus/hetu3.py
```python
import stdnum.fi.hetu
class Henkilotunnus:
def __init__(self, teksti):
if not stdnum.fi.hetu.is_valid(teksti):
raise Exception(f"Ei ole kelvollinen hetu: {teksti}")
self.hetu = teksti # store the contents of teksti in the "hetu" attribute
def sukupuoli(self):
toiseksi_viimeinen_merkki = self.hetu[-2]
luku = int(toiseksi_viimeinen_merkki)
if luku % 2 == 0: # remainder when divided by 2 is 0
return "Nainen" # even digits: 0, 2, 4, 6, 8
else:
return "Mies" # odd digits: 1, 3, 5, 7, 9
if __name__ == '__main__':
hetu = Henkilotunnus('010101-8x8B')
sukupuoli = hetu.sukupuoli()
print("Henkilötunnuksen sukupuoli", sukupuoli)
```
#### File: jeguster/Peruskoodaus/main2.py
```python
import stdnum.fi.hetu
import hetutarkistus2
def main():
while True:
hetu = input("<NAME>: ")
if stdnum.fi.hetu.is_valid(hetu):
print("Oikeanlainen HETU")
print(hetutarkistus2.sukupuoli(hetu))
print("Syntymäpäivä:", hetutarkistus2.syntymapaiva(hetu))
print("ikä", hetutarkistus2.ika(hetu))
break
else:
print("vääränlainen HETU")
if __name__ == '__main__':
main()
```
|
{
"source": "jeguzzi/mt_speaker",
"score": 2
}
|
#### File: mt_speaker/scripts/sound_node.py
```python
import rospy
from dynamic_reconfigure.server import Server
from sound_play.libsoundplay import SoundClient
from std_msgs.msg import String
from mt_sound.cfg import SoundConfig
class Sound(object):
def __init__(self):
rospy.init_node('sound_node', anonymous=True)
self.soundhandle = SoundClient(blocking=False)
self.volume = rospy.get_param('~volume', 0.5)
self.language = rospy.get_param('~language', 'en-US')
Server(SoundConfig, self.callback)
rospy.Subscriber('say', String, self.say, queue_size=1)
rospy.Subscriber('play', String, self.play, queue_size=1)
rospy.on_shutdown(self.on_shutdown)
rospy.spin()
def callback(self, config, level):
self.volume = config.volume
self.language = config.language
return config
def say(self, msg):
self.soundhandle.stopAll()
# self.soundhandle.say supports UTF-8 strings
self.soundhandle.say(msg.data, volume=self.volume, voice=self.language)
def play(self, msg):
self.soundhandle.stopAll()
self.soundhandle.playWaveFromPkg('mt_sound', 'sound/{0}.wav'.format(msg.data), volume=self.volume)
def on_shutdown(self):
self.soundhandle.stopAll()
if __name__ == '__main__':
Sound()
```
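A tiny companion script that exercises the node above by publishing on its `say` topic; the node name and the spoken phrase are placeholders, and it assumes a running ROS master plus the `sound_play` node that `SoundClient` talks to.
```python
#!/usr/bin/env python
import rospy
from std_msgs.msg import String

rospy.init_node('say_hello', anonymous=True)
pub = rospy.Publisher('say', String, queue_size=1)
rospy.sleep(1.0)  # give the subscriber time to connect
pub.publish(String(data='hello from the speaker test'))
```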
|
{
"source": "jeguzzi/resilience",
"score": 2
}
|
#### File: resilience/code/experiment.py
```python
import itertools
import multiprocessing
import os
from datetime import datetime as dt
import jinja2
import networkx as nx
import numpy as np
import pandas as pd
import yaml
from tqdm import tqdm
from classifier import classifier_output
from graphs import random_delaunay, random_grid, random_realization
from planner import (CellPlanner, DoorPlanner, Planner)
def all_realizations(size):
ls = size * [[True, False]]
return itertools.product(*ls)
def policy_label(policy, th):
if policy == 'optimal':
return policy
else:
return f'{policy}@{th}'
def _chunks(size, chunk_size=None, number_of_chunks=None, index=0):
if number_of_chunks is None:
number_of_chunks = size // chunk_size
return _chunks(size, number_of_chunks=number_of_chunks, index=index)
chunk_size = size // number_of_chunks
rem = size % number_of_chunks
chunks = (np.array([chunk_size] * number_of_chunks) +
np.array([1] * rem + [0] * (number_of_chunks - rem)))
r = np.concatenate([[0], np.cumsum(chunks)]) + index
return [range(*x) for x in zip(r[:-1], r[1:])]
def samples_for_classifier(samples, sigma=None, accuracy=None, **kwargs):
if accuracy in [0.5, 1] or sigma in [0, 1]:
return 1
else:
return samples
def dict_product(**config):
ps = [[(k, v) for v in vs] for k, vs in config.items() if isinstance(vs, list)]
rs = [(k, v) for k, v in config.items() if not isinstance(v, list)]
return [dict(list(v) + rs) for v in itertools.product(*ps)]
def all_policies(thresholds=[], **kwargs):
return [('optimal', 0)] + [('optimistic', th) for th in thresholds]
def classifier_sample(planner, realization, sources, classifier_config={}, policy_config={}):
policies = all_policies(**policy_config)
row = []
cols = (['source', 'sigma', 'gamma', 'classification'] +
[policy_label(*policy) for policy in policies])
while True:
ps, sigma, gamma = classifier_output(realization, **classifier_config)
cs = [[] for _ in sources]
valid = True
for i, source in enumerate(sources):
for policy, th in policies:
c, r = planner.cost(ps, realization, source, policy=policy,
optimistic_threshold=th)
if r:
cs[i].append(c)
else:
valid = False
break
if not valid:
break
if not valid:
continue
min_cs = [planner.min_cost(realization, source) for source in sources]
crs = np.array(cs) / np.array(min_cs)[:, np.newaxis]
r = pd.DataFrame(
[row + [source, sigma, gamma, ps] + list(cr) for source, cr in zip(sources, crs)],
columns=cols)
yield r
def all_classifier_samples(realization, planner, sources=[], classifier_config={},
policy_config={}):
ns = [source for source in sources if planner.is_connected(source, realization)]
configs = dict_product(**classifier_config)
gens = [itertools.islice(classifier_sample(planner, realization, ns, classifier_config=config,
policy_config=policy_config),
samples_for_classifier(**config))
for config in configs]
try:
data = pd.concat(itertools.chain(*gens))
except ValueError as e:
print(f'Exception {e}')
data = pd.DataFrame()
return data
# TODO: get/set/save the seeds. For the moment the seed is set to None.
# Cannot be directly retrieved but could save the result of np.random.get_state()
# Setting the seed is simpler: np.random.seed(<int>). SciPy random draws use numpy.
# I'm also using the python random module, which also has functions random.seed and random.getstate()
# to get/set the seed.
class Experiment(object):
@classmethod
def new_experiment(cls, name, data, save=False, pool=6, return_data=None, seed=None):
t = data['map']['type']
return _experimentTypes[t](name, data, save=save, pool=pool, return_data=return_data,
seed=seed)
def __init__(self, name, data, save=False, return_data=None, pool=6, seed=None):
self.name = name
self.data = data
self.save = save
if return_data is None:
self.return_data = not self.save
else:
self.return_data = return_data
self.pool = pool
self.classifier_config = data['classifier']
self.policy_config = data['policy']
self.map_config = dict(data['map'])
self.number = self.map_config.pop('number', 1)
self.map_config.pop('type')
if save:
os.makedirs(name)
with open(f'{self.name}/experiment.yaml', 'w') as f:
yaml.dump({self.name: self.data}, f)
def compute(self):
# indices = _chunks(self.number, chunk, index=0)
indices = ([x] for x in range(self.number))
if self.pool > 0:
with multiprocessing.Pool(self.pool) as p:
return pd.concat(
tqdm(
p.imap_unordered(
self.compute_samples, indices), # optional arg chunk_size=1
total=self.number, desc=f'Experiment {self.name}'))
else:
return pd.concat(map(self.compute_samples, indices))
def compute_samples(self, indices):
return pd.concat([self.compute_sample(i) for i in indices])
def compute_sample(self, index):
np.random.seed(index)
if self.save:
os.makedirs(f'{self.name}/{index}')
data = None
while data is None:
try:
realization, planner, sources = self.sample(index)
data = all_classifier_samples(realization, planner, sources=sources,
classifier_config=self.classifier_config,
policy_config=self.policy_config)
except NameError as e:
# print(e)
continue
if self.save and not data.empty:
data.to_csv(f'{self.name}/{index}/data.csv')
if self.return_data:
return data
else:
return pd.DataFrame()
class RandomGraphExperiment(Experiment):
def sample(self, index):
g, hidden_state, s, t, cut, pruned = self.sample_map(index)
realization = random_realization(g, hidden_state, s, t)
planner = Planner(g, t, hidden_state)
if self.save:
self.save_experiment(index, g, cut, pruned, hidden_state, s, t, realization)
return realization, planner, [s]
def save_experiment(self, index, g, cut, pruned, hidden_state, s, t, realization):
r = {'hidden_state': hidden_state,
's': s,
't': t,
'realization': realization}
cut = nx.Graph(cut)
pruned = nx.Graph(pruned)
for _, _, data in cut.edges(data=True):
data['cut'] = True
for _, _, data in pruned.edges(data=True):
data['pruned'] = True
try:
pos = g.pos
except AttributeError:
pos = None
g = nx.compose(nx.compose(cut, pruned), g)
if pos is not None:
for i, p in enumerate(pos):
g.node[i]['pos'] = p
nx.write_gpickle(g, f'{self.name}/{index}/graph.gpickle')
with open(f'{self.name}/{index}/map.yaml', 'w') as f:
yaml.dump(r, f)
class RandomGridExperiment(RandomGraphExperiment):
def sample_map(self, index):
return random_grid(**self.map_config)
class RandomDelaunayExperiment(RandomGraphExperiment):
def sample_map(self, index):
return random_delaunay(**self.map_config)
class CellGraphExperiment(Experiment):
def __init__(self, *args, **kwargs):
super(CellGraphExperiment, self).__init__(*args, **kwargs)
self.sources = self.map_config['sources']
self.planner = CellPlanner(
layer_id=self.map_config['layer'], target_id=self.map_config['target'])
size = len(self.planner.hidden_state)
self.rs = list(all_realizations(size))
self.number = len(self.rs)
def sample(self, index):
realization = self.rs[index]
if self.save:
with open(f'{self.name}/{index}/map.yaml', 'w') as f:
yaml.dump({'realization': realization}, f)
return realization, self.planner, self.sources
class DoorGraphExperiment(CellGraphExperiment):
def __init__(self, *args, **kwargs):
super(CellGraphExperiment, self).__init__(*args, **kwargs)
self.sources = [self.map_config['source_id']]
planner = DoorPlanner(**self.map_config)
if self.pool < 1:
self.planner = planner
size = len(planner.hidden_state)
self.rs = list(all_realizations(size))
self.number = len(self.rs)
def sample(self, index):
if self.pool < 1:
planner = self.planner
else:
planner = DoorPlanner(**self.map_config)
realization = self.rs[index]
if self.save:
with open(f'{self.name}/{index}/map.yaml', 'w') as f:
yaml.dump({'realization': realization}, f)
return realization, planner, self.sources
def edge_from_r_graph(data):
return {'u': (data['certainty'] < 1), 'length': data['cost']}
def import_graph(path, s, t, traversable=[], prune=[], **kwargs):
original_graph = nx.read_gpickle(path)
es = [(x, y, edge_from_r_graph(data)) for x, y, data in original_graph.edges(data=True)
if [x, y] not in prune and [y, x] not in prune]
hidden_state = [[(x, y)] for x, y, d in es
if d['u'] and ([x, y] not in traversable and [y, x] not in traversable)]
g = nx.Graph(es)
for n, data in g.nodes(data=True):
data['observe'] = []
data['pos'] = original_graph.node[n]['pos']
for i, es in enumerate(hidden_state):
for x, y in es:
g.node[x]['observe'].append(i)
g.node[y]['observe'].append(i)
g[x][y]['hidden'] = True
return g, hidden_state, s, t
class RealGraphExperiment(Experiment):
def __init__(self, *args, **kwargs):
super(RealGraphExperiment, self).__init__(*args, **kwargs)
g, hs, s, t = import_graph(**self.map_config)
planner = Planner(g, t, hs)
size = len(planner.hidden_state)
self.rs = list(all_realizations(size))
self.number = len(self.rs)
self.sources = [s]
def sample(self, index):
g, hs, s, t = import_graph(**self.map_config)
planner = Planner(g, t, hs)
realization = self.rs[index]
if self.save:
with open(f'{self.name}/{index}/map.yaml', 'w') as f:
yaml.dump({'realization': realization}, f)
return realization, planner, self.sources
_experimentTypes = {'grid': RandomGridExperiment,
'delaunay': RandomDelaunayExperiment,
'cells': CellGraphExperiment,
'real': RealGraphExperiment,
'doors': DoorGraphExperiment
}
def execute_all_experiments(config_file='./experiment.yaml', pool=6):
if os.path.splitext(config_file)[1] == '.j2':
print('Load Jinja template')
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader('./'))
template = jinja_env.get_template(config_file)
experiments = yaml.load(template.render())
else:
with open(config_file) as f:
experiments = yaml.load(f)
for name, data in tqdm(experiments.items(), desc='All experiments'):
if os.path.exists(name):
print(f'Experiment {name} already computed')
continue
print(f'Starting to compute experiment {name}')
description = data.get('description', '')
if description:
print(f'***\n\t{description}\n***')
start_time = dt.now()
Experiment.new_experiment(name, data, save=True, pool=pool).compute()
duration = dt.now() - start_time
secs = round(duration.total_seconds())
print(f'Experiment {name} computed in {secs} seconds')
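# A minimal, hypothetical sketch of the experiment.yaml consumed above. Only the
# 'description' key and the 'type'/'number' entries of the map section are taken
# from this file; the exact nesting ('map', 'classifier', 'policy') is an
# assumption about what Experiment.new_experiment expects.
#
# small-grid:
#   description: quick smoke test on random grids
#   map:
#     type: grid
#     number: 10
#   classifier: {}
#   policy: {}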
def load_map(folder, **kwargs):
from graphs import draw_graph
g = nx.read_gpickle(f'{folder}/graph.gpickle')
cut = nx.Graph([e for e in g.edges(data=True) if 'cut' in e[2]])
pruned = nx.Graph([e for e in g.edges(data=True) if 'pruned' in e[2]])
for n, d in (list(cut.nodes(data=True)) + list(pruned.nodes(data=True))):
d.update(g.node[n])
g.remove_edges_from(list(cut.edges()) + list(pruned.edges()))
g.remove_nodes_from([n for n in g if len(g[n]) == 0])
with open(f'{folder}/map.yaml') as f:
map_config = yaml.load(f)
draw_graph(g, realization=map_config['realization'],
hidden_state=map_config['hidden_state'], cut=cut, pruned=pruned,
s=map_config['s'],
t=map_config['t'], **kwargs)
```
#### File: code/graphs/indoor.py
```python
import xml.etree.ElementTree as ET
from collections import defaultdict
import networkx as nx
from shapely import geometry
GML = 'http://www.opengis.net/gml/3.2'
INDOOR = 'http://www.opengis.net/indoorgml/1.0/core'
NAVI = 'http://www.opengis.net/indoorgml/1.0/navigation'
XLINK = 'http://www.w3.org/1999/xlink'
ALMA = 'http://www.idsia.ch/alma'
SVG = 'http://www.w3.org/2000/svg'
GMLNS = '{%s}' % GML
INDOORNS = '{%s}' % INDOOR
NAVINS = '{%s}' % NAVI
XLINKNS = '{%s}' % XLINK
ALMANS = '{%s}' % ALMA
SVGNS = '{%s}' % SVG
my_namespaces = {INDOOR: 'indoorCore', GML: "gml", XLINK: 'xlink',
ALMA: 'alma', NAVI: 'indoorNavi'}
ns = {v: k for k, v in my_namespaces.items()}
ET._namespace_map.update(my_namespaces)
def argmin(iterable):
return min(enumerate(iterable), key=lambda x: x[1])[0]
def coord_to_gml(shape):
return " ".join(["%.1f %.1f" % p for p in shape.coords])
def point_to_gml(point, id):
gml = ET.Element("gml:Point", {'srsDimension': '2', 'gml:id': id})
pos = ET.SubElement(gml, "gml:pos")
pos.text = coord_to_gml(point)
return gml
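# A small usage sketch of the helpers above (assuming shapely is available):
# point_to_gml(geometry.Point(1.0, 2.0), 'P1') serializes via ET.tostring to
# roughly b'<gml:Point srsDimension="2" gml:id="P1"><gml:pos>1.0 2.0</gml:pos></gml:Point>'.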
def line_to_gml(line, id):
gml = ET.Element("gml:LineString", {'gml:id': id})
pos = ET.SubElement(gml, "gml:posList")
pos.text = coord_to_gml(line)
return gml
def ring_to_gml(ring):
gml = ET.Element("gml:LinearRing")
pos = ET.SubElement(gml, "gml:posList")
pos.text = coord_to_gml(ring)
return gml
def polygon_to_gml(polygon, id):
gml = ET.Element("gml:Polygon", {'gml:id': id})
exterior = ET.SubElement(gml, "gml:exterior")
exterior.append(ring_to_gml(polygon.exterior))
for ring in polygon.interiors:
i = ET.SubElement(gml, "gml:interior")
i.append(ring_to_gml(ring))
return gml
def coordinatesFromGML(p):
node = p.find('.//gml:pos', ns)
pos = node.text
point = [float(n) for n in pos.split()[:2]]
return tuple(point)
_ns2types = {}
_ns2types['indoorCore'] = ['CellSpace', 'CellSpaceBoundary', 'SpaceLayer',
'MultiLayeredGraph']
_ns2types['indoorNavi'] = ['NavigableSpace', 'NavigableBoundary',
'GeneralSpace', 'TransferSpace', 'TransitionSpace',
'AnchorSpace', 'ConnectionSpace']
class GMLFeature(object):
"""A minimal GML feature"""
def __init__(self, id):
super(GMLFeature, self).__init__()
self.id = id
self.name = ''
self.description = ''
self.type = 'GMLFeature'
def __repr__(self):
return ("%s: %s %s %s" %
(self.type, self.id,
'(' + self.name + ')' if self.name else '',
'- ' + self.description if self.description else ''))
@staticmethod
def readMetaData(node, name):
return node.find(".//{name}".format(name=name), ns)
@classmethod
def loadFromXMLNode(cls, node):
if node is None:
return None
i = node.get('%sid' % GMLNS)
instance = cls(i)
t = node.tag.split('}')
instance.type = t.pop()
nameNode = node.find('gml:name', ns)
if(nameNode is not None):
instance.name = nameNode.text
descNode = node.find('gml:description', ns)
if(descNode is not None):
instance.description = descNode.text
return instance
class State(GMLFeature):
"""State is modeled after an indoorGML State"""
def __init__(self, id):
super(State, self).__init__(id)
self.connects = []
self.geometry = None
self.duality = None
self.attribute = {}
self.equals = defaultdict(set)
self.within = defaultdict(set)
self.contains = defaultdict(set)
self.overlaps = defaultdict(set)
# OR of equals within contains overlaps
self.intersects = defaultdict(set)
# NOT in indoorGML
self.touches = defaultdict(set)
self.type = 'State'
self.layer = None
self.default = {}
self.up = set()
self.down = set()
@property
def neighbors(self):
if not self.layer.graph.has_node(self.id):
return []
return [self.layer.states.get(s) for s in self.layer.graph[self.id]]
def transitionsTo(self, state):
if not self.layer.graph.has_node(self.id):
return None
ts = self.layer.graph[self.id].get(state.id, {}).values()
return [self.layer.transitions.get(t['id']) for t in ts]
@classmethod
def loadFromXMLNode(cls, node):
if(node is None):
return None
state = super(cls, cls).loadFromXMLNode(node)
geometryNode = node.find('indoorCore:geometry//gml:pos', ns)
if(geometryNode is not None):
pos = node.find('indoorCore:geometry//gml:pos', ns).text
point = [float(n) for n in pos.split()[:2]]
state.geometry = geometry.Point(tuple(point))
state.connects = []
cellXML = node.find('indoorCore:duality/*', ns)
state.duality = Cell.loadFromXMLNode(cellXML)
if(state.duality):
state.duality.duality = state
for n in node.findall("gml:name", ns):
state.name = n.text
for mt in ['open', 'nontraversable', 'invisible']:
m = GMLFeature.readMetaData(node, 'alma:{mt}'.format(mt=mt))
if m is not None:
state.default[mt] = True
else:
state.default[mt] = False
return state
class Transition(GMLFeature):
"""Transition is modeled after an indoorGML State"""
def __init__(self, id):
super(Transition, self).__init__(id)
self.geometry = None
self.duality = None
self.type = 'Transition'
@property
def connects(self):
return [self.start, self.end]
@classmethod
def loadFromXMLNode(cls, node):
if(node is None):
return None
transition = super(cls, cls).loadFromXMLNode(node)
connects = [s.get(XLINKNS + 'href')[1:]
for s in node.findall('indoorCore:connects', ns)]
transition.start = connects[0]
transition.end = connects[1]
transition.duality = (node.find('indoorCore:duality', ns).
get(XLINKNS + 'href')[1:])
line = []
for pos in node.findall('indoorCore:geometry//gml:pos', ns):
line.append(tuple([float(n) for n in pos.text.split()[:2]]))
for posList in node.findall('.//gml:posList', ns):
coord = [float(n) for n in posList.text.split()]
line = zip(coord[::2], coord[1::2])
line = list(line)
if(len(line) > 1):
transition.geometry = geometry.LineString(line)
return transition
class Cell(GMLFeature):
"""Transition is modeled after an indoorGML CellSpace"""
def __init__(self, id):
super(Cell, self).__init__(id)
self.boundary = []
self.geometry = None
self.type = 'CellSpace'
self.outer_edge = None
self.inner_edges = []
# NavigableSpace attributes:
self.usage = None
self.className = None
self.function = None
def edges(self):
return [self.outer_edge] + self.inner_edges
def addBoundary(self, b):
self.boundary.append(b)
b.addCell(self)
def removeBoundary(self, b):
if b in self.boundary:
self.boundary.remove(b)
b.removeCell(self)
@classmethod
def loadFromExternalReferenceNode(cls, externalRef):
raise NameError("Not implemented yet")
@classmethod
def loadFromXMLNode(cls, node):
if(node is None):
return None
externalRef = node.find('indoorCore:externalReference', ns)
if(externalRef is not None):
return cls.loadFromExternalReferenceNode(externalRef)
cell = super(cls, cls).loadFromXMLNode(node)
if cell.type != 'CellSpace':
for n in node.findall('indoorNavi:class', ns):
cell.className = n.text
for n in node.findall('indoorNavi:usage', ns):
cell.usage = n.text
for n in node.findall('indoorNavi:function', ns):
cell.function = n.text
cell.boundary = []
for boundaryNode in node.findall('indoorCore:partialboundedBy', ns):
ref = boundaryNode.get(XLINKNS + 'href')
if(ref is None):
try:
cell.addBoundary(Boundary.loadFromXMLNode(boundaryNode[0]))
except Exception as e:
pass
else:
cell.boundary.append(ref[1:])
polygonXML = node.find('indoorCore:Geometry2D/gml:Polygon', ns)
if(polygonXML is None):
cell.geometry = None
cell.boundary = []
else:
interior = []
exterior = []
for pos in polygonXML.findall('gml:exterior//gml:pos', ns):
exterior.append(tuple([float(n) for n
in pos.text.split()][:2]))
for posList in polygonXML.findall('gml:exterior//gml:posList', ns):
coord = [float(n) for n in posList.text.split()]
exterior = zip(coord[::2], coord[1::2])
for loop in polygonXML.findall('gml:interior//gml:LinearRing', ns):
ls = []
for pos in loop.findall('.//gml:pos', ns):
ls.append(tuple([float(n) for n in pos.text.split()][:2]))
for posList in loop.findall('.//gml:posList', ns):
coord = [float(n) for n in posList.text.split()]
ls = zip(coord[::2], coord[1::2])
interior.append(geometry.LinearRing(ls))
cell.geometry = geometry.Polygon(exterior, interior)
if not cell.geometry.is_valid:
raise Exception("Invalid Cell %s: %s" %
(cell.id, cell.geometry.wkt))
return cell
class Boundary(GMLFeature):
"""Transition is modeled after an indoorGML CellSpaceBoundary"""
def __init__(self, id):
super(Boundary, self).__init__(id)
self.geometry = None
self.duality = None
self.type = 'CellSpaceBoundary'
self._chains = {}
self.cells = []
def addCell(self, cell):
self.cells.append(cell)
def removeCell(self, cell):
if cell in self.cells:
self.cells.remove(cell)
@classmethod
def loadFromXMLNode(cls, node):
if(node is None):
return None
boundary = super(cls, cls).loadFromXMLNode(node)
line = []
poss = node.findall('indoorCore:geometry2D/gml:LineString/gml:pos', ns)
for pos in poss:
line.append(tuple([float(n) for n in pos.text.split()][:2]))
posLists = node.findall(
'indoorCore:geometry2D/gml:LineString/gml:posList', ns)
for posList in posLists:
coord = [float(n) for n in posList.text.split()]
line = zip(coord[::2], coord[1::2])
boundary.geometry = geometry.LineString(line)
if not boundary.geometry.is_valid:
raise Exception("Invalid Boundary %s %s" %
(boundary.id, boundary.geometry.wkt))
return boundary
class Layer(GMLFeature):
"""Layer is modeled after an indoorGML SpaceLayer"""
def __init__(self, id):
super(Layer, self).__init__(id)
self.className = ''
self.usage = ''
self.function = ''
self.states = {}
self.transitions = {}
self.cells = {}
self.boundaries = {}
self.indexId = 0
self.graph = nx.MultiGraph()
self.type = 'SpaceLayer'
self.map = None
def state_with_name(self, name):
states = [s for s in self.states.values() if s.name == name]
if len(states):
return states[0]
return None
def addState(self, state):
state.layer = self
self.states[state.id] = state
if(state.duality):
cell = state.duality
self.cells[cell.id] = cell
cell.layer = self
if cell.geometry:
self.indexId += 1
if(cell.boundary):
for boundary in cell.boundary[:]:
if(isinstance(boundary, str)): # xlink:href
cell.boundary.remove(boundary)
o_boundary = self.boundaries.get(boundary, None)
if o_boundary:
cell.addBoundary(o_boundary)
else:
pass
else:
self.boundaries[boundary.id] = boundary
def connectTransition(self, transition):
if(transition.start.geometry and transition.end.geometry):
transition.geometry = geometry.LineString(
[transition.start.geometry,
transition.duality.geometry.centroid,
transition.end.geometry])
transition.start.connects.append(transition)
transition.end.connects.append(transition)
if(transition.geometry):
transition.weight = transition.geometry.length
else:
transition.weight = 0
self.graph.add_edge(transition.start.id, transition.end.id,
transition.id, id=transition.id,
weight=transition.weight)
def addTransitionWithBoundary(self, start, end, border):
ntransition = Transition('%sT%d' %
(self.id, len(self.transitions) + 1))
ntransition.duality = border
border.duality = ntransition
self.transitions[ntransition.id] = ntransition
ntransition.start = start
ntransition.end = end
self.connectTransition(ntransition)
return ntransition
def addTransition(self, transition):
transition.layer = self
transition.start = self.states.get(transition.start, None)
if not transition.start:
return
transition.start.connects.append(transition)
t_end = transition.end
transition.end = self.states.get(t_end, None)
transition.duality = self.boundaries[transition.duality]
transition.duality.duality = transition
self.transitions[transition.id] = transition
if(transition.geometry):
transition.weight = transition.geometry.length
else:
transition.weight = 0
if not transition.end:
pass
else:
transition.end.connects.append(transition)
self.graph.add_edge(transition.start.id, transition.end.id,
transition.id, id=transition.id,
weight=transition.weight)
def find_bounds(self):
self.geometry = geometry.MultiPolygon(
[s.duality.geometry for s in self.states.values() if s.duality])
self.bounds = self.geometry.bounds
@classmethod
def loadFromXMLNode(cls, node):
layer = super(cls, cls).loadFromXMLNode(node)
layer.graph = nx.MultiGraph()
layer.states = {}
layer.transitions = {}
layer.cells = {}
layer.boundaries = {}
for n in node.findall('./indoorCore:class', ns):
layer.className = n.text
for n in node.findall('./indoorCore:usage', ns):
layer.usage = n.text
for n in node.findall('./indoorCore:function', ns):
layer.function = n.text
for n in node.findall("indoorCore:nodes//indoorCore:State", ns):
layer.addState(State.loadFromXMLNode(n))
for n in node.findall("indoorCore:edges//indoorCore:Transition", ns):
layer.addTransition(Transition.loadFromXMLNode(n))
layer.find_bounds()
layer.external_states = [s for s in layer.states.values()
if not s.geometry]
return layer
def addBoundary(self, type='CellSpaceBoundary'):
b = Boundary('%sB%d' % (self.id, len(self.boundaries) + 1))
b.type = type
self.boundaries[b.id] = b
return b
def addBoundaryWithGeometry(self, geometry, type='CellSpaceBoundary'):
b = self.addBoundary(type)
b.geometry = geometry
return b
class IndoorMap(GMLFeature):
"""IndoorMap is modeled after an indoorGML Multi Layered Graph"""
def __init__(self, id):
super(IndoorMap, self).__init__(id)
self.space_layers = {}
self.externalMaps = {}
self.states = {}
self.cells = {}
self.transitions = {}
self.boundaries = {}
self.file = ''
self.origin = (0, 0)
self.angle = 0
self.geometricLayer = None
def find_bounds(self):
layers = self.space_layers.values()
layers = list(layers)
if(len(layers)):
self.geometry = layers[0].geometry
for l in layers[1:]:
pass
self.bounds = self.geometry.bounds
else:
self.geometry = None
self.bounds = None
def addLayer(self, layer):
self.space_layers[layer.id] = layer
layer.map = self
def addInterEdgeFromXMLNode(self, node):
n = node.find("indoorCore:typeOfTopoExpression", ns)
if n is None:
return
t = node.find("indoorCore:typeOfTopoExpression", ns).text
if(t != "CONTAINS"):
return
startId = node.find("indoorCore:start", ns).get('%shref' % XLINKNS)[1:]
endId = node.find("indoorCore:end", ns).get('%shref' % XLINKNS)[1:]
startLayers = [l for l in self.space_layers.values()
if l.graph.has_node(startId)]
endLayers = [l for l in self.space_layers.values()
if l.graph.has_node(endId)]
if(len(startLayers) != 1):
raise Exception("Inter layer Connection %s not well formed. "
"Start Node %s in layers %s" %
(node, startId, startLayers))
if(len(endLayers) != 1):
raise Exception("Inter layer Connection %s not well formed. "
"End Node %s in layers %s" %
(node, endId, endLayers))
startState = startLayers[0].graph.node[startId]
endState = endLayers[0].graph.node[endId]
startState.setdefault("contains", []).append(endState)
endState.setdefault("contained_in", []).append(startState)
@classmethod
def loadFromFile(cls, file_name):
"""Load a multi layered graph from an indoorGML document"""
tree = ET.parse(file_name)
node = tree.getroot()
if not node.tag == INDOORNS + "MultiLayeredGraph":
node = node.find("indoorCore:MultiLayeredGraph", ns)
if node is not None:
m = cls.loadFromXMLNode(node)
m.file = file_name
return m
else:
raise Exception('Malformed xml file: no MultiLayeredGraph tag')
@classmethod
def loadFromXMLNode(cls, node):
mlg = super(cls, cls).loadFromXMLNode(node)
mlg.space_layers = {}
for n in node.findall(".//indoorCore:SpaceLayer", ns):
mlg.addLayer(Layer.loadFromXMLNode(n))
for n in node.findall(".//indoorCore:interEdges", ns):
mlg.addInterEdgeFromXMLNode(n)
mlg.states = {}
mlg.cells = {}
mlg.boundaries = {}
mlg.transitions = {}
for l in mlg.space_layers.values():
mlg.states.update(l.states)
mlg.cells.update(l.cells)
mlg.boundaries.update(l.boundaries)
mlg.transitions.update(l.transitions)
mlg.find_bounds()
return mlg
```
|
{
"source": "jeguzzi/robomaster_ros",
"score": 2
}
|
#### File: robomaster_ros/modules/chassis.py
```python
import math
import quaternion
import time
import rclpy.action
import sensor_msgs.msg
import geometry_msgs.msg
import robomaster_msgs.msg
import nav_msgs.msg
import std_srvs.srv
import rcl_interfaces.msg
import robomaster.robot
import robomaster.action
import robomaster.protocol
from typing import Optional, List, Tuple, Any
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ..client import RoboMasterROS
from .. import Module
from ..action import add_cb
from ..utils import Rate, deg, rad, rate, nearest_rate
RADIUS = 0.05
AXIS = 0.2
RPM2SPEED = 2 * math.pi * RADIUS / 60
G = 9.81
MAX_ESC_ANGLE = 32767
# rpm -> [linear] speed
def linear_speed_from_rpm(value: float) -> float:
return RPM2SPEED * value
# [linear] speed -> rpm:
def rpm_from_linear_speed(value: float) -> int:
return round(value / RPM2SPEED)
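# Quick sanity check of the rpm <-> linear speed conversion above (wheel radius 0.05 m):
# 60 rpm is one revolution per second, i.e. 2 * pi * RADIUS ~= 0.314 m/s, so
# linear_speed_from_rpm(60) ~= 0.314 and rpm_from_linear_speed(0.314) == 60.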
Esc = Tuple[List[int], List[int], List[int], List[int]]
SaStatus = Tuple[int, ...]
def esc2angle(value: int) -> float:
return value / MAX_ESC_ANGLE * 2 * math.pi
def esc2angular_speed(value: int) -> float:
return value / 60.0 * 2 * math.pi
def quaternion_from_euler(roll: float, pitch: float, yaw: float
) -> quaternion.quaternion:
# TODO(jerome):
return quaternion.from_euler_angles(roll, pitch, yaw)
def wheel_speeds_from_twist(vx: float, vy: float, vtheta: float,
axis_length: float = AXIS) -> Tuple[float, float, float, float]:
front_left = vx - vy - axis_length * vtheta # noqa
front_right = vx + vy + axis_length * vtheta # noqa
rear_left = vx + vy - axis_length * vtheta # noqa
rear_right = vx - vy + axis_length * vtheta # noqa
return (front_left, front_right, rear_left, rear_right)
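# Example of the mecanum mixing above: a pure rotation of 1 rad/s with the default
# axis length gives wheel_speeds_from_twist(0.0, 0.0, 1.0) == (-0.2, 0.2, -0.2, 0.2),
# i.e. the left wheels spin backwards and the right wheels forwards.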
WHEEL_FRAMES = ['front_right_wheel_joint', 'front_left_wheel_joint',
'rear_left_wheel_joint', 'rear_right_wheel_joint']
class Chassis(Module):
def __init__(self, robot: robomaster.robot.Robot, node: 'RoboMasterROS') -> None:
self.clock = node.get_clock()
self.logger = node.get_logger()
self.node = node
self.timeout: Optional[float] = node.declare_parameter("chassis.timeout", 0.0).value
if self.timeout == 0.0:
self.timeout = None
self.api = robot.chassis
self.twist_to_wheel_speeds: bool = node.declare_parameter(
"chassis.twist_to_wheel_speeds", False).value
self.force_level: bool = node.declare_parameter(
"chassis.force_level", False).value
if self.twist_to_wheel_speeds:
self.logger.info("topic cmd_vel will control wheel speeds")
else:
self.logger.info("topic cmd_vel will control chassis twist")
odom_frame = node.tf_frame('odom')
base_link = node.tf_frame('base_link')
self.odom_msg = nav_msgs.msg.Odometry()
self.odom_msg.header.frame_id = odom_frame
self.odom_msg.child_frame_id = base_link
self.imu_msg = sensor_msgs.msg.Imu()
self.imu_msg.header.frame_id = base_link
self.wheel_state_msg = sensor_msgs.msg.JointState(
name=[node.tf_frame(name) for name in WHEEL_FRAMES])
self.transform_msg = geometry_msgs.msg.TransformStamped(child_frame_id=base_link)
self.transform_msg.header.frame_id = odom_frame
self.odom_pub = node.create_publisher(nav_msgs.msg.Odometry, 'odom', 1)
self.imu_pub = node.create_publisher(sensor_msgs.msg.Imu, 'imu', 1)
self.chassis_state_pub = node.create_publisher(
robomaster_msgs.msg.ChassisStatus, 'state', 1)
chassis_rate = rate(node, 'chassis', 10)
status_rate = rate(node, 'chassis.status', 1)
if chassis_rate:
self.subscribe(chassis_rate)
if status_rate:
self.api.sub_status(freq=status_rate, callback=self.updated_status)
node.create_subscription(geometry_msgs.msg.Twist, 'cmd_vel', self.has_received_twist, 1)
node.create_subscription(
robomaster_msgs.msg.WheelSpeeds, 'cmd_wheels', self.has_received_wheel_speeds, 1)
# self.goal_handle: Optional[rclpy.action.server.ServerGoalHandle] = None
self.action: Optional[robomaster.action.Action] = None
cbg = rclpy.callback_groups.MutuallyExclusiveCallbackGroup()
self._move_action_server = rclpy.action.ActionServer(
node, robomaster_msgs.action.Move, 'move', self.execute_move_callback,
cancel_callback=self.cancel_move_callback, callback_group=cbg)
self.engage_server = node.create_service(std_srvs.srv.SetBool, 'engage_wheels',
self.engage_cb)
node.add_on_set_parameters_callback(self.set_params_cb)
def set_params_cb(self, params: Any) -> rcl_interfaces.msg.SetParametersResult:
for param in params:
if param.name == 'chassis.timeout':
if param.value > 0:
self.timeout = param.value
else:
self.timeout = None
elif param.name == 'chassis.twist_to_wheel_speeds':
self.twist_to_wheel_speeds = param.value
if self.twist_to_wheel_speeds:
self.logger.info("topic cmd_vel will control wheel speeds")
else:
self.logger.info("topic cmd_vel will control chassis twist")
elif param.name == 'chassis.force_level':
self.force_level = param.value
elif param.name == 'chassis.rate':
# TODO(Jerome): push the actual value back
chassis_rate = nearest_rate(param.value)
self.subscribe(chassis_rate)
elif param.name == 'chassis.status':
# TODO(Jerome): push the actual value back
status_rate = nearest_rate(param.value)
if status_rate:
self.api.sub_status(freq=status_rate, callback=self.updated_status)
return rcl_interfaces.msg.SetParametersResult(successful=True)
def engage(self, value: bool) -> None:
proto = robomaster.protocol.ProtoChassisSetWorkMode()
proto._mode = 1 if value else 0
self.api._send_sync_proto(proto)
self.logger.info(f"{'Engaged' if value else 'Disengaged'} wheel motors")
def engage_cb(self, request: std_srvs.srv.SetBool.Request,
response: std_srvs.srv.SetBool.Response) -> std_srvs.srv.SetBool.Response:
self.engage(request.data)
response.success = True
return response
def abort(self) -> None:
if self.action:
self.action._abort()
while self.action is not None:
self.logger.info("wait for the action to terminate")
time.sleep(0.1)
def subscribe(self, rate: Rate) -> None:
if rate:
# There is no need to unsubscribe
self.api.sub_position(cs=1, freq=rate, callback=self.updated_position)
self.api.sub_velocity(freq=rate, callback=self.updated_velocity)
self.api.sub_attitude(freq=rate, callback=self.updated_attitude)
self.api.sub_imu(freq=rate, callback=self.updated_imu)
self.api.sub_esc(freq=rate, callback=self.updated_esc)
def unsubscribe(self) -> None:
self.api.unsub_position()
self.api.unsub_velocity()
self.api.unsub_attitude()
self.api.unsub_imu()
self.api.unsub_esc()
def stop(self) -> None:
self._move_action_server.destroy()
self.engage_server.destroy()
if self.node.connected:
self.api.drive_wheels(0, 0, 0, 0)
self.unsubscribe()
self.api.unsub_status()
def has_received_twist(self, msg: geometry_msgs.msg.Twist) -> None:
if self.twist_to_wheel_speeds:
front_left, front_right, rear_left, rear_right = wheel_speeds_from_twist(
msg.linear.x, msg.linear.y, msg.angular.z)
self.api.drive_wheels(
w1=rpm_from_linear_speed(front_right), w2=rpm_from_linear_speed(front_left),
w3=rpm_from_linear_speed(rear_left), w4=rpm_from_linear_speed(rear_right),
timeout=self.timeout)
else:
self.api.drive_speed(
x=msg.linear.x, y=-msg.linear.y, z=-deg(msg.angular.z), timeout=self.timeout)
def has_received_wheel_speeds(self, msg: robomaster_msgs.msg.WheelSpeeds) -> None:
self.api.drive_wheels(
w1=rpm_from_linear_speed(msg.front_right), w2=rpm_from_linear_speed(msg.front_left),
w3=rpm_from_linear_speed(msg.rear_left), w4=rpm_from_linear_speed(msg.rear_right),
timeout=self.timeout)
def updated_position(self, msg: Tuple[float, float, float]) -> None:
position = self.odom_msg.pose.pose.position
(position.x, position.y) = (msg[0], -msg[1])
def updated_velocity(self, msg: Tuple[float, float, float, float, float, float]) -> None:
velocity = self.odom_msg.twist.twist.linear
(velocity.x, velocity.y) = (msg[0], -msg[1])
# (yaw, pitch, roll)
def updated_attitude(self, msg: Tuple[float, float, float]) -> None:
orientation = self.odom_msg.pose.pose.orientation
yaw = -rad(msg[0])
if self.force_level:
pitch = 0.0
roll = 0.0
else:
pitch = rad(msg[1])
roll = -rad(msg[2])
q = quaternion_from_euler(yaw=yaw, pitch=pitch, roll=roll)
(orientation.x, orientation.y, orientation.z, orientation.w) = (q.x, q.y, q.z, q.w)
# (acc, ang vel)
def updated_imu(self, msg: Tuple[float, float, float, float, float, float]) -> None:
# No angle, as of now
# Check coppeliaSim IMU == REAL IMU == ROS conventions (0, 0, +G) when idle
acceleration = self.imu_msg.linear_acceleration
(acceleration.x, acceleration.y, acceleration.z) = [
f * G * value for value, f in zip(msg[:3], (1, -1, -1))]
angular_speed = self.imu_msg.angular_velocity
(angular_speed.x, angular_speed.y, angular_speed.z) = [
f * value for value, f in zip(msg[3:], (1, -1, -1))]
# TODO(jerome): better? synchronization (should also check the jittering)
stamp = self.clock.now().to_msg()
self.imu_msg.orientation = self.odom_msg.pose.pose.orientation
self.odom_msg.twist.twist.angular = self.imu_msg.angular_velocity
self.odom_msg.header.stamp = stamp
self.odom_pub.publish(self.odom_msg)
self.imu_msg.header.stamp = stamp
self.imu_pub.publish(self.imu_msg)
position = self.odom_msg.pose.pose.position
translation = self.transform_msg.transform.translation
(translation.x, translation.y, translation.z) = (position.x, position.y, position.z)
self.transform_msg.transform.rotation = self.odom_msg.pose.pose.orientation
self.transform_msg.header.stamp = stamp
self.node.tf_broadcaster.sendTransform(self.transform_msg)
# (speeds + angles + timestamps + states)
def updated_esc(self, msg: Esc) -> None:
self.wheel_state_msg.position = [
esc2angle(f * value) for value, f in zip(msg[1], (1, -1, -1, 1))]
self.wheel_state_msg.velocity = [
esc2angular_speed(f * value) for value, f in zip(msg[0], (1, -1, -1, 1))]
self.wheel_state_msg.header.stamp = self.clock.now().to_msg()
self.node.joint_state_pub.publish(self.wheel_state_msg)
# (status flags)
def updated_status(self, msg: SaStatus) -> None:
keys = [key for key in robomaster_msgs.msg.ChassisStatus._fields_and_field_types.keys()
if key != 'header']
kwargs = {k: bool(value) for k, value in zip(keys, msg)}
ros_msg = robomaster_msgs.msg.ChassisStatus(**kwargs)
ros_msg.header.stamp = self.clock.now().to_msg()
self.chassis_state_pub.publish(ros_msg)
def execute_move_callback(self, goal_handle: Any) -> robomaster_msgs.action.Move.Result:
# TODO(jerome): Complete with ... velocity parameters
# DONE(jerome): Complete with failures
request = goal_handle.request
try:
self.action = self.api.move(
x=request.x, y=-request.y, z=deg(request.theta), xy_speed=request.linear_speed,
z_speed=deg(request.angular_speed))
except RuntimeError as e:
self.logger.warning(f'Cannot move: {e}')
goal_handle.abort()
return robomaster_msgs.action.Move.Result()
self.logger.info(f'Start moving chassis with request {request}')
feedback_msg = robomaster_msgs.action.Move.Feedback()
def cb() -> None:
# if self.action._percent > 50:
# self.action._abort()
# return
feedback_msg.progress = self.action._percent * 0.01 # type: ignore
goal_handle.publish_feedback(feedback_msg)
add_cb(self.action, cb)
# while action.is_running:
# time.sleep(0.01)
self.action.wait_for_completed()
if self.action.has_succeeded:
goal_handle.succeed()
elif goal_handle.is_cancel_requested:
goal_handle.canceled()
else:
goal_handle.abort()
self.logger.info('Done moving chassis')
self.action = None
return robomaster_msgs.action.Move.Result()
def cancel_move_callback(self, goal_handle: Any) -> rclpy.action.CancelResponse:
self.logger.warn('It is not possible to cancel onboard actions')
return rclpy.action.CancelResponse.REJECT
# if self.action:
# self.logger.info('Canceling move action')
# self.action._abort()
# return rclpy.action.CancelResponse.ACCEPT
```
#### File: robomaster_ros/modules/sensor_adapter.py
```python
from typing import Tuple, List, Optional
import robomaster_msgs.msg
import robomaster_msgs.srv
import robomaster.robot
import robomaster.sensor
import rclpy.qos
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ..client import RoboMasterROS
from .. import Module
from ..utils import rate
SensorAdapterData = Tuple[List[int], List[int]]
class SensorAdapter(Module):
def __init__(self, robot: robomaster.robot.Robot, node: 'RoboMasterROS') -> None:
self.api = robot.sensor_adaptor
self.node = node
self.clock = node.get_clock()
self.pub = node.create_publisher(
robomaster_msgs.msg.SensorAdapter, 'sensor_adapter',
rclpy.qos.qos_profile_sensor_data)
_rate = rate(node, 'sensor_adapter', 0)
if _rate:
self.api.sub_adapter(freq=_rate, callback=self.updated_reading)
node.create_service(robomaster_msgs.srv.GetADC, 'get_adc', self.get_adc_cb)
node.create_service(robomaster_msgs.srv.GetIO, 'get_io', self.get_io_cb)
node.create_service(robomaster_msgs.srv.GetPulse, 'get_pulse', self.get_pulse_cb)
def stop(self) -> None:
self.api.unsub_adapter()
def abort(self) -> None:
pass
def updated_reading(self, msg: SensorAdapterData) -> None:
ros_msg = robomaster_msgs.msg.SensorAdapter(io=msg[0], adc=msg[1])
ros_msg.header.stamp = self.clock.now().to_msg()
self.pub.publish(ros_msg)
def get_adc_cb(self, request: robomaster_msgs.srv.GetADC.Request,
response: robomaster_msgs.srv.GetADC.Response
) -> robomaster_msgs.srv.GetADC.Response:
value: Optional[int] = self.api.get_adc(id=request.id, port=request.port)
if value is not None:
response.value = value
response.valid = True
else:
response.value = 0
response.valid = False
return response
def get_io_cb(self, request: robomaster_msgs.srv.GetIO.Request,
response: robomaster_msgs.srv.GetIO.Response
) -> robomaster_msgs.srv.GetIO.Response:
value: Optional[int] = self.api.get_io(id=request.id, port=request.port)
if value is not None:
response.value = value
response.valid = True
else:
response.value = 0
response.valid = False
return response
def get_pulse_cb(self, request: robomaster_msgs.srv.GetPulse.Request,
response: robomaster_msgs.srv.GetPulse.Response
) -> robomaster_msgs.srv.GetPulse.Response:
value: Optional[int] = self.api.get_pulse(id=request.id, port=request.port)
if value is not None:
response.time_ms = value
response.valid = True
else:
response.time_ms = 0
response.valid = False
return response
```
#### File: robomaster_ros/robomaster_ros/robomaster_driver.py
```python
from typing import Any
import time
import rclpy
import rclpy.executors
import rclpy.logging
from robomaster_ros.client import RoboMasterROS
def main(args: Any = None) -> None:
rclpy.init(args=args)
executor = rclpy.executors.MultiThreadedExecutor()
# TODO(Jerome): currently not triggered by ctrl+C
# rclpy.get_default_context().on_shutdown(...)
should_reconnect = True
while should_reconnect:
node = RoboMasterROS(executor=executor)
should_reconnect = node.reconnect
if not node.disconnection.done():
try:
rclpy.spin_until_future_complete(node, node.disconnection, executor=executor)
except KeyboardInterrupt:
node.get_logger().warn('KeyboardInterrupt')
should_reconnect = False
node.abort()
if rclpy.ok():
rclpy.spin_once(node, executor=executor, timeout_sec=0.1)
node.stop()
if rclpy.ok():
rclpy.spin_once(node, executor=executor, timeout_sec=0.1)
node.destroy_node()
time.sleep(0.1)
if rclpy.ok():
rclpy.shutdown()
```
#### File: robomaster_ros/robomaster_ros/utils.py
```python
import math
from typing import cast, List
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal # type: ignore
import rclpy.node
def deg(rad: float) -> float:
return 180 / math.pi * rad
def rad(deg: float) -> float:
return math.pi / 180 * deg
Rate = Literal[0, 1, 5, 10, 20, 50]
RATES: List[Rate] = [0, 1, 5, 10, 20, 50]
def nearest_rate(value: int) -> Rate:
return cast(Rate, nearest(value, cast(List[int], RATES)))
def nearest_index(value: int, values: List[int]) -> int:
ds = [abs(value - r) for r in values]
d = min(ds)
return ds.index(d)
def nearest(value: int, values: List[int]) -> int:
return values[nearest_index(value, values)]
def rate(node: rclpy.node.Node, name: str, default: Rate) -> Rate:
# DONE: add the RATE constraint
return nearest_rate(node.declare_parameter(f"{name}.rate", default).value)
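# A few worked examples of the helpers above:
# deg(math.pi) == 180.0 and rad(180.0) == math.pi
# nearest_rate(7) == 5, nearest_rate(30) == 20 (ties resolve to the first candidate)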
```
|
{
"source": "jeguzzi/RoboMaster-SDK",
"score": 2
}
|
#### File: src/robomaster/protocol.py
```python
import struct
import binascii
from abc import abstractmethod
from . import algo
from . import logger
""" struct 速查表
format C Python size
x pad byte no value 0
c char string 1 1
b signed char integer 1
B unsigned char integer 1
? bool bool 1
h short integer 2
H unsigned short integer 2
i int integer 4
I unsigned int integer 4
l long integer 4
L unsigned long long 4
q long long long 8
Q unsigned lonlon long 8
f float float 4
d double float 8
s char[] string
p char[] string
P void* long
"""
__all__ = ['Msg', 'TextMsg']
# Default seq ID value range
RM_SDK_FIRST_SEQ_ID = 10000
RM_SDK_LAST_SEQ_ID = 20000
# Protocol ACK types
DUSS_MB_ACK_NO = 0
DUSS_MB_ACK_NOW = 1
DUSS_MB_ACK_FINISH = 2
# Protocol encryption types
DUSS_MB_ENC_NO = 0
DUSS_MB_ENC_AES128 = 1
DUSS_MB_ENC_CUSTOM = 2
# Protocol message types
DUSS_MB_TYPE_REQ = 0
DUSS_MB_TYPE_PUSH = 1
def host2byte(host, index):
return index * 32 + host
def byte2host(b):
return (b & 0x1f), (b >> 5)
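# Worked example of the addressing scheme above: host2byte(3, 6) == 6 * 32 + 3 == 0xc3,
# and byte2host(0xc3) == (3, 6), i.e. the low 5 bits carry the host and the upper bits the index.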
def make_proto_cls_key(cmdset, cmdid):
return cmdset * 256 + cmdid
# registered protocol dict.
registered_protos = {}
class _AutoRegisterProto(type):
""" help to automatically register Proto Class where ever they're defined """
def __new__(mcs, name, bases, attrs, **kw):
return super().__new__(mcs, name, bases, attrs, **kw)
def __init__(cls, name, bases, attrs, **kw):
super().__init__(name, bases, attrs, **kw)
if name == 'ProtoData':
return
key = make_proto_cls_key(attrs['_cmdset'], attrs['_cmdid'])
if key in registered_protos.keys():
raise ValueError("Duplicate proto class %s" % (name))
registered_protos[key] = cls
class ProtoData(metaclass=_AutoRegisterProto):
_cmdset = None
_cmdid = None
_cmdtype = DUSS_MB_TYPE_REQ
_req_size = 0
_resp_size = 0
def __init__(self, **kwargs):
self._buf = None
self._len = None
def __repr__(self):
return "<{0} cmset:0x{1:2x}, cmdid:0x{2:02x}>".format(self.__class__.__name__, self._cmdset, self._cmdid)
@property
def cmdset(self):
return self._cmdset
@cmdset.setter
def cmdset(self, value):
self._cmdset = value
@property
def cmdid(self):
return self._cmdid
@cmdid.setter
def cmdid(self, value):
self._cmdid = value
@property
def cmdkey(self):
if self._cmdset is not None and self._cmdid is not None:
return self._cmdset * 256 + self._cmdid
else:
return None
@abstractmethod
def pack_req(self):
""" 协议对象打包发送数据为字节流
:return: 字节流数据
"""
return b''
# @abstractmethod
def unpack_req(self, buf, offset=0):
""" 从字节流解包
:param buf:字节流数据
:param offset:字节流数据偏移量
:return:True 解包成功;False 解包失败
"""
return True
# @abstractmethod
def pack_resp(self):
""" 协议对象打包
:return:字节流数据
"""
pass
# return True when retcode == zero
# return False when retcode is not zero
# raise exceptions when internal errors occur.
def unpack_resp(self, buf, offset=0):
""" 从字节流解包为返回值和相关属性
:param buf:字节流数据
:param offset:字节流数据偏移量
:return: bool: 调用结果
"""
self._retcode = buf[offset]
if self._retcode == 0:
return True
else:
return False
class MsgBase(object):
_next_seq_id = RM_SDK_FIRST_SEQ_ID
def __init__(self):
pass
class Msg(MsgBase):
def __init__(self, sender=0, receiver=0, proto=None):
self._len = 13 # default length, msg header and crc.
self._sender = sender
self._receiver = receiver
self._attri = 0
self._cmdset = None
self._cmdid = None
self._is_ack = False # True or False
self._need_ack = 2 # 0 for no need, 1 for ack now, 2 for need when finish.
if self.__class__._next_seq_id == RM_SDK_LAST_SEQ_ID:
self.__class__._next_seq_id = RM_SDK_FIRST_SEQ_ID
else:
self.__class__._next_seq_id += 1
self._seq_id = self._next_seq_id
self._proto = proto
if self._proto:
self._cmdset = self._proto.cmdset
self._cmdid = self._proto.cmdid
if self._proto._cmdtype == DUSS_MB_TYPE_PUSH:
self._need_ack = 0
self._buf = None
def __repr__(self):
return "<Msg sender:0x{0:02x}, receiver:0x{1:02x}, cmdset:0x{2:02x}, cmdid:0x{3:02x}, len:{4:d}, \
seq_id:{5:d}, is_ack:{6:d}, need_ack:{7:d}>".format(self._sender, self._receiver, self._cmdset, self._cmdid,
self._len, self._seq_id, self._is_ack, self._need_ack)
@property
def cmdset(self):
return self._cmdset
@property
def cmdid(self):
return self._cmdid
@property
def is_ack(self):
return self._is_ack
@property
def receiver(self):
host, index = byte2host(self._receiver)
return "{0:02d}{1:02d}".format(host, index)
@property
def sender(self):
host, index = byte2host(self._sender)
return "{0:02d}{1:02d}".format(host, index)
def pack(self, is_ack=False):
""" Msg 消息打包
:param is_ack: bool: 是否是ack消息
:return: bytearray,消息字节流
"""
self._len = 13
try:
if self._proto:
data_buf = b''
if is_ack:
self._neek_ack = False
data_buf = self._proto.pack_resp()
else:
self._neek_ack = (self._proto._cmdtype == DUSS_MB_TYPE_REQ)
data_buf = self._proto.pack_req()
self._len += len(data_buf)
except Exception as e:
logger.warning("Msg: pack, cmset:0x{0:02x}, cmdid:0x{1:02x}, proto: {2}, "
"exception {3}".format(self.cmdset, self.cmdid, self._proto.__class__.__name__, e))
self._buf = bytearray(self._len)
self._buf[0] = 0x55
self._buf[1] = self._len & 0xff
self._buf[2] = (self._len >> 8) & 0x3 | 4
crc_h = algo.crc8_calc(self._buf[0:3])
# attri = is_ack|need_ack|enc
self._attri = 1 << 7 if self._is_ack else 0
self._attri += self._need_ack << 5
self._buf[3] = crc_h
self._buf[4] = self._sender
self._buf[5] = self._receiver
self._buf[6] = self._seq_id & 0xff
self._buf[7] = (self._seq_id >> 8) & 0xff
self._buf[8] = self._attri
if self._proto:
self._buf[9] = self._proto.cmdset
self._buf[10] = self._proto.cmdid
self._buf[11:11 + len(data_buf)] = data_buf
else:
raise Exception("Msg: pack Error.")
# calc whole msg crc16
crc_m = algo.crc16_calc(self._buf[0:self._len - 2])
struct.pack_into('<H', self._buf, self._len - 2, crc_m)
logger.debug("Msg: pack, len:{0}, seq_id:{1}, buf:{2}".format(
self._len, self._seq_id, binascii.hexlify(self._buf)))
return self._buf
# unpack proto after recv msg, raise excpetion when error occur.
def unpack_protocol(self):
""" 从自身的buf数据解码协议及协议内容。
"""
key = make_proto_cls_key(self._cmdset, self._cmdid)
if key in registered_protos.keys():
self._proto = registered_protos[key]()
try:
if self._is_ack:
if not self._proto.unpack_resp(self._buf):
logger.warning("Msg: unpack_protocol, msg:{0}".format(self))
return False
else:
if not self._proto.unpack_req(self._buf):
logger.warning("Msg: unpack_protocol, msg:{0}".format(self))
return False
return True
except Exception as e:
logger.warning("Msg: unpack_protocol, {0} failed e {1}".format(self._proto.__class__.__name__, e))
raise
else:
logger.info("Msg: unpack_protocol, cmdset:0x{0:02x}, cmdid:0x{1:02x}, class is not registerin registered_\
protos".format(self._cmdset, self._cmdid))
pass
logger.warning("Msg: unpack_protocol, not registered_protocol, cmdset:0x{0:02x}, cmdid:0x{1:02x}".format(
self._cmdset, self._cmdid))
return False
def get_proto(self):
return self._proto
class TextMsg(MsgBase):
IS_DDS_FLAG = ";mpry:"
def __init__(self, proto=None):
self._buf = None
self._len = 0
self._need_ack = 0
if self.__class__._next_seq_id == RM_SDK_LAST_SEQ_ID:
self.__class__._next_seq_id = RM_SDK_FIRST_SEQ_ID
else:
self.__class__._next_seq_id += 1
self._seq_id = self._next_seq_id
self._proto = proto
def __repr__(self):
return "<{0}, {1}>".format(self.__class__.__name__, self._proto.resp)
def pack(self):
if self._proto:
data_buf = self._proto.pack_req()
"""pack the proto to msg"""
self._buf = data_buf
return self._buf
def unpack_protocol(self):
self._proto = TextProtoDrone()
if not self._proto.unpack_resp(self._buf):
logger.warning("TextMsg: unpack_protocol, msg:{0}".format(self))
return False
return True
def get_proto(self):
return self._proto
def get_buf(self):
return self._buf
def decode_msg(buff, protocol="v1"):
if protocol == "v1":
if len(buff) < 4:
logger.info("decode_msg, recv buf is not enouph.")
return None, buff
if buff[0] != 0x55:
logger.warning("decode_msg, magic number is invalid.")
return None, buff
if algo.crc8_calc(buff[0:3]) != buff[3]:
logger.warning("decode_msg, crc header check failed.")
return None, buff
msg_len = (buff[2] & 0x3) * 256 + buff[1]
if len(buff) < msg_len:
logger.warning("decode_msg, msg data is not enough, msg_len:{0}, buf_len:{1}".format(msg_len, len(buff)))
return None, buff
# unpack from byte array.
msg = Msg(buff[9], buff[10])
msg._len = msg_len
msg._seq_id = buff[7] * 256 + buff[6]
msg._attri = buff[8]
msg._sender = buff[4]
msg._receiver = buff[5]
msg._cmdset = int(buff[9])
msg._cmdid = int(buff[10])
msg._is_ack = msg._attri & 0x80 != 0
msg._need_ack = (msg._attri & 0x60) >> 5
msg._buf = buff[11:msg._len - 2]
left_buf = buff[msg_len:]
return msg, left_buf
elif protocol == "text":
# unpack
msg = TextMsg()
# filter out '\0xcc'
if buff[0] == 204:
logger.warning("decode_msg: recv invalid data, buff {0}".format(buff))
return None, bytearray()
else:
msg._buf = buff.decode(encoding='utf-8')
msg._len = len(msg._buf)
return msg, bytearray()
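# Byte layout of a "v1" frame, as built by Msg.pack() and parsed above
# (multi-byte fields are little-endian):
# [0] 0x55 magic
# [1] length, low byte
# [2] length, two high bits (ORed with a constant 0x04)
# [3] crc8 of bytes [0:3]
# [4] sender  [5] receiver
# [6:8] seq_id
# [8] attri: bit 7 = is_ack, bits 6..5 = need_ack
# [9] cmdset  [10] cmdid
# [11:len-2] protocol payload
# [len-2:] crc16 of everything before it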
################################################################################
class ProtoGetVersion(ProtoData):
_cmdset = 0
_cmdid = 1
_resp_size = 30
def __init__(self):
self._aa = 0
self._bb = 1
self._cc = 0
self._dd = 0
self._build = 1
self._version = 0
self._minor = 1
self._major = 0
self._cmds = 0
self._rooback = 0
self._retcode = 0
def pack_req(self):
return b''
def unpack_resp(self, buf, offset=0):
if len(buf) < self._resp_size:
raise Exception("buf length is not enouph.")
self._retcode = buf[0]
if self._retcode != 0:
return False
self._aa = buf[0]
self._bb = buf[1]
self._cc = buf[2]
self._dd = buf[3]
return True
class ProtoGetProductVersion(ProtoData):
_cmdset = 0
_cmdid = 0x4f
_resp_size = 9
def __init__(self):
self._file_type = 4
self._version = None
def pack_req(self):
buf = bytearray(self._resp_size)
buf[0] = self._file_type
buf[5] = 0xff
buf[6] = 0xff
buf[7] = 0xff
buf[8] = 0xff
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
cc, bb, aa = struct.unpack_from("<HBB", buf, 9)
self._version = "{0:02d}.{1:02d}.{2:04d}".format(aa, bb, cc)
return True
else:
self._version = None
logger.warning("ProtoGetProductVersion, unpack_resp, retcode {0}".format(self._retcode))
return False
class ProtoGetSn(ProtoData):
_cmdset = 0x0
_cmdid = 0x51
_req_size = 1
def __init__(self):
self._type = 1
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._type
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[offset]
if self._retcode == 0:
self._length = buf[offset + 1]
self._sn = buf[offset + 3:self._length + offset + 3].decode('utf-8', 'ignore')
return True
else:
return False
class ProtoTakePhoto(ProtoData):
_cmdset = 0x2
_cmdid = 0x1
_req_size = 1
def __init__(self):
self._type = 1
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._type
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
return True
else:
return False
class ProtoSetZoom(ProtoData):
_cmdset = 0x2
_cmdid = 0x34
_req_size = 6
def __init__(self):
self._digital_enable = 1
self._digital_zoom = 1.0
self._digital_type = 1
self._digital_value = 1
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._digital_enable << 3 | self._digital_type
struct.pack_into("<h", buf, 4, self._digital_value)
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
return True
else:
return False
class ProtoGetZoom(ProtoData):
_cmdset = 0x2
_cmdid = 0x35
def __init__(self):
pass
def pack_req(self):
return b''
def unpack_resp(self, buf, offset=0):
return True
class ProtoSetWhiteBalance(ProtoData):
_cmdset = 0x2
_cmdid = 0x2c
_req_size = 5
def __init__(self):
self._type = 0 # 0 for auto, 6 for manual
self._temp1 = 0
self._temp2 = 0
self._tint = 0
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._type
buf[1] = self._temp1
buf[2] = self._temp2
struct.pack_into("<h", buf, 3, self._tint)
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
return True
else:
return False
class ProtoFcSubInfoReq(ProtoData):
_cmdset = 0x3
_cmdid = 0x14
_req_size = 5
def __init__(self):
self._bagid = 0
self._freq = 10
self._timestamp = 0
self._data_num = 0
self._uuid_list = []
def pack_req(self):
buf = bytearray(self._req_size + self._data_num * 4)
buf[0] = self._bagid
struct.pack_into("<H", buf, 1, self._freq)
buf[3] = self._timestamp
buf[4] = self._data_num
for i, uuid in enumerate(self._uuid_list):
struct.pack_into("<I", buf, 5 + i * 4, uuid)
logger.debug("ProtoFcSubInfoReq, pack_req buf {0}".format(binascii.hexlify(buf)))
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
return True
else:
logger.warning("ProtoFcSubInfoReq: unpack_resp, retcode:{0}".format(self._retcode))
return False
class ProtoGimbalCtrlSpeed(ProtoData):
_cmdset = 0x4
_cmdid = 0xc
_req_size = 8
_cmdtype = DUSS_MB_TYPE_PUSH
def __init__(self):
self._yaw_speed = 0
self._roll_speed = 0
self._pitch_speed = 0
self._ctrl_byte = 0xdc
self._ctrl_byte_extend = 0
self._err_yaw_limit = 0
self._err_roll_limit = 0
self._err_pitch_limit = 0
self._auth = 0
self._prior = 0
def pack_req(self):
buf = bytearray(self._req_size)
struct.pack_into("<hhh", buf, 0, self._yaw_speed, self._roll_speed, self._pitch_speed)
buf[6] = self._ctrl_byte
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[offset]
if self._retcode == 0:
return True
else:
logger.warning("ProtoGimbalCtrlSpeed: unpack_resp, retcode:{0}".format(self._retcode))
return False
class ProtoArmorHitEvent(ProtoData):
_cmdset = 0x3f
_cmdid = 0x2
_resp_size = 0
def __init__(self):
self._index = 0
self._type = 0
self._mic_value = 0
self._acc_value = 0
self._data_buf = None
def pack_req(self):
return b''
def unpack_req(self, buf, offset=0):
self._index = buf[0] >> 4
self._type = buf[0] & 0xf
self._mic_value, self._mic_len = struct.unpack('<HH', buf[1:])
self._data_buf = [self._index, self._type, self._mic_value]
return True
class ProtoIrHitEvent(ProtoData):
_cmdset = 0x3f
_cmdid = 0x10
_resp_size = 0
def __init__(self):
self._skill_id = 0
self._role_id = 0
self._recv_dev = 0
self._recv_ir_pin = 0
self._data_buf = None
def pack_req(self):
return b''
def unpack_req(self, buf, offset=0):
self._role_id = buf[0] >> 4
self._skill_id = buf[0] & 0xf
self._recv_dev, self._recv_ir_pin = struct.unpack('<BB', buf[1:])
self._data_buf = [self._skill_id, self._role_id, self._recv_dev, self._recv_ir_pin]
return True
class ProtoSetArmorParam(ProtoData):
_cmdset = 0x3f
_cmdid = 0x7
_resp_size = 19
def __init__(self):
self._armor_mask = 0
self._voice_energy_en = 0
self._voice_energy_ex = 0
self._voice_len_max = 0
self._voice_len_min = 0
self._voice_len_silence = 0
self._voice_peak_count = 0
self._voice_peak_min = 0
self._voice_peak_ave = 0
self._voice_peak_final = 0
def pack_req(self):
buf = bytearray(self._resp_size)
struct.pack_into('<BHHHHHHHHH', buf, 0, self._armor_mask, self._voice_energy_en,
self._voice_energy_ex, self._voice_len_max, self._voice_len_min,
self._voice_len_silence, self._voice_peak_count, self._voice_peak_min,
self._voice_peak_ave, self._voice_peak_final)
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
return True
else:
return False
class ProtoChassisWheelSpeed(ProtoData):
_cmdset = 0x3f
_cmdid = 0x26
_req_size = 4
def __init__(self):
self._w1_spd = 0
self._w2_spd = 0
self._w3_spd = 0
self._w4_spd = 0
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._w1_spd
buf[1] = self._w2_spd
buf[2] = self._w3_spd
buf[3] = self._w4_spd
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
return True
else:
return False
class ProtoSetSystemLed(ProtoData):
_cmdset = 0x3f
_cmdid = 0x33
_req_size = 15
def __init__(self):
self._comp_mask = 0x3f
self._led_mask = 0xff
self._ctrl_mode = 0
self._effect_mode = 0
self._r = 0xff
self._g = 0xff
self._b = 0xff
self._loop = 0
self._t1 = 100
self._t2 = 100
def pack_req(self):
buf = bytearray(self._req_size)
struct.pack_into("<I", buf, 0, self._comp_mask)
struct.pack_into("<h", buf, 4, self._led_mask)
buf[6] = self._ctrl_mode << 4 | self._effect_mode
buf[7] = self._r
buf[8] = self._g
buf[9] = self._b
buf[10] = self._loop
struct.pack_into("<hh", buf, 11, self._t1, self._t2)
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
return True
else:
return False
class ProtoSetRobotMode(ProtoData):
_cmdset = 0x3f
_cmdid = 0x46
_req_size = 1
def __init__(self):
self._mode = 1
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._mode
return buf
def unpack_resp(self, buff, offset=0):
self._retcode = buff[0]
if self._retcode == 0:
return True
else:
return False
class ProtoGetRobotMode(ProtoData):
_cmdset = 0x3f
_cmdid = 0x47
def __init__(self):
self._mode = 0
def pack_req(self):
return b''
def unpack_resp(self, buf, offset=0):
self._retcode = buf[offset]
if self._retcode == 0:
self._mode = buf[offset + 1]
return True
else:
return False
class ProtoBlasterFire(ProtoData):
_cmdset = 0x3f
_cmdid = 0x51
_req_size = 1
def __init__(self):
self._type = 0
self._times = 0
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._type << 4 | self._times
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
return True
else:
return False
class ProtoBlasterSetLed(ProtoData):
_cmdset = 0x3f
_cmdid = 0x55
_req_size = 9
_cmdtype = DUSS_MB_TYPE_PUSH
def __init__(self):
self._mode = 7
self._effect = 0
self._r = 0xff
self._g = 0xff
self._b = 0xff
self._times = 1
self._t1 = 100
self._t2 = 100
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._mode << 4 | self._effect
buf[1] = self._r
buf[2] = self._g
buf[3] = self._b
buf[4] = self._times
struct.pack_into("<HH", buf, 5, self._t1, self._t2)
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
return True
else:
return False
class ProtoSetSdkMode(ProtoData):
_cmdset = 0x3f
_cmdid = 0xd1
_req_size = 1
def __init__(self):
self._enable = 1
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._enable
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[offset]
if self._retcode == 0:
return True
else:
return False
class ProtoStreamCtrl(ProtoData):
_cmdset = 0x3f
_cmdid = 0xd2
_req_size = 3
def __init__(self):
self._ctrl = 1
self._conn_type = 0
self._state = 1
self._resolution = 0
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._ctrl
buf[1] = self._conn_type << 4 | self._state
buf[2] = self._resolution
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
return True
else:
return False
class ProtoSetSdkConnection(ProtoData):
_cmdset = 0x3f
_cmdid = 0xd4
_req_size = 10
def __init__(self):
self._control = 0
self._host = 0
self._connection = 0
self._protocol = 0
self._ip = '0.0.0.0'
self._port = 10010
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._control
buf[1] = self._host
buf[2] = self._connection
buf[3] = self._protocol
ip_bytes = bytes(map(int, self._ip.split('.')))
buf[4:8] = ip_bytes
struct.pack_into("<H", buf, 8, self._port)
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
self._state = buf[1]
if self._state == 2:
self._config_ip = "{0:d}.{1:d}.{2:d}.{3:d}".format(buf[2], buf[3], buf[4], buf[5])
return True
else:
return False
class ProtoSdkHeartBeat(ProtoData):
_cmdset = 0x3f
_cmdid = 0xd5
_req_size = 0
def __init__(self):
pass
def pack_req(self):
return b''
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
return True
else:
return False
class ProtoGimbalSetWorkMode(ProtoData):
_cmdset = 0x4
_cmdid = 0x4c
_req_size = 2
def __init__(self):
self._workmode = 0
self._recenter = 0
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._workmode
buf[1] = self._recenter
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
return True
else:
return False
class ProtoGimbalCtrl(ProtoData):
_cmdset = 0x4
_cmdid = 0xd
_req_size = 2
def __init__(self):
self._order_code = 0x2ab5
def pack_req(self):
buf = bytearray(self._req_size)
struct.pack_into("<H", buf, 0, self._order_code)
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
return True
else:
return False
class ProtoPlaySound(ProtoData):
_cmdset = 0x3f
_cmdid = 0xb3
_req_size = 10
def __init__(self):
self._action_id = 0
self._push_freq = 2
self._task_ctrl = 0
self._sound_id = 0
self._play_ctrl = 1  # 0: stop playback  1: interrupting playback  2: blended playback  3: ignoring playback
self._interval = 0
self._play_times = 0
self._retcode = None
self._accept = None
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._action_id
buf[1] = self._task_ctrl | self._push_freq << 2
struct.pack_into('<I', buf, 2, self._sound_id)
buf[6] = self._play_ctrl
struct.pack_into('<H', buf, 7, self._interval)
buf[9] = self._play_times
logger.debug("ProtoPlaySound: pack_req, buf: {0}".format(binascii.hexlify(buf)))
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[offset]
logger.debug(
"ProtoPlaySound unpack_resp, buf : {0}, self._retcode: {1}".format(binascii.hexlify(buf), self._retcode))
if self._retcode == 0:
self._accept = buf[offset + 1]
return True
else:
return False
@property
def sound_id(self):
return self._sound_id
@sound_id.setter
def sound_id(self, value):
self._sound_id = value
@property
def play_times(self):
return self._play_times
@play_times.setter
def play_times(self, value):
self._play_times = value
class ProtoSoundPush(ProtoData):
_cmdset = 0x3f
_cmdid = 0xb4
def __init__(self):
self._action_id = 0
self._percent = 0
self._reserved = 0
self._error_reason = 0
self._action_state = 0
self._sound_id = 0
def pack_req(self):
return b''
# ack push
def unpack_req(self, buf, offset=0):
self._action_id = buf[0]
self._percent = buf[1]
self._error_reason = buf[2] >> 2 & 0x03
self._action_state = buf[2] & 0x03
self._sound_id = struct.unpack_from('<I', buf, 3)[0]
logger.debug("ProtoSoundPush unpack_req, buf {0}".format(binascii.hexlify(buf)))
return True
def unpack_resp(self, buf, offset=0):
self._action_id = buf[offset]
self._percent = buf[offset + 1]
self._error_reason = buf[offset + 2] >> 2 & 0x03
self._action_state = buf[offset + 2] & 0x03
self._sound_id = struct.unpack_from('<I', buf, offset + 3)[0]
logger.debug("ProtoSoundPush unpack_resp, buf {0}".format(binascii.hexlify(buf)))
return True
@property
def percent(self):
return self._percent
@percent.setter
def percent(self, value):
self._percent = value
@property
def sound_id(self):
return self._sound_id
@sound_id.setter
def sound_id(self, value):
        self._sound_id = value
class ProtoGimbalRotate(ProtoData):
_cmdset = 0x3f
_cmdid = 0xb0
_req_size = 17
def __init__(self):
self._action_id = 0
self._action_ctrl = 0 # 0 for start, 1 for cancel
self._push_freq = 2 # 0 for 1Hz, 1 for 5Hz, 2 for 10Hz
self._coordinate = 3
self._pitch_valid = 1 # 1 for valid, 0 for invalid.
self._yaw_valid = 1
self._roll_valid = 0
self._error = 0
self._pitch = 0 # Unit: 0.1 degree
self._roll = 0 # Unit: 0.1 degree
self._yaw = 0 # Unit: 0.1 degree
self._yaw_speed = 30
self._roll_speed = 0
self._pitch_speed = 30
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._action_id
buf[1] = self._action_ctrl | (self._push_freq << 2)
buf[2] = self._yaw_valid | (self._roll_valid << 1) | (self._pitch_valid << 2) | (self._coordinate << 3)
struct.pack_into('<hhh', buf, 3, self._yaw, self._roll, self._pitch)
struct.pack_into('<HHHH', buf, 9, self._error, self._yaw_speed, self._roll_speed, self._pitch_speed)
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[offset]
if self._retcode == 0:
self._accept = buf[offset + 1]
return True
else:
return False
class ProtoGimbalActionPush(ProtoData):
_cmdset = 0x3f
_cmdid = 0xb1
_cmdtype = DUSS_MB_TYPE_PUSH
def __init__(self):
self._action_id = 0
self._percent = 0
self._action_state = 0
self._yaw = 0
self._roll = 0
self._pitch = 0
def pack_req(self):
return b''
def unpack_req(self, buf, offset=0):
self._action_id = buf[offset]
self._percent = buf[offset + 1]
self._action_state = buf[offset + 2] & 0x3
self._yaw, self._roll, self._pitch = struct.unpack_from('<hhh', buf, offset + 3)
return True
def unpack_resp(self, buf, offset=0):
self._action_id = buf[offset]
self._percent = buf[offset + 1]
self._action_state = buf[offset + 2] & 0x3
self._yaw, self._roll, self._pitch = struct.unpack_from('<hhh', buf, offset + 3)
return True
class ProtoGimbalRecenter(ProtoData):
_cmdset = 0x3f
_cmdid = 0xb2
_req_size = 9
def __init__(self):
self._action_id = 0
self._action_ctrl = 0
self._push_freq = 2
self._pitch_valid = 1
self._roll_valid = 0
self._yaw_valid = 1
self._yaw_speed = 100
self._roll_speed = 0
self._pitch_speed = 100
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._action_id
buf[1] = self._action_ctrl | (self._push_freq << 2)
buf[2] = self._yaw_valid | (self._roll_valid << 1) | (self._pitch_valid << 2)
struct.pack_into("<HHH", buf, 3, self._yaw_speed, self._roll_speed, self._pitch_speed)
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[offset]
if self._retcode == 0:
self._accept = buf[offset + 1]
return True
else:
return False
class ProtoVisionDetectStatus(ProtoData):
_cmdset = 0x0a
_cmdid = 0xa5
def __init__(self):
self._vision_type = 0
def pack_req(self):
return b''
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
self._vision_type = buf[1] | buf[2] << 8
return True
else:
logger.warning("ProtoVisionDetectType: unpack_resp, error")
return False
class ProtoVisionSetColor(ProtoData):
_cmdset = 0x0a
_cmdid = 0xab
_req_size = 2
def __init__(self):
self._type = 0
self._color = 0
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._type
buf[1] = self._color
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
return True
else:
logger.warning("ProtoVisionSetColor: unpack_resp, retcode {0}".format(self._retcode))
return False
class ProtoPositionMove(ProtoData):
_cmdset = 0x3f
_cmdid = 0x25
_req_size = 13
def __init__(self):
self._action_id = 0
self._freq = 2
self._action_ctrl = 0
self._ctrl_mode = 0
self._axis_mode = 0
self._pos_x = 0
self._pos_y = 0
self._pos_z = 0
self._vel_xy_max = 0
self._agl_omg_max = 300
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._action_id
buf[1] = self._action_ctrl | self._freq << 2
buf[2] = self._ctrl_mode
buf[3] = self._axis_mode
struct.pack_into('<hhh', buf, 4, self._pos_x, self._pos_y, self._pos_z)
buf[10] = self._vel_xy_max
struct.pack_into('<h', buf, 11, self._agl_omg_max)
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[offset]
if self._retcode == 0:
self._accept = buf[offset + 1]
return True
else:
logger.warning("ProtoPositionMove: unpack_resp, retcode:{0}".format(self._retcode))
return False
class ProtoPositionPush(ProtoData):
_cmdset = 0x3f
_cmdid = 0x2a
def __init__(self):
self._action_id = 0
self._percent = 0
self._action_state = 0
self._pos_x = 0
self._pos_y = 0
self._pos_z = 0
def pack_req(self):
return b''
# ack push.
def unpack_req(self, buf, offset=0):
self._action_id = buf[0]
self._percent = buf[1]
self._action_state = buf[2]
self._pos_x, self._pos_y, self._pos_z = struct.unpack_from('<hhh', buf, 3)
return True
def unpack_resp(self, buf, offset=0):
self._action_id = buf[offset]
self._percent = buf[offset + 1]
self._action_state = buf[offset + 2]
self._pos_x, self._pos_y, self._pos_z = struct.unpack_from('<hhh', buf, offset + 3)
return True
class ProtoSetWheelSpeed(ProtoData):
_cmdset = 0x3f
_cmdid = 0x20
_req_size = 8
def __init__(self):
self._w1_spd = 0
self._w2_spd = 0
self._w3_spd = 0
self._w4_spd = 0
def pack_req(self):
buf = bytearray(self._req_size)
struct.pack_into("<hhhh", buf, 0, self._w1_spd, self._w2_spd, self._w3_spd, self._w4_spd)
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
return True
else:
logger.warning("ProtoSetWheelSpeed: unpack_resp, retcode:{0}".format(self._retcode))
return False
class ProtoChassisSetWorkMode(ProtoData):
_cmdset = 0x3f
_cmdid = 0x19
_req_size = 1
def __init__(self):
self._mode = 0
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._mode
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
return True
else:
return False
class ProtoChassisSpeedMode(ProtoData):
_cmdset = 0x3f
_cmdid = 0x21
_req_size = 12
_cmdtype = DUSS_MB_TYPE_PUSH
def __init__(self):
self._x_spd = float(0)
self._y_spd = float(0)
self._z_spd = float(0)
def pack_req(self):
buf = bytearray(self._req_size)
struct.pack_into("<fff", buf, 0, self._x_spd, self._y_spd, self._z_spd)
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
return True
else:
return False
class ProtoChassisPwmPercent(ProtoData):
_cmdset = 0x3f
_cmdid = 0x3c
_req_size = 13
_cmdtype = DUSS_MB_TYPE_REQ
def __init__(self):
self._mask = 0
self._pwm1 = 0
self._pwm2 = 0
self._pwm3 = 0
self._pwm4 = 0
self._pwm5 = 0
self._pwm6 = 0
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._mask
struct.pack_into('<HHHHHH', buf, 1, self._pwm1, self._pwm2, self._pwm3, self._pwm4, self._pwm5, self._pwm6)
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
return True
else:
return False
class ProtoChassisPwmFreq(ProtoData):
_cmdset = 0x3f
_cmdid = 0x2b
_req_size = 13
_cmdtype = DUSS_MB_TYPE_REQ
def __init__(self):
self._mask = 0
self._pwm1 = 0
self._pwm2 = 0
self._pwm3 = 0
self._pwm4 = 0
self._pwm5 = 0
self._pwm6 = 0
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._mask
struct.pack_into('<HHHHHH', buf, 1, self._pwm1, self._pwm2, self._pwm3, self._pwm4, self._pwm5, self._pwm6)
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
return True
else:
return False
class ProtoChassisSerialSet(ProtoData):
_cmdset = 0x3f
_cmdid = 0xc0
_req_size = 6
def __init__(self):
self._baud_rate = 0
self._data_bit = 0
self._odd_even = 0
self._stop_bit = 0
self._tx_en = 0
self._rx_en = 0
self._rx_size = 0
self._tx_size = 0
self._config = 0
self._fun_en = 0
def pack_req(self):
buf = bytearray(self._req_size)
self._config = (self._stop_bit & 0x1) << 7 | \
(self._odd_even & 0x3) << 5 | \
(self._data_bit & 0x3) << 3 | \
(self._baud_rate & 0x7)
self._fun_en = ((self._tx_en & 0x1) << 1) | (self._rx_en & 0x1)
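        # Editor's note: with baud_rate=0, data_bit=1, odd_even=0, stop_bit=0 the config
        # byte works out to 0b00001000, and tx_en=1, rx_en=1 gives fun_en = 0b11.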
struct.pack_into('<BBHH', buf, 0, self._config, 0xff, self._rx_size, self._tx_size)
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
return True
else:
return False
class ProtoChassisSerialMsgSend(ProtoData):
_cmdset = 0x3f
_cmdid = 0xc1
_req_size = 3
def __init__(self):
self._msg_len = 0
self._msg_type = 0x2
self._msg_buf = []
def pack_req(self):
buf = bytearray(self._msg_len + self._req_size + 1)
struct.pack_into('<BH', buf, 0, self._msg_type, self._msg_len)
buf[3:len(buf) - 1] = self._msg_buf
return buf[0:-1]
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
return True
else:
return False
class ProtoVisionDetectEnable(ProtoData):
_cmdset = 0x0a
_cmdid = 0xa3
_req_size = 2
def __init__(self):
self._type = 0
def pack_req(self):
buf = bytearray(self._req_size)
struct.pack_into("<H", buf, 0, self._type)
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
return True
else:
self._error = struct.unpack_from("<H", buf, 1)
logger.warning("ProtoVisionDetectEnable: unpack_resp, error:{0}".format(self._error))
return False
class ProtoVisionDetectInfo(ProtoData):
_cmdset = 0x0a
_cmdid = 0xa4
def __init__(self):
self._type = 0
self._status = 0
self._errcode = 0
self._rect_info = []
self._data_buf = None
def pack_req(self):
return b''
def unpack_req(self, buf, offset=0):
self._type = buf[0]
self._status = buf[1]
self._errcode = buf[6] | (buf[7] << 8)
count = buf[8]
if self._type == 0: # shoulder
for i in range(0, count):
x, y, w, h, info = struct.unpack_from('<ffffI', buf, 9 + 20 * i)
t = 5
self._rect_info.append([round(x, t), round(y, t), round(w, t), round(h, t)])
elif self._type == 1: # person
for i in range(0, count):
x, y, w, h, _ = struct.unpack_from('<ffffI', buf, 9 + 20 * i)
t = 5
self._rect_info.append([round(x, t), round(y, t), round(w, t), round(h, t)])
elif self._type == 2: # gesture
for i in range(0, count):
x, y, w, h, info = struct.unpack_from('<ffffI', buf, 9 + 20 * i)
t = 5
self._rect_info.append([round(x, t), round(y, t), round(w, t), round(h, t), info])
elif self._type == 4: # line
if count > 0:
x, y, theta, C, info = struct.unpack_from("<ffffI", buf, 9)
self._rect_info.append(info)
else:
self._rect_info.append(0)
for i in range(0, count):
x, y, theta, C, info = struct.unpack_from("<ffffI", buf, 9 + 20 * i)
t = 7
self._rect_info.append([round(x, t), round(y, t), round(theta, t), round(C, t)])
elif self._type == 5: # marker
for i in range(0, count):
x, y, w, h, info, distance = struct.unpack_from('<ffffHH', buf, 9 + 20 * i)
t = 5
self._rect_info.append([round(x, t), round(y, t), round(w, t), round(h, t), info])
elif self._type == 7: # robot
for i in range(0, count):
x, y, w, h, _ = struct.unpack_from('<ffffI', buf, 9 + 20 * i)
t = 5
self._rect_info.append([round(x, t), round(y, t), round(w, t), round(h, t)])
else:
logger.warning("unsupported type: {0}".format(self._type))
self._data_buf = (self._type, self._errcode, self._rect_info)
return True
class ProtoSubscribeAddNode(ProtoData):
_cmdset = 0x48
_cmdid = 0x01
_req_size = 5
def __init__(self):
self._node_id = 0
self._sub_vision = 0x03000000
self._pub_node_id = 0
def pack_req(self):
buf = bytearray(self._req_size)
struct.pack_into("<BI", buf, 0, self._node_id, self._sub_vision)
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0 or self._retcode == 0x50:
self._pub_node_id = buf[1]
return True
else:
logger.warning("ProtoSubscribeAddNode: unpack_resp, retcode:{0}".format(self._retcode))
return False
class ProtoSubNodeReset(ProtoData):
_cmdset = 0x48
_cmdid = 0x02
_req_size = 1
def __init__(self):
self._node_id = 0
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._node_id
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
return True
else:
return False
class ProtoDelMsg(ProtoData):
_cmdset = 0x48
_cmdid = 0x04
_req_size = 3
def __init__(self):
self._node_id = 0
self._msg_id = 0
self._sub_mode = 0
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._sub_mode
buf[1] = self._node_id
buf[2] = self._msg_id
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
return True
else:
return False
class ProtoAddSubMsg(ProtoData):
_cmdset = 0x48
_cmdid = 0x03
_req_size = 7
def __init__(self):
self._node_id = 0
self._msg_id = 0
self._timestamp = 0
self._stop_when_disconnect = 0
self._sub_mode = 0
self._sub_data_num = 0
self._sub_uid_list = []
self._sub_freq = 1
self._pub_node_id = 0
self._sub_mode = 0
self._err_uid = 0
def pack_req(self):
req_size = self._req_size + self._sub_data_num * 8
buf = bytearray(req_size)
buf[0] = self._node_id
buf[1] = self._msg_id
buf[2] = (self._timestamp & 0x1) | (self._stop_when_disconnect & 0x2)
buf[3] = self._sub_mode
buf[4] = self._sub_data_num
for i in range(0, self._sub_data_num):
logger.info("ProtoSubMsg: UID:{0}".format(hex(self._sub_uid_list[i])))
struct.pack_into("<Q", buf, 5 + 8 * i, self._sub_uid_list[i])
struct.pack_into("<H", buf, 5 + 8 * self._sub_data_num, self._sub_freq)
logger.info("ProtoSubMsg: pack_req, num:{0}, buf {1}".format(self._sub_data_num, binascii.hexlify(buf)))
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
self.pub_node_id = buf[1]
self.ack_sub_mode = buf[2]
self.ack_msg_id = buf[3]
self.ack_err_uid_data = buf[4] | (buf[5] << 8) | (buf[6] << 16) | (buf[7] << 24)
if self._retcode == 0:
return True
else:
return False
class ProtoPushPeriodMsg(ProtoData):
_cmdset = 0x48
_cmdid = 0x8
_type = DUSS_MB_TYPE_PUSH
def __init__(self):
self._sub_mode = 0
self._msg_id = 0
self._data_buf = None
def pack_req(self):
return b''
def unpack_req(self, buf, offset=0):
self._sub_mode = buf[0]
self._msg_id = buf[1]
self._data_buf = buf[2:]
return True
class ProtoGripperCtrl(ProtoData):
_cmdset = 0x33
_cmdid = 0x11
_req_size = 4
def __init__(self):
self._id = host2byte(27, 1)
self._control = 0
self._power = 330
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._id
buf[1] = self._control
struct.pack_into("<H", buf, 2, self._power)
logger.debug("ProtoGripperCtrl: buf:{0}".format(binascii.hexlify(buf)))
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
return True
else:
return False
class ProtoRoboticArmMove(ProtoData):
_cmdset = 0x33
_cmdid = 0x13
_req_size = 15
def __init__(self):
self._id = host2byte(27, 2)
self._type = 0
self._mask = 0x3
self._x = 0
self._y = 0
self._z = 0
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._id
buf[1] = self._type
buf[2] = self._mask
struct.pack_into('<iii', buf, 3, self._x, self._y, self._z)
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
if self._retcode == 0:
return True
else:
return False
class ProtoRoboticArmGetPostion(ProtoData):
_cmdset = 0x33
_cmdid = 0x14
_req_size = 1
def __init__(self):
self._id = 0x5b
self._retcode = 0
self._x = 0
self._y = 0
self._z = 0
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._id
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
self._x, self._y, self._z = struct.unpack_from('<iii', buf, 1)
return True
class ProtoSensorGetData(ProtoData):
_cmdset = 0x3f
_cmdid = 0xf0
_req_size = 1
def __init__(self):
self._port = 0
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._port
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
self._port = buf[1]
self._adc, self._io, self._time = struct.unpack_from('<HBI', buf, 2)
if self._retcode == 0:
return True
else:
return False
class ProtoServoModeSet(ProtoData):
_cmdset = 0x33
_cmdid = 0x16
_req_size = 2
def __init__(self):
self._id = 0x19
self._mode = 0
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._id
buf[1] = self._mode
return buf
class ProtoServoControl(ProtoData):
_cmdset = 0x33
_cmdid = 0x17
_req_size = 4
def __init__(self):
self._id = 0x19
self._enable = 1
self._value = 0
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._id
buf[1] = self._enable
struct.pack_into('<H', buf, 2, self._value)
return buf
class ProtoServoGetAngle(ProtoData):
_cmdset = 0x33
_cmdid = 0x15
_req_size = 1
def __init__(self):
self._id = 0x19
self._enable = 1
self._value = 0
self._retcode = 0
self._angle = 0
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._id
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[0]
self._angle = (buf[1] + (buf[2] << 8) + (buf[3] << 16) + (buf[4] << 24)) / 10
return True
class ProtoServoCtrlSet(ProtoData):
_cmdset = 0x3f
_cmdid = 0xb7
_req_size = 7
def __init__(self):
self._action_id = 0
self._freq = 2
self._action_ctrl = 0
self._id = 0
self._value = 0
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._action_id
buf[1] = self._action_ctrl | self._freq << 2
buf[2] = host2byte(25, self._id)
struct.pack_into('<i', buf, 3, self._value)
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[offset]
if self._retcode == 0:
self._accept = buf[offset + 1]
return True
else:
logger.warning("ProtoServoCtrlSet: unpack_resp, retcode:{0}".format(self._retcode))
return False
class ProtoServoCtrlPush(ProtoData):
_cmdset = 0x3f
_cmdid = 0xb8
def __init__(self):
self._action_id = 0
self._percent = 0
self._action_state = 0
self._value = 0
def pack_req(self):
return b''
def unpack_req(self, buf, offset=0):
self._action_id = buf[0 + offset]
self._percent = buf[1 + offset]
self._action_state = buf[2 + offset] & 0x3
self._value = struct.unpack_from('<i', buf, 3 + offset)
return True
class ProtoRoboticArmMoveCtrl(ProtoData):
_cmdset = 0x3f
_cmdid = 0xb5
_req_size = 17
def __init__(self):
self._action_id = 0
self._freq = 2
self._action_ctrl = 0
self._id = host2byte(27, 2)
self._mode = 0
self._mask = 0x3
self._x = 0
self._y = 0
self._z = 0
def pack_req(self):
buf = bytearray(self._req_size)
buf[0] = self._action_id
buf[1] = self._action_ctrl | self._freq << 2
buf[2] = self._id
buf[3] = self._mode
buf[4] = self._mask
struct.pack_into("<iii", buf, 5, self._x, self._y, self._z)
return buf
def unpack_resp(self, buf, offset=0):
self._retcode = buf[offset]
if self._retcode == 0:
self._accept = buf[offset + 1]
return True
else:
logger.warning("ProtoRoboticArmMoveCtrl: unpack_resp, retcode:{0}".format(self._retcode))
return False
class ProtoRoboticArmMovePush(ProtoData):
_cmdset = 0x3f
_cmdid = 0xb6
def __init__(self):
self._action_id = 0
self._percent = 0
self._action_state = 0
self._x = 0
self._y = 0
self._z = 0
def pack_req(self):
return b''
def unpack_req(self, buf, offset=0):
self._action_id = buf[0 + offset]
self._percent = buf[1 + offset]
self._action_state = buf[2 + offset] & 0x3
self._x, self._y = struct.unpack_from('<ii', buf, 3 + offset)
return True
class TextProtoData(object):
SUCCESSFUL_RESP_FLAG = 'ok'
def __init__(self):
self._buf = None
self._len = None
self._text_cmd = None
self._action_state = None
self._resp = None
self._percent = 0
def __repr__(self):
return "<{0}>".format(self.__class__.__name__)
@property
def text_cmd(self):
return self._text_cmd
@text_cmd.setter
def text_cmd(self, cmd):
self._text_cmd = cmd
def pack_req(self):
""" 协议对象打包发送数据为字节流。
:return: 字节流数据。
"""
logger.debug("TextProtoData: pack_req test_cmd {0}, type {1}".format(self.text_cmd, type(self.text_cmd)))
self._buf = self.text_cmd
return self._buf
def unpack_req(self, buf, offset=0):
""" 从字节流解包。
:param buf:字节流数据。
:param offset:字节流数据偏移量。
:return:True 解包成功;False 解包失败。
"""
self._action_state = buf
self._resp = buf
return True
def pack_resp(self):
""" 协议对象打包。
:return:字节流数据。
"""
pass
def unpack_resp(self, buf, offset=0):
""" 从字节流解包为返回值和相关属性。
:param buf:字节流数据。
:param offset:字节流数据偏移量。
:return: True or False.
"""
self._action_state = buf
self._resp = buf
return True
def get_status(self):
if self._resp:
if self._resp == 'error':
return False
elif self._resp == 'ok':
return True
else:
return False
else:
return False
@property
def resp(self):
if self._resp is not None:
return self._resp.strip()
else:
return self._resp
@property
def proresp(self):
""" 针对acceleration?、attitude?、temp?命令的回复进行预处理。
:return: dict.
"""
msg_dict = dict()
resp = self.resp
if resp is None:
return msg_dict
if len(resp.split("~")) == 2:
msg_dict["templ"] = int(resp.split("~")[0])
msg_dict["temph"] = int(resp.split("~")[1][:-1])
elif len(resp.split(";")) == 4:
msg_list = resp.split(";")[:-1]
for msg in msg_list:
key, value = msg.split(":")
msg_dict[key] = float(value)
else:
logger.warning("doesn't support sdk! proresp returns empty dict")
return msg_dict
class TextProtoDrone(TextProtoData):
def __init__(self):
super().__init__()
class TextProtoDronePush(TextProtoData):
def __init__(self):
super().__init__()
class TelloDdsProto(object):
DDS_PAD_MID_FLAG = "mid"
DDS_PAD_X_FLAG = "x"
DDS_PAD_Y_FLAG = "y"
DDS_PAD_Z_FLAG = "z"
DDS_PAD_MPRY_FLAG = "mpry"
DDS_PITCH_FLAG = "pitch"
DDS_ROLL_FLAG = "roll"
DDS_YAW_FLAG = "yaw"
DDS_VGX_FLAG = "vgx"
DDS_VGY_FLAG = "vgy"
DDS_VGZ_FLAG = "vgz"
DDS_TEMP_L_FLAG = "templ"
DDS_TEMP_H_FLAG = "temph"
DDS_TOF_FLAG = "tof"
DDS_HIGH_FLAG = "h"
DDS_BATTERY_FLAG = "bat"
DDS_BARO_FLAG = "baro"
DDS_MOTOR_TIME_FLAG = "time"
DDS_AGX_FLAG = "agx"
DDS_AGY_FLAG = "agy"
DDS_AGZ_FLAG = "agz"
DDS_FREQ = 10
def __init__(self):
pass
class STAConnInfo:
def __init__(self):
self._ssid = ""
self._password = ""
self._cc = "CN"
self._appid = ""
self._bssid = None
self._has_bssid = 0
self._is_pairing = 0
self._ip = None
self._mac = None
self.recv_appid = ""
def set_info(self, ssid="", password="", id="", cc="CN"):
self._ssid = ssid
self._password = password
self._appid = id
self._cc = cc
def pack(self):
ssid_len = len(self._ssid)
pwd_len = len(self._password)
if self._has_bssid == 1:
buf = bytearray(2 + 8 + 2 + ssid_len + pwd_len + 6)
else:
buf = bytearray(2 + 8 + 2 + ssid_len + pwd_len)
buf[0] = ssid_len | (pwd_len & 0x3) << 6
buf[1] = (pwd_len >> 2) | (self._has_bssid << 3)
buf[2:10] = self._appid.encode(encoding="utf-8")
buf[10:12] = self._cc.encode(encoding="utf-8")
buf[12:12 + ssid_len] = self._ssid.encode(encoding="utf-8")
buf[12 + ssid_len:12 + ssid_len + pwd_len] = self._password.encode(encoding="utf-8")
if self._has_bssid == 1:
buf[12 + ssid_len + pwd_len:] = self._bssid.encode(encoding="utf-8")
return buf
def unpack(self, buf):
blank_byte = bytearray(1)
sof, is_pairing = struct.unpack_from(">HI", buf)
if sof != 0x5a5b:
return False
self._is_pairing = is_pairing & 0x1
self._ip = "{0}.{1}.{2}.{3}".format(int(buf[6]), int(buf[7]), int(buf[8]), int(buf[9]))
self._mac = "{0:2x}:{1:2x}:{2:2x}:{3:2x}:{4:2x}:{5:2x}".format(
int(buf[10]), int(buf[11]), int(buf[12]), int(buf[13]), int(buf[14]), int(buf[15]))
self._recv_appid = str(buf[16:23], encoding='utf-8').replace(str(blank_byte, encoding='utf-8'), "")
return True
```
#### File: src/robomaster/uart.py
```python
import collections
import threading
from queue import Queue
from . import module
from . import protocol
from . import logger
__all__ = ['Uart']
class Uart(module.Module):
""" EP 串口模块 """
_host = protocol.host2byte(3, 0)
def __init__(self, robot):
super().__init__(robot)
self._robot = robot
self._publisher = collections.defaultdict(list)
self._msg_queue = Queue()
self._dispatcher_running = False
self._dispatcher_thread = None
self._rec_data = []
self._callback = None
self._cb_args = None
def __del__(self):
self.stop()
def start(self):
self._client.add_handler(self, "Uart", self._msg_recv)
self._dispatcher_thread = threading.Thread(target=self._dispatch_task)
self._dispatcher_thread.start()
def stop(self):
self._dispatcher_running = False
if self._dispatcher_thread:
self._msg_queue.put(None)
self._dispatcher_thread.join()
self._dispatcher_thread = None
@classmethod
def _msg_recv(cls, self, msg):
if msg.cmdset != 0x3f or msg.cmdid != 0xc1:
return
self._msg_queue.put(msg)
pass
def _dispatch_task(self):
self._dispatcher_running = True
logger.info("serial: dispatcher_task is running...")
while self._dispatcher_running:
msg = self._msg_queue.get(1)
if msg is None:
if not self._dispatcher_running:
break
continue
proto = msg.get_proto()
if proto is None:
logger.warning("Subscriber: _publish, msg.get_proto None, msg:{0}".format(msg))
else:
if self._callback:
# CHANGED(jeguzzi): should be msg not proto._buf
# and should be called only if it's not an ack
# self.serial_process_decode(proto._buf)
# self.serial_process_exec()
if not msg.is_ack:
self.serial_process_decode(msg)
self.serial_process_exec()
pass
pass
# TODO(jeguzzi): verify correctness
def serial_process_decode(self, msg):
# CHANGED(jeguzzi): changed to make it coherent with outgoing messages
# and the length (i.e. either msg_len + 3 iff rec_data starts at index 3)
# buf_len = msg._buf[2] << 8 | msg._buf[3]
# if msg._buf[1] == 1 and msg._len == (buf_len+3):
# self._rec_data = msg._buf[4:]
buf_len = msg._buf[1] << 8 | msg._buf[2]
if msg._buf[0] == 1 and len(msg._buf) == (buf_len + 3):
self._rec_data = msg._buf[3:]
def sub_serial_msg(self, callback=None, *args):
self._callback = callback
self._cb_args = args[0]
self._cb_kw = args[1]
pass
def unsub_serial_msg(self):
self._callback = None
def serial_process_exec(self):
self._callback(self._rec_data, *self._cb_args, **self._cb_kw)
def serial_read_data(self, msg_len):
pass
def serial_param_set(self, baud_rate=0, data_bit=1,
odd_even=0, stop_bit=0, rx_en=1,
tx_en=1, rx_size=50, tx_size=50):
""" 底盘串口参数设置
默认设置:'9600', 'bit8', 'none', '1'
:param baud_rate: 串口波特率,设置范围:0~4映射‘9600’,‘19200’,‘38400’,‘57600’,‘115200’
:param data_bit: 数据位设置,设置范围:0~3映射‘bit7’, 'bit8', 'bit9', 'bit10'
:param odd_even: 数据校验位,设置范围:0~3映射‘none’, 'odd', 'even'
:param stop_bit: 停止位,设置范围:1~2
:param rx_en: 接收使能
:param tx_en: 发送使能
:param rx_size: 接收buff大小
:param tx_size: 发送buff大小
:return: 返回串口设置结果
"""
proto = protocol.ProtoChassisSerialSet()
proto._baud_rate = baud_rate
proto._data_bit = data_bit
proto._odd_even = odd_even
proto._stop_bit = stop_bit
proto._rx_en = rx_en
proto._tx_en = tx_en
proto._rx_size = rx_size
proto._tx_size = tx_size
return self._send_sync_proto(proto, protocol.host2byte(3, 6))
def serial_send_msg(self, msg_buf):
"""
        Send data over the chassis UART.
        :param msg_buf: the data to send
        :return: result of the UART send request
"""
proto = protocol.ProtoChassisSerialMsgSend()
        # str to byte stream
if type(msg_buf) == str:
proto._msg_buf = msg_buf.encode()
        # tuple to byte stream
elif type(msg_buf) == tuple:
proto._msg_buf = (','.join('%s' % d for d in msg_buf)).encode()
        # dict to byte stream
elif type(msg_buf) == dict:
proto._msg_buf = str(msg_buf).encode()
elif type(msg_buf) == bytearray:
proto._msg_buf = msg_buf
else:
return False
proto._msg_len = len(proto._msg_buf)
return self._send_sync_proto(proto, protocol.host2byte(3, 6))
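# Usage sketch (editor addition, not part of the original module); `ep_robot` is assumed
# to be an already initialized robomaster.robot.Robot instance exposing this Uart module,
# and the parameter values follow the mappings documented in serial_param_set above:
#
#   uart = ep_robot.uart
#   uart.serial_param_set(baud_rate=0, data_bit=1, odd_even=0, stop_bit=1)  # 9600, 8 data bits, no parity
#   uart.sub_serial_msg(lambda data: print("rx:", bytes(data)), (), {})
#   uart.serial_send_msg("hello")      # str payloads are encoded to bytes
#   uart.serial_send_msg((1, 2, 3))    # tuples are joined as "1,2,3" and encoded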
```
|
{
"source": "jeguzzi/robomaster_sim",
"score": 2
}
|
#### File: robomaster_sim/examples/chassis.py
```python
import time
import patch_ftp
import robomaster.config # noqa
from robomaster import robot, logger, logging # noqa
def main():
logger.setLevel(logging.ERROR)
ep_robot = robot.Robot()
ep_robot.initialize(conn_type="sta")
ep_robot.chassis.drive_speed(x=0.1, y=0.1, z=20)
rate = 5
def cb(msg, name=''):
print(f'msg {name}:', msg)
for name in ('position', 'attitude', 'velocity', 'status', 'imu', 'mode', 'esc')[:]:
print(f'\n[Test] sub_{name}\n')
getattr(ep_robot.chassis, f'sub_{name}')(freq=rate, callback=cb, name=name)
time.sleep(2)
getattr(ep_robot.chassis, f'unsub_{name}')()
ep_robot.chassis.drive_speed(x=0.0, y=0.0, z=0.0)
ep_robot.chassis.stop()
ep_robot.close()
if __name__ == '__main__':
main()
```
#### File: robomaster_sim/examples/discovery.py
```python
import time
import robomaster.config  # noqa
from robomaster import robot, logger, logging # noqa
import patch_ftp
import robomaster.conn
def main():
logger.setLevel(logging.INFO)
# connection internally uses scan_robot_ip(sn=None)
# which listens to broadcast messages on <port>:
# sn != None
# -> port = 40927
# -> check sn == sender_sn
# sn == None
# -> port = 45678
# -> no check
# sn is propagate down from `ep_robot.initialize(..., sn=None)`
# remote id than is the broadcast message sender
# def get_sn_form_data(data):
# data = data.split(b'\x00')
# recv_sn = data[0]
# recv_sn = recv_sn.decode(encoding='utf-8')
# return recv_sn
#
#
# The helper is just to communicate ssid and password to the robot.
# We don't need it for the simulator (unless we want to communicate with the app)
#
# helper = robomaster.conn.ConnectionHelper()
# helper._appid = "e009555"
# helper.wait_for_connection()
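    # Editor's sketch (assumption, not part of the original example): observing the
    # discovery broadcast directly, based on the ports and framing described above.
    #
    #   import socket
    #   s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    #   s.bind(('', 45678))              # 40927 when filtering on a serial number
    #   data, (sender_ip, _) = s.recvfrom(1024)
    #   recv_sn = data.split(b'\x00')[0].decode('utf-8')
    #   print(sender_ip, recv_sn)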
print('[Scan for ip]', robomaster.conn.scan_robot_ip())
# print(robomaster.conn.scan_robot_ip_list())
print('[Connect]')
ep_robot = robot.Robot()
# ep_robot.initialize(conn_type="ap")
ep_robot.initialize(conn_type="sta", sn=None)
time.sleep(5)
print('[Unconnect]')
ep_robot.close()
if __name__ == '__main__':
main()
```
|
{
"source": "jeguzzi/rviz_paths",
"score": 2
}
|
#### File: rviz_paths/script/rviz_path_server_node.py
```python
import copy
import rospy
from geometry_msgs.msg import PoseStamped
from interactive_markers.interactive_marker_server import InteractiveMarkerServer
from interactive_markers.menu_handler import MenuHandler
from traversability_rviz_paths.msg import Path, Paths
from std_msgs.msg import ColorRGBA
from visualization_msgs.msg import (InteractiveMarker,
InteractiveMarkerControl,
Marker)
from nav_msgs.msg import Path as NavPath
import numpy as np
from tf.transformations import quaternion_from_euler
def orientation(v):
roll = np.arctan2(v[1], v[0])
pitch = np.pi * 0.5 - np.arctan2(v[2], np.sqrt(v[0]**2 + v[1]**2))
return quaternion_from_euler(0, pitch, roll, axes='szyz')
def cylinder_between(p1, p2, color_msg, width=0.1):
cylinder = Marker()
cylinder.type = Marker.CYLINDER
cylinder.scale.x = cylinder.scale.y = width
cylinder.color = color_msg
cylinder.scale.z = np.linalg.norm(p1 - p2)
m = (p1 + p2) * 0.5
cylinder.pose.position.x = m[0]
cylinder.pose.position.y = m[1]
cylinder.pose.position.z = m[2]
o = cylinder.pose.orientation
o.x, o.y, o.z, o.w = orientation(p2 - p1)
return cylinder
def sphere_at(p, color_msg, width=0.1):
sphere = Marker()
sphere.type = Marker.SPHERE
sphere.scale.x = sphere.scale.y = sphere.scale.z = width
sphere.color = color_msg
sphere.pose.position.x = p[0]
sphere.pose.position.y = p[1]
sphere.pose.position.z = p[2]
return sphere
def node(pose, delta_z):
p = pose.pose.position
return np.array([p.x, p.y, p.z + delta_z])
def create_marker(path_msg, color_msg, description, path_id, width=0.1, delta_z=0.1):
int_marker = InteractiveMarker()
int_marker.header.frame_id = path_msg.header.frame_id
int_marker.name = str(path_id)
int_marker.description = "Path {0}".format(path_id)
# line_marker = Marker()
# line_marker.type = Marker.LINE_STRIP
# line_marker.scale.x = width
# line_marker.color = color_msg
# line_marker.points = [p.pose.position for p in path_msg.poses]
# for point in line_marker.points:
# point.z += delta_z
control = InteractiveMarkerControl()
control.always_visible = True
control.interaction_mode = InteractiveMarkerControl.MENU
# control.markers.append(line_marker)
points = [node(pose, delta_z) for pose in path_msg.poses]
for p1, p2 in zip(points[:-1], points[1:]):
control.markers.append(cylinder_between(p1, p2, color_msg, width))
for p in points:
control.markers.append(sphere_at(p, color_msg, width))
int_marker.controls.append(copy.deepcopy(control))
menu_handler = MenuHandler()
# put all the information in the main menu
#d = menu_handler.insert("Description")
for line in description:
menu_handler.insert(line)#, parent=d)
return menu_handler, int_marker
def ignore(msg):
pass
def test_msg():
msg = Path()
msg.path.header.frame_id = 'base_link'
msg.path.poses.append(PoseStamped())
msg.path.poses.append(PoseStamped())
msg.path.poses[1].pose.position.y = 1
msg.color = ColorRGBA(1.0, 0.5, 0.0, 0.5)
msg.description = ["A=1"]
return msg
class RvizPathServer(object):
def __init__(self):
super(RvizPathServer, self).__init__()
rospy.init_node("traversability_rviz_paths_node")
self.server = InteractiveMarkerServer("paths")
self.paths = {}
self.delta_z = rospy.get_param('~offset', 0.15)
self.width = rospy.get_param('~width', 0.15)
self.pub = rospy.Publisher("selected_path", NavPath, queue_size=1)
rospy.Subscriber("paths", Paths, self.updatePaths, queue_size=1)
# self.add_marker(test_msg(), 0)
# self.server.applyChanges()
rospy.spin()
def add_marker(self, msg, path_id):
menu, marker = create_marker(path_msg=msg.path, color_msg=msg.color,
description=msg.description, path_id=path_id,
width=self.width,
delta_z=self.delta_z)
self.server.insert(marker, ignore)
menu.insert("FOLLOW", callback=self.goto(path_id))
menu.apply(self.server, marker.name)
self.paths[path_id] = msg.path
def goto(self, path_id):
def f(msg):
rospy.loginfo("Follow path %d", path_id)
self.pub.publish(self.paths[path_id])
return f
def updatePaths(self, msg):
path_msg = NavPath()
path_msg.header.frame_id = 'map'
self.pub.publish(path_msg)
self.server.clear()
for i, m in enumerate(msg.paths):
self.add_marker(m, i)
self.server.applyChanges()
if __name__ == '__main__':
RvizPathServer()
```
|
{
"source": "jeguzzi/simExtROS2",
"score": 2
}
|
#### File: simExtROS2/tools/parse_interfaces.py
```python
from sys import argv, exit, stderr
import os
import re
import subprocess
from rosidl_runtime_py import get_interface_path
from rosidl_adapter.parser import parse_message_file, parse_service_file, parse_action_file, MessageSpecification, ServiceSpecification, ActionSpecification
ctype_builtin = {
'bool': 'bool',
'int8': 'int8_t',
'uint8': 'uint8_t',
'int16': 'int16_t',
'uint16': 'uint16_t',
'int32': 'int32_t',
'uint32': 'uint32_t',
'int64': 'int64_t',
'uint64': 'uint64_t',
'float32': 'float',
'float64': 'double',
'string': 'std::string',
'wstring': 'std::wstring',
'time': 'ros::Time',
'duration': 'ros::Duration',
'byte': 'uint8_t',
'char': 'int8_t'
}
fast_write_types = {'int32': 'Int32', 'float32': 'Float', 'float64': 'Double'}
def camel_case_to_snake_case(x):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', x)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
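# Editor's note: camel_case_to_snake_case('PoseStamped') -> 'pose_stamped', which is how
# the C++ header paths are derived from interface names below.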
def monkey_patch_fields(obj):
for i, field in enumerate(obj.fields):
if field.type.is_primitive_type():
field.__dict__['cpp_type'] = ctype_builtin[field.type.type]
field.__dict__['cpp_type_normalized'] = field.type.type
else:
field.__dict__['cpp_type'] = '::'.join([field.type.pkg_name, 'msg', field.type.type])
field.__dict__['cpp_type_normalized'] = '{}__msg__{}'.format(field.type.pkg_name, field.type.type)
def parse_interface(m):
assert isinstance(m, str)
pkg, tag, name = m.split('/')
path = get_interface_path(m)
if tag == 'msg':
obj = parse_message_file(pkg, path)
monkey_patch_fields(obj)
obj.__dict__['subinterfaces'] = {'Message': obj}
elif tag == 'srv':
obj = parse_service_file(pkg, path)
monkey_patch_fields(obj.request)
monkey_patch_fields(obj.response)
obj.__dict__['subinterfaces'] = {'Request': obj.request, 'Response': obj.response}
elif tag == 'action':
obj = parse_action_file(pkg, path)
monkey_patch_fields(obj.goal)
monkey_patch_fields(obj.feedback)
monkey_patch_fields(obj.result)
obj.__dict__['subinterfaces'] = {'Goal': obj.goal, 'Feedback': obj.feedback, 'Result': obj.result}
for subinterface_name, subinterface in obj.subinterfaces.items():
subinterface.__dict__['cpp_type_normalized'] = '{}/{}'.format(m, subinterface_name).replace('/', '__')
subinterface.__dict__['cpp_type'] = '::'.join([pkg, tag, name, subinterface_name])
obj.__dict__['tag'] = tag
obj.__dict__['full_name'] = m
obj.__dict__['cpp_include'] = '{}/{}/{}.hpp'.format(pkg, tag, camel_case_to_snake_case(name))
obj.__dict__['cpp_type_normalized'] = m.replace('/', '__')
obj.__dict__['cpp_type'] = '::'.join([pkg, tag, name])
return obj
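# Editor's note, an example of the derived attributes: parse_interface('geometry_msgs/msg/Point')
# yields cpp_type 'geometry_msgs::msg::Point', cpp_type_normalized 'geometry_msgs__msg__Point'
# and cpp_include 'geometry_msgs/msg/point.hpp'.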
def parse_interfaces(interfaces_file):
interfaces_list = set()
with open(interfaces_file) as f:
for line in f:
line = re.sub('#.*$', '', line).strip()
if not line: continue
interfaces_list.add(line)
interfaces = {}
for interface_name in sorted(interfaces_list):
interfaces[interface_name] = parse_interface(interface_name)
return interfaces
```
|
{
"source": "jehalladay/React-Playground",
"score": 3
}
|
#### File: graphQLDemos/fgql1/s.py
```python
from flask import Flask
from flask_graphql import GraphQLView
import graphene
app = Flask(__name__)
class Query(graphene.ObjectType):
is_authenticated = graphene.Boolean(description='Indicates whether the user is logged in or not')
def resolve_is_authenticated(self, info):
print(info)
return False
class Mutation(graphene.ObjectType):
dummy_mutation = graphene.String()
def resolve_dummy_mutation(self, info):
return "this is a dummy mutation"
schema = graphene.Schema(query=Query, mutation=Mutation)
app.add_url_rule('/graphql', view_func=GraphQLView.as_view('graphql', schema=schema, graphiql=True))
```
#### File: graphQLDemos/spacy/schema.py
```python
from graphene import ObjectType, String, Schema
class ExampleQuery(ObjectType):
hello = String()
    def resolve_hello(self, info):
return "Hello"
class RootQuery(ExampleQuery, ObjectType):
pass
schema = Schema(query=RootQuery)
```
#### File: graphQLDemos/spacy/s.py
```python
from flask import Flask, jsonify
from flask_graphql import GraphQLView
def create_app():
app = Flask(__name__)
from schema import schema
app.add_url_rule('/graphql', view_func=GraphQLView.as_view('graphql', schema=schema, graphiql=True))
@app.route("/")
def hello_world():
return "Hello World!"
return app
def get_environment_config():
return "config.DevelopmentConfig"
```
#### File: graphqlServer/python/app.py
```python
from sys import argv
from flask_graphql import GraphQLView
from flask import Flask
# import schema
def create_server():
app = Flask(__name__)
app.secret_key = b'(\<KEY>'
app.env = 'development'
return app
def graphqlSetup(app):
schema: dict = {
'name': 'Launch'
}
app.add_url_rule('/graphql', view_func=GraphQLView.as_view('graphql', schema=schema, graphiql=True))
# app.add_url_rule('/graphql/batch', view_func=GraphQLView.as_view('graphql', schema=schema, batch=True))
return app
def run(app, host='127.0.0.1'):
app.run(host=host, debug=True)
return app
def main():
app = create_server()
app = graphqlSetup(app)
host = argv[1] if len(argv) >= 2 else '127.0.0.1'
run(app, host)
(lambda : main() if __name__ == "__main__" else None)()
```
|
{
"source": "jehama/Internet.nl",
"score": 2
}
|
#### File: Internet.nl/checks/appconf.py
```python
import json
from django.apps import AppConfig
from django_redis import get_redis_connection
from django.conf import settings
from django.core.cache import cache
from checks import redis_id
def load_padded_macs_in_cache():
"""
Load the padded macs in cache for faster testing.
"""
try:
red = get_redis_connection()
with open(settings.PADDED_MACS) as f:
red.hmset(redis_id.padded_macs.id, json.load(f))
except Exception:
pass
def clear_cached_pages():
"""
Clear all previously cached pages.
"""
pattern = redis_id.simple_cache_page.id.split(':', 1)[0]
cache.delete_pattern("{}*".format(pattern))
class ChecksAppConfig(AppConfig):
name = 'checks'
def ready(self):
load_padded_macs_in_cache()
clear_cached_pages()
```
#### File: checks/batch/scheduler.py
```python
from datetime import timedelta
import random
from timeit import default_timer as timer
from celery.decorators import periodic_task
from celery.utils.log import get_task_logger
from django.conf import settings
from django.core.cache import cache
from django.utils import timezone
from pyrabbit import Client
from pyrabbit.http import HTTPError, NetworkError
from pyrabbit.api import APIError, PermissionError
from . import util
from .. import batch_shared_task, redis_id
from ..probes import batch_webprobes, batch_mailprobes
from ..tasks.dnssec import batch_web_registered as dnssec_web_taskset
from ..tasks.dnssec import batch_mail_registered as dnssec_mail_taskset
from ..tasks.ipv6 import batch_web_registered as ipv6_web_taskset
from ..tasks.ipv6 import batch_mail_registered as ipv6_mail_taskset
from ..tasks.mail import batch_mail_registered as auth_mail_taskset
from ..tasks.tls import batch_web_registered as tls_web_taskset
from ..tasks.tls import batch_mail_registered as tls_mail_taskset
from ..tasks.appsecpriv import batch_web_registered as appsecpriv_web_taskset
from ..tasks import dispatcher
from ..models import BatchRequest, BatchRequestStatus, BatchDomain
from ..models import BatchDomainStatus, BatchTestStatus
from ..models import BatchWebTest
from ..models import WebTestTls, WebTestAppsecpriv
from ..models import DomainTestReport, MailTestReport, MailTestTls
from ..models import MailTestDnssec, DomainTestDnssec
logger = get_task_logger(__name__)
BATCH_WEBTEST = {
'subtests': {
'ipv6': ipv6_web_taskset,
'dnssec': dnssec_web_taskset,
'tls': tls_web_taskset,
'appsecpriv': appsecpriv_web_taskset,
},
'report': {
'name': 'domaintestreport'
}
}
BATCH_MAILTEST = {
'subtests': {
'ipv6': ipv6_mail_taskset,
'dnssec': dnssec_mail_taskset,
'auth': auth_mail_taskset,
'tls': tls_mail_taskset,
},
'report': {
'name': 'mailtestreport'
}
}
class Rabbit():
"""
Wrapper class for the pyrabbit client.
"""
def __init__(self, rabbit, user, password):
self._rabbit = rabbit
self._user = user
self._pass = password
def _get_client(self):
"""
Get a client connection to rabbitmq.
"""
try:
self._cl = Client(self._rabbit, self._user, self._pass)
return True
except (HTTPError, NetworkError, APIError, PermissionError):
return None
def get_queue_depth(self, host, queue):
"""
Get the size of a queue on a rabbitmq virtual host.
        In case of a transient exception, retry before failing.
"""
tries = 5
while tries > 0:
try:
return self._cl.get_queue_depth(host, queue)
except (AttributeError, HTTPError, NetworkError, APIError,
PermissionError) as e:
self._get_client()
tries -= 1
if tries <= 0:
raise e
def is_queue_loaded(client):
"""
Check if we consider the monitor queue loaded.
"""
current_load = client.get_queue_depth(
settings.RABBIT_VHOST, settings.RABBIT_MON_QUEUE)
if current_load >= settings.RABBIT_MON_THRESHOLD:
return True
return False
def get_live_requests():
"""
Return a dictionary with active users as keys and their earliest
live batch request as value.
"""
live_requests = dict()
batch_requests = BatchRequest.objects.filter(
status=BatchRequestStatus.live).order_by('submit_date')
for request in batch_requests:
if not live_requests.get(request.user):
live_requests[request.user] = request
return live_requests
def get_user_and_request():
"""
Pick a user and his oldest live batch_request.
Users are fairly chosen regardless of the number of submitted tests.
"""
live_requests = get_live_requests()
if not live_requests:
return None, None
user = random.choice(list(live_requests.keys()))
batch_request = live_requests[user]
return user, batch_request
def pick_domain(batch_request):
"""
Pick a domain to test.
Selects the first available domain.
"""
return BatchDomain.objects.filter(
status=BatchDomainStatus.waiting, batch_request=batch_request).first()
def check_for_result_or_start_test(batch_domain, batch_test, subtest, taskset):
"""
Link the result if already available or start a test.
"""
started_test = False
subtest_model = batch_test._meta.get_field(subtest).rel.to
result = find_result(batch_domain, subtest_model)
if result:
save_result(batch_test, subtest, result)
else:
start_test(batch_domain, batch_test, subtest, taskset)
started_test = True
return started_test
def find_result(batch_domain, model):
"""
Check if we already have results for the domain. Viable results are
ones recorded after the batch submission.
"""
submit_date = batch_domain.batch_request.submit_date
try:
if model is WebTestTls:
result = model.objects.filter(
domain=batch_domain.domain,
webtestset__timestamp__gte=submit_date).latest('id')
elif model is MailTestTls:
result = model.objects.filter(
domain=batch_domain.domain,
testset__timestamp__gte=submit_date).latest('id')
elif model is MailTestDnssec:
result = model.objects.filter(
domain=batch_domain.domain,
testset__timestamp__gte=submit_date).latest('id')
elif model is WebTestAppsecpriv:
result = model.objects.filter(
domain=batch_domain.domain,
webtestset__timestamp__gte=submit_date).latest('id')
elif model is DomainTestDnssec:
result = model.objects.filter(
domain=batch_domain.domain,
maildomain_id=None,
timestamp__gte=submit_date).latest('id')
else:
result = model.objects.filter(
domain=batch_domain.domain,
timestamp__gte=submit_date).latest('id')
except model.DoesNotExist:
result = None
return result
def save_result(batch_test, subtest, result):
"""
Link results and save model.
"""
setattr(batch_test, subtest, result)
setattr(batch_test, '{}_status'.format(subtest), BatchTestStatus.done)
batch_test.save(update_fields=[
'{}_id'.format(subtest),
'{}_status'.format(subtest)])
def start_test(batch_domain, batch_test, subtest, taskset):
"""
Submit test and change status to running.
"""
submit_test(batch_domain, subtest, taskset)
setattr(batch_test, '{}_status'.format(subtest), BatchTestStatus.running)
batch_test.save(update_fields=['{}_status'.format(subtest)])
def submit_test(batch_domain, test, checks_registry):
"""
Submit the test in celery.
"""
url = batch_domain.domain
task_set = dispatcher.submit_task_set(
url, checks_registry, error_cb=error_callback)
# Need to cache it in redis, then the callback can look it up based
# on the task id.
cache_id = redis_id.running_batch_test.id.format(task_set.id)
cache_ttl = redis_id.running_batch_test.ttl
cache.set(cache_id, (batch_domain.id, test), cache_ttl)
return task_set
def check_any_subtest_for_status(batch_test, status):
"""
Check if any of the subtests has a given status.
"""
if isinstance(batch_test, BatchWebTest):
subtests = BATCH_WEBTEST['subtests']
else:
subtests = BATCH_MAILTEST['subtests']
for subtest in subtests:
if getattr(batch_test, "{}_status".format(subtest)) == status:
return True
return False
def find_or_create_report(batch_domain):
report = get_common_report(batch_domain)
if report:
batch_test = batch_domain.get_batch_test()
batch_test.report = report
batch_test.save(update_fields=['report'])
else:
create_report(batch_domain)
def get_common_report(batch_domain):
"""
Try to find the most recent common report for all subtests.
If no such report exists or at least one of the subtests is not yet
part of a report return nothing.
"""
batch_test = batch_domain.get_batch_test()
if isinstance(batch_test, BatchWebTest):
subtests = BATCH_WEBTEST['subtests']
report_details = BATCH_WEBTEST['report']
else:
subtests = BATCH_MAILTEST['subtests']
report_details = BATCH_MAILTEST['report']
report_ids = {}
for subtest in subtests:
report_ids[subtest] = set()
# example: batch_test.ipv6.mailtestreport_set.all()
for report in getattr(
getattr(batch_test, subtest),
'{}_set'.format(report_details['name'])).all():
report_ids[subtest].add(report.id)
if not report_ids[subtest]:
return None
for i, subtest in enumerate(report_ids):
if i == 0:
common_report_ids = report_ids[subtest]
else:
common_report_ids.intersection_update(report_ids[subtest])
if common_report_ids:
common_report_id = max(common_report_ids)
report_model = batch_test._meta.get_field('report').rel.to
try:
return report_model.objects.get(id=common_report_id)
except report_model.DoesNotExist:
pass
return None
def create_report(batch_domain):
"""
Create the report for this domain.
Similar to when a user is redirected to the results page.
"""
domain = batch_domain.domain
if batch_domain.webtest:
batch_test = batch_domain.webtest
report = DomainTestReport(
domain=domain,
ipv6=batch_test.ipv6,
dnssec=batch_test.dnssec,
tls=batch_test.tls,
appsecpriv=batch_test.appsecpriv)
probe_reports = batch_webprobes.get_probe_reports(report)
score = batch_webprobes.count_probe_reports_score(probe_reports)
else:
batch_test = batch_domain.mailtest
report = MailTestReport(
domain=domain,
ipv6=batch_test.ipv6,
dnssec=batch_test.dnssec,
auth=batch_test.auth,
tls=batch_test.tls)
probe_reports = batch_mailprobes.get_probe_reports(report)
score = batch_mailprobes.count_probe_reports_score(probe_reports)
report.registrar = "-Not available in batch-"
report.score = score
report.save()
batch_test.report = report
batch_test.save()
def update_domain_status(batch_domain):
"""
Check the status of the individual tests and update the domain's
entry status.
"""
if batch_domain.status == BatchDomainStatus.cancelled:
return
batch_test = batch_domain.get_batch_test()
if check_any_subtest_for_status(batch_test, BatchTestStatus.error):
batch_domain.status = BatchDomainStatus.error
elif check_any_subtest_for_status(batch_test, BatchTestStatus.waiting):
batch_domain.status = BatchDomainStatus.waiting
elif check_any_subtest_for_status(batch_test, BatchTestStatus.running):
batch_domain.status = BatchDomainStatus.running
else:
batch_domain.status = BatchDomainStatus.done
find_or_create_report(batch_domain)
batch_domain.status_changed = timezone.now()
batch_domain.save(update_fields=['status_changed', 'status'])
def update_batch_status(batch_request):
"""
Check the status of the submitted domains and update the batch
request's status if necessary.
"""
if batch_request.status in (BatchRequestStatus.cancelled,
BatchRequestStatus.done,
BatchRequestStatus.registering,
BatchRequestStatus.error):
return
waiting = batch_request.domains.filter(
status=BatchDomainStatus.waiting).exists()
running = batch_request.domains.filter(
status=BatchDomainStatus.running).exists()
if not waiting:
if running:
batch_request.status = BatchRequestStatus.running
else:
batch_request.status = BatchRequestStatus.done
batch_request.finished_date = timezone.now()
else:
batch_request.status = BatchRequestStatus.live
batch_request.save(update_fields=['status', 'finished_date'])
def batch_callback_hook(result, task_id):
"""
Link the result and change the status of the running test.
"""
if not result:
logger.error("Post callback, no result!")
return
cache_id = redis_id.running_batch_test.id.format(task_id)
cached = cache.get(cache_id)
if not cached:
logger.error(
"Post callback, could not find task id '{}'"
"".format(task_id))
return
batch_domain_id, subtest = cached
batch_domain = BatchDomain.objects.get(id=batch_domain_id)
if batch_domain.status == BatchDomainStatus.cancelled:
return
batch_test = batch_domain.get_batch_test()
save_result(batch_test, subtest, result)
cache.delete(cache_id)
update_domain_status(batch_domain)
@batch_shared_task()
def error_callback(request, exc, traceback):
"""
Increase error count and change status, if an error occurs.
.. note:: Celery only calls this when there is an exception in the chord
callback. This is a bug in celery. To compensate we periodically
check for tests stuck in the running state with
find_stalled_tests_and_update_db().
"""
logger.error("Task {0!r} raised error: {1!r}".format(request.id, exc))
cache_id = redis_id.running_batch_test.id.format(request.id)
cached = cache.get(cache_id)
if not cached:
logger.error(
"Error callback, could not find task id '{}'"
"".format(request.id))
return
batch_domain_id, test = cached
batch_domain = BatchDomain.objects.get(id=batch_domain_id)
if batch_domain.status == BatchDomainStatus.cancelled:
return
batch_test = batch_domain.get_batch_test()
record_subtest_error(batch_test, test)
update_domain_status(batch_domain)
cache.delete(cache_id)
def record_subtest_error(batch_test, subtest):
"""
Increase and return the error count for the given subtest. Also change
the status if appropriate.
"""
error_count = getattr(batch_test, '{}_errors'.format(subtest))
status = getattr(batch_test, '{}_status'.format(subtest))
error_count += 1
if status != BatchTestStatus.cancelled:
if error_count > 2:
status = BatchTestStatus.error
else:
status = BatchTestStatus.waiting
setattr(batch_test, '{}_status'.format(subtest), status)
setattr(batch_test, '{}_errors'.format(subtest), error_count)
batch_test.save(update_fields=[
'{}_status'.format(subtest),
'{}_errors'.format(subtest)])
return error_count
def find_stalled_tests_and_update_db():
"""
Find tests that have been in the running state for more than a given
threshold and update their status.
"""
running_domains = BatchDomain.objects.filter(
status=BatchDomainStatus.running)
now = timezone.now()
for batch_domain in running_domains:
timediff = (now - batch_domain.status_changed).total_seconds()
if timediff >= settings.BATCH_MAX_RUNNING_TIME:
if batch_domain.webtest:
batch_test = batch_domain.webtest
subtests = BATCH_WEBTEST['subtests']
else:
batch_test = batch_domain.mailtest
subtests = BATCH_MAILTEST['subtests']
for subtest in subtests:
status = getattr(batch_test, '{}_status'.format(subtest))
if status == BatchTestStatus.running:
errors = record_subtest_error(batch_test, subtest)
logger.info(
"{} errors for {}({})"
"".format(errors, batch_domain.domain, subtest))
update_domain_status(batch_domain)
def update_batch_request_status():
batch_requests = BatchRequest.objects.filter(
status__in=(BatchRequestStatus.live, BatchRequestStatus.running))
for batch_request in batch_requests:
update_batch_status(batch_request)
def _run_scheduler():
"""
Submit a fixed number of domains for testing if the queue is not
considered loaded.
"""
client = Rabbit(
settings.RABBIT, settings.RABBIT_USER, settings.RABBIT_PASS)
domains_to_test = settings.BATCH_SCHEDULER_DOMAINS
start_time = timer()
find_stalled_tests_and_update_db()
logger.info("Find stalled duration: {}".format(timer() - start_time))
start_time = timer()
update_batch_request_status()
logger.info("Update status duration: {}".format(timer() - start_time))
if not is_queue_loaded(client):
start_time = timer()
while domains_to_test > 0:
user, batch_request = get_user_and_request()
batch_domain = pick_domain(batch_request)
if not batch_domain:
break
subtests_started = 0
batch_test = batch_domain.get_batch_test()
if isinstance(batch_test, BatchWebTest):
subtests = BATCH_WEBTEST['subtests']
else:
subtests = BATCH_MAILTEST['subtests']
for subtest in subtests:
if (getattr(batch_test, '{}_status'.format(subtest))
== BatchTestStatus.waiting):
started_test = check_for_result_or_start_test(
batch_domain, batch_test, subtest,
subtests[subtest])
if started_test:
subtests_started += 1
if subtests_started > 0:
domains_to_test -= 1
update_domain_status(batch_domain)
logger.info("Submission duration: {}".format(timer() - start_time))
submitted_domains = settings.BATCH_SCHEDULER_DOMAINS - domains_to_test
logger.info("Submitted {} domains".format(submitted_domains))
if settings.ENABLE_BATCH:
@periodic_task(run_every=timedelta(
seconds=settings.BATCH_SCHEDULER_INTERVAL))
def run():
"""
Run the scheduler every interval only if it is not running already.
"""
lock_id = redis_id.batch_scheduler_lock.id
lock_ttl = redis_id.batch_scheduler_lock.ttl
with util.memcache_lock(lock_id, lock_ttl) as acquired:
if acquired:
_run_scheduler()
return
logger.info("Already running...")
```
#### File: checks/batch/util.py
```python
import json
from functools import wraps
import os
from celery.five import monotonic
from contextlib import contextmanager
from django.conf import settings
from django.core.cache import cache
from django.core.files.base import ContentFile
from django.core.urlresolvers import reverse
from django.db import transaction
from django.http import JsonResponse
from django.utils import timezone
from . import BATCH_API_VERSION
from .custom_views import get_applicable_views, gather_views_results
from .custom_views import ForumStandaardisatieView
from .. import batch_shared_task, redis_id
from ..probes import batch_webprobes, batch_mailprobes
from ..models import BatchUser, BatchRequestType, BatchDomainStatus
from ..models import BatchCustomView, BatchWebTest, BatchMailTest
from ..models import BatchDomain, BatchRequestStatus
from ..views.shared import pretty_domain_name, validate_dname
def get_site_url(request):
"""
Compose the url that the user used to connect to the API.
"""
return "{}://{}".format(request.scheme, request.get_host())
def check_valid_user(function):
"""
Custom decorator for batch views.
Check if the authenticated user is a user in the batch database and make
the record available in the decorated function.
"""
@wraps(function)
def wrap(request, *args, **kwargs):
user = get_user_from_request(request)
if not user:
return JsonResponse(dict(
success=False,
message="Unknown user",
data=[]))
kwargs['batch_user'] = user
return function(request, *args, **kwargs)
return wrap
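# Usage sketch (editor's note): a decorated batch API view receives the authenticated
# BatchUser record through the `batch_user` keyword argument, e.g.
#
#   @check_valid_user
#   def batch_endpoint(request, *args, batch_user=None, **kwargs):
#       ...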
def get_user_from_request(request):
"""
If the user that made the request is a legitimate batch user (exists in the
DB) return the relevant user object from the DB.
"""
user = None
try:
username = (
request.META.get('REMOTE_USER')
or request.META.get('HTTP_REMOTE_USER'))
if not username:
username = getattr(settings, 'BATCH_TEST_USER', None)
user = BatchUser.objects.get(username=username)
except BatchUser.DoesNotExist:
pass
return user
@contextmanager
def memcache_lock(lock_id, lock_duration=60*5):
"""
    Simple cache lock to keep celerybeat tasks from running again before the
    previous execution has finished.
    Also used for simple tasks that may be triggered more than once for the same
    task.
.. note:: Mostly as documented in the celery documentation.
"""
if lock_duration is not None:
timeout_at = monotonic() + lock_duration - 3
# cache.add fails if the key already exists
status = cache.add(lock_id, True, lock_duration)
try:
yield status
finally:
# memcache delete is very slow, but we have to use it to take
# advantage of using add() for atomic locking
if lock_duration is None or (monotonic() < timeout_at and status):
# don't release the lock if we exceeded the timeout
# to lessen the chance of releasing an expired lock
# owned by someone else
# also don't release the lock if we didn't acquire it
cache.delete(lock_id)
@batch_shared_task(bind=True, ignore_result=True)
def batch_async_generate_results(self, user, batch_request, site_url):
"""
Generate the batch results and save to file.
"""
lock_id_name = redis_id.batch_results_lock.id
lock_ttl = redis_id.batch_results_lock.ttl
def on_failure(exc, task_id, args, kwargs, einfo):
"""
Custom on_failure function to delete state from cache.
"""
user = kwargs['user']
batch_request = kwargs['batch_request']
lock_id = lock_id_name.format(
user.username, batch_request.request_id)
cache.delete(lock_id)
self.on_failure = on_failure
lock_id = lock_id_name.format(user.username, batch_request.request_id)
batch_request.refresh_from_db()
if not (batch_request.report_file
and os.path.isfile(batch_request.report_file.path)):
with memcache_lock(lock_id, lock_ttl) as acquired:
if acquired:
results = gather_batch_results(user, batch_request, site_url)
save_batch_results_to_file(user, batch_request, results)
def gather_batch_results(user, batch_request, site_url):
"""
Gather all the results for the batch request and return them in a
dictionary that will be eventually converted to JSON for the API answer.
"""
results = {
'submission-date': batch_request.submit_date.isoformat(),
'finished-date': batch_request.finished_date.isoformat(),
'name': batch_request.name,
'identifier': batch_request.request_id,
'api-version': BATCH_API_VERSION
}
if batch_request.type is BatchRequestType.web:
probes = batch_webprobes.getset()
url_name = 'webtest_results'
url_arg = ['site']
related_testset = 'webtest'
else:
probes = batch_mailprobes.getset()
url_name = 'mailtest_results'
url_arg = []
related_testset = 'mailtest'
dom_results = []
custom_views = get_applicable_views(user, batch_request)
# Querying for the related rows upfront minimizes further DB queries and
# gives a ~33% boost to performance.
related_fields = []
for probe in probes:
related_fields.append(
'{}__report__{}'.format(related_testset, probe.name))
batch_domains = batch_request.domains.all().select_related(*related_fields)
for batch_domain in batch_domains:
domain_name_idna = pretty_domain_name(batch_domain.domain)
if batch_domain.status == BatchDomainStatus.error:
dom_results.append(
dict(domain=domain_name_idna, status="failed"))
continue
batch_test = batch_domain.get_batch_test()
report = batch_test.report
score = report.score
args = url_arg + [batch_domain.domain, report.id]
link = "{}{}".format(site_url, reverse(url_name, args=args))
categories = []
for probe in probes:
category = probe.name
model = getattr(report, probe.name)
_, _, verdict = probe.get_scores_and_verdict(model)
passed = False
if verdict == 'passed':
passed = True
categories.append(dict(category=category, passed=passed))
result = dict(
domain=domain_name_idna,
status="ok",
score=score,
link=link,
categories=categories)
views = gather_views_results(
custom_views, batch_domain, batch_request.type)
if views:
views = sorted(views, key=lambda view: view['name'])
result['views'] = views
dom_results.append(result)
results['domains'] = dom_results
# Add a temporary identifier for the new custom view.
# Will be replaced in a later release with a universal default output.
if (len(custom_views) == 1
and isinstance(custom_views[0], ForumStandaardisatieView)
and custom_views[0].view_id):
results['api-view-id'] = custom_views[0].view_id
return results
def save_batch_results_to_file(user, batch_request, results):
"""
Save results to file using the Django's ORM utilities.
"""
filename = '{}-{}-{}.json'.format(
user.username, batch_request.type.label, batch_request.id)
batch_request.report_file.save(filename, ContentFile(json.dumps(results)))
@batch_shared_task(bind=True, ignore_result=True)
@transaction.atomic
def batch_async_register(self, batch_request, test_type, domains):
"""
Register the submitted domains for future batch testing. Domains need to
pass validity tests similar to vanilla internet.nl. Invalid domains are not
registered.
"""
def on_failure(exc, task_id, args, kwargs, einfo):
"""
Custom on_failure function to record the error.
"""
batch_request = kwargs['batch_request']
batch_request.refresh_from_db()
if batch_request.status != BatchRequestStatus.cancelled:
batch_request.status = BatchRequestStatus.error
batch_request.finished_date = timezone.now()
batch_request.save()
self.on_failure = on_failure
if test_type is BatchRequestType.web:
batch_test_model = BatchWebTest
keys = ('domain', 'batch_request', 'webtest')
# Unused because of latency while registering the domains.
# get_valid_domain = get_valid_domain_web
get_valid_domain = validate_dname
else:
batch_test_model = BatchMailTest
keys = ('domain', 'batch_request', 'mailtest')
# Unused because of latency while registering the domains.
# get_valid_domain = get_valid_domain_mail
get_valid_domain = validate_dname
for domain in domains:
# Ignore leading/trailing whitespace.
domain = domain.strip()
# Only register valid domain names like vanilla internet.nl
domain = get_valid_domain(domain)
if not domain:
continue
batch_test = batch_test_model()
batch_test.save()
values = (domain, batch_request, batch_test)
batch_domain = BatchDomain(**{k: v for k, v in zip(keys, values)})
batch_domain.save()
batch_request.refresh_from_db()
if batch_request.status != BatchRequestStatus.cancelled:
batch_request.status = BatchRequestStatus.live
batch_request.save()
@transaction.atomic
def delete_batch_request(batch_request):
"""
Remove the batch request together with all the batch related tables'
entries.
.. note:: It DOES NOT remove any entries from the vanilla tables.
"""
batch_domains = batch_request.domains.all()
for batch_domain in batch_domains:
batch_domain.get_batch_test().delete()
batch_domain.delete()
batch_request.delete()
def create_batch_user(username, name, organization, email):
"""
Create a batch user in the DB.
"""
user = BatchUser(
username=username, name=name, organization=organization, email=email)
user.save()
return user
def create_custom_view(name, description, usernames=[]):
"""
Create a custom view in the DB.
"""
view = BatchCustomView(name=name, description=description)
view.save()
for user in BatchUser.objects.filter(username__in=usernames):
user.custom_views.add(view)
return view
def add_custom_view_to_user(view_name, username):
"""
Add the mapping from user to custom view in the DB.
"""
view = None
user = None
try:
view = BatchCustomView.objects.get(name=view_name)
user = BatchUser.objects.get(username=username)
except BatchCustomView.DoesNotExist:
return view, user
user.custom_views.add(view)
return view, user
```
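The `memcache_lock` context manager above leans on the atomicity of `cache.add`: whoever adds the key first holds the lock, and the key is only deleted on exit if the lock was acquired and has not expired. The sketch below reproduces that add-if-absent pattern against a toy in-memory cache so it can run standalone; `SimpleCache` and `demo_lock` are illustrative stand-ins, not part of the project.
```python
import time
from contextlib import contextmanager

class SimpleCache:
    """Toy stand-in for a shared cache backend (illustration only)."""
    def __init__(self):
        self._store = {}
    def add(self, key, value, ttl):
        # Mimics cache.add: only succeeds if the key is absent (or expired).
        now = time.monotonic()
        current = self._store.get(key)
        if current is not None and current[1] > now:
            return False
        self._store[key] = (value, now + ttl)
        return True
    def delete(self, key):
        self._store.pop(key, None)

cache = SimpleCache()

@contextmanager
def demo_lock(lock_id, lock_duration=300):
    timeout_at = time.monotonic() + lock_duration - 3
    status = cache.add(lock_id, True, lock_duration)   # atomic "acquire"
    try:
        yield status
    finally:
        # Release only if we acquired the lock and it has not yet expired.
        if status and time.monotonic() < timeout_at:
            cache.delete(lock_id)

with demo_lock("batch_scheduler") as acquired:
    print("first caller acquired:", acquired)          # True
    with demo_lock("batch_scheduler") as nested:
        print("second caller acquired:", nested)       # False: already held
```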
#### File: checks/tasks/http_headers.py
```python
import http.client
import socket
from .tls_connection import NoIpError, http_fetch, MAX_REDIRECT_DEPTH
from .. import scoring
def get_multiple_values_from_header(header):
"""
Get all the values for the header.
Multiple values of the same header are in a comma separated list; make sure
to ignore white space when splitting the values.
"""
return [value.strip() for value in header.split(',')]
class HeaderCheckerContentEncoding(object):
"""
Class for checking the Content-Encoding HTTP header.
"""
def __init__(self):
self.name = "Content-Encoding"
def check(self, value, results):
"""
Check if the header has any value.
"""
if value:
results['http_compression_enabled'] = True
score = scoring.WEB_TLS_HTTP_COMPRESSION_BAD
results['http_compression_score'] = score
def get_positive_values(self):
return {
'http_compression_enabled': False,
'http_compression_score': scoring.WEB_TLS_HTTP_COMPRESSION_GOOD,
}
def get_negative_values(self):
return {
'http_compression_enabled': True,
'http_compression_score': scoring.WEB_TLS_HTTP_COMPRESSION_BAD,
}
class HeaderCheckerContentSecurityPolicy(object):
"""
Class for checking the Content-Security-Policy HTTP header.
"""
def __init__(self):
self.name = "Content-Security-Policy"
def check(self, value, results):
"""
Check if the header has any value.
"""
if not value:
results['content_security_policy_enabled'] = False
score = scoring.WEB_APPSECPRIV_CONTENT_SECURITY_POLICY_BAD
results['content_security_policy_score'] = score
else:
values = get_multiple_values_from_header(value)
results['content_security_policy_values'].extend(values)
def get_positive_values(self):
score = scoring.WEB_APPSECPRIV_CONTENT_SECURITY_POLICY_GOOD
return {
'content_security_policy_enabled': True,
'content_security_policy_score': score,
'content_security_policy_values': [],
}
def get_negative_values(self):
score = scoring.WEB_APPSECPRIV_CONTENT_SECURITY_POLICY_BAD
return {
'content_security_policy_enabled': False,
'content_security_policy_score': score,
'content_security_policy_values': [],
}
class HeaderCheckerStrictTransportSecurity(object):
"""
Class for checking the Strict-Transport-Security HTTP header.
"""
def __init__(self):
self.name = "Strict-Transport-Security"
self.first_time_seen = True
def check(self, value, results):
"""
Check if the *first* HSTS header value is more than 6 months.
"""
if self.first_time_seen and not value:
results['hsts_enabled'] = False
results['hsts_score'] = scoring.WEB_TLS_HSTS_BAD
self.first_time_seen = False
elif value:
header_values = get_multiple_values_from_header(value)
try:
max_age = header_values[0].lower().split(
'max-age=')[1].split(';')[0]
# Check if lower than 6 months.
if self.first_time_seen and int(max_age) < 15552000:
results['hsts_score'] = scoring.WEB_TLS_HSTS_PARTIAL
self.first_time_seen = False
except (ValueError, IndexError):
if self.first_time_seen:
results['hsts_score'] = scoring.WEB_TLS_HSTS_BAD
results['hsts_enabled'] = False
self.first_time_seen = False
results['hsts_policies'].extend(header_values)
def get_positive_values(self):
return {
'hsts_enabled': True,
'hsts_policies': [],
'hsts_score': scoring.WEB_TLS_HSTS_GOOD,
}
def get_negative_values(self):
return {
'hsts_enabled': False,
'hsts_policies': [],
'hsts_score': scoring.WEB_TLS_HSTS_BAD,
}
class HeaderCheckerXFrameOptions(object):
"""
Class for checking the X-Frame-Options HTTP header.
"""
def __init__(self):
self.name = "X-Frame-Options"
def check(self, value, results):
"""
Check if the header has any of the allowed values.
"""
if not value:
score = scoring.WEB_APPSECPRIV_X_FRAME_OPTIONS_BAD
results['x_frame_options_score'] = score
results['x_frame_options_enabled'] = False
else:
values = get_multiple_values_from_header(value)
first_header = values[0].upper()
if not (first_header == "DENY"
or first_header == "SAMEORIGIN"
or first_header.startswith("ALLOW-FROM")):
score = scoring.WEB_APPSECPRIV_X_FRAME_OPTIONS_BAD
results['x_frame_options_score'] = score
results['x_frame_options_enabled'] = False
results['x_frame_options_values'].extend(values)
def get_positive_values(self):
score = scoring.WEB_APPSECPRIV_X_FRAME_OPTIONS_GOOD
return {
'x_frame_options_enabled': True,
'x_frame_options_score': score,
'x_frame_options_values': [],
}
def get_negative_values(self):
score = scoring.WEB_APPSECPRIV_X_FRAME_OPTIONS_BAD
return {
'x_frame_options_enabled': False,
'x_frame_options_score': score,
'x_frame_options_values': [],
}
class HeaderCheckerXContentTypeOptions(object):
"""
Class for checking the X-Content-Type-Options HTTP header.
"""
def __init__(self):
self.name = "X-Content-Type-Options"
def check(self, value, results):
"""
Check if the header has the allowed value.
"""
if not value:
score = scoring.WEB_APPSECPRIV_X_CONTENT_TYPE_OPTIONS_BAD
results['x_content_type_options_score'] = score
results['x_content_type_options_enabled'] = False
else:
values = get_multiple_values_from_header(value)
if not values[0].lower() == "nosniff":
score = scoring.WEB_APPSECPRIV_X_CONTENT_TYPE_OPTIONS_BAD
results['x_content_type_options_score'] = score
results['x_content_type_options_enabled'] = False
results['x_content_type_options_values'].extend(values)
def get_positive_values(self):
score = scoring.WEB_APPSECPRIV_X_CONTENT_TYPE_OPTIONS_GOOD
return {
'x_content_type_options_enabled': True,
'x_content_type_options_score': score,
'x_content_type_options_values': [],
}
def get_negative_values(self):
score = scoring.WEB_APPSECPRIV_X_CONTENT_TYPE_OPTIONS_BAD
return {
'x_content_type_options_enabled': False,
'x_content_type_options_score': score,
'x_content_type_options_values': [],
}
class HeaderCheckerXXssProtection(object):
"""
Class for checking the X-Xss-Protection HTTP header.
"""
def __init__(self):
self.name = "X-Xss-Protection"
def check(self, value, results):
"""
Check if XSS protection is enabled.
"""
if not value:
score = scoring.WEB_APPSECPRIV_X_XSS_PROTECTION_BAD
results['x_xss_protection_score'] = score
results['x_xss_protection_enabled'] = False
else:
values = get_multiple_values_from_header(value)
enabled = values[0].split(";")[0]
if enabled == "0":
score = scoring.WEB_APPSECPRIV_X_XSS_PROTECTION_BAD
results['x_xss_protection_score'] = score
results['x_xss_protection_enabled'] = False
results['x_xss_protection_values'].extend(values)
def get_positive_values(self):
score = scoring.WEB_APPSECPRIV_X_XSS_PROTECTION_GOOD
return {
'x_xss_protection_enabled': True,
'x_xss_protection_score': score,
'x_xss_protection_values': [],
}
def get_negative_values(self):
score = scoring.WEB_APPSECPRIV_X_XSS_PROTECTION_BAD
return {
'x_xss_protection_enabled': False,
'x_xss_protection_score': score,
'x_xss_protection_values': [],
}
class HeaderCheckerReferrerPolicy(object):
"""
Class for checking the Referrer-Policy HTTP header.
"""
def __init__(self):
self.name = "Referrer-Policy"
def check(self, value, results):
"""
Check if the header has any of the allowed values.
"""
if value == "":
# Empty string defaults to 'no-referrer-when-downgrade'.
results['referrer_policy_values'] = ['""']
elif not value:
score = scoring.WEB_APPSECPRIV_REFERRER_POLICY_BAD
results['referrer_policy_score'] = score
results['referrer_policy_enabled'] = False
else:
values = get_multiple_values_from_header(value)
for value in values:
if value.lower() not in [
'no-referrer',
'no-referrer-when-downgrade',
'origin',
'origin-when-cross-origin',
'same-origin',
'strict-origin',
'strict-origin-when-cross-origin',
'unsafe-url',
]:
score = scoring.WEB_APPSECPRIV_REFERRER_POLICY_BAD
results['referrer_policy_score'] = score
results['referrer_policy_enabled'] = False
results['referrer_policy_values'].extend(values)
def get_positive_values(self):
score = scoring.WEB_APPSECPRIV_REFERRER_POLICY_GOOD
return {
'referrer_policy_enabled': True,
'referrer_policy_score': score,
'referrer_policy_values': [],
}
def get_negative_values(self):
score = scoring.WEB_APPSECPRIV_REFERRER_POLICY_BAD
return {
'referrer_policy_enabled': False,
'referrer_policy_score': score,
'referrer_policy_values': [],
}
def http_headers_check(af_ip_pair, url, header_checkers, task):
results = dict()
# set defaults to positive values. Header tests return negative values if
# a test failed.
for h in header_checkers:
results.update(h.get_positive_values())
put_headers = (("Accept-Encoding", "compress, deflate, exi, gzip, "
"pack200-gzip, x-compress, x-gzip"),)
get_headers = [h.name for h in header_checkers]
try:
conn, res, headers, visited_hosts = http_fetch(
url, af=af_ip_pair[0], path="", port=443, task=task,
ip_address=af_ip_pair[1], put_headers=put_headers,
depth=MAX_REDIRECT_DEPTH,
needed_headers=get_headers)
except (socket.error, http.client.BadStatusLine, NoIpError):
# Not able to connect, return negative values
for h in header_checkers:
results.update(h.get_negative_values())
results['server_reachable'] = False
else:
if 443 in headers:
for name, value in headers[443]:
for header_checker in header_checkers:
if name == header_checker.name:
header_checker.check(value, results)
break
return results
```
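As the docstrings above explain, repeated HTTP headers arrive as a single comma-separated string, and the HSTS checker only inspects the first value's `max-age` directive (flagging anything under six months, 15552000 seconds). A small standalone sketch of those two parsing steps, re-implemented here for illustration rather than imported from the module:
```python
def split_header_values(header):
    # Same idea as get_multiple_values_from_header: split on commas and
    # strip surrounding whitespace from each value.
    return [value.strip() for value in header.split(',')]

def hsts_max_age(header):
    """Return max-age (seconds) from the first HSTS value, or None if unparsable."""
    values = split_header_values(header)
    try:
        return int(values[0].lower().split('max-age=')[1].split(';')[0])
    except (ValueError, IndexError):
        return None

hsts = "max-age=31536000; includeSubDomains, max-age=60"
print(split_header_values(hsts))       # ['max-age=31536000; includeSubDomains', 'max-age=60']
print(hsts_max_age(hsts))              # 31536000
print(hsts_max_age(hsts) >= 15552000)  # True: at least six months
```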
|
{
"source": "jehangiramjad/data",
"score": 2
}
|
#### File: fbi/crime/preprocess.py
```python
import csv
import io
import ssl
import urllib.request
import sys
import requests
import re
import os
import pandas as pd
import logging
import geocode_cities
# Years for which the FBI doesn't publish arson data:
# 2019, 2018, 2017
# The FBI does not publish arson data unless it receives data from either the agency or the state for all 12 months of the calendar year.
_FIELDS_IN_CRIME_FILE = 14
_POPULATION_INDEX = 2
_STATE_INDEX = 0
_CITY_INDEX = 1
_DUMMY_RAPE_INDEX = 6
_CRIME_FIELDS = [
'Year',
'State',
'City',
'Population',
# Violent Crimes
'Violent',
'ViolentMurderAndNonNegligentManslaughter',
'ViolentRape',
'Rape2',
'ViolentRobbery',
'ViolentAggravatedAssault',
# Property Crimes
'Property',
'PropertyBurglary',
'PropertyLarcenyTheft',
'PropertyMotorVehicleTheft',
# Arson
'PropertyArson',
]
GEO_CODE = 'Geocode'
TOTAL = 'Total'
OUTPUT_COLUMNS = [
'Year', 'GeoId', 'Count_CriminalActivities_ViolentCrime',
'Count_CriminalActivities_MurderAndNonNegligentManslaughter',
'Count_CriminalActivities_ForcibleRape', 'Count_CriminalActivities_Robbery',
'Count_CriminalActivities_AggravatedAssault',
'Count_CriminalActivities_PropertyCrime',
'Count_CriminalActivities_Burglary',
'Count_CriminalActivities_LarcenyTheft',
'Count_CriminalActivities_MotorVehicleTheft',
'Count_CriminalActivities_Arson', 'Count_CriminalActivities_CombinedCrime'
]
# Automate Template MCF generation since there are many Statitical Variables.
TEMPLATE_MCF_TEMPLATE = """
Node: E:FBI_Crime->E{index}
typeOf: dcs:StatVarObservation
variableMeasured: dcs:{stat_var}
measurementMethod: dcs:FBI_Crime
observationAbout: C:FBI_Crime->GeoId
observationDate: C:FBI_Crime->Year
value: C:FBI_Crime->{stat_var}
"""
# From 2013-2016, the FBI reported statistics for two different definitions of rape before fully transitioning to the current definition in 2017.
# For the other years we add a dummy Rape column (so all years have two Rape columns).
YEARS_WITH_TWO_RAPE_COLUMNS = {'2013', '2014', '2015', '2016'}
YEARS_WITHOUT_POPULATION_COLUMN = {'2016'}
YEAR_TO_URL = {
'2019':
'https://ucr.fbi.gov/crime-in-the-u.s/2019/crime-in-the-u.s.-2019/tables/table-8/table-8.xls',
'2018':
'https://ucr.fbi.gov/crime-in-the-u.s/2018/crime-in-the-u.s.-2018/tables/table-8/table-8.xls',
'2017':
'https://ucr.fbi.gov/crime-in-the-u.s/2017/crime-in-the-u.s.-2017/tables/table-8/table-8.xls',
'2016':
'https://ucr.fbi.gov/crime-in-the-u.s/2016/crime-in-the-u.s.-2016/tables/table-8/table-8.xls',
'2015':
'https://ucr.fbi.gov/crime-in-the-u.s/2015/crime-in-the-u.s.-2015/tables/table-8/table_8_offenses_known_to_law_enforcement_by_state_by_city_2015.xls',
'2014':
'https://ucr.fbi.gov/crime-in-the-u.s/2014/crime-in-the-u.s.-2014/tables/table-8/Table_8_Offenses_Known_to_Law_Enforcement_by_State_by_City_2014.xls',
'2013':
'https://ucr.fbi.gov/crime-in-the-u.s/2013/crime-in-the-u.s.-2013/tables/table-8/table_8_offenses_known_to_law_enforcement_by_state_by_city_2013.xls',
'2012':
'https://ucr.fbi.gov/crime-in-the-u.s/2012/crime-in-the-u.s.-2012/tables/8tabledatadecpdf/table_8_offenses_known_to_law_enforcement_by_state_by_city_2012.xls',
'2011':
'https://ucr.fbi.gov/crime-in-the-u.s/2011/crime-in-the-u.s.-2011/tables/table_8_offenses_known_to_law_enforcement_by_state_by_city_2011.xls',
# Sanity check 2008-2010 don't have duplicate city state.
'2010':
'https://ucr.fbi.gov/crime-in-the-u.s/2010/crime-in-the-u.s.-2010/tables/10tbl08.xls',
'2009':
'https://www2.fbi.gov/ucr/cius2009/data/documents/09tbl08.xls',
'2008':
'https://www2.fbi.gov/ucr/cius2008/data/documents/08tbl08.xls',
}
def _remove_extra_chars(c):
# Remove commas and quotes from string c, and any leading/trailing whitespace.
# Return the cleaned_string
return re.sub(r'[,"]', '', c).strip()
def _remove_digits(c):
# Remove digits from string c
# Return the cleaned string
return re.sub(r'[\d]', '', c)
def _is_digit(x):
try:
float(x)
return True
except ValueError:
return False
def _int_from_field(f):
# Convert a field to int value. If field is empty or non-convertible, return 0.
# Numeric values were read in as strings with a ".0" suffix, e.g. "12.0". First convert to float, then to int.
try:
f = float(f)
f = int(f)
return f
except ValueError as err:
return 0
# Trim 2016 state name.
def _get_2016_state(state):
state = state.replace(' - Metropolitan Counties', '')
state = state.replace(' - Nonmetropolitan Counties', '')
return state
def calculate_crimes(r):
# Compute the violent, property and arson crime counts plus the total, and write them back into r.
# If a field is empty, it is treated as 0.
# Category 1: Violent Crimes
violent = _int_from_field(r['Violent'])
murder = _int_from_field(r['ViolentMurderAndNonNegligentManslaughter'])
rape = _int_from_field(r['ViolentRape'])
rape2 = _int_from_field(r['Rape2'])
robbery = _int_from_field(r['ViolentRobbery'])
assault = _int_from_field(r['ViolentAggravatedAssault'])
# Fix rape value
rape += rape2
# Add the values back as ints
r['ViolentMurderAndNonNegligentManslaughter'] = murder
r['ViolentRape'] = rape
r['Rape2'] = rape2
r['ViolentRobbery'] = robbery
r['ViolentAggravatedAssault'] = assault
violent_computed = murder + rape + robbery + assault
if violent_computed != violent:
print('{} {} {} violent mismatch {} {}'.format(r['Year'], r['City'],
r['State'], violent,
violent_computed))
# Category 2: Property Crime
property = _int_from_field(r['Property'])
burglary = _int_from_field(r['PropertyBurglary'])
theft = _int_from_field(r['PropertyLarcenyTheft'])
motor = _int_from_field(r['PropertyMotorVehicleTheft'])
# Add the property crime values as ints.
r['PropertyBurglary'] = burglary
r['PropertyLarcenyTheft'] = theft
r['PropertyMotorVehicleTheft'] = motor
# Compute totals
property_computed = burglary + theft + motor
if property_computed != property:
print('{} {} {} property mismatch {} {}'.format(r['Year'], r['City'],
r['State'], property,
property_computed))
# Category 3: Arson
arson = _int_from_field(r['PropertyArson'])
r['PropertyArson'] = arson
total = violent_computed + property_computed + arson
# Write back the totals
r[TOTAL] = total
r['Violent'] = violent_computed
r['Property'] = property_computed
def clean_crime_file(f_input, f_output, year):
"""Clean a tsv file of crime statistics.
The input contains crime statistics, one for every city.
Remove header and footer lines, and append state column to every line.
Skip lines that do not contain data.
Args:
f_input: file object with crime statistics, one per city.
f_output: output stream for writing the cleaned statistics.
year: year string this input is about.
"""
state = ''
count_line = 0
count_city = 0
count_state = 0
count_header_footer = 0
count_incomplete_lines = 0
count_comments = 0
for line in f_input:
count_line += 1
if line.startswith('#'):
count_comments += 1
continue
# Split by comma, ignoring commas that appear inside quoted fields.
# For case like PENNSYLVANIA,"Abington Township, Montgomery County",55476.0,53.0,0.0,6.0,0,15.0,32.0,934.0,32.0,883.0,19.0,2.0
field = [
'"{}"'.format(x)
for x in list(csv.reader([line], delimiter=',', quotechar='"'))[0]
]
# Skip incomplete lines
if len(field) < _FIELDS_IN_CRIME_FILE:
count_incomplete_lines += 1
logging.info('%s %s', line, len(field))
continue
# Replace commas and quotes in fields e.g. "1,234" -> 1234
# Remove any other leading or trailing whitespace
for i in range(_FIELDS_IN_CRIME_FILE):
field[i] = _remove_extra_chars(field[i])
# Skip if the line does not contain data or if population is empty.
if (not field[_POPULATION_INDEX] or
not _is_digit(field[_POPULATION_INDEX]) or
field[_POPULATION_INDEX] == '0'):
count_header_footer += 1
continue
# If field[_STATE_INDEX] is present, use it as the State.
if field[_STATE_INDEX]:
# Remove numeric values from state names (comes from footnotes)
state = _remove_digits(field[_STATE_INDEX])
if year == "2016":
state = _get_2016_state(state)
count_state += 1
field[_STATE_INDEX] = state
# Remove any numeric characters from city names.
field[_CITY_INDEX] = _remove_digits(field[_CITY_INDEX])
count_city += 1
# Keep the first n fields. Some of the files contain extra empty fields.
output_line = '{},{}\n'.format(year,
','.join(field[:_FIELDS_IN_CRIME_FILE]))
f_output.write(output_line)
logging.info('lines: %d, comments: %d, incomplete: %d, header_footer:%d',
count_line, count_comments, count_incomplete_lines,
count_header_footer)
logging.info('%d cities', count_city)
logging.info('%d states', count_state)
def update_and_calculate_crime_csv(geo_codes, crime_csv, writer):
with open(crime_csv) as crime_f:
crimes = csv.DictReader(crime_f, fieldnames=_CRIME_FIELDS)
found_set = set()
cities_not_found_set = set()
for crime in crimes:
if geocode_cities.update_crime_geocode(crime, geo_codes, found_set,
cities_not_found_set):
calculate_crimes(crime)
processed_dict = {
'Year':
crime['Year'],
'GeoId':
"dcid:geoId/{}".format(crime[GEO_CODE]),
'Count_CriminalActivities_ViolentCrime':
crime['Violent'],
'Count_CriminalActivities_MurderAndNonNegligentManslaughter':
crime['ViolentMurderAndNonNegligentManslaughter'],
'Count_CriminalActivities_ForcibleRape':
crime['ViolentRape'],
'Count_CriminalActivities_Robbery':
crime['ViolentRobbery'],
'Count_CriminalActivities_AggravatedAssault':
crime['ViolentAggravatedAssault'],
'Count_CriminalActivities_PropertyCrime':
crime['Property'],
'Count_CriminalActivities_Burglary':
crime['PropertyBurglary'],
'Count_CriminalActivities_LarcenyTheft':
crime['PropertyLarcenyTheft'],
'Count_CriminalActivities_MotorVehicleTheft':
crime['PropertyMotorVehicleTheft'],
'Count_CriminalActivities_Arson':
crime['PropertyArson'],
'Count_CriminalActivities_CombinedCrime':
crime[TOTAL],
}
writer.writerow(processed_dict)
# Output the cities that were not found
with open('city_not_found.txt', 'w') as cities_not_found_f:
for s in cities_not_found_set:
cities_not_found_f.write('{}\n'.format(s))
print('US src_cities = {}, cities_not_found = {}'.format(
len(found_set), len(cities_not_found_set)))
def create_tmcf_file(tmcf_file_path):
stat_vars = OUTPUT_COLUMNS[2:]
with open(tmcf_file_path, 'w', newline='') as f_out:
for i in range(len(stat_vars)):
f_out.write(
TEMPLATE_MCF_TEMPLATE.format_map({
'index': i,
'stat_var': stat_vars[i]
}))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
geo_codes = geocode_cities.read_geocodes()
with open('calculated_crime.csv', 'w') as csv_output_f:
writer = csv.DictWriter(csv_output_f, fieldnames=OUTPUT_COLUMNS)
writer.writeheader()
for year, url in YEAR_TO_URL.items():
response = requests.get(url)
xls_file = year + '.xls'
csv_file = year + '.csv'
cleaned_csv_file = year + '_cleaned.csv'
with open(xls_file, 'wb') as file:
file.write(response.content)
read_file = pd.read_excel(xls_file, skiprows=[0, 1, 2])
if year in YEARS_WITHOUT_POPULATION_COLUMN:
read_file.insert(_POPULATION_INDEX, 'Population', 1)
if year not in YEARS_WITH_TWO_RAPE_COLUMNS:
read_file.insert(_DUMMY_RAPE_INDEX, 'Dummy', 0)
read_file.to_csv(csv_file, index=None, header=True)
with open(csv_file, "r") as f_input:
with open(cleaned_csv_file, "w") as f_output:
logging.info('clean crime file for year %s', year)
clean_crime_file(f_input, f_output, year)
update_and_calculate_crime_csv(geo_codes, cleaned_csv_file, writer)
# Clean intermediate files.
os.remove(xls_file)
os.remove(csv_file)
os.remove(cleaned_csv_file)
create_tmcf_file("FBI_crime.tmcf")
```
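The cleaning helpers in the script above are small regex and casting utilities; the snippet below re-implements them in isolation (for illustration only) to show how a raw FBI field is normalised before the crime totals are computed.
```python
import re

def remove_extra_chars(c):
    # Strip commas and quotes plus leading/trailing whitespace: ' "1,234" ' -> '1234'
    return re.sub(r'[,"]', '', c).strip()

def remove_digits(c):
    # Drop the footnote digits the FBI appends to state/city names.
    return re.sub(r'[\d]', '', c)

def int_from_field(f):
    # Counts are read as strings like "12.0"; empty or malformed fields become 0.
    try:
        return int(float(f))
    except ValueError:
        return 0

print(remove_extra_chars(' "1,234" '))              # 1234
print(remove_digits('PENNSYLVANIA5'))               # PENNSYLVANIA
print(int_from_field('12.0'), int_from_field(''))   # 12 0
```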
|
{
"source": "JehanKandy/image_convert_to_pencil",
"score": 3
}
|
#### File: JehanKandy/image_convert_to_pencil/image_convert_to_pencil.py
```python
import numpy as np
import imageio
import scipy.ndimage
import cv2
# You can use any image here.
# ------ important ----------
# The image and this code file should be in the same folder.
# If the image is not in the same folder, you have to give the full path.
# ----------------------------
my_img = "my_photo.jpg"
def rgb_photo(rgb):
    # Convert an RGB image to greyscale using the standard luminance weights.
    return np.dot(rgb[..., :3], [0.2989, 0.5870, 0.1140])
def image(front, back):
    # Colour-dodge blend: scale the front layer by the inverted back layer,
    # clamping the result at pure white.
    final_sketch = front * 255 / (255 - back)
    final_sketch[final_sketch > 255] = 255
    final_sketch[back == 255] = 255
    return final_sketch.astype('uint8')
img = imageio.imread(my_img)
gray = rgb_photo(img)                                # greyscale copy of the photo
g = 255 - gray                                       # inverted (negative) image
blur = scipy.ndimage.gaussian_filter(g, sigma=15)    # blurred negative
f_image = image(blur, gray)                          # dodge blend gives the pencil effect
cv2.imwrite('final_image.png', f_image)
```
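The `image` function above is a colour-dodge blend: the front layer is scaled by the inverse of the back layer and clamped at white, so wherever the blurred negative and the greyscale photo agree the output washes out to white and only edges survive, which is what produces the pencil look. A NumPy-only illustration of the same arithmetic on synthetic values (no image file needed; `dodge` mirrors `image`, and the arrays are made-up examples):
```python
import numpy as np

def dodge(front, back):
    # front * 255 / (255 - back), clamped to the 0-255 range.
    result = front * 255.0 / (255.0 - back)
    result[result > 255] = 255
    result[back == 255] = 255   # avoid blow-up where the back layer is pure white
    return result.astype('uint8')

front_layer = np.array([[100., 200.], [50., 150.]])
back_layer = np.array([[100., 30.], [200., 240.]])
print(dodge(front_layer, back_layer))
# [[164 226]
#  [231 255]]
```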
|
{
"source": "JehanKandy/Python-Web-Version-3.0",
"score": 2
}
|
#### File: JehanKandy/Python-Web-Version-3.0/browserUi.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWebEngineWidgets import QWebEngineView
import sys
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(792, 550)
Form.setWindowFlags(QtCore.Qt.FramelessWindowHint)
Form.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.verticalLayout = QtWidgets.QVBoxLayout(Form)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName("verticalLayout")
self.widget = QtWidgets.QWidget(Form)
self.widget.setStyleSheet("QWidget#widget{\n"
" border:4px solid rgb(45,45,45);\n"
" border-radius:20px;\n"
"}")
self.widget.setObjectName("widget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.widget)
self.verticalLayout_2.setContentsMargins(2, 2, 2, 2)
self.verticalLayout_2.setSpacing(0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.widget_2 = QtWidgets.QWidget(self.widget)
self.widget_2.setMinimumSize(QtCore.QSize(0, 80))
self.widget_2.setMaximumSize(QtCore.QSize(16777215, 80))
self.widget_2.setStyleSheet("QWidget#widget_2{\n"
" background-color:rgb(20,20,20);\n"
" border-top-left-radius:20px;\n"
" border-top-right-radius:20px;\n"
"}\n"
"QPushButton{\n"
" background-color:rgb(0,0,0);\n"
" color:rgb(144,144,144);\n"
" font:bold;\n"
" font-size:15px;\n"
" font-family:entypo;\n"
"}\n"
"QPushButton:hover{\n"
" color:rgb(142,175,27);\n"
"}\n"
"QPushButton:pressed{\n"
" padding-top:5px;\n"
" padding-left:5px;\n"
" color:rgb(91,88,53);\n"
"}\n"
"\n"
"")
self.widget_2.setObjectName("widget_2")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.widget_2)
self.verticalLayout_3.setContentsMargins(12, -1, 12, -1)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label_5 = QtWidgets.QLabel(self.widget_2)
self.label_5.setMinimumSize(QtCore.QSize(15, 15))
self.label_5.setMaximumSize(QtCore.QSize(15, 15))
self.label_5.setStyleSheet("background-color:rgb(255,178,102);\n"
"border-radius:7px;")
self.label_5.setText("")
self.label_5.setObjectName("label_5")
self.horizontalLayout.addWidget(self.label_5)
self.label_4 = QtWidgets.QLabel(self.widget_2)
self.label_4.setMinimumSize(QtCore.QSize(15, 15))
self.label_4.setMaximumSize(QtCore.QSize(15, 15))
self.label_4.setStyleSheet("background-color:rgb(255,255,102);\n"
"border-radius:7px")
self.label_4.setText("")
self.label_4.setObjectName("label_4")
self.horizontalLayout.addWidget(self.label_4)
self.label_3 = QtWidgets.QLabel(self.widget_2)
self.label_3.setMinimumSize(QtCore.QSize(15, 15))
self.label_3.setMaximumSize(QtCore.QSize(15, 15))
self.label_3.setStyleSheet("background-color:rgb(255,255,102);\n"
"border:4px solid rgb(45,45,45);\n"
"border-radious7px;")
self.label_3.setText("")
self.label_3.setObjectName("label_3")
self.horizontalLayout.addWidget(self.label_3)
self.label_6 = QtWidgets.QLabel(self.widget_2)
self.label_6.setMinimumSize(QtCore.QSize(200, 0))
self.label_6.setStyleSheet("color:rgb(144,144,144);")
self.label_6.setAlignment(QtCore.Qt.AlignCenter)
self.label_6.setObjectName("label_6")
self.horizontalLayout.addWidget(self.label_6)
self.pushButton = QtWidgets.QPushButton(self.widget_2)
self.pushButton.setMinimumSize(QtCore.QSize(25, 25))
self.pushButton.setMaximumSize(QtCore.QSize(25, 25))
self.pushButton.setObjectName("pushButton")
self.horizontalLayout.addWidget(self.pushButton)
self.pushButton_3 = QtWidgets.QPushButton(self.widget_2)
self.pushButton_3.setMinimumSize(QtCore.QSize(25, 25))
self.pushButton_3.setMaximumSize(QtCore.QSize(25, 25))
font = QtGui.QFont()
font.setFamily("entypo")
font.setPointSize(-1)
font.setBold(True)
font.setItalic(False)
font.setUnderline(False)
font.setWeight(75)
font.setStyleStrategy(QtGui.QFont.PreferAntialias)
self.pushButton_3.setFont(font)
self.pushButton_3.setCheckable(True)
self.pushButton_3.setObjectName("pushButton_3")
self.horizontalLayout.addWidget(self.pushButton_3)
self.pushButton_2 = QtWidgets.QPushButton(self.widget_2)
self.pushButton_2.setEnabled(True)
self.pushButton_2.setMinimumSize(QtCore.QSize(25, 25))
self.pushButton_2.setMaximumSize(QtCore.QSize(25, 25))
font = QtGui.QFont()
font.setFamily("entypo")
font.setPointSize(-1)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.pushButton_2.setFont(font)
self.pushButton_2.setObjectName("pushButton_2")
self.horizontalLayout.addWidget(self.pushButton_2)
self.verticalLayout_3.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.pushButton_4 = QtWidgets.QPushButton(self.widget_2)
self.pushButton_4.setMinimumSize(QtCore.QSize(25, 25))
self.pushButton_4.setMaximumSize(QtCore.QSize(25, 25))
self.pushButton_4.setObjectName("pushButton_4")
self.horizontalLayout_2.addWidget(self.pushButton_4)
self.pushButton_5 = QtWidgets.QPushButton(self.widget_2)
self.pushButton_5.setMinimumSize(QtCore.QSize(25, 25))
self.pushButton_5.setMaximumSize(QtCore.QSize(25, 25))
self.pushButton_5.setObjectName("pushButton_5")
self.horizontalLayout_2.addWidget(self.pushButton_5)
self.pushButton_6 = QtWidgets.QPushButton(self.widget_2)
self.pushButton_6.setMinimumSize(QtCore.QSize(25, 25))
self.pushButton_6.setMaximumSize(QtCore.QSize(25, 25))
self.pushButton_6.setObjectName("pushButton_6")
self.horizontalLayout_2.addWidget(self.pushButton_6)
self.lineEdit = QtWidgets.QLineEdit(self.widget_2)
self.lineEdit.setMinimumSize(QtCore.QSize(0, 25))
self.lineEdit.setMaximumSize(QtCore.QSize(16777215, 25))
self.lineEdit.setStyleSheet("background-color:rgb(32,32,32);\n"
"border-radius:5px;\n"
"color:rgb(144,144,144);\n"
"padding-left:5px;")
self.lineEdit.setObjectName("lineEdit")
self.horizontalLayout_2.addWidget(self.lineEdit)
self.verticalLayout_3.addLayout(self.horizontalLayout_2)
self.verticalLayout_2.addWidget(self.widget_2)
self.webEngineView = QWebEngineView(self.widget)
self.webEngineView.page().setBackgroundColor(QtGui.QColor(45, 45, 45, 255))
self.webEngineView.setObjectName("webEngineView")
self.verticalLayout_2.addWidget(self.webEngineView)
self.label_2 = QtWidgets.QLabel(self.widget)
self.label_2.setMinimumSize(QtCore.QSize(0, 20))
self.label_2.setMaximumSize(QtCore.QSize(16777215, 20))
self.label_2.setStyleSheet("background-color:rgb(45,45,45);\n"
"border-bottom-left-radius:20px;\n"
"border-bottom-right-radius:20px;\n"
"color:rgb(144,144,144);\n"
"")
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName("label_2")
self.verticalLayout_2.addWidget(self.label_2)
self.verticalLayout.addWidget(self.widget)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.label_6.setText(_translate("Form", "<NAME>"))
self.pushButton.setText(_translate("Form", "\\"))
self.pushButton_3.setText(_translate("Form", "o"))
self.pushButton_2.setText(_translate("Form", "X"))
self.pushButton_4.setText(_translate("Form", "â"))
self.pushButton_5.setText(_translate("Form", "ê"))
self.pushButton_6.setText(_translate("Form", "d"))
self.label_2.setText(_translate("Form", "DEVELOP BY JEHANKANDY"))
```
|
{
"source": "jehboyes/curriculum_model",
"score": 2
}
|
#### File: curriculum_model/cli/copy.py
```python
import click
from curriculum_model.db import DB, table_map
from curriculum_model.db.schema import Base
from sqlalchemy.inspection import inspect
def dependency_chain(key_only=False):
c = ['curriculum',
'course',
'course_config',
'course_session',
'course_session_config',
'cgroup',
'cgroup_config',
'component',
'cost',
'cost_week'
]
key_objects = [1, 3, 5, 7]
if key_only:
return [o for i, o in enumerate(c) if i in key_objects]
else:
return c
@click.command()
@click.argument("obj_name", type=click.Choice(dependency_chain(True)))
@click.argument("obj_id", type=int)
@click.argument("parent_id", type=int)
# @click.option("--move", "-m", is_flag=True, help="Delete the original file after copying.")
@click.pass_obj
def copy(config, obj_name, obj_id, parent_id):
config.verbose_print(
f"Attempting to copy {obj_name} with id {obj_id} and its sub-objects to parent with id {parent_id}.")
dc = dependency_chain(False)
tm = table_map(Base)
# Get the type of object the parent is
parent_name = dc[dc.index(obj_name)-1]
parent_class = tm[parent_name]
# open connection
with DB(config.echo, config.environment) as db:
session = db.session()
# If the parent doesn't have a curriculum id then it's a config, so go one step further to get curriculum id
if not hasattr(parent_class.__table__.columns, "curriculum_id"):
grandparent_name = dc[dc.index(obj_name)-2]
grandparent_class = tm[grandparent_name]
base_class = grandparent_class
else:
base_class = parent_class
if not click.confirm(f"Proceed with copying {obj_name} with ID {obj_id} " +
f"to {base_class.__tablename__} with ID {parent_id}?",
abort=True):
pass
curriculum_id = session.query(base_class).get(parent_id).curriculum_id
parent_obj = session.query(base_class).get(parent_id)
obj_class = tm[obj_name]
obj = session.query(obj_class).get(obj_id)
_recursive_copy(session, curriculum_id, parent_obj,
obj, tm, dc, config, 0)
if click.confirm("Commit changes?"):
session.commit()
else:
session.rollback()
def _recursive_copy(session, curriculum_id, parent_obj, child_obj, tm, dc, config, indent_level):
indent = indent_level*"\t "
# Columns to copy
tbl = child_obj.__table__
cols = [c for c in tbl.columns.keys() if c not in tbl.primary_key]
# If no columns, it's reached cost_week (two pk columns) so add week column back in
if len(cols) == 0:
cols = ['acad_week', 'cost_id']
# Convert existing record to dictionary of non-pk columns
data = {c: getattr(child_obj, c) for c in cols}
# Change curriculum_id value, if possible
if 'curriculum_id' in data.keys():
data['curriculum_id'] = curriculum_id
# Change the parent object ID column, if possible
parent_pk_name = list(parent_obj.__table__.primary_key)[0].name
if parent_pk_name in data.keys():
data[parent_pk_name] = getattr(parent_obj, parent_pk_name)
# Get the pk column name for reference later
child_pk_name = list(child_obj.__table__.primary_key)[0].name
# Create new object, add to DB
new_child_obj = child_obj.__class__(**data)
session.add(new_child_obj)
session.flush()
config.verbose_print(f"{indent}Created {child_obj.__tablename__} " +
f"with ID {getattr(new_child_obj, child_pk_name)}")
# add config_entry if a config exists between parent and child
if abs(dc.index(parent_obj.__tablename__) - dc.index(new_child_obj.__tablename__)) == 2:
config.verbose_print(
f"{indent}Creating config link for {parent_obj.__tablename__}.")
config_class = tm[parent_obj.__tablename__ + '_config']
new_conf_values = {parent_pk_name: getattr(parent_obj, parent_pk_name),
child_pk_name: getattr(new_child_obj, child_pk_name)}
new_conf = config_class(**new_conf_values)
session.add(new_conf)
session.flush()
# Get children of child
dc_pos = dc.index(child_obj.__tablename__)
if dc_pos == len(dc)-1:
# Reached final object, stop recurse
return
grandchild_name = dc[dc_pos+1]
if grandchild_name[-6:] == "config":
config.verbose_print(f"{indent}Detected config link")
# get grandchildren using config
config_class = tm[grandchild_name]
grandchild_name = dc[dc.index(grandchild_name)+1]
grandchild_class = tm[grandchild_name]
grandchild_pk_col_name = list(
grandchild_class.__table__.primary_key)[0].name
grandchildren_ids_query = session.query(config_class) \
.filter(getattr(config_class, child_pk_name)
== getattr(child_obj, child_pk_name))
else:
config.verbose_print(f"{indent}Detected direct link")
grandchild_class = tm[grandchild_name]
grandchild_pk_col_name = list(
grandchild_class.__table__.primary_key)[0].name
grandchildren_ids_query = session.query(grandchild_class) \
.filter(getattr(grandchild_class, child_pk_name)
== getattr(child_obj, child_pk_name))
grandchildren_ids = [getattr(c, grandchild_pk_col_name)
for c in grandchildren_ids_query.all()]
grandchildren = session.query(grandchild_class) \
.filter(getattr(grandchild_class, grandchild_pk_col_name).in_(grandchildren_ids))
for grandchild_obj in grandchildren.all():
_recursive_copy(session,
curriculum_id,
new_child_obj,
grandchild_obj,
tm,
dc, config, indent_level+1)
```
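`_recursive_copy` above decides whether a junction row is needed by measuring how far apart parent and child sit in the dependency chain: adjacent tables share a direct foreign key, while a gap of two means a `*_config` table lies between them. A standalone check of that rule (the chain list is copied here purely for illustration):
```python
CHAIN = ['curriculum', 'course', 'course_config', 'course_session',
         'course_session_config', 'cgroup', 'cgroup_config', 'component',
         'cost', 'cost_week']

def needs_config_link(parent, child):
    # Gap of 2 => a '<parent>_config' junction table sits between the two.
    return abs(CHAIN.index(parent) - CHAIN.index(child)) == 2

print(needs_config_link('course', 'course_session'))  # True  -> add a course_config row
print(needs_config_link('component', 'cost'))         # False -> direct foreign key
```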
|
{
"source": "jehboyes/finance_manager",
"score": 2
}
|
#### File: cli/cm/curriculum.py
```python
import click
from finance_manager.database import DB
from finance_manager.database.spec import f_set, curr_nonp
from curriculum_model.db.schema.views import CurriculumHours, CurriculumNonPay
from sqlalchemy.sql import select, delete
from sqlalchemy import and_
@click.command()
@click.argument("setcat", type=str)
@click.argument("acad_year", type=int)
@click.pass_obj
def curriculum(config, setcat, acad_year):
"""
Update the curriculum hours for the sets in the given SETCAT and ACAD_YEAR.
"""
with DB(config=config) as db:  # Connect to the finance db to get the sets to update
session = db.session()
sets = session.query(f_set).filter(and_(f_set.set_cat_id == setcat,
f_set.acad_year == acad_year)).all()
session.query(curr_nonp).delete()
session.flush()
# Get connection variables for curriculum database
config.set_section("cm")
with DB(config=config) as cm_db:
with click.progressbar(sets, show_eta=False,
show_percent=True, item_show_func=_progress_print,
label="Updating teaching hours") as bar:
for s in bar:
# get curriculum hours
curriculum_select = select([CurriculumHours.c.hours]) \
.where(and_(CurriculumHours.c.usage_id == s.student_number_usage_id,
CurriculumHours.c.curriculum_id == s.curriculum_id,
CurriculumHours.c.costc == s.costc))
cur_hours = cm_db.con.execute(curriculum_select).fetchall()
if len(cur_hours) > 0:
s.curriculum_hours = cur_hours[0].hours
# Get nonpay
nonpay_select = select([CurriculumNonPay.c.costc,
CurriculumNonPay.c.account,
CurriculumNonPay.c.amount]) \
.where(and_(CurriculumNonPay.c.usage_id == s.student_number_usage_id,
CurriculumNonPay.c.curriculum_id == s.curriculum_id,
CurriculumNonPay.c.costc == s.costc,
CurriculumNonPay.c.acad_year == acad_year))
nonpay_results = cm_db.con.execute(nonpay_select) \
.fetchall()
for result in nonpay_results:
nonp = curr_nonp(set_id=s.set_id,
account=result.account,
amount=result.amount)
session.add(nonp)
session.flush()
session.commit()
def _progress_print(s):
if s is not None:
return f"Processed {s.costc}"
```
#### File: cli/db/crud.py
```python
import click
import csv
import sys
import os
from datetime import datetime
from tabulate import tabulate
from sqlalchemy import and_, Table, MetaData, Integer, Column
from finance_manager.database import DB
from finance_manager.database.spec import table_map, meta
from sqlalchemy.ext.automap import automap_base
@click.command()
@click.argument("cmd", type=click.Choice(['create', 'read', 'update', 'delete']))
@click.argument("table", type=str)
@click.option("--where", "-w", multiple=True, help="Filter which records can be affected as 'field=value'")
@click.option("--value", "-v", multiple=True, help="Specify values as 'field=value'")
@click.option("--export", "-e", is_flag=True, help="Export output from 'read' command.")
@click.pass_obj
def crud(config, cmd, table, where, value, export):
"""
Command for CRUD operations.
Perform Create, Read, Delete & Update options (passed as CMD) on TABLE.
Create and Update require at least 1 VALUE option to be passed;
Update and Delete require at least 1 WHERE option to be passed.
WHERE and VALUE options can be passed multiple times.
Use double quotes around strings that contain spaces,
and enter datetimes as #YY-MM-DD# or #YY-MM-DD-HH-MM#.
"""
c = 'create'
r = 'read'
u = 'update'
d = 'delete'
with DB(config=config) as db:
s = db.session()
try:
# Map the table to the relevant orm object
table_object = table_map[table]
except:
if not cmd == r:
# Fail if trying to do anything but read
raise ValueError(
"Only tables can be passed to create, update or delete.")
else:
auto_meta = MetaData()
table_object = Table(
table, auto_meta, Column(
'dummy_id', Integer, primary_key=True), autoload=True, autoload_with=db._engine)
auto_base = automap_base(metadata=auto_meta)
auto_base.prepare()
table_object = getattr(auto_base.classes, table)
# Generate key values pairs from where and values
wheres = _gen_filters(where, table_object)
values = _gen_kargs_dict(value)
# CREATE
if cmd == c:
record = table_object(**values)
click.echo(_record_to_dict(table_object, record))
s.add(record)
# READ
elif cmd == r:
click.echo(r)
valid_cols = [
c for c in table_object.__table__.columns.keys() if c != 'dummy_id']
results = s.query(*[getattr(table_object, c)
for c in valid_cols]).filter(*wheres)
results_list = []
if not export:
results = results.limit(30)
for r in results.all():
results_list.append(
[getattr(r, col) for col in valid_cols])
if export:
filename = f"{table}_export_{'_'.join(where)}_{datetime.today().strftime('%Y%m%d%H%m%S')}.csv"
filepath = os.path.expanduser('~\\documents\\') + filename
with open(filepath, "w", newline="", encoding="utf-8") as file:
writer = csv.writer(
file, quoting=csv.QUOTE_NONE, escapechar='|')
writer.writerow(valid_cols)
# Unlikely to actually show bar due to speed of write.
for row in results_list:
row = [a.replace(',', '-') if isinstance(a, str) else a
for a in row]
writer.writerow(row)
click.echo(f"{len(results_list)} rows written to {filepath}")
else:
click.echo(
tabulate(results_list, headers=table_object.__table__.columns.keys()))
sys.exit()
# UPDATE
elif cmd == u:
click.echo(u)
records = s.query(table_object).filter(*wheres).all()
for r in records:
for attr, val in values.items():
if val == 'NULL':
setattr(r, attr, None)
else:
setattr(r, attr, val)
click.echo(_record_to_dict(table_object, r))
# DELETE
elif cmd == d:
click.confirm("Confirm delete submission", abort=True)
q = s.query(table_object).filter(*wheres)
click.echo("Records to delete:")
for r in q.all():
click.echo(_record_to_dict(table_object, r))
q.delete()
else:
click.echo(
f"Invalid argument: must be one of {c}, {r}, {u} or {d}")
sys.exit()
if click.confirm("Commit?"):
s.commit()
else:
s.rollback()
def _gen_kargs_dict(lst):
"""Creates a dictionary, to be unpacked as kargs for ORM work.
If obj passed, uses the class name to prefix
"""
d = {}
for i in lst:
s = i.split("=")
if s[1] == "NULL":
s[1] = None
elif s[1][0] == "#" and s[1][-1] == "#":
s[1] = s[1].replace("#", "")
v = [int(x) for x in s[1].split("-")]
s[1] = datetime(*v)
elif s[1].lower() == 'false':
s[1] = False
elif s[1].lower() == 'true':
s[1] = True
d.update({s[0]: s[1]})
return d
def _gen_filters(lst, obj):
return tuple(getattr(obj, f.split("=")[0]) == f.split("=")[1] for f in lst)
def _record_to_dict(tbl, record):
return {col: getattr(record, col) for col in tbl.__table__.columns.keys()}
```
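The `--value`/`--where` options above are parsed by `_gen_kargs_dict`, which treats `NULL`, `true`/`false` and `#Y-M-D[-H-M]#` tokens specially. A minimal re-implementation of that value parsing (illustration only, not imported from the package) shows what each token becomes:
```python
from datetime import datetime

def parse_value(raw):
    if raw == "NULL":
        return None
    if raw.startswith("#") and raw.endswith("#"):
        parts = [int(x) for x in raw.strip("#").split("-")]
        return datetime(*parts)                 # e.g. #2021-08-01# or #2021-08-01-09-30#
    if raw.lower() == "true":
        return True
    if raw.lower() == "false":
        return False
    return raw

print(parse_value("NULL"))                # None
print(parse_value("#2021-08-01#"))        # 2021-08-01 00:00:00
print(parse_value("#2021-08-01-09-30#"))  # 2021-08-01 09:30:00
print(parse_value("true"), parse_value("Main Office"))   # True Main Office
```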
#### File: finance_manager/cli/docs.py
```python
import click
from finance_manager.database.spec import directorate
from finance_manager.database import DB
@click.command()
@click.argument("template", type=click.Path(exists=True))
@click.argument("folder", type=click.Path(exists=True))
@click.option("--version", "-v", type=str, help="Append a given version identifier.")
@click.option("--disconnect", "-d", is_flag=True, help="Run the Disconnect macro to sever connections.")
@click.option("--restrict", "-r", type=str, help="Restrict to a given directorate.", default="")
@click.pass_obj
def docs(config, template, folder, version, disconnect, restrict):
"""
Generate documentation for each directorate.
Currently relies on the template having a sheet called 'data_Params', with the columns laid out
as configured in this source code. Only works on Windows.
"""
# import statement within function to prevent import breaking the entire cli when not on windows
import win32com.client
if folder[-1] == '\\':
folder = folder[:-1]  # strip a single trailing backslash
with DB(config=config) as db:
session = db.session()
directorates = session.query(directorate).filter(
directorate.director_name.isnot(None))
if len(restrict) == 1:
directorates = directorates.filter(
directorate.directorate_id == restrict)
directorates = directorates.all()
# Create an excel app
xlapp = win32com.client.DispatchEx("Excel.Application")
xlapp.DisplayAlerts = False
# Open the workbook in said instance of Excel
wb = xlapp.workbooks.open(template)
if disconnect:
file_password = None
else:
file_password = '<PASSWORD>'
with click.progressbar(directorates) as bar:
for d in bar:
ws = wb.Worksheets("data_Params")
ws.Range("A2").Value = d.directorate_id
ws.Range("D2").Value = d.description
ws.Range("E2").Value = d.director_name
acad_year = ws.Range("C2").Value
set_cat_id = ws.Range("B2").Value
namelist = [d.description, set_cat_id]
if version is not None:
if version[0].lower() == 'v':
version = version[1:]
version = 'v'+version
namelist.append(version)
filename = folder + '\\' + \
' '.join(namelist) + '.xlsm'
macro_name = "'" + wb.name + "'!Automation.UpdateRefreshConnections"
xlapp.Run(macro_name)
if disconnect:
macro_name = "'" + wb.name + "'!Automation.Disconnect"
xlapp.Run(macro_name)
wb.SaveAs(filename, None, file_password)
if disconnect:
# Have to close and reopen as connections severed
wb.Close()
wb = xlapp.workbooks.open(template)
xlapp.Quit()
```
#### File: cli/finance/_reconciliation.py
```python
import click
@click.pass_obj
def reconciliation(config):
"""
Reconcile HE income in sets to QL.
Intended to check that figures presented for a given ACAD_YEAR SET_CAT reconcile to those in QL.
Parameters
----------
config : Config
Custom config object.
"""
pass
```
#### File: cli/settings/configure.py
```python
import click
@click.command()
@click.option("--env", type=str, help="Use this to change the environment")
@click.argument("pairs", nargs=-1)
@click.pass_obj
def configure(config, pairs, env):
"""
Takes key:value 'PAIRS' and adds/updates as neccesary.
"""
# change env if passed
if env is not None:
config.set_env(env)
# And the actual values, if passed
try:
pairs = {p[0]: p[1] for p in [pair.split(":") for pair in pairs]}
config.write(pairs)
except:
print("Set command failed. Check key:value argument(s) valid. ")
```
#### File: finance_manager/finance_manager/config.py
```python
from configparser import ConfigParser
import os
class Config(object):
"""
Configuration object used for variable management
Uses ConfigParser and local ``config.ini`` to read/write local variables.
The ``env`` variable is a bit meta: it holds the name of the section to use when the config
is initialised.
"""
def __init__(self, verbose=False):
self.file_name = os.path.join(os.path.dirname(
os.path.dirname(__file__)), 'config.ini')
self._cp = ConfigParser()
self.default_string = "DEFAULT"
self.env_string = 'env'
self.verbose = verbose
if os.path.isfile(self.file_name):
self._cp.read(self.file_name)
self.set_section(
self._cp[self.default_string].get(self.env_string, self.default_string))
else:
# First time run locally
self.set_section(self.default_string)
defaults = {self.env_string: self.default_string,
'database': '',
'server': '',
'dialect': '',
'py_driver': '',
'driver': '',
'username': '',
'password': ''}
self.write(defaults)
def set_env(self, name=None):
name = self.default_string if name is None else name
self._cp[self.default_string][self.env_string] = name
self.set_section(name)
def set_section(self, section):
"""
Set the name of the environment/section to look for and use
"""
self.section = section
self._cp[self.default_string][self.env_string] = section
if self.section not in self._cp.keys():
self._cp[self.section] = {}
def read_section(self, section=None):
"""
Return all the variables in an environment/section as a formatted string
"""
if section is not None:
self.set_section(section)
items = [str(item) + "=" + self.read(item)
for item in self._cp[self.section]]
items.sort()
item_list = "\n".join(items)
return item_list
def read(self, item):
"""
Read a specific item in the current environment/section
"""
s = self._cp[self.section]
return s.get(item, "MISSING")
def write(self, item_dict):
"""
Update or create config variables in the current environment/section
"""
for item, value in item_dict.items():
print(f"{self.section}: Writing {item} as {value}")
self._cp[self.section][item] = value
self._save()
def _save(self):
with open(self.file_name, "w") as config_file:
self._cp.write(config_file)
```
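`Config` stores the name of the active environment in the `DEFAULT` section and reads every other variable from the section that name points at, falling back to "MISSING" when a key is absent. A condensed standalone sketch of that pattern using `configparser` directly (the file name and keys below are illustrative, not the project's real settings):
```python
from configparser import ConfigParser

cp = ConfigParser()
cp["DEFAULT"] = {"env": "test"}                        # which section is active
cp["test"] = {"server": "localhost", "database": "fm_test"}
cp["live"] = {"server": "db.example.org", "database": "fm_live"}

active = cp["DEFAULT"]["env"]                          # -> "test"
print(cp[active].get("server", "MISSING"))             # localhost
print(cp[active].get("driver", "MISSING"))             # MISSING, like Config.read
cp["DEFAULT"]["env"] = "live"                          # switch environment, like set_env
print(cp[cp["DEFAULT"]["env"]].get("server", "MISSING"))   # db.example.org

with open("config_demo.ini", "w") as fh:               # persist, as Config._save does
    cp.write(fh)
```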
#### File: database/views/__init__.py
```python
import importlib
import inspect
from os import listdir, path
from finance_manager.database.replaceable import ReplaceableObject as o
from finance_manager.functions import periods
from finance_manager.database import DB
from finance_manager.database.spec import f_set, finance_instance
# List of named periods
p = [f'p{n}' for n in periods()]
# p1 + ... + p12
p_sum_string = "+".join(p)
# p1, ... , p12
p_list_string = ", ".join(p)
# Shorthand, as needs to be standardised
account_description = "a.account + ' ' + a.description as account_description"
# Shorthand for finance summary (set summary) description
finance_summary = "cast(s.acad_year as varchar) + ' ' + s.set_cat_id as finance_summary"
def _sql_bound(max_or_min, *fields):
"""
Produces sql to return the maximum of two fields.
Parameters
----------
max_or_min : str
One of MAX or MIN, depending on behaviour desired.
fields : str
Field names of the inputs to the max/min function.
Returns
-------
str
String like 'SELECT [MAX or MIN](n) FROM (VALUES) as VALUE(n)'
"""
cmd = max_or_min.upper()
if cmd != "MAX" and cmd != "MIN":
raise ValueError("Invalid command: must be MAX or MIN")
field_str = ",".join([f"({field})" for field in fields])
sql = f"SELECT {max_or_min}(n) FROM (VALUES {field_str}) as value(n)"
return sql
def _generate_p_string(str_format, join_with=None, restrict=None):
"""Generates a list of periods in the given format.
Use {p} where the period number is required in the format string.
Parameters
----------
format : str
String to format with the period number.
join_with : str
If passed, joins the list with the given path
restrict : int
If n passed, restricts the output to the first n periods
Returns
-------
list
List of numbers from 1 to 12 (or less if restrict was passed)
"""
if restrict is None:
restrict = 12
lst = [str_format.format(p=n) for n in periods(restrict)]
if join_with is not None:
lst = join_with.join(lst)
return lst
def _get_set_cols(session, auto_format=True):
"""
Return finance_summary strings.
Only requires database connection. Returns each
combination of year and set_code_id alraedy in use. For example,
if 2020 BP1 exists, then [2020 BP1] will be one of the values returned.
Parameters
----------
session : Object
SQL Alchemy session object.
auto_format : boolean
True returns a string with commas and square braces (for SQL). False returns list.
Returns
-------
str
SQL compatible list, or list if auto_format set to false.
"""
col_list = []
for year, cat in session.query(f_set.acad_year, f_set.set_cat_id).join(finance_instance).distinct():
name = ' '.join([str(year), cat])
col_list.append(name)
pvt_list = ", ".join(f"[{n}]" for n in col_list)
if auto_format:
return pvt_list
else:
return col_list
def get_views(session):
"""
Return a list of views as replaceable objects.
Defined as a function rather than a list to avoid code running at import time. Each file in this folder should define a function `_view` which returns a replaceable object, which is
a simple class defined in replaceable.
Parameters
----------
session : Object
SQL Alchemy session object, because some views header definitions depend on data.
"""
# Detect files defined in directory
p = path.dirname(__file__)
files = listdir(p)
modules = [importlib.import_module(
".."+f[:-3], "finance_manager.database.views.") for f in files if f[:2] == "v_"]
view_list = []
for module in modules:
# Test for whether the view requires a session object for header construction
if "session" in inspect.getfullargspec(module._view)[0]:
view_list.append(module._view(session))
else:
view_list.append(module._view())
return view_list
def get_headers(sql, prefix=None):
"""Returns a list of headers in an sql select string"""
cols = []
open_parenth = False
clean_sql = ""
# Remove anything in parentheses, liable to contain commas
for i, char in enumerate(sql):
if char == "(":
open_parenth = True
elif char == ")":
open_parenth = False
if not open_parenth:
clean_sql += sql[i].replace("\t", " ")
for col in clean_sql.split("FROM")[0].replace("SELECT", "").split(","):
if " as " in col.lower():
c = col.split(" as ")[-1]
elif "." in col.lower():
c = col.split(".")[-1]
else:
c = col
cols.append(c.strip())
if prefix != None:
cols = ", ".join([".".join([prefix, col]) for col in cols])
return cols
```
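The helpers above only build strings; a minimal usage sketch follows (outputs shown in comments, derived from the definitions above; the column aliases are illustrative only):
```python
from finance_manager.database.views import _sql_bound, _generate_p_string

# Per-period column list for a SELECT clause
cols = _generate_p_string("i.p{p} as p{p}", ", ", restrict=3)
# -> "i.p1 as p1, i.p2 as p2, i.p3 as p3"

# Scalar subquery flooring an expression at zero
floor_sql = _sql_bound("MAX", "i.rate - threshold", "0")
# -> "SELECT MAX(n) FROM (VALUES (i.rate - threshold),(0)) as value(n)"
```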
#### File: database/views/v_calc_claim.py
```python
from finance_manager.functions import periods
from finance_manager.database.replaceable import ReplaceableObject as o
from finance_manager.database.views import account_description, _generate_p_string, _sql_bound
# Claim rate of pay
rate_calculation = "ROUND((ISNULL(i.rate, 0)*variable_rate+t.rate_uplift)*base_multiplier*holiday_multiplier, 2)"
# Need own period list (instead of from views) as need alias prefix
i_periods = _generate_p_string("i.p{p} as p{p}", ",")
i_periods_summed = _generate_p_string("i.p{p}", "+")
# Estimate national insurance by period - deduct an estimated hourly threshold from the rate, then multiply by hours, NI rate and the apply_ni flag
ni_periods = ",\n".join(
["("+_sql_bound("MAX", f"{rate_calculation}-ni.p{n}/37", "0")+f")*i.p{n}*ni.rate*t.apply_ni as ni_p{n}" for n in periods()])
# Heavily simplified pension calculation - applied to anything not casual
pension_periods = _generate_p_string(
"i.p{p}*" + rate_calculation + "*t.apply_pension*pen.p{p} as pension_p{p}", ",\n")
sql = f"""
SELECT i.set_id, i.claim_id, CASE i.claim_type_id WHEN 'CAS' THEN 2102 ELSE i.account END as account,
i.description,
CASE i.claim_type_id WHEN 'CAS' THEN '2102 Casual Claims' ELSE a.account + ' ' + a.description END as account_description,
i.rate, {rate_calculation} as adjusted_rate,
t.description as claim_type, t.claim_type_id,
a.description as account_name,
{i_periods},
{ni_periods},
{pension_periods},
({i_periods_summed})*{rate_calculation} as amount
FROM input_pay_claim i
LEFT OUTER JOIN input_pay_claim_type t ON i.claim_type_id = t.claim_type_id
LEFT OUTER JOIN fs_account a ON i.account = a.account
INNER JOIN f_set s ON s.set_id = i.set_id
INNER JOIN staff_ni ni ON ni.acad_year = s.acad_year
INNER JOIN staff_pension_contrib pen ON pen.pension_id = 'WP' AND pen.acad_year = s.acad_year
"""
def _view():
view = o("v_calc_claim", sql)
return view
if __name__ == "__main__":
print(sql)
```
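Per claim line and period, the NI estimate embedded in the SQL above reduces to the arithmetic below (a Python transcription for orientation only; the argument names are illustrative, and the division by 37 approximates an hourly threshold as noted in the source comment):
```python
def estimated_ni(adjusted_rate, ni_threshold, hours, ni_rate, apply_ni):
    # Mirrors: MAX(adjusted_rate - ni.p{n}/37, 0) * i.p{n} * ni.rate * t.apply_ni
    hourly_threshold = ni_threshold / 37
    return max(adjusted_rate - hourly_threshold, 0) * hours * ni_rate * apply_ni
```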
#### File: database/views/v_calc_internal_balance.py
```python
from finance_manager.database.replaceable import ReplaceableObject as o
def _view():
inner_sql = """
SELECT fs.costc, i.costc as contra_costc, fs.acad_year, fs.set_cat_id, LEFT(a.account, 1) as accf,
SUM(i.amount * -1 * e.coefficient) as total
FROM input_nonp_internal i
LEFT OUTER JOIN fs_account a ON i.account = a.account
LEFT OUTER JOIN fs_entry_type e ON e.balance_type = a.default_balance
INNER JOIN f_set fs ON fs.set_id = i.set_id
GROUP BY i.costc, fs.costc, fs.acad_year, fs.set_cat_id, LEFT(a.account,1)
"""
outer_sql = f"""
SELECT base.*, contra.total as contra_total, ISNULL(base.total,0)+ISNULL(contra.total,0) as net
FROM
({inner_sql}) as base
LEFT OUTER JOIN
({inner_sql}) as contra
ON contra.costc = base.contra_costc --Matching contras
AND base.costc = contra.contra_costc --Matching contras
AND contra.acad_year = base.acad_year --Matching same
AND contra.accf = base.accf
AND base.set_cat_id = contra.set_cat_id"""
return o("v_calc_internal_balance", outer_sql)
```
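In essence, the self-join pairs each internal line with its mirror image in the other cost centre and sums the two; a toy illustration with made-up figures (the cost centre codes are examples only):
```python
# Totals keyed by (costc, contra_costc), signs already flipped as in the inner query
base = {("MC1700", "MB1100"): -500.0}   # MC1700 recorded a 500 charge to MB1100
contra = {("MB1100", "MC1700"): 480.0}  # MB1100 recorded only 480 the other way
costc, contra_costc = "MC1700", "MB1100"
net = base.get((costc, contra_costc), 0) + contra.get((contra_costc, costc), 0)
# net == -20.0: the two cost centres' internal postings do not agree
```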
#### File: database/views/v_calc_staff_monthly_all.py
```python
from finance_manager.database.replaceable import ReplaceableObject as o
from finance_manager.functions import periods
staff_month_ni = ", \n".join(
[f"ISNULL(dbo.udfNI(mt.p{n}, ni.p{n}, ni.rate)*m.p{n}/NULLIF(ISNULL(NULLIF(mt.p{n},0),m.p{n}),0),0) as ni_p{n}" for n in periods()])
staff_month_pension = ", \n".join(
[f"m.p{n}*ISNULL(pension.p{n},0) as pension_p{n}" for n in periods()])
staff_travel_months = 12
staff_travel_allowance = ", \n ".join(
[f"ISNULL(s.travel_scheme,0)/{staff_travel_months} as travel_p{n}" for n in periods()])
def _view():
v = o("v_calc_staff_monthly_all", f"""
SELECT m.*,
{staff_month_ni},
{staff_month_pension},
{staff_travel_allowance}
FROM v_calc_staff_monthly m
INNER JOIN v_calc_staff_monthly_total mt ON m.staff_Id = mt.staff_id
AND m.acad_year = mt.acad_year
AND m.set_cat_id = mt.set_cat_id
INNER JOIN input_pay_staff s ON s.staff_line_id = m.staff_line_id
LEFT OUTER JOIN staff_pension_contrib pension ON pension.pension_id = s.pension_id AND pension.acad_year = m.acad_year
INNER JOIN staff_ni ni ON ni.acad_year = m.acad_year
""")
return v
```
#### File: database/views/v_calc_staff_monthly.py
```python
from finance_manager.database.replaceable import ReplaceableObject as o
from finance_manager.functions import periods
from finance_manager.database.views import _generate_p_string
# work out a line's monthly salary
staff_month_sal = _generate_p_string(
"""dbo.udfGetMonthProp(f_set.acad_year, {p}, s.start_date, s.end_date)
*vFTE.FTE
*(
ISNULL(dbo.udfGetMonthSpine(f_set.acad_year, {p}, s.start_date, s.current_spine, s.grade, f_set.set_cat_id),0)
+ISNULL(s.allowances,0)
)/12 as p{p}""", ", \n")
# get actual spine point for displaying in app
staff_month_sp = _generate_p_string(
"dbo.udfGetMonthSpinePoint(f_set.acad_year, {p}, s.start_date, s.current_spine, s.grade) as sp_p{p}", ", \n")
def _view():
v = o("v_calc_staff_monthly", f"""
SELECT s.staff_line_id, s.post_status_id, s.set_id, f_set.acad_year, f_set.set_cat_id, ISNULL(s.staff_id, s.staff_line_id) as staff_id,
{staff_month_sal},
{staff_month_sp}
FROM input_pay_staff s
INNER JOIN f_set ON f_set.set_id=s.set_id
LEFT OUTER JOIN staff_spine ss on ss.spine=s.current_spine AND f_set.acad_year=ss.acad_year AND f_set.set_cat_id=ss.set_cat_id
INNER JOIN v_calc_staff_fte vFTE on vFTE.staff_line_id=s.staff_line_id
""")
return v
```
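Stripped of the SQL scalar functions, the salary line computed above has the following shape (a Python sketch; month_prop and spine_value come from udfGetMonthProp and udfGetMonthSpine respectively):
```python
def monthly_salary(month_prop, fte, spine_value, allowances):
    # month_prop: proportion of the month the post is active (0 to 1)
    return month_prop * fte * ((spine_value or 0) + (allowances or 0)) / 12
```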
#### File: database/views/v_cons_staff.py
```python
from finance_manager.database.replaceable import ReplaceableObject as o
sql = f"""
SELECT s.acad_year, s.set_cat_id, cc.directorate_id,
Title, ISNULL(name, 'Unnamed') as Name, Grade,
current_spine as Spine, SUM(indicative_fte) as FTE, SUM(Allowances) as Allowances,
SUM(pay_total) as Salary, SUM(ni_total) as NI, SUM(pension_total) as Pension,
SUM(pay_total+ ni_total+ pension_total) as [Grand Total]
FROM v_input_pay_staff v
INNER JOIN f_set s ON v.set_id = s.set_id
INNER JOIN fs_cost_centre cc ON cc.costc = s.costc
GROUP BY s.acad_year, s.set_cat_id, cc.directorate_id,
v.title, v.name, v.grade,
v.current_spine
"""
def _view():
return o("v_cons_staff", sql)
```
#### File: database/views/v_input_inc_other.py
```python
from finance_manager.database.replaceable import ReplaceableObject as o
from finance_manager.database.views import account_description, p_list_string, p_sum_string
def _view():
view = o("v_input_inc_other", f"""
SELECT i.inc_id, i.account, a.description as account_name, {account_description}, i.description, i.set_id,
{p_list_string}, {p_sum_string} as amount
FROM input_inc_other i
LEFT OUTER JOIN fs_account a ON i.account = a.account""")
return view
```
#### File: database/views/v_input_pay_fracclaim.py
```python
from finance_manager.database.replaceable import ReplaceableObject as o
from finance_manager.functions import periods
def _view():
# for a period table
values_clause = ", ".join([f"({n})" for n in periods()])
view = o("v_input_pay_fracclaim", f"""
SELECT fs.set_id, ISNULL(fc.hours, 0) as hours, p.period
FROM f_set fs
CROSS JOIN
(SELECT * FROM
(VALUES {values_clause}) AS X(period)) as p
LEFT OUTER JOIN input_pay_fracclaim fc ON fc.set_id = fs.set_id AND fc.period = p.period
""")
return view
```
#### File: database/views/v_luminate_commercial.py
```python
from finance_manager.database.replaceable import ReplaceableObject as o
sql = f"""
SELECT c.directorate_id, s.acad_year, s.set_cat_id, c.costc + ' ' + x.description as description, x.amount
FROM
(
--Courses
SELECT set_id, course_name as description, total as amount FROM v_input_inc_courses
WHERE total <> 0
UNION ALL
--Other
SELECT set_id, i.description, SUM(amount) as amount
FROM v_input_inc_other i INNER JOIN fs_account a on a.account = i.account
WHERE a.summary_code = 104
GROUP BY set_id, i.description
Having SUM(amount) <> 0
) x
INNER JOIN f_set s ON x.set_id = s.set_id
INNER JOIN fs_cost_centre c ON c.costc = s.costc
WHERE s.surpress = 0
"""
def _view():
return o("v_luminate_commercial", sql)
```
#### File: database/views/v_luminate_finance.py
```python
from finance_manager.database.replaceable import ReplaceableObject as o
sql = f"""
SELECT v.*, s.acad_year, s.costc, CAST(s.acad_year as CHAR(4)) + ' ' + s.set_cat_id as finance_summary,
CASE x.n WHEN 1 THEN c.directorate_id ELSE 'Z' END as directorate_id, --Creates a directorate Z copy of each line
s.costc + ' ' + c.description as costc_description
FROM v_mri_finance_grouped_subtotal v
INNER JOIN f_set s ON v.set_id = s.set_id
INNER JOIN fs_cost_centre c on c.costc = s.costc
CROSS JOIN (SELECT * FROM (VALUES (1), (2)) x(n)) as x
WHERE s.surpress = 0
UNION ALL
SELECT v.set_id, v.summary_code, v.summary, v.subsection, v.section, v.supersection, v.summary_order, v.sub_order,
v.sec_order, v.super_order,
-v.amount as amount, v.level, v.id, -v.intuitive_amount,
s.acad_year, s.costc, CAST(s.acad_year as CHAR(4)) + ' ' + s.set_cat_id as finance_summary, 'Z' as directorate_id,
s.costc + ' ' + c.description as costc_description
FROM v_mri_finance_grouped_subtotal_internal v
INNER JOIN f_set s ON v.set_id = s.set_id
INNER JOIN fs_cost_centre c on c.costc = s.costc
"""
def _view():
return o("v_luminate_finance", sql)
```
#### File: database/views/v_mri_finance_export.py
```python
from finance_manager.database.replaceable import ReplaceableObject as o
def _view():
sql = """
SELECT s.set_cat_id, f.acad_year, f.costc, ISNULL(m.t, f.account) as account, f.period,
ROUND(SUM(f.amount*f.coefficient),2) as amount, CASE f.costc WHEN 'MZ1000' THEN 1 ELSE 0 END as governors
FROM v_mri_finance f
INNER JOIN f_set s ON s.set_id = f.set_id
LEFT JOIN (SELECT * FROM (VALUES (1900, 1240), (1901, 1245)) x(f, t)) as m ON m.f=f.account
WHERE f.amount <> 0
GROUP BY s.set_cat_id, f.acad_year, f.costc, m.t, f.account, f.period
"""
return o("v_mri_finance_export", sql)
```
#### File: finance_manager/finance_manager/functions.py
```python
from importlib import import_module as imp
from os import listdir, path
import sys
import functools
import click
class periods():
"""
Iterator for financial periods.
Exists for brevity/clarity in actual code. Outputs the numbers 1 to 12,
unless restricted by passing the ``end`` parameter on construction.
"""
def __init__(self, end=12):
"""
Parameters
----------
end : int, optional
The final month to output, useful for dynamic in-year processing, but by default 12.
"""
self.end = end
pass
def __iter__(self):
self.a = 1
return self
def __next__(self):
if self.a <= self.end:
x = self.a
self.a += 1
return x
else:
raise StopIteration
def period_to_month(period, acad_year):
"""
Financial month and year to calendar month and year.
Converts a period and academic year into the actual month number and calendar year.
Parameters
----------
period : int
Accounting period
acad_year : int
Academic year (calendar year commencing)
Returns
-------
tuple
Month, Calendar year
Examples
--------
Period 1 (August) in the 2020 financial year:
>>> period_to_month(1,2020)
(8, 2020)
Period 6 (January) in the 1984 financial year:
>>> period_to_month(6, 1984)
(1, 1985)
"""
# Because August is P1
period += 7
# Increment calendar year if new period is in next year (i.e. >12)
acad_year += (period-1)//12
# Bring period back to legitimate month number, and correct for 0
period = period % 12
if period == 0:
period = 12
return period, acad_year
def sa_con_string(dialect, server, db, py_driver=None, user=None, password='', driver=None):
"""
Formats connection variables into SQL Alchemy string.
Intended for brevity elsewhere in the App. For more detail,
see the `SQLAlchemy Engine Configuration <https://docs.sqlalchemy.org/en/13/core/engines.html>`_ page.
Parameters
----------
dialect : str
SQLAlchemy-recognised name for the DBMS, such as `mssql` or `sqlite`
server : str
Server/host name
db : str
Database name
py_driver : str
Name of additional driver required for dialect connection (e.g. pyodbc)
user : str
        Username, if used. If omitted, connection uses Windows credentials (via trusted connection).
password : str
Password for given username. Can be blank.
driver : str
Specific driver to use when connecting.
Returns
-------
str
SQL Alchemy engine connection string.
"""
# Configure security
user = '' if user is None else user
if len(user) > 0:
login = user + ':' + password
trust = ''
else:
login = ''
trust = '?trusted_connection=yes'
# Configure dialect
if py_driver is not None:
dialect = '+'.join([dialect, py_driver])
    # Configure the specific ODBC driver, if given
if driver is not None and len(driver) > 0:
driver = '&driver='+driver.replace(" ", "+")
con = f"{dialect}://{login}@{server}/{db}{trust}{driver}" + \
";MARS_Connection=Yes"
return con
def normalise_period(val):
"""Return an integer from 1 to 12.
Parameters
----------
val : str or int
Variant for period. Should at least contain numeric characters.
Returns
-------
int
Number corresponding to financial period.
Examples
--------
>>> normalise_period('P6')
6
>>> normalise_period(202106)
6
"""
val = ''.join(c for c in str(val) if c.isdigit())
return int(val[-2:])
def level_to_session(level):
"""
Converts study level to a year of study.
Intended for use with the level descriptions that come out of the
HE In Year Cohort web report, but applicable to other instances.
Parameters
----------
level : str
The text version of a level. Should begin with the word 'level'.
Returns
-------
int
The year of study that the level (typically) corresponds to.
"""
session = "X"
if level[:5].upper() == "LEVEL":
session = int(level[-1]) - 3
else:
session = 1
return session
def name_to_aos(name):
"""
Converts a verbose course name to its aos_code
    Essentially a fuzzy matching function, intended for use when reverse-engineering web reports.
Parameters
----------
name : str
The course description. Can include year.
Returns
-------
str
The 6-character aos_code.
int
Session, i.e. year of study. If no numeric characters were
in the ``name``, this will default to -1.
Examples
--------
>>> name_to_aos('Jazz Year 1')
('HBAMJA', 1)
When no numeric year information appears
>>> name_to_aos('Jazz Year Two')
('HBAMJA', -1)
"""
aos_abbr = [["Business", "BU", ""],
["Classical", "CM", "C"],
["Film", "FM"],
["Folk", "FO", "F"],
["Jazz", "JA", "J"],
["Production", "PR", "M"],
["Popular", "PM", "P"],
["Songwriting", "SW"],
["Acting", "ACT"],
["Actor Musician", "AMU"],
["Musical Theatre", "MTH"]]
aos_code = ""
quals = ["BA ", "FD", "MMus", "MA "]
fd_triggers = ["electronic", "foundation degree", "FD"]
pg_triggers = ["creative", "mmus"]
# Check the name contains qualification
has_qual = any([qual.lower() in name.lower() for qual in quals])
if any([t.lower() in name.lower() for t in pg_triggers]):
aos_code = "HMMCRM"
elif any([t.lower() in name.lower() for t in fd_triggers]):
aos_code = "HFD"
if "Electronic" in name or "EMP" in name:
aos_code += "EMP"
else:
aos_code += "MPM"
elif name[:2] == "BA" or not has_qual: # IE assume BA if not specified
aos_code = "HBA"
if "with" in name:
# i.e. is combined
aos_code += "C"
withpos = name.index("with")
for p in aos_abbr:
if p[0] in name[:withpos]:
aos_code += p[2]
for p in aos_abbr:
if p[0] in name[withpos:]:
aos_code += p[2]
else: # Music and Acting/MT
for p in aos_abbr:
if p[0] in name:
if len(p[1]) == 2:
aos_code += "M"
aos_code += p[1]
break
if len(aos_code) != 6:
raise ValueError(
f"Unable to recognise {name}. Got as far as '{aos_code}''.")
# And then the numeric bit
num = -1
for char in name:
if char.isdigit():
num = int(char)
break
return aos_code, num
def _add_subcommands(parent, file, package):
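    """Import each non-underscore module in the package and register its click command (named after the module) with parent."""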
p = path.dirname(file)
files = listdir(p)
this_package = sys.modules[package].__name__
modules = [imp(this_package+"."+f.replace(".py", ""), )
for f in files if f[0] != "_"]
commands = [getattr(module, module.__name__[module.__name__.rfind(".")+1:])
for module in modules]
for _ in commands:
parent.add_command(_)
```
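A quick sketch of the connection-string helper and the period iterator in use (server, database and driver names are placeholders):
```python
from finance_manager.functions import sa_con_string, periods

uri = sa_con_string("mssql", "MYSERVER", "planning",
                    py_driver="pyodbc", driver="ODBC Driver 17 for SQL Server")
# -> "mssql+pyodbc://@MYSERVER/planning?trusted_connection=yes&driver=ODBC+Driver+17+for+SQL+Server;MARS_Connection=Yes"

# periods() is the iterator used throughout the view builders
assert list(periods(3)) == [1, 2, 3]
```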
#### File: finance_manager/tests/test_functions.py
```python
import unittest
from finance_manager import functions
class TestFunctions(unittest.TestCase):
def test_normalise_period(self):
"""
Check the period normalising function.
"""
        # Check known inputs convert to the expected period
self.assertEqual(functions.normalise_period("P6"), 6)
self.assertEqual(functions.normalise_period(202106), 6)
def test_period_to_month(self):
self.assertEqual(functions.period_to_month(10, 2007), (5, 2008))
self.assertEqual(functions.period_to_month(13, 2007), (8, 2008))
self.assertEqual(functions.period_to_month(0, 2007), (7, 2007))
if __name__ == "__main__":
unittest.main()
```
|
{
"source": "jehboyes/planning_system",
"score": 2
}
|
#### File: blueprints/home/__init__.py
```python
from flask import Blueprint, render_template
home_bp = Blueprint(
'home_bp', __name__,
template_folder='templates',
static_folder='static'
)
@home_bp.route('/', methods=['GET'])
def home():
"""homepage."""
return render_template(
'home.jinja2',
title="Test",
description="A description")
```
#### File: blueprints/user/login.py
```python
from flask_login import logout_user, login_user
from .bp import user_bp
from planning_system.db.schema.tables.app import User
from flask import redirect, url_for, render_template
from flask_wtf import FlaskForm
from wtforms import TextField, SubmitField, PasswordField
from wtforms.validators import DataRequired
class LoginForm(FlaskForm):
"""Simple user login"""
username = TextField("Username", [DataRequired()])
password = PasswordField("Password", [DataRequired()])
submit = SubmitField("Submit")
@user_bp.route("/login", methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user is None or not user.check_password(form.password.data):
return redirect(url_for('login'))
login_user(user)
return redirect(url_for('home_bp.home'))
return render_template('user_login.jinja2',
title="User login",
form=form)
@user_bp.route("/logout", methods=["GET"])
def logout():
logout_user()
return redirect(url_for("home_bp.home"))
```
#### File: admin/scripts/stureporting.py
```python
import click
import sys
from sqlalchemy import select, update, delete, insert, or_, and_, not_
from planning_system.api.functions import quick_csv
from planning_system.db import DB
from planning_system.db.schema.tables.finance_structure import account, report_cat_config, cost_centre, summary_code
@click.command()
@click.pass_obj
def stureporting(config):
"""
    Populates the report_cat_config table.
"""
rc = report_cat_config.__table__
with DB(config.uri, config.echo) as db:
session = db.session()
statements = []
# List of pay accounts for reference
pay_accounts = session.execute(
select([account.account]).where(or_(and_(account.account >= 2000,
account.account < 3000),
account.summary_code == 401,
account.summary_code == 405)))
pay_accounts = [a.account for a in pay_accounts]
# Non pay accounts for reference
nonp_accounts = session.execute(
select([account.account]).where(or_(and_(account.summary_code >= 301,
account.summary_code < 400),
account.account == 4017)))
nonp_accounts = [a.account for a in nonp_accounts]
# reset existing
statements.append(delete(report_cat_config))
cross_join = select([account.account, cost_centre.costc]) \
.join(cost_centre, account.account != None)
statements.append(insert(report_cat_config).from_select(
['account', 'costc'], cross_join))
# Other staff costs
statements.append(rc.update()
.where(and_(rc.c.account.in_(pay_accounts)))
.values(rep_cat_a_id='OSO'))
# academic pay
cc_academic = "".join("MA1700,MC1610,MC1700,MC1810,MC1814,MC1820,MC1824," +
"MC1825,MC1827,MC1830,MC1910,MC1912,MC1920,MC1922," +
"MC1923,MC1924,MC1925,MC2000,MC2010,MC2923").split(",")
statements.append(rc.update()
.where(rc.c.costc.in_(cc_academic))
.where(rc.c.account.in_(pay_accounts))
.values(rep_cat_a_id='ACS'))
# facilities staff costs
cc_facility = "MB1100,MB1130,MC1400,MC1430,MB1410".split(",")
statements.append(rc.update()
.where(and_(rc.c.costc.in_(cc_facility),
rc.c.account.in_(pay_accounts),
or_(rc.c.costc != 'MB1100', # Internal rent appears on this combo
rc.c.account != 4371)))
.values(rep_cat_a_id='OSF'))
# Academic support staff costs
cc_support = session.execute(
select([cost_centre.costc]).where(((cost_centre.directorate_id == "Q") &
(cost_centre.costc != "MC1400")) |
(cost_centre.costc.in_(['MA1420', 'MA1450', 'MC1410']))))
cc_support = [c.costc for c in cc_support]
statements.append(rc.update()
.where(and_(rc.c.costc.in_(cc_support),
rc.c.account.in_(pay_accounts)))
.values(rep_cat_a_id='OSA'))
# Other operating
statements.append(rc.update()
.where(rc.c.account.in_(nonp_accounts))
.values(rep_cat_a_id='OEO'))
# Academic operating costs
statements.append(rc.update()
.where(and_(rc.c.costc.in_(cc_academic),
rc.c.account.in_(nonp_accounts)))
.values(rep_cat_a_id="OEA"))
# academic support operating
statements.append(rc.update()
.where(and_(rc.c.costc.in_(cc_support),
rc.c.account.in_(nonp_accounts)))
.values(rep_cat_a_id='OES'))
# facilities operating
depr_accounts = session.execute(
select([account.account]).where(account.summary_code == 402))
depr_accounts = [a.account for a in depr_accounts]
statements.append(rc.update()
.where(or_(and_(rc.c.costc.in_(cc_facility),
rc.c.account.in_(nonp_accounts)),
rc.c.account.in_(
[3308, 4220, 4221, 3310, 4955]),
rc.c.account.in_(depr_accounts),
and_(rc.c.costc == 'MB1100', # Internal rent appears on this combo
rc.c.account == 4371)))
.values(rep_cat_a_id='OEF'))
# income
statements.append(rc.update()
.where(or_(rc.c.account < 2000,
rc.c.account == 4361))
.values(rep_cat_a_id='OIO'))
statements.append(rc.update()
.where(rc.c.account == 1100)
.values(rep_cat_a_id='GGG'))
statements.append(rc.update()
.where(or_(and_(rc.c.account >= 1240, rc.c.account <= 1246),
and_(rc.c.account >= 1900, rc.c.account <= 1901)))
.values(rep_cat_a_id='HFH'))
# internal
internal_accounts = session.execute(
select(account.account)
.join(summary_code, account.summary_code == summary_code.summary_code)
.where(or_(summary_code.internal == True,
account.account.in_([9801, 9802]))))
internal_accounts = [a.account for a in internal_accounts]
statements.append(rc.update()
.where(or_(rc.c.account.in_(internal_accounts)))
.values(rep_cat_a_id='INT'))
for stmt in statements:
session.execute(stmt)
remaining_nulls = session.execute(select([rc.c.account, rc.c.costc])
.where(rc.c.rep_cat_a_id == None)
)
remaining_nulls = [{'account': a, 'costc': b}
for a, b in remaining_nulls]
if len(remaining_nulls) != 0:
quick_csv(remaining_nulls, open_file=True)
raise RuntimeError('Null config rows remaining.')
else:
print(
'No non-internal NULL configurations remaining (process successful).')
session.commit()
```
#### File: cli/curriculum/copy.py
```python
from datetime import datetime
import click
from sqlalchemy import select
from planning_system.db import DB, table_map
from planning_system.db.schema import Base
from planning_system.db.schema.tables.curriculum import Curriculum, Course, Cost, CalendarMap
from planning_system.db.schema.tables.timetable import TGroupMember, TGroup, TGroupStaffing
from .copy_object import _recursive_copy, dependency_chain, maps
@click.command()
@click.argument("id", type=int)
@click.argument("description", type=str)
@click.option("--year", "-y", type=int, help="Link to a specific year.")
@click.option("--editable", "-e", is_flag=True, help="Is editable.")
@click.option("--viewable", "-v", is_flag=True, help="Is viewable.")
@click.option("--timetable", "-t", is_flag=True, help="Copy the timetable data too.")
@click.option("--halt", "-h", type=str, help="Halt recursion at table - don't copy this tables's children.")
@click.pass_obj
def copy(config, id, description, year, editable, viewable, timetable, halt):
"""
Create a deep copy of curriculum ID with DESCRIPTION.
"""
if halt is not None:
halt = halt.lower()
if not halt in dependency_chain(key_only=True):
raise ValueError(
f"'{halt}'' is not a valid key curriculum structure object ({dependency_chain(key_only=True)}).")
with DB(config.uri, config.echo) as db:
session = db.session()
# make new empty curriculum
curriculum = Curriculum(description=description,
created_date=datetime.now(),
acad_year=year,
can_edit=editable,
can_view=viewable)
session.add(curriculum)
session.flush()
# get courses
course_query = select(Course).where(Course.curriculum_id == id)
with click.progressbar(session.execute(course_query).all(),
item_show_func=lambda r: f"Processing {r.Course.pathway}" if r is not None else None
) as bar:
for row in bar:
_recursive_copy(session,
curriculum.curriculum_id,
parent_obj=curriculum,
child_obj=row.Course,
tm=table_map(Base),
dc=dependency_chain(),
config=config,
indent_level=1,
halt_table=halt)
# copy the calendar map
calendar_map = session.execute(
select(CalendarMap).where(CalendarMap.curriculum_id==id)).all()
for row in calendar_map:
new_entry = CalendarMap(curriculum_id=curriculum.curriculum_id)
cols = [col for col in CalendarMap.__table__.columns.keys()
if col != "curriculum_id"]
for col in cols:
setattr(new_entry, col, getattr(row.CalendarMap, col))
session.add(new_entry)
session.flush()
if timetable:
cost_map = [(old, new) for (object, old), new in maps.items()
if object == Cost.__tablename__]
with click.progressbar(cost_map, item_show_func=lambda tup:
f'Copied cost {tup[0]}' if tup is not None else None) as bar:
for old_id, new_id in bar:
old_groups = session.execute(
select(TGroup).where(TGroup.cost_id == old_id)).all()
for row in old_groups:
old_group_id = row.TGroup.tgroup_id
non_pk_cols = [c for c in TGroup.__table__.columns.keys()
if c not in ['tgroup_id', 'cost_id']]
data = {col: getattr(row.TGroup, col)
for col in non_pk_cols}
data['cost_id'] = new_id
new_record = TGroup(**data)
session.add(new_record)
session.flush() # to generate pk
# Copy Staffing
staffing = session.execute(
select(TGroupStaffing).where(TGroupStaffing.tgroup_id == old_group_id)).all()
for staff_row in staffing:
staff_record = TGroupStaffing(tgroup_id=new_record.tgroup_id,
staff_id=staff_row.TGroupStaffing.staff_id)
session.add(staff_record)
# Copy students
students = session.execute(
select(TGroupMember).where(TGroupMember.tgroup_id == old_group_id)).all()
for student_row in students:
student_record = TGroupMember(tgroup_id=new_record.tgroup_id,
student_id=student_row.TGroupMember.student_id)
session.add(student_record)
# flush staff and students
session.flush()
if click.confirm("Confirm creation of new curriculum?"):
session.commit()
else:
session.rollback()
```
#### File: cli/curriculum/_scrape.py
```python
from collections import defaultdict
from docx import Document
from tabulate import tabulate
def _lists():
"""
Declared outside of main function so that
keys can be accessed by other modules.
"""
return {'comp_fields': defaultdict(lambda: ''),
'aims': [],
'outcomes': [],
'readings': [],
'assessments': [],
'assessment_outcomes': []}
def scrape_document(file, fall_over=False, root_level=1):
"""
Reads doc into dictionary, then normalises.
    Note that it could normalise while reading; that approach
    was not chosen, for clarity in the code.
Parameters
----------
file : filepath
Path of a module specification .docx file.
fall_over : bool, default False
Whether or not to keep running if a badly formatted section is found.
root_level : int
The level of the root folder (in pathlib.path.parts) for sensible printing.
Returns
-------
dict
module_code : normalised specification, as tuple of
(comp_fields, aims, outcomes, reading_list,
assessments, assessment_outcomes)
"""
lists = _lists()
problems = []
def _error_handler(fn):
"""Generic error handler for all read subs"""
def inner(*args):
try:
fn(*args)
except (IndexError, ValueError):
name = args[0]
short_file = '/'.join(file.parts[root_level:])
problem = f"Bad '{name}' section in {str(short_file)}"
if fall_over:
output = []
for row in args[1]:
output.append([cell.text for cell in row.cells])
print(tabulate(output))
raise RuntimeError(problem)
else:
problems.append(problem)
return inner
@_error_handler
def _read_comp(name, rows):
"""Read in a component field, removing newlines"""
text = rows[0].cells[1].text
text = str(text).replace('\n', '')
text = text.strip()
if text != "":
lists['comp_fields'][name] = text
@_error_handler
def _read_num_comp(name, rows):
"""Read in a component field, removing newlines"""
text = "".join([char for char in str(rows[0].cells[1].text)
if char.isnumeric() or char == '.'])
text = int(text) if int(text) == float(text) else float(text)
lists['comp_fields'][name] = int(text)
@_error_handler
def _read_verbose_comp(name, rows):
"""Read in verbose component fields."""
text = rows[0].cells[1].text
if text.strip() != '':
lists['comp_fields'][name] = text
@_error_handler
def _read_aim(name, rows):
"""Read in aims field"""
for paragraph in rows[0].cells[1].paragraphs:
if paragraph.style.name == 'List Paragraph':
lists['aims'].append(paragraph.text)
else:
lists['comp_fields'][name] += paragraph.text
@_error_handler
def _read_req(name, rows):
"""Read in core/compulsory/optional"""
tbl = rows[0].cells[1].tables[0]
for row in tbl.rows:
# Docs use a stupid tick character, so need to check length of run rather than text
if len(row.cells[1].paragraphs[0].runs) > 0:
lists['comp_fields'][name] = row.cells[0].text.split(' ')[0]
break
@_error_handler
def _read_outcomes(name, rows):
"""Read in module learning outcomes"""
tbl = rows[0].cells[1].tables[0]
# skip header row
for row in tbl.rows[1:]:
lists['outcomes'].append(row.cells[1].text)
@_error_handler
def _read_assessment(name, rows):
"""Read in the assessment sections"""
record = {}
# Define mappings for detecting types of assessment from IDs
assess_map = defaultdict(lambda: 'other')
assess_map.update({'F': 'Formative', 'S': 'Summative'})
if len(rows[0].cells[1].tables) == 0:
# If the re-assessment section isn't tabulated
record['description'] = rows[0].cells[1].text
record['id'] = 0
record['type'] = 'n/a'
record['weighting'] = 0
record['is_reassessment'] = 1
lists['assessments'].append(record)
return
tbl = rows[0].cells[1].tables[0]
for row in tbl.rows[1:]:
record['id'] = len(lists['assessments'])
record['type'] = assess_map[row.cells[0].text[0]]
record['description'] = row.cells[1].text
if len(row.cells) >= 3:
record['weighting'] = float(row.cells[2].text)
else:
record['weighting'] = 100
record['is_reassessment'] = 1 if 'module_re' in name.lower() else 0
lists['assessments'].append(record)
if len(row.cells) >= 4:
for lo in row.cells[3].text.split(','):
map_record = {}
map_record['assessment_id'] = record['id']
map_record['lo_number'] = str(lo).strip()
lists['assessment_outcomes'].append(map_record)
@_error_handler
def _read_reading_list(name, rows):
"""Read in the reading list(s)"""
record = {}
for row in rows:
level = row.cells[1].text
for paragraph in row.cells[2].paragraphs:
record['level'] = level
record['source'] = paragraph
lists['readings'].append(record)
configuration_a = {1: _read_comp,
2: _read_comp,
3: _read_num_comp,
5: _read_num_comp,
6: _read_comp,
7: _read_num_comp,
8: _read_req,
12: _read_verbose_comp,
14: _read_aim,
15: _read_outcomes,
16: _read_verbose_comp,
17: _read_verbose_comp,
19: _read_comp,
20: _read_verbose_comp,
21: _read_assessment,
22: _read_assessment,
23: _read_assessment,
25: _read_comp,
28: _read_reading_list,
29: _read_verbose_comp
}
# spec with requisites
configuration_b = {1: _read_comp,
2: _read_comp,
3: _read_num_comp,
5: _read_num_comp,
6: _read_comp,
7: _read_num_comp,
8: _read_comp,
9: _read_comp,
10: _read_comp,
14: _read_comp,
16: _read_aim,
17: _read_outcomes,
18: _read_verbose_comp,
19: _read_verbose_comp,
21: _read_comp,
22: _read_assessment,
23: _read_assessment,
24: _read_verbose_comp,
25: _read_assessment,
26: _read_verbose_comp,
28: _read_reading_list,
29: _read_verbose_comp
}
# set default
configuration = configuration_a
doc = Document(file)
tbl = []
# Some docs have the table split up on page breaks, so
for table in doc.tables:
tbl += table.rows
# create a dictionary of section number: content
spec = defaultdict(list)
field_names = {}
for row_n, row in enumerate(tbl):
c0 = row.cells[0].text
if c0.isnumeric():
field_n = int(c0)
# Keep first run for name (avoids italicised explanations)
try:
name = str(row.cells[1].paragraphs[0].text.strip())
except IndexError:
name = f"section_{field_n} _[invalid_name]"
name = name.strip().replace(' ', '_').lower()
# limit length of name in words
name = "_".join(name.split("_")[:7])
field_names[field_n] = name
delta = 1
# save all the content in this section
# allows for multi-row sections
try:
while not tbl[row_n+delta].cells[0].text.isnumeric():
spec[field_n] = spec[field_n]+[tbl[row_n+delta]]
delta += 1
if row_n+delta >= len(tbl):
break
except IndexError:
raise RuntimeError(
f"Issue in {file}. Section {name} on row {row_n} delta {delta}")
# use configuration to process contents
if 'requisite' in field_names[8]:
# business logic
configuration = configuration_b
for field_n, function in configuration.items():
try:
function(field_names[field_n], spec[field_n])
except KeyError:
# Ignore sections not in config
pass
# label which format the spec was in
if configuration == configuration_a:
config_name = "A (no requisites)"
else:
config_name = "B (requisites)"
# Handle docs that cover multiple modules
data = [[] for _ in lists.keys()]
for module_code in lists['comp_fields']['module_code'].split(','):
# Take alphanumeric only
module_code = ''.join(chr for chr in module_code if chr.isalnum())
_comp_fields = {k: v for k, v in lists['comp_fields'].items()}
_comp_fields['module_code'] = module_code
_comp_fields['path'] = str(file)
_comp_fields['doc_type'] = config_name
data[0].append(_comp_fields)
data[1] += [{'file': str(file), 'module_code': module_code, 'aim': aim}
for aim in lists['aims']]
data[2] += [{'file': str(file), 'module_code': module_code, 'outcome': outcome}
for outcome in lists['outcomes']]
data[3] += [{'file': str(file), 'module_code': module_code, **item}
for item in lists['readings']]
data[4] += [{'file': str(file), 'module_code': module_code, **item}
for item in lists['assessments']]
data[5] += [{'file': str(file), 'module_code': module_code, **item}
for item in lists['assessment_outcomes']]
return data, problems
```
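A minimal driving sketch for the scraper (the import path is inferred from the file's location under cli/curriculum/, and the document path is a placeholder):
```python
from pathlib import Path
from planning_system.cli.curriculum._scrape import scrape_document

data, problems = scrape_document(Path("specs/module_spec.docx"), fall_over=False)
comp_fields, aims, outcomes, readings, assessments, assessment_outcomes = data
for problem in problems:
    print(problem)      # badly formatted sections are collected rather than raised
print(comp_fields)      # one dict per module code found in the document
```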
#### File: cli/db/deadviews.py
```python
import click
from sqlalchemy import inspect
from planning_system.db import DB
from planning_system.db.schema.views import get_view_definitions, views_base
@click.command()
@click.pass_obj
def deadviews(config):
"""
Prints a list of views which exist in the database
but not in the planning system.
Does not provide a delete option, as views
should be manually inspected to determine dependencies.
TODO automate inspection.
"""
with DB(config.uri, config.echo) as db:
config.verbose_print("Getting views in database")
inspector = inspect(db.engine)
db_views = inspector.get_view_names()
config.verbose_print("Getting views in planning system")
session = db.session()
sys_views = [name for name in get_view_definitions(session).keys()]
output = [name for name in db_views if name not in sys_views]
output.sort()
print("Redundant views")
for name in output:
print(name)
```
#### File: cli/db/__init__.py
```python
from planning_system.cli import add_subcommands
import click
EXCLUDE_FROM_EXE = True
@click.group()
@click.pass_obj
def db(config):
"""
'Group' of commands for generic db interaction and maintenance.
"""
pass
add_subcommands(db, __file__, __package__)
```
#### File: cli/finance/load.py
```python
import csv
import sys
from datetime import datetime
import click
from sqlalchemy import and_, select, insert
from planning_system.db import DB
from planning_system.db.schema.tables.finance import f_set, finance, finance_instance
from planning_system.db.schema.tables.finance_structure import account, entry_type
from planning_system.api.functions import normalise_period
@click.command()
@click.option("--acad_year", "-a", type=int)
@click.argument("set_cat_id", type=str)
@click.argument("filepath", type=click.Path(exists=True))
@click.option("--unsigned", "-u", is_flag=True, help="Indicates that import data is unsigned (no negatives).")
@click.pass_obj
def load(config, acad_year, set_cat_id, unsigned, filepath):
"""
Import Finance data.
Load a csv with columns for costc, account, period & amount and
load into ACAD_YEAR SET_CAT_ID. Target sets must exist. Can either have acad year in data,
or by passing with the ``acad_year`` option. Data is assumed to have negative values for credit balances,
and positive values for debit balances (unless the ``unsigned`` option is passed).
"""
headers = {}
body = []
valid_cols = ['account', 'period', 'amount', 'costc', 'acad_year']
with open(filepath) as file:
rows = csv.reader(file)
for i, row in enumerate(rows):
if i == 0:
for j, col in enumerate(row):
headers.update({j: col})
if acad_year != None:
headers.update({len(headers): 'acad_year'})
else:
if acad_year != None:
r = row + [acad_year]
else:
r = row
body.append({headers[k]: v for k, v in enumerate(r)
if headers[k] in valid_cols})
if len(body[0]) != len(valid_cols):
click.echo("Headers incorrect.")
sys.exit()
years = list(set([r['acad_year'] for r in body]))
costcs = list(set([r['costc'] for r in body]))
if acad_year == None:
print(f"Detected years: {', '.join([str(y) for y in years])}")
with DB(config.uri, config.echo) as db:
con = db.con
with con.begin() as transaction:
# Need sets to map existing
set_tbl = f_set.__table__
sets = select([set_tbl.c.set_id,
set_tbl.c.set_cat_id,
set_tbl.c.acad_year,
set_tbl.c.costc]) \
.where(and_(set_tbl.c.set_cat_id == set_cat_id,
set_tbl.c.costc.in_(costcs),
set_tbl.c.acad_year.in_(years)))
# Create finance instance for each cost centre and year used
mapping = {}
for s in con.execute(sets).fetchall():
stmt = insert(finance_instance.__table__) \
.values(created_by='CLI',
datestamp=datetime.now(),
set_id=s.set_id) \
.returning(finance_instance.__table__.c.instance_id)
instance_id = con.execute(stmt).fetchall()[0].instance_id
mapping[tuple([s.costc, s.acad_year])] = instance_id
# Need account information for fixing balances
acc = account.__table__
et = entry_type.__table__
accounts = select([acc.c.account, et.c.coefficient]) \
.where(acc.c.default_balance == et.c.balance_type)
account_bal = {
a.account: (a.coefficient if not unsigned else 1)
for a in con.execute(accounts)}
            # Create finance row for each row in input, correcting balances and period format
inputs = []
with click.progressbar(body, show_eta=True, show_percent=True, show_pos=True) as bar:
for row in bar:
row_key = tuple([row['costc'], int(row['acad_year'])])
                    # Check cost centre is valid for inclusion
if row_key not in mapping.keys():
click.echo(f"No set exists for {row_key}")
sys.exit()
                    # Amounts stored as absolute rather than signed CR/DB
inputs.append(dict(instance_id=mapping[row_key],
account=int(row['account']),
amount=float(row['amount'].replace(",", "")) *
float(account_bal[row['account']]),
period=normalise_period(row['period'])))
con.execute(insert(finance.__table__), inputs)
if config.confirm(f"Confirm writing {len(inputs)} finance records to DB?"):
transaction.commit()
else:
transaction.rollback()
```
#### File: cli/finance/save.py
```python
from collections import defaultdict
from getpass import getuser
from datetime import datetime
import click
from planning_system.db import DB
from planning_system.db.schema.tables.finance import f_set, finance, finance_instance
from planning_system.db import Base
from planning_system.db.schema.views import get_view_class
@click.command()
@click.argument("acad_year", type=int)
@click.argument("setcat", type=str)
@click.pass_obj
def save(config, acad_year, setcat):
"""
Save all matching sets.
.. _finance-save:
Create a finance instance for each set with the given 'ACAD_YEAR' and 'SETCAT'.
Once Finances have been calculated from :ref:`v_calc_finances`, the net cost of
non-income-generating cost centres is calculated using :ref:`v_calc_set_costing` and
is recharged to income-generating cost centres (i.e. by adding balancing transactions).
"""
with DB(config.uri, config.echo) as db:
session = db.session()
# Get sets to be updated
sets = session.query(f_set.set_id).filter(f_set.acad_year == acad_year) \
.filter(f_set.set_cat_id == setcat)
# Calculate the actual finances
config.verbose_print("Calculating finances...", nl=False)
f_view = get_view_class("v_calc_finances", db.engine)
calc_finances = session.query(f_view.account, f_view.period, f_view.amount, f_view.set_id) \
.filter(f_view.set_id.in_(sets))
config.verbose_print("Complete.")
config.verbose_print("Splitting finances by set...", nl=False)
        # Convert the results to a dictionary by set_id for more transparent processing
dict_finances = defaultdict(list)
for r in calc_finances:
dict_finances[r.set_id].append(r)
config.verbose_print("Complete.")
# For each set (wrapped for progress bar)
set_instance_dict = {}
# Redefine sets to get costc for progress bar updating
sets = session.query(f_set.set_id, f_set.costc).filter(f_set.acad_year == acad_year) \
.filter(f_set.set_cat_id == setcat)
with click.progressbar(sets.all(), label="Working through sets", show_eta=False,
item_show_func=_progress_label, fill_char="£") as bar:
for s in bar:
                # Create a finance instance for the set
i = finance_instance(created_by=getuser(),
set_id=s.set_id, datestamp=datetime.now())
session.add(i)
session.flush()
set_instance_dict[s.set_id] = i.instance_id
                # Create a list of finance objects for bulk inserting, way quicker than one by one
finances = []
for row in dict_finances[s.set_id]:
finances.append(finance(instance_id=i.instance_id,
account=row.account, period=row.period, amount=row.amount))
session.bulk_save_objects(finances)
session.flush()
session.commit()
config.verbose_print("Calculating recharges...", nl=False)
# Work out the recharges based on the values just input, which will then be added to the instances
costing_view = get_view_class("v_calc_set_costing", db.engine)
select_costings = session.query(costing_view.account,
costing_view.period,
costing_view.amount,
costing_view.set_id) \
.filter(costing_view.acad_year == acad_year) \
.filter(costing_view.set_cat_id == setcat)
# Aggregate to add to an instance
agg_recharges = defaultdict(float)
for costing in select_costings.all():
agg_recharges[(costing.account, costing.set_id,
costing.period,)] += costing.amount
finances = []
for key, amount in agg_recharges.items():
if amount != 0:
account, set_id, period = key
if set_id in set_instance_dict.keys():
finances.append(finance(instance_id=set_instance_dict[set_id],
account=account,
period=period,
amount=amount))
else:
print(f"Set {set_id} missing")
session.bulk_save_objects(finances)
session.commit()
config.verbose_print("Complete.")
def _progress_label(s):
if s is not None:
return f"Processed {s.costc}"
else:
return f"Beginning processing"
```
#### File: cli/finance/transactions.py
```python
import csv
import sys
from datetime import datetime
import click
from sqlalchemy import and_
from planning_system.db import DB
from planning_system.db.schema.tables.finance import f_set, transaction
from planning_system.db.schema.tables.finance_structure import account, entry_type
from planning_system.api.functions import normalise_period
@click.command()
@click.argument("acad_year", type=int)
@click.argument("set_cat_id", type=str)
@click.argument("filepath", type=click.Path(exists=True))
@click.pass_obj
def transactions(config, acad_year, set_cat_id, filepath):
"""
Import and clean Finance transactions.
    Removes reversing journals and reversing VAT charges from transaction lists.
"""
headers = {}
body = []
# read into list and check headers match
valid_cols = ['costc', 'account', 'period', 'amount', 't',
'tt', 'trans.date', 'ap/ar id (t)', 'text', 'transno']
with open(filepath, newline="", encoding='utf8', errors='ignore') as file:
rows = csv.reader(file)
for i, row in enumerate(rows):
if i == 0:
for j, col in enumerate(row):
headers.update({j: col.lower()})
else:
body.append({headers[k]: v if len(v) > 0 else None for k, v in enumerate(row)
if headers[k] in valid_cols})
if len(body[0]) != len(valid_cols):
click.echo("Headers incorrect: " + "|".join(list(body[0].keys())))
sys.exit()
# Clean, and remove zero-sum transaction & account combos
idrow_dict = {}
period_dict = {}
for row in body:
key = _row_key(row)
row['period'] = normalise_period(row['period'])
row['amount'] = row['amount'].replace(",", "")
if row['text'] == None:
row['text'] = "<No description>"
try:
date_components = [int(c)
for c in row['trans.date'].split("/")[::-1]]
row['trans.date'] = datetime(*date_components)
except:
raise ValueError(
"Date conversion failed - check date field is in regular dd/mm/yyyy")
idrow_dict.update({key: idrow_dict.get(key, 0) + float(row['amount'])})
period_dict.update({row['transno']: row['period']})
body = [row for row in body if idrow_dict[_row_key(row)] != 0]
to_remove = {}
with click.progressbar(idrow_dict.items(), label="Detecting reversing journals") as bar:
for i, iv in bar:
for j, jv in idrow_dict.items():
if not to_remove.get(j, False):
criteria = [
# The cost centres and accounts match
i[-11:] == j[-11:],
# They sum to zero
iv + jv == 0,
                        # The transaction IDs are chronologically close
abs(int(i[:8]) - int(j[:8])) <= 50,
# They are at most one period apart
abs(period_dict[i[:8]] - period_dict[j[:8]]) <= 1
]
if all(criteria):
to_remove.update({j: True})
body = [row for row in body if not to_remove.get(_row_key(row), False)]
# Detect and remove reversing journals by checking other transactions for zero sum
with DB(config.uri, config.echo) as db:
sess = db.session()
# Need sets to map existing
filter_clause = and_(f_set.set_cat_id == set_cat_id,
f_set.acad_year == acad_year)
sets = sess.query(f_set).filter(filter_clause)
costc_map = {s.costc: s.set_id for s in sets.all()}
# Clear existing transactions
click.echo("Clearing set's existing transactions...", nl=False)
trans = sess.query(transaction).join(f_set).filter(filter_clause)
for tran in trans:
sess.delete(tran)
sess.flush()
click.echo("Complete. ")
# Need account information for fixing balances
accounts = sess.query(account, entry_type).filter(
account.default_balance == entry_type.balance_type).all()
account_bal = {
a.account.account: a.entry_type.coefficient for a in accounts}
inputs = []
for row in body:
tran = transaction(set_id=costc_map[row['costc']],
transaction_id=row['transno'],
account=row['account'],
period=row['period'],
status_id=row['t'],
type_id=row['tt'],
dt=row['trans.date'],
supplier_name=row['ap/ar id (t)'],
description=row['text'],
amount=float(row['amount']) *
float(account_bal[row['account']])
)
inputs.append(tran)
if config.confirm(f"Confirm writing {len(inputs)} transactions to DB?"):
click.echo("Writing to DB... ", nl=False)
sess.bulk_save_objects(inputs)
sess.commit()
click.echo("Complete.")
else:
sess.rollback()
def _row_key(d):
l = [d['transno'], d['costc'], d['account']]
for i in l:
if i == None:
            raise ValueError(
                "Missing key element - check there is no grand total row present in import file")
return "|".join([d['transno'], d['costc'], d['account']])
```
#### File: cli/returns/staffxml.py
```python
from datetime import datetime
import xml.etree.ElementTree as ET
import pandas as pd
import click
OBJ_STAFF = "Person"
OBJ_CONTRACT = "Contract"
OBJ_ACTIVITY = "Activity"
OBJ_GOVERNOR = "Governor"
FIRST_STAFF_ATTR = "ABLWELSH"
LAST_STAFF_ATTR = "STAFFID"
FIRST_CONTRACT_ATTR = "ACEMPFUN"
LAST_CONTRACT_ATTR = "ZEROHRS"
FIRST_ACTIVITY_ATTR = "ACTSOC"
LAST_ACTIVITY_ATTR = "CCPROP"
STAFF_ID = "OWNSTAFFID"
GOVERNOR_FLAG = "GOVFLAG"
CONTRACT_ID = "OWNCONTID"
@click.command()
@click.argument("path", type=click.Path(exists=True))
@click.argument("rec_id", type=str)
@click.pass_obj
def staffxml(config, path, rec_id):
"""
Convert the data in the file at PATH to a HESA staff
XML file with return id REC_ID.
The objects' attributes must be arranged together
in the data (person fields arranged together, etc.).
The first two fields of the input must be a contract ID and person ID.
The person ID must be consistent with the OWN_STAFF_ID field.
TODO change to only use the fields within the main data.
Process removes duplicates in person to get unique persons,
so if a person has inconsistent person data on different lines,
it will likely fail validation as it will create multiple people
with the same HESA ID.
Contracts can only have one Activity record.
TODO allow for multiple.
Parameters
----------
path : path
Filepath of the input file.
rec_id : str or int
HESA ID of the return.
"""
    # Read every column as a string, so IDs are not coerced to numbers or dates
class StringConverter(dict):
def __contains__(self, item):
return True
def __getitem__(self, item):
return str
def get(self, default=None):
return str
df = pd.read_csv(path, converters=StringConverter()
).set_index(["PER_REF_NO", "CONCAT"])
# Clean rec_id
if rec_id[0] == 'C':
rec_id = rec_id[1:]
# Create root
root = ET.Element("StaffRecord")
institution = ET.SubElement(root, "Institution")
ET.SubElement(institution, "RECID").text = str(rec_id)
ET.SubElement(institution, "UKPRN").text = str(config.ukprn)
# Get list of columns and markers of key fields
attrs = df.columns.values.tolist()
first_staff_pos = attrs.index(FIRST_STAFF_ATTR)
last_staff_pos = attrs.index(LAST_STAFF_ATTR)
first_contract_pos = attrs.index(FIRST_CONTRACT_ATTR)
first_activity_pos = attrs.index(FIRST_ACTIVITY_ATTR)
last_activity_pos = attrs.index(LAST_ACTIVITY_ATTR)
# Correctly order staff columns
staff_attrs = attrs[first_staff_pos:last_staff_pos+1]
for attr in [STAFF_ID, "STAFFID"]:
staff_attrs.pop(staff_attrs.index(attr))
staff_attrs = [attr] + staff_attrs
# Correctly order contract columns
contract_attrs = [STAFF_ID] + attrs[first_contract_pos:last_activity_pos+1]
for attr in [CONTRACT_ID, "CONTID"]:
contract_attrs.pop(contract_attrs.index(attr))
contract_attrs = [attr] + contract_attrs
# Carve up data
staff_records = df.filter(attrs[first_staff_pos:last_staff_pos+1]) \
.drop_duplicates().droplevel(1, 0)
staff_records = staff_records[staff_attrs] # reorder
# Note includes activity
contract_records = df.filter(contract_attrs) \
.drop_duplicates()
activity_attrs = attrs[first_activity_pos:last_activity_pos+1]
governor_records = df[df[GOVERNOR_FLAG] == '1'].filter(
[STAFF_ID]+attrs[last_activity_pos+1:])
for staff_index, staff_row in staff_records.iterrows():
# Create person and add attrs
person = ET.SubElement(institution, OBJ_STAFF)
for col, value in staff_row.iteritems():
if len(value) > 0:
ET.SubElement(person, col).text = value
        # Check consistency between contracts and staff
try:
_ = contract_records.loc[staff_index]
except KeyError:
raise RuntimeError(f"No contracts found for staff ID {staff_index}.")
# Create contract and add attrs
for _, row in contract_records.loc[staff_index].iterrows():
if len(row.get(CONTRACT_ID)) > 0:
contract = ET.SubElement(person, OBJ_CONTRACT)
activity = None # see if activity is none below
for col, value in row.iteritems():
# Proceed if any
go_conditions = [col == 'ENDCON' and row['TERMS'] in ['1', '2'],
len(value) > 0] # last one is generic case
# and none of these
skip_conditions = [col == 'RESAST' and (row['ACEMPFUN'] != '2'
or row['TERMS'] == 3),
col == 'SIGRES' and (not row['TERMS'] in ['1', '2'] or (not row['ACEMPFUN'] in [
'2', '3'] or (row['ACEMPFUN'] == '2' and row['RESAST'] == 1))),
col == 'RESCON' and len(
row['ENDCON']) == 0 and row['TERMS'] in ['1', '2'],
col == 'ACEMPFUN' and not row['ACTSOC'][0] in [
'1', '2', '3'],
col == 'CURACCDIS' and row['ACEMPFUN'] in [
'4', '9'],
col == STAFF_ID]
# Hard overwrites
if col == 'ACTSOC' and row['LEVELS'] == 'A0':
value = '231'
if any(go_conditions) and not any(skip_conditions):
if col in activity_attrs:
if activity is None: # To create in the right place
activity = ET.SubElement(contract, OBJ_ACTIVITY)
targ = activity
else:
targ = contract
ET.SubElement(targ, col).text = str(value)
# Add governor record
if int(staff_row.get(GOVERNOR_FLAG)) == 1:
governor = ET.SubElement(person, OBJ_GOVERNOR)
for col, value in governor_records.loc[staff_index].iloc[0].iteritems():
if len(value) > 0 and col != STAFF_ID:
ET.SubElement(governor, col).text = str(value)
indent(root)
tree = ET.ElementTree(root)
tree.write(f"C{rec_id}.xml")
def indent(elem, level=0):
"""
Recursive function for adding newlines and indents to XML.
https://stackoverflow.com/questions/3095434/inserting-newlines-in-xml-file-generated-via-xml-etree-elementtree-in-python
"""
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
```
#### File: cli/sets/move_inputs.py
```python
import click
from planning_system.db import DB
from planning_system.db.schema.tables.input import nonp_other, pay_staff, pay_claim
from planning_system.db.schema.tables.finance import f_set
from sqlalchemy import and_
lookup = {"staff": [pay_staff, "staff_line_id"],
"claim": [pay_claim, "claim_id"],
"nonp": [nonp_other, "nonp_id"]}
@click.command()
@click.argument("from_costc", type=str)
@click.argument("to_costc", type=str)
@click.argument("acad_year", type=int)
@click.argument("set_cat_id", type=str)
@click.argument("obj", type=click.Choice([k for k in lookup.keys()]))
@click.argument("cmd", type=click.Choice(["copy", "move"]))
@click.option("--replace", "-r", is_flag=True, help="Replace the current contents of the target set.")
@click.pass_obj
def move_inputs(config, from_costc, to_costc, acad_year, set_cat_id, obj, cmd, replace):
"""
Move or copy instances between input tables.
Move OBJ to FROM_COSTC TO_COSTC in the SET_CAT_ID in ACAD_YEAR.
Operation is determined by CMD, one of:
- 'copy' for creating a copy, leaving the original unchanged.
- 'move' for changing the set, leaving no trace in the original.
"""
with DB(config.uri, config.echo) as db:
s = db.session()
db_obj = lookup[obj][0]
original_set = s.query(f_set).filter(and_(f_set.costc == from_costc,
f_set.acad_year == acad_year,
f_set.set_cat_id == set_cat_id)).first()
lines = s.query(db_obj).filter(
db_obj.set_id == original_set.set_id).all()
target_set = s.query(f_set).filter(and_(f_set.costc == to_costc,
f_set.acad_year == acad_year,
f_set.set_cat_id == set_cat_id)).first()
replace_txt = ""
if replace:
target_preexist = s.query(db_obj).filter(
db_obj.set_id == target_set.set_id)
preexist_n = len(target_preexist.all())
target_preexist.delete()
replace_txt = f"(replacing {preexist_n} pre-existing rows) "
# list of fields that will change, which is the PK (from the lookup) and the set_id
change_fields = [lookup[obj][1], 'set_id']
i = 0
for old_line in lines:
new_line = db_obj()
i += 1
            # For each column, copy the old value across unless it's one of the change_fields
for ax in old_line.__table__.columns:
a = str(ax).split(".")[1]
if a not in change_fields:
setattr(new_line, a, getattr(old_line, a))
new_line.set_id = target_set.set_id
if cmd == 'move':
s.delete(old_line)
s.add(new_line)
s.flush()
if config.confirm(f"Confirm {cmd} {i} {obj} lines {replace_txt} from {from_costc} to {to_costc} in {acad_year} {set_cat_id}?"):
s.commit()
s.rollback()
```
#### File: cli/tt/snapshot.py
```python
import datetime
import click
from getpass import getuser
from sqlalchemy import select, literal_column
from planning_system.db import DB
from planning_system.db.schema.views import get_view_class
from planning_system.db.schema.tables.timetable import StaffingSnapshot, \
SnapshotInstance, GroupSnapshot, MembershipSnapshot, EnrolSnapshot
from planning_system.api.functions import bulk_insert
@click.command()
@click.argument("curriculum_id")
@click.pass_obj
def snapshot(config, curriculum_id):
"""
Save a timetabling snapshot
"""
with DB(config.uri, config.echo) as db:
session = db.session()
instance = SnapshotInstance(datestamp=datetime.datetime.now(),
created_by=getuser(),
curriculum_id=curriculum_id)
# Get the new ID
session.add(instance)
session.flush()
instance_id = instance.instance_id
view_prefix = 'v_tt_tgroup_'
enrol_view = "v_tt_student_enrols"
# Specify source and destination
view_tbl_pairs = [(GroupSnapshot, f'{view_prefix}stats'),
(MembershipSnapshot, f'{view_prefix}membership'),
(StaffingSnapshot, f'{view_prefix}staffing'),
(EnrolSnapshot, enrol_view)]
with click.progressbar(view_tbl_pairs, label="Saving tables", show_eta=False) as bar:
for tbl, view_name in bar:
# Get the columns of the destination table
cols = [
c.name for c in tbl.__table__.columns if c.name != 'instance_id']
                # Get the view as an object
view = get_view_class(view_name, db.engine)
                # Use the destination's columns as the basis for the select query from the source
stmt = select([getattr(view, c) for c in cols]+[instance_id]) \
.where(view.curriculum_id == curriculum_id)
if view_name == enrol_view:
stmt = stmt.distinct()
result = []
for r in session.execute(stmt):
d = r._asdict()
d['instance_id'] = d.pop(f"{instance_id}")
result.append(d)
bulk_insert(session, result, tbl)
if config.confirm(f"Confirm creation of snapshot {instance_id}?"):
session.commit()
else:
session.rollback()
```
#### File: schema/functions/udfFracFTE.py
```python
def definition():
sql = """
(
@hours float,
@work_hours float,
@hol_hours float
)
RETURNS float
AS_BREAK
BEGIN
if @hours < 0
SET @hours = 0
DECLARE @result float
DECLARE @epsilon float -- Error term derived from historic contract calculation
SET @epsilon = 0.002873557
SET @result = @hours/@work_hours + (1+@hol_hours/@work_hours)*@epsilon
RETURN @result
END"""
return sql
```
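The fractional-FTE formula above is easy to sanity-check outside the database. The sketch below mirrors the T-SQL body in Python; the hours figures in the example call are illustrative only:
```python
def frac_fte(hours: float, work_hours: float, hol_hours: float) -> float:
    """Python mirror of dbo.udfFracFTE, for quick sanity checks."""
    epsilon = 0.002873557  # error term derived from historic contract calculation
    hours = max(hours, 0.0)  # negative hours are clamped, as in the SQL
    return hours / work_hours + (1 + hol_hours / work_hours) * epsilon

# e.g. 400 delivery hours against a 1548-hour working year with 232 holiday hours
print(round(frac_fte(400, 1548, 232), 4))  # ~0.2617
```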
#### File: views/curriculum/v_calc_component_coord.py
```python
def definition():
"""
Coordination variables.
"""
sql = """
SELECT enr.set_cat_id, c.component_id, c.description, c.module_code, c.staffing_band, enr.students,
a.assignments, pa.p_assessments, gs.group_size, w.weeks, s.sessions,
-- RED VARIABLE - assignment load
ISNULL(a.assignments,0)*0.5+ISNULL(w.weeks,0)/10*(ISNULL(s.sessions,1)+3)+1 AS base_cost,
(CASE WHEN enr.students < 75 THEN FLOOR(enr.students/25) ELSE 3 END
+ CASE WHEN c.staffing_band = 1 THEN 1 WHEN c.staffing_band = 2 THEN 2 ELSE 3 END)
* ISNULL(a.assignments,0) as red_var,
-- GREEN VARIABLE - assessment load
(CASE WHEN gs.group_size = 1 OR CEILING(enr.students/gs.group_size) = 1 THEN 0
WHEN CEILING(enr.students/gs.group_size) < 6 THEN 0.5
WHEN CEILING(enr.students/gs.group_size) < 11 THEN 1 ELSE 1.5 END +
CASE WHEN staffing_band = 1 then 0.5 WHEN staffing_band = 2 THEN 1 ELSE 1.5 END)
* ISNULL(pa.p_assessments,0) AS green_var,
-- YELLOW VARIABLE - student load
CASE WHEN enr.students < 75 THEN FLOOR(enr.students/25) + 1 ELSE 4 END as yellow_var
FROM (--Get component_enrolments
SELECT set_cat_id, component_id, SUM(students) as students
FROM v_calc_component_enrols
GROUP BY component_id, set_cat_id) enr
INNER JOIN c_component c ON c.component_id = enr.component_id
INNER JOIN (
SELECT y.component_id, MAX(y.weeks) as weeks
FROM (SELECT z.component_id, z.cost_id, count(c_cost_week.acad_week) as weeks
FROM c_cost z
INNER JOIN c_cost_week ON c_cost_week.cost_id = z.cost_id GROUP BY z.component_id, z.cost_id)
as y GROUP BY y.component_id)
AS w ON c.component_id = w.component_id
LEFT OUTER JOIN (
SELECT component_id, COUNT(cost_id) as p_assessments
FROM c_cost
INNER JOIN c_cost_type t on c_cost.cost_type_id = t.cost_type_id WHERE t.is_assessing = 1 GROUP BY component_id)
AS pa ON pa.component_id = c.component_id
LEFT OUTER JOIN (
SELECT component_id, COUNT(cost_id) as sessions
FROM c_cost WHERE cost_type_id = 'Teaching' GROUP BY component_id)
as s ON s.component_id = c.component_id
LEFT OUTER JOIN (
SELECT c_cost.component_id, COUNT(c_cost.cost_id) AS assignments
FROM c_cost
INNER JOIN c_cost_type t on c_cost.cost_type_id = t.cost_type_id
INNER JOIN c_cost_week ON c_cost.cost_id = c_cost_week.cost_id
WHERE t.is_assignment = 1 GROUP BY component_id)
AS a ON a.component_id = c.component_id
LEFT OUTER JOIN (SELECT component_id, CASE MIN(max_group_size) WHEN 0 THEN 1 ELSE MIN(max_group_size) END as group_size
FROM c_cost INNER JOIN c_cost_type ct ON c_cost.cost_type_id = ct.cost_type_id
WHERE ct.is_contact = 1 GROUP BY component_id )
AS gs ON gs.component_id = c.component_id
WHERE c.coordination_eligible = 1"""
return sql
```
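The banded CASE logic above is easier to follow with concrete numbers. This sketch reproduces the red and yellow variables in plain Python for an illustrative component (60 students, staffing band 2, 4 assignments — made-up figures, not taken from any real data):
```python
import math

def red_var(students: float, staffing_band: int, assignments: int) -> float:
    # Mirrors the SQL: the student term caps at 3 (>= 75 students),
    # the staffing term caps at 3 (band 3 or above).
    student_term = math.floor(students / 25) if students < 75 else 3
    band_term = {1: 1, 2: 2}.get(staffing_band, 3)
    return (student_term + band_term) * assignments

def yellow_var(students: float) -> int:
    return math.floor(students / 25) + 1 if students < 75 else 4

print(red_var(60, 2, 4))  # (2 + 2) * 4 = 16
print(yellow_var(60))     # 3
```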
#### File: views/curriculum/v_calc_component_enrols.py
```python
from sqlalchemy import select, func
import planning_system.db.schema.tables.curriculum as c
from planning_system.db.schema.tables.finance import f_set as s
from planning_system.db.schema.views import get_view_class
def definition():
"""
Fits student numbers into the curricula.
NB: this view puts all course/session students onto all components:
mutual exclusivity must be enforced by dependents
"""
sql = """
SELECT cg.curriculum_id, s.set_id, s.set_cat_id, s.acad_year, s.costc, c.component_id, cg.cgroup_id,
SUM(sn.student_count) as students
FROM s_number sn
INNER JOIN v_mri_s_number as sni ON sni.instance_id = sn.instance_id
INNER JOIN f_set s ON s.set_id = sni.set_id
INNER JOIN (--Get the fields from the curriculum to link to the student numbers
SELECT cs.course_session_id, c.aos_code, cs.session, c_curriculum.curriculum_id
FROM c_course c
INNER JOIN c_curriculum ON c.curriculum_id = c_curriculum.curriculum_id
INNER JOIN c_course_config cc ON cc.course_id = c.course_id
INNER JOIN c_course_session cs ON cs.course_session_id = cc.course_session_id
) as cur_link on cur_link.aos_code = sn.aos_code
AND cur_link.session = sn.session
AND cur_link.curriculum_id = s.curriculum_id
INNER JOIN c_course_session_config csc ON cur_link.course_session_id = csc.course_session_id
INNER JOIN c_cgroup cg ON csc.cgroup_id = cg.cgroup_id
INNER JOIN c_cgroup_config cgc ON cg.cgroup_id = cgc.cgroup_id
INNER JOIN c_component c ON cgc.component_id = c.component_id
GROUP BY cg.curriculum_id, s.set_cat_id, s.acad_year, s.costc, c.component_id, cg.cgroup_id, s.set_id
"""
return sql
```
#### File: views/curriculum/v_c_cgroup_config.py
```python
from planning_system.db.schema.tables.curriculum import CGroup
from planning_system.db.schema.views.curriculum.v_c_course_config import select_all_and_default
def definition():
"""To be used by UI."""
sql = f"""
SELECT cg.cgroup_id,
c.curriculum_id,
c.component_id,
c.description,
ISNULL(conf.ratio, 1) as ratio,
CASE WHEN conf.component_id IS NULL THEN 0 ELSE 1 END as linked,
0 as changed
FROM ({select_all_and_default(CGroup)}) as cg
LEFT JOIN c_component c ON c.curriculum_id = cg.curriculum_id
LEFT JOIN c_cgroup_config conf ON conf.cgroup_id = cg.cgroup_id
AND conf.component_id = c.component_id"""
return sql
```
#### File: views/curriculum/v_c_cgroup_struc.py
```python
def definition():
"""
Lists the components belonging to a component group.
"""
sql = """
SELECT c.component_id, c.description, cgc.cgroup_id, cal.long_description as cal_name,
ISNULL(cs.description, '1 staff') as staffing_band_description
FROM c_component c
LEFT JOIN c_cgroup_config cgc on cgc.component_id = c.component_id
LEFT JOIN c_calendar cal on cal.calendar_type = c.calendar_type
LEFT OUTER JOIN c_component_staffing cs ON cs.band_id = c.staffing_band"""
return sql
```
#### File: views/curriculum/v_c_course_config.py
```python
from planning_system.db.schema.tables.curriculum import Course
def select_all_and_default(cls):
"""
Generate SQL for selecting entire table and
a record with id -1. Only works for 1-field PKs.
Parameters
----------
cls : SQLAlchemy table object
Object imported from schema.
Returns
-------
str
        SQL for the SELECT statement, without parentheses.
"""
pk_name = list(cls.__table__.primary_key)[0].name
select_list = [col for col in cls.__table__.columns.keys()]
table_select = ", ".join(select_list)
default_select = []
# could do as comprehension but long hand for clarity
for col in select_list:
if col == pk_name:
default_select.append(f"-1 as {pk_name}")
elif col == "curriculum_id":
default_select.append("curriculum_id")
else:
default_select.append(f"NULL as {col}")
default_select = ", ".join(default_select)
statement = [f"SELECT {table_select}", f"FROM {cls.__tablename__} ",
"UNION ALL", f"SELECT {default_select}", "FROM c_curriculum"]
return "\t\n".join(statement)
def definition():
"""To be used by UI."""
sql = f"""
SELECT c.course_id,
c.curriculum_id,
cs.course_session_id,
description + ' year ' +CAST(session as varchar(2)) as description,
CASE WHEN conf.course_id IS NULL THEN 0 ELSE 1 END as linked,
0 as changed
FROM ({select_all_and_default(Course)}) as c
LEFT JOIN c_course_session cs ON cs.curriculum_id = c.curriculum_id
LEFT JOIN c_course_config conf ON conf.course_id = c.course_id
AND conf.course_session_id = cs.course_session_id"""
return sql
```
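To see the shape of SQL that `select_all_and_default` emits, it can be pointed at any mapped class with a single-column primary key. The `Demo` class below is a stand-in for illustration only; the real tables live in `planning_system.db.schema.tables`:
```python
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import declarative_base
from planning_system.db.schema.views.curriculum.v_c_course_config import select_all_and_default

Base = declarative_base()

class Demo(Base):
    __tablename__ = "c_demo"          # hypothetical table, for illustration
    demo_id = Column(Integer, primary_key=True)
    curriculum_id = Column(Integer)
    description = Column(String)

print(select_all_and_default(Demo))
# Produces (roughly):
#   SELECT demo_id, curriculum_id, description
#   FROM c_demo
#   UNION ALL
#   SELECT -1 as demo_id, curriculum_id, NULL as description
#   FROM c_curriculum
```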
#### File: views/curriculum/v_c_course.py
```python
def definition():
"""
Verbose view of courses, for UI.
"""
sql = """
SELECT CONCAT(
ISNULL(a.qualification + ' ', ''),
a.description) as short_name, --standard name
a.pathway, c.aos_code, c.curriculum_id,
a.qualification as award,
a.qualification,
ISNULL(conf.child_count,0) as child_count, --standard name
c.course_id as id --standard name
FROM c_course c
LEFT JOIN c_aos_code a ON a.aos_code = c.aos_code
LEFT JOIN (SELECT course_id, COUNT(course_session_id) as child_count
FROM c_course_config GROUP BY course_id)
conf ON conf.course_id = c.course_id
"""
return sql
```
#### File: views/curriculum/v_c_structure.py
```python
from sqlalchemy import select, func
import planning_system.db.schema.tables.curriculum as c
from planning_system.db.schema.views.reporting.v_report_c_contact_hours import query_structure
def definition():
fields = [c.Course.curriculum_id, c.Course.aos_code, c.Course.course_id,
c.aos_code.description.label("pathway"), c.aos_code.qualification,
c.CourseSession.session, c.CourseSession.course_session_id, c.CourseSession.costc,
c.CGroup.description.label(
"cgroup_description"), c.CGroup.cgroup_id,
c.Component.component_id, c.Component.description.label(
"comp_description"),
c.Cost.cost_id, c.Cost.description.label("cost_description")]
query = query_structure(*fields)
return str(query)
```
#### File: views/curriculum/v_mri_s_number.py
```python
def definition():
"""The most recent instance of student number data, by set."""
sql = """
-- Get most recent instance of student numbers by set
SELECT set_id, MAX(instance_id) as instance_id
FROM s_number_instance
GROUP BY set_id
"""
return sql
```
#### File: views/finance/v_calc_staff_fte.py
```python
def definition():
"""
Core of the staff pay calculations.
"""
view = """
SELECT staff_line_id,
CASE
WHEN s.post_type_id = 'FRAC' THEN
dbo.udfFracFTE((ISNULL(v.hours,0)-ISNULL(taught.hours,0)) *
s.indicative_fte / NULLIF(frac_fte.denom,0), con.work_hours, con.hol_hours)
WHEN s.post_status_id = 'OLD' THEN
0 -- to reflect pre-change posts shouldn't have any impact
ELSE s.indicative_FTE END as FTE
FROM input_pay_staff s
INNER JOIN f_set fs ON fs.set_id = s.set_id
LEFT OUTER JOIN v_c_delivery_pay as v ON s.set_id = v.set_id
LEFT OUTER JOIN (SELECT set_id, SUM(ISNULL(s.teaching_hours, 0)) +
SUM(ISNULL(s.assessing_hours, 0)) +
SUM(ISNULL(s.coordination_hours, 0)) as hours
FROM input_pay_staff s WHERE s.post_type_id = 'CURM' GROUP BY set_id)
as taught ON taught.set_id = s.set_id
LEFT OUTER JOIN (SELECT set_id, SUM(s.indicative_FTE) as denom FROM input_pay_staff s WHERE s.post_type_id = 'FRAC' AND s.indicative_fte IS NOT NULL GROUP BY set_id)
as frac_fte on frac_fte.set_id = s.set_id
INNER JOIN staff_con_type_hours con ON con.con_type_id = s.con_type_id AND con.acad_year = fs.acad_year AND con.set_cat_id = fs.set_cat_id
"""
return view
```
#### File: views/finance/v_cons_finance.py
```python
sql = f"""
SELECT v.summary_code, v.directorate_id, s.acad_year, s.set_cat_id, s.costc, SUM(amount) as amount
FROM v_mri_finance v
INNER JOIN f_set s ON v.set_id = s.set_id
GROUP BY v.summary_code, v.directorate_id, s.acad_year, s.set_cat_id, s.costc
"""
def definition():
return sql
```
#### File: views/finance/v_fs_account.py
```python
from planning_system.db.schema.views import ACCOUNT_DESCRIPTION, p_list_string, p_sum_string
def definition():
view = f"""
SELECT a.account, a.description, {ACCOUNT_DESCRIPTION},
s.description as summary_description, se.section_id, s.internal, ss.sub_section_id
FROM fs_account a
INNER JOIN fs_summary_code s ON s.summary_code = a.summary_code
INNER JOIN fs_sub_section ss ON ss.sub_section_id = s.sub_section_id
INNER JOIN fs_section se ON se.section_id = ss.section_id
WHERE a.hide_from_users = 0
"""
return view
```
#### File: views/finance/v_input_inc_other.py
```python
from planning_system.db.schema.views import ACCOUNT_DESCRIPTION, p_list_string, p_sum_string
def definition():
"""Other income view"""
view = f"""
SELECT i.inc_id, i.account, a.description as account_name, {ACCOUNT_DESCRIPTION}, i.description, i.set_id,
{p_list_string}, {p_sum_string} as amount
FROM input_inc_other i
LEFT OUTER JOIN fs_account a ON i.account = a.account"""
return view
```
#### File: views/finance/v_mri_finance_grouped_subtotal_internal.py
```python
from planning_system.db.schema.views.finance.v_mri_finance_grouped_subtotal import sql as base_sql
source = """FROM v_mri_finance f
LEFT OUTER JOIN fs_summary_code sc ON sc.summary_code = f.summary_code
LEFT OUTER JOIN fs_sub_section sub ON sub.sub_section_id = sc.sub_section_id
LEFT OUTER JOIN fs_section s ON s.section_id = sub.section_id
LEFT OUTER JOIN fs_super_section super ON super.super_section_id = s.super_section_id
LEFT OUTER JOIN fs_entry_type scae ON scae.balance_type = sub.default_balance
WHERE sc.internal = 1
"""
mod_sql = base_sql.replace("WHERE", "AND")
def definition():
"""Internal transactions list used to negate internals in consolidated"""
return mod_sql.format(source=source)
```
#### File: views/finance/v_ui_dates.py
```python
def definition():
"""UI view of dates"""
sql = """
SELECT d.set_cat_id, d.acad_year, d.dt, s.description + ' ' + c.description + ISNULL(' - ' + d.description,'') as description,
c.important
FROM conf_dt as d
INNER JOIN conf_dt_cat AS c on c.dt_cat_id = d.dt_cat_id
INNER JOIN f_set_cat AS s ON s.set_cat_id = d.set_cat_id """
return sql
```
#### File: views/finance/v_ui_finance.py
```python
from planning_system.db.schema.views import _get_set_cols
def definition(session):
"""
Return UI view.
Complex view, which requires a dynamic pivot.
"""
pvt_list = _get_set_cols(session)
sql = f"""
SELECT costc, summary_code, summary, section, supersection, summary_order, sec_order, super_order, level, {pvt_list}
FROM (SELECT costc, summary_code, summary, section, supersection, summary_order, sec_order, super_order, level,
CAST(f_Set.acad_year as CHAR(4)) + ' ' + f_set.set_cat_id as finance_summary, amount as amount
FROM [v_mri_finance_grouped_subtotal] f INNER JOIN f_set ON f_set.set_id = f.set_id) p
PIVOT
(SUM(amount) FOR finance_summary in ({pvt_list})) as pvt
"""
return sql
```
#### File: views/finance/v_ui_permissions.py
```python
from planning_system.db.schema.views.finance.v_ui_permissions_costc import PERMISSION_CORE
def definition():
"""Primary view of permissions for UI"""
sql = f"""
SELECT DISTINCT c.costc, c.description as costc_name, c.costc+' '+c.description as long_name,
s.set_id, s.acad_year, s.curriculum_id, CAST(s.acad_year as varchar) + ' ' + sc.description as code, lower(core.login_365) as login_365,
CAST(s.acad_year as varchar) + ' ' + sc.description as year_code, s.closed, s.set_cat_id,
sc.is_forecast, comp_acad_year_a as prev_year,
conf_sca.description as prev_desc,
conf_scb.description as mri_desc, conf.split_at_period
FROM
({PERMISSION_CORE}) as core
INNER JOIN fs_cost_centre c ON core.costc = c.costc
INNER JOIN f_set s ON core.costc = s.costc
INNER JOIN f_set_cat sc ON sc.set_cat_id = s.set_cat_id
LEFT OUTER JOIN conf_forecast conf ON conf.set_cat_id = sc.set_cat_id AND conf.acad_year = s.acad_year
LEFT OUTER JOIN f_set_cat conf_sca ON conf_sca.set_cat_id = conf.comp_set_cat_a
LEFT OUTER JOIN f_set_cat conf_scb ON conf_scb.set_cat_id = conf.comp_set_cat_b
LEFT OUTER JOIN conf_set_hide csh ON csh.set_cat_Id = s.set_cat_id AND csh.acad_year = s.acad_year
WHERE csh.set_cat_id IS NULL AND sc.is_ui_visible = 1
"""
return sql
```
#### File: views/finance/v_validation.py
```python
from sqlalchemy import select, case, func, literal, and_
from sqlalchemy.orm.util import aliased
from planning_system.db.schema.views import get_view_class
from planning_system.db.schema.tables.finance import f_set
from planning_system.db.schema.tables.config import Validation
from planning_system.db.schema.tables.finance_structure import cost_centre as cc, directorate as d
from planning_system.db.schema.tables.universal import Constants
def definition(session):
"""Calculate Validation fee."""
# Work out the fee...
student_data = get_view_class("v_s_fee_income", session.bind)
inner_agg = select(f_set.acad_year, f_set.set_cat_id,
func.sum(student_data.students).label("students")) \
.join(student_data,
student_data.set_id == f_set.set_id) \
.group_by(f_set.acad_year, f_set.set_cat_id).subquery(name="stu")
totals = select(inner_agg.c.acad_year,
inner_agg.c.set_cat_id,
inner_agg.c.students,
func.sum(case((Validation.band_lower > inner_agg.c.students, literal(0)),
(Validation.band_upper < inner_agg.c.students,
Validation.band_upper-Validation.band_lower),
else_=inner_agg.c.students-Validation.band_lower)*Validation.charge).label("amount")) \
.join(Validation, Validation.acad_year == inner_agg.c.acad_year) \
.group_by(inner_agg.c.acad_year, inner_agg.c.set_cat_id, inner_agg.c.students) \
.subquery(name="totals")
# And split it out
set_alias = aliased(f_set)
directorate_agg = select(f_set.acad_year, f_set.set_cat_id, set_alias.set_id,
func.sum(student_data.students).label("students")) \
.join(student_data,
student_data.set_id == f_set.set_id) \
.join(cc, cc.costc == f_set.costc) \
.join(d, d.directorate_id == cc.directorate_id) \
.join(set_alias, and_(set_alias.costc == d.primary_costc,
set_alias.acad_year == f_set.acad_year,
set_alias.set_cat_id == f_set.set_cat_id)) \
.group_by(f_set.acad_year, f_set.set_cat_id, set_alias.set_id) \
.subquery(name="directorate_agg")
calc = select(directorate_agg.c.set_id,
Constants.validation_account.label("account"),
literal(8).label("period"),
(directorate_agg.c.students/totals.c.students*totals.c.amount).label("amount")) \
.join(totals, and_(totals.c.acad_year == directorate_agg.c.acad_year,
totals.c.set_cat_id == directorate_agg.c.set_cat_id))
return str(calc.compile(compile_kwargs={"literal_binds": True}))
```
#### File: views/luminate/v_luminate_capex.py
```python
sql = """
SELECT v.*, f.set_cat_id, f.acad_year, 'E' as directorate_id
FROM v_input_capex v
INNER JOIN f_set f ON f.set_id = v.set_id
INNER JOIN fs_cost_centre cc ON cc.costc = f.costc
WHERE f.surpress = 0 AND v.total_amount > 0
"""
def definition():
"""Capital in luminate format"""
return sql
```
#### File: views/luminate/v_luminate_nonp.py
```python
from planning_system.db.schema.views import _generate_p_string, _get_set_cols
def definition(session):
"""Luminate view of nonpay"""
inner_sql = """
SELECT f.directorate_id, f.finance_summary, f.account, a.description, SUM(amount) as t
FROM v_mri_finance f
INNER JOIN f_set s ON s.set_id = f.set_id
INNER JOIN fs_account a ON a.account = f.account
WHERE amount <> 0 AND a.summary_code = 301 AND s.surpress = 0
GROUP BY f.directorate_id, f.finance_summary, f.account, a.description
"""
set_cols = _get_set_cols(
session, auto_format=False) # want a list returned, not string
if len(set_cols) > 0:
sum_cols = ", ".join([f"ISNULL([{col}], 0) as [{col}]" for col in set_cols])
set_cols = ", ".join([f"[{col}]" for col in set_cols])
p_list = _generate_p_string("a.p{p}", ", ")
sum_p_list = _generate_p_string("SUM(p{p}) as p{p}", ", ")
outer_sql = f"""
SELECT a.set_cat_id, a.acad_year, b.*, {p_list}
FROM
(SELECT c.directorate_id, s.set_cat_id, s.acad_year, account_name, account, {sum_p_list}
FROM v_input_nonp_other v
INNER JOIN f_set s ON s.set_id = v.set_id
INNER JOIN fs_cost_centre c ON c.costc = s.costc
GROUP BY c.directorate_id, s.set_cat_id, s.acad_year, account_name, account) as a
INNER JOIN
(SELECT directorate_id, p.account as Account, p.description as Description, {sum_cols}
FROM ({inner_sql}) pvt
PIVOT
(SUM(t) for finance_summary in ({set_cols})) as p
) as b ON a.directorate_id = b.directorate_id AND a.account = b.Account
"""
return outer_sql
```
#### File: views/reporting/v_report_tt_1to1.py
```python
from planning_system.db.schema.views.reporting.v_report_tt_specific_groups import src, gen_select
def definition():
sql = f"""
SELECT
{gen_select},
ISNULL(g.staff_list, 'Missing Tutor') as tutor,
ISNULL(g.room_type_id, c.room_type_id) as room_type,
stu.student_id, stu.name, stu.enr_detail, stu.instrument
{src}
WHERE c.tt_type_id = 3
"""
return sql
```
#### File: views/student/v_s_mri_pivot.py
```python
from planning_system.api.sql_shortcuts import pivot
def definition():
"""
Most recent student numbers pivoted for interfacing.
"""
columns = ["set_id", "origin_id", "origin_description",
"fee_cat", "costc", "default_aos_code"]
sessions = [0, 1, 2, 3]
sql = pivot(columns, sessions, "v_s_mri", "session", "student_count", "sum",
False)
return sql
if __name__ == "__main__":
print(definition())
```
#### File: views/timetable/v_tt_delivery.py
```python
from planning_system.db.schema.views import CURRICULUM_STRUCTURE
from planning_system.api.sql_shortcuts import case
def definition():
"""
View showing delivery by course, by component, by cost, by epoch.
Key is the 'mode' sub-query, which decides whether a component is
online, in-person or blended, based on the room types used not
just at cost level but at timetabling group level.
External requirement - review with Q&SE 2022.
"""
epochs = """
SELECT comp.component_id, cal.epoch_name, map.term as epoch
FROM c_cost cost
INNER JOIN c_component comp ON comp.component_Id = cost.component_id
INNER JOIN c_cost_week week ON week.cost_id = cost.cost_id
INNER JOIN c_calendar_map map ON map.acad_week = week.acad_week
AND map.calendar_type = comp.calendar_type
AND map.curriculum_id = comp.curriculum_id
INNER JOIN c_calendar cal ON cal.calendar_type = comp.calendar_type
        GROUP BY comp.component_id, cal.epoch_name, map.term
""".replace("\n", "\n\t\t")
mode = f"""
SELECT c.component_id,
AVG({case([("ISNULL(g.room_type_id,c.room_type_id)='[Online]'", "1.0")],
"0.0")}) as prop_online,
AVG(CASE WHEN g.tgroup_id IS NULL THEN 0.0 ELSE 1.0 END) as prop_tt_started
FROM c_cost c
LEFT JOIN tt_tgroup g ON g.cost_id = c.cost_id
INNER JOIN c_cost_type ct ON c.cost_type_id = ct.cost_type_id
WHERE ct.is_taught = 1
GROUP BY c.component_id
"""
sql = f"""
SELECT
curr.curriculum_id,
curr.description as curriclulum_name,
curr.acad_year as curriculum_year,
CONCAT(ISNULL(aos_code.qualification + ' ', ''), crs.pathway, ISNULL(' with ' + crs.combined_with, '') ) as course,
sess.description as session,
cg.description as component_group,
comp.module_code,
{case([("module.description=NULL", "comp.description"),
("comp.description != module.description","comp.description + '/' + module.description")],
"comp.description",
"component_name")
},
mode.prop_online,
{case([("mode.prop_online=0", "'On-campus'"),
("mode.prop_online=1", "'Online'"),
], "'Blended'", "mode_of_delivery")},
mode.prop_tt_started
FROM {CURRICULUM_STRUCTURE}
LEFT JOIN c_aos_code aos_code ON aos_code.aos_code = crs.aos_code
INNER JOIN ({mode}) as mode ON mode.component_id = comp.component_id
"""
return sql
```
#### File: views/tt_admin/v_tt_ss_staffing.py
```python
from planning_system.api.sql_shortcuts import text_concat
def definition():
"""
List of group information with staff.
"""
sql = f"""
SELECT g.*, ISNULL(r.long_description, 'n/a') as room_type, {text_concat("staff_id",
"staff_id FROM tt_ss_staffing s WHERE g.tgroup_id = s.tgroup_id AND g.instance_id = s.instance_id",
"; ")} as staff
FROM tt_ss_tgroup g
LEFT JOIN sp_room_type r ON r.room_type_id = g.room_type_id
"""
return sql
```
#### File: views/tt_admin/v_tt_ss_student_list.py
```python
def definition():
"""
    View to list snapshot student group membership, or enrolments if the type is non-specific.
"""
sql = """
SELECT instance_id, cost_id, 'n/a' as tgroup_id, 'n/a' as group_num, student_id, name, enr_detail, instrument
FROM tt_ss_enrols
WHERE tt_type_id = 1
UNION ALL
SELECT instance_id, cost_id, cast(tgroup_id as VARCHAR), CAST(group_num AS VARCHAR), student_id, name, enr_detail, instrument
FROM v_tt_ss_membership
"""
return sql
```
#### File: planning_system/sandbox/example.py
```python
my_number = 1
my_number = 'Fifty'
my_numbers = [1, 2, 3, 4, 5]
def run_this_script():
number_found = False
for number in my_numbers:
if number == my_number:
print("Found the number!")
number_found=True
if number_found:
print("Your number was in the list")
else:
print("Your number was not in the list")
if __name__ == "__main__":
run_this_script()
```
|
{
"source": "jehelp/StructuralReliability",
"score": 3
}
|
#### File: jehelp/StructuralReliability/modAna.py
```python
import numpy as np
import numpy.linalg
########################################################################
def eigPro(M_m,K_m):
"""
Returns eigen properties (angular frequencies and eigen vectors).
Input:
- M_m: structural mass matrix
- K_m: structural stiffness matrix
Output:
- eigVal_v: vector of eigen angular frequencies
- eigVec_m: matrix of eigen vectors
"""
A_m = np.linalg.inv(K_m)*M_m
eigVal_v,eigVec_m = np.linalg.eig(A_m)
i = 0
for omeInv in eigVal_v:
if omeInv != 0:
eigVal_v[i] = np.real(omeInv)**(-1/2)
i += 1
return eigVal_v,eigVec_m
########################################################################
```
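A quick sanity check of `eigPro` on a two-degree-of-freedom shear model. The mass and stiffness values are arbitrary illustrations, and the module is assumed to be importable as `modAna`:
```python
import numpy as np
from modAna import eigPro

m = 1000.0    # lumped storey mass [kg] (illustrative)
k = 2.0e6     # storey lateral stiffness [N/m] (illustrative)

# np.matrix is used so that '*' inside eigPro acts as matrix multiplication.
M_m = np.matrix([[m, 0.0], [0.0, m]])
K_m = np.matrix([[2.0 * k, -k], [-k, k]])

omega_v, phi_m = eigPro(M_m, K_m)
print(np.real(omega_v))                # eigen angular frequencies [rad/s]
print(np.real(omega_v) / (2 * np.pi))  # natural frequencies [Hz]
```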
#### File: jehelp/StructuralReliability/strMod.py
```python
import numpy as np
import numpy.matlib
########################################################################
def masMat(nDof,inpMas,fixDof):
"""
Returns structural lumped mass matrix.
Input:
- nDof: number of active DOFs
- inpMas: list with the lumped masses
- fixDof: list of active DOFs
Output:
- M_m: structural mass matrix
"""
M_m = np.matlib.zeros((nDof,nDof))
for masNod in inpMas:
nod = masNod[0]
dof = np.sum(fixDof[0:nod][:])
j = 1
for act in fixDof[nod][:]:
M_m[dof,dof] = masNod[j]
dof+=act
j+=1
return M_m
########################################################################
def stiMat(nDof,strEle,mesDat,eltPro,fixDof):
"""
    Assembles and returns the structural stiffness matrix.
-------
Inputs:
strEle: for each element [node I, node J, type]
mesDat: for each element [length, cos, sin]
    eltPro: for each element type [material properties]
- type 0 (beam): [matE,secA,secIy]
- type 1 (connection): [secMpl]
matE: material elastic modulus
secA: element section area
secIy: section moment of inertia about its centroid axis
secMpl: section plastic moment
-------
Output:
K_m: structural stiffness matrix
"""
# initialize
# Note: in FE programs, matrices are not stored like below (zeros are not stored); here
# this is just for illustration
K_m = np.matlib.zeros((nDof,nDof))
# loop over the elements
eNum = 0
for elt in strEle:
nodI = elt[0] # node numbers
nodJ = elt[1]
eltTyp = elt[2] # element type
secTyp = elt[3] # section type
# build element stiffness matrix in local coord. system
if eltTyp == 0: # element of type 0 (beam/column)
eltL = mesDat[eNum][0]
matE = eltPro[secTyp][0]
secA = eltPro[secTyp][1]
secI = eltPro[secTyp][2]
KeLoc_m = beaSti_EB_2D_loc(matE,secA,eltL,secI)
elif eltTyp == 1: # rigid rotational spring
secMpl = eltPro[eltTyp][0]
# d_a1 = np.concatenate(dis_v[I:I+3], dis_v[J:J+3])
# KeLoc_a2 = strMod.rigRotSpr_2D_loc(E,A,Lf,I,My,d_a1) -> To be developed for NL response
KeLoc_m = rigRotSprSti(secMpl)
# transform to global coordinate system
cos = mesDat[eNum][1]
sin = mesDat[eNum][2]
R_m = glo2loc_2D(cos,sin)
KeGlo_m = np.transpose(R_m)*KeLoc_m*R_m
# assemble structural stiffness
dofI = np.sum(fixDof[0:nodI][:])
dofI = np.int(dofI)
dofJ = np.sum(fixDof[0:nodJ][:])
dofJ = np.int(dofJ)
i = 0
j = 0
dof1 = dofI
for actI in fixDof[nodI][:]:
actI = np.int(actI)
dof2 = dofI
for actJ in fixDof[nodI][:]:
actJ = np.int(actJ)
K_m[dof1,dof2] += actI*actJ*KeGlo_m[i,j]
dof2+=actJ
j+=1
dof2 = dofJ
for actJ in fixDof[nodJ][:]:
actJ = np.int(actJ)
K_m[dof1,dof2] += actI*actJ*KeGlo_m[i,j]
dof2+=actJ
j+=1
dof1+=actI
i+=1
j = 0
dof1 = dofJ
for actI in fixDof[nodJ][:]:
actI = np.int(actI)
dof2 = dofI
for actJ in fixDof[nodI][:]:
actJ = np.int(actJ)
K_m[dof1,dof2] += actI*actJ*KeGlo_m[i,j]
dof2+=actJ
j+=1
dof2 = dofJ
for actJ in fixDof[nodJ][:]:
actJ = np.int(actJ)
K_m[dof1,dof2] += actI*actJ*KeGlo_m[i,j]
dof2+=actJ
j+=1
dof1+=actI
i+=1
j = 0
eNum+=1
return K_m
########################################################################
def rayDamCoe(xi,ome):
"""
Returns Rayleigh damping coefficients.
Initial stiffness is used.
Coefficients are computed once for all in the beginning of the analysis.
-------
Inputs:
    xi: structural damping ratio (same for all modes)
ome: eigen angular frequencies
-------
Output:
alp, bet: Rayleigh damping coefficients
"""
bet = 2*xi/(ome[0]+ome[1])
alp = bet*ome[0]*ome[1]
return alp,bet
########################################################################
def beaSti_EB_2D_loc(E,A,L,I):
"""
Returns 2D Euler-Bernoulli beam stiffness in local coordinate system.
Section is assumed to be constant all over the beam length.
-------
Inputs:
E: elastic modulus
A: beam section area
L: beam length
I: section moment of inertia about its centroid axis
-------
Output:
Ke_m: stiffness matrix
"""
k1 = E*A/L
k2 = 12*E*I/L**3
k3 = 6*E*I/L**2
k4 = 4*E*I/L
# start filling the upper triangle only (K_m is symmetric)
Ke_m = np.matlib.zeros((6,6))
Ke_m[0,0] = k1
Ke_m[0,3] = -k1
Ke_m[1,1] = k2
Ke_m[1,2] = k3
Ke_m[1,4] = -k2
Ke_m[1,5] = k3
Ke_m[2,2] = k4
Ke_m[2,4] = -k3
Ke_m[2,5] = k4/2
Ke_m[3,3] = k1
Ke_m[4,4] = k2
Ke_m[4,5] = -k3
Ke_m[5,5] = k4
# fill lower triangle
for i in range(1,6):
for j in range(0,i):
Ke_m[i,j] = Ke_m[j,i]
return Ke_m
########################################################################
def rigRotSprSti(secMpl):
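    """
    Returns the stiffness matrix of a (nominally rigid) rotational spring element
    in the local 2D coordinate system, built from large penalty stiffnesses.
    The section plastic moment secMpl is accepted for a future nonlinear
    version but is not used in this elastic implementation.
    -------
    Output:
    Ke_m: 6x6 stiffness matrix
    """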
kInf = 1e16
kRot = 1e16
# start filling the upper triangle only (K_m is symmetric)
Ke_m = np.matlib.zeros((6,6))
Ke_m[0,0] = kInf
Ke_m[0,3] = -kInf
Ke_m[1,1] = kInf
Ke_m[1,2] = kInf
Ke_m[1,4] = -kInf
Ke_m[1,5] = kInf
Ke_m[2,2] = kRot
Ke_m[2,4] = -kInf
Ke_m[2,5] = -kRot
Ke_m[3,3] = kInf
Ke_m[4,4] = kInf
Ke_m[4,5] = -kInf
Ke_m[5,5] = kRot
# fill lower triangle
for i in range(1,6):
for j in range(0,i):
Ke_m[i,j] = Ke_m[j,i]
return Ke_m
########################################################################
def glo2loc_2D(c,s):
"""
Build rotation matrix from global to local 2D coordinate system.
-------
Inputs:
    c: cosine of the angle (in radians) from the global to the local coordinate system
    s: sine of the angle (in radians) from the global to the local coordinate system
-------
Output:
R_m: rotation matrix from local to global coordinate system
"""
R_m = np.matlib.zeros((6,6))
R_m[0,0] = c
R_m[0,1] = s
R_m[1,0] = -s
R_m[1,1] = c
R_m[2,2] = 1
for i in range(3,6):
for j in range(3,6):
I = i-3
J = j-3
R_m[i,j] = R_m[I,J]
return R_m
########################################################################
def extLoa(nDof,fixDof,bouCon):
"""
Build external forces vector at active DOFs.
-------
Inputs:
- nDof: number of active DOFs
- fixDof: list of active DOFs
- bouCon: list of boundary conditions (displacements or forces)
-------
Output:
fe_v: external forces vector at active DOFs
"""
fe_v = np.matlib.zeros((nDof,1))
i = 0
k = 0
for nodDof in fixDof:
j = 0
for dof in nodDof:
if dof == 0:
j+=1
else:
fe_v[k] = dof*bouCon[i][j]
j+=1
k+=1
i+=1
return fe_v
########################################################################
#def main():
```
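A small sketch of the element-level helpers used in isolation. The section properties are illustrative, and the module is assumed to be importable as `strMod`:
```python
import numpy as np
from strMod import beaSti_EB_2D_loc, glo2loc_2D

# A 3 m steel member: E = 210 GPa, A = 5e-3 m^2, I = 8e-5 m^4 (illustrative values).
E, A, L, I = 210e9, 5.0e-3, 3.0, 8.0e-5
Ke_loc = beaSti_EB_2D_loc(E, A, L, I)

# Rotate the local stiffness to global coordinates for a vertical member
# (90 degrees between the global and local x-axes).
c, s = np.cos(np.pi / 2), np.sin(np.pi / 2)
R_m = glo2loc_2D(c, s)
Ke_glo = R_m.T * Ke_loc * R_m   # np.matrix semantics: '*' is matrix multiplication

print(Ke_glo.shape)  # (6, 6)
```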
|
{
"source": "jehenninger/in_vitro_droplet_assay",
"score": 3
}
|
#### File: jehenninger/in_vitro_droplet_assay/methods.py
```python
from skimage import io, filters, measure, color, exposure, morphology, feature, img_as_float, img_as_uint, draw
from scipy import ndimage as ndi
import pandas as pd
import numpy as np
import os
import sys
import math
import matplotlib
# matplotlib.use('Qt5Agg')
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib import patches
import argparse
import json
from datetime import datetime
import cv2
from types import SimpleNamespace
from pprint import pprint
def parse_arguments(parser):
# required arguments
parser.add_argument('parent_path', type=str,
help='Full path to folder that contains subfolders of experiments with data')
parser.add_argument('output_path', type=str,
help='Full path to folder where you want the output to be stored. The folder will be made if it does not exist')
# optional arguments
parser.add_argument("--tm", type=float, default=3.0,
help='Optional threshold multiplier. Defaults to 3. mean + std*tm')
parser.add_argument("--r", type=float, default=30,
help='Area of subset circle to use in middle of droplet. Default 30 px^2. Per droplet, --min_a supercedes --r')
parser.add_argument("--min_a", type=float, default=20,
help='Optional threshold for minimum droplet area. Default 20 px^2')
parser.add_argument("--max_a", type=float, default=500,
help='Optional threshold for max droplet area. Default is 500 px^2')
parser.add_argument("--circ", type=float, default=0.8,
help='Optional threshold for droplet circularity (defined 0.0-1.0). Default is 0.8')
parser.add_argument("--s",
help='What channel to use for scaffolding. Defaults to standardized average of all channels.')
parser.add_argument("--b", type=float, default=0.0,
help='Optional absolute value to use to subtract background. Default is 0.0.')
parser.add_argument("--pr", type=str, default='subset',
help='Value to use for [C](in) to calculate partition ratio. Options are subset, mean, and max. Default is subset')
parser.add_argument('--no-image', dest='output_image_flag', action='store_false', default=True,
help='Flag to set if you do not want output images of the droplets saved to the output directory')
parser.add_argument('--rand-bulk', dest='randomize_bulk_flag', action='store_true', default=False,
help='Flag to calculate bulk by randomzing the image and taking the average intensity. NOT YET IMPLEMENTED')
parser.add_argument('--bf', dest='bf_flag', action='store_true', default=False,
help='Flag to include DIC brightfield as the scaffold')
input_params = parser.parse_args()
return input_params
def load_images(replicate_files, data, input_params, folder):
# get replicate sample name
nd_file_name = [n for n in replicate_files if '.nd' in n]
if len(nd_file_name) == 1:
sample_name = get_sample_name(nd_file_name[0])
data.sample_name = sample_name
elif len(nd_file_name) == 0:
print('Error: Could not find .nd files')
sys.exit(0)
else:
print('Error: Found too many .nd files in sample directory')
sys.exit(0)
print(sample_name)
# load images
channel_image_files = [c for c in replicate_files if get_file_extension(c) == '.TIF']
if len(channel_image_files) < 1:
print('Error: Could not find image files')
sys.exit(0)
channel_image_paths = []
channel_images = []
channel_names = []
for idx, p in enumerate(channel_image_files):
channel_image_paths.append(os.path.join(input_params.parent_path, folder, p))
channel_names.append(find_image_channel_name(p))
channel_images.append(img_as_float(io.imread(channel_image_paths[idx])))
data.channel_images = channel_images
data.channel_names = channel_names
return data
def make_output_directories(input_params):
output_parent_dir = input_params.output_path
output_dirs = {'output_parent': output_parent_dir,
'output_individual': os.path.join(output_parent_dir, 'individual'),
'output_summary': os.path.join(output_parent_dir, 'summary'),
'output_individual_images': os.path.join(output_parent_dir,'individual','droplet_images')}
# make folders if they don't exist
if not os.path.isdir(output_parent_dir):
os.mkdir(output_parent_dir)
for key, folder in output_dirs.items():
        if key != 'output_parent':
if not os.path.isdir(folder):
                if not os.path.isdir(os.path.dirname(folder)):  # make the parent directory first when creating a nested output folder
os.mkdir(os.path.dirname(folder))
os.mkdir(folder)
input_params.output_dirs = output_dirs
return input_params
def find_scaffold(data, input_params):
scaffold = np.zeros(shape=data.channel_images[0].shape, dtype=np.float)
scaffold = scaffold - input_params.b
num_of_channels = len(data.channel_names)
# identify what value to use for [C](in) in partition ratio calculation
pr_parameter = input_params.pr
    if pr_parameter not in ('subset', 'mean', 'max'):
        print('ERROR: Could not identify user input for value to use to calculate partition ratio')
        sys.exit(0)
if input_params.bf_flag: # if you want to use BF as the scaffold
scaffold_channel_name = 'chDIC'
scaffold_test = [b == scaffold_channel_name for b in data.channel_names]
for idx, c in enumerate(data.channel_names):
if scaffold_test[idx]:
scaffold = scaffold + data.channel_images[idx]
if input_params.b > 0.0:
print('Error: Cannot do background subtract on brightfield image')
sys.exit(0)
scaffold = modify_bf_img(scaffold)
data.scaffold_output_img = scaffold
scaffold = standardize_img(scaffold)
else:
if input_params.s: # when the user specifies a scaffold channel
scaffold_channel_name = 'ch' + input_params.s
scaffold_test = [b == scaffold_channel_name for b in data.channel_names]
for idx, c in enumerate(data.channel_names):
if scaffold_test[idx]:
scaffold = scaffold + data.channel_images[idx]
scaffold[np.where(scaffold < 0)] = 0 # to correct for background subtraction
data.scaffold_output_img = scaffold
scaffold = standardize_img(scaffold)
else: # default using the average scaffold
for img in data.channel_images:
scaffold = scaffold + img/num_of_channels
scaffold[np.where(scaffold < 0)] = 0 # to correct for background subtraction
data.scaffold_output_img = scaffold
scaffold = standardize_img(scaffold)
data.scaffold = scaffold
return data
def modify_bf_img(img):
#process brightfield image that should already be type float
median_img = img_as_uint(img)
median_img = cv2.medianBlur(median_img, ksize=5)
median_img = img_as_float(median_img)
img = img - median_img
img[np.where(img < 0)] = 0
return img
def find_droplets(data, input_params):
threshold_multiplier = input_params.tm
scaffold = data.scaffold
channels = data.channel_names
# make binary image of scaffold with threshold intensity. Threshold is multiplier of std above background.
# Since we have already standardized the image, the threshold is simply the value of the image.
binary_mask = np.full(shape=(scaffold.shape[0], scaffold.shape[1]), fill_value=False, dtype=bool)
scaffold_binary = np.full(shape=(scaffold.shape[0], scaffold.shape[1]), fill_value= False, dtype=bool)
binary_mask[scaffold > threshold_multiplier] = True
scaffold_binary[binary_mask] = True
if input_params.bf_flag:
scaffold_binary = ndi.morphology.binary_fill_holes(scaffold_binary)
scaffold_binary = ndi.morphology.binary_opening(scaffold_binary)
scaffold_binary = ndi.morphology.binary_dilation(scaffold_binary)
else:
scaffold_binary = ndi.morphology.binary_fill_holes(scaffold_binary)
scaffold_binary_labeled = measure.label(scaffold_binary)
scaffold_regionprops = measure.regionprops(scaffold_binary_labeled)
# filter droplets by size and circularity
min_area_threshold = input_params.min_a
max_area_threshold = input_params.max_a
circ_threshold = input_params.circ
subset_area = input_params.r
data.subset_area_less_than_min_area_flag = False
if subset_area < min_area_threshold:
data.subset_area_less_than_min_area_flag = True
scaffold_mask = np.full(shape=(scaffold.shape[0], scaffold.shape[1]), fill_value=False, dtype=bool)
for i, region in enumerate(scaffold_regionprops):
if (min_area_threshold <= region.area <= max_area_threshold) and (circ(region) >= circ_threshold):
for coords in region.coords:
scaffold_mask[coords[0], coords[1]] = True
# re-segment droplets after filtering them
scaffold_filtered_binary_labeled = measure.label(scaffold_mask)
scaffold_filtered_regionprops = measure.regionprops(scaffold_filtered_binary_labeled)
data.scaffold_filtered_regionprops = scaffold_filtered_regionprops
print('Found ', len(scaffold_filtered_regionprops), ' droplets')
# get measurements of bulk regions excluding droplets and total intensity of entire image (including droplets)
# Not implemented yet
if input_params.randomize_bulk_flag:
print('Randomized bulk not implemented yet')
sys.exit(0)
# num_of_iterations = 100
#
# total_I = []
#
# if num_of_channels == 1:
# rand_scaffold_storage = np.zeros(shape=(scaffold.shape[0] * scaffold.shape[1], num_of_iterations))
#
# scaffold_1d = np.reshape(scaffold, (scaffold.shape[0] * scaffold.shape[1]))
#
# for n in range(num_of_iterations):
# rand_scaffold_storage[:,n] = np.random.shuffle(scaffold_1d)
#
# scaffold_random_average_image = np.reshape(np.mean(rand_scaffold_storage, axis=1), shape=scaffold.shape)
#
# bulk_I = []
# bulk_I.append(np.mean(scaffold_random_average_image) * 65536)
# total_I.append(np.sum(scaffold) * 65536)
#
# elif num_of_channels == 2:
# rand_client_a_storage = np.zeros(shape=(client_a.shape[0] * client_a.shape[1], num_of_iterations))
# rand_client_b_storage = np.zeros(shape=(client_b.shape[0] * client_b.shape[1], num_of_iterations))
#
# # doc on shuffle: multi-dimensional arrays are only shuffled along the first axis
# # so let's make the image an array of (N) instead of (m,n)
# client_a_1d = np.reshape(client_a, (client_a.shape[0] * client_a.shape[1]))
# client_b_1d = np.reshape(client_b, (client_b.shape[0] * client_b.shape[1]))
#
# rand_client_a_sum = np.zeros(shape=(1, client_a.shape[0] * client_a.shape[1]))
# rand_client_b_sum = np.zeros(shape=(1, client_b.shape[0] * client_b.shape[1]))
# for n in range(num_of_iterations):
# # rand_client_a_storage[n,:] = np.random.shuffle(client_a_1d)
# # rand_client_b_storage[n,:] = np.random.shuffle(client_b_1d)
# np.random.shuffle(client_a_1d)
# np.random.shuffle(client_b_1d)
# rand_client_a_sum = rand_client_a_sum + client_a_1d
# rand_client_b_sum = rand_client_b_sum + client_b_1d
#
# # client_a_random_average_image = np.reshape(np.mean(rand_client_a_storage, axis=1), client_a.shape)
# client_a_random_average_image = np.reshape(rand_client_a_sum/num_of_iterations, client_a.shape)
# client_b_random_average_image = np.reshape(rand_client_b_sum/num_of_iterations, client_b.shape)
#
# # client_b_random_average_image = np.reshape(np.mean(rand_client_b_storage, axis=1), client_b.shape)
#
# bulk_I = []
# bulk_I.append(np.mean(client_a_random_average_image) * 65536)
# bulk_I.append(np.mean(client_b_random_average_image) * 65536)
#
# total_I.append(np.sum(client_a) * 65536)
# total_I.append(np.sum(client_b) * 65536)
#
# if num_of_channels == 1:
# random_bulk_image = client_a_random_average_image
# elif num_of_channels == 2:
# random_bulk_image = np.zeros(shape=(scaffold.shape[0], scaffold.shape[0], 3))
# # we will assume that first channel is green and second channel is magenta
# random_bulk_image[..., 0] = client_b_random_average_image # R
# random_bulk_image[..., 1] = client_a_random_average_image # G
# random_bulk_image[..., 2] = client_b_random_average_image # B
else:
bulk_mask = np.invert(scaffold_mask)
bulk = {}
total = {}
for c_idx, img in enumerate(data.channel_images):
bulk[data.channel_names[c_idx]] = np.mean(img[bulk_mask]) * 65536
total[data.channel_names[c_idx]] = np.sum(img) * 65536
return data, bulk, total
def measure_droplets(data, input_params, bulk):
scaffold = data.scaffold
channels = data.channel_names
scaffold_filtered_regionprops = data.scaffold_filtered_regionprops
# initialize labeled image to generate output of what droplets were called
label_image = np.full(shape=(scaffold.shape[0], scaffold.shape[1]), fill_value=False, dtype=bool)
droplet_id_centroid_r = []
droplet_id_centroid_c = []
sample_list = []
replicate_list = []
droplet_id_list = []
subset_I_list = [[] for x in range(len(channels))]
mean_I_list = [[] for x in range(len(channels))]
max_I_list = [[] for x in range(len(channels))]
total_I_list = [[] for x in range(len(channels))]
bulk_I_list = [[] for x in range(len(channels))]
partition_ratio_list = [[] for x in range(len(channels))]
area_list = []
centroid_r_list = []
centroid_c_list = []
circularity_list = []
# iterate over regions to collect information on individual droplets
s = data.sample_name
r = input_params.replicate_count
if len(scaffold_filtered_regionprops) > 0:
for i, region in enumerate(scaffold_filtered_regionprops):
area = region.area
use_min_area_flag = False # this is if the subset area is less than the min droplet area parameter. In this case, we just use the min area.
if data.subset_area_less_than_min_area_flag:
if area < input_params.r:
use_min_area_flag = True
centroid_r, centroid_c = region.centroid
circularity = circ(region)
coordinates = region.coords
coords_r = coordinates[:, 0]
coords_c = coordinates[:, 1]
if use_min_area_flag:
subset_coords_r = coords_r
subset_coords_c = coords_c
else:
subset_coords_r, subset_coords_c = draw.circle(r=centroid_r, c=centroid_c,
radius=round(math.sqrt(input_params.r/math.pi)))
# in cases where droplets are near the edge, the circle will go beyond the image. In that case,
# we simply ignore the droplet
edge_r_test = all(0 < r < scaffold.shape[0] for r in subset_coords_r)
edge_c_test = all(0 < c < scaffold.shape[1] for c in subset_coords_c)
if edge_r_test and edge_c_test:
label_image[coords_r, coords_c] = True
droplet_id = i
droplet_id_centroid_r.append(centroid_r)
droplet_id_centroid_c.append(centroid_c)
sample_list.append(s)
replicate_list.append(r)
droplet_id_list.append(droplet_id)
area_list.append(area)
centroid_r_list.append(centroid_r)
centroid_c_list.append(centroid_c)
circularity_list.append(circularity)
for c_idx, img in enumerate(data.channel_images):
mean_intensity = np.mean(img[coords_r, coords_c]) * 65536
max_intensity = np.max(img[coords_r, coords_c]) * 65536
subset_intensity = np.mean(img[subset_coords_r, subset_coords_c]) * 65536
total_intensity = np.sum(img[coords_r, coords_c]) * 65536
if input_params.pr == 'subset':
partition_ratio = subset_intensity/bulk[data.channel_names[c_idx]]
elif input_params.pr == 'mean':
partition_ratio = mean_intensity/bulk[data.channel_names[c_idx]]
elif input_params.pr== 'max':
partition_ratio = max_intensity/bulk[data.channel_names[c_idx]]
else:
partition_ratio = -2 # just a sanity check. Should never happen.
subset_I_list[c_idx].append(subset_intensity)
mean_I_list[c_idx].append(mean_intensity)
max_I_list[c_idx].append(max_intensity)
total_I_list[c_idx].append(total_intensity)
bulk_I_list[c_idx].append(bulk[data.channel_names[c_idx]])
partition_ratio_list[c_idx].append(partition_ratio)
else:
droplet_id = i
droplet_id_centroid_r.append(0)
droplet_id_centroid_c.append(0)
sample_list.append(s)
replicate_list.append(r)
droplet_id_list.append(droplet_id)
area_list.append(0)
centroid_r_list.append(0)
centroid_c_list.append(0)
circularity_list.append(0)
for c_idx, img in enumerate(data.channel_images):
mean_intensity = 0
max_intensity = 0
subset_intensity = 0
total_intensity = 0
partition_ratio = 0
subset_I_list[c_idx].append(subset_intensity)
mean_I_list[c_idx].append(mean_intensity)
max_I_list[c_idx].append(max_intensity)
total_I_list[c_idx].append(total_intensity)
bulk_I_list[c_idx].append(bulk[data.channel_names[c_idx]])
partition_ratio_list[c_idx].append(partition_ratio)
else:
sample_list.append(s)
replicate_list.append(r)
droplet_id_list.append(0.0)
area_list.append(0.0)
centroid_r_list.append(0.0)
centroid_c_list.append(0.0)
circularity_list.append(0.0)
for c_idx, c in enumerate(channels):
subset_I_list[c_idx].append(0.0)
mean_I_list[c_idx].append(0.0)
max_I_list[c_idx].append(0.0)
total_I_list[c_idx].append(0.0)
bulk_I_list[c_idx].append(0.0)
partition_ratio_list[c_idx].append(0.0)
replicate_output = pd.DataFrame({'sample': sample_list,
'replicate': replicate_list,
'droplet_id': droplet_id_list,
'area': area_list,
'centroid_r': centroid_r_list,
'centroid_c': centroid_c_list,
'circularity': circularity_list},
columns=['sample', 'replicate', 'droplet_id', 'area',
'centroid_r', 'centroid_c', 'circularity'])
for c_idx, c in enumerate(data.channel_images):
replicate_output['subset_I_' + str(channels[c_idx])] = subset_I_list[c_idx]
replicate_output['mean_I_' + str(channels[c_idx])] = mean_I_list[c_idx]
replicate_output['max_I_' + str(channels[c_idx])] = max_I_list[c_idx]
replicate_output['total_I_' + str(channels[c_idx])] = total_I_list[c_idx]
replicate_output['bulk_I_' + str(channels[c_idx])] = bulk_I_list[c_idx]
replicate_output['partition_ratio_' + str(channels[c_idx])] = partition_ratio_list[c_idx]
data.label_image = label_image
data.replicate_output = replicate_output
if input_params.output_image_flag:
if input_params.randomize_bulk_flag:
pass
# make_droplet_image(output_dirs['output_individual_images'], orig_image, scaffold, label_image,
# num_of_channels, str(s) + '_' + str(r), droplet_id_list, droplet_id_centroid_c, droplet_id_centroid_r,
# input_args, random_bulk_image=random_bulk_image)
else:
if len(scaffold_filtered_regionprops) > 0:
make_droplet_image(input_params.output_dirs['output_individual_images'], data, droplet_id_list, droplet_id_centroid_r,
droplet_id_centroid_c, input_params)
return data
def subtract_background(input_image):
image_hist, image_bin_edges = np.histogram(input_image, bins='auto')
background_threshold = image_bin_edges[np.argmax(image_hist)] # assumes that the max hist peak corresponds to background pixels
output_image = input_image - background_threshold
output_image[output_image < 0] = 0
# output_image = np.reshape(output_image, input_image.shape)
return output_image, background_threshold
def calc_summary_stats(sample, ch_names, rep_data, input_params, bulk, total):
pr_mean = {}
pr_std = {}
cf_mean = {}
cf_std = {}
sample_output = {'sample': sample}
for c in ch_names:
pr_cols = [col for col in rep_data.columns if all(['partition' in col, c in col])]
if len(pr_cols) > 1:
print('Error: Found multiple partition ratio columns for channel ', c)
sys.exit(0)
elif len(pr_cols) == 0:
print('Error: Could not find partition ratio column for channel ', c)
sys.exit(0)
else:
pr_mean[c] = np.mean(rep_data[pr_cols])[0]
pr_std[c] = np.std(rep_data[pr_cols])[0]
replicate_id = np.unique(rep_data['replicate'])
print('Replicate ID is ', replicate_id)
rep_total = []
for r in replicate_id:
rep_mask = rep_data['replicate'] == r
rep_total.append(np.sum(rep_data['total_I_' + str(c)][rep_mask]))
cf_mean[c] = np.mean(np.divide(rep_total, total[c]))
cf_std[c] = np.std(np.divide(rep_total, total[c]))
for c in ch_names:
sample_output['partition_ratio_mean_' + str(c)] = pr_mean.get(c)
sample_output['partition_ratio_std_' + str(c)] = pr_std.get(c)
sample_output['condensed_fraction_mean_' + str(c)] = cf_mean.get(c)
sample_output['condensed_fraction_std_' + str(c)] = cf_std.get(c)
return sample_output
def find_region_edge_pixels(a): # this is a way to maybe find boundary pixels if we ever need to do that
distance = ndi.distance_transform_edt(a)
distance[distance != 1] = 0
np.where(distance == 1)
def make_axes_blank(ax):
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
def make_droplet_image(output_path, data, droplet_list, droplet_c, droplet_r, input_params):
# NOTE: I know that c and r are swapped in the arguments compared to what I actually input. It works this way
# @jonH 190411
fig, ax = plt.subplots(1, 1, figsize=(3, 3))
scaffold_image = exposure.rescale_intensity(data.scaffold_output_img)
label = np.zeros(shape=data.label_image.shape)
label[data.label_image] = 1
region_overlay = color.label2rgb(label, image=scaffold_image,
alpha=0.5, image_alpha=1, bg_label=0, bg_color=None)
ax.imshow(region_overlay)
ax.set_title(data.sample_name + '_rep' + str(input_params.replicate_count))
make_axes_blank(ax)
text_offset = 10
droplet_r = [(int(round(r)) + text_offset) for r in droplet_r]
droplet_c = [(int(round(c)) + text_offset) for c in droplet_c]
for i, drop_id in enumerate(droplet_list):
ax.text(droplet_r[i], droplet_c[i], drop_id, color='w', fontsize=4)
plt.savefig(os.path.join(output_path, data.sample_name + '_rep' + str(input_params.replicate_count) + '.png'), dpi=300)
plt.close()
#
# if random_bulk_image is not None:
# fig, ax = plt.subplots(nrows=1, ncols=2)
# orig_image = exposure.rescale_intensity(orig_image)
# random_bulk_image = exposure.rescale_intensity(random_bulk_image)
#
# if num_of_channels == 1:
# ax[0].imshow(orig_image, cmap='gray')
# ax[1].imshow(random_bulk_image, cmap='gray')
# elif num_of_channels == 2:
# ax[0].imshow(orig_image)
# ax[1].imshow(random_bulk_image)
#
# ax[0].set_title(name)
# ax[1].set_title('Randomized bulk image')
# make_axes_blank(ax[0])
# make_axes_blank(ax[1])
#
# plt.savefig(os.path.join(output_path, name + '_randomized_bulk.png'))
# plt.close()
def find_image_channel_name(file_name):
str_idx = file_name.find('Conf ') # this is specific to our microscopes file name format
channel_name = file_name[str_idx + 5 : str_idx + 8]
channel_name = 'ch' + channel_name
return channel_name
def circ(r):
output = (4 * math.pi * r.area) / (r.perimeter * r.perimeter)
return output
def get_sample_name(nd_file_name):
nd_file_name = os.path.basename(nd_file_name)
sample_name, ext = os.path.splitext(nd_file_name)
return sample_name
def get_file_extension(file_path):
file_ext = os.path.splitext(file_path)
return file_ext[1] # because splitext returns a tuple, and the extension is the second element
def standardize_img(img):
mean = np.mean(img)
std = np.std(img)
img = (img - mean) / std
return img
```
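The intensity helpers at the bottom of this module can be exercised on synthetic data. The sketch below assumes the module is importable as `methods` (with its dependencies installed) and uses a random image purely for illustration:
```python
import numpy as np
from methods import standardize_img, subtract_background

rng = np.random.default_rng(0)
img = rng.normal(loc=0.2, scale=0.02, size=(512, 512))  # fake background-only image

bg_subtracted, bg_level = subtract_background(img)
z_img = standardize_img(img)

print(round(float(bg_level), 3))  # roughly the modal background intensity (~0.2 here)
print(round(float(z_img.mean()), 3), round(float(z_img.std()), 3))  # ~0.0, ~1.0
```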
|
{
"source": "jehenninger/MECP2_neuron",
"score": 2
}
|
#### File: jehenninger/MECP2_neuron/main.py
```python
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
matplotlib.rcParams['text.usetex'] = False
matplotlib.rcParams['font.sans-serif'] = 'Arial'
matplotlib.rcParams['font.family'] = 'sans-serif'
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import os
import sys
from datetime import datetime
from types import SimpleNamespace
import methods
def main(data_path, nuc_idx=0, pro_idx=1, threshold=None):
# user input
input_params = SimpleNamespace()
input_params.parent_path = data_path
input_params.nuc_idx = nuc_idx
input_params.pro_idx = pro_idx
if threshold is not None:
input_params.threshold = threshold/65536
else:
input_params.threshold = threshold
folder_list = os.listdir(input_params.parent_path)
folder_list.sort(reverse=False)
file_ext = '.czi'
# make output directories
#input_params.output_path = input_params.parent_path
input_params.output_path = '/lab/solexa_young/scratch/MECP2_Imaging/20191112_neuron_imaging/Volumes_extra/gaussian-sigma3_mean2.35_struct3_dist0.2'
if not os.path.isdir(input_params.output_path):
os.mkdir(input_params.output_path)
for folder in folder_list:
if not folder.startswith('.') and not folder.endswith('output') and os.path.isdir(os.path.join(input_params.parent_path, folder)): #SAMPLES/EXPERIMENTS
print()
print('Started: ', folder, ' at ', datetime.now())
print()
temp_output = os.path.join(input_params.output_path, folder + '_output')
if not os.path.isdir(temp_output):
os.mkdir(temp_output)
file_list = os.listdir(os.path.join(input_params.parent_path, folder))
base_name_files = [f for f in file_list if file_ext in f and os.path.isfile(os.path.join(input_params.parent_path, folder, f))]
base_name_files.sort(reverse=False)
excel_output = pd.DataFrame(columns=['sample', 'replicate_id', 'nuc_id', 'total_nuc_voxels', 'channel', 'mean_in', 'mean_out', 'norm_mean',
'total_in', 'total_out', 'norm_total'])
objects_output = pd.DataFrame(columns=['sample', 'replicate_id', 'nuc_id', 'object_id', 'voxels', 'channel', 'mean_in', 'mean_out', 'norm_mean'])
replicate_count = 1
for idx, file in enumerate(base_name_files): #REPLICATES
print()
print(file)
print()
data = SimpleNamespace()
data.sample_name = file.replace(file_ext,'')
data.folder = folder
data.img_path = os.path.join(input_params.parent_path, folder, file)
data = methods.load_images(data, input_params)
if data is not None:
data = methods.find_nucleus_2D(data, input_params)
data.z_count = data.nuc_img.shape[0]
if idx == 0:
# z = int(data.nucleus_image.shape[0]/2)
z = 10
# make_nucleus_montage_2D(data, input_params)
total_dense_object_mask = np.full(shape=data.nuc_img.shape, fill_value=False, dtype=bool)
for r_idx, region in enumerate(data.nuc_regions):
region_area = methods.find_region_area(region)
if region_area >= 30000:
nuc_id = data.nuc_label[int((region[0].stop + region[0].start)/2), int((region[1].stop + region[1].start)/2) ]
nuc_box = data.nuc_img[:, region[0], region[1]]
nuc_mask_box = data.nuc_label[region[0], region[1]]
single_nuc_mask = nuc_mask_box == nuc_id
single_nuc_mask = np.repeat(single_nuc_mask[np.newaxis, :, :], data.z_count, axis=0) # because our nuclear mask is 2D so we project it to 3D
dense_obj_mask, bg_nuc_mask, dense_objects = methods.find_dense_objects_3D(nuc_box, single_nuc_mask, input_params, data)
total_dense_object_mask[:, region[0], region[1]][dense_obj_mask] = True
for p_idx, image in enumerate(data.pro_imgs):
channel_name = data.pro_ch_names[p_idx]
protein_box = image[:, region[0], region[1]]
mean_in = np.mean(protein_box[dense_obj_mask])
total_in = np.sum(protein_box[dense_obj_mask])
mean_out = np.mean(protein_box[bg_nuc_mask])
total_out = total_in + np.sum(protein_box[bg_nuc_mask])
norm_mean = mean_in/mean_out
norm_total = total_in/total_out
nuc_voxels = np.sum(dense_obj_mask)
excel_output = excel_output.append({'sample': folder,
'replicate_id': replicate_count,
'nuc_id': nuc_id,
                                                                    'total_nuc_voxels': nuc_voxels,
'channel': str(channel_name),
'mean_in': mean_in,
'mean_out': mean_out,
'norm_mean': norm_mean,
'total_in': total_in,
'total_out': total_out,
'norm_total': norm_total},
ignore_index=True)
for o_idx, object in enumerate(dense_objects):
voxels = np.sum(dense_obj_mask[object])
mean_in = np.mean(protein_box[object]) # not perfect because this is just a 3D bounding box, which will include pixels not in the region, but good enough for now!
objects_output = objects_output.append({'sample': folder,
'replicate_id': replicate_count,
'nuc_id': nuc_id,
'object_id': o_idx + 1,
'voxels': voxels,
'channel': str(channel_name),
'mean_in': mean_in,
'mean_out': mean_out,
'norm_mean': mean_in/mean_out},
ignore_index=True)
graph_output_path = os.path.join(temp_output, folder + '_rep' + str(replicate_count) + '.png')
methods.make_output_graphs(data.nuc_label, total_dense_object_mask, data, graph_output_path)
replicate_count += 1
else:
replicate_count += 1
excel_output.to_excel(os.path.join(temp_output, folder + '_enrichment.xlsx'), index=False)
objects_output.to_excel(os.path.join(temp_output, folder + '_objects.xlsx'), index=False)
if __name__ == "__main__":
data_path = '/lab/solexa_young/scratch/MECP2_Imaging/20191112_neuron_imaging/Volumes_extra'
main(data_path, nuc_idx=1, pro_idx=0) # for czi files, have to hard code in which channel is which
print('--------------------------------------')
print('Completed at: ', datetime.now())
```
|
{
"source": "jeherr/espaloma",
"score": 3
}
|
#### File: graphs/utils/read_heterogeneous_graph.py
```python
import numpy as np
import torch
from espaloma.graphs.utils import offmol_indices
from openff.toolkit.topology import Molecule
from typing import Dict
# =============================================================================
# UTILITY FUNCTIONS
# =============================================================================
def duplicate_index_ordering(indices: np.ndarray) -> np.ndarray:
"""For every (a,b,c,d) add a (d,c,b,a)
TODO: is there a way to avoid this duplication?
>>> indices = np.array([[0, 1, 2, 3], [1, 2, 3, 4]])
>>> duplicate_index_ordering(indices)
array([[0, 1, 2, 3],
[1, 2, 3, 4],
[3, 2, 1, 0],
[4, 3, 2, 1]])
"""
return np.concatenate([indices, np.flip(indices, axis=-1)], axis=0)
def relationship_indices_from_offmol(
offmol: Molecule
) -> Dict[str, torch.Tensor]:
"""Construct a dictionary that maps node names (like "n2") to torch tensors of indices
Notes
-----
* introduces 2x redundant indices (including (d,c,b,a) for every (a,b,c,d)) for compatibility with later processing
"""
idxs = dict()
idxs["n1"] = offmol_indices.atom_indices(offmol)
idxs["n2"] = offmol_indices.bond_indices(offmol)
idxs["n3"] = offmol_indices.angle_indices(offmol)
idxs["n4"] = offmol_indices.proper_torsion_indices(offmol)
idxs["n4_improper"] = offmol_indices.improper_torsion_indices(offmol)
if len(idxs["n4"]) == 0:
idxs["n4"] = np.empty((0, 4))
if len(idxs["n4_improper"]) == 0:
idxs["n4_improper"] = np.empty((0, 4))
# TODO: enumerate indices for coupling-term nodes also
# TODO: big refactor of term names from "n4" to "proper_torsion", "improper_torsion", "angle_angle_coupling", etc.
# TODO (discuss with YW) : I think "n1" and "n4_improper" shouldn't be 2x redundant in current scheme
# (also, unclear why we need "n2", "n3", "n4" to be 2x redundant, but that's something to consider changing later)
for key in ["n2", "n3", "n4"]:
idxs[key] = duplicate_index_ordering(idxs[key])
# make them all torch.Tensors
for key in idxs:
idxs[key] = torch.from_numpy(idxs[key])
return idxs
def from_homogeneous_and_mol(g, offmol):
r"""Build heterogeneous graph from homogeneous ones.
Note
----
For now we name single node, two-, three, and four-,
hypernodes as `n1`, `n2`, `n3`, and `n4`. These correspond
to atom, bond, angle, and torsion nodes in chemical graphs.
Parameters
----------
g : `espaloma.HomogeneousGraph` object
the homogeneous graph to be translated.
Returns
-------
hg : `espaloma.HeterogeneousGraph` object
the resulting heterogeneous graph.
"""
# initialize empty dictionary
hg = {}
# get adjacency matrix
a = g.adjacency_matrix()
# get all the indices
idxs = relationship_indices_from_offmol(offmol)
# make them all numpy
idxs = {key: value.numpy() for key, value in idxs.items()}
# also include n1
idxs["n1"] = np.arange(g.number_of_nodes())[:, None]
# =========================
# neighboring relationships
# =========================
# NOTE:
# here we only define the neighboring relationship
# on atom level
hg[("n1", "n1_neighbors_n1", "n1")] = idxs["n2"]
# build a mapping between indices and the ordering
idxs_to_ordering = {}
for term in ["n1", "n2", "n3", "n4", "n4_improper"]:
idxs_to_ordering[term] = {
tuple(subgraph_idxs): ordering
for (ordering, subgraph_idxs) in enumerate(list(idxs[term]))
}
# ===============================================
# relationships between nodes of different levels
# ===============================================
# NOTE:
# here we define all the possible
# 'has' and 'in' relationships.
# TODO:
# we'll test later to see if this adds too much overhead
#
for small_idx in range(1, 5):
for big_idx in range(small_idx + 1, 5):
for pos_idx in range(big_idx - small_idx + 1):
hg[
(
"n%s" % small_idx,
"n%s_as_%s_in_n%s" % (small_idx, pos_idx, big_idx),
"n%s" % big_idx,
)
] = np.stack(
[
np.array(
[
idxs_to_ordering["n%s" % small_idx][tuple(x)]
for x in idxs["n%s" % big_idx][
:, pos_idx : pos_idx + small_idx
]
]
),
np.arange(idxs["n%s" % big_idx].shape[0]),
],
axis=1,
)
hg[
(
"n%s" % big_idx,
"n%s_has_%s_n%s" % (big_idx, pos_idx, small_idx),
"n%s" % small_idx,
)
] = np.stack(
[
np.arange(idxs["n%s" % big_idx].shape[0]),
np.array(
[
idxs_to_ordering["n%s" % small_idx][tuple(x)]
for x in idxs["n%s" % big_idx][
:, pos_idx : pos_idx + small_idx
]
]
),
],
axis=1,
)
# ======================================
# nonbonded terms
# ======================================
# NOTE: everything is counted twice here
# nonbonded is where
# $A = AA = AAA = AAAA = 0$
# make dense
a_ = a.to_dense().detach().numpy()
idxs["nonbonded"] = np.stack(
np.where(
np.equal(a_ + a_ @ a_ + a_ @ a_ @ a_, 0.0)
),
axis=-1,
)
# onefour is the two ends of torsion
# idxs["onefour"] = np.stack(
# [
# idxs["n4"][:, 0],
# idxs["n4"][:, 3],
# ],
# axis=1,
# )
idxs["onefour"] = np.stack(
np.where(
np.equal(a_ + a_ @ a_, 0.0) * np.greater(a_ @ a_ @ a_, 0.0),
),
axis=-1,
)
# membership
for term in ["nonbonded", "onefour"]:
for pos_idx in [0, 1]:
hg[(term, "%s_has_%s_n1" % (term, pos_idx), "n1")] = np.stack(
[np.arange(idxs[term].shape[0]), idxs[term][:, pos_idx]],
axis=-1,
)
hg[("n1", "n1_as_%s_in_%s" % (pos_idx, term), term)] = np.stack(
[
idxs[term][:, pos_idx],
np.arange(idxs[term].shape[0]),
],
axis=-1,
)
# membership of n1 in n4_improper
for term in ["n4_improper"]:
for pos_idx in [0, 1, 2, 3]:
hg[(term, "%s_has_%s_n1" % (term, pos_idx), "n1")] = np.stack(
[np.arange(idxs[term].shape[0]), idxs[term][:, pos_idx]],
axis=-1,
)
hg[("n1", "n1_as_%s_in_%s" % (pos_idx, term), term)] = np.stack(
[
idxs[term][:, pos_idx],
np.arange(idxs[term].shape[0]),
],
axis=-1,
)
# ======================================
# relationships between nodes and graphs
# ======================================
for term in [
"n1",
"n2",
"n3",
"n4",
"n4_improper",
"nonbonded",
"onefour",
]:
hg[(term, "%s_in_g" % term, "g",)] = np.stack(
[np.arange(len(idxs[term])), np.zeros(len(idxs[term]))],
axis=1,
)
hg[("g", "g_has_%s" % term, term)] = np.stack(
[
np.zeros(len(idxs[term])),
np.arange(len(idxs[term])),
],
axis=1,
)
import dgl
hg = dgl.heterograph({key: list(value) for key, value in hg.items()})
hg.nodes["n1"].data["h0"] = g.ndata["h0"]
# include indices in the nodes themselves
for term in ["n1", "n2", "n3", "n4", "n4_improper", "onefour", "nonbonded"]:
hg.nodes[term].data["idxs"] = torch.tensor(idxs[term])
return hg
```
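A short usage sketch of the index helper above; it assumes the OpenFF toolkit is installed and uses an arbitrary SMILES string purely for illustration:

```python
from openff.toolkit.topology import Molecule

offmol = Molecule.from_smiles("CCO")  # ethanol
idxs = relationship_indices_from_offmol(offmol)

# Each entry is a torch tensor of atom-index tuples; n2/n3/n4 carry both orderings.
for key, value in idxs.items():
    print(key, tuple(value.shape))
```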
#### File: espaloma/mm/nonbonded.py
```python
import torch
# =============================================================================
# CONSTANTS
# =============================================================================
import espaloma as esp
from simtk import unit
# CODATA 2018
# ref https://en.wikipedia.org/wiki/Coulomb_constant
# Coulomb constant
K_E = (
8.9875517923 * 1e9
* unit.newton
* unit.meter ** 2
* unit.coulomb ** (-2)
* esp.units.PARTICLE ** (-1)
).value_in_unit(esp.units.COULOMB_CONSTANT_UNIT)
# =============================================================================
# UTILITY FUNCTIONS FOR COMBINATION RULES FOR NONBONDED
# =============================================================================
def geometric_mean(msg="m", out="epsilon"):
def _geometric_mean(nodes):
return {out: torch.prod(nodes.mailbox[msg], dim=1).pow(0.5)}
return _geometric_mean
def arithmetic_mean(msg="m", out="sigma"):
def _arithmetic_mean(nodes):
return {out: torch.sum(nodes.mailbox[msg], dim=1).mul(0.5)}
return _arithmetic_mean
# =============================================================================
# COMBINATION RULES FOR NONBONDED
# =============================================================================
def lorentz_berthelot(g, suffix=""):
import dgl
g.multi_update_all(
{
"n1_as_%s_in_%s"
% (pos_idx, term): (
dgl.function.copy_src(
src="epsilon%s" % suffix, out="m_epsilon"
),
geometric_mean(msg="m_epsilon", out="epsilon%s" % suffix),
)
for pos_idx in [0, 1]
for term in ["nonbonded", "onefour"]
},
cross_reducer="sum",
)
g.multi_update_all(
{
"n1_as_%s_in_%s"
% (pos_idx, term): (
dgl.function.copy_src(src="sigma%s" % suffix, out="m_sigma"),
arithmetic_mean(msg="m_sigma", out="sigma%s" % suffix),
)
for pos_idx in [0, 1]
for term in ["nonbonded", "onefour"]
},
cross_reducer="sum",
)
return g
def multiply_charges(g, suffix=""):
""" Multiply the charges of atoms into nonbonded and onefour terms.
Parameters
----------
g : dgl.HeteroGraph
Input graph.
Returns
-------
dgl.HeteroGraph : The modified graph with charges.
"""
import dgl
g.multi_update_all(
{
"n1_as_%s_in_%s"
% (pos_idx, term): (
dgl.function.copy_src(src="q%s" % suffix, out="m_q"),
dgl.function.sum(msg="m_q", out="_q")
# lambda node: {"q%s" % suffix: node.mailbox["m_q"].prod(dim=1)}
)
for pos_idx in [0, 1]
for term in ["nonbonded", "onefour"]
},
cross_reducer="stack",
apply_node_func=lambda node: {"q": node.data["_q"].prod(dim=1)}
)
return g
# =============================================================================
# ENERGY FUNCTIONS
# =============================================================================
def lj_12_6(x, sigma, epsilon):
"""Lennard-Jones 12-6.
Parameters
----------
x : `torch.Tensor`, `shape=(batch_size, 1)` or `(batch_size, batch_size, 1)`
sigma : `torch.Tensor`, `shape=(batch_size, 1)` or `(batch_size, batch_size, 1)`
epsilon : `torch.Tensor`,
`shape=(batch_size, 1)` or `(batch_size, batch_size, 1)`
Returns
-------
u : `torch.Tensor`, `shape=(batch_size, 1)` or `(batch_size, batch_size, 1)`
"""
return esp.mm.functional.lj(x=x, sigma=sigma, epsilon=epsilon)
def lj_9_6(x, sigma, epsilon):
"""Lennard-Jones 9-6.
Parameters
----------
x : `torch.Tensor`, `shape=(batch_size, 1)` or `(batch_size, batch_size, 1)`
sigma : `torch.Tensor`, `shape=(batch_size, 1)` or `(batch_size, batch_size, 1)`
epsilon : `torch.Tensor`,
`shape=(batch_size, 1)` or `(batch_size, batch_size, 1)`
Returns
-------
u : `torch.Tensor`, `shape=(batch_size, 1)` or `(batch_size, batch_size, 1)`
"""
return esp.mm.functional.lj(
x=x, sigma=sigma, epsilon=epsilon, order=[9, 6], coefficients=[2, 3]
)
def coulomb(x, q, k_e=K_E):
""" Columb interaction without cutoff.
Parameters
----------
x : `torch.Tensor`, shape=`(batch_size, 1)` or `(batch_size, batch_size, 1)`
Distance between atoms.
q : `torch.Tensor`,
        `shape=(batch_size, 1)` or `(batch_size, batch_size, 1)`
Product of charge.
Returns
-------
torch.Tensor : `shape=(batch_size, 1)` or `(batch_size, batch_size, 1)`
Coulomb energy.
Notes
-----
This computes half Coulomb energy to count for the duplication in onefour
and nonbonded enumerations.
"""
return 0.5 * k_e * q / x
```
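The Lorentz-Berthelot rules above combine per-atom parameters with a geometric mean for `epsilon` and an arithmetic mean for `sigma`. A standalone numpy sketch of the same combination, outside the DGL message passing (the parameter values are made up for illustration):

```python
import numpy as np

# Hypothetical per-atom Lennard-Jones parameters for atoms i and j.
sigma_i, sigma_j = 0.32, 0.28      # e.g. nm
epsilon_i, epsilon_j = 0.40, 0.90  # e.g. kJ/mol

sigma_ij = 0.5 * (sigma_i + sigma_j)         # arithmetic mean
epsilon_ij = np.sqrt(epsilon_i * epsilon_j)  # geometric mean

print(round(sigma_ij, 3), round(epsilon_ij, 3))  # 0.3 0.6
```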
#### File: mm/tests/test_geometry.py
```python
import pytest
import torch
import espaloma as esp
from espaloma.graphs.utils.regenerate_impropers import regenerate_impropers
def test_import():
esp.mm.geometry
# later, if we want to do multiple molecules, group these into a struct
smiles = "c1ccccc1"
n_samples = 2
## Different number of expected terms for different improper permutations
expected_n_terms = {
'none': dict(n2=24, n3=36, n4=48, n4_improper=36),
'espaloma': dict(n2=24, n3=36, n4=48, n4_improper=36),
'smirnoff': dict(n2=24, n3=36, n4=48, n4_improper=18)
}
@pytest.fixture
def all_g():
from espaloma.data.md import MoleculeVacuumSimulation
all_g = {}
for improper_def in expected_n_terms.keys():
g = esp.Graph(smiles)
if improper_def != 'none':
regenerate_impropers(g, improper_def)
simulation = MoleculeVacuumSimulation(
n_samples=n_samples, n_steps_per_sample=1
)
g = simulation.run(g, in_place=True)
all_g[improper_def] = g
return all_g
def test_geometry_can_be_computed_without_exceptions(all_g):
for g in all_g.values():
g = esp.mm.geometry.geometry_in_graph(g.heterograph)
def test_geometry_n_terms(all_g):
for improper_def, g in all_g.items():
g = esp.mm.geometry.geometry_in_graph(g.heterograph)
for term, n_terms in expected_n_terms[improper_def].items():
assert g.nodes[term].data["x"].shape == torch.Size(
[n_terms, n_samples]
)
```
#### File: scripts/denali-dataset/transform.py
```python
import espaloma as esp
def run(in_path, out_path, u_threshold=0.1):
g = esp.Graph.load(in_path)
from espaloma.data.md import subtract_nonbonded_force
g = subtract_nonbonded_force(g, subtract_charges=True)
# get number of snapshots
n_data = g.nodes['n1'].data['xyz'].shape[1]
u_min = g.nodes['g'].data['u_ref'].min().item()
print(n_data)
# original indicies
idxs = list(range(n_data))
idxs = [idx for idx in idxs if g.nodes['g'].data['u_ref'][:, idx].item() < u_min + u_threshold]
g.nodes['n1'].data['xyz'] = g.nodes['n1'].data['xyz'][:, idxs, :]
g.nodes['g'].data['u_ref'] = g.nodes['g'].data['u_ref'][:, idxs]
n_data = len(idxs)
print(n_data)
if n_data > 1:
g.save(out_path)
if __name__ == "__main__":
import sys
run(sys.argv[1], sys.argv[2])
```
#### File: tyk2/openff-1.2.0/benchmark_analysis.py
```python
import argparse
import glob
import itertools
import re
import warnings
import numpy as np
import urllib.request
import yaml
from openmmtools.constants import kB
from perses.analysis.load_simulations import Simulation
from simtk import unit
from openff.arsenic import plotting, wrangle
# global variables
base_repo_url = "https://github.com/openforcefield/protein-ligand-benchmark"
# Helper functions
def get_simdir_list(base_dir='.', is_reversed=False):
"""
Get list of directories to extract simulation data.
Attributes
----------
base_dir: str, optional, default='.'
Base directory where to search for simulations results. Defaults to current directory.
is_reversed: bool, optional, default=False
Whether to consider the reversed simulations or not. Meant for testing purposes.
Returns
-------
dir_list: list
List of directories paths for simulation results.
"""
# Load all expected simulation from directories
out_dirs = ['/'.join(filepath.split('/')[:-1]) for filepath in glob.glob(f'{base_dir}/out*/*complex.nc')]
reg = re.compile(r'out_[0-9]+_[0-9]+_reversed') # regular expression to deal with reversed directories
if is_reversed:
# Choose only reversed directories
out_dirs = list(filter(reg.search, out_dirs))
else:
# Filter out reversed directories
out_dirs = list(itertools.filterfalse(reg.search, out_dirs))
return out_dirs
def get_simulations_data(simulation_dirs):
"""Generates a list of simulation data objects given the simulation directories paths."""
simulations = []
for out_dir in simulation_dirs:
# Load complete or fully working simulations
# TODO: Try getting better exceptions from openmmtools -- use non-generic exceptions
try:
simulation = Simulation(out_dir)
simulations.append(simulation)
except Exception:
warnings.warn(f"Edge in {out_dir} could not be loaded. Check simulation output is complete.")
return simulations
def to_arsenic_csv(experimental_data: dict, simulation_data: list, out_csv: str = 'out_benchmark.csv'):
"""
Generates a csv file to be used with openff-arsenic. Energy units in kcal/mol.
.. warning:: To be deprecated once arsenic object model is improved.
Parameters
----------
experimental_data: dict
Python nested dictionary with experimental data in micromolar or nanomolar units.
Example of entry:
{'lig_ejm_31': {'measurement': {'comment': 'Table 4, entry 31',
'doi': '10.1016/j.ejmech.2013.03.070',
'error': -1,
'type': 'ki',
'unit': 'uM',
'value': 0.096},
'name': 'lig_ejm_31',
'smiles': '[H]c1c(c(c(c(c1[H])Cl)C(=O)N([H])c2c(c(nc(c2[H])N([H])C(=O)C([H])([H])[H])[H])[H])Cl)[H]'}
simulation_data: list or iterable
Python iterable object with perses Simulation objects as entries.
out_csv: str
Path to output csv file to be generated.
"""
# Ligand information
ligands_names = list(ligands_dict.keys())
lig_id_to_name = dict(enumerate(ligands_names))
kBT = kB * 300 * unit.kelvin # useful when converting to kcal/mol
# Write csv file
with open(out_csv, 'w') as csv_file:
# Experimental block
# print header for block
csv_file.write("# Experimental block\n")
csv_file.write("# Ligand, expt_DG, expt_dDG\n")
# Extract ligand name, expt_DG and expt_dDG from ligands dictionary
for ligand_name, ligand_data in experimental_data.items():
# TODO: Handle multiple measurement types
unit_symbol = ligand_data['measurement']['unit']
measurement_value = ligand_data['measurement']['value']
measurement_error = ligand_data['measurement']['error']
# Unit conversion
# TODO: Let's persuade PLBenchmarks to use pint units
unit_conversions = { 'M' : 1.0, 'mM' : 1e-3, 'uM' : 1e-6, 'nM' : 1e-9, 'pM' : 1e-12, 'fM' : 1e-15 }
if unit_symbol not in unit_conversions:
raise ValueError(f'Unknown units "{unit_symbol}"')
value_to_molar= unit_conversions[unit_symbol]
# Handle unknown errors
# TODO: We should be able to ensure that all entries have more reasonable errors.
if measurement_error == -1:
# TODO: For now, we use a relative_error from the Tyk2 system 10.1016/j.ejmech.2013.03.070
relative_error = 0.3
else:
relative_error = measurement_error / measurement_value
            # Convert to free energies
expt_DG = kBT.value_in_unit(unit.kilocalorie_per_mole) * np.log(measurement_value * value_to_molar)
expt_dDG = kBT.value_in_unit(unit.kilocalorie_per_mole) * relative_error
csv_file.write(f"{ligand_name}, {expt_DG}, {expt_dDG}\n")
# Calculated block
# print header for block
csv_file.write("# Calculated block\n")
csv_file.write("# Ligand1,Ligand2, calc_DDG, calc_dDDG(MBAR), calc_dDDG(additional)\n")
# Loop through simulation, extract ligand1 and ligand2 indices, convert to names, create string with
# ligand1, ligand2, calc_DDG, calc_dDDG(MBAR), calc_dDDG(additional)
# write string in csv file
for simulation in simulation_data:
out_dir = simulation.directory.split('/')[-1]
# getting integer indices
ligand1_id, ligand2_id = int(out_dir.split('_')[-1]), int(out_dir.split('_')[-2]) # CHECK ORDER!
# getting names of ligands
ligand1, ligand2 = lig_id_to_name[ligand1_id], lig_id_to_name[ligand2_id]
# getting calc_DDG in kcal/mol
calc_DDG = simulation.bindingdg.value_in_unit(unit.kilocalorie_per_mole)
# getting calc_dDDG in kcal/mol
calc_dDDG = simulation.bindingddg.value_in_unit(unit.kilocalorie_per_mole)
csv_file.write(
f"{ligand1}, {ligand2}, {calc_DDG}, {calc_dDDG}, 0.0\n") # hardcoding additional error as 0.0
# Defining command line arguments
# fetching targets from github repo
# TODO: This part should be done using plbenchmarks API - once there is a conda pkg
targets_url = f"{base_repo_url}/raw/master/data/targets.yml"
with urllib.request.urlopen(targets_url) as response:
targets_dict = yaml.safe_load(response.read())
# get the possible choices from targets yaml file
target_choices = targets_dict.keys()
arg_parser = argparse.ArgumentParser(description='CLI tool for running perses protein-ligand benchmarks analysis.')
arg_parser.add_argument(
"--target",
type=str,
help="Target biomolecule, use openff's plbenchmark names.",
choices=target_choices,
required=True
)
arg_parser.add_argument(
"--reversed",
action='store_true',
help="Analyze reversed edge simulations. Helpful for testing/consistency checks."
)
args = arg_parser.parse_args()
target = args.target
# Download experimental data
# TODO: This part should be done using plbenchmarks API - once there is a conda pkg
# TODO: Let's cache this data when we set up the initial simulations in case it changes in between setting up and running the calculations and analysis.
# TODO: Let's also be sure to use a specific release tag rather than 'master'
target_dir = targets_dict[target]['dir']
ligands_url = f"{base_repo_url}/raw/master/data/{target_dir}/00_data/ligands.yml"
with urllib.request.urlopen(ligands_url) as response:
yaml_contents = response.read()
print(yaml_contents)
ligands_dict = yaml.safe_load(yaml_contents)
# DEBUG
print('')
print(yaml.dump(ligands_dict))
# Get paths for simulation output directories
out_dirs = get_simdir_list(is_reversed=args.reversed)
# Generate list with simulation objects
simulations = get_simulations_data(out_dirs)
# Generate csv file
csv_path = f'./{target}_arsenic.csv'
to_arsenic_csv(ligands_dict, simulations, out_csv=csv_path)
# TODO: Separate plotting in a different file
# Make plots and store
fe = wrangle.FEMap(csv_path)
# Relative plot
plotting.plot_DDGs(fe.graph,
target_name=f'{target}',
title=f'Relative binding energies - {target}',
figsize=5,
filename='./plot_relative.pdf'
)
# Absolute plot, with experimental data shifted to correct mean
experimental_mean_dg = np.asarray([node[1]["exp_DG"] for node in fe.graph.nodes(data=True)]).mean()
plotting.plot_DGs(fe.graph,
target_name=f'{target}',
title=f'Absolute binding energies - {target}',
figsize=5,
filename='./plot_absolute.pdf',
shift=experimental_mean_dg,
)
```
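For reference, the experimental-block conversion above is just ΔG = kBT·ln(Ki in molar). A standalone sketch of the arithmetic using the Ki quoted in the docstring example; the constant here is the molar gas constant in kcal/(mol·K), standing in for the openmmtools `kB * T` expression:

```python
import numpy as np

kB = 0.0019872041        # kcal/(mol*K)
T = 300.0                # K, matching the script above
kBT = kB * T             # ~0.596 kcal/mol

ki_uM = 0.096            # Ki for lig_ejm_31 from the example docstring
ki_M = ki_uM * 1e-6      # convert to molar

expt_DG = kBT * np.log(ki_M)
print(round(expt_DG, 2))  # roughly -9.63 kcal/mol
```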
#### File: tyk2/openff-1.2.0/run_benchmarks.py
```python
import argparse
import logging
import os
import yaml
from perses.app.setup_relative_calculation import run
from perses.utils.url_utils import retrieve_file_url
from perses.utils.url_utils import fetch_url_contents
# Setting logging level config
LOGLEVEL = os.environ.get("LOGLEVEL", "DEBUG").upper()
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=LOGLEVEL,
datefmt='%Y-%m-%d %H:%M:%S')
_logger = logging.getLogger()
_logger.setLevel(LOGLEVEL)
# global variables
base_repo_url = "https://github.com/openforcefield/protein-ligand-benchmark"
def concatenate_files(input_files, output_file):
"""
Concatenate files given in input_files iterator into output_file.
"""
with open(output_file, 'w') as outfile:
for filename in input_files:
with open(filename) as infile:
for line in infile:
outfile.write(line)
def run_relative_perturbation(lig_a_idx, lig_b_idx, reverse=False, tidy=True):
"""
Perform relative free energy simulation using perses CLI.
Parameters
----------
lig_a_idx : int
Index for first ligand (ligand A)
lig_b_idx : int
Index for second ligand (ligand B)
reverse: bool
Run the edge in reverse direction. Swaps the ligands.
tidy : bool, optional
remove auto-generated yaml files.
Expects the target/protein pdb file in the same directory to be called 'target.pdb', and ligands file
to be called 'ligands.sdf'.
"""
_logger.info(f'Starting relative calculation of ligand {lig_a_idx} to {lig_b_idx}')
trajectory_directory = f'out_{lig_a_idx}_{lig_b_idx}'
new_yaml = f'relative_{lig_a_idx}_{lig_b_idx}.yaml'
# read base template yaml file
# TODO: template.yaml file is configured for Tyk2, check if the same options work for others.
with open(f'template.yaml', "r") as yaml_file:
options = yaml.load(yaml_file, Loader=yaml.FullLoader)
# TODO: add a step to perform some minimization - should help with NaNs
# generate yaml file from template
options['protein_pdb'] = 'target.pdb'
options['ligand_file'] = 'ligands.sdf'
if reverse:
# Do the other direction of ligands
options['old_ligand_index'] = lig_b_idx
options['new_ligand_index'] = lig_a_idx
# mark the output directory with reversed
trajectory_directory = f'{trajectory_directory}_reversed'
# mark new yaml file with reversed
temp_path = new_yaml.split('.')
new_yaml = f'{temp_path[0]}_reversed.{temp_path[1]}'
else:
options['old_ligand_index'] = lig_a_idx
options['new_ligand_index'] = lig_b_idx
options['trajectory_directory'] = f'{trajectory_directory}'
with open(new_yaml, 'w') as outfile:
yaml.dump(options, outfile)
# run the simulation - using API point to respect logging level
run(new_yaml)
_logger.info(f'Relative calculation of ligand {lig_a_idx} to {lig_b_idx} complete')
if tidy:
os.remove(new_yaml)
# Defining command line arguments
# fetching targets from github repo
# TODO: This part should be done using plbenchmarks API - once there is a conda pkg
targets_url = f"{base_repo_url}/raw/master/data/targets.yml"
with fetch_url_contents(targets_url) as response:
targets_dict = yaml.safe_load(response.read())
# get the possible choices from targets yaml file
target_choices = targets_dict.keys()
arg_parser = argparse.ArgumentParser(description='CLI tool for running perses protein-ligand benchmarks.')
arg_parser.add_argument(
"--target",
type=str,
help="Target biomolecule, use openff's plbenchmark names.",
choices=target_choices,
required=True
)
arg_parser.add_argument(
"--edge",
type=int,
help="Edge index (0-based) according to edges yaml file in dataset. Ex. --edge 5 (for sixth edge)",
required=True
)
arg_parser.add_argument(
"--reversed",
action='store_true',
help="Whether to run the edge in reverse direction. Helpful for consistency checks."
)
args = arg_parser.parse_args()
target = args.target
is_reversed = args.reversed
# Fetch protein pdb file
# TODO: This part should be done using plbenchmarks API - once there is a conda pkg
target_dir = targets_dict[target]['dir']
pdb_url = f"{base_repo_url}/raw/master/data/{target_dir}/01_protein/crd/protein.pdb"
pdb_file = retrieve_file_url(pdb_url)
# Fetch cofactors crystalwater pdb file
# TODO: This part should be done using plbenchmarks API - once there is a conda pkg
cofactors_url = f"{base_repo_url}/raw/master/data/{target_dir}/01_protein/crd/cofactors_crystalwater.pdb"
cofactors_file = retrieve_file_url(cofactors_url)
# Concatenate protein with cofactors pdbs
concatenate_files((pdb_file, cofactors_file), 'target.pdb')
# Fetch ligands sdf files and concatenate them in one
# TODO: This part should be done using plbenchmarks API - once there is a conda pkg
ligands_url = f"{base_repo_url}/raw/master/data/{target_dir}/00_data/ligands.yml"
with fetch_url_contents(ligands_url) as response:
ligands_dict = yaml.safe_load(response.read())
ligand_files = []
for ligand in ligands_dict.keys():
ligand_url = f"{base_repo_url}/raw/master/data/{target_dir}/02_ligands/{ligand}/crd/{ligand}.sdf"
ligand_file = retrieve_file_url(ligand_url)
ligand_files.append(ligand_file)
# concatenate sdfs
concatenate_files(ligand_files, 'ligands.sdf')
# run simulation
# fetch edges information
# TODO: This part should be done using plbenchmarks API - once there is a conda pkg
edges_url = f"{base_repo_url}/raw/master/data/{target_dir}/00_data/edges.yml"
with fetch_url_contents(edges_url) as response:
edges_dict = yaml.safe_load(response.read())
edges_list = list(edges_dict.values()) # suscriptable edges object - note dicts are ordered for py>=3.7
# edge list to access by index
edge_index = args.edge # read from cli arguments
edge = edges_list[edge_index]
ligand_a_name = edge['ligand_a']
ligand_b_name = edge['ligand_b']
# ligands list to get indices -- preserving same order as upstream yaml file
ligands_list = list(ligands_dict.keys())
lig_a_index = ligands_list.index(ligand_a_name)
lig_b_index = ligands_list.index(ligand_b_name)
# Perform the simulation
run_relative_perturbation(lig_a_index, lig_b_index, reverse=is_reversed)
```
|
{
"source": "jeheyer/code",
"score": 3
}
|
#### File: code/html/csv2json.py
```python
import csv
import json
def ReadCSV(file_name: str):
file = open(file_name)
csvreader = csv.reader(file)
header_fields = []
header_fields = next(csvreader)
#print(header_fields)
rows = []
for _ in csvreader:
rows.append(_)
file.close()
return header_fields, rows
def GenerateJSON(header_fields, data):
json_data = []
for _ in range(len(data)):
row = {}
row['name'] = data[_][0]
for i in range(1,len(data[_])):
row[header_fields[i]] = data[_][i]
json_data.append(row)
return json_data
headers, data = ReadCSV('../../../OpenText/iPhones.csv')
print(json.dumps(GenerateJSON(headers, data), indent=2))
```
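A quick sketch of how the two helpers above behave on a hypothetical CSV (the file name and rows are invented for illustration; the script itself reads an iPhone inventory file):

```python
# people.csv (hypothetical):
#   name,age,city
#   Alice,34,Boston
#   Bob,29,Denver
headers, rows = ReadCSV('people.csv')
records = GenerateJSON(headers, rows)
# records == [{'name': 'Alice', 'age': '34', 'city': 'Boston'},
#             {'name': 'Bob', 'age': '29', 'city': 'Denver'}]
print(json.dumps(records, indent=2))
```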
#### File: code/python/fastapi_test.py
```python
from fastapi import FastAPI, Request
app = FastAPI()
# Route all possible paths here
@app.get("/")
@app.get("/{path:path}")
def root(path, req: Request):
import traceback
from starlette.responses import Response
try:
return Response(
status_code = 200,
headers = {'Content-type': "text/plain"},
content = str(vars(req))
)
except:
return Response(status_code = 500, content = traceback.format_exc())
```
#### File: code/python/handle_post_flask.py
```python
from paste import request as paste_request
from flask import Flask, request, redirect
from lib.web_apps import *
# WSGI entry point
def application(environ, start_response):
import traceback
try:
path = environ.get('REQUEST_URI', '/').split('?')[0]
inputs = {}
if "poll_vote" in path:
            fields = paste_request.parse_formvars(environ)
for _ in ['poll_db','poll_name','poll_desc','poll_url','choice_id']:
inputs[_] = fields.get(_, None)
if int(inputs['choice_id']) > 0:
PollVote(inputs['poll_db'], inputs['poll_name'], inputs['choice_id'])
redirect_url = f"{inputs['poll_url']}?poll_name={inputs['poll_name']}&poll_desc={inputs['poll_desc']}"
if "graffiti_post" in path:
            fields = paste_request.parse_formvars(environ)
for _ in ['db_name','wall','name','text','graffiti_url']:
inputs[_] = fields.get(_, None)
GraffitiPost(inputs['db_name'], inputs['wall'], inputs['name'], inputs['text'])
redirect_url = f"{inputs['graffiti_url']}?wall={inputs['wall']}"
        start_response('302 Found', [('Location', redirect_url)])
return []
except:
response_headers = [ ('Content-type', 'text/plain') ]
start_response('500 Internal Server Error', response_headers)
return [ str(traceback.format_exc()).encode('utf-8') ]
# Flask entry point
app = Flask(__name__)
@app.route("/", defaults = {'path': ""}, methods=['POST'])
@app.route("/<string:path>", methods=['POST'])
@app.route("/<path:path>", methods=['POST'])
def root(path):
import traceback
try:
inputs = {}
if "poll_vote" in path:
for _ in ['poll_db','poll_name','poll_desc','poll_url','choice_id']:
inputs[_] = request.form.get(_)
PollVote(inputs['poll_db'], inputs['poll_name'], inputs['choice_id'])
redirect_url = f"{inputs['poll_url']}?poll_name={inputs['poll_name']}&poll_desc={inputs['poll_desc']}"
if "graffiti_post" in path:
for _ in ['db_name','wall','name','text','graffiti_url']:
inputs[_] = request.form.get(_)
GraffitiPost(inputs['db_name'], inputs['wall'], inputs['name'], inputs['text'])
redirect_url = f"{inputs['graffiti_url']}?wall={inputs['wall']}"
return redirect(redirect_url)
except:
return format(traceback.format_exc()), 500, {'Content-Type': "text/plain"}
if __name__ == '__main__':
app.run(debug=True, port=5000)
```
#### File: python/lib/logfile.py
```python
class LogFile():
def __init__(self, filename, filter = None):
self.filename = filename
self.contents = []
self.num_lines = 0
self.ReadFile(filter)
def ReadFile(self, filter = None):
try:
fh = open(self.filename,"r")
except:
raise Exception("ERROR: could not read log file '" + self.filename + "'")
for line in fh:
if filter:
if filter in line:
parts = line.split(" ")
self.contents.append(parts)
else:
parts = line.split(" ")
self.contents.append(parts)
self.num_lines += 1
fh.close()
```
#### File: python/lib/system_tools.py
```python
def GetDNSServersFromToken(token = "<PASSWORD>1234"):
import re
try:
# Override if this is test token
if token == "testing1234":
dns_resolvers = [ "192.0.2.53", "198.51.100.53", "203.0.113.53" ]
else:
# Open the BIND log file for A record queries
dns_resolvers = []
dns_resolvers_hash = dict()
bind_log_file = LogFile("/var/log/named/query.log", " IN A ")
for line in bind_log_file.contents:
if token in line[7]:
source_ip, source_port = line[6].split("#")
if not re.match("10.|192.168.", source_ip) and source_ip not in dns_resolvers_hash:
dns_resolvers_hash[source_ip] = True
dns_resolvers.append(source_ip)
return dict(dns_resolvers = dns_resolvers)
except Exception as e:
raise Exception(e)
def GetConfig(type, key = None):
import configparser
# Read config file
config = configparser.ConfigParser()
config.read('/web/private/cfg/{}.cfg'.format(type))
if key:
return config[key]
return config
def ReadFromHTTPS(hostname, path):
import http.client
import ssl
lines = []
try:
ssl_context = ssl._create_unverified_context()
conn = http.client.HTTPSConnection(hostname, port = 443, timeout = 3, context = ssl_context)
#conn = http.client.HTTPConnection(hostname, port = 80, timeout = 3)
conn.request(method = "GET", url = path)
resp = conn.getresponse()
lines = resp.read().decode("utf-8").rstrip().splitlines()
except Exception as e:
return e
conn.close()
return lines
def ReadFromGoogleCloudStorage(bucket_name, file_name):
from google.cloud import storage
lines = []
try:
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(file_name)
return blob.download_as_string().decode("utf-8").rstrip().splitlines()
#out_file = "/var/tmp/" + file_name.split("/")[-1]
#blob.download_to_filename(out_file)
#print(out_file)
except Exception as e:
raise(e)
return lines
def ReadFromS3(bucket_name, file_name):
import boto3
return None
def ProcessBlob(source_name = None, lines = []):
from time import time
from math import floor
#now = math.floor(time.time())
now = 1614968742
threshold = now - 7200
entries = []
for l in range(len(lines)-1, 0, -1):
line = lines[l]
parts = line.split()
if int(parts[0].split('.')[0]) > threshold:
entry = {'reporter': source_name, 'data': parts}
#for i in range(0,len(fields)):
# if i == 0:
# datetimestr = datetime.fromtimestamp(int(parts[0].split(".")[0]), tz=None)
# entry['timestamp'] = datetimestr.strftime("%d-%m-%y %H:%M:%S")
# else:
# entry[fields[i]] = parts[i]
entries.append(entry)
else:
break
return entries
def ReadFromFile(file_name):
#import mmap
lines = []
f = open(file_name)
return f.readlines()
#with open(file_name, 'r+') as f:
# for line in f:
# lines.append(line)
#return lines
#with open(file_name, 'r') as f:
# for piece in read_in_chunks(f):
# lines.append(piece)
#for line in open(file_name):
# lines.append(line)
#return lines
#with open(file_name, "r+") as f:
# map = mmap.mmap(f.fileno(), 0)
# map.close()
#return lines
def ReadInput(file_name: str) -> (list, str):
lines = []
f = open(file_name)
lines = f.readlines()
file_ext = file_name.split(".")[-1]
return (lines, file_ext)
def ConvertToDict(contents: list, file_type: str) -> list:
data = []
if file_type == "csv":
for line in contents:
line = line.rstrip()
print(line)
parts = line.split(",")
obj = {}
for i in range(0,len(parts)):
obj[i] = parts[i]
data.append(obj)
return data
class LogFile():
def __init__(self, filename, filter = None):
self.filename = filename
self.contents = []
self.num_lines = 0
self.ReadFile(filter)
def ReadFile(self, filter = None):
try:
fh = open(self.filename,"r")
except:
raise Exception("ERROR: could not read log file '" + self.filename + "'")
for line in fh:
if filter:
if filter in line:
parts = line.split(" ")
self.contents.append(parts)
else:
parts = line.split(" ")
self.contents.append(parts)
self.num_lines += 1
fh.close()
```
#### File: code/python/quart_driver.py
```python
from quart import Quart
from lib.http_utils import *
from lib.makejson import *
import traceback
app = Quart(__name__)
app.config['JSON_SORT_KEYS'] = False
@app.route("/", defaults = {'path': ""})
@app.route("/<string:path>")
@app.route("/<path:path>")
def root(path):
from quart import request
from quart.helpers import make_response
from quart.json import jsonify
http_request = HTTPRequest(request = request)
try:
data = main(vars(http_request))
return jsonify(data), 200
except:
return format(traceback.format_exc()), 500, {'Content-Type': "text/plain"}
if __name__ == '__main__':
app.run()
```
|
{
"source": "jehiah/pynsq",
"score": 2
}
|
#### File: pynsq/nsq/__init__.py
```python
from __future__ import absolute_import
import signal
import tornado.ioloop
import logging
from .protocol import (
Error,
unpack_response,
decode_message,
valid_topic_name,
valid_channel_name,
identify,
subscribe,
ready,
finish,
touch,
requeue,
nop,
pub,
mpub,
FRAME_TYPE_RESPONSE,
FRAME_TYPE_ERROR,
FRAME_TYPE_MESSAGE,
)
from .message import Message
from .backoff_timer import BackoffTimer
from .sync import SyncConn
from .async import AsyncConn
from .reader import Reader
from .legacy_reader import LegacyReader
from .writer import Writer
from .version import __version__ # NOQA
def _handle_term_signal(sig_num, frame):
logging.getLogger(__name__).info(
'TERM Signal handler called with signal %r', sig_num)
tornado.ioloop.IOLoop.instance().stop()
def run():
"""
Starts any instantiated :class:`nsq.Reader` or :class:`nsq.Writer`
"""
signal.signal(signal.SIGTERM, _handle_term_signal)
tornado.ioloop.IOLoop.instance().start()
__author__ = "<NAME> <<EMAIL>>"
__all__ = ["Reader", "Writer", "run", "BackoffTimer", "Message", "Error", "LegacyReader",
"SyncConn", "AsyncConn", "unpack_response", "decode_message",
"identify", "subscribe", "ready", "finish", "touch", "requeue", "nop", "pub", "mpub",
"valid_topic_name", "valid_channel_name",
"FRAME_TYPE_RESPONSE", "FRAME_TYPE_ERROR", "FRAME_TYPE_MESSAGE"]
```
#### File: pynsq/nsq/sync.py
```python
from __future__ import absolute_import
import socket
import struct
from nsq import protocol
class SyncConn(object):
def __init__(self, timeout=1.0):
self.buffer = ''
self.timeout = timeout
self.s = None
def connect(self, host, port):
assert isinstance(host, (str, unicode))
assert isinstance(port, int)
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.settimeout(self.timeout)
self.s.connect((host, port))
self.s.send(protocol.MAGIC_V2)
def _readn(self, size):
while True:
if len(self.buffer) >= size:
break
packet = self.s.recv(4096)
if not packet:
raise Exception('failed to read %d' % size)
self.buffer += packet
data = self.buffer[:size]
self.buffer = self.buffer[size:]
return data
def read_response(self):
size = struct.unpack('>l', self._readn(4))[0]
return self._readn(size)
def send(self, data):
self.s.send(data)
```
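`read_response` above relies on NSQ's length-prefixed framing: a 4-byte big-endian size, then a body that starts with a 4-byte frame type. A small offline sketch of that layout using byte strings (the original module is Python 2 and works with plain `str`):

```python
import struct

FRAME_TYPE_RESPONSE = 0

# Hand-built frame: [4-byte size][4-byte frame type][data]
data = b'OK'
body = struct.pack('>l', FRAME_TYPE_RESPONSE) + data
frame = struct.pack('>l', len(body)) + body

size = struct.unpack('>l', frame[:4])[0]         # what _readn(4) yields
frame_type = struct.unpack('>l', frame[4:8])[0]  # unpack_response splits this off
payload = frame[8:4 + size]
print(size, frame_type, payload)                 # 6 0 b'OK'
```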
#### File: pynsq/tests/test_command.py
```python
from __future__ import absolute_import
import struct
import pytest
import os
import sys
try:
import simplejson as json
except ImportError:
import json # pyflakes.ignore
# shunt '..' into sys.path since we are in a 'tests' subdirectory
base_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
if base_dir not in sys.path:
sys.path.insert(0, base_dir)
from nsq import protocol
def pytest_generate_tests(metafunc):
identify_dict_ascii = {'a': 1, 'b': 2}
identify_dict_unicode = {'c': u'w\xc3\xa5\xe2\x80\xa0'}
identify_body_ascii = json.dumps(identify_dict_ascii)
identify_body_unicode = json.dumps(identify_dict_unicode)
msgs = ['asdf', 'ghjk', 'abcd']
mpub_body = struct.pack('>l', len(msgs)) + ''.join(struct.pack('>l', len(m)) + m for m in msgs)
if metafunc.function == test_command:
for cmd_method, kwargs, result in [
(protocol.identify,
{'data': identify_dict_ascii},
'IDENTIFY\n' + struct.pack('>l', len(identify_body_ascii)) +
identify_body_ascii),
(protocol.identify,
{'data': identify_dict_unicode},
'IDENTIFY\n' + struct.pack('>l', len(identify_body_unicode)) +
identify_body_unicode),
(protocol.subscribe,
{'topic': 'test_topic', 'channel': 'test_channel'},
'SUB test_topic test_channel\n'),
(protocol.finish,
{'id': 'test'},
'FIN test\n'),
(protocol.finish,
{'id': u'\u2020est \xfcn\xee\xe7\xf8\u2202\xe9'},
'FIN \xe2\x80\xa0est \xc3\xbcn\xc3\xae\xc3\xa7\xc3\xb8\xe2\x88\x82\xc3\xa9\n'),
(protocol.requeue,
{'id': 'test'},
'REQ test 0\n'),
(protocol.requeue,
{'id': 'test', 'time_ms': 60},
'REQ test 60\n'),
(protocol.touch,
{'id': 'test'},
'TOUCH test\n'),
(protocol.ready,
{'count': 100},
'RDY 100\n'),
(protocol.nop,
{},
'NOP\n'),
(protocol.pub,
{'topic': 'test', 'data': msgs[0]},
'PUB test\n' + struct.pack('>l', len(msgs[0])) + msgs[0]),
(protocol.mpub,
{'topic': 'test', 'data': msgs},
'MPUB test\n' + struct.pack('>l', len(mpub_body)) + mpub_body)
]:
metafunc.addcall(funcargs=dict(cmd_method=cmd_method, kwargs=kwargs, result=result))
def test_command(cmd_method, kwargs, result):
assert cmd_method(**kwargs) == result
def test_unicode_body():
pytest.raises(AssertionError, protocol.pub, 'topic', u'unicode body')
```
#### File: pynsq/tests/test_sync.py
```python
from __future__ import absolute_import
import struct
import time
from . import mock_socket
from nsq import protocol, sync
sync.socket = mock_socket
def mock_write(c, data):
c.s.queue_recv(data)
def mock_response_write(c, frame_type, data):
body_size = 4 + len(data)
body_size_packed = struct.pack('>l', body_size)
frame_type_packed = struct.pack('>l', frame_type)
mock_write(c, body_size_packed + frame_type_packed + data)
def mock_response_write_message(c, timestamp, attempts, id, body):
timestamp_packed = struct.pack('>q', timestamp)
attempts_packed = struct.pack('>h', attempts)
id = "%016d" % id
mock_response_write(
c, protocol.FRAME_TYPE_MESSAGE, timestamp_packed + attempts_packed + id + body)
def test_sync_authenticate_subscribe():
c = sync.SyncConn()
c.connect("127.0.0.1", 4150)
c.send(protocol.identify({'short_id': 'test', 'long_id': 'test.example'}))
c.send(protocol.subscribe('test', 'ch'))
mock_response_write(c, protocol.FRAME_TYPE_RESPONSE, 'OK')
mock_response_write(c, protocol.FRAME_TYPE_RESPONSE, 'OK')
resp = c.read_response()
unpacked = protocol.unpack_response(resp)
assert unpacked[0] == protocol.FRAME_TYPE_RESPONSE
assert unpacked[1] == 'OK'
resp = c.read_response()
unpacked = protocol.unpack_response(resp)
assert unpacked[0] == protocol.FRAME_TYPE_RESPONSE
assert unpacked[1] == 'OK'
def test_sync_receive_messages():
c = sync.SyncConn()
c.connect("127.0.0.1", 4150)
c.send(protocol.identify({'short_id': 'test', 'long_id': 'test.example'}))
c.send(protocol.subscribe('test', 'ch'))
mock_response_write(c, protocol.FRAME_TYPE_RESPONSE, 'OK')
mock_response_write(c, protocol.FRAME_TYPE_RESPONSE, 'OK')
resp = c.read_response()
unpacked = protocol.unpack_response(resp)
assert unpacked[0] == protocol.FRAME_TYPE_RESPONSE
assert unpacked[1] == 'OK'
resp = c.read_response()
unpacked = protocol.unpack_response(resp)
assert unpacked[0] == protocol.FRAME_TYPE_RESPONSE
assert unpacked[1] == 'OK'
for i in range(10):
c.send(protocol.ready(1))
body = '{"data": {"test_key": %d}}' % i
ts = int(time.time() * 1000 * 1000)
mock_response_write_message(c, ts, 0, i, body)
resp = c.read_response()
unpacked = protocol.unpack_response(resp)
assert unpacked[0] == protocol.FRAME_TYPE_MESSAGE
msg = protocol.decode_message(unpacked[1])
assert msg.timestamp == ts
assert msg.id == "%016d" % i
assert msg.attempts == 0
assert msg.body == body
```
|
{
"source": "jehinescmu/cc_tools",
"score": 4
}
|
#### File: jehinescmu/cc_tools/example_2_family_json.py
```python
import json
import family_data
#Creates and returns a Family object(defined in example_data) from loaded json_data
def make_family_from_json(json_data):
new_family = family_data.Family()
    # Stopping here is a good check that this is a dictionary
new_family.parents = json_data["parents"]
for kid_json in json_data["kids"]:
new_kid = family_data.Kid()
new_kid.age = kid_json["age"]
new_kid.name = kid_json["name"]
new_family.kids.append(new_kid)
return new_family
with open("data/family.json", "r") as reader:
family_json = json.load(reader)
print("JSON data:")
print(family_json)
family_data = make_family_from_json(family_json)
print()
print("Family data:")
print(family_data)
```
#### File: jehinescmu/cc_tools/part_2_read_test_json.py
```python
import json
import test_data
# Part 1 ===================================================================================
# Creates and returns a GameLibrary object(defined in test_data) from loaded json_data
def make_game_library_from_json(json_data):
# Initialize a new game library
game_library = test_data.GameLibrary()
# Loop through each individual game
for game_json in json_data["games"]:
# Initialize a new game
game = test_data.Game()
game.title = game_json["title"]
game.year = game_json["year"]
# Loop through each detail of the platform
for platform_json in game_json["platform"]:
# Initialize a new platform
game.platform = test_data.Platform()
game.platform.name = platform_json["name"]
game.platform.launch_year = platform_json["launch_year"]
# Add the game to the new game library
game_library.add_game(game)
# return the completed library
return game_library
# Part 2 ===================================================================================
input_json_file = "data/test_data.json"
# Open the file specified by input_json_file + use the json module to load the data from the file
with open("data/test_data.json", "r") as reader:
test_data_json = json.load(reader)
# Print JSON Data for reference
print("JSON Data:")
print(test_data_json)
# Use make_game_library_from_json(json_data) to convert the data to GameLibrary data
test_data = make_game_library_from_json(test_data_json)
# Print the resulting Game Library
print("Test Data:")
print(test_data)
```
|
{
"source": "JEHoctor/spelling-bee",
"score": 3
}
|
#### File: spelling-bee/scripts/convert_archive.py
```python
import click
import pickle
import spelling_bee
@click.command()
def convert_archive():
"""
Convert the pickle files in the archive directory to json
"""
archive = spelling_bee.archive.Archive()
pickle_files = sorted(archive.path.glob("*.pkl"))
for pf in pickle_files:
with pf.open("rb") as f:
puzzle = pickle.load(f)
tayp = spelling_bee.scrape.TodayAndYesterdayPuzzles.parse_raw(puzzle.game_data_json)
archive.archive(tayp.today)
archive.archive(tayp.yesterday)
def main():
"""
Main method. Call into click command
"""
convert_archive()
if __name__ == "__main__":
main()
```
|
{
"source": "jehontan/gesture_control",
"score": 2
}
|
#### File: gesture_control/gesture_control/controller.py
```python
import rclpy
from rclpy.logging import LoggingSeverity
import numpy as np
import ros2_numpy as ros2np
from pose_estimator.pose_estimation import BodyLandmarks
from gesture_control_interfaces.msg import BodyLandmarksStamped, HandLandmarksStamped
from geometry_msgs.msg import PoseStamped, Point
import tf2_ros
from tf2_ros import TransformException
from tf2_ros.transform_listener import TransformListener
from .utils import SimpleMovingAverage, euclidean_distance
from .knn import KNNClassifier
from .embedder import embed_hand_pose
import sys
class PointToNavigateController(rclpy.node.Node):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# setup attributes
self._pointing = False
self._goal_point = SimpleMovingAverage(10)
# setup params
self.param_fixed_frame = self.declare_parameter('fixed_frame_id', value='odom')
self.param_hand_pose_dataset = self.declare_parameter('hand_pose_dataset', value='~/hand_pose_dataset.csv')
self.param_hand_pose_labels = self.declare_parameter('hand_pose_labels', value='~/hand_pose_labels.txt')
# setup subscribers
self.sub_pose = self.create_subscription(
BodyLandmarksStamped,
'body_landmarks',
self.body_landmarks_callback,
rclpy.qos.QoSPresetProfiles.SYSTEM_DEFAULT.value
)
self.sub_hands = self.create_subscription(
HandLandmarksStamped,
'hand_landmarks',
self.hand_landmarks_callback,
rclpy.qos.QoSPresetProfiles.SYSTEM_DEFAULT.value
)
# setup publishers
self.pub_goal = self.create_publisher(
PoseStamped,
'goal_pose',
rclpy.qos.QoSPresetProfiles.SYSTEM_DEFAULT.value
)
# setup tf
self.tf_buffer = tf2_ros.buffer.Buffer()
self.tf_listener = TransformListener(self.tf_buffer, self)
# load hand pose dataset
self.init_knn()
def init_knn(self):
'''
Initialize KNN classifier for hand poses.
'''
try:
# load labels
self.pose_labels = []
with open(self.param_hand_pose_labels.value, 'r') as f:
for label in f:
self.pose_labels.append(label.strip())
# load dataset
dataset = np.genfromtxt(self.param_hand_pose_dataset.value, delimiter=',')
Y_train = dataset[:,0]
X_train = dataset[:,1:]
self.pose_knn = KNNClassifier(X_train, Y_train, 5)
self.get_logger().log('Hand pose classifier initialized.', LoggingSeverity.INFO)
except Exception as e:
self.get_logger().log(e, LoggingSeverity.FATAL)
sys.exit('Could not initialize pose classifier.')
def body_landmarks_callback(self, msg) -> None:
'''
Get pointing direction from 3D body landmarks.
Pointing direction is calulated from wrist to root of index finger.
Parameters
==========
msg : gesture_control_interfaces.msg.BodyPose3D
Body pose message. Landmark points in meters, relative to robot coordinate system.
'''
if self._pointing:
target_frame = self.param_fixed_frame.value
source_frame = msg.header.frame_id
# get landmarks of interest as numpy homogeneous points
lm1 = ros2np.numpify(msg.landmarks[BodyLandmarks.RIGHT_WRIST], hom=True)
lm2 = ros2np.numpify(msg.landmarks[BodyLandmarks.RIGHT_INDEX], hom=True)
# transform to odom frame
try:
tf = self.tf_buffer.lookup_transform(target_frame, source_frame, rclpy.time.Time())
except TransformException:
self.get_logger().log('Could not transform pose.', LoggingSeverity.WARN)
return
tf = ros2np.numpify(tf) # 4x4 homogeneous tranformation matrix
lm1 = tf@lm1
lm2 = tf@lm2
# compute direction, cast from wrist to z = 0 (in odom frame)
v = lm2[:3] - lm1[:3] # direction vector
# discard if operator is pointing to high, leading to points far away
if np.arctan(v[1]/v[2]) >= np.deg2rad(60):
self.get_logger().log('Operator is pointing too high!', LoggingSeverity.WARN)
return
l = -lm1[2]/v[2] # length of ray cast
point = l*v + lm1[:3] # point to navigate to on z = 0
ave = self._goal_point.update(point)
# compare point to average, point should be stable within 0.3 m
d = euclidean_distance(point, ave)
if self._goal_point.is_full() and d < 0.3:
self.get_logger().log('Pointed goal: ({}, {}, {})'.format(*ave), LoggingSeverity.INFO)
# create pose
goal = PoseStamped()
goal.header.stamp = self.get_clock().now().to_msg()
goal.header.frame_id = self.param_fixed_frame.value # change the reference frame
                goal.pose.position = ros2np.msgify(Point, ave)
# publish pose
self.pub_goal.publish(goal)
# reset SMA filter
self._goal_point.clear()
def hand_landmarks_callback(self, msg):
'''
Detect hand gestures.
'''
landmarks = np.empty((21, 3))
for i, landmark in enumerate(msg.landmarks):
landmarks[i,:] = ros2np.numpify(landmark)
# classify the pose
embedding = embed_hand_pose(landmarks)
pose = self.pose_knn.predict(embedding)
self._pointing = self.pose_labels[pose] == 'pointing'
if self._pointing:
self.get_logger().log('Pointing detected.', LoggingSeverity.INFO)
def main(args=None):
rclpy.init(args=args)
node = PointToNavigateController(node_name='gesture_controller')
rclpy.spin(node)
rclpy.shutdown()
```
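The goal point in `body_landmarks_callback` comes from casting the wrist-to-index ray onto the z = 0 plane of the fixed frame: with wrist p1 and index p2, the direction is v = p2 - p1, the scale is l = -p1_z / v_z, and the goal is p1 + l*v. A standalone numeric sketch with made-up landmark positions:

```python
import numpy as np

p1 = np.array([0.25, 0.0, 1.0])    # wrist in the fixed frame (invented values, meters)
p2 = np.array([0.5, 0.25, 0.75])   # root of the index finger

v = p2 - p1                        # pointing direction
l = -p1[2] / v[2]                  # scale that brings the ray down to z = 0
goal = p1 + l * v

print(goal[0], goal[1], goal[2])   # 1.25 1.0 0.0
```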
#### File: gesture_control/gesture_control/embedder.py
```python
import numpy as np
from itertools import combinations
from .utils import euclidean_distance
def embed_hand_pose(landmarks):
    # Embed a hand pose as the vector of pairwise distances between landmarks.
    # combinations() needs r=2, and the pairs are unzipped so euclidean_distance
    # compares each landmark against its partner in the pair.
    A, B = zip(*combinations(landmarks, 2))
    A = np.array(A)
    B = np.array(B)
    return euclidean_distance(A, B)
```
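With MediaPipe's 21 hand landmarks, the embedding above is the vector of all pairwise distances, i.e. C(21, 2) = 210 values. A quick shape check with random points (assuming `euclidean_distance` reduces over the last axis, as the test suite suggests):

```python
import numpy as np

landmarks = np.random.rand(21, 3)   # 21 landmarks, (x, y, z) each
embedding = embed_hand_pose(landmarks)
print(embedding.shape)              # expected: (210,)
```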
#### File: gesture_control/pose_estimator/pose_estimation.py
```python
from multiprocessing import Process, Event, Array, Value, Lock, log_to_stderr
import logging
import numpy as np
from numpy.typing import ArrayLike, DTypeLike
from dataclasses import dataclass
from typing import Any, Sequence, Tuple
import mediapipe
from enum import Flag, IntEnum
import mediapipe as mp
import cv2
import ctypes
@dataclass
class PoseEstimator2DConfig:
distortion_maps:Tuple[Any, Any] # distortion maps as returned by cv2.initUndistortRectifyMap
model_complexity: int = 1
min_detection_confidence: float = 0.5
min_tracking_confidence: float = 0.5
@dataclass
class SharedNumpyArray:
'''
Wrapper for Numpy array in shared memory.
'''
shape: Tuple[int, ...] # shared array shape
dtype: DTypeLike # shared array dtype
arr: Array # shared array buffer
changed_flag: Value # change flag
def __init__(self, shape:Tuple[int,...], dtype:DTypeLike, arr:Array=None, changed_flag:Value=None):
self.shape = shape
self.dtype = dtype
self.arr = arr if arr is not None else Array(np.ctypeslib.as_ctypes_type(dtype), int(np.product(shape)))
self.changed_flag = changed_flag if changed_flag is not None else Value(ctypes.c_bool)
def as_numpy(self) -> ArrayLike:
return np.frombuffer(self.arr.get_obj(), dtype=self.dtype).reshape(self.shape)
def has_changed(self) -> bool:
return self.changed_flag.value
def set_changed(self, value=True) -> None:
self.changed_flag.value = value
class ColorSpace(IntEnum):
RGB=0
GRAY=cv2.COLOR_GRAY2RGB
BGR=cv2.COLOR_BGR2RGB
HSV=cv2.COLOR_HSV2RGB
@dataclass
class SharedImage(SharedNumpyArray):
color: ColorSpace
def __init__(self, width:int, height:int, color:ColorSpace, *args, **kwargs):
shape = (height, width)
if color != ColorSpace.GRAY:
shape = (*shape, 3)
super(SharedImage, self).__init__(shape=shape,dtype=np.uint8, *args, **kwargs)
self.color = color
@property
def width(self):
return self.shape[1]
@property
def height(self):
return self.shape[0]
class PoseAnnotationType(Flag):
NONE = 0
BODY = 1
LEFT_HAND = 2
RIGHT_HAND = 4
FACE = 8
HANDS = LEFT_HAND | RIGHT_HAND
ALL = BODY | HANDS | FACE
def draw_landmarks(image:ArrayLike, results:Any, mode:PoseAnnotationType) -> None:
if mode & PoseAnnotationType.FACE:
mp.solutions.drawing_utils.draw_landmarks(
image,
results.face_landmarks,
mp.solutions.holistic.FACEMESH_CONTOURS,
landmark_drawing_spec=None,
connection_drawing_spec=mp.solutions.drawing_styles
.get_default_face_mesh_contours_style())
if mode & PoseAnnotationType.BODY:
mp.solutions.drawing_utils.draw_landmarks(
image,
results.pose_landmarks,
mp.solutions.holistic.POSE_CONNECTIONS,
landmark_drawing_spec=mp.solutions.drawing_styles
.get_default_pose_landmarks_style())
if mode & PoseAnnotationType.LEFT_HAND:
mp.solutions.drawing_utils.draw_landmarks(
image,
results.left_hand_landmarks,
mp.solutions.holistic.HAND_CONNECTIONS,
landmark_drawing_spec=mp.solutions.drawing_styles.get_default_hand_landmarks_style(),
connection_drawing_spec=mp.solutions.drawing_styles.get_default_hand_connections_style())
if mode & PoseAnnotationType.RIGHT_HAND:
mp.solutions.drawing_utils.draw_landmarks(
image,
results.right_hand_landmarks,
mp.solutions.holistic.HAND_CONNECTIONS,
landmark_drawing_spec=mp.solutions.drawing_styles.get_default_hand_landmarks_style(),
connection_drawing_spec=mp.solutions.drawing_styles.get_default_hand_connections_style())
class PoseEstimator2DProcess(Process):
'''
Background process to perform image rectification and 2D pose estimation.
Uses the MediaPipe Holistic solution.
Input GRAY, output BGR.
Shutdown by setting stop_event.
'''
NUM_BODY_LANDMARKS = 33
NUM_HAND_LANDMARKS = 21
NUM_FACE_LANDMARKS = 468
def __init__(self,
config:PoseEstimator2DConfig,
in_lock:Lock,
in_image:SharedImage,
out_lock:Lock, # shared lock for all output
out_image:SharedImage, # undistorted image
out_body_landmarks:SharedNumpyArray, # (33, 2)
out_left_hand_landmarks:SharedNumpyArray, # (21, 3)
out_right_hand_landmarks:SharedNumpyArray, # (21, 3)
out_face_landmarks: SharedNumpyArray, # (468, 2)
stop_event:Event,
out_annotate:PoseAnnotationType = PoseAnnotationType.NONE,
*args, **kwargs):
super(PoseEstimator2DProcess, self).__init__(*args, **kwargs)
self.config = config
# init inputs
self.in_lock = in_lock
self.in_changed = in_image.changed_flag
self.in_image = in_image
# init outputs
self.out_lock = out_lock
self.out_image = out_image
self.out_body_landmarks = out_body_landmarks
self.out_left_hand_landmarks = out_left_hand_landmarks
self.out_right_hand_landmarks = out_right_hand_landmarks
self.out_face_landmarks = out_face_landmarks
# stop event
self.stop_event = stop_event
# annotation
self.out_annotate = out_annotate
# set color conversion
self._color_cvt = in_image.color if in_image.color != ColorSpace.RGB else None
# logging
self.logger = log_to_stderr()
self.logger.setLevel(logging.INFO)
def run(self):
with mediapipe.solutions.holistic.Holistic(
model_complexity=self.config.model_complexity,
min_detection_confidence=self.config.min_detection_confidence,
min_tracking_confidence=self.config.min_tracking_confidence
) as model:
while not self.stop_event.is_set():
# self.logger.debug('Waiting for image...')
locked = self.in_lock.acquire(timeout=0.1)
self.logger.debug('Locked: {}'.format(locked))
if locked:
if self.in_changed.value:
self.logger.debug('Attempting to process...')
# make a numpy copy and release the lock
image = self.in_image.as_numpy().copy()
self.in_changed.value = False # set valid to False to indicate consumed
self.in_lock.release()
# undistort
image = cv2.remap(src=image,
map1=self.config.distortion_maps[0],
map2=self.config.distortion_maps[1],
interpolation=cv2.INTER_LINEAR)
# convert color if necessary
image.flags.writeable = False
if self._color_cvt is not None:
image = cv2.cvtColor(image, self._color_cvt)
# process the image
results = model.process(image)
# make writeable
image.flags.writeable = True
# annotate
draw_landmarks(image, results, self.out_annotate)
# convert to BGR
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
# convert landmarks
body_landmarks = self.landmarks_to_numpy(self.NUM_BODY_LANDMARKS, results.pose_landmarks)
left_hand_landmarks = self.landmarks_to_numpy(self.NUM_HAND_LANDMARKS, results.left_hand_landmarks, keep_z=True)
right_hand_landmarks = self.landmarks_to_numpy(self.NUM_HAND_LANDMARKS, results.right_hand_landmarks, keep_z=True)
face_landmarks = self.landmarks_to_numpy(self.NUM_FACE_LANDMARKS, results.face_landmarks)
# write to outputs
with self.out_lock:
np.copyto(self.out_image.as_numpy(), image)
self.out_image.set_changed()
np.copyto(self.out_body_landmarks.as_numpy(), body_landmarks)
self.out_body_landmarks.set_changed()
np.copyto(self.out_left_hand_landmarks.as_numpy(), left_hand_landmarks)
self.out_left_hand_landmarks.set_changed()
np.copyto(self.out_right_hand_landmarks.as_numpy(), right_hand_landmarks)
self.out_right_hand_landmarks.set_changed()
np.copyto(self.out_face_landmarks.as_numpy(), face_landmarks)
self.out_face_landmarks.set_changed()
else:
self.in_lock.release()
def landmarks_to_numpy(self, n:int, landmarks:Any, keep_z:bool=False) -> ArrayLike:
'''
Convert Mediapipe landmarks object to numpy array.
Parameters
==========
n : int
number of landmarks
landmarks : Mediapipe landmarks object
Mediapipe landmarks object to convert
keep_z : bool
Whether or not to keep the z axis result.
Returns
=======
arr : (n,2) ArrayLike | (n,3) ArrayLike
Array of 3D or 2D points, depending on keep_z.
'''
if landmarks is None:
return np.inf*np.ones((n, 3 if keep_z else 2))
else:
if keep_z:
return np.array([(l.x, l.y, l.z) for l in landmarks.landmark])
else:
return np.array([(l.x, l.y) for l in landmarks.landmark])
class BodyLandmarks(IntEnum):
'''
Enumeration of body landmarks.
Enum value corresponds to index of MediaPipe output.
'''
NOSE = 0
LEFT_EYE_INNER = 1
LEFT_EYE = 2
LEFT_EYE_OUTER = 3
RIGHT_EYE_INNER = 4
RIGHT_EYE = 5
RIGHT_EYE_OUTER = 6
LEFT_EAR = 7
RIGHT_EAR = 8
MOUTH_LEFT = 9
MOUTH_RIGHT = 10
LEFT_SHOULDER = 11
RIGHT_SHOULDER = 12
LEFT_ELBOW = 13
RIGHT_ELBOW = 14
LEFT_WRIST = 15
RIGHT_WRIST = 16
LEFT_PINKY = 17
RIGHT_PINKY = 18
LEFT_INDEX = 19
RIGHT_INDEX = 20
LEFT_THUMB = 21
RIGHT_THUMB = 22
LEFT_HIP = 23
RIGHT_HIP = 24
LEFT_KNEE = 25
RIGHT_KNEE = 26
LEFT_ANKLE = 27
RIGHT_ANKLE = 28
LEFT_HEEL = 29
RIGHT_HEEL = 30
LEFT_FOOT_INDEX = 31
RIGHT_FOOT_INDEX = 32
class HandLandmarks(IntEnum):
'''
    Enumeration of hand landmarks.
Enum value corresponds to index of MediaPipe output.
'''
WRIST = 0
THUMB_CMC = 1
THUMB_MCP = 2
THUMB_IP = 3
THUMB_TIP = 4
INDEX_FINGER_MCP = 5
INDEX_FINGER_PIP = 6
INDEX_FINGER_DIP = 7
INDEX_FINGER_TIP = 8
MIDDLE_FINGER_MCP = 9
MIDDLE_FINGER_PIP = 10
MIDDLE_FINGER_DIP = 11
MIDDLE_FINGER_TIP = 12
RING_FINGER_MCP = 13
RING_FINGER_PIP = 14
RING_FINGER_DIP = 15
RING_FINGER_TIP = 16
PINKY_MCP = 17
PINKY_PIP = 18
PINKY_DIP = 19
PINKY_TIP = 20
```
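The landmark enumerations above index directly into the arrays produced by `landmarks_to_numpy`, and missing detections are filled with `np.inf`. A minimal sketch of reading one joint out of the shared body-landmark array (the array shape and the sentinel value come from the code above; the data here are fabricated for illustration):
```python
import numpy as np

# Stand-in for the (33, 2) array written by PoseEstimator2DProcess; np.inf marks
# landmarks that MediaPipe did not return for this frame.
body_landmarks = np.inf * np.ones((33, 2))
body_landmarks[11] = (0.4, 0.5)            # pretend LEFT_SHOULDER (index 11) was detected

left_shoulder = body_landmarks[11]         # or body_landmarks[BodyLandmarks.LEFT_SHOULDER]
if np.all(np.isfinite(left_shoulder)):
    x_norm, y_norm = left_shoulder         # normalised image coordinates
    print(f"left shoulder at ({x_norm:.2f}, {y_norm:.2f})")
else:
    print("left shoulder not detected in this frame")
```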
#### File: gesture_control/test/test_utils.py
```python
from gesture_control.utils import euclidean_distance
import numpy as np
def test_euclidean_distance():
a = np.array([[0,0]])
b = np.array([4,3])
res = euclidean_distance(a,b)
assert res == 5
```
|
{
"source": "jehontan/ros-onvif-camera",
"score": 3
}
|
#### File: ros-onvif-camera/geometry_helper/geometry_helper.py
```python
import numpy as np
def euler_to_quaternion(roll, pitch, yaw):
qx = np.sin(roll/2) * np.cos(pitch/2) * np.cos(yaw/2) - np.cos(roll/2) * np.sin(pitch/2) * np.sin(yaw/2)
qy = np.cos(roll/2) * np.sin(pitch/2) * np.cos(yaw/2) + np.sin(roll/2) * np.cos(pitch/2) * np.sin(yaw/2)
qz = np.cos(roll/2) * np.cos(pitch/2) * np.sin(yaw/2) - np.sin(roll/2) * np.sin(pitch/2) * np.cos(yaw/2)
qw = np.cos(roll/2) * np.cos(pitch/2) * np.cos(yaw/2) + np.sin(roll/2) * np.sin(pitch/2) * np.sin(yaw/2)
return [qx, qy, qz, qw]
def quaternion_to_euler(x, y, z, w):
t0 = +2.0 * (w * x + y * z)
t1 = +1.0 - 2.0 * (x * x + y * y)
X = np.arctan2(t0, t1)
t2 = +2.0 * (w * y - z * x)
t2 = +1.0 if t2 > +1.0 else t2
t2 = -1.0 if t2 < -1.0 else t2
Y = np.arcsin(t2)
t3 = +2.0 * (w * z + x * y)
t4 = +1.0 - 2.0 * (y * y + z * z)
Z = np.arctan2(t3, t4)
return X, Y, Z
```
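As a quick sanity check of the two conversions above, mapping Euler angles to a quaternion and back should recover the original angles for values inside the principal range. A small round-trip sketch (the import path is an assumption based on the file layout):
```python
import numpy as np
from geometry_helper.geometry_helper import euler_to_quaternion, quaternion_to_euler  # assumed path

roll, pitch, yaw = 0.1, -0.2, 0.3                       # radians
qx, qy, qz, qw = euler_to_quaternion(roll, pitch, yaw)
r2, p2, y2 = quaternion_to_euler(qx, qy, qz, qw)
assert np.allclose([r2, p2, y2], [roll, pitch, yaw])    # round trip recovers the angles
```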
#### File: jehontan/ros-onvif-camera/test.py
```python
import socket
import struct
def main():
HOST = '192.168.137.216'
PORT = 2306
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST, PORT))
print('Connected!')
while True:
txt = input('Pan, Tilt, Zoom >')
cmds = txt.split(',')
pan = float(cmds[0])
tilt = float(cmds[1])
zoom = float(cmds[2]) if len(cmds)>2 else 0.0
cmd = struct.pack('fff', pan, tilt, zoom)
s.send(cmd)
if __name__ == '__main__':
main()
```
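The client above sends each pan/tilt/zoom command as a fixed 12-byte payload (`struct.pack('fff', ...)`). The matching camera-side server is not shown in this excerpt; a minimal receiver for the same wire format might look like the following sketch (host, port and framing details are assumptions for illustration):
```python
import socket
import struct

PAYLOAD = struct.calcsize('fff')      # 12 bytes: pan, tilt, zoom as 32-bit floats

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
    srv.bind(('0.0.0.0', 2306))       # same port the client connects to
    srv.listen(1)
    conn, addr = srv.accept()
    with conn:
        buf = b''
        while True:
            chunk = conn.recv(4096)
            if not chunk:
                break                 # client closed the connection
            buf += chunk
            while len(buf) >= PAYLOAD:
                pan, tilt, zoom = struct.unpack('fff', buf[:PAYLOAD])
                buf = buf[PAYLOAD:]
                print('pan=%.2f tilt=%.2f zoom=%.2f' % (pan, tilt, zoom))
```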
|
{
"source": "jehoppert/pyMatrix",
"score": 2
}
|
#### File: jehoppert/pyMatrix/test_pyMatrix.py
```python
import pyMatrix
#def test_func():
#assert 1 == 1
if __name__ == "__main__":
#run functions tests here
print("pyMatrix tests passed")
```
|
{
"source": "jehov4/atomic-operator",
"score": 3
}
|
#### File: atomic-operator/atomic_operator/atomic_operator.py
```python
import os
from .base import Base
from .models import (
Config
)
from .configparser import ConfigParser
from .utils.exceptions import AtomicsFolderNotFound
from .execution import (
LocalRunner,
RemoteRunner,
AWSRunner
)
class AtomicOperator(Base):
"""Main class used to run Atomic Red Team tests.
atomic-operator is used to run Atomic Red Team tests both locally and remotely.
These tests (atomics) are predefined tests to mock or emulate a specific technique.
config_file definition:
atomic-operator's run method can be supplied with a path to a configuration file (config_file) which defines
specific tests and/or values for input parameters to facilitate automation of said tests.
An example of this config_file can be seen below:
inventory:
linux1:
executor: ssh
authentication:
username: root
password: <PASSWORD>!
                #ssh_key_path:
port: 22
timeout: 5
hosts:
# - 192.168.1.1
- 10.32.100.199
# etc.
atomic_tests:
- guid: f7e6ec05-c19e-4a80-a7e7-241027992fdb
input_arguments:
output_file:
value: custom_output.txt
input_file:
value: custom_input.txt
- guid: 3ff64f0b-3af2-3866-339d-38d9791407c3
input_arguments:
second_arg:
value: SWAPPPED argument
- guid: 32f90516-4bc9-43bd-b18d-2cbe0b7ca9b2
inventories:
- linux1
Raises:
ValueError: If a provided technique is unknown we raise an error.
"""
__test_responses = {}
def __find_path(self, value):
"""Attempts to find a path containing the atomic-red-team repository
Args:
value (str): A starting path to iterate through
Returns:
str: An absolute path containing the path to the atomic-red-team repo
"""
if value == os.getcwd():
for x in os.listdir(value):
if os.path.isdir(x) and 'redcanaryco-atomic-red-team' in x:
if os.path.exists(self.get_abs_path(os.path.join(x, 'atomics'))):
return self.get_abs_path(os.path.join(x, 'atomics'))
else:
if os.path.exists(self.get_abs_path(value)):
return self.get_abs_path(value)
def __run_technique(self, technique, **kwargs):
"""This method is used to run defined Atomic tests within
a MITRE ATT&CK Technique.
Args:
technique (Atomic): An Atomic object which contains a list of AtomicTest
objects.
"""
self.__logger.debug(f"Checking technique {technique.attack_technique} ({technique.display_name}) for applicable tests.")
for test in technique.atomic_tests:
self._set_input_arguments(test, **kwargs)
if test.auto_generated_guid not in self.__test_responses:
self.__test_responses[test.auto_generated_guid] = {}
if technique.hosts:
for host in technique.hosts:
self.__logger.info(f"Running {test.name} test ({test.auto_generated_guid}) for technique {technique.attack_technique}")
self.__logger.debug(f"Description: {test.description}")
if test.executor.name in ['sh', 'bash']:
self.__test_responses[test.auto_generated_guid].update(
RemoteRunner(test, technique.path).start(host=host, executor='ssh')
)
elif test.executor.name in ['command_prompt']:
self.__test_responses[test.auto_generated_guid].update(
RemoteRunner(test, technique.path).start(host=host, executor='cmd')
)
elif test.executor.name in ['powershell']:
self.__test_responses[test.auto_generated_guid].update(
RemoteRunner(test, technique.path).start(host=host, executor='powershell')
)
else:
self.__logger.warning(f"Unable to execute test since the executor is {test.executor.name}. Skipping.....")
else:
if self._check_platform(test, show_output=True):
self.__logger.info(f"Running {test.name} test ({test.auto_generated_guid}) for technique {technique.attack_technique}")
self.__logger.debug(f"Description: {test.description}")
if self._check_if_aws(test):
self.__test_responses[test.auto_generated_guid].update(
AWSRunner(test, technique.path).start()
)
else:
self.__test_responses[test.auto_generated_guid].update(
LocalRunner(test, technique.path).start()
)
if self.__test_responses.get(test.auto_generated_guid):
self.__test_responses[test.auto_generated_guid].update({
'technique_id': technique.attack_technique,
'technique_name': technique.display_name
})
def help(self, method=None):
from fire.trace import FireTrace
from fire.helptext import HelpText
obj = AtomicOperator if not method else getattr(self, method)
return HelpText(self.run,trace=FireTrace(obj))
def get_atomics(self, desintation=os.getcwd(), art_repo="https://github.com/redcanaryco/atomic-red-team/zipball/master/", **kwargs):
"""Downloads the RedCanary atomic-red-team repository to your local system.
Args:
            desintation (str, optional): A folder path to download the repository data to. Defaults to os.getcwd().
            kwargs (dict, optional): These kwargs will be passed along to the Python requests library during download. Defaults to None.
Returns:
str: The path the data can be found at.
"""
if not os.path.exists(desintation):
os.makedirs(desintation)
desintation = kwargs.pop('destination') if kwargs.get('destination') else desintation
folder_name = self.download_atomic_red_team_repo(
save_path=desintation,
art_repo=art_repo,
**kwargs
)
return os.path.join(desintation, folder_name)
def run(self, techniques: list=['all'], test_guids: list=[], select_tests=False,
atomics_path=os.getcwd(), check_prereqs=False, get_prereqs=False,
cleanup=False, copy_source_files=True,command_timeout=20, debug=False,
prompt_for_input_args=False, return_atomics=False, config_file=None,
            config_file_only=False, hosts=[], username=None, password=None,
ssh_key_path=None, private_key_string=None, verify_ssl=False,
ssh_port=22, ssh_timeout=5, test_name=None, *args, **kwargs) -> None:
"""The main method in which we run Atomic Red Team tests.
Args:
techniques (list, optional): One or more defined techniques by attack_technique ID. Defaults to 'all'.
test_guids (list, optional): One or more Atomic test GUIDs. Defaults to None.
select_tests (bool, optional): Select one or more tests from provided techniques. Defaults to False.
atomics_path (str, optional): The path of Atomic tests. Defaults to os.getcwd().
            check_prereqs (bool, optional): Whether or not to check for prereq dependencies (prereq_command). Defaults to False.
get_prereqs (bool, optional): Whether or not you want to retrieve prerequisites. Defaults to False.
cleanup (bool, optional): Whether or not you want to run cleanup command(s). Defaults to False.
copy_source_files (bool, optional): Whether or not you want to copy any related source (src, bin, etc.) files to a remote host. Defaults to True.
command_timeout (int, optional): Timeout duration for each command. Defaults to 20.
debug (bool, optional): Whether or not you want to output details about tests being ran. Defaults to False.
prompt_for_input_args (bool, optional): Whether you want to prompt for input arguments for each test. Defaults to False.
return_atomics (bool, optional): Whether or not you want to return atomics instead of running them. Defaults to False.
            config_file (str, optional): A path to a config file which is used to automate atomic-operator in environments. Defaults to None.
config_file_only (bool, optional): Whether or not you want to run tests based on the provided config_file only. Defaults to False.
hosts (list, optional): A list of one or more remote hosts to run a test on. Defaults to [].
username (str, optional): Username for authentication of remote connections. Defaults to None.
password (str, optional): Password for authentication of remote connections. Defaults to None.
ssh_key_path (str, optional): Path to a SSH Key for authentication of remote connections. Defaults to None.
private_key_string (str, optional): A private SSH Key string used for authentication of remote connections. Defaults to None.
verify_ssl (bool, optional): Whether or not to verify ssl when connecting over RDP (windows). Defaults to False.
ssh_port (int, optional): SSH port for authentication of remote connections. Defaults to 22.
ssh_timeout (int, optional): SSH timeout for authentication of remote connections. Defaults to 5.
            test_name (str, optional): If given, selects (together with the technique ID) the test to be run. Defaults to None.
kwargs (dict, optional): If provided, keys matching inputs for a test will be replaced. Default is None.
Raises:
ValueError: If a provided technique is unknown we raise an error.
Returns:
            dict: Test responses keyed by test GUID, or the list of parsed atomics when return_atomics is True.
"""
if kwargs.get('help'):
return self.help(method='run')
if debug:
import logging
logging.getLogger().setLevel(logging.DEBUG)
atomics_path = self.__find_path(atomics_path)
if not atomics_path:
return AtomicsFolderNotFound('Unable to find a folder containing Atomics. Please provide a path or run get_atomics.')
Base.CONFIG = Config(
atomics_path = atomics_path,
check_prereqs = check_prereqs,
get_prereqs = get_prereqs,
cleanup = cleanup,
command_timeout = command_timeout,
debug = debug,
prompt_for_input_args = prompt_for_input_args,
kwargs = kwargs,
copy_source_files = copy_source_files
)
# taking inputs from both config_file and passed in values via command
# line to build a run_list of objects
self.__config_parser = ConfigParser(
config_file=config_file,
techniques=None if config_file_only else self.parse_input_lists(techniques),
test_guids=None if config_file_only else self.parse_input_lists(test_guids),
host_list=None if config_file_only else self.parse_input_lists(hosts),
username=username,
password=password,
ssh_key_path=ssh_key_path,
private_key_string=private_key_string,
verify_ssl=verify_ssl,
ssh_port=ssh_port,
ssh_timeout=ssh_timeout,
select_tests=select_tests,
test_name=test_name
)
self.__run_list = self.__config_parser.run_list
__return_atomics = []
for item in self.__run_list:
if return_atomics:
__return_atomics.append(item)
elif kwargs.get('kwargs'):
self.__run_technique(item, **kwargs.get('kwargs'))
else:
self.__run_technique(item)
if return_atomics and __return_atomics:
return __return_atomics
return self.__test_responses
```
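A hedged usage sketch of the `run` method documented above. The import assumes the package exposes `AtomicOperator` at the top level; the GUID comes from the config example in the class docstring, and the atomics path is a hypothetical local checkout rather than a tested value:
```python
from atomic_operator import AtomicOperator  # assumed top-level export

operator = AtomicOperator()
results = operator.run(
    test_guids=['f7e6ec05-c19e-4a80-a7e7-241027992fdb'],          # GUID from the docstring example
    atomics_path='redcanaryco-atomic-red-team-abcdef1/atomics',   # hypothetical local path
    get_prereqs=True,                                             # fetch prerequisites first
    cleanup=True,                                                 # run cleanup commands afterwards
)
for guid, response in results.items():
    print(guid, response.get('technique_id'))
```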
|
{
"source": "JehovahYF/OnedriveShareLinkDL-aria2",
"score": 2
}
|
#### File: JehovahYF/OnedriveShareLinkDL-aria2/bot.py
```python
from telegram.ext import Updater
REQUEST_KWARGS = {
# "USERNAME:PASSWORD@" is optional, if you need authentication:
'proxy_url': 'http://127.0.0.1:2334',
}
updater = Updater('',
request_kwargs=REQUEST_KWARGS)
dispatcher = updater.dispatcher
import logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
def start(update, context):
print(update.effective_chat.id)
context.bot.send_message(
        chat_id=update.effective_chat.id, text="You can enter an xxx-my.sharepoint.com link and the program will automatically parse the files")
def stop(update, context):
if update.effective_chat.id == 0:
context.bot.send_message(
            chat_id=update.effective_chat.id, text="The bot has been stopped")
updater.stop()
exit()
from telegram.ext import CommandHandler
start_handler = CommandHandler('start', start)
dispatcher.add_handler(start_handler)
stop_handler = CommandHandler('stop', stop)
dispatcher.add_handler(stop_handler)
updater.start_polling()
```
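The start handler above tells the user to paste an `xxx-my.sharepoint.com` link, but no message handler is registered in this file. A sketch of how such a handler could be wired in with python-telegram-bot's `MessageHandler`, building on the `dispatcher` created above and registered before `updater.start_polling()`; the actual link-parsing/download logic lives elsewhere in this repository and is only hinted at in a comment:
```python
from telegram.ext import MessageHandler, Filters

def handle_link(update, context):
    url = update.message.text.strip()
    if 'my.sharepoint.com' in url:
        # the repository's OneDrive share-link parser/downloader would be invoked here
        context.bot.send_message(chat_id=update.effective_chat.id,
                                 text='Parsing link: ' + url)
    else:
        context.bot.send_message(chat_id=update.effective_chat.id,
                                 text='Please send an xxx-my.sharepoint.com share link.')

link_handler = MessageHandler(Filters.text & ~Filters.command, handle_link)
dispatcher.add_handler(link_handler)
```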
|
{
"source": "jehrodrigues/linearregression-similarity",
"score": 2
}
|
#### File: jehrodrigues/linearregression-similarity/embedding_tfidf.py
```python
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
import sys
import pickle
import spacy
import scipy.sparse
from scipy.sparse import csr_matrix
import math
from sklearn.metrics.pairwise import linear_kernel
nlp=spacy.load('en_core_web_lg')
""" Tokenizing"""
def _keep_token(t):
return (t.is_alpha and
not (t.is_space or t.is_punct or
t.is_stop or t.like_num))
def _lemmatize_doc(doc):
return [ t.lemma_ for t in doc if _keep_token(t)]
def _preprocess(doc_list):
return [_lemmatize_doc(nlp(doc)) for doc in doc_list]
def dummy_fun(doc):
return doc
# Importing List of 128.000 Metadescriptions:
Web_data=open("./data/meta_descriptions","r", encoding="utf-8")
All_lines=Web_data.readlines()
# outputs a list of meta-descriptions consisting of lists of preprocessed tokens:
data=_preprocess(All_lines)
# TF-IDF Vectorizer:
vectorizer = TfidfVectorizer(min_df=10,tokenizer=dummy_fun,preprocessor=dummy_fun,)
tfidf = vectorizer.fit_transform(data)
dictionary = vectorizer.get_feature_names()
# Retrieving Word embedding vectors:
temp_array=[nlp(dictionary[i]).vector for i in range(len(dictionary))]
# I had to build the sparse array in several steps due to RAM constraints
# (with bigrams the vocabulary gets as large as >1m
dict_emb_sparse=scipy.sparse.csr_matrix(temp_array[0])
for arr in range(1,len(temp_array),100000):
print(str(arr))
dict_emb_sparse=scipy.sparse.vstack([dict_emb_sparse, scipy.sparse.csr_matrix(temp_array[arr:min(arr+100000,len(temp_array))])])
# Multiplying the TF-IDF matrix with the Word embeddings:
tfidf_emb_sparse=tfidf.dot(dict_emb_sparse)
# Translating the Query into the TF-IDF matrix and multiplying with the same Word Embeddings:
query_doc= vectorizer.transform(_preprocess(["World of Books is one of the largest online sellers of second-hand books in the world Our massive collection of over million cheap used books also comes with free delivery in the UK Whether it s the latest book release fiction or non-fiction we have what you are looking for"]))
query_emb_sparse=query_doc.dot(dict_emb_sparse)
# Calculating Cosine Similarities:
cosine_similarities = linear_kernel(query_emb_sparse, tfidf_emb_sparse).flatten()
related_docs_indices = cosine_similarities.argsort()[:-10:-1]
# Printing the Site descriptions with the highest match:
for ID in related_docs_indices:
print(All_lines[ID])
```
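The key step above is `tfidf.dot(dict_emb_sparse)`: a (documents × vocabulary) TF-IDF matrix multiplied by a (vocabulary × embedding-dimension) matrix of word vectors yields one dense vector per document, i.e. a TF-IDF-weighted sum of the word embeddings it contains. A toy, numpy-only illustration of that shape arithmetic:
```python
import numpy as np

tfidf_toy = np.array([[0.0, 0.5, 0.5],     # 2 documents over a 3-word vocabulary
                      [1.0, 0.0, 0.0]])
word_vecs = np.array([[1.0, 0.0],          # 3 word vectors of dimension 2
                      [0.0, 1.0],
                      [1.0, 1.0]])
doc_embeddings = tfidf_toy.dot(word_vecs)  # shape (2, 2): one embedding per document
print(doc_embeddings)
# [[0.5 1. ]
#  [1.  0. ]]
```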
#### File: jehrodrigues/linearregression-similarity/evaluate_assin2.py
```python
from gensim.models import FastText
from sklearn.linear_model import LinearRegression
from commons import read_xml
from assin_eval import eval_similarity
from assin_eval import eval_rte
from gensim.models import KeyedVectors
from xml.dom import minidom
from numpy import array
from os import path
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
import pickle
import argparse
import unicodedata
DATA_DIR = './data/'
TEST_DIR = path.join(DATA_DIR, 'assin-test-gold/')
def gensim_embedding_difference(data): #, field1, field2
"""Calculate the similarity between the sum of all embeddings."""
distances = []
for pair in data:
#print("t: ", normalize_terms(pair.t.lower()))
e1 = [i if i in embeddings else 'teste' for i in normalize_terms(pair.t.lower())]
e2 = [i if i in embeddings else 'teste' for i in normalize_terms(pair.h.lower())]
distances.append([embeddings.n_similarity(e1, e2)])
return distances
def evaluate_testset(x, y, test):
"""Docstring."""
l_reg = LinearRegression()
l_reg.fit(x, y)
test_predict = l_reg.predict(test)
return test_predict
def write_xml(filename, pred):
"""Docstring."""
with open(filename) as fp:
xml = minidom.parse(fp)
pairs = xml.getElementsByTagName('pair')
for pair in pairs:
#print('pred: ', pred)
#print('pairs.index: ', pairs.index(pair))
#print('pair: ', str(pred[pairs.index(pair)]))
pair.setAttribute('similarity', str(pred[pairs.index(pair)]))
with open(filename, 'w') as fp:
fp.write(xml.toxml())
def normalize_terms(terms):
# Remove Numerals
#terms = remove_numerals(terms)
# Remove Punctuation and tokenize
terms = remove_punctuation(terms)
# Remove StopWords
filtered_words = [word for word in terms if word not in stopwords.words('portuguese')]
# Remove Accents
#filtered_words = [remove_accents(term).lower() for term in terms]
# Stemming
#st = nltk.stem.RSLPStemmer()
#st = nltk.stem.SnowballStemmer('portuguese')
#filtered_stem = [st.stem(term) for term in terms]
#filtered_stem = [st.stem(filtered_word) for filtered_word in terms]
return filtered_words
def remove_punctuation(term):
"""Remove Punctuation and tokenize"""
tokenizer = RegexpTokenizer(r'\w+')
return tokenizer.tokenize(term)
if __name__ == '__main__':
# Parser descriptors
parser = argparse.ArgumentParser(
        description='''Sentence similarity evaluation for word embeddings in
        the Brazilian and European variants of the Portuguese language. A word
        embedding model in text format is expected.''')
parser.add_argument('embedding',
type=str,
help='embedding model')
parser.add_argument('lang',
choices=['br', 'pt'],
                        help='{br, pt} choose the PT-BR or PT-PT testset')
args = parser.parse_args()
lang = args.lang
emb = args.embedding
# Loading embedding model
embeddings = KeyedVectors.load_word2vec_format(emb,
binary=False,
unicode_errors="ignore")
pairs_train = read_xml('%sassin2-train.xml' % (DATA_DIR), True)
pairs_test = read_xml('%sassin-ptbr-test.xml' % (TEST_DIR), True)
# Loading evaluation data and parsing it
#with open('%sassin-pt%s-train.pkl' % (DATA_DIR, lang), 'rb') as fp:
#data = pickle.load(fp)
#with open('%sassin-pt%s-test-gold.pkl' % (DATA_DIR, lang), 'rb') as fp:
#test = pickle.load(fp)
# Getting features
#features = gensim_embedding_difference(data, 'tokens_t1', 'tokens_t2')
features = gensim_embedding_difference(pairs_train)
#features_test = gensim_embedding_difference(test, 'tokens_t1', 'tokens_t2')
features_test = gensim_embedding_difference(pairs_test)
# Predicting similarities
#results = array([float(i['result']) for i in data])
results = array([float(i.similarity) for i in pairs_train])
results_test = evaluate_testset(features, results, features_test)
write_xml('%soutput.xml' % DATA_DIR, results_test)
# Evaluating
pairs_gold = read_xml('%sassin-pt%s-test.xml' % (TEST_DIR, lang), True)
pairs_sys = read_xml('%soutput.xml' % DATA_DIR, True)
#eval_rte(pairs_gold, pairs_sys)
eval_similarity(pairs_gold, pairs_sys)
#python evaluate_assin2.py ./models/skip_s300.txt br
```
|
{
"source": "jehturner/pintport",
"score": 3
}
|
#### File: pintport/pintport/structures.py
```python
from collections.abc import MutableSequence, Iterable
from copy import copy
import sqlite3
import pandas_datareader as pdr
from . import config
class Source:
"""
A class describing and abstracting a source of historical pricing
information for a security.
"""
def __init__(self, source, query, symbol, exchange, currency='USD',
ID=None):
self.source = source
self.query = query
self.symbol = symbol
self.exchange = exchange
self.currency = currency
        self.ID = ID
try:
self.api_key = config['api_keys'][self.source]
except KeyError:
self.api_key = None
def __repr__(self):
return("<{0}(source='{source}', query='{query}', symbol='{symbol}', "
"exchange='{exchange}', currency='{currency}', ID={ID})>"\
.format(
self.__class__.__name__,
source=self.source,
query=self.query,
symbol=self.symbol,
exchange=self.exchange,
currency=self.currency,
ID=self.ID
))
def __call__(self, start=None, end=None):
"""
Look up time series from the defined source.
Parameters
----------
start : string, int, date, datetime, Timestamp
left boundary for range (defaults to 1/1/2010)
end : string, int, date, datetime, Timestamp
right boundary for range (defaults to today)
Returns
-------
`pandas.core.frame.DataFrame`
Pandas DataFrame containing the time series.
"""
return pdr.data.DataReader(name=self.symbol, data_source=self.query,
start=start, end=end, session=None,
api_key=self.api_key)
class _SourceList(MutableSequence):
"""
A priority-ordered list of Source instances for a given security, with
unique identifiers, that can be mapped to a database table and interrogated
to provide data for a given security.
This private class is intended to be used by Asset. Its ID numbers need
to remain synchronized with those recorded in the asset price data.
"""
def __init__(self, sources=None):
super().__init__()
sources = [] if sources is None else copy(sources)
self._list = self._check_items(sources)
def __getitem__(self, index):
return self._list[index]
def __setitem__(self, index, value):
_list = self._list.copy()
_list[index] = value
self._list = self._check_items(_list)
def __delitem__(self, index):
del self._list[index]
def __len__(self):
return len(self._list)
def insert(self, index, value):
_list = self._list.copy()
_list.insert(index, value)
self._list = self._check_items(_list)
# inherited append works automatically
def _check_items(self, _list):
ID = self._next_ID()
queries, IDs = [], []
# Check input Sources before setting their IDs so as not to change
# them if the operation fails:
for item in _list:
if not isinstance(item, Source):
raise ValueError('items must be Source instances')
if (item.query, item.symbol) in queries:
raise ValueError("duplicate item: query='{}', symbol='{}'"\
.format(item.query, item.symbol))
queries.append((item.query, item.symbol))
if item.ID is not None:
if item.ID in IDs:
raise ValueError('duplicate ID {}'.format(item.ID))
IDs.append(item.ID)
for item in _list:
if item.ID is None:
item.ID = ID
ID += 1
return _list
def _next_ID(self):
"""
        Return the next available new identifier. These should not be re-used
because they may be recorded in an Asset's time series (nor should the
Source be deleted if that is the case), but it would be very
inefficient to store a UUID in every row of a time series, so just
return one more than the highest number in use (normally len(list)),
to minimize the likelihood of re-using any deleted identifiers.
"""
if not hasattr(self, '_list'):
return 0
return max((-1 if source.ID is None else source.ID for source in
self._list), default=-1) + 1
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, repr(self._list))
```
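A short usage sketch of the `Source` wrapper above, assuming your pandas-datareader version includes the no-API-key 'stooq' reader; the symbol and date range are illustrative, and since `config['api_keys']` has no entry for this source, `api_key` simply stays `None`:
```python
from pintport.structures import Source  # import path as laid out in this file

src = Source(source='stooq', query='stooq', symbol='AAPL',
             exchange='NASDAQ', currency='USD')
prices = src(start='2021-01-01', end='2021-12-31')   # pandas DataFrame of daily prices
print(prices.head())
```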
|
{
"source": "jehturner/quadpype",
"score": 2
}
|
#### File: ndmapper/io/mapio.py
```python
import hashlib
import numpy as np
from astropy.nddata import StdDevUncertainty
from astropy.table import Table
import astropy.io.fits as pyfits
from ._util import get_backend_fn
__all__ = ['NDMapIO', 'TabMapIO']
class NDMapIO(object):
"""
Propagate additional information needed for `NDLater` instances to support
lazy loading, allow saving only arrays/header attributes that have changed
& report which FITS extensions they came from for IRAF etc.
For lazy-loading or saving operations to succeed, the corresponding file
must already exist. This class is intended to encapsulate bookkeeping
within `NDLater` (managed by a `DataFile` instance) with reasonable
overheads, rather than to provide a robust API: for the user-level
interface, see `DataFile` instead.
Attributes
----------
filename : `str`
The path to the file from which the data are to be mapped.
ident : `int` or `str` or `None`
Group identifier appropriate for the file type (int EXTVER for FITS),
which labels this particular `NDData` instance within a `DataFile`.
data_idx : `int`
uncertainty_idx : `int` or `None`
flags_idx : `int` or `None`
The original index of each constituent data/uncertainty/flags array
within the host file (extension number for FITS).
"""
_data_hash = None
_uncertainty_hash = None
_flags_hash = None
def __init__(self, filename, ident=None, data_idx=None, \
uncertainty_idx=None, flags_idx=None):
# This must maintain a separate copy of the host object's filename,
# otherwise lazy loading of data not yet in memory will fail when
# changing the filename of a DataFile instance and trying to save it.
# This should perhaps be changed to cache a reference to its data so
# that one NDLater instance instantiated from another will share the
# same data arrays independently of whether lazy loading is triggered
# before or after instantiation. Once one of them is saved, it will
# still get re-mapped independently.
if not isinstance(filename, str):
raise ValueError('filename must be supplied as a string')
self.filename = filename
self.ident = ident
self.data_idx = data_idx
self.uncertainty_idx = uncertainty_idx
self.flags_idx = flags_idx
self._dloader = get_backend_fn('load_array', self.filename)
self._mloader = get_backend_fn('load_array_meta', self.filename)
self._saver = get_backend_fn('save_array', self.filename)
# Consider automatically determining ident from the input here
# (ie. setting it to hdu.ver == EXTVER) if None.
def load_data(self):
data = self._dloader(self.filename, self.data_idx)
# A NumPy array is directly hashable -- but doing so pulls memory-
# mapped data entirely into memory, where they stay until unloaded
# with "del ndd.data". A workaround of reading the file twice would
# negate the intended benefit of being able to save it intelligently,
# so just disable hashing in the first instance and ask <NAME>. about
# it later. It might be better to determine whether the copy is dirty
# using object ids (weakref) and memory mapping instead, like PyFITS,
# but that might mean re-reading the file after saving, to establish
# memory mapping before we can mark the buffer clean.
# self._data_hash = hashlib.sha1(data).hexdigest()
return data
# def save_data(self, data, header, force=False):
# # Should hash meta-data as well here, or else we'll lose changes that
# # aren't associated with changes to data.
# newhash = hashlib.sha1(data).hexdigest()
# if force or newhash != self._data_hash:
# self._data_hash = newhash
# self._saver(self.filename, self.data_idx, data, header)
def load_uncertainty(self):
if self.uncertainty_idx:
uncert = self._dloader(self.filename, self.uncertainty_idx)
# Presumably this kills any memory mapping? Worry about it later.
# The sqrt is just a temporary hack until I write a Var subclass.
# StdDevUncertainty isn't directly hashable so cast to str first
# (also see load_data above for another reason).
uncert = StdDevUncertainty(np.sqrt(np.maximum(uncert, 0.)))
# self._uncert_hash = hashlib.sha1(uncert).hexdigest()
return uncert
def load_flags(self):
if self.flags_idx:
flags = self._dloader(self.filename, self.flags_idx)
# self._flags_hash = hashlib.sha1(flags).hexdigest()
return flags
def load_meta(self):
meta = self._mloader(self.filename, self.data_idx)
# This cast to str is a little bit slow, so let's see whether the hash
# here turns out to be premature optimization before reinstating it:
# self._meta_hash = hashlib.sha1(str(meta)).hexdigest()
return meta
class TabMapIO(object):
"""
A proxy object for lazily loading/saving AstroPy Table instances. This is
similar to `NDMapIO`, but instead of being used by an `NDData` sub-class to
load its own attributes lazily, `TabMapIO` is used to initialize a normal
`Table` instance on demand, since the latter doesn't have several data
arrays to load separately and sub-classing `Table` would likely prove more
complicated with less benefit.
At the user level, instances are managed by, and the corresponding table
data accessed via, `DataFile` objects. For lazy-loading or saving
operations to succeed, the corresponding file must already exist.
Attributes
----------
filename : `str`
The path to the file from which the data are to be mapped.
label : `str`
Application-specific label/name identifying the type of `Table`
(EXTNAME for FITS). Multiple tables of the same type can be
distinguished via the ident parameter.
ident : `int` or `str` or `None`
Identifier appropriate for the file type (int EXTVER for FITS), which
distinguishes this particular instance of a given type of Table within
the applicable DataFile.
idx : `int`
The original array index/number within the host file (extension number
for FITS).
"""
_table = None
def __init__(self, filename, idx, label=None, ident=None):
# This must maintain a separate copy of the host object's filename,
# otherwise lazy loading of data not yet in memory will fail when
# changing the filename of a DataFile instance and trying to save it.
if not isinstance(filename, str):
raise ValueError('filename must be supplied as a string')
self.filename = filename
self.idx = idx
self.label = label
self.ident = ident
self._dloader = get_backend_fn('load_table', self.filename)
self._mloader = get_backend_fn('load_table_meta', self.filename)
# self._saver = get_backend_fn('save_table', self.filename)
def load_data(self):
data = self._dloader(self.filename, self.idx)
return data
def load_meta(self):
meta = self._mloader(self.filename, self.idx)
return meta
def load_table(self):
meta = self.load_meta()
data = self.load_data()
self._table = Table(data=data, meta=meta, copy=False)
@property
def table(self):
if not self._table:
self.load_table()
return self._table
@table.setter
def table(self, value):
# Should this preserve the existing label & ident? Should it update
# them in the new Table's meta (which would mean making a copy)?
# EXTNAME & EXTVER should probably be removed while in memory instead.
# Avoid converting existing Table instances to Table because that
# converts .meta from an io.fits header to an OrderedDict, which it
# turns out can choke on some odd values such as HISTORY.
if not isinstance(value, Table):
try:
value = Table(value, copy=False)
except ValueError:
raise TypeError('value of .table must be convertible to Table')
self._table = value
def copy(self):
"""
Generate a new instance that shares any already-loaded data but can
be re-mapped independently.
"""
newinst = TabMapIO(self.filename, self.idx, self.label, self.ident)
newinst._table = self._table
return newinst
```
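A minimal sketch of the lazy-loading pattern `TabMapIO` provides. The FITS filename, extension index and label below are placeholders, and the import assumes these classes are re-exported by the `ndmapper.io` sub-package (consistent with the `__all__` above); nothing is read from disk until `.table` is first accessed:
```python
from ndmapper.io import TabMapIO  # assumed re-export; defined in ndmapper/io/mapio.py

tmap = TabMapIO('example.fits', idx=2, label='MDF', ident=1)  # placeholder file/extension
tab = tmap.table           # first access triggers load_table()
print(tab.colnames)

tmap2 = tmap.copy()        # shares the already-loaded Table, re-mappable independently
```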
#### File: ndmapper/io/_util.py
```python
from functools import wraps
from ..libutils import splitext
from .formats import formats
__all__ = ['get_backend_fn']
def get_backend_fn(funcname, filename):
"""
Given a filename string and the name of a loader function defined in
ndmapper.io, return the implementation of the latter function from the
sub-module appropriate for the file format.
Currently we use only the file extension names, rather than more foolproof
magic or try-loader-except to determine file types, avoiding unnecessary
I/O overheads & complication.
This function may be used either directly by applications wanting to cache
look-ups when doing repeated I/O operations or, internally (when defining
new generic functions in ndmapper.io), via the _get_loader decorator.
"""
fext = (splitext(filename)[1] or '').lower()
backend_fn = None
for fmt, vals in formats.items():
if fext in vals:
# Import back-end module if not done already; just assume it
# exists if defined in formats dict, otherwise we have a bug.
exec('from . import _{0} as {0}'.format(fmt))
# Try to get the back-end function from the module:
try:
backend_fn = eval(fmt+'.'+funcname)
except AttributeError:
raise IOError('back end \'%s\' has no function \'%s\'' \
% (fmt, funcname))
break
if not backend_fn: # no back-end for file extension
raise IOError('unsupported file format \'%s\'' % fext)
return backend_fn
def _get_loader(fn):
"""
A decorator that calls get_backend_fn() to determine automatically the
appropriate back-end function corresponding to the generic one called
directly and provide it to the latter as an additional argument (similar
to 'self' in classes). Intended for internal use within ndmapper.io.
"""
@wraps(fn) # use func's own name & docstring instead of the wrapper's
def loader_wrapper(*args, **kwargs):
loader = get_backend_fn(fn.__name__, args[0])
return fn(loader, *args, **kwargs)
return loader_wrapper
```
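The `_get_loader` decorator above is intended for the generic front-end functions defined in `ndmapper.io`: the first positional argument must be the filename, and the matching back-end implementation is passed in as an extra leading argument. A sketch of that pattern, using `load_array` since that is one of the back-end names the rest of this package looks up:
```python
from ._util import _get_loader  # within the ndmapper.io package

@_get_loader
def load_array(loader, filename, index):
    """Generic front end; `loader` is the back-end picked from the file extension."""
    return loader(filename, index)
```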
#### File: quadpype/ndmapper/iraf_task.py
```python
import os
import os.path
import tempfile
import datetime
import traceback
from pyraf import iraf
from . import config
from .data import FileName, DataFile, DataFileList, temp_saved_datafile
from .utils import to_datafilelist
from .libutils import is_list_like
__all__ = ['run_task', 'get_extname_labels']
def run_task(taskname, inputs, outputs=None, prefix=None, suffix=None,
comb_in=False, MEF_ext=True, path_param=None, reprocess=None,
logfile=None, **params):
"""
Wrapper to run an IRAF task on one or more `DataFile` objects and collect
the results.
Parameters
----------
taskname : `str`
Name of the IRAF task to run, optionally (if the package is already
loaded) including the package hierarchy (eg. ``gemini.gmos.gfcube``).
inputs : `dict`
Dictionary mapping task parameter names to one or more input `DataFile`
instances to pass one at a time to the task (``comb_in is False``) or
all together (``comb_in is True``). All the named files must already
exist.
outputs : `dict` or `None`, optional
Specify output parameter name(s) and their filename value(s), if any,
as dictionary keys & values. The files named must not already exist.
The equivalent dictionary is returned as output, after applying any
automatic modifications. The `dict` values may have any type that can
be converted to a string (eg. `FileName`), either individually or in
a sequence.
If the ``prefix`` and/or ``suffix`` parameter is set, the value(s) may
name a parameter from the inputs dictionary, prefixed with '@'
(eg. ``@infiles``), to create the output names based on the
corresponding input names, or prefixed with '!' to create a single
output name based on the first input name.
prefix : `str` or `None`
A default prefix to add to existing input filename(s) to form the
output filename(s), if the output parameter value(s) specify this
behaviour.
suffix : `str` or `None`
A suffix to add to existing input filename(s) to form the output
filename(s), if the output parameter value(s) specify this behaviour.
comb_in : `bool`
Pass all the inputs to the task at once, in a single call (eg. for
stacking), instead of the default behaviour of calling the task on
each file in turn (per input parameter)? This parameter is named
obscurely to avoid conflicts with IRAF tasks
(eg. ``imcombine.combine``).
MEF_ext : `bool`
Specify and iterate over FITS image extensions, for tasks expecting
simple FITS as input (eg. core IRAF tasks; default `True`)? This should
be set to `False` for tasks that already handle multi-extension FITS
files (eg. from Gemini IRAF) or when the input files are already simple
FITS. The extension names to be iterated over are defined when the
input DataFile instances are created, defaulting to values kept in the
package ``config`` dictionary.
The number of extensions named ``config['labels']['data']`` (eg. 'SCI')
must be the same for every input file or one (in which case that
single extension will be re-used for each iteration over the extensions
of the other input files).
path_param : `str` or `None`
Name of a task parameter (eg. ``rawpath``) used to specify the location
of the input files, instead of including the full path in each input
filename as usual. The DataFile paths are automatically stripped from
the inputs and supplied to the task via this parameter instead. Output
files are still assumed to reside in the current working directory
unless otherwise specified. To use this option, all inputs containing
a directory path (other than '') must reside in the same directory --
if this is not the case and the IRAF task does not understand paths in
filenames then the user will need to copy the input files to the
current directory before running it. The user must not supply filenames
in another directory to input parameters for which the IRAF task does
not apply the path_param prefix (usually input calibrations), or the
task will fail to find some or all of the inputs.
reprocess : `bool` or `None`, optional
Overwrite or re-use existing output files? The default of `None`
redirects to the value of the package configuration variable
``ndmapper.config['reprocess']``, which in turn defaults to `None`,
causing an error to be raised where output files already exist before
processing. If the value is set to `True`, any existing files will be
deleted before the IRAF task is run, while `False` is a no-op for
existing files, causing them to be re-used as output without repeating
the processing. If the IRAF task produces any intermediate files that
are not included in ``outputs`` (ie. that are unknown to run_task), it
is the caller's responsibility to delete them before repeating any
processing. The task is always (re-)run where there are no `outputs`.
logfile : `str` or `dict` or `None`, optional
Optional filename for logging output, which includes any IRAF log
contents (delimited by run_task status lines) or Python exceptions.
The default of `None` causes the value of the package configuration
variable ``ndmapper.config['logfile']`` to be used, which itself
defaults to `None` (in which case no log is written).
Where only a filename string is provided and the IRAF task has a
parameter named ``logfile``, the corresponding IRAF log will be captured
automatically, otherwise only status information and Python exceptions
will get recorded. Where a single-item dictionary is provided, the key
specifies an alternative IRAF "log file" parameter name to use and the
value again specifies the output filename [not implemented]. A special
key string of ``STDOUT`` will cause the standard output to be captured
in the log (instead of any IRAF log file contents) [unimplemented].
The IRAF log contents relevant to each file are also appended to the
corresponding output DataFile's log attribute (whether or not a log
file is specified here and written to disk).
params : `dict`
Named IRAF task parameters. These may include ancillary input or
output filenames that don't need to be tracked by the main inputs &
outputs dictionaries.
Returns
-------
outputs : `dict` of `str` : `DataFileList`
The DataFile objects named by the parameter ``outputs``, containing
the results from IRAF.
Notes
-----
There is no support for mixing MEF- and simple FITS files in a single call.
In principle, ``prefix`` & ``suffix`` could conflict with any like-named
IRAF parameters that have a different meaning from the Gemini convention
(ie. a string that is added to the start/end of the input filename to
provide an output name), but there appears to be only one such case in
Ureka (sqiid.getcoo); likewise for ``MEF_ext``, which has no known uses
elsewhere, ``path_param`` and ``reprocess``. It is assumed that the
widely-used ``logfile`` will only ever have the usual meaning.
"""
# Print initial run_task() delimiter:
dt = datetime.datetime.now()
logstart = '-----\nSTART run_task(%s) %s' % \
(taskname, dt.strftime('%Y-%m-%d %H:%M:%S'))
print(logstart)
# Start the log file:
if logfile is None:
logfile = config['logfile']
if logfile is None:
userlog = None
elif isinstance(logfile, str):
userlog = open(logfile, 'a')
userlog.write('%s\n' % logstart)
else:
# Dict, to be implemented:
raise NotImplementedError('logfile must currently be str or None')
# Determine from the config dict whether to reprocess data, if unspecified:
if reprocess is None:
reprocess = config['reprocess']
# Keep a list of any temporary files that need cleaning up when finished:
tmplist = []
# This giant try-except block just exists to log any tracebacks before
# re-raising the exception:
try:
# Ensure host package(s) is/are loaded:
pkglist = taskname.split('.')[:-1]
for pkg in pkglist:
eval('iraf.'+pkg+'(_doprint=0, Stdout=1)') # capture+discard stdout
# Get reference to the task as a function:
task = eval('iraf.'+taskname)
# Restore any old task parameter settings that aren't overridden in
# "params" to their defaults, to ensure the results are reproducible:
task.unlearn()
        # Check for a recognizable "log file" task parameter that can be used
# to capture IRAF logging output:
logpar = None
for parname in ['logfile']:
if parname in task.getParDict().keys():
logpar = parname
break
# Ensure the main inputs & outputs are dictionaries, otherwise we don't
# know what IRAF parameters they go with. I think we want this to be
# fairly brittle rather than duck-typed, to avoid unexpected behaviour.
if outputs is None: outputs = {}
if not isinstance(inputs, dict) or not isinstance(outputs, dict):
raise TypeError('values of inputs/outputs must be parameter=' \
' value dictionaries')
# Make sure each input parameter is expressed as a filename list and
# determine how many sets of input files there are to iterate over
# (should be as many as the length of the longest list).
inplen = conv_io_pars(inputs, mode=None) # defaults to mode='read'
nfiles = max(inplen)
# Input files are no longer required already to exist on disk here, as
# the unloaded flag will be False otherwise, which now causes temporary
# copies to get saved below, at temp_saved_datafile().
# Set the task's path_param if specified (but not overridden by the
# user), after ensuring the path to the files is unique:
if path_param and path_param not in params:
paths=set()
for dfl in inputs.values():
for df in dfl:
if df.filename.dir: # don't count CWD ('')
paths.add(df.filename.dir)
ndirs = len(paths)
if ndirs == 0:
path = ''
elif ndirs == 1:
(path,) = paths
else:
raise ValueError('inputs must all have the same path when ' \
'\'path_param\' is set')
path = os.path.join(path, '') # incl. trailing slash unless blank
params[path_param] = path
# Apply any specified prefix to the filenames of the reference input
# parameter to form the corresponding output filenames (this usage is
# a bit dodgy after re-doing DataFile modes but still works):
if outputs is not None:
for key, val in outputs.items():
if isinstance(val, str) and val and val[0] in '!@':
if prefix is None and suffix is None:
raise ValueError('output \'%s\' requires missing '
'suffix/prefix value' % key)
refpar = val[1:]
if val[0] == '!':
namerange = slice(0, 1) # use first filename
else:
namerange = slice(None, None) # use all filenames
preflist = DataFileList(mode='overwrite')
if refpar in inputs:
for datafile in inputs[refpar][namerange]:
newfile = DataFile(filename=datafile.filename,
mode='overwrite')
newfile.filename.dir='' # output goes in CWD
if prefix is not None:
newfile.filename.prefix = \
prefix + newfile.filename.prefix
if suffix is not None:
newfile.filename.suffix.append(suffix)
preflist.append(newfile)
else:
raise ValueError('parameter name %s for prefix not '\
'in inputs dictionary' % refpar)
outputs[key] = preflist
# Make sure output parameters are DataFileLists, as for the input,
# selecting the mode according to the reprocess parameter. Use
# overwrite mode for reprocess==False until the files get reloaded, so
# DataFile won't complain if they don't already exist.
mode = 'new' if reprocess is None else 'overwrite'
outplen = conv_io_pars(outputs, mode=mode)
# Save temporary copies (to the current directory) of any input files
# that could have changed in memory, having done what's needed with
# the original input filenames above. Creating copies can slow things
# down by a factor of ~2-3 (eg. from 2.3s to 5.7s when adding two 270M
# FITS files with 3 SCI extensions each on an SSD) but avoids
# unexpected results due to unsaved changes. Disk space usage could be
# improved by copying only those files needed at each iteration but
# that would complicate expansion of lists to the same length below etc.
# When path_param is set and *any* of the inputs needs saving, copies
# must be made of all the files, since they must reside in the same
# directory (and the original location may not be writeable).
if path_param and not all([df.unloaded for dfl in inputs.values() \
for df in dfl]):
copyall = True
params[path_param] = ''
else:
copyall = False
# Substitute original DataFiles for temporary copies only where needed
# (if we're not sure there's an up-to-date copy on disk already). The
# method for deciding this is currently a bit primitive (optimizing it
# is a bit of a minefield) but avoids routine copies in the common case
# where DataFileList is simply used as a list of files for IRAF.
for dfl in inputs.values():
for n, df in enumerate(dfl):
if copyall or not df.unloaded:
tdf = temp_saved_datafile(df)
tmplist.append(tdf)
dfl[n] = tdf
# Consider adding a section here that enables comb_in automatically
# if the number of output files (for at least one output parameter?)
# is 1 (in which case the following check won't run). The parameter
# can't be eliminated entirely because we can't distinguish looping
# over N inputs with N outputs from running a task once that processes
# N inputs together and produces N outputs (eg. WCS updates onto a
# common system, scaling to a common sky level etc.). Could add
# comb_in="auto"/None option or make it into "force_comb" or
# "separate" etc.
# - At this point, the prefix parameter has already been applied to
# generate one or more output filenames, if applicable.
# - Document decision in the log.
# Now if we're iterating over the files and feeding them to the task
# one at a time for each parameter, expand out any single filenames
# implicitly to the length of any input list(s) so we can iterate over
# everything together, complaining if given lists of different lengths
# (ie. whose correspondence cannot be determined unambiguously).
if not comb_in and nfiles > 1:
for parset, parlens in [(inputs, inplen), (outputs, outplen)]:
for param, n in zip(parset, parlens):
if n == 1:
parset[param] *= nfiles
elif n != nfiles:
raise ValueError('input/output file lists have ' \
'unmatched lengths and comb_in=False')
# Create a list of inputs & outputs for each set of files on which the
# task is run (just one if comb_in=True). At this point, the input
# & output lists should all have the same lengths if comb_in=False.
if comb_in:
inlist = [inputs]
outlist = [outputs]
else:
inlist = [{key : DataFileList(data=inputs[key][n]) for key in \
inputs.keys()} for n in range(nfiles)]
outlist = [{key : DataFileList(data=outputs[key][n], mode=mode) \
for key in outputs.keys()} for n in range(nfiles)]
# Define IRAF string format for any extension FITS extension numbers:
in_extfmt = '[%s]'
out_extfmt = '[%s,%s,append]'
# To avoid obscure failures, do a pre-iteration over output parameter
# set(s) and ensure there are no duplicate outputs between iterations
# or, when reprocess is False, incomplete subsets of existing outputs
# from any given iteration. Duplicate outputs are allowed within an
# iteration (ie. task call) and if not legitimate should eventually be
# caught when the task itself complains. Other errors like file
# permissions are dealt with later, by checking that the files actually
# get created by IRAF. While we're at it, add the run_task delimiter to
# the DataFile log attributes (done separately from the IRAF log so
# it's still present when not using the latter).
prevnames = []
for outpset in outlist:
iternames = []
existing = non_existing = False
for dfl in outpset.values():
for df in dfl:
df.log += '\n%s\n' % logstart
# (This comparison should also deal automatically with any
# explicit .fits extensions once DataFile handles them:)
name = os.path.abspath(str(df))
if name in prevnames:
raise IOError('duplicate output file: %s' % str(df))
if reprocess is False:
if os.path.exists(name):
existing = True
else:
non_existing = True
iternames.append(name)
prevnames.extend(iternames)
if reprocess is False and existing and non_existing:
raise IOError('reprocess is False & a subset of these outputs '\
'already exist:\n {0}'
.format('\n '.join([str(df) for dfl \
in outpset.values() \
for df in dfl])))
# Iterate over the parameter set(s) and run the task on each one:
for inpset, outpset in zip(inlist, outlist):
call_task = True
# If re-processing, delete any existing files from this task call
# and if not, check whether the call can be skipped. This check is
# done here, separately from the above section, to avoid premature
# removal of results in case of failure:
if reprocess is not None:
# This variable also gets re-used in the call_task else clause:
names = [str(df) for dfl in outpset.values() for df in dfl]
for name in names:
if os.path.exists(name):
if reprocess:
os.remove(name)
else:
call_task = False
break # either all or none exist after above sec.
# Execute the task unless its outputs already exist and reprocess
# is False, in which case we just re-use the files instead (via the
# same reload() call at the end):
if call_task:
# When MEF_ext=True, we require all the inputs to have the same
# or unit length at each iteration (or overall if comb_in=True)
# and the same EXTVERs in order to match data extensions
# unambiguously between the inputs. While one could envisage
# more intelligent expansion behaviour than this for special
# cases of comb_in=True (such as requiring the lengths to match
# only between files at the same list positions) it would be
# difficult to generalize without unnecessarily imposing fixed
# relationships between sets of input files (eg. there's
# nothing to stop an IRAF task from taking a different number
# of files for each input or combining them in some way other
# than one operation over all the inputs per list position).
# The most likely case of iterating implicitly over MEF
# extensions for multiple groups of files that only match
# within each group can be handled using comb_in=False.
if MEF_ext:
# List EXTVERs for each input file (matching inpset dict):
extdict = {param : [{ndd.ident : ndd._io.data_idx \
for ndd in df} \
for df in inpset[param]] \
for param in inpset}
# Also derive a flat list of all sorted EXTVER lists, to
# check easily that they match & get the nominal EXTVERs:
allvers = [sorted(extmap.keys()) for extmaps in \
extdict.values() for extmap in extmaps]
# Find longest extension list, which we'll iterate over if
# all goes well (others should be the same or unit length):
extvers = max(allvers, key=len)
# Fail if other non-unit-length EXTVER lists don't match:
if not all([dfvers == extvers for dfvers in allvers
if len(dfvers) > 1]):
raise ValueError('non-matching input MEF EXTVERs')
# Not iterating explicitly over MEF extensions:
else:
# Dummy dict to iterate over below, avoiding duplication:
extdict = {param : [None for df in inpset[param]] \
for param in inpset}
extvers = ['']
                # Run the task once per MEF extension, if applicable,
# otherwise just once in total:
for ver in extvers:
# Complete the IRAF parameter set with input/output file
# lists for this iteration over the input files:
for param in inpset:
# IRAF filenames for this parameter:
fnlist = []
# Iterate over files for this param & their ext maps:
for df, dfextmap in zip(inpset[param], extdict[param]):
# OS filename, without any MEF ext (& without any
# path if task expects a separate path parameter):
if path_param:
fn = str(FileName(df.filename, dirname=''))
else:
fn = str(df)
# If iterating over FITS extensions, find the data
# extension number corresponding to this extver, or
# if there's only one data ext re-use that number:
if dfextmap:
if len(df) == 1:
fn += in_extfmt % df[0]._io.data_idx
else:
fn += in_extfmt % dfextmap[ver]
fnlist.append(fn)
# Convert filename list to IRAF comma-separated string
# and add the relevant task parameter/value:
params[param] = ','.join(fnlist)
# Similar IRAF comma-separated list for output files. Here
# we just give IRAF the extname/ver instead of the ext.
for param in outpset:
params[param] = ','.join( \
[str(df)+(out_extfmt % (df._labels['data'], ver)) \
if ver else str(df) for df in outpset[param]])
# Specify log file for IRAF. Even if the user specifies a
# name, use a temporary file and capture its contents
# before appending to the user-specified file.
if logpar is not None:
templog = tempfile.NamedTemporaryFile('w+t')
params[logpar] = templog.name
else:
templog = None
print('pars', params)
# Execute with Python inputs converted to IRAF-style pars:
try:
task(**params)
# Note that PyRAF doesn't trap failures in IRAF tasks that
# accept input file lists and only issue a warning and
# carry on when an error specific to one of the files
# occurs, so we have to check separately that the expected
# output gets created to be confident it worked.
except (iraf.IrafError, KeyError):
# Currently just a placeholder for any clean-up.
raise
# Save any temporary IRAF log output whether or not the
# task succeeded:
finally:
if templog is not None:
logtext = templog.read()
# Copy temporary IRAF log into user-specified log:
if userlog:
userlog.write(logtext)
# Attach log text to all output DataFile objects
# since, where there's more than one, we don't know
# which if any is the main one and it may apply to
# them all:
# To do: currently get overwritten by reload below?
for dfl in outpset.values():
for df in dfl:
df.log += logtext
templog.close()
# Check that any non-blank output filenames got created:
for key, val in outpset.items():
for df in val:
namestr = str(df)
if namestr and not os.path.isfile(namestr):
raise RuntimeError(
'No file %s after running %s' % \
(namestr, taskname)
)
# Here we would clean up any temp copies of input files from
# this iteration over a given set of files, if and when the
# copies are made per iteration instead of all at the start.
# If processing was skipped, note that in the log:
else:
msg = 'Skip processing & re-use existing outputs for:'
for name in names: # from "if reprocess" at start of loop
msg += '\n {0}'.format(name)
for dfl in outpset.values():
for df in dfl:
df.log += msg
if userlog:
userlog.write('{0}\n'.format(msg))
print(msg)
# Print final run_task() delimiter:
dt = datetime.datetime.now()
logend='END run_task(%s) %s\n-----\n' % \
(taskname, dt.strftime('%Y-%m-%d %H:%M:%S'))
if userlog: userlog.write('%s' % logend)
print(logend)
# Add delimiter to individual DataFile log attributes as well:
for dfl in outputs.values():
for df in dfl:
df.log += '\n%s\n' % logend
# print('dfl', df.log)
except:
if userlog:
userlog.write(traceback.format_exc()) # save traceback
raise
finally:
if userlog:
userlog.close()
for df in tmplist:
os.remove(str(df))
# Map data from files listed in the outputs after their creation by IRAF:
for param in outputs:
for df in outputs[param]:
df.reload()
# Return the outputs dictionary provided by the user, after expanding
# any input parameter refs expanded to DataFileLists etc.:
return outputs
# TO DO:
# - Finish logging behaviour as per the docstring(?).
# - Also send record of inputs/outputs to the log only in case the
# IRAF task doesn't do it?
# - Want to ensure the name ends in .fits etc?
# - Would do this in DataFile, otherwise it would judge incorrectly
# whether the file already exists (and in any case DataFile needs to
# know how to load the file etc.).
# - Use a (prioritized?) list of all recognized file types rather than
# making DataFile FITS-specific.
# - Only when the file mode corresponds to input files(?). Otherwise
# there's no way to know which is the right implicit extension.
# Unless there's a package-configurable default?
# - Capture any stdout or stderr as well as the log?
# - Consider allowing params other than input & output to be DataFileLists
# and converting them to strings as needed, for convenience.
# - Add IRAF tests with another 2 FITS files: with VAR/DQ, unnumbered.
# - Improve error trapping further??
# - Check gemini status parameters?
def conv_io_pars(pardict, mode):
"""
Convert `dict` of input or output Python file lists/names to `dict` of
type `DataFileList` and return a `list` of the list lengths (private).
"""
for param in pardict:
pardict[param] = to_datafilelist(pardict[param], mode=mode)
parlen = [len(val) for val in pardict.values()]
return parlen
def get_extname_labels(datafiles):
"""
Ensure that all `DataFile` instances in datafiles use the same convention
for labelling `NDData` component arrays (eg. 'SCI', 'VAR', 'DQ') and return
the corresponding labels dictionary. The dictionary values can then be
used to specify the EXTNAME conventions for IRAF tasks that operate on
multi-extension FITS files (which is unnecessary in Python, where `NDData`
defines which array is which). An empty dictionary is returned if the input
list is empty.
Raises `ValueError` if the constituent label dictionaries differ.
"""
if isinstance(datafiles, DataFile):
datafiles = [datafiles]
elif not is_list_like(datafiles) or \
not all([isinstance(df, DataFile) for df in datafiles]):
raise TypeError('datafiles must be a DataFile list or a DataFile')
unique_items = set([tuple(df._labels.items()) for df in datafiles])
if len(unique_items) > 1:
raise ValueError('datafiles must all have the same "labels" convention')
labels = dict(unique_items.pop()) if unique_items else {}
return labels
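# Hedged usage sketch (the FITS filenames below are hypothetical and DataFileList
# is assumed to come from ndmapper.data):
#
# dfs = get_extname_labels(DataFileList(filenames=['bias1.fits', 'bias2.fits'],
# mode='read'))
# dfs['data'] # eg. 'SCI', suitable for an IRAF task's sci_ext parameter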
```
#### File: lib/gmos/gmos.py
```python
from pyraf import iraf
from ndmapper import config, ndprocess_defaults
from ndmapper.data import FileName, DataFile, DataFileList
from ndmapper.iraf_task import run_task, get_extname_labels
from ndmapper.utils import to_datafilelist
from ..gemini import *
# Redefine explicitly what to expose from our parent module here, rather than
# appending to its __all__ (as at lower levels of the tree), since it contains
# observatory-specific helper functions that are not processing steps.
__all__ = ['CAL_DEPS', 'make_bias', 'clean_pixels']
# These functions are intended to represent logical processing steps, rather
# than strict one-to-one wrappers for existing IRAF tasks; the aim is not to
# replicate the IRAF user interface exactly. Each "wrapper" function must
# therefore deal with any options exposed to the user explicitly, rather than
# allowing IRAF task parameters to be specified arbitrarily via **params.
# At least to begin with, the aim is to simplify things aggressively and not
# expose every option for atypical usage until a use case arises. When that
# happens, we'll weigh up passing additional options directly vs indirectly vs
# adding more wrappers to support different common usage scenarios.
# NB. PyRAF accepts True/False, iraf.yes/iraf.no & 'yes'/'no' interchangeably.
@ndprocess_defaults
def make_bias(inputs, bias=None, bpm=None, ovs_function='chebyshev',
ovs_order=1, ovs_lsigma=2.0, ovs_hsigma=2.0, ovs_niter=5, ovs_sample=(2,63),
comb_lsigma=2.0, comb_hsigma=2.0, reprocess=None, interact=None):
"""
Combine individual bias exposures to produce a reference bias frame for
calibrating other data.
Parameters
----------
inputs : DataFileList
Input raw bias images.
bias : str-like, optional
Output combined bias image name. If None (default), a new DataFile
will be returned whose name is constructed from that of the first input
file, by appending '_bias'.
bpm : DataFile or DataFileList, optional
A bad pixel mask, used if 'use_uncert' and/or 'use_flags' is enabled.
This can be created with the Gemini IRAF task GBPM.
ovs_function : str
Function to use for fitting the overscan region in IRAF (default
'chebyshev'; may also be 'legendre', 'spline1' or 'spline3').
ovs_order : int
Order of the overscan fitting function (default 1).
ovs_lsigma : float
Negative sigma rejection threshold for overscan fitting (default 2.0).
ovs_hsigma : float
Positive sigma rejection threshold for overscan fitting (default 2.0).
ovs_niter : int
Number of rejection iterations for overscan fitting (default 5).
ovs_sample : list or tuple or None, optional
Zero-indexed range of rows to include in the overscan fit, to help
avoid contamination (default (2,63)). A value of None selects all the
available rows (gireduce 'default').
comb_lsigma : float
Negative sigma rejection threshold for averaging biases (default 2.0).
comb_hsigma : float
Positive sigma rejection threshold for averaging biases (default 2.0).
interact : bool, None
Fit the overscan region interactively in IRAF? If None (default),
interactivity is instead controlled by the package configuration
dictionary (see below).
See "help gbias" in IRAF for more detailed information.
Returns
-------
outbias : DataFile
The combined bias image produced by gbias.
Package 'config' options
------------------------
use_uncert : bool
Enable NDData 'uncertainty' (variance) propagation (default True)?
use_flags : bool
Enable NDData 'flags' (data quality) propagation (default True)?
reprocess : bool or None
Re-generate and overwrite any existing output files on disk or skip
processing and re-use existing results, where available? The default
of None instead raises an exception where outputs already exist
(requiring the user to delete them explicitly). The processing is
always performed for outputs that aren't already available.
interact : bool
Enable interactive plotting (default False)? This may be overridden
by the task's own "interact" parameter.
"""
# Some candidate parameters to open up to the UI:
verbose = True
# Default to appending "_bias" if an output filename is not specified:
if not bias:
bias = '!inimages'
# Determine input DataFile EXTNAME convention, to pass to the task:
labels = get_extname_labels(inputs)
# Insert a BPM in the task inputs if supplied by the user
# (NB. Use of this BPM parameter is untested at the time of writing; it
# would need a multi-extension FITS BPM in place of the pixel list files
# distributed with the package):
inputs = {'inimages' : inputs}
if bpm:
inputs['bpm'] = bpm
# Convert range of overscan rows fitted to the right format for IRAF:
if ovs_sample is None:
biasrows='default'
elif len(ovs_sample) != 2:
raise IndexError('ovs_sample should contain 2 limits')
else:
biasrows = '{0}:{1}'.format(*(i+1 for i in ovs_sample))
# Most of the IRAF package tasks don't have the granularity to control
# VAR & DQ propagation separately, so just turn them both on if either
# is specified. This isn't handled by ndprocess_defaults since the
# Python & IRAF APIs are different (two parameters vs one):
if config['use_uncert'] or config['use_flags']:
vardq = True
else:
vardq = False
# Wrap gbias, defining the parameters reproducibly (for a given version)
# but omitting inapplicable parameters such as minmax options. Certain
# parameters, such as logfile & rawpath, are set directly by run_task.
result = run_task('gemini.gmos.gbias', inputs=inputs,
outputs={'outbias' : bias}, suffix='_bias', comb_in=True,
MEF_ext=False, path_param='rawpath', reprocess=reprocess, fl_over=True,
fl_trim=True, key_biassec='BIASSEC', key_datasec='DATASEC',
key_ron='RDNOISE', key_gain='GAIN', ron=3.5, gain=2.2,
gaindb='default', sci_ext=labels['data'],
var_ext=labels['uncertainty'], dq_ext=labels['flags'], sat='default',
nbiascontam='default', biasrows=biasrows, fl_inter=interact,
median=False, function=ovs_function, order=ovs_order,
low_reject=ovs_lsigma, high_reject=ovs_hsigma, niterate=ovs_niter,
combine='average', reject='avsigclip', lthreshold=iraf.INDEF,
hthreshold=iraf.INDEF, masktype='goodvalue', maskvalue=0.0,
scale='none', zero='none', weight='none', statsec='[*,*]',
key_exptime='EXPTIME', nkeep=1, mclip=True, lsigma=comb_lsigma,
hsigma=comb_hsigma, sigscale=0.1, grow=0.0, fl_vardq=vardq,
verbose=verbose)
# Return the only DataFile instance from the output DataFileList
# corresponding to the task's "outbias" parameter:
return result['outbias'][0]
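# Hedged usage sketch (the raw filenames below are hypothetical GMOS bias exposures):
#
# raw_biases = DataFileList(filenames=['N20120101S0001.fits',
# 'N20120101S0002.fits'], mode='read')
# combined = make_bias(raw_biases, bias='combined_bias.fits', reprocess=True)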
```
#### File: quadpype/ndmapper/libutils.py
```python
import os, os.path
import tempfile
def splitext(path):
"""
A version of splitext that splits at the first separator rather than the
last one (so 'file.fits.gz' gives 'file' & 'fits.gz'). It also returns
None for the extension value where there isn't one (instead of ''), just
to avoid incorrect reconstruction of 'file.' as 'file' or vice versa.
"""
components = os.path.basename(path).split(os.extsep, 1) # len always 1->2
ext = None if len(components) == 1 else components[1]
root = path if ext is None else path[:-len(os.extsep+ext)]
return root, ext
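# For example (per the docstring above):
# splitext('/data/file.fits.gz') -> ('/data/file', 'fits.gz')
# splitext('file') -> ('file', None)
# addext(*splitext('file.fits.gz')) below round-trips to 'file.fits.gz'.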
def addext(path, ext):
"""
Reconstruct a filename from a (root, extension) tuple of the type
produced by splitext().
"""
return path + ('' if ext is None else os.extsep + ext)
def new_filename(purpose='tmp', base='', ext='', full_path=False):
"""
Generate a new filename string that is not already used in the current
directory (beginning with 'tmp' by default, for use as a temporary file).
Unlike Python's tempfile module, this function does not actually open the
file, making the result suitable for passing to external programs, but as
a result, a race condition may occur if the file is not created
immediately, which is the user's responsibility.
Parameters
----------
purpose : str, optional
Starting string, used to indicate the file's purpose (default 'tmp').
base : convertible to str, optional
A base name to add between "tmp_" and the last few, randomized
characters, to help distinguish temporary filenames, eg. for
troubleshooting purposes.
ext : convertible to str, optional
A file extension name to use (eg. 'fits'). The leading dot is optional
and will be added if needed.
full_path : bool
Return the full path to the file, rather than a (relative) filename in
the current working directory (default False)?
Returns
-------
str
A filename that doesn't already exist in the current working directory.
"""
base = str(base)
ext = str(ext)
# Add the leading dot to any specified file extension, if necessary
# (checking type to produce a less obscure error below if not a string):
if ext and not ext.startswith(os.extsep):
ext = os.extsep + ext
# Python doesn't provide a (non-deprecated) way to produce a temporary
# filename without actually creating and opening the file (to avoid
# possible race conditions & exploits). One can, however, let Python close
# the file again and then recycle its name, saving the corresponding
# DataFile immediately to avoid possible collisions.
with tempfile.NamedTemporaryFile(
prefix='{0}_{1}{2}'.format(purpose, base, '_' if base else ''),
suffix=ext, dir='') as tmpfile:
tmpname = tmpfile.name
return tmpname if full_path else os.path.basename(tmpname)
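# Hedged usage sketch: reserve a name such as 'tmp_bias_XXXXXXXX.fits' in the
# current directory (the file itself is not created, so the caller should write
# to it promptly to avoid collisions):
#
# fname = new_filename(base='bias', ext='fits')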
def map_API_enum(name, value, map_dict):
"""
Convert an enumerable parameter value from the Python API to its equivalent
IRAF value in the supplied dictionary (where such a mapping exists),
raising an appropriate exception if it's not recognized.
"""
try:
return map_dict[value]
except KeyError:
raise ValueError('unrecognized value for \'{0}\''.format(name))
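# Minimal sketch, mapping a hypothetical Python-level 'combine' option to its
# IRAF spelling (an unrecognized value would raise ValueError):
#
# map_API_enum('combine', 'mean', {'mean' : 'average', 'median' : 'median'})
# # -> 'average'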
def is_list_like(obj):
"""
Check whether a variable is a sequence other than a string.
"""
return hasattr(obj, '__iter__') and not isinstance(obj, str)
```
#### File: quadpype/ndmapper/utils.py
```python
from ndmapper.libutils import is_list_like
from ndmapper.data import FileName, DataFile, DataFileList
from .calibrations import K_CALIBRATIONS
def convert_region(region, convention):
"""
Convert a NumPy- or FITS-style region string into a tuple of integers
and Python slice objects that is suitable for subscripting arrays.
Some corner cases (eg. involving '*', ':' or steps) currently behave
differently from Python/IRAF subscripting but should be fairly harmless.
Parameters
----------
region : str
Region string, eg. '100:110,50:60', '100,*' or ':,99'.
convention : str
Indexing convention used: 'NumPy' (default) or 'FITS'; case
insensitive.
"""
# Check arguments:
if not isinstance(region, str):
raise TypeError('region must be a string')
convention = convention.lower()
if convention not in ['numpy', 'fits']:
raise ValueError('convention must be NumPy or FITS')
# Apply appropriate syntax & indexing adjustments for convention used:
if convention == 'numpy':
nregion = region
order = slice(None, None, None)
orig = 0
elif convention == 'fits':
nregion = region.replace('*', ':')
order = slice(None, None, -1)
orig = 1
else:
raise ValueError('convention must be \'NumPy\' or \'FITS\'')
# Split region into a range for each axis:
axes = nregion.split(',')
# Parse sub-string for each axis into a range & convert to slice object:
slices = []
for axis in axes[order]:
err = False if axis else True # disallow empty string
vals = axis.split(':')
nvals = len(vals)
if nvals > 3:
err = True # disallow more than start:stop:step
elif nvals == 1:
try:
sliceobj = int(vals[0])-orig # single row/column number
except ValueError:
err = True
else:
try:
# Any adjustment for 1-based indexing is applied only to the
# start of the range, since 1 has to be added to the ending
# index to account for FITS/IRAF-style ranges being inclusive.
sliceobj = slice(*(int(val)-adj if val else None \
for val, adj in zip(vals, (orig, 0, 0))))
except ValueError:
err = True # disallow non-numeric values etc.
if err:
raise ValueError('failed to parse region: [%s]' % region)
slices.append(sliceobj)
return tuple(slices)
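# For example (FITS ranges are 1-based & inclusive and the axis order gets
# reversed to match NumPy subscripting):
#
# convert_region('100:110,50:60', 'FITS') -> (slice(49, 60, None), slice(99, 110, None))
# convert_region(':,99', 'NumPy') -> (slice(None, None, None), 99)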
def to_filename_strings(objects, strip_names=True, strip_dirs=True,
use_cal_dict=False):
"""
Extract a list of filename strings from one or more `str`, `FileName` or
`DataFile` objects (or a `DataFileList`), by default removing any path and
processing suffix/prefixes. A calibration dictionary may also be given,
in the format produced by calibrations.init_cal_dict(), if the relevant
option is enabled. It is the caller's responsibility to ensure that the
string values are actually valid filenames.
This is typically used to reproduce base filenames for use in either
downloading or looking up external information about the files.
"""
# Convert any recognized single objects to a list (partly to ensure we
# don't inadvertently iterate over the NDData instances of a DataFile):
if isinstance(objects, (DataFile, FileName, str)):
objects = [objects]
# If the objects argument looks like a calibration dict, extract a list
# of unique constituent filenames from all the calibrations:
elif use_cal_dict and hasattr(objects, 'keys') and \
K_CALIBRATIONS in objects:
objects = list(set([fn for flist in \
objects[K_CALIBRATIONS].values() \
for fn in (flist if is_list_like(flist) else [])]))
# This must be list-like after the above:
if not is_list_like(objects) or hasattr(objects, 'keys'):
raise ValueError('objects parameter has an unexpected type')
return [str(FileName(str(obj), strip=strip_names, \
dirname='' if strip_dirs else None)) \
for obj in objects]
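# Hedged usage sketch (the filename is hypothetical; the exact result depends
# on FileName's prefix/suffix-stripping conventions):
#
# to_filename_strings(['raw/N20120101S0001_bias.fits'])
# # -> eg. ['N20120101S0001.fits']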
def to_datafilelist(arg, mode=None):
"""
Derive a `DataFileList` object from one or a sequence of objects than can
be converted to filename strings (eg. `str`, `FileName` or `DataFile`).
The ``mode`` defaults to ``'read'`` when given one or more filenames and
to the existing mode for `DataFile` and `DataFileList`. Where all of the
inputs are existing `DataFile` instances, those are re-used by reference
(``mode`` permitting), instead of opening new copies.
Beware of feeding this function inappropriate argument types, as almost anything can
be converted to `str`...
"""
# Ensure the input is in a list, for iteration:
if (isinstance(arg, DataFile) or not is_list_like(arg)):
arg = [arg]
# If passed existing DataFile(s), use them as the data argument for
# instantiation. Currently, DataFiles can only be re-used by reference if
# all elements have that type. This case also covers DataFileList, which
# needs re-instantating in case a new mode is specified.
if arg and all([isinstance(df, DataFile) for df in arg]):
outlist = DataFileList(data=arg, mode=mode)
# Otherwise, convert inputs to filename strings and instantiate with those.
# Should the acceptable types be enumerated to avoid strange results? That
# might impact user creativity in both good and bad ways...
else:
mode = 'read' if mode is None else mode
outlist = DataFileList(filenames=[str(fn) for fn in arg], mode=mode)
return outlist
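# Hedged usage sketch (filenames are hypothetical):
#
# dfl = to_datafilelist(['bias1.fits', 'bias2.fits']) # opened with mode='read'
# dfl = to_datafilelist(existing_datafiles, mode='update') # re-wraps DataFile instances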
```
|
{
"source": "jehung/MachineLearning_models",
"score": 2
}
|
#### File: jehung/MachineLearning_models/ann_model.py
```python
import numpy as np
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
from imblearn.over_sampling import SMOTE, ADASYN
from imblearn.combine import SMOTEENN
from sklearn.metrics import classification_report, confusion_matrix, precision_score, recall_score, f1_score
from imblearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, StratifiedShuffleSplit
from sklearn.model_selection import cross_val_predict, train_test_split
from joblib import Parallel, delayed
import multiprocessing
pd.set_option('display.max_columns', None)
import get_all_data
import utility
from itertools import repeat
d = {'train': None, 'cv set': None, 'test': None}
def train_size_ann(train=None, target=None, size=0):
#for size in [0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:
print('size', size)
print('here')
training_features, test_features, \
training_target, test_target, = train_test_split(train, target, test_size=0.33, random_state=778)
X_train, X_val, y_train, y_val = train_test_split(training_features, training_target, train_size=size)
#smote = SMOTE(ratio=1)
#X_train_res, y_train_res = smote.fit_sample(X_train, y_train)
print('start')
clf = MLPClassifier(verbose=True)
clf.fit(X_train, y_train)
print('process')
d['train'] = f1_score(y_train, clf.predict(X_train), average='weighted')
d['cv set'] = f1_score(y_val, clf.predict(X_val), average='weighted')
d['test'] = f1_score(test_target, clf.predict(test_features), average='weighted')
print('end')
return d
def complexity_ann(X, y):
# X_train, y_train, X_test, y_test = train_test_split(X,y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=778)
# smote = SMOTE(ratio=1)
# X_train_res, y_train_res = smote.fit_sample(X_train, y_train)
print('Start Search')
mlp = MLPClassifier(verbose=True)
pipe = Pipeline([('mlp', mlp)])
param_grid = {
'mlp__hidden_layer_sizes': [(160,), (160, 112, 112), (160, 112, 112, 112, 112), (160, 112, 112, 112, 112, 112, 112)]}
grid_search = GridSearchCV(estimator=pipe, param_grid=param_grid, n_jobs=6, cv=10, scoring='neg_log_loss', verbose=5)
grid_search.fit(X_train, y_train)
clf = grid_search.best_estimator_
print('clf', clf)
print('best_score', grid_search.best_score_)
y_pred = clf.predict(X_test)
check_pred = clf.predict(X_train)
target_names = ['Not delinq', 'Delinq']
print(classification_report(y_test, y_pred, target_names=target_names))
conf_mat = confusion_matrix(y_test, y_pred)
plt.figure()
utility.plot_confusion_matrix(conf_mat, classes=target_names,
title='Confusion matrix, without normalization')
plt.show()
return clf, clf.predict(X_train), y_pred
if __name__== '__main__':
all_data = get_all_data.get_all_data()
train, target = get_all_data.process_data(all_data)
#df = Parallel(n_jobs=6)(delayed(train_size_ann)(train=train, target=target, size=size) for size in np.arange(0.1, 1, 0.1))
#df = utility.merge_dict(df)
#print(df)
clf, score, mat = complexity_ann(train, target)
```
#### File: jehung/MachineLearning_models/dt_model.py
```python
import numpy as np
import pandas as pd
from datetime import datetime
import time
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from imblearn.over_sampling import SMOTE, ADASYN
from imblearn.combine import SMOTEENN
from sklearn.metrics import classification_report, confusion_matrix, precision_score, recall_score, f1_score
from imblearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, StratifiedShuffleSplit
from sklearn.preprocessing import label_binarize
from sklearn.preprocessing import scale
from sklearn.model_selection import cross_val_predict, train_test_split
from joblib import Parallel, delayed
import multiprocessing
pd.set_option('display.max_columns', None)
import get_all_data
import utility
from itertools import repeat
d = {'train': None, 'cv set': None, 'test': None}
def train_size_dt(train, target, size=0):
print('size', size)
print('here')
training_features, test_features, \
training_target, test_target, = train_test_split(train, target, test_size=0.33, random_state=778)
X_train, X_val, y_train, y_val = train_test_split(training_features, training_target, train_size=size)
print('start')
start_time = time.time()
dt = DecisionTreeClassifier(class_weight='balanced')
dt.fit(X_train, y_train)
print('Decision Tree took', time.time() - start_time, 'to run')
d['train'] = f1_score(y_train, dt.predict(X_train), average='weighted')
d['cv set'] = f1_score(y_val, dt.predict(X_val), average='weighted')
d['test'] = f1_score(test_target, dt.predict(test_features), average='weighted')
print('end')
return d
def complexity_dt(X, y):
#X_train, y_train, X_test, y_test = train_test_split(X,y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=778)
#smote = SMOTE(ratio=1)
#X_train_res, y_train_res = smote.fit_sample(X_train, y_train)
print('Start Search')
dt = DecisionTreeClassifier(class_weight='balanced')
pipe = Pipeline([('dt', dt)])
param_grid = {'dt__max_depth': [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]}
grid_search = GridSearchCV(estimator=pipe, param_grid=param_grid, n_jobs=6, cv=5, scoring='neg_log_loss', verbose=5)
grid_search.fit(X_train, y_train)
clf = grid_search.best_estimator_
print('clf', clf)
print('best_score', grid_search.best_score_)
y_pred = clf.predict(X_test)
check_pred = clf.predict(X_train)
target_names = ['Not delinq', 'Delinq']
print(classification_report(y_test, y_pred, target_names=target_names))
conf_mat = confusion_matrix(y_test, y_pred)
plt.figure()
utility.plot_confusion_matrix(conf_mat, classes=target_names,
title='Confusion matrix, without normalization')
plt.show()
return clf, clf.predict(X_train), y_pred
if __name__== '__main__':
all_data = get_all_data.get_all_data()
train, target = get_all_data.process_data(all_data)
df = Parallel(n_jobs=6)(delayed(train_size_dt)(train=train, target=target, size=size) for size in np.arange(0.1, 1, 0.1))
df = utility.merge_dict(df)
print(df)
clf, score, mat = complexity_dt(train, target)
```
#### File: jehung/MachineLearning_models/utility.py
```python
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import itertools
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
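# Hedged usage sketch with an illustrative 2x2 matrix (the counts are made up):
#
# cm = np.array([[50, 10], [5, 35]])
# plt.figure()
# plot_confusion_matrix(cm, classes=['Not delinq', 'Delinq'], normalize=True)
# plt.show()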
def merge_dict(dicts):
"""dicts: a list of dicts"""
super_dict = {}
for d in dicts:
for k, v in d.items(): # d.items() in Python 3+
super_dict.setdefault(k, []).append(v)
df = pd.DataFrame.from_dict(super_dict)
df.plot()
plt.show()
return df
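# Hedged usage sketch: combine per-run score dicts (eg. as produced by the
# train_size_* functions) into a single DataFrame and plot them:
#
# merge_dict([{'train': 0.91, 'cv set': 0.84, 'test': 0.82},
# {'train': 0.93, 'cv set': 0.86, 'test': 0.83}])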
```
|
{
"source": "jehung/universal_portfolio",
"score": 2
}
|
#### File: universal_portfolio/universal_portfolio/knapsack.py
```python
from __future__ import print_function
import numpy as np
np.random.seed(1335) # for reproducibility
np.set_printoptions(precision=5, suppress=True, linewidth=150)
import os
import pandas as pd
import backtest as twp
from matplotlib import pyplot as plt
from sklearn import metrics, preprocessing
from talib.abstract import *
from sklearn.externals import joblib
import quandl
import random, timeit
from sklearn import preprocessing
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
from keras.optimizers import RMSprop, Adam
'''
Name: The Self Learning Quant, Example 3
Author: <NAME>
Created: 30/03/2016
Copyright: (c) <NAME> 2016
Licence: BSD
Requirements:
Numpy
Pandas
MatplotLib
scikit-learn
TA-Lib, instructions at https://mrjbq7.github.io/ta-lib/install.html
Keras, https://keras.io/
Quandl, https://www.quandl.com/tools/python
backtest.py from the TWP library. Download backtest.py and put in the same folder
/plt create a subfolder in the same directory where plot files will be saved
'''
def get_ticker(x):
return x.split('/')[-1].split('.')[0]
def read_file(file, test=None):
scaler = preprocessing.MinMaxScaler()
d = pd.read_csv(file).set_index('Date')
d.fillna(0, inplace=True)
ticker = get_ticker(file)
d['ticker'] = ticker
d.rename(columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Close': 'close', 'Adj Close': 'adj_close',
'Volume (BTC)': 'volume'},
inplace=True)
x_train = d.iloc[:-100, ]
x_test = d.iloc[-100:, ]
if test:
return x_test, ticker
else:
return x_train, ticker
# Initialize first state, all items are placed deterministically
def init_state(file, test):
d, ticker = read_file(file, test=test)
xdata = pd.DataFrame()
scaler = preprocessing.StandardScaler()
xdata['adj_close'] = d['adj_close'] # .values
xdata['diff'] = xdata['adj_close'].diff(periods=1)
xdata['diff'].fillna(0, inplace=True)
xdata['sma15'] = SMA(d, timeperiod=15)
xdata['sma60'] = SMA(d, timeperiod=60)
xdata['rsi'] = RSI(d, timeperiod=14)
xdata['atr'] = ATR(d, timeperiod=14)
xdata.fillna(0, inplace=True)
# --- Preprocess data
# xdata = np.column_stack((close, diff, sma15, close - sma15, sma15 - sma60, rsi, atr))
xdata = pd.DataFrame(scaler.fit_transform(xdata), columns=xdata.columns)
xdata['ticker'] = ticker
pivot_columns = xdata.columns[0:-1]
pivot = xdata.pivot_table(index=d.index, columns='ticker', values=pivot_columns) # Make a pivot table from the data
pivot.columns = [s1 + '-' + s2 for (s1, s2) in pivot.columns.tolist()]
return pivot
def all_init_data(test=False):
filepath = 'util/stock_dfs/'
all = []
scaler = preprocessing.StandardScaler()
for f in os.listdir(filepath):
datapath = os.path.join(filepath, f)
if datapath.endswith('.csv'):
# print(datapath)
Res = init_state(datapath, test=test)
all.append(Res)
all = pd.concat(all, axis=1)
all.fillna(0, inplace=True)
closecol = [col for col in all.columns if 'adj_close' in col]
close = all[closecol].values
# xdata = np.column_stack((close, diff, sma15, close-sma15, sma15-sma60, rsi, atr))
xdata = np.vstack(all.values)
xdata = np.nan_to_num(xdata)
if test == False:
scaler = preprocessing.StandardScaler()
xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
joblib.dump(scaler, 'data/scaler.pkl')
else:
scaler = joblib.load('data/scaler.pkl')
xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
state = xdata[0:1, 0:1, :]
return state, xdata, close
# Take Action
def take_action(state, xdata, action, signal, time_step):
# this should generate a list of trade signals that at evaluation time are fed to the backtester
# the backtester should get a list of trade signals and a list of price data for the assett
# make necessary adjustments to state and then return it
time_step += 1
# if the current iteration is the last state ("terminal state") then set terminal_state to 1
if time_step + 1 == xdata.shape[0]:
state = xdata[time_step - 1:time_step, 0:1, :]
terminal_state = 1
signal.loc[time_step] = 0
return state, time_step, signal, terminal_state
# move the market data window one step forward
state = xdata[time_step - 1:time_step, 0:1, :]
# take action
if action == 1:
signal.loc[time_step] = 100
elif action == 2:
signal.loc[time_step] = -100
else:
signal.loc[time_step] = 0
# print(state)
terminal_state = 0
# print(signal)
return state, time_step, signal, terminal_state
# Get Reward, the reward is returned at the end of an episode
def get_reward(new_state, time_step, action, xdata, signal, terminal_state, eval=False, epoch=0):
reward = 0
signal.fillna(value=0, inplace=True)
if eval == False:
try:
bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata[time_step - 2:time_step]], index=signal[time_step - 2:time_step].index.values),
signal[time_step - 2:time_step], signalType='shares')
reward = np.max((bt.data['price'].iloc[-1] - bt.data['price'].iloc[-2]) * bt.data['shares'].iloc[-1])
except:
pass
if terminal_state == 1 and eval == True:
bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata], index=signal.index.values), signal, signalType='shares')
reward = bt.pnl.iloc[-1]
plt.figure(figsize=(9, 16))
bt.plotTrades()
plt.axvline(x=400, color='black', linestyle='--')
plt.text(250, 400, 'training data')
plt.text(450, 400, 'test data')
plt.suptitle(str(epoch))
plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png')
plt.close('all')
'''
# save a figure of the test set
plt.figure(figsize=(10, 25))
for i in range(xdata.T.shape[0]):
#frame = pd.concat(btFrame, axis=1)
bt = twp.Backtest(pd.Series(data=[x for x in xdata.T[i]], index=signal.index.values), signal, signalType='shares')
reward += np.max(bt.pnl.iloc[-1])
bt.plotTrades()
#plt.axvline(x=400, color='black', linestyle='--')
#plt.text(250, 400, 'training data')
#plt.text(450, 400, 'test data')
#plt.suptitle(str(epoch))
plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png', bbox_inches='tight', pad_inches=1, dpi=72)
plt.close('all')
'''
# print(time_step, terminal_state, eval, reward)
return reward
def evaluate_Q(eval_data, eval_model, epoch=0):
# This function is used to evaluate the performance of the system each epoch, without the influence of epsilon and random actions
signal = pd.Series(index=np.arange(len(eval_data)))
state, xdata, price_data = all_init_data()
status = 1
terminal_state = 0
time_step = 1
while (status == 1):
# We start in state S
qval = eval_model.predict(state, batch_size=batch_size)
action = (np.argmax(qval))
# Take action, observe new state S'
new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
# Observe reward
eval_reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state, eval=True,
epoch=epoch)
state = new_state
if terminal_state == 1: # terminal state
status = 0
return eval_reward
if __name__ == "__main__":
# This neural network is the the Q-function, run it like this:
# model.predict(state.reshape(1,64), batch_size=1)
batch_size = 7
num_features = 2544
epochs = 3
gamma = 0.95 # since the reward can be several time steps away, make gamma high
epsilon = 1
batchSize = 100
buffer = 200
replay = []
learning_progress = []
model = Sequential()
model.add(LSTM(64,
input_shape=(1, num_features),
return_sequences=True,
stateful=False))
model.add(Dropout(0.5))
model.add(LSTM(64,
input_shape=(1, num_features),
return_sequences=False,
stateful=False))
model.add(Dropout(0.5))
model.add(Dense(4, init='lecun_uniform'))
model.add(Activation('linear')) # linear output so we can have range of real-valued outputs
rms = RMSprop()
adam = Adam()
model.compile(loss='mse', optimizer=adam)
start_time = timeit.default_timer()
# read_convert_data(symbol='XBTEUR') #run once to read indata, resample and convert to pickle
astate, xdata, aprice_data = all_init_data()
bstate, test_data, test_price_data = all_init_data(test=True)
'''
bstate, test_data, test_price_data = all_init_data(test=True)
print(astate.shape)
print(bstate.shape)
print(xdata.shape)
print(test_data.shape)
print(price_data.shape)
print(test_price_data.shape)
'''
# stores tuples of (S, A, R, S')
h = 0
# signal = pd.Series(index=market_data.index)
signal = pd.Series(index=np.arange(len(xdata)))
for i in range(epochs):
if i == epochs - 1: # the last epoch, use the test data set
state, xdata, price_data = all_init_data(test=True)
else: # earlier epochs train on the training data set
state, xdata, price_data = all_init_data()
status = 1
terminal_state = 0
time_step = 5
# while game still in progress
while (status == 1):
# We are in state S
# Let's run our Q function on S to get Q values for all possible actions
print('epoch ' + str(i))
qval = model.predict(state, batch_size=batch_size)
if (random.random() < epsilon): # choose random action
action = np.random.randint(0, 4) # assumes 4 different actions
else: # choose best action from Q(s,a) values
action = (np.argmax(qval))
# Take action, observe new state S'
new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
# Observe reward
reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state)
print('new_state', new_state)
print('reward', reward)
# Experience replay storage
if (len(replay) < buffer): # if buffer not filled, add to it
replay.append((state, action, reward, new_state))
# print(time_step, reward, terminal_state)
else: # if buffer full, overwrite old values
if (h < (buffer - 1)):
h += 1
else:
h = 0
replay[h] = (state, action, reward, new_state)
# randomly sample our experience replay memory
minibatch = random.sample(replay, batchSize)
X_train = []
y_train = []
for memory in minibatch:
# Get max_Q(S',a)
old_state, action, reward, new_state = memory
old_qval = model.predict(old_state, batch_size=batch_size)
newQ = model.predict(new_state, batch_size=batch_size)
maxQ = np.max(newQ)
y = np.zeros((1, 4))
y[:] = old_qval[:]
if terminal_state == 0: # non-terminal state
update = (reward + (gamma * maxQ))
else: # terminal state
update = reward
# print('rewardbase', reward)
# print('update', update)
y[0][action] = update
# print(time_step, reward, terminal_state)
X_train.append(old_state)
y_train.append(y.reshape(4, ))
X_train = np.squeeze(np.array(X_train), axis=(1))
y_train = np.array(y_train)
model.fit(X_train, y_train, batch_size=batchSize, epochs=100, verbose=0)
state = new_state
if terminal_state == 1: # if reached terminal state, update epoch status
status = 0
eval_reward = evaluate_Q(test_data, model, i)
# eval_reward = value_iter(test_data, epsilon, epochs)
learning_progress.append(eval_reward)
print("Epoch #: %s Reward: %f Epsilon: %f" % (i, eval_reward, epsilon))
# learning_progress.append((reward))
if epsilon > 0.1: # decrement epsilon over time
epsilon -= (1.0 / epochs)
elapsed = np.round(timeit.default_timer() - start_time, decimals=2)
print("Completed in %f" % (elapsed,))
bt = twp.Backtest(pd.Series(data=[x[0] for x in test_price_data]), signal, signalType='shares')
bt.data['delta'] = bt.data['shares'].diff().fillna(0)
print(bt.data)
bt.data.to_csv('plt/knapsack_data.csv')
unique, counts = np.unique(list(filter(lambda v: v == v, signal.values)), return_counts=True)  # list() keeps this working under Python 3
print(np.asarray((unique, counts)).T)
plt.figure()
plt.subplot(3, 1, 1)
bt.plotTrades()
plt.subplot(3, 1, 2)
bt.pnl.plot(style='x-')
plt.subplot(3, 1, 3)
plt.plot(learning_progress)
print('to plot', learning_progress)
plt.savefig('plt/knapsack_summary' + '.png', bbox_inches='tight', pad_inches=1, dpi=72)
plt.show()
```
#### File: universal_portfolio/universal_portfolio/mimic_nn.py
```python
import os
import csv
import time
from time import clock
import sys
sys.path.append('/Users/jennyhung/MathfreakData/School/OMSCS_ML/Assign2/abagail_py/ABAGAIL/ABAGAIL.jar')
import jpype as jp
from util import process_data
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from array import *
from itertools import product
jp.startJVM(jp.getDefaultJVMPath(), '-ea', '-Djava.class.path=/Users/jennyhung/MathfreakData/School/OMSCS_ML/Assign2/abagail_py/ABAGAIL/ABAGAIL.jar')
jp.java.lang.System.out.println("hello world")
jp.java.func.nn.backprop.BackPropagationNetworkFactory
jp.java.func.nn.backprop.RPROPUpdateRule
jp.java.func.nn.backprop.BatchBackPropagationTrainer
jp.java.shared.SumOfSquaresError
jp.java.shared.DataSet
jp.java.shared.Instance
jp.java.opt.SimulatedAnnealing
jp.java.opt.example.NeuralNetworkOptimizationProblem
jp.java.opt.RandomizedHillClimbing
jp.java.ga.StandardGeneticAlgorithm
jp.java.func.nn.activation.RELU
jp.java.opt.example.NeuralNetworkEvaluationFunction
ContinuousPeaksEvaluationFunction = jp.JPackage('opt').example.ContinuousPeaksEvaluationFunction
BackPropagationNetworkFactory = jp.JPackage('func').nn.backprop.BackPropagationNetworkFactory
DataSet = jp.JPackage('shared').DataSet
SumOfSquaresError = jp.JPackage('shared').SumOfSquaresError
NeuralNetworkOptimizationProblem = jp.JPackage('opt').example.NeuralNetworkOptimizationProblem
RandomizedHillClimbing = jp.JPackage('opt').RandomizedHillClimbing
Instance = jp.JPackage('shared').Instance
RELU = jp.JPackage('func').nn.activation.RELU
NeuralNetworkEvaluationFunction = jp.JPackage('opt').example.NeuralNetworkEvaluationFunction
DiscreteDependencyTree = jp.JPackage('dist').DiscreteDependencyTree
DiscreteUniformDistribution = jp.JPackage('dist').DiscreteUniformDistribution
GenericProbabilisticOptimizationProblem = jp.JPackage('opt').prob.GenericProbabilisticOptimizationProblem
MIMIC = jp.JPackage('opt').prob.MIMIC
FixedIterationTrainer = jp.JPackage('shared').FixedIterationTrainer
def get_cv_set(data):
train, val = train_test_split(data, test_size=0.2)
return train, val
def initialize_instances(data, label):
"""Read the train.csv CSV data into a list of instances."""
instances = []
'''
# Read in the CSV file
with open(infile, "r") as dat:
next(dat)
reader = csv.reader(dat)
for row in reader:
instance = Instance([float(value) for value in row[:-1]])
instance.setLabel(Instance(float(row[-1])))
instances.append(instance)
'''
for i in range(len(data)):
#instance = Instance([float(value) for value in data[i][:-1]])
instance = Instance([float(value) for value in data[i]])
instance.setLabel(Instance(float(label[i])))
instances.append(instance)
return instances
def run_mimic():
"""Run this experiment"""
datapath = 'util/stock_dfs/'
outfile = 'Results/randopts_@ALG@_@N@_LOG.txt'
# INPUT_LAYER = 451
# OUTPUT_LAYER = 3
OUTFILE = 'MIMIC_LOG.txt'
N = 100
T = 49
maxIters = 10
numTrials = 5
fill = [1] * N
ranges = array('i', fill)
all = process_data.merge_all_data(datapath)
train_set, val_set = get_cv_set(all)
train_inputdf, train_targetdf = process_data.embed(train_set)
train_labeled = process_data.process_target(train_targetdf)
training_ints = initialize_instances(train_inputdf, train_labeled['multi_class'])
factory = BackPropagationNetworkFactory()
measure = SumOfSquaresError()
relu = RELU()
#rule = RPROPUpdateRule()
classification_network = factory.createClassificationNetwork([2241, len(train_labeled['multi_class'])],relu)
data_set = DataSet(training_ints)
nnop = NeuralNetworkOptimizationProblem(data_set, classification_network, measure)
for t in range(numTrials):
for samples, keep, m in product([100], [50], [0.7, 0.9]):
fname = outfile.replace('@ALG@', 'MIMIC{}_{}_{}'.format(samples, keep, m)).replace('@N@', str(t + 1))
with open(fname, 'w') as f:
f.write('algo,trial,iterations,param1,param2,param3,fitness,time,fevals\n')
ef = NeuralNetworkEvaluationFunction(classification_network, data_set, measure)
#ef = ContinuousPeaksEvaluationFunction(50)
odd = DiscreteUniformDistribution(ranges)
df = DiscreteDependencyTree(m, ranges)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)
mimic = MIMIC(samples, keep, pop)
fit = FixedIterationTrainer(mimic, 10)
times = [0]
for i in range(0, maxIters, 10):
print(i)
start = clock()
fit.train()
elapsed = time.clock() - start
times.append(times[-1] + elapsed)
fevals = ef.fevals
score = ef.value(mimic.getOptimal())
ef.fevals -= 1
st = '{},{},{},{},{},{},{},{},{}\n'.format('MIMIC', t, i, samples, keep, m, score, times[-1], fevals)
print(st)
with open(fname, 'a') as f:
f.write(st)
if __name__ == "__main__":
run_mimic()
jp.shutdownJVM()
```
#### File: mimicry/mimicry/multiple_model.py
```python
import numpy as np
import pandas as pd
from datetime import datetime
import time
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import LinearSVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV, ShuffleSplit, learning_curve, validation_curve
from sklearn.ensemble import GradientBoostingClassifier
from joblib import Parallel, delayed
pd.set_option('display.max_columns', None)
import get_all_data
import mimic
import networkx as nx
import random
from scipy import stats
from sklearn.metrics import mutual_info_score
np.set_printoptions(precision=4)
models = {
#'DecisionTree': DecisionTreeClassifier(class_weight='balanced'),
'NeuralNetwork': MLPClassifier(verbose=5, hidden_layer_sizes=(109, 76, 76, 76, 76)),
#'GradientBoosting': GradientBoostingClassifier(max_depth=1, n_estimators=50),
#'SupportVectorMachine': LinearSVC(class_weight='balanced'),
#'KNearestNeighbor': KNeighborsClassifier(n_neighbors=5)
}
params1 = {
#'DecisionTree': {'max_depth': [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]},
'NeuralNetwork': {'validation_fraction': [0.1, 0.25, 0.33, 0.5, 0.75, 0.9]},
#'GradientBoosting': {'max_depth': [1, 2, 3]},
#'SupportVectorMachine': {'C': [0.001, 0.01, 0.1, 1, 10, 100]},
#'KNearestNeighbor': {'n_neighbors': [3,7,11]}
}
'''
class EstimatorSelectionHelper:
def __init__(self, models, params):
if not set(models.keys()).issubset(set(params.keys())):
missing_params = list(set(models.keys()) - set(params.keys()))
raise ValueError("Some estimators are missing parameters: %s" % missing_params)
self.models = models
self.params = params
self.keys = models.keys()
self.grid_searches = {}
def fit(self, X, y, cv=10, n_jobs=-1, verbose=5, scoring=None, refit=True):
for key in self.keys:
print("Running GridSearchCV for %s." % key)
model = self.models[key]
params = self.params[key]
gs = GridSearchCV(model, params, cv=cv, n_jobs=n_jobs,
verbose=verbose, scoring=scoring, refit=refit)
gs.fit(X, y)
self.grid_searches[key] = gs
def score_summary(self, sort_by='mean_score'):
def row(key, scores, params):
d = {
'estimator': key,
'min_score': min(scores),
'max_score': max(scores),
'mean_score': scores.mean(),
'std_score': scores.std()
}
return pd.Series({**params, **d})
rows = [row(k, gsc.cv_validation_scores, gsc.parameters)
for k in self.keys
for gsc in self.grid_searches[k].grid_scores_]
df = pd.concat(rows, axis=1).T.sort_values([sort_by], ascending=False)
columns = ['estimator', 'min_score', 'mean_score', 'max_score', 'std_score']
columns = columns + [c for c in df.columns if c not in columns]
print(df[columns])
return df[columns]
'''
def plot_complexity_curve(estimator, title, X, y, param_name, param_range, cv=None,
n_jobs=1):
plt.figure()
plt.title(title)
plt.title("Validation Curves")
plt.xlabel(param_name)
plt.ylabel("Score")
train_scores, test_scores = validation_curve(
estimator, X, y, param_name=param_name, param_range=param_range,
cv=3, scoring="roc_auc", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
lw = 2
plt.semilogx(param_range, train_scores_mean, label="Training score",
color="darkorange", lw=lw)
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2,
color="darkorange", lw=lw)
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="navy", lw=lw)
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2,
color="navy", lw=lw)
plt.legend(loc="best")
return plt
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=-1, train_sizes=np.linspace(.1, 1.0, 5)):
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes, scoring='roc_auc')
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
'''
def complexity():
helper1 = EstimatorSelectionHelper(models, params2)
all_data = get_all_data.get_all_data()
train, target = get_all_data.process_data(all_data)
training_features, test_features, \
training_target, test_target, = train_test_split(train, target, test_size=0.33, random_state=778)
X_train, X_val, y_train, y_val = train_test_split(training_features, training_target)
helper1.fit(X_train, y_train, scoring='f1', n_jobs=1)
helper1.score_summary(sort_by='min_score')
'''
all_data = get_all_data.get_all_data()
train, target = get_all_data.process_data(all_data)
samples = train[0:4500]
print(samples)
distribution = mimic.Distribution(samples)
print('distribution', distribution)
distribution._generate_bayes_net()
for node_ind in distribution.bayes_net.nodes():
print(distribution.bayes_net.node[node_ind])
pos = nx.spring_layout(distribution.spanning_graph)
edge_labels = dict(
[((u, v,), d['weight'])
for u, v, d in distribution.spanning_graph.edges(data=True)])
nx.draw_networkx(distribution.spanning_graph, pos)
nx.draw_networkx_edge_labels(
distribution.spanning_graph,
pos,
edge_labels=edge_labels)
plt.show()
'''
for model in models:
title = model
cv = ShuffleSplit(n_splits=5, test_size=0.33)
print(title)
#plot_learning_curve(models[model], title, train, target, cv=cv, n_jobs=1)
plot_complexity_curve(models[model], title, train, target, list(params1[model].keys())[0], list(params1[model].values())[0], cv=3, n_jobs=-1)
plt.show()
'''
```
#### File: rrl_trading/01_python/tradingrrl_multi_noshorts_weights_multiplicative.py
```python
import sys
import os
stage='/Users/Shared/Jenkins/Home/workspace/Test1/'
stage1='/Users/Shared/Jenkins/Home/workspace/Test2/'
sys.path.append(stage)
sys.path.append(stage1)
import time
import numpy as np
import pandas as pd
from datetime import datetime as dt
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import heapq
import collections
def load_bench(bench):
mu = 100
tmp = pd.read_csv(bench, header=0, low_memory=False)
tmp.set_index('Date', inplace=True)
tmp = tmp['Adj Close'][1001:]
bench = mu * (1 + tmp.pct_change()).cumprod()
# self.bench = self.mu * np.diff(tmp, axis=0).cumsum()
print('bench', bench)
pd.DataFrame(bench).to_csv('bench.csv')
return bench
def load_csv_test(fname):
tmp = pd.read_csv(fname, header=0, low_memory=False)
print(tmp.head())
tmp.replace(0, np.nan, inplace=True)
tmp.dropna(axis=1, how='any', inplace=True)
print('effect check', tmp.shape)
tickers_list = tmp.columns.values
print('ticker_list', len(tickers_list[1:]), tickers_list[1:])
tmp_tstr = tmp['Unnamed: 0']
# tmp_t = [dt.strptime(tmp_tstr[i], '%Y.%m.%d') for i in range(len(tmp_tstr))]
# tmp_t = [dt.strptime(tmp_tstr[i], '%m/%d/%y') for i in range(len(tmp_tstr))]
tmp_t = [dt.strptime(tmp_tstr[i], '%Y-%m-%d') for i in range(len(tmp_tstr))]
tmp_p = tmp.iloc[:, 1:]
all_t = np.array(tmp_t) # [::-1]
all_p = np.array(tmp_p) # .reshape((1, -1))[0] # [::-1]
print('all_p shape', all_p.shape)
return all_t, all_p, tickers_list
class TradingRRL(object):
def __init__(self, T=1000, thisT = 1000, M=300, thisM = 300, N=0, init_t=10000, mu=10000, sigma=0.04, rho=1.0, n_epoch=10):
self.T = T
self.thisT = thisT
self.M = M
self.thisM = thisM
self.N = N
self.TOP = 20
self.threshold = 0.0
self.init_t = init_t
self.mu = mu
self.sigma = sigma
self.rho = rho
self.all_t = None
self.all_p = None
self.t = None
self.p = None
self.bench = None
self.r = None
self.x = np.zeros([T, M + 2])
self.F = np.zeros((T + 1, N))
self.FS = np.zeros((T + 1, N))
self.R = np.zeros((T, N))
self.w = np.ones((M + 2, N))
self.w_opt = np.ones((M + 2, N))
self.epoch_S = pd.DataFrame()
self.n_epoch = n_epoch
self.progress_period = 100
self.q_threshold = 0.5
self.b = np.ones((T+1, N))
self.total = None
self.bench = None
self.tickers_list = None
self.ticker_data = collections.defaultdict(dict)
def quant(self, f):
fc = f.copy()
fc[np.where(np.abs(fc) < self.q_threshold)] = 0
#return np.sign(fc)
return fc
def softmax(self, x):
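# NB. np.sqrt(x*x) below is the element-wise absolute value, so the divisor is
# the sum of |x_i| (an L1 norm) despite the variable name; the true softmax
# lines are left commented out.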
l2_norm = np.sqrt(x*x).sum()
return x/l2_norm
#e_x = np.exp(x)
#return e_x / e_x.sum()
def set_t_p_r(self, train_phase=True):
if train_phase:
self.t = self.all_t[self.init_t:self.init_t + self.T + self.M + 1]
self.p = self.all_p[self.init_t:self.init_t + self.T + self.M + 1,:] ## TODO: add column dimension for assets > 1
print('p dimension', self.p.shape)
#self.r = -np.diff(self.p, axis=0)
firstr = np.zeros((1, self.p.shape[1]))
self.r = np.diff(self.p, axis=0)/self.p[:-1]
self.r = np.concatenate((firstr, self.r), axis=0)
print('r dimension', self.r.shape)
pd.DataFrame(self.r).to_csv("smallr.csv", header=False, index=False)
else:
self.t = self.all_t[self.init_t:self.init_t + self.thisT + self.thisM + 1]
self.p = self.all_p[self.init_t:self.init_t + self.thisT + self.thisM + 1,:] ## TODO: add column dimension for assets > 1
print('p dimension', self.p.shape)
# self.r = -np.diff(self.p, axis=0)
firstr = np.zeros((1, self.p.shape[1]))
self.r = np.diff(self.p, axis=0) / self.p[:-1]
self.r = np.concatenate((firstr, self.r), axis=0)
def set_x_F(self, train_phase=True):
if train_phase:
for i in range(self.T - 1, -1, -1):
self.x[i] = np.zeros(self.M + 2)
self.x[i][0] = 1.0
self.x[i][self.M + 2 - 1] = self.F[i+1,-1] ## TODO: i used -1 on column
for j in range(1, self.M + 2 - 1, 1):
#self.x[i][j] = self.r[i+ j - 1,0] ## TODO: i used -1 on column:
self.x[i,j] = self.r[i + (j-1), -1] ## TODO: i used -1 on column; and must deal with j
self.F[i] = self.quant(np.tanh(np.dot(self.x[i], self.w)+self.b[i])) ## TODO: test this
else:
thisw = np.ones((self.thisM+2, self.N))
self.x = np.zeros([self.thisT, self.thisM + 2])
self.F = np.zeros((self.thisT + 1, self.N))
for i in range(self.thisT - 1, -1, -1):
self.x[i] = np.zeros(self.thisM + 2)
self.x[i][0] = 1.0
self.x[i][self.thisM + 2 - 1] = self.F[i+1,-1] ## TODO: i used -1 on column
for j in range(1, self.thisM + 2 - 1, 1):
#self.x[i][j] = self.r[i+ j - 1,0] ## TODO: i used -1 on column:
self.x[i,j] = self.r[i + (j-1), -1] ## TODO: i used -1 on column; and must deal with j
self.F[i] = self.quant(np.tanh(np.dot(self.x[i], thisw)+self.b[i])) ## TODO: test this
def calc_R(self):
#self.R = self.mu * (np.dot(self.r[:self.T], self.F[:,1:]) - self.sigma * np.abs(-np.diff(self.F, axis=1)))
#self.R = self.mu * (self.r[:self.T] * self.F[1:]) - self.sigma * np.abs(-np.diff(self.F, axis=0))
#self.R = self.mu * (np.multiply(self.F[1:,], np.reshape(self.r[:self.T], (self.T, -1)))) * (self.sigma) * np.abs(-np.diff(self.F, axis=0))
self.R = ((np.multiply(self.F[1:, ], np.reshape(0+self.r[:self.T], (self.T, -1)))) * (1-self.sigma * np.abs(-np.diff(self.F, axis=0))))
pd.DataFrame(self.R).to_csv('R.csv')
def calc_sumR(self):
self.sumR = np.cumsum(self.R[::-1], axis=0)[::-1] ## TODO: cumsum axis
#self.sumR = np.cumprod(self.R[::-1], axis=0)[::-1] ## TODO: cumsum axis
self.sumR2 = np.cumsum((self.R[::-1] ** 2), axis=0)[::-1] ## TODO: cumsum axis
#self.sumR2 = np.cumprod((self.R[::-1] ** 2), axis=0)[::-1] ## TODO: cumsum axis
#print('cumprod', self.sumR)
def calc_dSdw(self, train_phase=True):
if not train_phase:
self.T = self.thisT
self.M = self.thisM
self.set_x_F(train_phase=train_phase)
self.calc_R()
self.calc_sumR()
self.Sall = np.empty(0) # a list of period-to-date sharpe ratios, for all n investments
self.dSdw = np.zeros((self.M + 2, self.N))
for j in range(self.N):
self.A = self.sumR[0,j] / self.T
self.B = self.sumR2[0,j] / self.T
#self.A = self.sumR / self.T
#self.B = self.sumR2 / self.T
self.S = self.A / np.sqrt(self.B - (self.A ** 2))
#self.S = ((self.B[1:,j]*np.diff(self.A[:,j], axis=0)-0.5*self.A[1:,j]*np.diff(self.B[:,j], axis=0))/ (self.B[1,j] - (self.A[1,j] ** 2))**(3/2))[1]
#self.S = (self.B[1,j] - (self.A[1,j] ** 2))**(3/2)
#print('sharpe checl', np.isnan(self.r).sum())
self.dSdA = self.S * (1 + self.S ** 2) / self.A
self.dSdB = -self.S ** 3 / 2 / self.A ** 2
self.dAdR = 1.0 / self.T
self.dBdR = 2.0 / self.T * self.R[:,j]
self.dRdF = -self.mu * self.sigma * np.sign(-np.diff(self.F, axis=0))
self.dRdFp = self.mu * self.r[:self.T] + self.mu * self.sigma * np.sign(-np.diff(self.F, axis=0)) ## TODO: r needs to be a matrix if assets > 1
self.dFdw = np.zeros(self.M + 2)
self.dFpdw = np.zeros(self.M + 2)
#self.dSdw = np.zeros((self.M + 2, self.N)) ## TODO: should not have put this here. this resets everytime
self.dSdw_j = np.zeros(self.M + 2)
for i in range(self.T - 1, -1, -1):
if i != self.T - 1:
self.dFpdw = self.dFdw.copy()
self.dFdw = (1 - self.F[i,j] ** 2) * (self.x[i] + self.w[self.M + 2 - 1,j] * self.dFpdw)
self.dSdw_j += (self.dSdA * self.dAdR + self.dSdB * self.dBdR[i]) * (
self.dRdF[i,j] * self.dFdw + self.dRdFp[i,j] * self.dFpdw)
self.dSdw[:, j] = self.dSdw_j
self.Sall = np.append(self.Sall, self.S)
def update_w(self):
self.w += self.rho * self.dSdw
def get_investment_weights(self, train_phase=True):
if not train_phase:
self.FS = np.zeros((self.thisT + 1, self.N))
for i in range(self.FS.shape[0]):
self.FS[i] = np.multiply(self.F[i], self.Sall)
tmp = np.apply_along_axis(self.select_n, 1, self.FS) # TODO: consider taking the abs(): magnitude
F1 = np.apply_along_axis(self.softmax, 1, tmp)
print('MAKE F1', F1.shape)
print('see F1', F1)
print('see R', self.R)
mask = F1 != 0
_, j = np.where(mask)
for ji in set(j):
self.ticker_data[self.tickers_list[ji]]['inv weight'] = F1[-2, ji]
self.ticker_data[self.tickers_list[ji]]['return'] = self.R[-2, ji]
print(self.ticker_data)
return F1
def select_n(self, array):
threshold = max(heapq.nlargest(self.TOP, array)[-1], self.threshold)
new_array = [x if x >= threshold else 0 for x in array]
return new_array
def fit(self):
pre_epoch_times = len(self.epoch_S)
self.calc_dSdw()
print("Epoch loop start. Initial sharp's ratio is " + str(np.mean(self.Sall)) + ".")
print('s len', len(self.Sall))
self.S_opt = self.Sall
tic = time.clock()
for e_index in range(self.n_epoch):
self.calc_dSdw()
if np.mean(self.Sall) > np.mean(self.S_opt):
self.S_opt = self.Sall
self.w_opt = self.w.copy()
#self.Sall = np.apply_along_axis(self.select_n, 0, self.Sall) # TODO: don't do this here
self.epoch_S[e_index] = np.array(self.S_opt)
self.update_w()
if e_index % self.progress_period == self.progress_period - 1:
toc = time.clock()
print("Epoch: " + str(e_index + pre_epoch_times + 1) + "/" + str(
self.n_epoch + pre_epoch_times) + ". Shape's ratio: " + str(self.Sall[self.Sall.nonzero()].mean()) + ". Elapsed time: " + str(
toc - tic) + " sec.")
toc = time.clock()
print("Epoch: " + str(e_index + pre_epoch_times + 1) + "/" + str(
            self.n_epoch + pre_epoch_times) + ". Sharpe ratio after iteration: " + str(self.S_opt[self.S_opt.nonzero()].mean()) + ". Elapsed time: " + str(
toc - tic) + " sec.")
self.w = self.w_opt.copy()
self.calc_dSdw()
        print("Epoch loop end. Optimized Sharpe ratio is " + str(self.S_opt[self.S_opt.nonzero()].mean()) + ".")
print('first check', self.Sall)
print('now check', self.epoch_S)
print('R dimension', self.R.shape)
def save_weight(self, train_phase=True):
if train_phase:
self.F1 = self.get_investment_weights()
pd.DataFrame(self.w).to_csv("w.csv", header=False, index=False)
self.epoch_S.to_csv("epoch_S.csv", header=False, index=False)
pd.DataFrame(self.F).to_csv("f.csv", header=False, index=False)
pd.DataFrame(self.FS).to_csv("fs.csv", header=False, index=False)
pd.DataFrame(self.F1).to_csv("f1.csv", header=False, index=False)
else:
self.F1 = self.get_investment_weights(train_phase=False)
pd.DataFrame().from_dict(self.ticker_data).T.to_csv('ticker_data.csv')
def load_weight(self):
tmp = pd.read_csv("w.csv", header=None)
self.w = tmp.T.values[0]
def get_investment_sum(self, train_phase=True):
firstR = np.zeros((1,self.p.shape[1]))
self.R = np.concatenate((firstR, self.R), axis=0)
tmp = np.multiply(self.R, self.F1)
self.total = self.mu * ((1+tmp.sum(axis=1)).cumprod(axis=0))
print('iam here', self.total.shape, self.total)
if train_phase:
pd.DataFrame(self.total).to_csv('investment_sum.csv')
else:
pd.DataFrame(self.total).to_csv('investment_sum_testphase.csv')
def main():
#fname = '../../util/stock_dfs/A.csv'
#fname = 'USDJPY30.csv'
bench = stage+'SPY.csv'
fname = stage1+'all_data_todate.csv'
all_t, all_p, tickers_list = load_csv_test(fname)
bench = load_bench(bench)
init_t = 1001 #1001
M = 200
thisM = 20
T = 1000
thisT = all_p.shape[0]-(init_t+T+M)-thisM
N = all_p.shape[1]
mu = 100#bench[init_t]
sigma = 0.04
rho = 1.0
n_epoch = 100
# RRL agent with initial weight.
ini_rrl = TradingRRL(T, thisT, M, thisM, N, init_t, mu, sigma, rho, n_epoch) ## TODO: init_t is really a change point!!!
ini_rrl.all_t = all_t
ini_rrl.all_p = all_p
ini_rrl.bench = bench
ini_rrl.tickers_list = tickers_list
ini_rrl.set_t_p_r()
ini_rrl.calc_dSdw()
# RRL agent for training
rrl = TradingRRL(T, thisT, M, thisM, N, init_t, mu, sigma, rho, n_epoch)
rrl.all_t = ini_rrl.all_t
rrl.all_p = ini_rrl.all_p
rrl.tickers_list = ini_rrl.tickers_list
rrl.set_t_p_r()
rrl.fit()
rrl.save_weight()
rrl.get_investment_sum()
# Plot results.
# Training for initial term T.
fig, ax = plt.subplots(nrows=2, figsize=(15, 10))
t = np.linspace(0, ini_rrl.bench.shape[0], ini_rrl.bench.shape[0])
print('x len', len(t[init_t:init_t + rrl.T+1]))
print('y len', rrl.total.shape[0])
print('x1 len', len(t[init_t:init_t + rrl.T + 1]))
print('y2 len', ini_rrl.bench.shape[0])
ax[0].plot(t[:ini_rrl.T], ini_rrl.bench[:ini_rrl.T], color='red', label='Benchmark: training phase')
ax[0].plot(t[ini_rrl.T:], ini_rrl.bench[ini_rrl.T:], color='purple', label='Benchmark: after training phase')
ax[0].set_xlabel("time")
ax[0].set_ylabel("SPY")
ax[0].grid(True)
ax[1].plot(t[:ini_rrl.T], ini_rrl.bench[:ini_rrl.T], color='red', label='Benchmark: before start of training')
ax[1].plot(t[ini_rrl.T:], ini_rrl.bench[ini_rrl.T:], color='orange',label='Benchmark: start training')
ax[1].plot(t[:rrl.T+1], rrl.total, color="blue", label="With optimized weights")
ax[1].set_xlabel("time")
ax[1].set_ylabel("Total Invested")
ax[1].legend(loc="best")
ax[1].grid(True)
plt.savefig("rrl_training.png", dpi=300)
# Prediction for next term T with optimized weight.
# RRL agent with initial weight.
ini_rrl_f = TradingRRL(T, thisT, M, thisM, N, init_t+T, mu, sigma, rho, n_epoch)
ini_rrl_f.all_t = ini_rrl.all_t
ini_rrl_f.all_p = ini_rrl.all_p
ini_rrl.tickers_list = ini_rrl.tickers_list
ini_rrl_f.set_t_p_r(train_phase=False)
ini_rrl_f.calc_dSdw(train_phase=False)
# RRL agent with optimized weight.
rrl_f = TradingRRL(T, thisT, M, thisM, N, init_t+T, mu, sigma, rho, n_epoch)
rrl_f.all_t = ini_rrl.all_t
rrl_f.all_p = ini_rrl.all_p
rrl_f.tickers_list = ini_rrl.tickers_list
rrl_f.set_t_p_r(train_phase=False)
rrl_f.w = rrl.w
rrl_f.calc_dSdw(train_phase=False)
rrl_f.save_weight(train_phase=False)
rrl_f.get_investment_sum(train_phase=False)
fig, ax = plt.subplots(nrows=2, figsize=(15, 10))
t = np.linspace(0, ini_rrl.bench.shape[0], ini_rrl.bench.shape[0])
print('what is this', ini_rrl.bench.shape)
print('len check', len(t))
print('check len1', len(t[:rrl_f.T]))
print('check len2', len(t[rrl_f.T:]))
print('check len3', len(ini_rrl.bench[:rrl_f.T]))
print('check len4', len(ini_rrl.bench[rrl_f.T:]))
ax[0].plot(t[:rrl_f.T], ini_rrl.bench[:rrl_f.T], color='red', label='Benchmark: training phase')
ax[0].plot(t[rrl_f.T:], ini_rrl.bench[rrl_f.T:], color='orange', label='Benchmark: post-training phase')
ax[0].set_xlabel("time")
ax[0].set_ylabel("SPY: benchmark")
ax[0].grid(True)
print('len check b', rrl.total.shape)
print('len check b1', rrl_f.total.shape)
ax[1].plot(t[:rrl_f.T], ini_rrl.bench[:rrl_f.T], color='red', label='Benchmark: training phase')
ax[1].plot(t[rrl_f.T:], ini_rrl.bench[rrl_f.T:], color='orange', label='Benchmark: post-training phase')
ax[1].plot(t[:rrl.total.shape[0]], rrl.total, color="blue", label="With optimized weights: before day 1000")
    ax[1].plot(t[rrl.total.shape[0]:rrl.total.shape[0]+rrl_f.total.shape[0]], rrl_f.total, color="green", label="With optimized weights: after day 1000")
ax[1].set_xlabel("time")
ax[1].set_ylabel("Total Investment")
ax[1].legend(loc="best")
ax[1].grid(True)
plt.savefig("rrl_prediction.png", dpi=300)
fig.clear()
if __name__ == "__main__":
main()
```
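The gradient code in `calc_dSdw` above relies on the closed-form Sharpe-ratio derivatives, with A = mean(R), B = mean(R**2) and S = A / sqrt(B - A**2). Below is a small standalone sanity check of those two formulas; it is not part of the repository and uses purely synthetic returns.
```python
# Finite-difference check of dS/dA = S*(1 + S**2)/A and dS/dB = -S**3/(2*A**2),
# the same expressions used in calc_dSdw above.
import numpy as np

rng = np.random.default_rng(0)
R = rng.normal(0.05, 0.2, size=1000)        # synthetic per-period returns
A, B = R.mean(), (R ** 2).mean()
S = A / np.sqrt(B - A ** 2)

dSdA = S * (1 + S ** 2) / A
dSdB = -S ** 3 / (2 * A ** 2)

eps = 1e-8
num_dSdA = ((A + eps) / np.sqrt(B - (A + eps) ** 2) - S) / eps
num_dSdB = (A / np.sqrt((B + eps) - A ** 2) - S) / eps
assert np.isclose(dSdA, num_dSdA, rtol=1e-3)
assert np.isclose(dSdB, num_dSdB, rtol=1e-3)
```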
#### File: universal_portfolio/util/process_data.py
```python
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
datapath = '../util/stock_dfs/'
def get_ticker(x):
return x.split('/')[-1].split('.')[0]
def ret(x, y):
return np.log(y/x)
def get_zscore(x):
return (x -x.mean())/x.std()
def make_inputs(filepath):
D = pd.read_csv(filepath).set_index('Date')
#D.index = pd.to_datetime(D.index,format='%Y-%m-%d') # Set the indix to a datetime
Res = pd.DataFrame()
ticker = get_ticker(filepath)
Res['c_2_o'] = get_zscore(ret(D.Open,D.Close))
Res['h_2_o'] = get_zscore(ret(D.Open,D.High))
Res['l_2_o'] = get_zscore(ret(D.Open,D.Low))
Res['c_2_h'] = get_zscore(ret(D.High,D.Close))
Res['h_2_l'] = get_zscore(ret(D.High,D.Low))
Res['c1_c0'] = ret(D.Close,D.Close.shift(-1)).fillna(0) #Tommorows return
Res['vol'] = get_zscore(D.Volume)
Res['ticker'] = ticker
return Res
def merge_all_data(datapath):
all = pd.DataFrame()
for f in os.listdir(datapath):
filepath = os.path.join(datapath,f)
if filepath.endswith('.csv'):
print(filepath)
Res = make_inputs(filepath)
all = all.append(Res)
return all
def embed(df, str):
"str: choice of return, class, multi_class"
pivot_columns = df.columns[:-1]
P = df.pivot_table(index=df.index, columns='ticker', values=pivot_columns) # Make a pivot table from the data
mi = P.columns.tolist()
new_ind = pd.Index(e[1] + '_' + e[0] for e in mi)
P.columns = new_ind
clean_and_flat = P.dropna(axis=1)
target_cols = list(filter(lambda x: 'c1_c0' in x, clean_and_flat.columns.values))
input_cols = list(filter(lambda x: 'c1_c0' not in x, clean_and_flat.columns.values))
inputDF = clean_and_flat[input_cols]
targetDF = clean_and_flat[target_cols]
TotalReturn = ((1 - np.exp(targetDF)).sum(axis=1)) / len(targetDF.columns) # If i put one dollar in each stock at the close, this is how much I'd get back
Labeled = pd.DataFrame()
Labeled['return'] = TotalReturn
Labeled['class'] = TotalReturn.apply(labeler, 1)
Labeled['multi_class'] = pd.qcut(TotalReturn, 11, labels=range(11))
pd.qcut(TotalReturn, 5).unique()
return inputDF, Labeled[str]
def labeler(x):
if x>0.0029:
return 1
if x<-0.00462:
return -1
else:
return 0
'''
if __name__ == "__main__":
all = merge_all_data(datapath)
inputdf, targetdf = embed(all)
labeled = process_target(targetdf)
print(inputdf.head())
print(labeled.head())
'''
```
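For reference, a minimal driver for the helpers above would look like the sketch below; the commented-out block at the end predates the current two-argument `embed` signature, and the data directory and label choice here are illustrative only.
```python
# Assumes per-ticker OHLCV CSVs with a 'Date' column live under `datapath`.
all_df = merge_all_data(datapath)
inputDF, labels = embed(all_df, 'multi_class')   # second arg: 'return', 'class' or 'multi_class'
print(inputDF.shape)
print(labels.value_counts())
```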
#### File: universal_portfolio/util/process_sp500.py
```python
import os
import pandas as pd
def get_ticker(x):
return x.split('/')[-1].split('.')[0]
def get_benchmark():
return 'SPY'
def read_file(file, test=None):
d = pd.read_csv(file).set_index('Date')
d.fillna(0, inplace=True)
ticker = get_ticker(file)
d.rename(columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Close': 'close', 'Adj Close': ticker,
'Volume': 'volume'},
inplace=True)
d.drop(labels=['open', 'high', 'low', 'close', 'volume'], axis=1, inplace=True)
return d
def all_init_data():
#filepath = stage2
filepath = '/Users/Shared/Jenkins/Home/workspace/Test1/'
alldata = []
for f in os.listdir(filepath):
datapath = os.path.join(filepath, f)
print(datapath)
if datapath.endswith('.csv') and 'SPY' not in datapath:
print(datapath)
Res = read_file(datapath)
Res.reset_index(inplace=True)
Res.drop_duplicates('Date', keep='first', inplace=True)
Res.set_index('Date', inplace=True)
alldata.append(Res)
alldata = pd.concat(alldata, axis=1)
alldata.fillna(0, inplace=True)
alldata
#alldata.to_csv('../rrl_trading/01_python/all_data_todate.csv')
alldata.to_csv('all_data_todate.csv')
all_init_data()
```
|
{
"source": "JehunYoo/opendart",
"score": 2
}
|
#### File: opendart/api/finance.py
```python
import requests
from opendart.config import SINGLE_FS_XML, QUATER_REPORT_KEYS, FS_DIV_KEYS
from opendart.auth.api import DartAPI
from opendart.objects.fs import FinancialStatement
from opendart.utils.url import make_url
def get_fs(corp: str, year: int, quater: int, div: int = 1) -> FinancialStatement:
"""
    Request a single-company financial statement from the OpenDART API.
"""
url = make_url(
api_code=SINGLE_FS_XML,
crtfc_key=DartAPI().api_key,
corp_code=corp,
bsns_year=year,
reprt_code=QUATER_REPORT_KEYS[quater],
fs_div=FS_DIV_KEYS[div]
)
response = requests.get(url)
response.raise_for_status()
return FinancialStatement(response.text, corp, year, quater, div)
```
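A call to `get_fs` might look like the sketch below; the corp code is a placeholder, and a DART API key is assumed to already be registered with `DartAPI`.
```python
from opendart.api.finance import get_fs

# Hypothetical usage: '00126380' stands in for a real corp_code, and `quater`
# and `div` are translated via QUATER_REPORT_KEYS / FS_DIV_KEYS from opendart.config.
fs = get_fs(corp="00126380", year=2020, quater=4, div=1)
print(type(fs).__name__)   # FinancialStatement
```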
#### File: opendart/utils/url.py
```python
from opendart.config import BASE_URL
def make_url(api_code, base=BASE_URL, **kwargs) -> str:
url = base + api_code + '?'
for key, value in kwargs.items():
url += f"{key}={value}&"
return url
```
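`make_url` simply concatenates the keyword arguments into a query string. An illustration of the shape it produces, using a placeholder base and endpoint (the real values come from `opendart.config`):
```python
from opendart.utils.url import make_url

url = make_url("list.json", base="https://opendart.fss.or.kr/api/",
               crtfc_key="MY_KEY", corp_code="00126380")
print(url)
# -> "https://opendart.fss.or.kr/api/list.json?crtfc_key=MY_KEY&corp_code=00126380&"
# The trailing '&' is harmless to requests; urllib.parse.urlencode would avoid it.
```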
|
{
"source": "jehuty-python/transmute",
"score": 2
}
|
#### File: jehuty-python/transmute/splunk-logger.py
```python
import logging
from sys import platform, getfilesystemencoding
from os import uname, environ
from collections import namedtuple
from jproperties import Properties
from splunk_hec_handler import SplunkHecHandler
# setup logger utility for this script
logging.basicConfig(filename='transmute.log', filemode='w',format='%(asctime)s - PID:%(process)d - %(name)s - %(message)s', level=logging.INFO)
# global vars
SYSTEM_OS = platform
ENCODING = getfilesystemencoding()
# create logger specifically for splunk data
splunk_logger = logging.getLogger('splunk_logger')
splunk_logger.setLevel(logging.DEBUG)
# create and add log stream handler to it
stream_handler = logging.StreamHandler()
stream_handler.level = logging.DEBUG
splunk_logger.addHandler(stream_handler)
# splunk token
token = "EA<PASSWORD>"
# Create Handler to push data to Splunk HTTP Event Collector
splunk_handler = SplunkHecHandler('sample.splunk.domain.com',
token, index="hec",
port=8080, proto='http', ssl_verify=False,
source="evtx2json", sourcetype='xxxxxxxx_json')
splunk_logger.addHandler(splunk_handler)
# add additional fields and corresponding values to splunk
dict_obj = {'fields': {'color': 'yellow', 'api_endpoint': '/results', 'host': 'app01', 'index':'hec'},
'user': 'foobar', 'app': 'my demo', 'severity': 'low', 'error codes': [1, 23, 34, 456]}
# send sample data to splunk_logger
splunk_logger.info(dict_obj)
# specify splunk ingestion parameters adhoc like so:
log_summary_evt = {'fields': {'index': 'adhoc', 'sourcetype': '_json', 'source': 'adv_example'}, 'exit code': 0, 'events logged': 100}
splunk_logger.debug(log_summary_evt)
# load java properties
p = Properties()
jpfile = '/home/kafka/apps/kafka/config/log4j.properties'
with open(jpfile, 'rb') as f:
p.load(f, ENCODING)
# add to dictionary
log4j_json = dict()
log4j_json['source_file'] = jpfile
log4j_json.update(p)
# send to splunk
splunk_logger.info({'fields': log4j_json})
def os_enrich(prune_output=True):
"""
returns dict of useful OS information
"""
osvars = uname()
os_data = { 'system_os': SYSTEM_OS,
'fs_enconding': ENCODING,
'sysname': osvars.sysname,
'nodename': osvars.nodename,
'machine': osvars.machine,
'os_version': osvars.version,
'os_release': osvars.release
}
return os_data
# send more data
splunk_logger.info({'fields': os_enrich()})
# you get the idea
splunk_logger.info({'fields': dict(environ)})
```
|
{
"source": "jehuty-works/haproxy-probe",
"score": 2
}
|
#### File: src/scripts/curl-wrapper.py
```python
# RESULT=$(curl -k -L --output /dev/null --silent --show-error --write-out 'time_namelookup: %{time_namelookup}\ntime_connect: %{time_connect}\ntime_appconnect: %{time_appconnect}\ntime_pretransfer: %{time_pretransfer}\ntime_redirect: %{time_redirect}\ntime_starttransfer: %{time_starttransfer}\ntime_total: %{time_total}' $URL)
import sys
import os
import csv
import datetime
import json
import subprocess
import argparse
from collections import OrderedDict
from socket import gethostname, gethostbyname
from time import sleep
def get_args():
"""
Parse CLI arguments (via sys.argv[]) and return arguments as a named tuple called 'Namespace'
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--poll-time','-t',
type=int,
default=int(os.environ['POLL_TIME']),
help='specify polling interval for curl')
parser.add_argument(
'--url','-u',
type=str,
help='specify remote URL')
parser.add_argument(
'--source-zone','-z',
type=str,
help='specify internal or external to openshift')
parser.add_argument(
'--f5-policy',
type=str,
default='',
help='specify terminate, reencrypt, passthrough; default ""')
parser.add_argument(
'--haproxy-policy',
type=str,
default='',
help='specify terminate, reencrypt, passthrough; default ""')
parser.add_argument(
'--description','-d',
type=str,
default='',
help='specify terminate, reencrypt, passthrough; default ""')
parser.add_argument(
'--insecure','-k',
action='store_true',
default=False,
help='tell script to ignore TLS validations')
parser.add_argument(
'--cacert',
type=str,
default=None,
help='specify cacert bundle location; default uses built-in cert')
# read specified file as search data
parser.add_argument(
'-f','--file',
type=str,
default=sys.stdin,
help='specify input file')
# allow optional output file (over/write only), defaults to stdout
parser.add_argument(
'-o','--output',
type=str,
help='specify output file')
parser.add_argument(
'--version','-V',
action='version',
version="""
%(prog)s 1.1,
Author: <NAME>;
Last Updated: 2020-08-10""")
parsed_args = parser.parse_args()
return parsed_args
def process_data(parsed_args):
"""
Build dataset of curl metrics and return OrderedDict()
"""
args = parsed_args
# store initial metrics in base_data
base_data = OrderedDict()
d = datetime.datetime.utcnow()
base_data.update({
"src_host": gethostname(),
"src_ip": gethostbyname(gethostname()),
"datetime": d.strftime('%Y-%m-%d %H:%M:%S')
})
# populate static metadata gathered from CLI or input file
if args.url:
base_data["remote_url"] = args.url
if isinstance(args.source_zone, str) and args.source_zone.lower() in ['internal','external']:
base_data["src_zone"] = args.source_zone
else:
base_data["src_zone"] = ''
if isinstance(args.f5_policy, str) and args.f5_policy.lower() in ['reencrypt','terminate','passthrough','none']:
base_data["f5_policy"] = args.f5_policy
else:
base_data["f5_policy"] = ''
if isinstance(args.haproxy_policy, str) and args.haproxy_policy.lower() in ['reencrypt','terminate','passthrough','none']:
base_data["haproxy_policy"] = args.haproxy_policy
else:
base_data["haproxy_policy"] = ''
if isinstance(args.description, str):
base_data["description"] = args.description
else:
base_data["description"] = ''
# pass cacert from args if it exists, else default to local cert
#curl_cacert = args.cacert if args.cacert else 'AllyCert.ca'
# generate dict of curl output
curl_data = parse_curl(curl_endpoint(args))
# add curl output to existing dict
base_data.update(curl_data)
output = json.dumps(base_data, sort_keys=True)
return output
def curl_endpoint(parsed_args):
"""
calls cURL in a sub-shell and returns the raw text output
"""
args = parsed_args
cmd_args = [
'curl',
'-L',
'--output','/dev/null',
'--silent',
'--show-error',
'--write-out', r'remote_ip: %{remote_ip}\nresponse_code: %{response_code}\nsslverify_result: %{ssl_verify_result}\ntime_namelookup: %{time_namelookup}\ntime_connect: %{time_connect}\ntime_appconnect: %{time_appconnect}\ntime_pretransfer: %{time_pretransfer}\ntime_redirect: %{time_redirect}\ntime_starttransfer: %{time_starttransfer}\ntime_total: %{time_total}']
# add additional curl args where needed:
if args.cacert is not None:
assert args.cacert, "TLS ERROR: --cacert requires a file name as an argument!"
cmd_args.extend(['--cacert',str(args.cacert)])
elif args.insecure and args.cacert is None:
cmd_args.append('-k')
# should be last:
cmd_args.append(args.url)
# create filehandle for /dev/null
FNULL = open(os.devnull, 'w')
# need to reuse output, so redirect stdout to PIPE
proc = subprocess.Popen(cmd_args, stdout=subprocess.PIPE, stderr=FNULL)
# reads from PIPE above
    output = proc.stdout.read().decode()  # decode bytes so parse_curl can split on str newlines
return output
def parse_curl(curl_output):
"""
parses raw string output from curl_endpoint and returns OrderedDict()
"""
curl_data = OrderedDict()
# each line has a standard format 'metric_name: value'
# creating dictionary key-value pairs for each line
curl_lines = curl_output.split("\n")
for line in curl_lines:
        metric_key, metric_val = line.split(":", 1)
metric_key = metric_key.strip()
metric_val = metric_val.strip()
curl_data.update({metric_key: metric_val})
return curl_data
def process_by_csv(parsed_args):
args = parsed_args
with open(args.file, 'r') as fh:
csvreader = csv.DictReader(fh)
for row in csvreader:
args.url = row['URL']
args.source_zone = row['SRC_ZONE']
args.f5_policy = row['F5_POLICY']
args.haproxy_policy = row['HAPROXY_POLICY']
args.description = row['DESCRIPTION']
print(process_data(args))
if __name__ == '__main__':
args = get_args()
if isinstance(args.file, str): # check to see if --file has been passed, will be sys.stdin if not
while True:
process_by_csv(args)
sleep(args.poll_time)
else:
print("Running in standalone mode...\n")
print(process_data(args))
```
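The CSV mode expects the column names read in `process_by_csv`. A hypothetical probe list and invocation follow; the host name is a placeholder. Note that the script reads the `POLL_TIME` environment variable while building its argument parser, so it must be set even when `-t` is passed.
```python
# Writes a probe list the wrapper can consume with:
#   POLL_TIME=60 python curl-wrapper.py -f probes.csv -t 60 -k
import csv

rows = [{"URL": "https://app.example.com/healthz",
         "SRC_ZONE": "internal",
         "F5_POLICY": "terminate",
         "HAPROXY_POLICY": "reencrypt",
         "DESCRIPTION": "app health endpoint"}]
with open("probes.csv", "w", newline="") as fh:
    writer = csv.DictWriter(fh, fieldnames=list(rows[0].keys()))
    writer.writeheader()
    writer.writerows(rows)
```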
|
{
"source": "JeHwanYoo/web-vision",
"score": 3
}
|
#### File: opencv/library/readimg.py
```python
import sqlite3
import os
import sys
from pathlib import Path
sys.path.append(os.path.dirname(__file__) + '/library')
def readimg(img_id):
db_path = Path(os.path.dirname(__file__) + '/../../store/database/images.db')
con = sqlite3.connect(db_path)
cur = con.cursor()
cur.execute('SELECT * FROM Image WHERE id = ?', [img_id])
row = cur.fetchone()
con.close()
return row
```
|
{
"source": "JeHyuckLee/evsim",
"score": 2
}
|
#### File: JeHyuckLee/evsim/Agent.py
```python
from behavior_model_executor import BehaviorModelExecutor
from system_message import SysMessage
from definition import *
from system_simulator import SystemSimulator
class Agent(BehaviorModelExecutor):
def __init__(self, instance_time, destruct_time, name, engine_name, ix,
iy):
BehaviorModelExecutor.__init__(self, instance_time, destruct_time,
name, engine_name)
self.init_state("IDLE")
self.insert_state("IDLE", Infinite)
self.insert_state("SEND", 0.01)
self.insert_state("MOVE", 0.01)
self.insert_state("END", Infinite)
self.insert_input_port("agent")
self.insert_output_port("gm")
self.insert_input_port("command")
self.insert_input_port("blk")
self.insert_input_port("test")
self.ix = ix
self.iy = iy
self.blk_flag = False
self.flag = ''
def ext_trans(self, port, msg):
msg_list = []
print(f"exttrans {self.get_cur_state()}")
        if port == "command":  # receive the list of movement commands
            print("[agent][start]")
            self.cancel_rescheduling()
            data = msg.retrieve()
            self.cm_list = data[0]
            print(f"[agent][in] cm_list :{self.cm_list} ")
            self._cur_state = "SEND"  # SEND state: report the agent's current position to the GM
        elif port == "gm":  # receive surrounding-cell info for the current position from the game manager
            print("[agent][in]")
            self.cancel_rescheduling()
            data = msg.retrieve()
            msg_list = data[0]
            self.map_data = msg_list
            self._cur_state = "MOVE"  # MOVE state: check whether the move is possible, then move
def output(self):
print(f"output {self.get_cur_state()}")
        if self._cur_state == "SEND":  # the agent sends its current position to the GM
Data = [self.ix, self.iy]
msg = SysMessage(self.get_name, "gm")
print(f"[agent][current] : {Data}")
msg.insert(Data)
return msg
        if self._cur_state == "MOVE":  # the agent moves
            cm = self.cm_list.pop(0)
            try:
                print(f"[agent] [cm] = {cm}, [rest cmlist] = {self.cm_list}")
                if (self.map_data[cm] == 0):  # no obstacle in that direction
                    self.move(cm)
                    print(f"[agent] move X:{self.ix},Y:{self.iy}\n")
                elif (self.map_data[cm] == 1):  # an obstacle blocks the move
                    print(f"[agent] can't go")
                    self.flag = cm
                    print(f"[agent] if move")
                    self.Ifmove()  # fall back to the move registered via Set_Ifmove
                    self.blk_flag = True
                elif (self.map_data[cm] == 3):  # reached the goal cell
                    self.move(cm)
                    print(f"[agent] move X:{self.ix},Y:{self.iy}\n")
                    print("[agent] arrive!")
                    self._cur_state = "END"  # game over
            except:
                self._cur_state = "END"  # blocked in every direction, so end the game
def Set_Ifmove(self, blk, cm):
if blk == 'R':
self.rblk_move = cm
elif blk == 'L':
self.lblk_move = cm
elif blk == 'F':
self.fblk_move = cm
elif blk == 'B':
self.bblk_move = cm
def Ifmove(self):
if self.flag == 'R':
self.cm_list.insert(0, self.rblk_move)
elif self.flag == 'L':
self.cm_list.insert(0, self.lblk_move)
elif self.flag == 'F':
self.cm_list.insert(0, self.fblk_move)
elif self.flag == 'B':
self.cm_list.insert(0, self.bblk_move)
def move(self, cm):
if (cm == "R"):
self.ix += 1
elif (cm == "L"):
self.ix -= 1
elif (cm == "F"):
self.iy += 1
elif (cm == "B"):
self.iy -= 1
def int_trans(self):
print(f"int trans {self.get_cur_state()}")
        if self._cur_state == "END":  # game over
            print("GAME END")
        elif self.blk_flag == True:  # a wall was hit: the fallback move from Ifmove was queued, so go back to MOVE
            self._cur_state = "MOVE"
            self.blk_flag = False
        elif self._cur_state == "SEND":  # after reporting to the GM, wait in IDLE until the GM replies
            self._cur_state = "IDLE"
        elif not self.cm_list:
            self._cur_state = "END"
        else:  # after a MOVE, go back to SEND and report the new position to the GM
            self._cur_state = "SEND"
```
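The agent consumes a flat list of moves on its `command` port (each move changes one coordinate: `R` is x+1, `L` is x-1, `F` is y+1, `B` is y-1) plus per-direction fallbacks registered through `Set_Ifmove`. A sketch of those inputs, illustrative only since the agent itself is created and scheduled by the evsim engine:
```python
# Move plan delivered on the "command" port, consumed one entry per MOVE state.
commands = ['F', 'F', 'R', 'F', 'R']
# Set_Ifmove('R', 'F') means: if a rightward move hits a wall (map value 1),
# Ifmove() pushes an 'F' onto the front of the remaining command list instead.
# agent.Set_Ifmove('R', 'F')
# agent.Set_Ifmove('L', 'B')
```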
#### File: JeHyuckLee/evsim/Game_manager.py
```python
from behavior_model_executor import BehaviorModelExecutor
from system_message import SysMessage
from definition import *
from system_simulator import SystemSimulator
map = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1],
[1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1],
[1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1],
[1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1],
[1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1],
[1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
[1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1],
[1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1],
[1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1],
[1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1],
[1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1],
[1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1],
[1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1],
[1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 3, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
# The maze origin (0,0) is the top-left corner. F: down, R: right, B: up, L: left
class Gamemanager(BehaviorModelExecutor):
def __init__(self, instance_time, destruct_time, name, engine_name):
BehaviorModelExecutor.__init__(self, instance_time, destruct_time,
name, engine_name)
self.set_name(engine_name)
self.init_state("IDLE")
self.insert_state("IDLE", Infinite)
self.insert_state("SEND", 0.01)
self.insert_input_port("agent")
self.insert_output_port("gm")
def ext_trans(self, port, msg):
msg_list = []
        if port == "agent":  # receive the command and the agent's current position
print(f"[Gm][in] received")
self.cancel_rescheduling()
data = msg.retrieve()
msg_list = data[0]
aX = msg_list[0]
aY = msg_list[1]
print(f"[Gm] aX:{aX} aY:{aY}")
self.Data = self.map_data(aX, aY)
self._cur_state = "SEND"
def output(self):
        msg = SysMessage(self.get_name,
                         "agent")  # send map data for the four cells around the agent's current position
msg.insert(self.Data)
print(f"[Gm][out]{self.Data}")
return msg
def int_trans(self):
if self._cur_state == "SEND":
self._cur_state = "IDLE"
else:
self._cur_state = "SEND"
def map_data(self, j, i):
map_data = {
'R': map[i][j + 1],
'L': map[i][j - 1],
'F': map[i + 1][j],
'B': map[i - 1][j]
}
return map_data
```
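To make the neighbour lookup concrete: `map_data(j, i)` treats `j` as the x (column) index and `i` as the y (row) index, so for the maze above an agent standing at x=1, y=1 would receive the dictionary below (assuming the module-level `map` grid is in scope):
```python
# Worked example of Gamemanager.map_data for the agent position x=1, y=1.
neighbours = {'R': map[1][2], 'L': map[1][0], 'F': map[2][1], 'B': map[0][1]}
assert neighbours == {'R': 0, 'L': 1, 'F': 0, 'B': 1}   # right/forward open, left/back are walls
```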
|
{
"source": "je-hyun/GracileFeed",
"score": 3
}
|
#### File: je-hyun/GracileFeed/models.py
```python
from flask_login import UserMixin
from app import db
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(15), unique=True)
email = db.Column(db.String(50), unique=True)
password = db.Column(db.String(80))
    subscriptions = db.relationship('Subscription', cascade="all,delete", backref='user')
favorites = db.relationship('Favorite', cascade="all,delete", backref='user')
def __repr__(self):
return f'User {self.username}'
class Source(db.Model):
rss_url = db.Column(db.String(512), primary_key=True)
homepage_url = db.Column(db.String(512))
name = db.Column(db.String(80))
subscriptions = db.relationship('Subscription', cascade="all,delete", backref='source')
articles = db.relationship('ArticleSource', cascade="all,delete", backref='source')
def __repr__(self):
return f'Source {self.rss_url}'
class Subscription(db.Model):
user_id = db.Column(db.Integer, db.ForeignKey("user.id"), primary_key=True)
rss_url = db.Column(db.String(512), db.ForeignKey("source.rss_url"), primary_key=True)
daily_amount = db.Column(db.Integer, nullable=False)
def __repr__(self):
return f'Subscription {self.user_id} - {self.rss_url}'
class Article(db.Model):
url = db.Column(db.String(512), primary_key=True)
title = db.Column(db.String(256))
image_url = db.Column(db.String(512))
publish_date = db.Column(db.DateTime())
text = db.Column(db.Text())
source = db.relationship('ArticleSource', cascade="all,delete", backref='article')
favorites = db.relationship('Favorite', cascade="all,delete", backref='article')
def __repr__(self):
return f'Article {self.url}'
class ArticleSource(db.Model):
article_url = db.Column(db.String(512), db.ForeignKey("article.url"), primary_key=True)
rss_url = db.Column(db.String(512), db.ForeignKey("source.rss_url"), primary_key=True)
def __repr__(self):
return f'ArticleSource {self.article_url} - {self.rss_url}'
class Favorite(db.Model):
user_id = db.Column(db.Integer, db.ForeignKey("user.id"), primary_key=True)
article_url = db.Column(db.String(512), db.ForeignKey("article.url"), primary_key=True)
def __repr__(self):
return f'Favorite {self.user_id} - {self.article_url}'
```
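A minimal sketch of how these models fit together, assuming `app.py` also exposes the Flask `app` object alongside `db`; all values are placeholders.
```python
# Create the schema and one user with a subscription; the backrefs come from
# the relationship() definitions above.
from app import app, db   # assumption: app.py exports both objects

with app.app_context():
    db.create_all()
    user = User(username='alice', email='[email protected]', password='<hashed>')
    src = Source(rss_url='https://example.com/feed.xml',
                 homepage_url='https://example.com', name='Example Feed')
    sub = Subscription(user=user, source=src, daily_amount=5)
    db.session.add_all([user, src, sub])
    db.session.commit()
    print(user.subscriptions)   # backref from Subscription
```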
|
{
"source": "jeibloo/Unknown-Blue-Substance",
"score": 3
}
|
#### File: Unknown-Blue-Substance/app/models.py
```python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
db = SQLAlchemy(app)
# Create user
class User(db.Model):
id = db.Column(db.BigInteger, primary_key=True)
name = db.Column(db.String(15), nullable=False)
newest_tweet_id = db.Column(db.BigInteger)
def __repr__(self):
return f'<User {self.name}>'
class Tweet(db.Model):
id = db.Column(db.BigInteger, primary_key=True)
text = db.Column(db.Unicode(500))
user_id = db.Column(db.BigInteger, db.ForeignKey('user.id'),
nullable=False)
embedding = db.Column(db.PickleType, nullable=False)
# Here we define the relationship betwixt user and tweet
user = db.relationship('User', backref=db.backref('tweets', lazy=True))
def __repr__(self):
return f'<Tweet {self.text}>'
```
#### File: Unknown-Blue-Substance/app/predict.py
```python
import numpy as np
from sklearn.linear_model import LogisticRegression
from .models import User
from .online import BASILICA
def predict_user(user1_name, user2_name, tweet_text):
# Filter is from SQLAlchemy
user1 = User.query.filter(User.name == user1_name).one()
user2 = User.query.filter(User.name == user2_name).one()
#
user1_embeddings = np.array([tweet.embedding for tweet in user1.tweets])
user2_embeddings = np.array([tweet.embedding for tweet in user2.tweets])
# Create labels (1's and 0's)
user1_labels = np.ones(len(user1.tweets))
user2_labels = np.zeros(len(user2.tweets))
# Stack the embeddings and put into variable, creates (at least 2D)
embeddings = np.vstack([user1_embeddings, user2_embeddings])
labels = np.concatenate([user1_labels, user2_labels])
# Set model and 'fit'(train) on embeddings and labels
log_reg = LogisticRegression(solver='lbfgs', max_iter=1000)
log_reg.fit(embeddings, labels)
# Use BASILICA to embed the individual tweet we want to analyze
    # the 'twitter' model keeps the embedding in the same space as the stored tweets
tweet_embedding = BASILICA.embed_sentence(tweet_text, model='twitter')
    # predict_proba returns [[P(user2), P(user1)]]; [:, 1] selects the
    # probability that the tweet was written by user1 (labeled 1 above)
    return log_reg.predict_proba(np.array([tweet_embedding]))[:, 1]
```
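`predict_user` fits a fresh logistic regression on the two users' stored embeddings and returns the probability that the tweet belongs to the first user (whose tweets were labeled 1). A hypothetical call, assuming both users and their tweet embeddings are already in the database and an app context is active:
```python
# Returns a length-1 array: P(tweet was written by user1_name).
prob = predict_user('nasa', 'austen', 'Counting down to the next rover launch!')
print(round(float(prob[0]), 3))
```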
|
{
"source": "jeicy07/_NMT_GAN",
"score": 2
}
|
#### File: _NMT_GAN/tensor2tensor/common_layers.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
# Dependency imports
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensor2tensor import expert_utils as eu
import tensorflow as tf
from tensorflow.python.framework import function
# This is a global setting. When turned off, no @function.Defun is used.
allow_defun = True
def saturating_sigmoid(x):
"""Saturating sigmoid: 1.2 * sigmoid(x) - 0.1 cut to [0, 1]."""
with tf.name_scope("saturating_sigmoid", [x]):
y = tf.sigmoid(x)
return tf.minimum(1.0, tf.maximum(0.0, 1.2 * y - 0.1))
def hard_sigmoid(x, saturation_limit=0.9):
saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit))
x_shifted = 0.5 * x + 0.5
return tf.minimum(1.0, tf.nn.relu(x_shifted)), saturation_cost
def hard_tanh(x, saturation_limit=0.9):
saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit))
return tf.minimum(1.0, tf.maximum(x, -1.0)), saturation_cost
def inverse_exp_decay(max_step, min_value=0.01):
"""Inverse-decay exponentially from 0.01 to 1.0 reached at max_step."""
inv_base = tf.exp(tf.log(min_value) / float(max_step))
step = tf.to_float(tf.contrib.framework.get_global_step())
return inv_base**tf.maximum(float(max_step) - step, 0.0)
def standardize_images(x):
"""Image standardization on batches (tf.image.per_image_standardization)."""
with tf.name_scope("standardize_images", [x]):
x = tf.to_float(x)
x_mean = tf.reduce_mean(x, axis=[1, 2, 3], keep_dims=True)
x_variance = tf.reduce_mean(
tf.square(x - x_mean), axis=[1, 2, 3], keep_dims=True)
num_pixels = tf.to_float(tf.shape(x)[1] * tf.shape(x)[2] * 3)
x = (x - x_mean) / tf.maximum(tf.sqrt(x_variance), tf.rsqrt(num_pixels))
# TODO(lukaszkaiser): remove hack below, needed for greedy decoding for now.
if x.shape and len(x.shape) == 4 and x.shape[3] == 1:
x = tf.concat([x, x, x], axis=3) # Not used, just a dead tf.cond branch.
x.set_shape([None, None, None, 3])
return x
def image_augmentation(images, do_colors=False):
"""Image augmentation: cropping, flipping, and color transforms."""
images = tf.random_crop(images, [299, 299, 3])
images = tf.image.random_flip_left_right(images)
if do_colors: # More augmentation, but might be slow.
images = tf.image.random_brightness(images, max_delta=32. / 255.)
images = tf.image.random_saturation(images, lower=0.5, upper=1.5)
images = tf.image.random_hue(images, max_delta=0.2)
images = tf.image.random_contrast(images, lower=0.5, upper=1.5)
return images
def flatten4d3d(x):
"""Flatten a 4d-tensor into a 3d-tensor by joining width and height."""
xshape = tf.shape(x)
result = tf.reshape(x, [xshape[0], xshape[1] * xshape[2], xshape[3]])
# Preserve static shapes when available.
xshape_static = x.get_shape()
result.set_shape([xshape_static[0], None, xshape_static[3]])
return result
def embedding(x, vocab_size, dense_size, name=None, reuse=None, multiplier=1.0):
"""Embed x of type int64 into dense vectors, reducing to max 4 dimensions."""
with tf.variable_scope(
name, default_name="embedding", values=[x], reuse=reuse):
embedding_var = tf.get_variable("kernel", [vocab_size, dense_size])
# On the backwards pass, we want to convert the gradient from
# an indexed-slices to a regular tensor before sending it back to the
# parameter server. This avoids excess computation on the parameter server.
embedding_var = eu.ConvertGradientToTensor(embedding_var)
emb_x = tf.gather(embedding_var, x)
if multiplier != 1.0:
emb_x *= multiplier
shape, static_shape = tf.shape(emb_x), emb_x.shape.as_list()
if not static_shape or len(static_shape) < 5:
return emb_x
# If we had extra channel dimensions, assume it's 1, i.e. shape[3] == 1.
assert len(static_shape) == 5
return tf.reshape(emb_x, [shape[0], shape[1], shape[2], static_shape[4]])
def shift_left(x, pad_value=None):
"""Shift the second dimension of x right by one."""
if pad_value is None:
shifted_targets = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]])[:, :-1, :, :]
else:
shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1, :, :]
return shifted_targets
def shift_left_3d(x, pad_value=None):
"""Shift the second dimension of x right by one."""
if pad_value is None:
shifted_targets = tf.pad(x, [[0, 0], [1, 0], [0, 0]])[:, :-1, :]
else:
shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1, :]
return shifted_targets
def conv_stride2_multistep(x, nbr_steps, output_filters, name=None, reuse=None):
"""Use a strided convolution to downsample x by 2, `nbr_steps` times.
We use stride and filter size 2 to avoid the checkerboard problem of deconvs.
As detailed in http://distill.pub/2016/deconv-checkerboard/.
Args:
x: a `Tensor` with shape `[batch, spatial, depth]` or
`[batch, spatial_1, spatial_2, depth]`
nbr_steps: number of halving downsample rounds to apply
output_filters: an int specifying the filter count for the convolutions
name: a string
reuse: a boolean
Returns:
a `Tensor` with shape `[batch, spatial / (2**nbr_steps), output_filters]` or
`[batch, spatial_1 / (2**nbr_steps), spatial_2 / (2**nbr_steps),
output_filters]`
"""
with tf.variable_scope(
name, default_name="conv_stride2_multistep", values=[x], reuse=reuse):
if nbr_steps == 0:
out = conv(x, output_filters, (1, 1))
return out, [out]
hidden_layers = [x]
for i in xrange(nbr_steps):
hidden_layers.append(
conv(
hidden_layers[-1],
output_filters, (2, 2),
strides=2,
activation=tf.nn.relu,
name="conv" + str(i)))
return hidden_layers[-1], hidden_layers
def deconv_stride2_multistep(x,
nbr_steps,
output_filters,
name=None,
reuse=None):
"""Use a deconvolution to upsample x by 2**`nbr_steps`.
Args:
x: a `Tensor` with shape `[batch, spatial, depth]` or
`[batch, spatial_1, spatial_2, depth]`
nbr_steps: an int specifying the number of doubling upsample rounds to
apply.
output_filters: an int specifying the filter count for the deconvolutions
name: a string
reuse: a boolean
Returns:
a `Tensor` with shape `[batch, spatial * (2**nbr_steps), output_filters]` or
`[batch, spatial_1 * (2**nbr_steps), spatial_2 * (2**nbr_steps),
output_filters]`
"""
with tf.variable_scope(
name, default_name="deconv_stride2_multistep", values=[x], reuse=reuse):
def deconv1d(cur, i):
cur_shape = tf.shape(cur)
thicker = conv(
cur,
output_filters * 2, (1, 1),
padding="SAME",
activation=tf.nn.relu,
name="deconv1d" + str(i))
return tf.reshape(thicker,
[cur_shape[0], cur_shape[1] * 2, 1, output_filters])
def deconv2d(cur, i):
thicker = conv(
cur,
output_filters * 4, (1, 1),
padding="SAME",
activation=tf.nn.relu,
name="deconv2d" + str(i))
return tf.depth_to_space(thicker, 2)
cur = x
for i in xrange(nbr_steps):
if cur.get_shape()[2] == 1:
cur = deconv1d(cur, i)
else:
cur = tf.cond(
tf.equal(tf.shape(cur)[2], 1),
lambda idx=i: deconv1d(cur, idx),
lambda idx=i: deconv2d(cur, idx))
return cur
def conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs):
"""Conditional conv_fn making kernel 1d or 2d depending on inputs shape."""
static_shape = inputs.get_shape()
if not static_shape or len(static_shape) != 4:
raise ValueError("Inputs to conv must have statically known rank 4.")
inputs.set_shape([static_shape[0], None, None, static_shape[3]])
# Add support for left padding.
if "padding" in kwargs and kwargs["padding"] == "LEFT":
dilation_rate = (1, 1)
if "dilation_rate" in kwargs:
dilation_rate = kwargs["dilation_rate"]
assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1
height_padding = 2 * (kernel_size[0] // 2) * dilation_rate[0]
cond_padding = tf.cond(
tf.equal(tf.shape(inputs)[2], 1), lambda: tf.constant(0),
lambda: tf.constant(2 * (kernel_size[1] // 2) * dilation_rate[1]))
width_padding = 0 if static_shape[2] == 1 else cond_padding
padding = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]
inputs = tf.pad(inputs, padding)
kwargs["padding"] = "VALID"
force2d = False # Special argument we use to force 2d kernels (see below).
if "force2d" in kwargs:
force2d = kwargs["force2d"]
def conv2d_kernel(kernel_size_arg, name_suffix):
"""Call conv2d but add suffix to name."""
if "name" in kwargs:
original_name = kwargs["name"]
name = kwargs.pop("name") + "_" + name_suffix
else:
original_name = None
name = "conv_" + name_suffix
original_force2d = None
if "force2d" in kwargs:
original_force2d = kwargs.pop("force2d")
result = conv_fn(inputs, filters, kernel_size_arg, name=name, **kwargs)
if original_name is not None:
kwargs["name"] = original_name # Restore for other calls.
if original_force2d is not None:
kwargs["force2d"] = original_force2d
return result
# Manually setting the shape to be unknown in the middle two dimensions so
# that the `tf.cond` below won't throw an error based on the convolution
# kernels being too large for the data.
  inputs._shape = tf.TensorShape(  # pylint: disable=protected-access
      [static_shape[0], None, None, static_shape[3]])
if kernel_size[1] == 1 or force2d:
# Avoiding the cond below can speed up graph and gradient construction.
return conv2d_kernel(kernel_size, "single")
return tf.cond(
tf.equal(tf.shape(inputs)[2],
1), lambda: conv2d_kernel((kernel_size[0], 1), "small"),
lambda: conv2d_kernel(kernel_size, "std"))
def conv(inputs, filters, kernel_size, **kwargs):
return conv_internal(tf.layers.conv2d, inputs, filters, kernel_size, **kwargs)
def conv1d(inputs, filters, kernel_size, **kwargs):
return tf.squeeze(
conv(tf.expand_dims(inputs, 2), filters, (kernel_size, 1), **kwargs), 2)
def separable_conv(inputs, filters, kernel_size, **kwargs):
return conv_internal(tf.layers.separable_conv2d, inputs, filters, kernel_size,
**kwargs)
def subseparable_conv(inputs, filters, kernel_size, **kwargs):
"""Sub-separable convolution. If separability == 0 it's a separable_conv."""
def conv_fn(inputs, filters, kernel_size, **kwargs):
"""Sub-separable convolution, splits into separability-many blocks."""
separability = None
if "separability" in kwargs:
separability = kwargs.pop("separability")
if separability:
parts = []
abs_sep = separability if separability > 0 else -1 * separability
for split_idx, split in enumerate(tf.split(inputs, abs_sep, axis=3)):
with tf.variable_scope("part_%d" % split_idx):
if separability > 0:
parts.append(
tf.layers.conv2d(split, filters // separability, kernel_size, **
kwargs))
else:
parts.append(
tf.layers.separable_conv2d(split, filters // abs_sep,
kernel_size, **kwargs))
if separability > 1:
result = tf.layers.conv2d(tf.concat(parts, axis=3), filters, (1, 1))
elif abs_sep == 1: # If we have just one block, return it.
assert len(parts) == 1
result = parts[0]
else:
result = tf.concat(parts, axis=3)
else:
result = tf.layers.separable_conv2d(inputs, filters, kernel_size,
**kwargs)
if separability is not None:
kwargs["separability"] = separability
return result
return conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs)
def layer_norm_compute_python(x, epsilon, scale, bias):
"""Layer norm raw computation."""
mean = tf.reduce_mean(x, axis=[-1], keep_dims=True)
variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keep_dims=True)
norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
return norm_x * scale + bias
@function.Defun(compiled=True)
def layer_norm_compute_grad(x, epsilon, scale, bias, dy):
y = layer_norm_compute_python(x, epsilon, scale, bias)
dx = tf.gradients(ys=[y], xs=[x, epsilon, scale, bias], grad_ys=[dy])
return dx
@function.Defun(
compiled=True,
separate_compiled_gradients=True,
grad_func=layer_norm_compute_grad)
def layer_norm_compute(x, epsilon, scale, bias):
return layer_norm_compute_python(x, epsilon, scale, bias)
def layer_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None):
"""Layer normalize the tensor x, averaging over the last dimension."""
if filters is None:
filters = x.get_shape()[-1]
with tf.variable_scope(
name, default_name="layer_norm", values=[x], reuse=reuse):
scale = tf.get_variable(
"layer_norm_scale", [filters], initializer=tf.ones_initializer())
bias = tf.get_variable(
"layer_norm_bias", [filters], initializer=tf.zeros_initializer())
if allow_defun:
result = layer_norm_compute(x, tf.constant(epsilon), scale, bias)
result.set_shape(x.get_shape())
else:
result = layer_norm_compute_python(x, epsilon, scale, bias)
return result
def noam_norm(x, name=None):
"""One version of layer normalization."""
with tf.name_scope(name, default_name="noam_norm", values=[x]):
shape = x.get_shape()
ndims = len(shape)
return (tf.nn.l2_normalize(x, ndims - 1, epsilon=1.0) *
tf.sqrt(tf.to_float(shape[-1])))
def residual_function(hparams):
"""Returns a function for combining layer input and layer output.
The returned function on x (layer input) and y (layer output) computes:
    norm_function(x + dropout(y))
Args:
hparams: model hyperparameters
Returns:
a function from x=<layer input> and y=<layer output> to computed output
"""
def residual_fn(x, y):
return hparams.norm_function(x + tf.nn.dropout(
y, 1.0 - hparams.residual_dropout))
return residual_fn
def conv_block_internal(conv_fn,
inputs,
filters,
dilation_rates_and_kernel_sizes,
first_relu=True,
use_elu=False,
separabilities=None,
**kwargs):
"""A block of convolutions.
Args:
conv_fn: convolution function, e.g. conv or separable_conv.
inputs: a Tensor
filters: an Integer
dilation_rates_and_kernel_sizes: a list of tuples (dilation, (k_w, k_h))
first_relu: whether to do a relu at start (defaults to True)
use_elu: whether to use ELUs instead of ReLUs (defaults to False)
separabilities: list of separability factors (per-layer).
**kwargs: additional arguments (e.g., pooling)
Returns:
a Tensor.
"""
name = kwargs.pop("name") if "name" in kwargs else None
mask = kwargs.pop("mask") if "mask" in kwargs else None
norm = kwargs.pop("normalizer_fn") if "normalizer_fn" in kwargs else None
if norm is None and "normalizer_fn" not in kwargs:
norm = lambda x, name: layer_norm(x, filters, name=name)
with tf.variable_scope(name, "conv_block", [inputs]):
cur, counter = inputs, -1
for dilation_rate, kernel_size in dilation_rates_and_kernel_sizes:
counter += 1
if first_relu or counter > 0:
cur = tf.nn.elu(cur) if use_elu else tf.nn.relu(cur)
if mask is not None:
cur *= mask
if separabilities:
cur = conv_fn(
cur,
filters,
kernel_size,
dilation_rate=dilation_rate,
name="conv_block_%d" % counter,
use_bias=norm is None,
separability=separabilities[counter],
**kwargs)
else:
cur = conv_fn(
cur,
filters,
kernel_size,
dilation_rate=dilation_rate,
name="conv_block_%d" % counter,
use_bias=norm is None,
**kwargs)
if norm is not None:
cur = norm(cur, name="conv_block_norm_%d" % counter)
return cur
def conv_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs):
"""A block of standard convolutions."""
return conv_block_internal(conv, inputs, filters,
dilation_rates_and_kernel_sizes, **kwargs)
def separable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes,
**kwargs):
"""A block of separable convolutions."""
return conv_block_internal(separable_conv, inputs, filters,
dilation_rates_and_kernel_sizes, **kwargs)
def subseparable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes,
**kwargs):
"""A block of separable convolutions."""
return conv_block_internal(subseparable_conv, inputs, filters,
dilation_rates_and_kernel_sizes, **kwargs)
def pool(inputs, window_size, pooling_type, padding, strides=(1, 1)):
"""Pooling (supports "LEFT")."""
with tf.name_scope("pool", [inputs]):
static_shape = inputs.get_shape()
if not static_shape or len(static_shape) != 4:
raise ValueError("Inputs to conv must have statically known rank 4.")
# Add support for left padding.
if padding == "LEFT":
assert window_size[0] % 2 == 1 and window_size[1] % 2 == 1
if len(static_shape) == 3:
width_padding = 2 * (window_size[1] // 2)
padding_ = [[0, 0], [width_padding, 0], [0, 0]]
else:
height_padding = 2 * (window_size[0] // 2)
cond_padding = tf.cond(
tf.equal(tf.shape(inputs)[2], 1), lambda: tf.constant(0),
lambda: tf.constant(2 * (window_size[1] // 2)))
width_padding = 0 if static_shape[2] == 1 else cond_padding
padding_ = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]
inputs = tf.pad(inputs, padding_)
inputs.set_shape([static_shape[0], None, None, static_shape[3]])
padding = "VALID"
window_size_small = (window_size[0], 1)
strides_small = (strides[0], 1)
# Manually setting the shape to be unknown in the middle two dimensions so
# that the `tf.cond` below won't throw an error based on the convolution
# kernels being too large for the data.
    inputs._shape = tf.TensorShape(  # pylint: disable=protected-access
        [static_shape[0], None, None, static_shape[3]])
return tf.cond(
tf.equal(tf.shape(inputs)[2], 1),
lambda: tf.nn.pool( # pylint: disable=g-long-lambda
inputs, window_size_small, pooling_type, padding,
strides=strides_small),
lambda: tf.nn.pool( # pylint: disable=g-long-lambda
inputs, window_size, pooling_type, padding, strides=strides))
def conv_block_downsample(x,
kernel,
strides,
padding,
separability=0,
name=None,
reuse=None):
"""Implements a downwards-striding conv block, like Xception exit flow."""
with tf.variable_scope(
name, default_name="conv_block_downsample", values=[x], reuse=reuse):
hidden_size = int(x.get_shape()[-1])
res = conv_block(
x,
int(1.25 * hidden_size), [((1, 1), kernel)],
padding=padding,
strides=strides,
name="res_conv")
x = subseparable_conv_block(
x,
hidden_size, [((1, 1), kernel)],
padding=padding,
separability=separability,
name="conv0")
x = subseparable_conv_block(
x,
int(1.25 * hidden_size), [((1, 1), kernel)],
padding=padding,
separability=separability,
name="conv1")
x = pool(x, kernel, "MAX", padding, strides=strides)
x += res
x = subseparable_conv_block(
x,
2 * hidden_size, [((1, 1), kernel)],
first_relu=False,
padding=padding,
separability=separability,
name="conv2")
x = subseparable_conv_block(
x,
int(2.5 * hidden_size), [((1, 1), kernel)],
padding=padding,
separability=separability,
name="conv3")
return x
def decompress_seqcnn(x,
targets,
targets_vocab_size,
dilations_and_kernels,
block_size,
is_2d=False,
embedding_var=None,
name=None,
reuse=None):
"""Decompress x into targets size using a Sequence CNN at every element."""
with tf.variable_scope(
name,
default_name="decompress_batch_seqcnn",
values=[x, targets],
reuse=reuse):
# We assume targets are [batch x block_size * N x block_size * N x C] if
# is_2d=True or [batch, block_size * N, 1, C] otherwise, and C is static.
# Let's shift targets to depth and embed.
targets_shape, targets_shape_static = tf.shape(targets), targets.get_shape()
channels = int(targets_shape_static[-1])
hidden_size = int(x.get_shape()[-1])
if is_2d:
depth_targets = tf.space_to_depth(targets, block_size)
factor = channels * block_size * block_size
else:
depth_targets = tf.reshape(targets, [
targets_shape[0], targets_shape[1] // block_size, 1,
channels * block_size
])
factor = channels * block_size
if embedding_var is None:
embedding_var = tf.get_variable("targets_embedding",
[targets_vocab_size, hidden_size])
targets_emb = tf.gather(embedding_var, depth_targets)
# Flatten x and embedded targets. Flat targets are factor* larger on axis=1.
flat_x = tf.reshape(x, [-1, 1, 1, hidden_size])
flat_targets = tf.reshape(targets_emb, [-1, factor, 1, hidden_size])
shifted_targets = shift_left(flat_targets)
# Run a SeqCNN large-batch to produce factor outputs out of every target.
flat_x += tf.zeros_like(shifted_targets) # Broadcast on axis=1.
flat_outputs = conv_block(
tf.concat([flat_x, shifted_targets], axis=3),
hidden_size,
dilations_and_kernels,
padding="LEFT")
# Reshape back to embedded targets shape.
outputs = tf.reshape(flat_outputs, [
tf.shape(targets_emb)[0],
tf.shape(targets_emb)[1],
tf.shape(targets_emb)[2], factor * hidden_size
])
# Move depth back to target space.
if is_2d:
outputs = tf.depth_to_space(outputs, 2)
else:
outputs = tf.reshape(outputs, [
tf.shape(outputs)[0], block_size * tf.shape(outputs)[1], 1,
hidden_size
])
# Final reshape before prediction to ensure target size.
outputs = tf.reshape(outputs, [
targets_shape[0], targets_shape[1], targets_shape[2], channels,
hidden_size
])
return tf.layers.dense(outputs, targets_vocab_size)
def moe_layer(data_parallelism,
ps_devices,
xs,
train,
model_hidden_size,
expert_hidden_size,
n1,
n2,
loss_coef,
autoscale=True,
name=None):
"""A mixture of experts layer.
Args:
data_parallelism: a expert_utils.Parallelism object.
ps_devices: a list of strings
xs: a list of input tensors.
train: a boolean scalar.
model_hidden_size: an integer (input/output size for this layer)
expert_hidden_size: an integer (size of each expert's hidden layer)
n1: an integer - number of experts (or # of groups for hierarchical MoE)
n2: optional integer - size of each group of experts for hierarchical MoE
loss_coef: a scalar - multiplier on load-balancing losses
autoscale: a boolean
name: a string
Returns:
ys: a list of tensors:
extra_training_loss: a scalar
"""
dp = data_parallelism
with tf.variable_scope(name, default_name="moe"):
# Set up the hyperparameters for the gating networks.
primary_gating_hp = eu.NoisyTopKGatingParams()
primary_gating_hp.num_experts = n1
if n2:
# hierarchical MoE containing moe_n1 groups of moe_n2 experts.
assert n2 > 1
secondary_gating_hp = eu.NoisyTopKGatingParams()
secondary_gating_hp.num_experts = n2
else:
# flat mixture of moe_n1 experts.
secondary_gating_hp = None
# Set up the hyperparameters for the expert networks.
# Each expert contains a hidden RELU layer of size filter_size
expert_hp = eu.FeedForwardExpertParams()
expert_hp.autoscale = autoscale
expert_hp.hidden_layer_sizes = [expert_hidden_size]
# Create the mixture of experts.
moe = eu.DistributedMixtureOfExperts(primary_gating_hp, secondary_gating_hp,
expert_hp, model_hidden_size,
model_hidden_size, ps_devices, "moe")
# MoE expects input tensors to be 2d.
# Flatten out spatial dimensions.
xs_2d = dp(tf.reshape, xs, [[-1, model_hidden_size]] * dp.n)
# Call the MoE
moe_out_2d, importance, load, _, _ = moe.Eval(
dp.devices, xs_2d, train, identifiers=None, summaries=True)
# Reshape the output to the original shape.
moe_out = dp(tf.reshape, moe_out_2d, dp(tf.shape, xs))
# These losses encourage equal load on the different experts.
loss = loss_coef * (eu.CVSquared(importance) + eu.CVSquared(load))
return moe_out, loss
def simple_attention(target, source, bias=None, summaries=True):
"""A simple attention function.
Args:
target: a `Tensor` with shape `[batch, target_timesteps, depth]` or
`[batch, target_timesteps_1, target_timesteps_2, depth]`
source: a `Tensor` with shape `[batch, source_timesteps, depth]` or
`[batch, source_timesteps_1, source_timesteps_2, depth]`
bias: an optional `Tensor` with shape `[batch, timesteps, 1, 1]` used
to mask the attention to not attend to padding of input.
summaries: Boolean, whether to output summaries.
Returns:
a `Tensor` with same shape as `target`
"""
with tf.name_scope("simple_attention", [target, source]):
target_shape = tf.shape(target)
source_shape = tf.shape(source)
target = tf.reshape(target, [
target_shape[0], target_shape[1] * target_shape[2], target_shape[3]
])
source = tf.reshape(source, [
source_shape[0], source_shape[1] * source_shape[2], source_shape[3]
])
attention = tf.matmul(target, source, transpose_b=True)
attention *= tf.rsqrt(tf.to_float(tf.shape(target)[2]))
if bias is not None:
attention += tf.expand_dims(tf.squeeze(bias, axis=[2, 3]), axis=1)
attention = tf.nn.softmax(attention)
if summaries and not tf.get_variable_scope().reuse:
tf.summary.image("attention", tf.expand_dims(attention, 3), max_outputs=5)
attended = tf.matmul(attention, source)
return tf.reshape(attended, target_shape)
def multiscale_conv_sum(inputs, output_size, dilation_rates_and_kernel_sizes,
pooling_type, **kwargs):
"""Sum of several dilated convolutions.
For all convolutions with dilation_rate > 1, we first pool the input with
width dilation_rate.
Args:
inputs: a Tensor
output_size: an Integer
dilation_rates_and_kernel_sizes: a list of pairs (dilation, kernel_size)
pooling_type: "AVG" or "MAX"
    **kwargs: additional arguments passed to the conv layers
Returns:
a Tensor.
"""
name = kwargs.pop("name") if "name" in kwargs else None
with tf.variable_scope(name, "multiscale_conv_sum", [inputs]):
padding = kwargs["padding"]
results, counter = [], -1
for dilation_rate, kernel_size in dilation_rates_and_kernel_sizes:
counter += 1
if dilation_rate > 1:
pooled = pool(inputs, kernel_size, pooling_type, padding)
else:
pooled = inputs
results.append(
conv(
pooled,
output_size,
kernel_size,
dilation_rate=dilation_rate,
name="conv_layer%d" % counter,
**kwargs))
return tf.add_n(results) * (len(results)**-0.5)
def multiscale_conv_and_attention(x,
padding,
hparams,
source=None,
summaries=True):
"""A common part of t2t layers.
First, do a linear multiscale convolution
Second, do attention (if source is not None)
Applies residuals and normalization on both steps.
Args:
x: a Tensor.
padding: a padding type
hparams: hyperparameters for model
source: optional source tensor for attention. (encoder output)
summaries: Boolean, whether to output summaries.
Returns:
a Tensor.
"""
# TODO(noam): The number of different scales should be a hyperparameter.
conv_sum = multiscale_conv_sum(
x,
hparams.hidden_size, [((hparams.kernel_height**i, hparams.kernel_width**
i), (hparams.kernel_height, hparams.kernel_width))
for i in xrange(3)],
"AVG",
padding=padding)
# For residuals a rescale if necessary if channels differ.
if x.get_shape().as_list()[-1] != conv_sum.get_shape().as_list()[-1]:
x = conv(x, hparams.hidden_size, (1, 1))
x = noam_norm(x + conv_sum)
if source is not None:
x = noam_norm(x + simple_attention(x, source, summaries=summaries))
return x
def conv_with_pools(inputs, output_size, kernel_size, pool_sizes, pooling_type,
**kwargs):
"""Convolution plus 1x1 convolution applied to specified pools.
For example we might do a regular convolution with kernel size (3, 1),
and pools of sizes [(9, 1), (27, 1)].
Args:
inputs: a Tensor
output_size: an Integer
kernel_size: a tuple of integers
pool_sizes: a list of tuples of integers.
pooling_type: "AVG" or "MAX"
**kwargs: additional keyword args for conv
Returns:
a Tensor.
"""
name = kwargs.pop("name") if "name" in kwargs else None
with tf.variable_scope(name, "conv_with_pools", [inputs]):
padding = kwargs["padding"]
results = []
results.append(conv(inputs, output_size, kernel_size, **kwargs))
for i, pool_size in enumerate(pool_sizes):
pooled = pool(inputs, pool_size, pooling_type, padding)
results.append(
conv(pooled, output_size, (1, 1), name="pool_%d" % i, **kwargs))
return tf.add_n(results) * (len(results)**-0.5)
def conv_with_pools_and_attention(x,
padding,
hparams,
source=None,
summaries=True):
"""A common part of t2t layers.
First, do conv_with_pools
Second, do attention (if source is not None)
Applies residuals and normalization on both steps.
Args:
x: a Tensor.
padding: a padding type
hparams: hyperparameters for model
source: optional source tensor for attention. (encoder output)
summaries: Boolean, whether to output summaries.
Returns:
a Tensor.
"""
conv_sum = conv_with_pools(
x,
hparams.hidden_size, (hparams.kernel_height, hparams.kernel_width),
hparams.pool_sizes,
"AVG",
padding=padding)
if x.get_shape().as_list()[-1] == conv_sum.get_shape().as_list()[-1]:
conv_sum += x
x = noam_norm(conv_sum)
if source is not None:
x = noam_norm(x + simple_attention(x, source, summaries=summaries))
return x
def get_timing_signal(length,
min_timescale=1,
max_timescale=1e4,
num_timescales=16):
"""Create Tensor of sinusoids of different frequencies.
Args:
length: Length of the Tensor to create, i.e. Number of steps.
min_timescale: a float
max_timescale: a float
num_timescales: an int
Returns:
Tensor of shape (length, 2*num_timescales)
"""
positions = tf.to_float(tf.range(length))
log_timescale_increment = (math.log(max_timescale / min_timescale) /
(num_timescales - 1))
inv_timescales = min_timescale * tf.exp(
tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
def add_timing_signal(x, min_timescale=1, max_timescale=1e4, num_timescales=16):
"""Adds a bunch of sinusoids of different frequencies to a Tensor.
This allows attention to learn to use absolute and relative positions.
The timing signal should be added to some precursor of both the source
and the target of the attention.
The use of relative position is possible because sin(x+y) and cos(x+y) can be
  expressed in terms of y, sin(x) and cos(x).
In particular, we use a geometric sequence of timescales starting with
min_timescale and ending with max_timescale. For each timescale, we
generate the two sinusoidal signals sin(timestep/timescale) and
cos(timestep/timescale). All of these sinusoids are concatenated in
the depth dimension, padded with zeros to be the same depth as the input,
and added into input.
Args:
x: a Tensor with shape [?, length, ?, depth]
min_timescale: a float
max_timescale: a float
num_timescales: an int <= depth/2
Returns:
a Tensor the same shape as x.
"""
length = tf.shape(x)[1]
depth = tf.shape(x)[3]
signal = get_timing_signal(length, min_timescale, max_timescale,
num_timescales)
padded_signal = tf.pad(signal, [[0, 0], [0, depth - 2 * num_timescales]])
return x + tf.reshape(padded_signal, [1, length, 1, depth])
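# Illustrative sketch (not part of the original module): adding the sinusoidal
# timing signal to a dummy [batch, length, 1, depth] activation. The shapes are
# arbitrary assumptions; 2 * num_timescales must not exceed the depth (here
# 2 * 16 == 32).
def _example_add_timing_signal():
  x = tf.zeros([2, 5, 1, 32])
  return add_timing_signal(x, min_timescale=1.0, max_timescale=1e4,
                           num_timescales=16)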
def mask_from_embedding(emb):
"""Input embeddings -> padding mask.
We have hacked symbol_modality to return all-zero embeddings for padding.
Returns a mask with 0.0 in the padding positions and 1.0 elsewhere.
Args:
emb: a Tensor with shape [batch, width, height, depth].
Returns:
a 0.0/1.0 Tensor with shape [batch, width, height, 1].
"""
return weights_nonzero(tf.reduce_sum(tf.abs(emb), axis=3, keep_dims=True))
def mask_leq(target_length, source_length):
"""A mask with 1.0 wherever source_pos <= target_pos and 0.0 elsewhere.
Args:
target_length: an integer
source_length: an integer
Returns:
a Tensor with shape [1, target_length, source_length]
"""
return tf.expand_dims(
tf.matrix_band_part(tf.ones([target_length, source_length]), -1, 0), 0)
def attention_1d_v0(source,
target,
attention_size,
output_size,
num_heads,
mask=None,
transform_source=True,
transform_target=True,
transform_output=True,
summaries=True,
name=None):
"""multi-headed attention.
TODO(noam): this could probably be extended to 2d.
Args:
source: a Tensor of shape [batch, source_length, source_depth]
target: a Tensor of shape [batch, target_length, target_depth]
attention_size: an integer
output_size: an integer
num_heads: an integer divisor of attention_size
mask: a float32 Tensor of shape [batch, target_length, source_length]
1.0 means can-see; 0.0 means can't-see.
Any dimension can be 1 (supports broadcasting).
transform_source: a boolean
transform_target: a boolean
transform_output: a boolean
summaries: a boolean
name: an optional string
Returns:
a Tensor of shape [batch, length, output_size]
"""
with tf.variable_scope(name, default_name="attention", values=[target]):
source_length = tf.shape(source)[1]
target_length = tf.shape(target)[1]
batch = tf.shape(source)[0]
def _maybe_transform(t, size, should_transform, name):
if should_transform:
return conv1d(t, size, 1, name=name)
else:
assert t.get_shape()[-1] == size
return t
source_attention = _maybe_transform(source, attention_size,
transform_source, "source_attention")
target_attention = _maybe_transform(target, attention_size,
transform_target, "target_attention")
assert attention_size % num_heads == 0
size_per_head = attention_size // num_heads
source_attention = tf.reshape(
source_attention, [batch, source_length, num_heads, size_per_head])
target_attention = tf.reshape(
target_attention, [batch, target_length, num_heads, size_per_head])
# [batch, num_heads, length, size_per_head]
source_attention = tf.transpose(source_attention, [0, 2, 1, 3])
target_attention = tf.transpose(target_attention, [0, 2, 1, 3])
# [batch, num_heads, target_length, source_length]
attention = tf.matmul(target_attention, source_attention, transpose_b=True)
attention *= size_per_head**-0.5
if mask is not None:
mask = tf.expand_dims(mask, 1)
mask = (1.0 - mask) * -1e9
attention += mask
attention = tf.nn.softmax(attention)
if summaries and not tf.get_variable_scope().reuse:
# Compute a color image summary.
image = tf.reshape(attention,
[batch, num_heads, target_length, source_length])
image = tf.transpose(image, [0, 2, 3, 1])
image = tf.pow(image, 0.2) # for high-dynamic-range
# Each head will correspond to one of RGB.
# pad the heads to be a multiple of 3
extra_heads = -num_heads % 3
      image = tf.pad(image, [[0, 0], [0, 0], [0, 0], [0, extra_heads]])
image = tf.reshape(image, [
batch, target_length, source_length, 3, (num_heads + extra_heads) // 3
])
image = tf.reduce_max(image, 4)
tf.summary.image("local_attention", image, max_outputs=1)
# output: [batch, num_heads, target_length, size_per_head]
output = tf.matmul(attention, source_attention)
output = tf.transpose(output, [0, 2, 1, 3])
output = tf.reshape(output, [batch, target_length, attention_size])
output = _maybe_transform(output, output_size, transform_output,
"attention_output")
return output
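# Illustrative sketch (not part of the original module): multi-headed attention
# from a 9-step target onto a 12-step source. All shapes are arbitrary
# assumptions; attention_size must be divisible by num_heads.
def _example_attention_1d_v0():
  source = tf.zeros([2, 12, 64])
  target = tf.zeros([2, 9, 64])
  return attention_1d_v0(
      source, target, attention_size=64, output_size=64, num_heads=8,
      summaries=False)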
def relu_density_logit(x, reduce_dims):
"""logit(density(x)).
Useful for histograms.
Args:
    x: a Tensor, typically the output of tf.relu
reduce_dims: a list of dimensions
Returns:
a Tensor
"""
frac = tf.reduce_mean(tf.to_float(x > 0.0), reduce_dims)
scaled = tf.log(frac + math.exp(-10)) - tf.log((1.0 - frac) + math.exp(-10))
return scaled
def conv_hidden_relu(inputs,
hidden_size,
output_size,
kernel_size=(1, 1),
summaries=True,
dropout=0.0,
**kwargs):
"""Hidden layer with RELU activation followed by linear projection."""
name = kwargs.pop("name") if "name" in kwargs else None
with tf.variable_scope(name, "conv_hidden_relu", [inputs]):
if inputs.get_shape().ndims == 3:
is_3d = True
inputs = tf.expand_dims(inputs, 2)
else:
is_3d = False
h = conv(
inputs,
hidden_size,
kernel_size,
activation=tf.nn.relu,
name="conv1",
**kwargs)
if dropout != 0.0:
h = tf.nn.dropout(h, 1.0 - dropout)
if summaries and not tf.get_variable_scope().reuse:
tf.summary.histogram("hidden_density_logit",
relu_density_logit(
h, list(range(inputs.shape.ndims - 1))))
ret = conv(h, output_size, (1, 1), name="conv2", **kwargs)
if is_3d:
ret = tf.squeeze(ret, 2)
return ret
def conv_gru(x,
kernel_size,
filters,
padding="SAME",
dilation_rate=(1, 1),
name=None,
reuse=None):
"""Convolutional GRU in 1 dimension."""
# Let's make a shorthand for conv call first.
def do_conv(args, name, bias_start, padding):
return conv(
args,
filters,
kernel_size,
padding=padding,
dilation_rate=dilation_rate,
bias_initializer=tf.constant_initializer(bias_start),
name=name)
# Here comes the GRU gate.
with tf.variable_scope(
name, default_name="conv_gru", values=[x], reuse=reuse):
reset = saturating_sigmoid(do_conv(x, "reset", 1.0, padding))
gate = saturating_sigmoid(do_conv(x, "gate", 1.0, padding))
candidate = tf.tanh(do_conv(reset * x, "candidate", 0.0, padding))
return gate * x + (1 - gate) * candidate
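# Illustrative sketch (not part of the original module): applying the
# convolutional GRU gate to a [batch, length, 1, filters] tensor. The gate mixes
# the input with a candidate computed from a reset-gated input; the shapes here
# are arbitrary assumptions and filters must match the input depth.
def _example_conv_gru():
  x = tf.zeros([2, 10, 1, 16])
  return conv_gru(x, (3, 1), 16)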
def conv_lstm(x,
kernel_size,
filters,
padding="SAME",
dilation_rate=(1, 1),
name=None,
reuse=None):
"""Convolutional LSTM in 1 dimension."""
with tf.variable_scope(
name, default_name="conv_lstm", values=[x], reuse=reuse):
gates = conv(
x,
4 * filters,
kernel_size,
padding=padding,
dilation_rate=dilation_rate)
g = tf.split(layer_norm(gates, 4 * filters), 4, axis=3)
new_cell = tf.sigmoid(g[0]) * x + tf.sigmoid(g[1]) * tf.tanh(g[3])
return tf.sigmoid(g[2]) * tf.tanh(new_cell)
def diagonal_conv_gru(x,
kernel_size,
filters,
train,
dropout=0.0,
name=None,
reuse=None):
"""Diagonal Convolutional GRU as in https://arxiv.org/abs/1702.08727."""
# Let's make a shorthand for conv call first.
def do_conv(args, name, bias_start):
return conv(
args,
filters,
kernel_size,
padding="SAME",
bias_initializer=tf.constant_initializer(bias_start),
name=name)
# Here comes the GRU gate.
with tf.variable_scope(
name, default_name="diagonal_conv_gru", values=[x], reuse=reuse):
reset, reset_cost = hard_sigmoid(do_conv(x, "reset", 0.5))
gate, gate_cost = hard_sigmoid(do_conv(x, "gate", 0.7))
candidate = tf.tanh(do_conv(reset * x, "candidate", 0.0))
# Dropout if training.
if dropout > 0.0 and train:
candidate = tf.nn.dropout(candidate, 1.0 - dropout)
# Diagonal shift.
shift_filters = filters // 3
base_filter = ([[0, 1, 0]] * (filters - 2 * shift_filters) +
[[1, 0, 0]] * shift_filters + [[0, 0, 1]] * shift_filters)
shift_filter = tf.constant(np.transpose(base_filter), dtype=tf.float32)
shift_filter = tf.expand_dims(tf.expand_dims(shift_filter, 0), 3)
x_shifted = tf.nn.depthwise_conv2d(
x, shift_filter, [1, 1, 1, 1], padding="SAME")
# Return the gated result and cost.
total_cost_avg = 0.5 * (reset_cost + gate_cost)
return gate * x_shifted + (1 - gate) * candidate, total_cost_avg
def pad_to_same_length(x, y, final_length_divisible_by=1, axis=1):
"""Pad tensors x and y on axis 1 so that they have the same length."""
if axis not in [1, 2]:
raise ValueError("Only axis=1 and axis=2 supported for now.")
with tf.name_scope("pad_to_same_length", [x, y]):
x_length = tf.shape(x)[axis]
y_length = tf.shape(y)[axis]
max_length = tf.maximum(x_length, y_length)
if final_length_divisible_by > 1:
# Find the nearest larger-or-equal integer divisible by given number.
max_length += final_length_divisible_by - 1
max_length //= final_length_divisible_by
max_length *= final_length_divisible_by
length_diff1 = max_length - x_length
length_diff2 = max_length - y_length
def padding_list(length_diff, arg):
if axis == 1:
return [[[0, 0], [0, length_diff]],
tf.zeros([tf.rank(arg) - 2, 2], dtype=tf.int32)]
return [[[0, 0], [0, 0], [0, length_diff]],
tf.zeros([tf.rank(arg) - 3, 2], dtype=tf.int32)]
paddings1 = tf.concat(padding_list(length_diff1, x), axis=0)
paddings2 = tf.concat(padding_list(length_diff2, y), axis=0)
res_x = tf.pad(x, paddings1)
res_y = tf.pad(y, paddings2)
# Static shapes are the same except for axis=1.
x_shape = x.shape.as_list()
x_shape[axis] = None
res_x.set_shape(x_shape)
y_shape = y.shape.as_list()
y_shape[axis] = None
res_y.set_shape(y_shape)
return res_x, res_y
def pad_with_zeros(logits, labels):
"""Pad labels on the length dimension to match logits length."""
with tf.name_scope("pad_with_zeros", [logits, labels]):
logits, labels = pad_to_same_length(logits, labels)
if len(labels.shape.as_list()) == 3: # 2-d labels.
logits, labels = pad_to_same_length(logits, labels, axis=2)
return labels
def weights_nonzero(labels):
"""Assign weight 1.0 to all labels except for padding (id=0)."""
return tf.to_float(tf.not_equal(labels, 0))
def weights_all(labels):
"""Assign weight 1.0 to all labels."""
return tf.ones_like(labels, dtype=tf.float32)
def weights_concatenated(labels):
"""Assign weight 1.0 to the "target" part of the concatenated labels.
The labels look like:
source English I love you . ID1 target French Je t'aime . ID1 source
English the cat ID1 target French le chat ID1 source English ...
We want to assign weight 1.0 to all words in the target text (including the
ID1 end symbol), but not to the source text or the boilerplate. In the
above example, the target words that get positive weight are:
Je t'aime . ID1 le chat ID1
Args:
labels: a Tensor
Returns:
a Tensor
"""
eos_mask = tf.to_int32(tf.equal(labels, 1))
sentence_num = tf.cumsum(eos_mask, axis=1, exclusive=True)
in_target = tf.equal(tf.mod(sentence_num, 2), 1)
# first two tokens of each sentence are boilerplate.
sentence_num_plus_one = sentence_num + 1
shifted = tf.pad(sentence_num_plus_one, [[0, 0], [2, 0], [0, 0],
[0, 0]])[:, :-2, :, :]
nonboilerplate = tf.equal(sentence_num_plus_one, shifted)
ret = tf.to_float(tf.logical_and(nonboilerplate, in_target))
return ret
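# Illustrative sketch (not part of the original module): a toy concatenated
# sequence [src, src, ID1, tgt, tgt, ID1] reshaped to the expected
# [batch, length, 1, 1]. Because the first two tokens of every segment are
# treated as boilerplate, only the trailing ID1 of the target segment receives
# weight 1.0 in this tiny example.
def _example_weights_concatenated():
  labels = tf.reshape(tf.constant([3, 4, 1, 7, 8, 1], dtype=tf.int32),
                      [1, 6, 1, 1])
  return weights_concatenated(labels)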
def padded_cross_entropy(logits,
labels,
label_smoothing,
weights_fn=weights_nonzero,
reduce_sum=True):
"""Compute cross-entropy assuming 0s are padding.
Computes a loss numerator (the sum of losses), and loss denominator
(the number of non-padding tokens).
Args:
logits: a `Tensor` with shape `[batch, timesteps, vocab_size]`.
labels: an integer `Tensor` with shape `[batch, timesteps]`.
label_smoothing: a floating point `Scalar`.
weights_fn: A function from labels to weights.
reduce_sum: a Boolean, whether to sum at the end or not.
Returns:
loss_numerator: a `Scalar`. Sum of losses.
    loss_denominator: a `Scalar`. The number of non-padding target tokens.
"""
confidence = 1.0 - label_smoothing
vocab_size = tf.shape(logits)[-1]
with tf.name_scope("padded_cross_entropy", [logits, labels]):
pad_labels = pad_with_zeros(logits, labels)
xent = smoothing_cross_entropy(logits, pad_labels, vocab_size, confidence)
weights = weights_fn(pad_labels)
if not reduce_sum:
return xent * weights, weights
return tf.reduce_sum(xent * weights), tf.reduce_sum(weights)
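# Illustrative sketch (not part of the original module): computing the smoothed,
# padding-aware loss for dummy logits and labels with the docstring shapes
# [batch, timesteps, vocab] and [batch, timesteps]. Label id 0 would be treated
# as padding and dropped from both sums.
def _example_padded_cross_entropy():
  logits = tf.zeros([2, 7, 100])
  labels = tf.ones([2, 7], dtype=tf.int32)
  loss_num, loss_denom = padded_cross_entropy(logits, labels,
                                              label_smoothing=0.1)
  return loss_num / loss_denom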
def smoothing_cross_entropy(logits, labels, vocab_size, confidence):
"""Cross entropy with label smoothing to limit over-confidence."""
with tf.name_scope("smoothing_cross_entropy", [logits, labels]):
# Low confidence is given to all non-true labels, uniformly.
low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
# Normalizing constant is the best cross-entropy value with soft targets.
# We subtract it just for readability, makes no difference on learning.
normalizing = -(confidence * tf.log(confidence) + tf.to_float(
vocab_size - 1) * low_confidence * tf.log(low_confidence + 1e-20))
# Soft targets.
soft_targets = tf.one_hot(
tf.cast(labels, tf.int32),
depth=vocab_size,
on_value=confidence,
off_value=low_confidence)
xentropy = tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=soft_targets)
return xentropy - normalizing
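# Worked example (assumption, not from the original module): with confidence 0.9
# and a 10-token vocabulary, every wrong class receives (1 - 0.9) / 9 ~= 0.0111
# of the target mass, and `normalizing` is the entropy of that soft target
# distribution, subtracted only to make the reported loss easier to read.
def _example_label_smoothing_split():
  vocab_size, confidence = 10, 0.9
  low_confidence = (1.0 - confidence) / (vocab_size - 1)
  return confidence, low_confidence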
```
|
{
"source": "jeid64/mesos",
"score": 2
}
|
#### File: mesos/support/post-reviews.py
```python
import atexit
import os
import sys
from subprocess import *
def readline(prompt):
try:
return raw_input(prompt)
except KeyboardInterrupt:
sys.exit(1)
def execute(command, ignore_errors=False):
process = Popen(command,
stdin=PIPE,
stdout=PIPE,
stderr=STDOUT,
shell=False)
data = process.stdout.read()
status = process.wait()
if status != 0 and not ignore_errors:
cmdline = ' '.join(command) if isinstance(command, list) else command
print 'Failed to execute: \'' + cmdline + '\':'
print data
sys.exit(1)
elif status != 0:
return None
return data
# TODO(benh): Make sure this is a git repository, apologize if not.
# Don't do anything if people have uncommitted changes.
diff_stat = execute(['git', 'diff', '--shortstat']).strip()
if diff_stat:
print 'Please commit or stash any changes before using post-reviews!'
sys.exit(1)
top_level_dir = execute(['git', 'rev-parse', '--show-toplevel']).strip()
repository = 'git://git.apache.org/mesos.git'
parent_branch = 'master'
branch_ref = execute(['git', 'symbolic-ref', 'HEAD']).strip()
branch = branch_ref.replace('refs/heads/', '', 1)
temporary_branch = '_post-reviews_' + branch
# Always delete the temporary branch.
atexit.register(lambda: execute(['git', 'branch', '-D', temporary_branch], True))
# Always put us back on the original branch.
atexit.register(lambda: execute(['git', 'checkout', branch]))
merge_base = execute(['git', 'merge-base', parent_branch, branch_ref]).strip()
print 'Running post-review across all of ...'
call(['git',
'--no-pager',
'log',
'--pretty=format:%Cred%H%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr)%Creset',
merge_base + '..HEAD'])
log = execute(['git',
'--no-pager',
'log',
'--pretty=oneline',
'--reverse',
merge_base + '..HEAD']).strip()
shas = []
for line in log.split('\n'):
sha = line.split()[0]
shas.append(sha)
previous = 'master'
for i in range(len(shas)):
sha = shas[i]
execute(['git', 'branch', '-D', temporary_branch], True)
message = execute(['git',
'--no-pager',
'log',
'--pretty=format:%B',
previous + '..' + sha])
review_request_id = None
if message.find('Review: ') != -1:
url = message[(message.index('Review: ') + len('Review: ')):].strip()
# TODO(benh): Handle bad (or not Review Board) URLs.
review_request_id = os.path.basename(url.strip('/'))
# Show the commit.
if review_request_id is None:
print '\nCreating diff of:\n'
call(['git',
'--no-pager',
'log',
'--pretty=format:%Cred%H%Creset -%C(yellow)%d%Creset %s',
previous + '..' + sha])
else:
print '\nUpdating diff of:\n'
call(['git',
'--no-pager',
'log',
'--pretty=format:%Cred%H%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr)%Creset',
previous + '..' + sha])
# Show the "parent" commit(s).
print '\n... with parent diff created from:\n'
call(['git',
'--no-pager',
'log',
'--pretty=format:%Cred%H%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr)%Creset',
parent_branch + '..' + previous])
try:
raw_input('\nPress enter to continue or \'Ctrl-C\' to skip.\n')
except KeyboardInterrupt:
i = i + 1
previous = sha
continue
revision_range = previous + ':' + sha
if review_request_id is None:
output = execute(['post-review',
'--repository-url=' + repository,
'--tracking-branch=' + parent_branch,
'--revision-range=' + revision_range] + sys.argv[1:]).strip()
else:
output = execute(['post-review',
'--review-request-id=' + review_request_id,
'--repository-url=' + repository,
'--tracking-branch=' + parent_branch,
'--revision-range=' + revision_range] + sys.argv[1:]).strip()
print output
if review_request_id is not None:
i = i + 1
previous = sha
continue
lines = output.split('\n')
url = lines[len(lines) - 1]
url = url.strip('/')
# Construct new commit message.
message = message + '\n' + 'Review: ' + url + '\n'
execute(['git', 'checkout', '-b', temporary_branch])
execute(['git', 'reset', '--hard', sha])
execute(['git', 'commit', '--amend', '-m', message])
# Now rebase all remaining shas on top of this amended commit.
j = i + 1
old_sha = execute(['cat', os.path.join(top_level_dir, '.git/refs/heads', temporary_branch)]).strip()
previous = old_sha
while j < len(shas):
execute(['git', 'checkout', shas[j]])
execute(['git', 'rebase', temporary_branch])
# Get the sha for our detached HEAD.
new_sha = execute(['git', '--no-pager', 'log', '--pretty=format:%H', '-n', '1', 'HEAD']).strip()
execute(['git',
'update-ref',
'refs/heads/' + temporary_branch,
new_sha,
old_sha])
old_sha = new_sha
shas[j] = new_sha
j = j + 1
# Okay, now update the actual branch to our temporary branch.
new_sha = old_sha
old_sha = execute(['cat', os.path.join(top_level_dir, '.git/refs/heads', branch)]).strip()
execute(['git', 'update-ref', 'refs/heads/' + branch, new_sha, old_sha])
i = i + 1
```
|
{
"source": "jeikabu/lumberyard",
"score": 2
}
|
#### File: AzFramework/CodeGen/AzEBusInline.py
```python
import os
from az_code_gen.base import *
from AzReflectionCpp import format_cpp_annotations
class AZEBusInline_Driver(TemplateDriver):
def apply_transformations(self, json_object):
format_cpp_annotations(json_object)
def render_templates(self, input_file, **template_kwargs):
input_file_name, input_file_ext = os.path.splitext(input_file)
self.render_template_to_file(
"AzEBusInline.tpl", template_kwargs, '{}.generated.inline'.format(input_file_name))
# Factory function - called from launcher
def create_drivers(env):
return [AZEBusInline_Driver(env)]
```
#### File: AzTestScanner/aztest/filters.py
```python
import logging as lg
import os
import re
from aztest.common import to_list
from aztest.errors import AzTestError
logger = lg.getLogger(__name__)
DEFAULT_WHITELIST_FILE = "lmbr_test_whitelist.txt"
DEFAULT_BLACKLIST_FILE = "lmbr_test_blacklist.txt"
def get_default_whitelist():
"""Returns the default whitelist file path if it exists, otherwise empty string
:return: whitelist path or empty string
:rtype: str
"""
if os.path.exists(os.path.abspath(DEFAULT_WHITELIST_FILE)):
return os.path.abspath(DEFAULT_WHITELIST_FILE)
else:
return ""
def get_default_blacklist():
"""Returns the default blacklist file path if it exists, otherwise empty string
:return: blacklist path or empty string
:rtype: str
"""
if os.path.exists(os.path.abspath(DEFAULT_BLACKLIST_FILE)):
return os.path.abspath(DEFAULT_BLACKLIST_FILE)
else:
return ""
class FileApprover(object):
"""Class for compiling and validating the set of files that are allowed to be tested"""
whitelist = None
blacklist = None
def __init__(self, whitelist_files=None, blacklist_files=None):
self.make_whitelist(whitelist_files)
self.make_blacklist(blacklist_files)
def make_whitelist(self, whitelist_files):
"""Make the whitelist from a file or list of files
It is assumed that if no whitelist is provided, then we will scan everything not in a blacklist.
:param whitelist_files: path to a whitelist file or list of paths
"""
if whitelist_files:
for whitelist_file in to_list(whitelist_files):
self.add_whitelist(whitelist_file)
else:
self.whitelist = None
def make_blacklist(self, blacklist_files):
"""Make the blacklist from a file or list of files
        It is assumed that if no blacklist is provided, then we will scan everything in a directory or whatever is
        whitelisted.
:param blacklist_files: path to a blacklist file or list of paths
"""
if blacklist_files:
for blacklist_file in to_list(blacklist_files):
self.add_blacklist(blacklist_file)
else:
self.blacklist = None
def add_whitelist(self, whitelist_file):
"""Add the file of patterns to the whitelist
:param whitelist_file: path to a whitelist file
"""
if whitelist_file:
patterns = self.get_patterns_from_file(whitelist_file)
if patterns:
self.whitelist = patterns | (self.whitelist or set())
def add_blacklist(self, blacklist_file):
"""Add the file of patterns to the blacklist
:param blacklist_file: path to a blacklist file
"""
if blacklist_file:
patterns = self.get_patterns_from_file(blacklist_file)
if patterns:
self.blacklist = patterns | (self.blacklist or set())
def add_whitelist_patterns(self, patterns):
"""Add patterns to the whitelist
:param patterns: regular expression pattern or list of patterns to add to whitelist
"""
self.whitelist = set(to_list(patterns)) | (self.whitelist or set())
def add_blacklist_patterns(self, patterns):
"""Add patterns to the blacklist
:param patterns: regular expression pattern or list of patterns to add to blacklist
"""
self.blacklist = set(to_list(patterns)) | (self.blacklist or set())
def is_approved(self, file_name):
"""Checks to see if file_name is in the whitelist and not the blacklist
:param file_name: name or path of the file to check
        :return: True if file_name is whitelisted (or there is no whitelist) and is not blacklisted (or there is no
        blacklist), else False
"""
return self.is_whitelisted(file_name) and not self.is_blacklisted(file_name)
def is_whitelisted(self, file_name):
"""Checks to see if file_name is in the whitelist
:param file_name: name or path of the file to check
:return: true if file_name in whitelist or whitelist is None, else false
"""
return True if not self.whitelist else self.is_in_list(file_name, self.whitelist, 'whitelist')
def is_blacklisted(self, file_name):
"""Checks to see if file_name is in the blacklist
:param file_name: name or path of the file to check
        :return: True if file_name is in the blacklist; False if it is not or if there is no blacklist
"""
return False if not self.blacklist else self.is_in_list(file_name, self.blacklist, 'blacklist')
@staticmethod
def get_patterns_from_file(pattern_file):
"""Returns set of patterns from pattern_file if pattern_file is valid, otherwise returns None
:param pattern_file: path of the whitelist or blacklist file to get patterns from
:return: the set of patterns from the file, or None if pattern_file is invalid
:rtype: set
"""
if not pattern_file or not os.path.exists(pattern_file):
raise AzTestError("Invalid module/pattern file path: {}".format(pattern_file))
logger.info("Using module/pattern file: {}".format(pattern_file))
with open(pattern_file, 'r') as f:
patterns = set((line.strip() for line in f))
return patterns
@staticmethod
def is_in_list(file_name, patterns, list_name='CMD'):
"""Checks file_name against the set of patterns using regex
For the curious, we do not compile the patterns ahead of time because the 're' module uses an internal cache
that stores compiled patterns, and it is large enough (100) that we shouldn't need to worry about patterns being
recompiled during a scan.
:param file_name: name or path of the file to check
:param patterns: the set of patterns to use as regular expressions
:param list_name: the name of the list being checked (if applicable, defaults to CMD)
:return: true if there is a match, otherwise false
"""
if not file_name or not patterns:
return False
logger.debug("Checking file: {}".format(file_name))
for pattern in patterns:
full_pattern = r"^.*[/\\]{}(\.dll|\.exe|\.dylib)?$".format(pattern)
match = re.match(full_pattern, file_name, re.IGNORECASE)
if match:
logger.debug("Found file {} in {} patterns.".format(file_name, list_name))
return True
return False
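# Illustrative sketch (assumption, not part of the original module): using
# FileApprover with in-memory patterns instead of whitelist/blacklist files.
# The path and patterns below are made up for the example.
def _example_file_approver():
    approver = FileApprover()
    approver.add_whitelist_patterns(['.*Test'])
    approver.add_blacklist_patterns(['LegacyTest'])
    # Matches the whitelist pattern and is not blacklisted, so it is approved.
    return approver.is_approved('/dev/Bin64/AzFrameworkTest.dll')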
```
#### File: HLSLCrossCompiler/mk/build.py
```python
import os.path
import argparse
from argparse import ArgumentParser
from shutil import copy
import sys
import glob
import subprocess
def get_vcvarsall_path():
import _winreg as winreg
possible_roots = ['SOFTWARE\\Wow6432node', 'SOFTWARE']
vc_dir = None
for root in possible_roots:
try:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, root + "\\Microsoft\\VisualStudio\\11.0\\Setup\\VC")
vc_dir = winreg.QueryValueEx(key, 'ProductDir')[0]
key.Close()
break
except:
continue
if vc_dir is None:
raise RuntimeError('Could not detect a compatible installation of Visual Studio')
vcvarsall_path = os.path.join(vc_dir, 'vcvarsall.bat')
if not os.path.exists(vcvarsall_path):
raise RuntimeError('Could not find vcvarsall.bat')
return vcvarsall_path
def copy_output(src_file, dst_directory):
print('Copying ' + src_file + ' to ' + dst_directory)
copy(src_file, dst_directory)
def copy_outputs(src_queries, dst_directory):
if dst_directory is not None:
for src_query in src_queries:
results = glob.glob(os.path.normpath(src_query))
for result in results:
copy_output(result, dst_directory)
def build(configuration, platform, lib_dir = None, exe_dir = None, portable = False):
if lib_dir is not None:
lib_dir = os.path.normpath(os.path.abspath(lib_dir))
if not os.path.exists(lib_dir):
os.makedirs(lib_dir)
if exe_dir is not None:
exe_dir = os.path.normpath(os.path.abspath(exe_dir))
if not os.path.exists(exe_dir):
os.makedirs(exe_dir)
id_platform_flags = platform
if portable:
id_platform_flags += '_portable'
script_dir = os.path.dirname(os.path.realpath(__file__))
build_dir = os.path.join(script_dir, os.pardir, id_platform_flags + '_' + configuration, 'build')
if not os.path.exists(build_dir):
os.makedirs(build_dir)
os.chdir(build_dir)
configuration_name = {'release': 'Release', 'debug': 'Debug'}[configuration]
exe_queries = []
lib_queries = []
if platform in ['win32', 'win64']:
platform_name = {'win32' : 'Win32', 'win64' : 'Win64'}[platform]
if platform == 'win64':
platform_name = 'x64'
generator_suffix = ' Win64'
vcvarsall_arg = ' x86_amd64'
else:
platform_name = 'Win32'
generator_suffix = ''
vcvarsall_arg = ''
flags = ''
if portable:
flags += ' -DPORTABLE=ON'
commands = ['cmake -G "Visual Studio 11' + generator_suffix + '"'+ flags + ' "' + script_dir + '"']
vcvarsall_path = get_vcvarsall_path()
commands += ['"' + vcvarsall_path + '"' + vcvarsall_arg]
commands += ['msbuild.exe HLSLCrossCompilerProj.sln /p:Configuration=' + configuration_name + ' /p:Platform=' + platform_name]
cmd_line = '&&'.join(commands)
p = subprocess.Popen(cmd_line, shell = True)
p.wait()
exe_queries += [os.path.join(build_dir, os.pardir, 'bin', id_platform_flags, configuration_name, '*.*')]
lib_queries += [os.path.join(build_dir, os.pardir, 'lib', id_platform_flags, configuration_name, '*.*')]
elif platform == 'linux':
subprocess.call(['cmake', script_dir,'-DCMAKE_BUILD_TYPE:STRING=' + configuration_name])
subprocess.call(['make', 'libHLSLcc'])
subprocess.call(['make', 'HLSLcc'])
exe_queries += [os.path.join(build_dir, os.pardir, 'bin', id_platform_flags, '*')]
lib_queries += [os.path.join(build_dir, os.pardir, 'lib', id_platform_flags, '*')]
elif platform == 'android-armeabi-v7a':
jni_dir = os.path.join(script_dir, os.pardir, 'jni')
os.chdir(jni_dir)
subprocess.call(['ndk-build', '-j4'])
lib_queries += [os.path.join(os.pardir, 'obj', 'local', 'armeabi-v7a', '*.*')]
copy_outputs(exe_queries, exe_dir)
copy_outputs(lib_queries, lib_dir)
def main():
parser = ArgumentParser(description = 'Build HLSLCrossCompiler')
parser.add_argument(
'--platform',
choices = ['win32', 'win64', 'linux', 'android-armeabi-v7a'],
dest = 'platform',
required = True)
parser.add_argument(
'--configuration',
choices = ['release', 'debug'],
dest = 'configuration',
required = True)
parser.add_argument(
'--portable',
dest = 'portable',
action = 'store_true')
parser.add_argument(
'--lib-output-dir',
dest = 'lib')
parser.add_argument(
'--exe-output-dir',
dest = 'exe')
params = parser.parse_args()
    build(params.configuration, params.platform, params.lib, params.exe, params.portable)
if __name__ == '__main__':
main()
```
#### File: Python/piexif/_dump.py
```python
import copy
import numbers
import struct
from ._common import *
from ._exif import *
TIFF_HEADER_LENGTH = 8
def dump(exif_dict_original):
"""
    py:function:: piexif.dump(exif_dict)
    Return exif as bytes.
    :param dict exif_dict_original: Exif data({"0th":dict, "Exif":dict, "GPS":dict, "Interop":dict, "1st":dict, "thumbnail":bytes})
    :return: Exif as bytes
:rtype: bytes
"""
exif_dict = copy.deepcopy(exif_dict_original)
header = b"Exif\x00\x00\x4d\x4d\x00\x2a\x00\x00\x00\x08"
exif_is = False
gps_is = False
interop_is = False
first_is = False
if "0th" in exif_dict:
zeroth_ifd = exif_dict["0th"]
else:
zeroth_ifd = {}
if (("Exif" in exif_dict) and len(exif_dict["Exif"]) or
("Interop" in exif_dict) and len(exif_dict["Interop"]) ):
zeroth_ifd[ImageIFD.ExifTag] = 1
exif_is = True
exif_ifd = exif_dict["Exif"]
if ("Interop" in exif_dict) and len(exif_dict["Interop"]):
            exif_ifd[ExifIFD.InteroperabilityTag] = 1
interop_is = True
interop_ifd = exif_dict["Interop"]
        elif ExifIFD.InteroperabilityTag in exif_ifd:
exif_ifd.pop(ExifIFD.InteroperabilityTag)
elif ImageIFD.ExifTag in zeroth_ifd:
zeroth_ifd.pop(ImageIFD.ExifTag)
if ("GPS" in exif_dict) and len(exif_dict["GPS"]):
zeroth_ifd[ImageIFD.GPSTag] = 1
gps_is = True
gps_ifd = exif_dict["GPS"]
elif ImageIFD.GPSTag in zeroth_ifd:
zeroth_ifd.pop(ImageIFD.GPSTag)
if (("1st" in exif_dict) and
("thumbnail" in exif_dict) and
(exif_dict["thumbnail"] is not None)):
first_is = True
exif_dict["1st"][ImageIFD.JPEGInterchangeFormat] = 1
exif_dict["1st"][ImageIFD.JPEGInterchangeFormatLength] = 1
first_ifd = exif_dict["1st"]
zeroth_set = _dict_to_bytes(zeroth_ifd, "0th", 0)
zeroth_length = (len(zeroth_set[0]) + exif_is * 12 + gps_is * 12 + 4 +
len(zeroth_set[1]))
if exif_is:
exif_set = _dict_to_bytes(exif_ifd, "Exif", zeroth_length)
exif_length = len(exif_set[0]) + interop_is * 12 + len(exif_set[1])
else:
exif_bytes = b""
exif_length = 0
if gps_is:
gps_set = _dict_to_bytes(gps_ifd, "GPS", zeroth_length + exif_length)
gps_bytes = b"".join(gps_set)
gps_length = len(gps_bytes)
else:
gps_bytes = b""
gps_length = 0
if interop_is:
offset = zeroth_length + exif_length + gps_length
interop_set = _dict_to_bytes(interop_ifd, "Interop", offset)
interop_bytes = b"".join(interop_set)
interop_length = len(interop_bytes)
else:
interop_bytes = b""
interop_length = 0
if first_is:
offset = zeroth_length + exif_length + gps_length + interop_length
first_set = _dict_to_bytes(first_ifd, "1st", offset)
thumbnail = _get_thumbnail(exif_dict["thumbnail"])
thumbnail_max_size = 64000
if len(thumbnail) > thumbnail_max_size:
raise ValueError("Given thumbnail is too large. max 64kB")
else:
first_bytes = b""
if exif_is:
pointer_value = TIFF_HEADER_LENGTH + zeroth_length
pointer_str = struct.pack(">I", pointer_value)
key = ImageIFD.ExifTag
key_str = struct.pack(">H", key)
type_str = struct.pack(">H", TYPES.Long)
length_str = struct.pack(">I", 1)
exif_pointer = key_str + type_str + length_str + pointer_str
else:
exif_pointer = b""
if gps_is:
pointer_value = TIFF_HEADER_LENGTH + zeroth_length + exif_length
pointer_str = struct.pack(">I", pointer_value)
key = ImageIFD.GPSTag
key_str = struct.pack(">H", key)
type_str = struct.pack(">H", TYPES.Long)
length_str = struct.pack(">I", 1)
gps_pointer = key_str + type_str + length_str + pointer_str
else:
gps_pointer = b""
if interop_is:
pointer_value = (TIFF_HEADER_LENGTH +
zeroth_length + exif_length + gps_length)
pointer_str = struct.pack(">I", pointer_value)
key = ExifIFD.InteroperabilityTag
key_str = struct.pack(">H", key)
type_str = struct.pack(">H", TYPES.Long)
length_str = struct.pack(">I", 1)
interop_pointer = key_str + type_str + length_str + pointer_str
else:
interop_pointer = b""
if first_is:
pointer_value = (TIFF_HEADER_LENGTH + zeroth_length +
exif_length + gps_length + interop_length)
first_ifd_pointer = struct.pack(">L", pointer_value)
thumbnail_pointer = (pointer_value + len(first_set[0]) + 24 +
4 + len(first_set[1]))
thumbnail_p_bytes = (b"\x02\x01\x00\x04\x00\x00\x00\x01" +
struct.pack(">L", thumbnail_pointer))
thumbnail_length_bytes = (b"\x02\x02\x00\x04\x00\x00\x00\x01" +
struct.pack(">L", len(thumbnail)))
first_bytes = (first_set[0] + thumbnail_p_bytes +
thumbnail_length_bytes + b"\x00\x00\x00\x00" +
first_set[1] + thumbnail)
else:
first_ifd_pointer = b"\x00\x00\x00\x00"
zeroth_bytes = (zeroth_set[0] + exif_pointer + gps_pointer +
first_ifd_pointer + zeroth_set[1])
if exif_is:
exif_bytes = exif_set[0] + interop_pointer + exif_set[1]
return (header + zeroth_bytes + exif_bytes + gps_bytes +
interop_bytes + first_bytes)
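# Illustrative sketch (assumption, not part of the original module): a minimal
# exif dict serialized with dump(). ImageIFD comes from the wildcard import of
# ._exif above; empty IFDs and a missing thumbnail are simply skipped.
def _example_dump():
    exif_dict = {
        "0th": {ImageIFD.Make: u"ExampleMake"},
        "Exif": {},
        "GPS": {},
        "1st": {},
        "thumbnail": None,
    }
    return dump(exif_dict)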
def _get_thumbnail(jpeg):
segments = split_into_segments(jpeg)
while (b"\xff\xe0" <= segments[1][0:2] <= b"\xff\xef"):
segments.pop(1)
thumbnail = b"".join(segments)
return thumbnail
def _pack_byte(*args):
return struct.pack("B" * len(args), *args)
def _pack_signed_byte(*args):
return struct.pack("b" * len(args), *args)
def _pack_short(*args):
return struct.pack(">" + "H" * len(args), *args)
def _pack_signed_short(*args):
return struct.pack(">" + "h" * len(args), *args)
def _pack_long(*args):
return struct.pack(">" + "L" * len(args), *args)
def _pack_slong(*args):
return struct.pack(">" + "l" * len(args), *args)
def _pack_float(*args):
return struct.pack(">" + "f" * len(args), *args)
def _pack_double(*args):
return struct.pack(">" + "d" * len(args), *args)
def _value_to_bytes(raw_value, value_type, offset):
four_bytes_over = b""
value_str = b""
if value_type == TYPES.Byte:
length = len(raw_value)
if length <= 4:
value_str = (_pack_byte(*raw_value) +
b"\x00" * (4 - length))
else:
value_str = struct.pack(">I", offset)
four_bytes_over = _pack_byte(*raw_value)
elif value_type == TYPES.Short:
length = len(raw_value)
if length <= 2:
value_str = (_pack_short(*raw_value) +
b"\x00\x00" * (2 - length))
else:
value_str = struct.pack(">I", offset)
four_bytes_over = _pack_short(*raw_value)
elif value_type == TYPES.Long:
length = len(raw_value)
if length <= 1:
value_str = _pack_long(*raw_value)
else:
value_str = struct.pack(">I", offset)
four_bytes_over = _pack_long(*raw_value)
elif value_type == TYPES.SLong:
length = len(raw_value)
if length <= 1:
value_str = _pack_slong(*raw_value)
else:
value_str = struct.pack(">I", offset)
four_bytes_over = _pack_slong(*raw_value)
elif value_type == TYPES.Ascii:
try:
new_value = raw_value.encode("latin1") + b"\x00"
except:
try:
new_value = raw_value + b"\x00"
except TypeError:
raise ValueError("Got invalid type to convert.")
length = len(new_value)
if length > 4:
value_str = struct.pack(">I", offset)
four_bytes_over = new_value
else:
value_str = new_value + b"\x00" * (4 - length)
elif value_type == TYPES.Rational:
if isinstance(raw_value[0], numbers.Integral):
length = 1
num, den = raw_value
new_value = struct.pack(">L", num) + struct.pack(">L", den)
elif isinstance(raw_value[0], tuple):
length = len(raw_value)
new_value = b""
for n, val in enumerate(raw_value):
num, den = val
new_value += (struct.pack(">L", num) +
struct.pack(">L", den))
value_str = struct.pack(">I", offset)
four_bytes_over = new_value
elif value_type == TYPES.SRational:
if isinstance(raw_value[0], numbers.Integral):
length = 1
num, den = raw_value
new_value = struct.pack(">l", num) + struct.pack(">l", den)
elif isinstance(raw_value[0], tuple):
length = len(raw_value)
new_value = b""
for n, val in enumerate(raw_value):
num, den = val
new_value += (struct.pack(">l", num) +
struct.pack(">l", den))
value_str = struct.pack(">I", offset)
four_bytes_over = new_value
elif value_type == TYPES.Undefined:
length = len(raw_value)
if length > 4:
value_str = struct.pack(">I", offset)
try:
four_bytes_over = b"" + raw_value
except TypeError:
raise ValueError("Got invalid type to convert.")
else:
try:
value_str = raw_value + b"\x00" * (4 - length)
except TypeError:
raise ValueError("Got invalid type to convert.")
elif value_type == TYPES.SByte: # Signed Byte
length = len(raw_value)
if length <= 4:
value_str = (_pack_signed_byte(*raw_value) +
b"\x00" * (4 - length))
else:
value_str = struct.pack(">I", offset)
four_bytes_over = _pack_signed_byte(*raw_value)
elif value_type == TYPES.SShort: # Signed Short
length = len(raw_value)
if length <= 2:
value_str = (_pack_signed_short(*raw_value) +
b"\x00\x00" * (2 - length))
else:
value_str = struct.pack(">I", offset)
four_bytes_over = _pack_signed_short(*raw_value)
elif value_type == TYPES.Float:
length = len(raw_value)
if length <= 1:
value_str = _pack_float(*raw_value)
else:
value_str = struct.pack(">I", offset)
four_bytes_over = _pack_float(*raw_value)
elif value_type == TYPES.DFloat: # Double
length = len(raw_value)
value_str = struct.pack(">I", offset)
four_bytes_over = _pack_double(*raw_value)
length_str = struct.pack(">I", length)
return length_str, value_str, four_bytes_over
def _dict_to_bytes(ifd_dict, ifd, ifd_offset):
tag_count = len(ifd_dict)
entry_header = struct.pack(">H", tag_count)
if ifd in ("0th", "1st"):
entries_length = 2 + tag_count * 12 + 4
else:
entries_length = 2 + tag_count * 12
entries = b""
values = b""
for n, key in enumerate(sorted(ifd_dict)):
if (ifd == "0th") and (key in (ImageIFD.ExifTag, ImageIFD.GPSTag)):
continue
elif (ifd == "Exif") and (key == ExifIFD.InteroperabilityTag):
continue
elif (ifd == "1st") and (key in (ImageIFD.JPEGInterchangeFormat, ImageIFD.JPEGInterchangeFormatLength)):
continue
raw_value = ifd_dict[key]
key_str = struct.pack(">H", key)
value_type = TAGS[ifd][key]["type"]
type_str = struct.pack(">H", value_type)
four_bytes_over = b""
if isinstance(raw_value, numbers.Integral) or isinstance(raw_value, float):
raw_value = (raw_value,)
offset = TIFF_HEADER_LENGTH + entries_length + ifd_offset + len(values)
try:
length_str, value_str, four_bytes_over = _value_to_bytes(raw_value,
value_type,
offset)
except ValueError:
raise ValueError(
'"dump" got wrong type of exif value.\n' +
'{0} in {1} IFD. Got as {2}.'.format(key, ifd, type(ifd_dict[key]))
)
entries += key_str + type_str + length_str + value_str
values += four_bytes_over
return (entry_header + entries, values)
```
#### File: Python/PIL/MpoImagePlugin.py
```python
from . import Image, JpegImagePlugin
__version__ = "0.1"
def _accept(prefix):
return JpegImagePlugin._accept(prefix)
def _save(im, fp, filename):
# Note that we can only save the current frame at present
return JpegImagePlugin._save(im, fp, filename)
##
# Image plugin for MPO images.
class MpoImageFile(JpegImagePlugin.JpegImageFile):
format = "MPO"
format_description = "MPO (CIPA DC-007)"
_close_exclusive_fp_after_loading = False
def _open(self):
self.fp.seek(0) # prep the fp in order to pass the JPEG test
JpegImagePlugin.JpegImageFile._open(self)
self.mpinfo = self._getmp()
self.__framecount = self.mpinfo[0xB001]
self.__mpoffsets = [mpent['DataOffset'] + self.info['mpoffset']
for mpent in self.mpinfo[0xB002]]
self.__mpoffsets[0] = 0
# Note that the following assertion will only be invalid if something
# gets broken within JpegImagePlugin.
assert self.__framecount == len(self.__mpoffsets)
del self.info['mpoffset'] # no longer needed
self.__fp = self.fp # FIXME: hack
self.__fp.seek(self.__mpoffsets[0]) # get ready to read first frame
self.__frame = 0
self.offset = 0
# for now we can only handle reading and individual frame extraction
self.readonly = 1
def load_seek(self, pos):
self.__fp.seek(pos)
@property
def n_frames(self):
return self.__framecount
@property
def is_animated(self):
return self.__framecount > 1
def seek(self, frame):
if not self._seek_check(frame):
return
self.fp = self.__fp
self.offset = self.__mpoffsets[frame]
self.tile = [
("jpeg", (0, 0) + self.size, self.offset, (self.mode, ""))
]
self.__frame = frame
def tell(self):
return self.__frame
# --------------------------------------------------------------------
# Registry stuff
# Note that since MPO shares a factory with JPEG, we do not need to do a
# separate registration for it here.
# Image.register_open(MpoImageFile.format,
# JpegImagePlugin.jpeg_factory, _accept)
Image.register_save(MpoImageFile.format, _save)
Image.register_extension(MpoImageFile.format, ".mpo")
Image.register_mime(MpoImageFile.format, "image/mpo")
```
#### File: Python/PIL/WmfImagePlugin.py
```python
from __future__ import print_function
from . import Image, ImageFile
from ._binary import i16le as word, si16le as short, i32le as dword, si32le as _long
__version__ = "0.2"
_handler = None
if str != bytes:
long = int
def register_handler(handler):
"""
Install application-specific WMF image handler.
:param handler: Handler object.
"""
global _handler
_handler = handler
if hasattr(Image.core, "drawwmf"):
# install default handler (windows only)
class WmfHandler(object):
def open(self, im):
im.mode = "RGB"
self.bbox = im.info["wmf_bbox"]
def load(self, im):
im.fp.seek(0) # rewind
return Image.frombytes(
"RGB", im.size,
Image.core.drawwmf(im.fp.read(), im.size, self.bbox),
"raw", "BGR", (im.size[0]*3 + 3) & -4, -1
)
register_handler(WmfHandler())
#
# --------------------------------------------------------------------
# Read WMF file
def _accept(prefix):
return (
prefix[:6] == b"\xd7\xcd\xc6\x9a\x00\x00" or
prefix[:4] == b"\x01\x00\x00\x00"
)
##
# Image plugin for Windows metafiles.
class WmfStubImageFile(ImageFile.StubImageFile):
format = "WMF"
format_description = "Windows Metafile"
def _open(self):
# check placable header
s = self.fp.read(80)
if s[:6] == b"\xd7\xcd\xc6\x9a\x00\x00":
# placeable windows metafile
# get units per inch
inch = word(s, 14)
# get bounding box
x0 = short(s, 6)
y0 = short(s, 8)
x1 = short(s, 10)
y1 = short(s, 12)
# normalize size to 72 dots per inch
size = (x1 - x0) * 72 // inch, (y1 - y0) * 72 // inch
self.info["wmf_bbox"] = x0, y0, x1, y1
self.info["dpi"] = 72
# print(self.mode, self.size, self.info)
# sanity check (standard metafile header)
if s[22:26] != b"\x01\x00\t\x00":
raise SyntaxError("Unsupported WMF file format")
elif dword(s) == 1 and s[40:44] == b" EMF":
# enhanced metafile
# get bounding box
x0 = _long(s, 8)
y0 = _long(s, 12)
x1 = _long(s, 16)
y1 = _long(s, 20)
# get frame (in 0.01 millimeter units)
frame = _long(s, 24), _long(s, 28), _long(s, 32), _long(s, 36)
# normalize size to 72 dots per inch
size = x1 - x0, y1 - y0
# calculate dots per inch from bbox and frame
            xdpi = 2540 * (x1 - x0) // (frame[2] - frame[0])
ydpi = 2540 * (y1 - y0) // (frame[3] - frame[1])
self.info["wmf_bbox"] = x0, y0, x1, y1
if xdpi == ydpi:
self.info["dpi"] = xdpi
else:
self.info["dpi"] = xdpi, ydpi
else:
raise SyntaxError("Unsupported file format")
self.mode = "RGB"
self.size = size
loader = self._load()
if loader:
loader.open(self)
def _load(self):
return _handler
def _save(im, fp, filename):
if _handler is None or not hasattr(_handler, "save"):
raise IOError("WMF save handler not installed")
_handler.save(im, fp, filename)
#
# --------------------------------------------------------------------
# Registry stuff
Image.register_open(WmfStubImageFile.format, WmfStubImageFile, _accept)
Image.register_save(WmfStubImageFile.format, _save)
Image.register_extensions(WmfStubImageFile.format, [".wmf", ".emf"])
```
#### File: lambda-code/ServiceLambda/custom_field.py
```python
from __future__ import print_function
import boto3
import CloudCanvas
import json
from botocore.exceptions import ClientError
from botocore.client import Config
def __get_bucket():
if not hasattr(__get_bucket,'client_configuration'):
__get_bucket.client_configuration = boto3.resource('s3', config=Config(signature_version='s3v4')).Bucket(CloudCanvas.get_setting("ClientConfiguration"))
return __get_bucket.client_configuration
def update_client_configuration(client_configuration):
__get_bucket().put_object(Key="client_configuration.json", Body=json.dumps(client_configuration))
return 'SUCCEED'
def get_client_configuration():
client = boto3.client('s3', config=Config(signature_version='s3v4'))
client_configuration = []
try:
response = client.get_object(Bucket=CloudCanvas.get_setting("ClientConfiguration"), Key="client_configuration.json")
client_configuration = json.loads(response["Body"].read())
except ClientError as e:
if e.response['Error']['Code'] == 'AccessDenied':
client_configuration = []
else:
raise e
return client_configuration
```
#### File: Utils/cgf_utils/aws_utils.py
```python
import random
import time
import json
import datetime
import os
import boto3
import boto3.session
import urllib
from resource_manager_common import constant
import custom_resource_utils
import json_utils
from botocore.exceptions import ClientError
from botocore.exceptions import EndpointConnectionError
from botocore.exceptions import IncompleteReadError
from botocore.exceptions import ConnectionError
from botocore.exceptions import BotoCoreError
from botocore.exceptions import UnknownEndpointError
from botocore.client import Config
current_region = os.environ.get('AWS_REGION')
LOG_ATTEMPT = 3
LOG_SUCCESS = 2
LOG_FAILURE = 1
LOG_LEVEL_NONE = []
LOG_LEVEL_ALL = [LOG_ATTEMPT, LOG_SUCCESS, LOG_FAILURE]
class ClientWrapper(object):
BACKOFF_BASE_SECONDS = 1.25
BACKOFF_MAX_SECONDS = 60.0
BACKOFF_MAX_TRYS = 15
MAX_LOG_STRING_LENGTH = 200
def __init__(self, wrapped_client, do_not_log_args = [], log_level=LOG_LEVEL_ALL):
self.__wrapped_client = wrapped_client
self.__client_type = type(wrapped_client).__name__
self.__do_not_log_args = do_not_log_args
self.__log_level = log_level
@property
def client_type(self):
return self.__client_type
def __getattr__(self, attr):
orig_attr = self.__wrapped_client.__getattribute__(attr)
if callable(orig_attr):
def client_wrapper(*args, **kwargs):
# http://www.awsarchitectureblog.com/2015/03/backoff.html
sleep_seconds = self.BACKOFF_BASE_SECONDS
backoff_base = self.BACKOFF_BASE_SECONDS
backoff_max = self.BACKOFF_MAX_SECONDS
if 'baseBackoff' in kwargs:
backoff_base = kwargs['baseBackoff']
del kwargs['baseBackoff']
if 'maxBackoff' in kwargs:
backoff_max = kwargs['maxBackoff']
del kwargs['maxBackoff']
count = 1
while True:
self.__log_attempt(attr, args, kwargs)
try:
result = orig_attr(*args, **kwargs)
self.__log_success(attr, result)
return result
except (ClientError, EndpointConnectionError, IncompleteReadError, ConnectionError, UnknownEndpointError) as e:
# Do not catch BotoCoreError here!!! That error is the base for all kinds of errors
# that should not be retried. For example: ParamValidationError. Errors like this
# will never succeed, but the backoff takes a very long time. In the case of
# custom resource handlers this can cause the lambda to timeout, which causes cloud
# formation to retry quite a few times before giving up. This can effectivly hang
# stack update/create for hours.
self.__log_failure(attr, e)
if count == self.BACKOFF_MAX_TRYS or (
isinstance(e, ClientError) and e.response['Error']['Code'] not in ['Throttling',
'TooManyRequestsException']):
raise e
temp = min(backoff_max, backoff_base * 2 ** count)
sleep_seconds = temp / 2 + random.uniform(0, temp / 2)
self.__log(attr, 'Retry attempt {}. Sleeping {} seconds'.format(count, sleep_seconds))
time.sleep(sleep_seconds)
count += 1
except Exception as e:
self.__log_failure(attr, e)
raise e
return client_wrapper
else:
return orig_attr
def __log(self, method_name, log_msg):
msg = 'AWS '
msg += self.__client_type
msg += '.'
msg += method_name
msg += ' '
msg += log_msg
print msg
def __log_attempt(self, method_name, args, kwargs):
if not LOG_ATTEMPT in self.__log_level:
return
msg = 'attempt: '
comma_needed = False
for arg in args:
if comma_needed: msg += ', '
            msg += str(arg)
comma_needed = True
for key, value in kwargs.iteritems():
if key in self.__do_not_log_args: continue
if comma_needed: msg += ', '
msg += key
msg += '='
if isinstance(value, basestring):
msg += '"'
msg += value
msg += '"'
elif isinstance(value, dict):
msg += json.dumps(value, cls=json_utils.SafeEncoder)
else:
msg += str(value)
comma_needed = True
self.__log(method_name, msg)
def __log_success(self, method_name, result):
if not LOG_SUCCESS in self.__log_level:
return
msg = 'success: '
msg += type(result).__name__
if isinstance(result, dict):
msg += json.dumps(result, cls=json_utils.SafeEncoder)
else:
msg += str(result)
self.__log(method_name, msg)
def __log_failure(self, method_name, e):
if not LOG_FAILURE in self.__log_level:
return
msg = ' failure '
msg += type(e).__name__
msg += ': '
msg += str(getattr(e, 'response', e.message))
self.__log(method_name, msg)
ID_DATA_MARKER = '::'
def get_data_from_custom_physical_resource_id(physical_resource_id):
'''Returns data extracted from a physical resource id with embedded JSON data.'''
if physical_resource_id:
embedded_physical_resource_id = custom_resource_utils.get_embedded_physical_id(physical_resource_id)
i_data_marker = embedded_physical_resource_id.find(ID_DATA_MARKER)
if i_data_marker == -1:
id_data = {}
else:
try:
id_data = json.loads(embedded_physical_resource_id[i_data_marker + len(ID_DATA_MARKER):])
except Exception as e:
print 'Could not parse JSON data from physical resource id {}. {}'.format(embedded_physical_resource_id,
e.message)
id_data = {}
else:
id_data = {}
return id_data
def construct_custom_physical_resource_id_with_data(stack_arn, logical_resource_id, id_data):
'''Creates a physical resource id with embedded JSON data.'''
physical_resource_name = get_stack_name_from_stack_arn(stack_arn) + '-' + logical_resource_id
id_data_string = json.dumps(id_data, sort_keys=True)
return physical_resource_name + ID_DATA_MARKER + id_data_string
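# Illustrative sketch (assumption, not part of the original module): round-trip
# of the JSON data embedded in a custom physical resource id. The ARN and names
# are placeholders, and the read side assumes custom_resource_utils'
# get_embedded_physical_id passes plain ids through unchanged.
def _example_physical_id_round_trip():
    stack_arn = 'arn:aws:cloudformation:us-east-1:123456789012:stack/TestStack/uuid'
    physical_id = construct_custom_physical_resource_id_with_data(
        stack_arn, 'TestResource', {'Key': 'Value'})
    # physical_id looks like 'TestStack-TestResource::{"Key": "Value"}'
    return get_data_from_custom_physical_resource_id(physical_id)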
# Stack ARN format: arn:aws:cloudformation:{region}:{account}:stack/{name}/{uuid}
def get_stack_name_from_stack_arn(arn):
return arn.split('/')[1]
def s3_key_join(*args):
return constant.S3_DELIMETER.join(args)
def get_region_from_stack_arn(arn):
return arn.split(':')[3]
def get_account_id_from_stack_arn(arn):
return arn.split(':')[4]
def get_cloud_canvas_metadata(resource, metadata_name):
metadata_string = resource.get('Metadata', None)
if metadata_string is None: return
try:
metadata = json.loads(metadata_string)
except ValueError as e:
raise RuntimeError('Could not parse CloudCanvas {} Metadata: {}. {}'.format(metadata_name, metadata_string, e))
cloud_canvas_metadata = metadata.get('CloudCanvas', None)
if cloud_canvas_metadata is None: return
return cloud_canvas_metadata.get(metadata_name, None)
def paginate(fn, params, paginator_key='Marker', next_paginator_key='NextMarker'):
"""
A generic paginator that should work with any paginated AWS function, including those that do not have a built-in
paginator supplied for them.
:param fn: A client function to call, e.g. boto3.client('s3').list_objects
:param params: A dictionary of parameters to pass into the function, e.g. {'Bucket': "foo"}
:param paginator_key: The key used as the marker for fetching results, e.g. 'Marker'
:param next_paginator_key: The key returned in the response to fetch the next page, e.g. 'NextMarker'
:return: An iterator to the results of the function call.
"""
while True:
response = fn(**params)
yield response
next_key = response.get(next_paginator_key, None)
if next_key and len(next_key):
params[paginator_key] = next_key
else:
break
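# Illustrative sketch (assumption, not part of the original module): listing all
# keys in an S3 bucket with the generic paginator. The bucket name is a
# placeholder; list_objects uses Marker/NextMarker, matching the defaults.
def _example_paginate():
    s3 = boto3.client('s3')
    keys = []
    for page in paginate(s3.list_objects, {'Bucket': 'example-bucket'}):
        keys.extend(entry['Key'] for entry in page.get('Contents', []))
    return keys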
def get_resource_arn(type_definitions, stack_arn, resource_type, physical_id, optional=False, lambda_client=None):
result = None
physical_id = custom_resource_utils.get_embedded_physical_id(physical_id)
type_definition = type_definitions.get(resource_type, None)
if type_definition is None:
if optional:
return None
else:
raise RuntimeError(
'Unsupported ARN mapping for resource type {} on resource {}. To add support for additional resource types, add a Custom::ResourceTypes dependency to your resource describing the type.'.format(
resource_type, physical_id))
if type_definition.arn_function:
if not lambda_client:
lambda_client = ClientWrapper(boto3.client("lambda", get_region_from_stack_arn(stack_arn)))
response = lambda_client.invoke(
FunctionName=type_definition.get_arn_lambda_function_name(),
Payload=json.dumps(
{
'Handler': type_definition.arn_function,
'ResourceType': resource_type,
'Region': get_region_from_stack_arn(stack_arn),
'AccountId': get_account_id_from_stack_arn(stack_arn),
'ResourceName': physical_id,
'StackId': stack_arn
}
)
)
if response['StatusCode'] == 200:
response_data = json.loads(response['Payload'].read().decode())
result = response_data.get('Arn', None)
if not result:
raise RuntimeError("ARN lambda response for resource type '%s' did not contain an 'Arn' field. "
"Response: %s" % (resource_type, response_data))
else:
raise RuntimeError("ARN lambda for resource type '%s' failed to execute, returned HTTP status %d" %
(resource_type, response['StatusCode']))
elif type_definition.arn_format:
result = type_definition.arn_format.format(
resource_type=resource_type,
region=get_region_from_stack_arn(stack_arn),
account_id=get_account_id_from_stack_arn(stack_arn),
resource_name=physical_id)
else:
raise RuntimeError(
'Invalid ARN definition for resource type {} on resource {}. (This should have been detected earlier when the type was loaded.)'.format(
resource_type, physical_id))
return result
def get_role_name_from_role_arn(arn):
# Role ARN format: arn:aws:iam::{account_id}:role/{resource_name}
if arn is None: return None
return arn[arn.rfind('/')+1:]
def get_cognito_pool_from_file(configuration_bucket, configuration_key, logical_name, stack):
s3 = ClientWrapper(boto3.client('s3', current_region, config=Config(
region_name=current_region, signature_version='s3v4', s3={'addressing_style': 'virtual'})))
s3_key = configuration_key + '/cognito-pools.json'
try:
res = s3.get_object(Bucket=configuration_bucket, Key=s3_key)
content = json.loads(res['Body'].read())
except Exception as e:
return ""
key = ""
if stack.is_deployment_access_stack:
key = "DeploymentAccess"
if stack.is_project_stack:
key = "Project"
return content.get(key, {}).get(logical_name, "")
```
#### File: Utils/cgf_utils/properties.py
```python
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# $Revision: #1 $
from types import *
class ValidationError(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class _Properties(object):
def __init__(self, src, schema, prefix=''):
wildcard = None
for name, validator in schema.iteritems():
if name == '*':
wildcard = validator
else:
setattr(self, name, validator(prefix + name, src.get(name, None)))
for name, value in src.iteritems():
if name not in schema and name != 'ServiceToken':
if wildcard:
setattr(self, name, wildcard(prefix + name, value))
else:
raise ValidationError('The {} property is not supported.'.format(name))
class String(object):
def __init__(self, default = None):
self.default = default
def __call__(self, name, value):
if value is None:
value = self.default
if value is None:
raise ValidationError('A value for property {} must be provided.'.format(name))
if not isinstance(value, basestring):
raise ValidationError('The {} property value must be a string.'.format(name))
return value
class StringOrListOfString(object):
def __init__(self, default = None, minOccurs = None, maxOccurs = None):
self.default = default
def __call__(self, name, value):
if value is None:
value = self.default
if value is None:
raise ValidationError('A value for property {} must be provided.'.format(name))
if isinstance(value, basestring):
return [ value ]
else:
if not isinstance(value, list):
raise ValidationError('The {} property value must be a string or a list of strings.'.format(name))
for entry in value:
if not isinstance(entry, basestring):
raise ValidationError('The {} property must be a string or a list of strings.'.format(name))
return value
class Object(object):
def __init__(self, default = None, schema = None):
self.default = default
self.schema = schema if schema else {}
def __call__(self, name, value):
if value is None:
value = self.default
if value is None:
raise ValidationError('A value for property {} must be provided.'.format(name))
if not isinstance(value, dict):
raise ValidationError('The {} property value must be a dictionary.'.format(name))
return _Properties(value, self.schema, prefix=name + '.')
class ObjectOrListOfObject(object):
def __init__(self, default = None, schema = None):
self.default = default
self.Entry = Object(schema=schema)
def __call__(self, name, value):
if value is None:
value = self.default
if value is None:
raise ValidationError('A value for property {} must be provided.'.format(name))
if not isinstance(value, list):
value = [ value ]
result = []
for index, entry in enumerate(value):
result.append(self.Entry('{}[{}]'.format(name, index), entry))
return result
class Dictionary(object):
def __init__(self, default = None):
self.default = default
def __call__(self, name, value):
if value is None:
value = self.default
if value is None:
raise ValidationError('A value for property {} must be provided.'.format(name))
if not isinstance(value, dict):
raise ValidationError('The {} property value must be a dictionary.'.format(name))
return value
class Boolean(object):
def __init__(self, default = None):
self.default = default
def __call__(self, name, value):
if value is None:
value = self.default
if value is None:
raise ValidationError('A value for property {} must be provided.'.format(name))
# CloudFormation doesn't support Boolean-typed parameters. Check for
# boolean strings so that string parameters can be used to initialize
# boolean properties.
if isinstance(value, basestring):
lower_value = value.lower()
if lower_value == 'true':
value = True
elif lower_value == 'false':
value = False
if not isinstance(value, bool):
raise ValidationError('The {} property value must be boolean.'.format(name))
return value
class Integer(object):
def __init__(self, default = None):
self.default = default
def __call__(self, name, value):
if value is None:
value = self.default
if value is None:
raise ValidationError('A value for property {} must be provided.'.format(name))
try:
result = int(value)
except ValueError:
raise ValidationError('The {} property value {} must be an integer.'.format(name, value))
return result
def load(event, schema):
return _Properties(event['ResourceProperties'], schema)
def load_old(event, schema):
return _Properties(event['OldResourceProperties'], schema)
```
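For reference, a minimal usage sketch of how the validators above combine into a schema and get applied to a custom resource event via `properties.load`. The property names (`TableName`, `ReadCapacity`, `Tags`) and the handler shape are hypothetical examples, not part of any real resource type.
```python
# Hypothetical schema/handler sketch for the validators defined above.
from cgf_utils import properties

_SCHEMA = {
    'TableName': properties.String(),                     # required string
    'ReadCapacity': properties.Integer(default=5),        # optional, defaults to 5
    'Tags': properties.StringOrListOfString(default=[])   # string or list of strings
}

def handler(event, context):
    # Raises properties.ValidationError if ResourceProperties does not match the schema.
    props = properties.load(event, _SCHEMA)
    return {
        'TableName': props.TableName,
        'ReadCapacity': props.ReadCapacity,
        'Tags': props.Tags
    }
```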
#### File: ProjectResourceHandler/test/test_HelperResourceHandler.py
```python
import unittest
import mock
from cgf_utils import custom_resource_response
from resource_types import Custom_Helper
from cgf_utils.properties import ValidationError
class UnitTest_CloudGemFramework_ProjectResourceHandler_HelperResourceHandler(unittest.TestCase):
@mock.patch.object(custom_resource_response, 'success_response')
def __assert_succeeds(self, input, expected_output, mock_response_succeed):
event = {
'ResourceProperties': {
'Input': input
},
'LogicalResourceId': 'TestLogicalId',
'StackId': 'arn:aws:cloudformation:{region}:{account}:stack/TestStackName/{uuid}'
}
context = {}
Custom_Helper.handler(event, context)
expected_physical_resource_id = 'TestStackName-TestLogicalId'
mock_response_succeed.assert_called_with(expected_output, expected_physical_resource_id)
def __assert_fails(self, input):
event = {
'ResourceProperties': {
'Input': input
},
'LogicalResourceId': 'TestLogicalId',
'StackId': 'arn:aws:cloudformation:{region}:{account}:stack/TestStackName/{uuid}'
}
context = {}
with self.assertRaises(ValidationError):
Custom_Helper.handler(event, context)
def test_non_functions(self):
input = {
'Dict': {
'A': 1,
'B': 2
},
'List': [
1, 2, { 'C': 3 }
]
}
expected_output = input
self.__assert_succeeds(input, expected_output)
def test_lower_case_with_string(self):
input = {
'Test': { 'HelperFn::LowerCase': 'UPPER' }
}
expected_output = {
'Test': 'upper'
}
self.__assert_succeeds(input, expected_output)
def test_lower_case_with_non_string(self):
input = {
'Test': { 'HelperFn::LowerCase': {} }
}
self.__assert_fails(input)
```
#### File: ServiceLambda/api/resource_info.py
```python
import service
import errors
import CloudCanvas
from resource_manager_common import stack_info
@service.api
def get_deployment_access_resource_info(request, deployment_name, resource_name):
project_stack_arn = CloudCanvas.get_setting('ProjectStackArn')
stack_info_manager = stack_info.StackInfoManager()
project = stack_info.ProjectInfo(stack_info_manager, project_stack_arn)
for deployment in project.deployments:
if deployment.deployment_name == deployment_name:
physical_id = deployment.deployment_access.resources.get_by_logical_id(resource_name).physical_id
if physical_id:
return {
'PhysicalId': physical_id
}
raise errors.NotFoundError('Resource {} not found.'.format(resource_name))
raise errors.NotFoundError('Deployment {} not found'.format(deployment_name))
@service.api
def list_deployment_resources(request, deployment_name):
project_stack_arn = CloudCanvas.get_setting('ProjectStackArn')
stack_info_manager = stack_info.StackInfoManager()
project = stack_info.ProjectInfo(stack_info_manager, project_stack_arn)
for deployment in project.deployments:
if deployment.deployment_name == deployment_name:
resources = {}
for resource_group in deployment.resource_groups:
for resource in resource_group.resources:
full_logical_id = '.'.join([resource_group.resource_group_name, resource.logical_id])
if resource.type == 'Custom::ServiceApi':
resources[full_logical_id] = __get_service_api_mapping(resource_group, resource)
else:
resources[full_logical_id] = {
'PhysicalResourceId': resource.physical_id,
'ResourceType': resource.type
}
return {'Resources': resources}
raise errors.NotFoundError('Deployment {} not found'.format(deployment_name))
@service.api
def get_resource_group_resource_info(request, deployment_name, resource_group_name, resource_name):
project_stack_arn = CloudCanvas.get_setting('ProjectStackArn')
stack_info_manager = stack_info.StackInfoManager()
project = stack_info.ProjectInfo(stack_info_manager, project_stack_arn)
for deployment in project.deployments:
if deployment.deployment_name == deployment_name:
for resource_group in deployment.resource_groups:
if resource_group.resource_group_name == resource_group_name:
physical_id = resource_group.resources.get_by_logical_id(resource_name).physical_id
if physical_id:
return {
'PhysicalId': physical_id
}
raise errors.NotFoundError('Resource {} not found.'.format(resource_name))
raise errors.NotFoundError('Resource Group {} not found.'.format(resource_group_name))
raise errors.NotFoundError('Deployment {} not found'.format(deployment_name))
def __get_service_api_mapping(resource_group, resource):
outputs = resource_group.stack_description.get('Outputs', [])
for output in outputs:
if output.get('OutputKey') == 'ServiceUrl':
service_url = output.get('OutputValue')
if service_url:
return {
'PhysicalResourceId': service_url,
'ResourceType': resource.type
}
```
#### File: ServiceLambda/resource_types/Custom_ExternalResourceReference.py
```python
from cgf_utils import custom_resource_response
from cgf_utils import aws_utils
from cgf_utils import reference_type_utils
from cgf_utils import properties
from resource_manager_common import stack_info
def handler(event, context):
stack_arn = event['StackId']
stack = stack_info.StackInfoManager().get_stack_info(stack_arn)
props = properties.load(event, {
'ReferenceName': properties.String()
})
request_type = event['RequestType']
if request_type not in ['Create', 'Update', 'Delete']:
raise RuntimeError('Unexpected request type: {}'.format(request_type))
data = {}
if request_type != 'Delete':
data = {
'PhysicalId': _get_reference_physical_id(stack, props.ReferenceName)
}
physical_resource_id = aws_utils.construct_custom_physical_resource_id_with_data(stack_arn, event['LogicalResourceId'], {'ReferenceName': props.ReferenceName})
return custom_resource_response.success_response(data, physical_resource_id)
def arn_handler(event, context):
stack = stack_info.StackInfoManager().get_stack_info(event['StackId'])
reference_name = aws_utils.get_data_from_custom_physical_resource_id(event['ResourceName']).get('ReferenceName')
result = {
'Arn': _get_reference_arn(stack, reference_name)
}
return result
def _get_reference_arn(stack, reference_name):
reference_metadata = reference_type_utils.get_reference_metadata(stack.project_stack.configuration_bucket, stack.project_stack.project_name, reference_name)
return reference_metadata.get('Arn')
def _get_reference_physical_id(stack, reference_name):
reference_metadata = reference_type_utils.get_reference_metadata(stack.project_stack.configuration_bucket, stack.project_stack.project_name, reference_name)
return reference_metadata.get('PhysicalId')
```
#### File: resource-manager-code/swagger_processor/__init__.py
```python
import os
import json
import copy
from swagger_json_navigator import SwaggerNavigator
from resource_manager.errors import HandledError
import swagger_spec_validator
import interface
import lambda_dispatch
def add_cli_commands(subparsers, addCommonArgs):
# add-service-api-resources
subparser = subparsers.add_parser('service-api-process-swagger', help='Process the Cloud Canvas defined extension objects in a swagger.json to produce swagger definitions that can be imported into AWS API Gateway. This process is performed automatically before uploading a resource group\'s swagger.json file.')
group = subparser.add_mutually_exclusive_group(required=True)
group.add_argument('--resource-group', '-r', metavar='GROUP', help='The name of the resource group.')
group.add_argument('--input', '-i', metavar='FILE-PATH', help='The file from which the swagger will be read.')
subparser.add_argument('--output', '-o', required=False, metavar='FILE-PATH', help='The file to which the processed swagger will be written. By default the output is written to stdout.')
addCommonArgs(subparser)
subparser.set_defaults(func=_process_swagger_cli)
def _process_swagger_cli(context, args):
if args.resource_group:
resource_group = context.resource_groups.get(args.resource_group)
swagger_path = os.path.join(resource_group.directory_path, 'swagger.json')
if not os.path.isfile(swagger_path):
raise HandledError('The resource group {} has no swagger file.'.format(args.resource_group))
else:
swagger_path = args.input
if not os.path.isfile(swagger_path):
raise HandledError('The swagger file {} does not exist.'.format(swagger_path))
result = process_swagger_path(context, swagger_path)
if args.output:
with open(args.output, 'w') as file:
file.write(result)
else:
print result
def process_swagger_path(context, swagger_path):
try:
with open(swagger_path, 'r') as swagger_file:
swagger_content = swagger_file.read()
except IOError as e:
raise HandledError('Could not read file {}: {}'.format(swagger_path, e.message))
try:
swagger = json.loads(swagger_content)
except ValueError as e:
raise HandledError('Could not parse {} as JSON: {}'.format(swagger_path, e.message))
try:
process_swagger(context, swagger)
except ValueError as e:
raise HandledError('Could not process {}: {}'.format(swagger_path, e.message))
try:
content = json.dumps(swagger, sort_keys=True, indent=4)
except ValueError as e:
raise HandledError('Could not convert processed swagger to JSON: {}'.format(e.message))
return content
def process_swagger(context, swagger):
# make sure we are starting with a valid document
validate_swagger(swagger)
# process the swagger, order matters
swagger_navigator = SwaggerNavigator(swagger)
interface.process_interface_implementation_objects(context, swagger_navigator)
lambda_dispatch.process_lambda_dispatch_objects(context, swagger_navigator)
# make sure we produce a valid document
validate_swagger(swagger)
def validate_swagger(swagger):
try:
# The validator inserts x-scope objects into the swagger when validating
# references. We validate against a copy of the swagger to keep these from
# showing up in our output.
# Hitting errors in the definitions dictionary gives better error messages,
# so run the validator with no paths first
swagger_definitions_only = copy.deepcopy(swagger)
swagger_definitions_only['paths'] = {}
swagger_spec_validator.validator20.validate_spec(swagger_definitions_only)
swagger_copy = copy.deepcopy(swagger)
swagger_spec_validator.validator20.validate_spec(swagger_copy)
except swagger_spec_validator.SwaggerValidationError as e:
try:
content = json.dumps(swagger, indent=4, sort_keys=True)
except:
content = swagger
raise ValueError('Swagger validation error: {}\n\nSwagger content:\n{}\n'.format(e.message, content))
```
#### File: ServiceLambda/api/example_data.py
```python
import service
import errors
import bucket_data
@service.api
def list(request, start = None, count = None):
keys = bucket_data.list(start, count)
return {
'Keys': [ { 'Key': key } for key in keys ]
}
@service.api
def create(request, data):
__validate_data(data)
key = bucket_data.create(data)
return {
'Key': key
}
@service.api
def read(request, key):
data = bucket_data.read(key)
if data is None:
raise errors.NotFoundError('No data with key {} was found.'.format(key))
return data
@service.api
def update(request, key, data):
__validate_data(data)
if not bucket_data.update(key, data):
raise errors.NotFoundError('No data with key {} was found.'.format(key))
@service.api
def delete(request, key):
if not bucket_data.delete(key):
raise errors.NotFoundError('No data with key {} was found.'.format(key))
def __validate_data(data):
if not isinstance(data.get('ExamplePropertyA', None), basestring):
raise errors.ClientError('Property ExamplePropertyA in provided data is missing or is not a string.')
if not isinstance(data.get('ExamplePropertyB', None), int):
raise errors.ClientError('Property ExamplePropertyB in provided data is missing or is not an integer.')
```
#### File: resource_manager/test/test_integration_version_update.py
```python
import contextlib
import mock
import os
from cgf_utils.version_utils import Version
import resource_manager.hook
import lmbr_aws_test_support
import project_snapshot
import test_constant
from resource_manager.test import base_stack_test
class Foo(object):
def bar(self, x):
print 'called bar with', x
class IntegrationTest_CloudGemFramework_ResourceManager_version_update(base_stack_test.BaseStackTestCase):
def __init__(self, *args, **kwargs):
super(IntegrationTest_CloudGemFramework_ResourceManager_version_update, self).__init__(*args, **kwargs)
def setUp(self):
self.prepare_test_environment("project_update_1_0_0")
def test_framework_version_update_end_to_end(self):
self.run_all_tests()
def snapshot_path(self, snapshot_name):
return os.path.abspath(os.path.join(__file__, '..', 'snapshots', snapshot_name))
############################################
## Uninitialized Project Tests
##
def __010_make_unitialized_framework_version_1_0_0_project(self):
project_snapshot.restore_snapshot(
region = self.TEST_REGION,
profile = self.TEST_PROFILE,
stack_name = self.TEST_PROJECT_STACK_NAME,
project_directory_path = self.GAME_DIR,
snapshot_file_path = self.snapshot_path('CGF_1_0_0_Minimal_Uninitialized'),
root_directory_path = self.REAL_ROOT_DIR)
def __020_commands_fail_before_updating_unitialized_project(self):
self.lmbr_aws('resource-group', 'list', expect_failure = True)
self.lmbr_aws('project', 'create', '--stack-name', self.TEST_PROJECT_STACK_NAME, '--region', self.TEST_REGION, '--confirm-aws-usage', '--confirm-security-change', expect_failure = True)
self.lmbr_aws('deployment', 'list', expect_failure = True)
self.lmbr_aws('deployment', 'create', '--deployment', 'TestDeployment1', expect_failure = True)
def __030_updating_unitialized_project_is_successful(self):
with self.spy_decorator(resource_manager.hook.HookContext.call_module_handlers) as mock_call_module_handlers:
self.lmbr_aws('project', 'update-framework-version')
mock_call_module_handlers.assert_any_call(
'resource-manager-code/update.py',
'before_framework_version_updated',
kwargs={
'from_version': Version('1.0.0'),
'to_version': self.CURRENT_FRAMEWORK_VERSION
})
mock_call_module_handlers.assert_any_call(
'resource-manager-code/update.py',
'after_framework_version_updated',
kwargs={
'from_version': Version('1.0.0'),
'to_version': self.CURRENT_FRAMEWORK_VERSION
})
def __040_commands_succeed_after_updating_unitialized_project(self):
self.lmbr_aws('resource-group', 'list')
def __041_commands_succeed_after_updating_unitialized_project(self):
self.lmbr_aws('project', 'create', '--stack-name', self.TEST_PROJECT_STACK_NAME, '--region', self.TEST_REGION, '--confirm-aws-usage', '--confirm-security-change')
def __042_commands_succeed_after_updating_unitialized_project(self):
self.lmbr_aws('deployment', 'list')
def __043_commands_succeed_after_updating_unitialized_project(self):
self.lmbr_aws('deployment', 'create', '--deployment', 'TestDeployment1', '--confirm-aws-usage', '--confirm-security-change', '--parallel')
def __099_cleanup_uninitialized_project(self):
self.lmbr_aws('deployment', 'delete', '-d', 'TestDeployment1', '--confirm-resource-deletion')
self.lmbr_aws('project', 'delete', '--confirm-resource-deletion')
##
## Uninitialized Project Tests
############################################
## Initialized Project Tests
##
def __110_make_initialized_framework_version_1_0_0_project(self):
project_snapshot.restore_snapshot(
region = self.TEST_REGION,
profile = self.TEST_PROFILE,
stack_name = self.TEST_PROJECT_STACK_NAME,
project_directory_path = self.GAME_DIR,
snapshot_file_path = self.snapshot_path('CGF_1_0_0_Minimal_Initialized'),
root_directory_path = self.REAL_ROOT_DIR)
def __120_commands_fail_before_updating_initialized_project(self):
self.lmbr_aws('resource-group', 'list', expect_failure = True)
self.lmbr_aws('deployment', 'list', expect_failure = True)
self.lmbr_aws('deployment', 'create', '--deployment', 'TestDeployment2', expect_failure = True)
def __130_updating_initialized_project_is_successful(self):
with self.spy_decorator(resource_manager.hook.HookContext.call_module_handlers) as mock_call_module_handlers:
self.lmbr_aws('project', 'update-framework-version', '--confirm-aws-usage', '--confirm-security-change', '--confirm-resource-deletion')
mock_call_module_handlers.assert_any_call(
'resource-manager-code/update.py',
'before_framework_version_updated',
kwargs={
'from_version': Version('1.0.0'),
'to_version': self.CURRENT_FRAMEWORK_VERSION
})
mock_call_module_handlers.assert_any_call(
'resource-manager-code/update.py',
'after_framework_version_updated',
kwargs={
'from_version': Version('1.0.0'),
'to_version': self.CURRENT_FRAMEWORK_VERSION
})
def __140_commands_succeed_after_updating_initialized_project(self):
self.lmbr_aws('resource-group', 'list')
self.assertIn('CGF100ResourceGroup', self.lmbr_aws_stdout) # verify project local resource group present
self.assertIn('CGF100GemResourceGroup', self.lmbr_aws_stdout) # verify gem resource group present
def __141_commands_succeed_after_updating_initialized_project(self):
self.lmbr_aws('deployment', 'list')
def __142_commands_succeed_after_updating_initialized_project(self):
self.lmbr_aws('deployment', 'create', '--deployment', 'TestDeployment2', '--confirm-aws-usage', '--confirm-security-change', '--parallel' )
def __199_cleanup_initialized_project(self):
self.lmbr_aws('deployment', 'delete', '-d', 'TestDeployment1', '--confirm-resource-deletion')
self.lmbr_aws('deployment', 'delete', '-d', 'TestDeployment2', '--confirm-resource-deletion')
self.lmbr_aws('project', 'delete', '--confirm-resource-deletion')
if __name__ == '__main__':
unittest.main()
```
#### File: resource_manager/test/test_project_resource_hooks.py
```python
import os
import resource_manager.util
import lmbr_aws_test_support
import mock_specification
from resource_manager.test import base_stack_test
import resource_manager_common.constant as c
import test_constant
class IntegrationTest_CloudGemFramework_ResourceManager_ProjectResourceHooks(base_stack_test.BaseStackTestCase):
TEST_GEM_PROJECT_RESOURCE_NAME = 'TestGemProjectResource'
TEST_GEM_PROJECT_RESOURCE_TYPE = 'AWS::S3::Bucket'
TEST_PROJECT_NAME = lmbr_aws_test_support.unique_name()
def setUp(self):
self.set_resource_group_name(lmbr_aws_test_support.unique_name('prh'))
self.prepare_test_environment(temp_file_suffix = type(self).__name__)
self.register_for_shared_resources()
def test_project_resource_hooks_end_to_end(self):
self.run_all_tests()
def __000_create_test_gem(self):
self.lmbr_aws(
'cloud-gem', 'create',
'--gem', self.TEST_RESOURCE_GROUP_NAME,
'--initial-content', 'resource-manager-plugin',
'--enable','--no-sln-change', ignore_failure=True)
self.enable_shared_gem(self.TEST_RESOURCE_GROUP_NAME, 'v1', path=os.path.join(self.context[test_constant.ATTR_ROOT_DIR], os.path.join(test_constant.DIR_GEMS, self.TEST_RESOURCE_GROUP_NAME)))
gem_project_template_file_path = self.get_gem_aws_path(self.TEST_RESOURCE_GROUP_NAME, c.PROJECT_TEMPLATE_FILENAME)
resource_manager.util.save_json(gem_project_template_file_path,
{
"Resources": {
self.TEST_GEM_PROJECT_RESOURCE_NAME: {
"Type": self.TEST_GEM_PROJECT_RESOURCE_TYPE,
"Properties": {}
}
}
}
)
def __005_create_project_stack(self):
self.base_create_project_stack()
def __010_verify_initial_stack(self):
spec = mock_specification.ok_project_stack()
spec['StackResources'][self.TEST_GEM_PROJECT_RESOURCE_NAME] = {
'ResourceType': self.TEST_GEM_PROJECT_RESOURCE_TYPE
}
self.verify_stack("project stack", self.get_project_stack_arn(), spec, exact=False)
def __020_remove_gem(self):
self.lmbr_aws(
'cloud-gem', 'enable',
'--gem', self.TEST_RESOURCE_GROUP_NAME, ignore_failure=True)
self.lmbr_aws(
'cloud-gem', 'disable',
'--gem', self.TEST_RESOURCE_GROUP_NAME, ignore_failure=True)
def __030_update_project_stack(self):
self.base_update_project_stack()
def __040_verify_updated_stack(self):
spec = mock_specification.ok_project_stack()
self.verify_stack("project stack", self.get_project_stack_arn(), spec, exact=False)
def __050_delete_project_stack(self):
self.unregister_for_shared_resources()
self.teardown_base_stack()
```
#### File: ServiceLambda/api/active_surveys_answers.py
```python
import service
import CloudCanvas
import survey_utils
import survey_common
import validation_utils
import validation_common
import uuid
import errors
from boto3.dynamodb.conditions import Key
import time
@service.api
def post(request, survey_id, answer_list):
cognito_identity_id = request.event["cognitoIdentityId"]
validation_common.validate_cognito_identity_id(cognito_identity_id)
answers = answer_list.get('answers')
validation_common.validate_answers(answers)
survey_common.ensure_survey_active(survey_id)
question_ids = [answer['question_id'] for answer in answers]
if len(question_ids) != len(set(question_ids)):
raise errors.ClientError("Input has duplicate question IDs")
questions = survey_common.get_questions(survey_id, question_ids)
if len(questions) != len(question_ids):
raise errors.ClientError("Some question IDs not found")
question_map = {}
for question in questions:
question_map[question['question_id']] = question
submission_id = str(uuid.uuid4())
item = {}
item['survey_id'] = survey_id
item['submission_id'] = submission_id
item['user_id'] = cognito_identity_id
item['creation_time'] = int(time.time())
answers_map = {}
item['answers'] = answers_map
for answer in answers:
question = question_map[answer['question_id']]
validate_answer_by_question_type(question, answer['answer'])
# For an empty text answer, store a single space instead,
# because DynamoDB does not allow empty string attributes.
if question['type'] == 'text' and len(answer['answer'][0]) == 0:
answer['answer'][0] = " "
answers_map[answer['question_id']] = answer['answer']
survey_utils.get_answer_table().put_item(
Item=item
)
# +1 num_responses
survey_utils.get_survey_table().update_item(
Key={'survey_id':survey_id},
UpdateExpression='ADD num_responses :one',
ExpressionAttributeValues={':one':1}
)
return {
'submission_id': submission_id
}
@service.api
def put(request, survey_id, submission_id, answer_list):
cognito_identity_id = request.event["cognitoIdentityId"]
validation_common.validate_cognito_identity_id(cognito_identity_id)
validation_common.validate_survey_id(survey_id)
validation_common.validate_submission_id(submission_id)
answers = answer_list.get('answers')
validation_common.validate_answers(answers)
survey_common.ensure_survey_active(survey_id)
submission = survey_utils.get_submission_by_id(survey_id, submission_id, ['user_id'], True)
if submission.get('user_id') != cognito_identity_id:
raise errors.ClientError("Cognito identity ID [{}] doesn't own this submission".format(cognito_identity_id))
question_ids = [answer['question_id'] for answer in answers]
if len(question_ids) != len(set(question_ids)):
raise errors.ClientError("Input has duplicate question IDs")
questions = survey_common.get_questions(survey_id, question_ids)
if len(questions) != len(question_ids):
raise errors.ClientError("Some question IDs not found")
question_map = {}
for question in questions:
question_map[question['question_id']] = question
answers_map = {}
for answer in answers:
question = question_map[answer['question_id']]
validate_answer_by_question_type(question, answer['answer'])
# For an empty text answer, store a single space instead,
# because DynamoDB does not allow empty string attributes.
if question['type'] == 'text' and len(answer['answer'][0]) == 0:
answer['answer'][0] = " "
answers_map[answer['question_id']] = answer['answer']
survey_utils.get_answer_table().update_item(
Key={'survey_id':survey_id, 'submission_id': submission_id},
UpdateExpression='SET answers=:answers',
ExpressionAttributeValues={':answers': answers_map}
)
return {
'submission_id': submission_id
}
def validate_answer_by_question_type(question, answer):
question_type = question['type']
if question_type == 'text':
if len(answer[0]) > question['max_chars']:
raise errors.ClientError("answer to text type question is invalid, number of characters: {}, max allowed: {}".format(len(answer[0]), question['max_chars']))
elif question_type == 'scale':
if not validation_utils.is_num_str(answer[0]):
raise errors.ClientError("answer to scale type question is invalid: {}".format(answer[0]))
val = int(answer[0])
if val < question['min'] or val > question['max']:
raise errors.ClientError("answer to scale type question is invalid: {}, min: {}, max: {}".format(answer[0], question['min'], question['max']))
elif question_type == 'predefined':
if question.get('multiple_select'):
for ans in answer:
validate_answer_to_predefined_type_question(question, ans)
else:
validate_answer_to_predefined_type_question(question, answer[0])
def validate_answer_to_predefined_type_question(question, answer):
if not validation_utils.is_num_str(answer):
raise errors.ClientError("answer to predefined type question is invalid: {}".format(answer))
val = int(answer)
if val < 0 or val >= len(question['predefines']):
raise errors.ClientError("answer to predefined type question is invalid: {}, number of options: {}".format(answer, len(question['predefines'])))
```
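A sketch of the question and answer shapes that `validate_answer_by_question_type` above accepts; answers are always lists of strings, even for single-valued question types. The field values are hypothetical, and the calls assume the module above is importable in its Lambda environment.
```python
# Hypothetical question/answer shapes accepted by validate_answer_by_question_type above.
scale_question = {'type': 'scale', 'min': 1, 'max': 5}
validate_answer_by_question_type(scale_question, ['4'])                 # ok: 1 <= 4 <= 5

text_question = {'type': 'text', 'max_chars': 140}
validate_answer_by_question_type(text_question, ['short answer'])       # ok: within max_chars

predefined_question = {'type': 'predefined', 'predefines': ['Yes', 'No'], 'multiple_select': True}
validate_answer_by_question_type(predefined_question, ['0', '1'])       # ok: indices into predefines
```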
#### File: ServiceLambda/api/active_surveys_player_submissions.py
```python
import service
import CloudCanvas
import survey_utils
import survey_common
import validation_utils
import validation_common
from boto3.dynamodb.conditions import Key
@service.api
def get(request, survey_id):
cognito_identity_id = request.event["cognitoIdentityId"]
params = {}
params['KeyConditionExpression'] = Key('user_id').eq(cognito_identity_id) & Key('survey_id').eq(survey_id)
params['IndexName'] = 'UserAnswersIndex'
params['ProjectionExpression'] = 'submission_id'
query_result = survey_utils.get_answer_table().query(**params)
submission_ids = []
for submission in query_result['Items']:
submission_ids.append(submission['submission_id'])
return {
'submission_id_list': submission_ids
}
```
#### File: ServiceLambda/api/surveys_activation_period.py
```python
import service
import survey_common
import survey_utils
import validation_utils
import validation_common
import errors
import json
import time
import StringIO
import uuid
@service.api
def put(request, survey_id, update_activation_period_input):
validation_common.validate_survey_id(survey_id)
activation_start_time, activation_end_time = validation_common.validate_activation_period(
update_activation_period_input.get('activation_start_time'), update_activation_period_input.get('activation_end_time'))
survey_common.ensure_survey_exists(survey_id)
expression_attribute_values = {':activation_start_time':activation_start_time}
update_expression = 'SET activation_start_time = :activation_start_time'
if activation_end_time is None:
update_expression += ' REMOVE activation_end_time'
else:
update_expression += ',activation_end_time = :activation_end_time'
expression_attribute_values[':activation_end_time'] = activation_end_time
survey_utils.get_survey_table().update_item(
Key={'survey_id':survey_id},
UpdateExpression=update_expression,
ExpressionAttributeValues=expression_attribute_values
)
return "success"
```
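A standalone sketch of the conditional DynamoDB UpdateExpression built in `put` above: `activation_end_time` is removed when no end time is supplied (an open-ended activation period) and set otherwise. The timestamps are arbitrary placeholders.
```python
# Standalone sketch of the UpdateExpression logic in put() above.
def build_activation_update(activation_start_time, activation_end_time):
    values = {':activation_start_time': activation_start_time}
    expression = 'SET activation_start_time = :activation_start_time'
    if activation_end_time is None:
        expression += ' REMOVE activation_end_time'            # open-ended activation period
    else:
        expression += ',activation_end_time = :activation_end_time'
        values[':activation_end_time'] = activation_end_time
    return expression, values

print(build_activation_update(1500000000, None))
print(build_activation_update(1500000000, 1500086400))
```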
#### File: ServiceLambda/api/player_ban.py
```python
import boto3
import service
import cgf_lambda_settings
import cgf_service_client
import ban_handler
import identity_validator
@service.api
def post(request, user = None):
interface_url = cgf_lambda_settings.get_service_url("CloudGemPlayerAccount_banplayer_1_0_0")
if not interface_url:
return {
"status": ban_handler.ban(user)
}
service_client = cgf_service_client.for_url(interface_url, verbose=True, session=boto3._get_default_session())
result = service_client.navigate('playerban').POST({"id": identity_validator.get_id_from_user(user)})
return result.DATA
@service.api
def delete(request, user = None):
interface_url = cgf_lambda_settings.get_service_url(
"CloudGemPlayerAccount_banplayer_1_0_0")
if not interface_url:
return {
"status": ban_handler.lift_ban(user)
}
service_client = cgf_service_client.for_url(
interface_url, verbose=True, session=boto3._get_default_session())
navigation = service_client.navigate('playerban')
cog_id = identity_validator.get_id_from_user(user)
result = navigation.DELETE(
{ "id": cog_id }
)
return result.DATA
```
#### File: common-code/AWSCommon/athena.py
```python
import retry
import metric_constant as c
import math
import uuid
import sys
import time
import os
import csv
import util
import sensitivity
import enum_type
import boto3_util
from s3 import S3
from StringIO import StringIO
DEFAULT_EVENTS = enum_type.create(CLIENTINITCOMPLETE="clientinitcomplete", SESSIONSTART="sessionstart")
class Athena(object):
def __init__(self, db_name, context = {}):
self.__context = context
self.__client = boto3_util.client('athena', api_version='2017-05-18')
self.__db_name = db_name
self.__bucket = os.environ[c.ENV_S3_STORAGE]
self.__s3 = S3(bucket=self.__bucket)
@property
def query_results_path(self):
return "results"
def query(self, sql, result_as_list = True, sync=True):
if not self.is_valid_query(sql):
return None
print "Executing query\n\t", sql
params = dict({})
params['QueryString'] = sql
params['QueryExecutionContext']={
'Database': self.__db_name
}
params['ResultConfiguration'] = {
'OutputLocation': "s3://{}/{}".format(self.__bucket, self.query_results_path),
'EncryptionConfiguration': {
'EncryptionOption': 'SSE_S3'
}
}
response = retry.try_with_backoff(self.__context, self.__client.start_query_execution, **params)
id = response['QueryExecutionId']
if sync:
#TODO: implement a boto3 waiter
while True:
query = self.get_query_execution(id)
print "Query '{}...' is".format(sql[:30]), query['Status']['State']
if query['Status']['State'] == 'RUNNING' or query['Status']['State'] == 'QUEUED':
time.sleep(3)
elif query['Status']['State'] == 'FAILED':
print "The query '{}' FAILED with ERROR: {}".format(query, query['Status']["StateChangeReason"])
if 'HIVE_CANNOT_OPEN_SPLIT' in query['Status']["StateChangeReason"]:
#The amoeba generator could be running which would cause files to be removed
return []
else:
return None
else:
return self.get_output( query['ResultConfiguration']['OutputLocation'], result_as_list)
else:
return id
def is_valid_query(self, sql):
# To be a valid client query, the SQL must contain both a SELECT and a FROM operator.
# Athena only allows one query per execution; the Athena compiler rejects e.g. "select 1; select 2".
required_operators = [['select','from'], ['describe']]
valid = False
sql = sql.lower()
for operator_set in required_operators:
is_set_valid = False
for operator in operator_set:
if operator not in sql:
is_set_valid = False
break
else:
is_set_valid = True
valid = is_set_valid or valid
return valid
def get_named_query(self, name):
response = self.__client.get_named_query(
NamedQueryId=name
)
return response['NamedQuery']
def get_query_execution(self, id):
params = {}
params['QueryExecutionId'] = id
response = retry.try_with_backoff(self.__context, self.__client.get_query_execution, **params)
return response['QueryExecution']
def get_output(self, location, result_as_list=True):
parts = location.split("/")
file = parts[len(parts)-1]
result = StringIO(self.__s3.read("{}/{}".format(self.query_results_path, file)))
self.__s3.delete(["/{}/{}".format(self.query_results_path, file)])
if result_as_list:
return list(csv.reader(result, delimiter=',', quotechar='"'))
return result.getvalue()
def get_table_prefix(arn, use_cache=True):
return "{}_".format(get_database_name(arn, use_cache))
def get_database_name(arn, use_cache=True):
project_name = util.get_project_name(arn, use_cache).replace("-","_").lower()
deployment_name = util.get_deployment_name(arn, use_cache).replace("-", "_").lower()
return "{}_{}".format(project_name,deployment_name)
class Query(object):
def __init__(self, arn):
self.__database_name = get_database_name(arn)
self.__table_prefix = "{}_table_".format(self.__database_name.lower())
self.__athena = Athena(self.__database_name)
@property
def client(self):
return self.__athena
@property
def database_name(self):
return self.__database_name
@property
def prefix(self):
return self.__table_prefix
def execute(self, query, result_as_list=True, sync=True):
return self.__athena.query(query, result_as_list, sync)
def execute_with_format(self, query_format, result_as_list=True, sync=True):
return self.__athena.query(query_format.format(self.__database_name, self.__table_prefix), result_as_list, sync)
```
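The gate in `Athena.is_valid_query` above only checks that the SQL text contains every operator of at least one required operator set. A standalone illustration of that check (no AWS access required):
```python
# Standalone illustration of the operator check in Athena.is_valid_query above:
# a query is accepted when it contains every operator of at least one required set.
required_operators = [['select', 'from'], ['describe']]

def is_valid_query(sql):
    sql = sql.lower()
    return any(all(op in sql for op in operator_set) for operator_set in required_operators)

print(is_valid_query("SELECT * FROM sessions"))   # True
print(is_valid_query("DESCRIBE sessions"))        # True
print(is_valid_query("DROP TABLE sessions"))      # False
```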
#### File: common-code/Constant/metric_error_code.py
```python
class Error(object):
@staticmethod
def exceeded_maximum_metric_capacity():
return "ErrorExceededMaximumMetricCapacity"
@staticmethod
def missing_attribute():
return "ErrorMissingAttributes"
@staticmethod
def is_not_lower():
return "ErrorNotLowerCase"
@staticmethod
def out_of_order():
return "ErrorOutOfOrder"
@staticmethod
def unable_to_sort():
return "ErrorNotSorted"
@staticmethod
def is_null():
return "ErrorNullValue"
@staticmethod
def empty_dataframe():
return "ErrorEmptyDataFrame"
```
#### File: tests/cudadrv/test_cuda_array_slicing.py
```python
from __future__ import print_function
from itertools import product
import numpy as np
from numba import cuda
from numba.cuda.testing import unittest
class CudaArrayIndexing(unittest.TestCase):
def test_index_1d(self):
arr = np.arange(10)
darr = cuda.to_device(arr)
for i in range(arr.size):
self.assertEqual(arr[i], darr[i])
def test_index_2d(self):
arr = np.arange(9).reshape(3, 3)
darr = cuda.to_device(arr)
for i in range(arr.shape[0]):
for j in range(arr.shape[1]):
self.assertEqual(arr[i, j], darr[i, j])
def test_index_3d(self):
arr = np.arange(3 ** 3).reshape(3, 3, 3)
darr = cuda.to_device(arr)
for i in range(arr.shape[0]):
for j in range(arr.shape[1]):
for k in range(arr.shape[2]):
self.assertEqual(arr[i, j, k], darr[i, j, k])
class CudaArrayStridedSlice(unittest.TestCase):
def test_strided_index_1d(self):
arr = np.arange(10)
darr = cuda.to_device(arr)
for i in range(arr.size):
np.testing.assert_equal(arr[i::2], darr[i::2].copy_to_host())
def test_strided_index_2d(self):
arr = np.arange(6 ** 2).reshape(6, 6)
darr = cuda.to_device(arr)
for i in range(arr.shape[0]):
for j in range(arr.shape[1]):
np.testing.assert_equal(arr[i::2, j::2],
darr[i::2, j::2].copy_to_host())
def test_strided_index_3d(self):
arr = np.arange(6 ** 3).reshape(6, 6, 6)
darr = cuda.to_device(arr)
for i in range(arr.shape[0]):
for j in range(arr.shape[1]):
for k in range(arr.shape[2]):
np.testing.assert_equal(arr[i::2, j::2, k::2],
darr[i::2, j::2, k::2].copy_to_host())
class CudaArraySlicing(unittest.TestCase):
def test_prefix_1d(self):
arr = np.arange(5)
darr = cuda.to_device(arr)
for i in range(arr.size):
expect = arr[i:]
got = darr[i:].copy_to_host()
self.assertTrue(np.all(expect == got))
def test_prefix_2d(self):
arr = np.arange(3 ** 2).reshape(3, 3)
darr = cuda.to_device(arr)
for i in range(arr.shape[0]):
for j in range(arr.shape[1]):
expect = arr[i:, j:]
sliced = darr[i:, j:]
self.assertEqual(expect.shape, sliced.shape)
self.assertEqual(expect.strides, sliced.strides)
got = sliced.copy_to_host()
self.assertTrue(np.all(expect == got))
def test_select_column(self):
a = np.arange(25).reshape(5, 5, order='F')
da = cuda.to_device(a)
for i in range(a.shape[1]):
self.assertTrue(np.all(da[:, i].copy_to_host() == a[:, i]))
def test_select_row(self):
a = np.arange(25).reshape(5, 5, order='C')
da = cuda.to_device(a)
for i in range(a.shape[0]):
self.assertTrue(np.all(da[i, :].copy_to_host() == a[i, :]))
def test_prefix_select(self):
arr = np.arange(5 ** 2).reshape(5, 5, order='F')
darr = cuda.to_device(arr)
self.assertTrue(np.all(darr[:1, 1].copy_to_host() == arr[:1, 1]))
def test_negative_slicing_1d(self):
arr = np.arange(10)
darr = cuda.to_device(arr)
for i, j in product(range(-10, 10), repeat=2):
np.testing.assert_array_equal(arr[i:j],
darr[i:j].copy_to_host())
def test_negative_slicing_2d(self):
arr = np.arange(9).reshape(3, 3)
darr = cuda.to_device(arr)
for x, y, w, s in product(range(-4, 4), repeat=4):
np.testing.assert_array_equal(arr[x:y, w:s],
darr[x:y, w:s].copy_to_host())
def test_empty_slice_1d(self):
arr = np.arange(5)
darr = cuda.to_device(arr)
for i in range(darr.shape[0]):
np.testing.assert_array_equal(darr[i:i].copy_to_host(), arr[i:i])
# empty slice of empty slice
self.assertFalse(darr[:0][:0].copy_to_host())
# out-of-bound slice just produces empty slices
np.testing.assert_array_equal(darr[:0][:1].copy_to_host(), arr[:0][:1])
np.testing.assert_array_equal(darr[:0][-1:].copy_to_host(), arr[:0][-1:])
def test_empty_slice_2d(self):
arr = np.arange(5 * 5).reshape(5, 5)
darr = cuda.to_device(arr)
np.testing.assert_array_equal(darr[:0].copy_to_host(), arr[:0])
np.testing.assert_array_equal(darr[3, :0].copy_to_host(), arr[3, :0])
# empty slice of empty slice
self.assertFalse(darr[:0][:0].copy_to_host())
# out-of-bound slice just produces empty slices
np.testing.assert_array_equal(darr[:0][:1].copy_to_host(), arr[:0][:1])
np.testing.assert_array_equal(darr[:0][-1:].copy_to_host(), arr[:0][-1:])
if __name__ == '__main__':
unittest.main()
```
#### File: tests/cudadrv/test_ir_patch.py
```python
from __future__ import print_function, absolute_import, division
from numba.cuda.testing import unittest
from numba.cuda.testing import skip_on_cudasim
@skip_on_cudasim('Linking unsupported in the simulator')
class TestIRPatch(unittest.TestCase):
def patch(self, ir):
# Import here to avoid error in CUDASIM
from numba.cuda.cudadrv.nvvm import llvm39_to_34_ir
return llvm39_to_34_ir(ir)
def test_load_rewrite(self):
text = "%myload = not really"
out = self.patch(text)
# No rewrite
self.assertEqual(text, out)
text = "%myload = load i32, i32* val"
out = self.patch(text)
# Rewritten
self.assertEqual("%myload = load i32* val", out)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/cudasim/__init__.py
```python
from numba.testing import SerialSuite
from numba.testing import load_testsuite
import os
from numba import config
def load_tests(loader, tests, pattern):
return SerialSuite(load_testsuite(loader, os.path.dirname(__file__)))
```
#### File: tests/hsapy/test_scan.py
```python
from __future__ import print_function, absolute_import, division
import numpy as np
from numba import unittest_support as unittest
from numba import hsa, intp
@hsa.jit(device=True)
def device_scan_generic(tid, data):
"""Inclusive prefix sum within a single block
Requires tid should have range [0, data.size) and data.size must be
power of 2.
"""
n = data.size
# Upsweep
offset = 1
d = n // 2
while d > 0:
hsa.barrier(hsa.CLK_GLOBAL_MEM_FENCE)
if tid < d:
ai = offset * (2 * tid + 1) - 1
bi = offset * (2 * tid + 2) - 1
data[bi] += data[ai]
offset *= 2
d //= 2
hsa.barrier(hsa.CLK_GLOBAL_MEM_FENCE)
prefixsum = data[n - 1]
hsa.barrier(hsa.CLK_GLOBAL_MEM_FENCE)
if tid == 0:
data[n - 1] = 0
# Downsweep
d = 1
offset = n
while d < n:
offset //= 2
hsa.barrier(hsa.CLK_GLOBAL_MEM_FENCE)
if tid < d:
ai = offset * (2 * tid + 1) - 1
bi = offset * (2 * tid + 2) - 1
tmp = data[ai]
data[ai] = data[bi]
data[bi] += tmp
d *= 2
hsa.barrier(hsa.CLK_GLOBAL_MEM_FENCE)
return prefixsum
_WARPSIZE = 64
@hsa.jit(device=True)
def warp_scan(tid, temp, inclusive):
"""Intra-warp scan
Note
----
Assume all threads are in lockstep
"""
hsa.wavebarrier()
lane = tid & (_WARPSIZE - 1)
if lane >= 1:
temp[tid] += temp[tid - 1]
hsa.wavebarrier()
if lane >= 2:
temp[tid] += temp[tid - 2]
hsa.wavebarrier()
if lane >= 4:
temp[tid] += temp[tid - 4]
hsa.wavebarrier()
if lane >= 8:
temp[tid] += temp[tid - 8]
hsa.wavebarrier()
if lane >= 16:
temp[tid] += temp[tid - 16]
hsa.wavebarrier()
if lane >= 32:
temp[tid] += temp[tid - 32]
hsa.wavebarrier()
if inclusive:
return temp[tid]
else:
return temp[tid - 1] if lane > 0 else 0
@hsa.jit(device=True)
def device_scan(tid, data, temp, inclusive):
"""
Args
----
tid:
thread id
data: scalar
input for tid
temp: shared memory for temporary work
"""
lane = tid & (_WARPSIZE - 1)
warpid = tid >> 6
# Preload
temp[tid] = data
hsa.barrier(hsa.CLK_GLOBAL_MEM_FENCE)
# Scan warps in parallel
warp_scan_res = warp_scan(tid, temp, inclusive)
hsa.barrier(hsa.CLK_GLOBAL_MEM_FENCE)
# Get partial result
if lane == (_WARPSIZE - 1):
temp[warpid] = temp[tid]
hsa.barrier(hsa.CLK_GLOBAL_MEM_FENCE)
# Scan the partial results
if warpid == 0:
warp_scan(tid, temp, True)
hsa.barrier(hsa.CLK_GLOBAL_MEM_FENCE)
# Accumulate scanned partial results
if warpid > 0:
warp_scan_res += temp[warpid - 1]
hsa.barrier(hsa.CLK_GLOBAL_MEM_FENCE)
# Output
if tid == temp.size - 1:
# Last thread computes prefix sum
if inclusive:
temp[0] = warp_scan_res
else:
temp[0] = warp_scan_res + data
hsa.barrier(hsa.CLK_GLOBAL_MEM_FENCE)
# Load prefixsum
prefixsum = temp[0]
hsa.barrier(hsa.CLK_GLOBAL_MEM_FENCE)
return warp_scan_res, prefixsum
@hsa.jit(device=True)
def shuffle_up(val, width):
tid = hsa.get_local_id(0)
hsa.wavebarrier()
res = hsa.activelanepermute_wavewidth(val, tid - width, 0, False)
return res
@hsa.jit(device=True)
def shuf_wave_inclusive_scan(val):
tid = hsa.get_local_id(0)
lane = tid & (_WARPSIZE - 1)
hsa.wavebarrier()
shuf = shuffle_up(val, 1)
if lane >= 1:
val += shuf
hsa.wavebarrier()
shuf = shuffle_up(val, 2)
if lane >= 2:
val += shuf
hsa.wavebarrier()
shuf = shuffle_up(val, 4)
if lane >= 4:
val += shuf
hsa.wavebarrier()
shuf = shuffle_up(val, 8)
if lane >= 8:
val += shuf
hsa.wavebarrier()
shuf = shuffle_up(val, 16)
if lane >= 16:
val += shuf
hsa.wavebarrier()
shuf = shuffle_up(val, 32)
if lane >= 32:
val += shuf
hsa.wavebarrier()
return val
@hsa.jit(device=True)
def shuf_device_inclusive_scan(data, temp):
"""
Args
----
data: scalar
input for tid
temp: shared memory for temporary work, requires at least
threadcount/wavesize storage
"""
tid = hsa.get_local_id(0)
lane = tid & (_WARPSIZE - 1)
warpid = tid >> 6
# Scan warps in parallel
warp_scan_res = shuf_wave_inclusive_scan(data)
hsa.barrier()
# Store partial sum into shared memory
if lane == (_WARPSIZE - 1):
temp[warpid] = warp_scan_res
hsa.barrier()
# Scan the partial sum by first wave
if warpid == 0:
shuf_wave_inclusive_scan(temp[lane])
hsa.barrier()
# Get block sum for each wave
blocksum = 0 # first wave is 0
if warpid > 0:
blocksum = temp[warpid - 1]
return warp_scan_res + blocksum
class TestScan(unittest.TestCase):
def test_single_block(self):
@hsa.jit
def scan_block(data, sums):
sm_data = hsa.shared.array(64, dtype=intp)
tid = hsa.get_local_id(0)
gid = hsa.get_global_id(0)
blkid = hsa.get_group_id(0)
sm_data[tid] = data[gid]
prefixsum = device_scan_generic(tid, sm_data)
data[gid] = sm_data[tid]
if tid == 0:
sums[blkid] = prefixsum
data = np.random.randint(0, 4, size=64).astype(np.intp)
expected = data.cumsum()
sums = np.zeros(1, dtype=np.intp)
scan_block[1, 64](data, sums)
np.testing.assert_equal(expected[:-1], data[1:])
self.assertEqual(expected[-1], sums[0])
self.assertEqual(0, data[0])
def test_multi_block(self):
@hsa.jit
def scan_block(data, sums):
sm_data = hsa.shared.array(64, dtype=intp)
tid = hsa.get_local_id(0)
gid = hsa.get_global_id(0)
blkid = hsa.get_group_id(0)
sm_data[tid] = data[gid]
prefixsum = device_scan_generic(tid, sm_data)
data[gid] = sm_data[tid]
if tid == 0:
sums[blkid] = prefixsum
nd_data = np.random.randint(0, 4, size=3 * 64).astype(
np.intp).reshape(3, 64)
nd_expected = nd_data.cumsum(axis=1)
sums = np.zeros(3, dtype=np.intp)
scan_block[3, 64](nd_data.ravel(), sums)
for nd in range(nd_expected.shape[0]):
expected = nd_expected[nd]
data = nd_data[nd]
np.testing.assert_equal(expected[:-1], data[1:])
self.assertEqual(expected[-1], sums[nd])
self.assertEqual(0, data[0])
def test_multi_large_block(self):
@hsa.jit
def scan_block(data, sums):
sm_data = hsa.shared.array(128, dtype=intp)
tid = hsa.get_local_id(0)
gid = hsa.get_global_id(0)
blkid = hsa.get_group_id(0)
sm_data[tid] = data[gid]
prefixsum = device_scan_generic(tid, sm_data)
data[gid] = sm_data[tid]
sums[blkid, tid] = prefixsum
nd_data = np.random.randint(0, 4, size=3 * 128).astype(
np.intp).reshape(3, 128)
nd_expected = nd_data.cumsum(axis=1)
sums = np.zeros((3, 128), dtype=np.intp)
scan_block[3, 128](nd_data.ravel(), sums)
for nd in range(nd_expected.shape[0]):
expected = nd_expected[nd]
data = nd_data[nd]
np.testing.assert_equal(expected[:-1], data[1:])
np.testing.assert_equal(expected[-1], sums[nd])
self.assertEqual(0, data[0])
class TestFasterScan(unittest.TestCase):
def test_single_block(self):
@hsa.jit
def scan_block(data, sums):
sm_data = hsa.shared.array(64, dtype=intp)
tid = hsa.get_local_id(0)
gid = hsa.get_global_id(0)
blkid = hsa.get_group_id(0)
scanval, prefixsum = device_scan(tid, data[gid], sm_data,
False)
data[gid] = scanval
if tid == 0:
sums[blkid] = prefixsum
data = np.random.randint(0, 4, size=64).astype(np.intp)
expected = data.cumsum()
sums = np.zeros(1, dtype=np.intp)
scan_block[1, 64](data, sums)
np.testing.assert_equal(expected[:-1], data[1:])
self.assertEqual(expected[-1], sums[0])
self.assertEqual(0, data[0])
def test_single_larger_block(self):
@hsa.jit
def scan_block(data, sums):
sm_data = hsa.shared.array(256, dtype=intp)
tid = hsa.get_local_id(0)
gid = hsa.get_global_id(0)
blkid = hsa.get_group_id(0)
scanval, prefixsum = device_scan(tid, data[gid], sm_data,
False)
data[gid] = scanval
if tid == 0:
sums[blkid] = prefixsum
data = np.random.randint(0, 4, size=256).astype(np.intp)
expected = data.cumsum()
sums = np.zeros(1, dtype=np.intp)
scan_block[1, 256](data, sums)
np.testing.assert_equal(expected[:-1], data[1:])
print(data)
print(sums)
self.assertEqual(expected[-1], sums[0])
self.assertEqual(0, data[0])
def test_multi_large_block(self):
@hsa.jit
def scan_block(data, sums):
sm_data = hsa.shared.array(128, dtype=intp)
tid = hsa.get_local_id(0)
gid = hsa.get_global_id(0)
blkid = hsa.get_group_id(0)
scanval, prefixsum = device_scan(tid, data[gid], sm_data,
False)
data[gid] = scanval
sums[blkid, tid] = prefixsum
nd_data = np.random.randint(0, 4, size=3 * 128).astype(
np.intp).reshape(3, 128)
nd_expected = nd_data.cumsum(axis=1)
sums = np.zeros((3, 128), dtype=np.intp)
scan_block[3, 128](nd_data.ravel(), sums)
for nd in range(nd_expected.shape[0]):
expected = nd_expected[nd]
data = nd_data[nd]
np.testing.assert_equal(expected[:-1], data[1:])
np.testing.assert_equal(expected[-1], sums[nd])
self.assertEqual(0, data[0])
class TestShuffleScan(unittest.TestCase):
def test_shuffle(self):
@hsa.jit
def foo(inp, mask, out):
tid = hsa.get_local_id(0)
out[tid] = hsa.activelanepermute_wavewidth(inp[tid], mask[tid], 0,
False)
inp = np.arange(64, dtype=np.intp)
for i in range(10):
mask = np.random.randint(0, inp.size, inp.size).astype(np.uint32)
out = np.zeros_like(inp)
foo[1, 64](inp, mask, out)
np.testing.assert_equal(inp[mask], out)
def test_shuffle_up(self):
@hsa.jit
def foo(inp, out):
gid = hsa.get_global_id(0)
out[gid] = shuffle_up(inp[gid], 1)
inp = np.arange(128, dtype=np.intp)
out = np.zeros_like(inp)
foo[1, 128](inp, out)
inp = inp.reshape(2, 64)
out = out.reshape(inp.shape)
for i in range(out.shape[0]):
np.testing.assert_equal(inp[0, :-1], out[0, 1:])
np.testing.assert_equal(inp[0, -1], out[0, 0])
def test_shuf_wave_inclusive_scan(self):
@hsa.jit
def foo(inp, out):
gid = hsa.get_global_id(0)
out[gid] = shuf_wave_inclusive_scan(inp[gid])
inp = np.arange(64, dtype=np.intp)
out = np.zeros_like(inp)
foo[1, 64](inp, out)
np.testing.assert_equal(out, inp.cumsum())
def test_shuf_device_inclusive_scan(self):
@hsa.jit
def foo(inp, out):
gid = hsa.get_global_id(0)
temp = hsa.shared.array(2, dtype=intp)
out[gid] = shuf_device_inclusive_scan(inp[gid], temp)
inp = np.arange(128, dtype=np.intp)
out = np.zeros_like(inp)
foo[1, inp.size](inp, out)
np.testing.assert_equal(out, np.cumsum(inp))
if __name__ == '__main__':
unittest.main()
```
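The upsweep/downsweep pattern used by `device_scan_generic` above is the classic Blelloch exclusive scan. The following is a CPU reference sketch in NumPy (assuming a power-of-two length), handy for cross-checking device results against expected values:
```python
# CPU reference for the upsweep/downsweep (Blelloch) scan used by device_scan_generic above.
# Assumes the input size is a power of two; returns the exclusive scan and the block total.
import numpy as np

def blelloch_exclusive_scan(a):
    data = np.array(a, copy=True)
    n = data.size
    # Upsweep: build partial sums up a binary tree.
    offset, d = 1, n // 2
    while d > 0:
        for tid in range(d):
            ai = offset * (2 * tid + 1) - 1
            bi = offset * (2 * tid + 2) - 1
            data[bi] += data[ai]
        offset *= 2
        d //= 2
    total = int(data[n - 1])
    data[n - 1] = 0
    # Downsweep: push prefixes back down the tree.
    d, offset = 1, n
    while d < n:
        offset //= 2
        for tid in range(d):
            ai = offset * (2 * tid + 1) - 1
            bi = offset * (2 * tid + 2) - 1
            data[ai], data[bi] = data[bi], data[ai] + data[bi]
        d *= 2
    return data, total

scanned, total = blelloch_exclusive_scan(np.arange(1, 9))
print(scanned)   # [ 0  1  3  6 10 15 21 28]
print(total)     # 36
```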
#### File: numba/tests/cfunc_cache_usecases.py
```python
from __future__ import division, print_function, absolute_import
import sys
from numba import cfunc, jit
from numba.tests.support import TestCase, captured_stderr
Z = 1
add_sig = "float64(float64, float64)"
div_sig = "float64(int64, int64)"
@cfunc(add_sig, cache=True, nopython=True)
def add_usecase(x, y):
return x + y + Z
@cfunc(add_sig, nopython=True)
def add_nocache_usecase(x, y):
return x + y + Z
@cfunc(div_sig, cache=True, nopython=True)
def div_usecase(a, b):
return a / b
@jit(nopython=True)
def inner(x, y):
return x + y + Z
@cfunc(add_sig, cache=True, nopython=True)
def outer(x, y):
return inner(-y, x)
class _TestModule(TestCase):
"""
Tests for functionality of this module's cfuncs.
Note this does not define any "test_*" method, instead check_module()
should be called by hand.
"""
def check_module(self, mod):
f = mod.add_usecase
self.assertPreciseEqual(f.ctypes(2.0, 3.0), 6.0)
f = mod.add_nocache_usecase
self.assertPreciseEqual(f.ctypes(2.0, 3.0), 6.0)
f = mod.outer
self.assertPreciseEqual(f.ctypes(5.0, 2.0), 4.0)
f = mod.div_usecase
with captured_stderr() as err:
self.assertPreciseEqual(f.ctypes(7, 2), 3.5)
self.assertEqual(err.getvalue(), "")
with captured_stderr() as err:
f.ctypes(7, 0)
err = err.getvalue()
self.assertIn("ZeroDivisionError", err)
# For 2.x
def runTest(self):
raise NotImplementedError
def self_test():
mod = sys.modules[__name__]
_TestModule().check_module(mod)
```
#### File: tests/npyufunc/test_dufunc.py
```python
from __future__ import print_function, absolute_import, division
from numba import unittest_support as unittest
import numpy as np
from numba import njit
from numba.npyufunc import dufunc
from ..support import MemoryLeakMixin
def pyuadd(a0, a1):
return a0 + a1
class TestDUFunc(MemoryLeakMixin, unittest.TestCase):
def nopython_dufunc(self, pyfunc):
return dufunc.DUFunc(pyfunc, targetoptions=dict(nopython=True))
def test_frozen(self):
duadd = self.nopython_dufunc(pyuadd)
self.assertFalse(duadd._frozen)
duadd._frozen = True
self.assertTrue(duadd._frozen)
with self.assertRaises(ValueError):
duadd._frozen = False
with self.assertRaises(TypeError):
duadd(np.linspace(0,1,10), np.linspace(1,2,10))
def test_scalar(self):
duadd = self.nopython_dufunc(pyuadd)
self.assertEqual(pyuadd(1,2), duadd(1,2))
def test_npm_call(self):
duadd = self.nopython_dufunc(pyuadd)
@njit
def npmadd(a0, a1, o0):
duadd(a0, a1, o0)
X = np.linspace(0,1.9,20)
X0 = X[:10]
X1 = X[10:]
out0 = np.zeros(10)
npmadd(X0, X1, out0)
np.testing.assert_array_equal(X0 + X1, out0)
Y0 = X0.reshape((2,5))
Y1 = X1.reshape((2,5))
out1 = np.zeros((2,5))
npmadd(Y0, Y1, out1)
np.testing.assert_array_equal(Y0 + Y1, out1)
Y2 = X1[:5]
out2 = np.zeros((2,5))
npmadd(Y0, Y2, out2)
np.testing.assert_array_equal(Y0 + Y2, out2)
def test_npm_call_implicit_output(self):
duadd = self.nopython_dufunc(pyuadd)
@njit
def npmadd(a0, a1):
return duadd(a0, a1)
X = np.linspace(0,1.9,20)
X0 = X[:10]
X1 = X[10:]
out0 = npmadd(X0, X1)
np.testing.assert_array_equal(X0 + X1, out0)
Y0 = X0.reshape((2,5))
Y1 = X1.reshape((2,5))
out1 = npmadd(Y0, Y1)
np.testing.assert_array_equal(Y0 + Y1, out1)
Y2 = X1[:5]
out2 = npmadd(Y0, Y2)
np.testing.assert_array_equal(Y0 + Y2, out2)
out3 = npmadd(1.,2.)
self.assertEqual(out3, 3.)
def test_ufunc_props(self):
duadd = self.nopython_dufunc(pyuadd)
self.assertEqual(duadd.nin, 2)
self.assertEqual(duadd.nout, 1)
self.assertEqual(duadd.nargs, duadd.nin + duadd.nout)
self.assertEqual(duadd.ntypes, 0)
self.assertEqual(duadd.types, [])
self.assertEqual(duadd.identity, None)
duadd(1, 2)
self.assertEqual(duadd.ntypes, 1)
self.assertEqual(duadd.ntypes, len(duadd.types))
if __name__ == "__main__":
unittest.main()
```
#### File: numba/tests/test_annotations.py
```python
from __future__ import absolute_import, division
import re
import numba
from numba import unittest_support as unittest
from numba.compiler import compile_isolated, Flags
from numba import types
from numba.io_support import StringIO
try:
import jinja2
except ImportError:
jinja2 = None
@unittest.skipIf(jinja2 is None, "please install the 'jinja2' package")
class TestAnnotation(unittest.TestCase):
def test_exercise_code_path(self):
"""
Ensures template.html is available
"""
def foo(n, a):
s = a
for i in range(n):
s += i
return s
cres = compile_isolated(foo, [types.int32, types.int32])
ta = cres.type_annotation
buf = StringIO()
ta.html_annotate(buf)
output = buf.getvalue()
buf.close()
self.assertIn("foo", output)
def test_exercise_code_path_with_lifted_loop(self):
"""
Ensures that lifted loops are handled correctly in obj mode
"""
# the functions to jit
def bar(x):
return x
def foo(x):
h = 0.
for k in range(x):
h = h + k
if x:
h = h - bar(x)
return h
# compile into an isolated context
flags = Flags()
flags.set('enable_pyobject')
flags.set('enable_looplift')
cres = compile_isolated(foo, [types.intp], flags=flags)
ta = cres.type_annotation
buf = StringIO()
ta.html_annotate(buf)
output = buf.getvalue()
buf.close()
self.assertIn("bar", output)
self.assertIn("foo", output)
self.assertIn("LiftedLoop", output)
def test_html_output_with_lifted_loop(self):
"""
Test some format and behavior of the html annotation with lifted loop
"""
@numba.jit
def udt(x):
object() # to force object mode
z = 0
for i in range(x): # this line is tagged
z += i
return z
# Regex pattern to check for the "lifted_tag" in the line of the loop
re_lifted_tag = re.compile(
r'<td class="lifted_tag">\s*'
r'[ ]+for i in range\(x\): # this line is tagged\s*'
r'</td>', re.MULTILINE)
# Compile int64 version
sig_i64 = (types.int64,)
udt.compile(sig_i64) # compile with lifted loop
cres = udt.overloads[sig_i64]
# Make html output
buf = StringIO()
cres.type_annotation.html_annotate(buf)
output = buf.getvalue()
buf.close()
# There should be only one function output.
self.assertEqual(output.count("Function name: udt"), 1)
sigfmt = "with signature: {} -> pyobject"
self.assertEqual(output.count(sigfmt.format(sig_i64)), 1)
# Ensure the loop is tagged
self.assertEqual(len(re.findall(re_lifted_tag, output)), 1)
# Compile float64 version
sig_f64 = (types.float64,)
udt.compile(sig_f64)
cres = udt.overloads[sig_f64]
# Make html output
buf = StringIO()
cres.type_annotation.html_annotate(buf)
output = buf.getvalue()
buf.close()
# There should be two function output
self.assertEqual(output.count("Function name: udt"), 2)
self.assertEqual(output.count(sigfmt.format(sig_i64)), 1)
self.assertEqual(output.count(sigfmt.format(sig_f64)), 1)
# Ensure the loop is tagged in both output
self.assertEqual(len(re.findall(re_lifted_tag, output)), 2)
if __name__ == '__main__':
unittest.main()
```
#### File: numba/tests/test_casting.py
```python
from numba import unittest_support as unittest
import numpy as np
from numba.compiler import compile_isolated
from numba import types, njit
import struct
def float_to_int(x):
return types.int32(x)
def int_to_float(x):
return types.float64(x) / 2
def float_to_unsigned(x):
return types.uint32(x)
def float_to_complex(x):
return types.complex128(x)
class TestCasting(unittest.TestCase):
def test_float_to_int(self):
pyfunc = float_to_int
cr = compile_isolated(pyfunc, [types.float32])
cfunc = cr.entry_point
self.assertEqual(cr.signature.return_type, types.int32)
self.assertEqual(cfunc(12.3), pyfunc(12.3))
self.assertEqual(cfunc(12.3), int(12.3))
self.assertEqual(cfunc(-12.3), pyfunc(-12.3))
self.assertEqual(cfunc(-12.3), int(-12.3))
def test_int_to_float(self):
pyfunc = int_to_float
cr = compile_isolated(pyfunc, [types.int64])
cfunc = cr.entry_point
self.assertEqual(cr.signature.return_type, types.float64)
self.assertEqual(cfunc(321), pyfunc(321))
self.assertEqual(cfunc(321), 321. / 2)
def test_float_to_unsigned(self):
pyfunc = float_to_unsigned
cr = compile_isolated(pyfunc, [types.float32])
cfunc = cr.entry_point
self.assertEqual(cr.signature.return_type, types.uint32)
self.assertEqual(cfunc(3.21), pyfunc(3.21))
self.assertEqual(cfunc(3.21), struct.unpack('I', struct.pack('i',
3))[0])
def test_float_to_complex(self):
pyfunc = float_to_complex
cr = compile_isolated(pyfunc, [types.float64])
cfunc = cr.entry_point
self.assertEqual(cr.signature.return_type, types.complex128)
self.assertEqual(cfunc(-3.21), pyfunc(-3.21))
self.assertEqual(cfunc(-3.21), -3.21 + 0j)
def test_array_to_array(self):
"""Make sure this compiles.
Cast C to A array
"""
@njit("f8(f8[:])")
def inner(x):
return x[0]
inner.disable_compile()
@njit("f8(f8[::1])")
def driver(x):
return inner(x)
x = np.array([1234], dtype=np.float64)
self.assertEqual(driver(x), x[0])
self.assertEqual(len(inner.overloads), 1)
def test_optional_to_optional(self):
"""
Test error due mishandling of Optional to Optional casting
Related issue: https://github.com/numba/numba/issues/1718
"""
# Attempt to cast optional(intp) to optional(float64)
opt_int = types.Optional(types.intp)
opt_flt = types.Optional(types.float64)
sig = opt_flt(opt_int)
@njit(sig)
def foo(a):
return a
self.assertEqual(foo(2), 2)
self.assertIsNone(foo(None))
if __name__ == '__main__':
unittest.main()
```
#### File: numba/tests/test_multi3.py
```python
from __future__ import print_function, absolute_import, division
import random
import numpy as np
from numba import njit, types
from numba import unittest_support as unittest
class TestMulti3(unittest.TestCase):
"""
This test is only relevant for 32-bit architectures.
Test __multi3 implementation in _helperlib.c.
    The symbol defines an i128 multiplication.
    It is necessary for working around an issue in LLVM (see issue #969).
    The symbol does not exist on 32-bit platforms, and should not be used by
    LLVM. However, optimization passes will create an i65 multiplication that
    is then lowered to __multi3.
"""
def test_multi3(self):
@njit("(int64,)")
def func(x):
res = 0
for i in range(x):
res += i
return res
x_cases = [-1, 0, 1, 3, 4, 8,
0xffffffff - 1, 0xffffffff, 0xffffffff + 1,
0x123456789abcdef, -0x123456789abcdef]
for _ in range(500):
x_cases.append(random.randint(0, 0xffffffff))
def expected(x):
if x <= 0: return 0
return ((x * (x - 1)) // 2) & (2**64 - 1)
for x in x_cases:
self.assertEqual(expected(x), func(x))
if __name__ == '__main__':
unittest.main()
```
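A small standalone illustration, not taken from the numba sources, of why the intermediate product in `expected()` exceeds the signed 64-bit range for the larger cases above, which is what drives LLVM to emit the widened (i65) multiply lowered to `__multi3`:
```python
# Illustration only: shows the overflow that motivates __multi3 on 32-bit.
x = 0xffffffff + 1                   # 2**32, one of the cases exercised above
product = x * (x - 1)                # intermediate value before the // 2
print(product > 2**63 - 1)           # True: exceeds the signed 64-bit maximum
print((product // 2) & (2**64 - 1))  # the value expected(x) actually returns
```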
#### File: numba/tests/test_nested_calls.py
```python
from __future__ import print_function, division, absolute_import
from numba import int32, int64
from numba import jit, generated_jit, types
from numba import unittest_support as unittest
from .support import TestCase, tag
@jit(nopython=True)
def f_inner(a, b, c):
return a, b, c
def f(x, y, z):
return f_inner(x, c=y, b=z)
@jit(nopython=True)
def g_inner(a, b=2, c=3):
return a, b, c
def g(x, y, z):
return g_inner(x, b=y), g_inner(a=z, c=x)
@jit(nopython=True)
def star_inner(a=5, *b):
return a, b
def star(x, y, z):
return star_inner(a=x), star_inner(x, y, z)
def star_call(x, y, z):
return star_inner(x, *y), star_inner(*z)
@jit(nopython=True)
def argcast_inner(a, b):
if b:
# Here `a` is unified to int64 (from int32 originally)
a = int64(0)
return a
def argcast(a, b):
return argcast_inner(int32(a), b)
@generated_jit(nopython=True)
def generated_inner(x, y=5, z=6):
if isinstance(x, types.Complex):
def impl(x, y, z):
return x + y, z
else:
def impl(x, y, z):
return x - y, z
return impl
def call_generated(a, b):
return generated_inner(a, z=b)
class TestNestedCall(TestCase):
def compile_func(self, pyfunc, objmode=False):
def check(*args, **kwargs):
expected = pyfunc(*args, **kwargs)
result = f(*args, **kwargs)
self.assertPreciseEqual(result, expected)
flags = dict(forceobj=True) if objmode else dict(nopython=True)
f = jit(**flags)(pyfunc)
return f, check
def test_boolean_return(self):
@jit(nopython=True)
def inner(x):
return not x
@jit(nopython=True)
def outer(x):
if inner(x):
return True
else:
return False
self.assertFalse(outer(True))
self.assertTrue(outer(False))
@tag('important')
def test_named_args(self, objmode=False):
"""
Test a nested function call with named (keyword) arguments.
"""
cfunc, check = self.compile_func(f, objmode)
check(1, 2, 3)
check(1, y=2, z=3)
def test_named_args_objmode(self):
self.test_named_args(objmode=True)
@tag('important')
def test_default_args(self, objmode=False):
"""
Test a nested function call using default argument values.
"""
cfunc, check = self.compile_func(g, objmode)
check(1, 2, 3)
check(1, y=2, z=3)
def test_default_args_objmode(self):
self.test_default_args(objmode=True)
@tag('important')
def test_star_args(self):
"""
Test a nested function call to a function with *args in its signature.
"""
cfunc, check = self.compile_func(star)
check(1, 2, 3)
@tag('important')
def test_star_call(self, objmode=False):
"""
Test a function call with a *args.
"""
cfunc, check = self.compile_func(star_call, objmode)
check(1, (2,), (3,))
def test_star_call_objmode(self):
self.test_star_call(objmode=True)
def test_argcast(self):
"""
Issue #1488: implicitly casting an argument variable should not
break nested calls.
"""
cfunc, check = self.compile_func(argcast)
check(1, 0)
check(1, 1)
@tag('important')
def test_call_generated(self):
"""
Test a nested function call to a generated jit function.
"""
cfunc = jit(nopython=True)(call_generated)
self.assertPreciseEqual(cfunc(1, 2), (-4, 2))
self.assertPreciseEqual(cfunc(1j, 2), (1j + 5, 2))
if __name__ == '__main__':
unittest.main()
```
#### File: numba/tests/test_object_mode.py
```python
from __future__ import print_function
import numpy as np
import numba.unittest_support as unittest
from numba.compiler import compile_isolated, Flags
from numba import utils, jit
from .support import TestCase
def complex_constant(n):
tmp = n + 4
return tmp + 3j
def long_constant(n):
return n + 100000000000000000000000000000000000000000000000
def delitem_usecase(x):
del x[:]
forceobj = Flags()
forceobj.set("force_pyobject")
def loop_nest_3(x, y):
n = 0
for i in range(x):
for j in range(y):
for k in range(x+y):
n += i * j
return n
def array_of_object(x):
return x
class TestObjectMode(TestCase):
def test_complex_constant(self):
pyfunc = complex_constant
cres = compile_isolated(pyfunc, (), flags=forceobj)
cfunc = cres.entry_point
self.assertPreciseEqual(pyfunc(12), cfunc(12))
def test_long_constant(self):
pyfunc = long_constant
cres = compile_isolated(pyfunc, (), flags=forceobj)
cfunc = cres.entry_point
self.assertPreciseEqual(pyfunc(12), cfunc(12))
def test_loop_nest(self):
"""
        Test a bug that decrefs the iterator too early.
        If the bug occurs, a segfault should occur.
"""
pyfunc = loop_nest_3
cres = compile_isolated(pyfunc, (), flags=forceobj)
cfunc = cres.entry_point
self.assertEqual(pyfunc(5, 5), cfunc(5, 5))
def bm_pyfunc():
pyfunc(5, 5)
def bm_cfunc():
cfunc(5, 5)
print(utils.benchmark(bm_pyfunc))
print(utils.benchmark(bm_cfunc))
def test_array_of_object(self):
cfunc = jit(array_of_object)
objarr = np.array([object()] * 10)
self.assertIs(cfunc(objarr), objarr)
def test_sequence_contains(self):
"""
Test handling of the `in` comparison
"""
@jit(forceobj=True)
def foo(x, y):
return x in y
self.assertTrue(foo(1, [0, 1]))
self.assertTrue(foo(0, [0, 1]))
self.assertFalse(foo(2, [0, 1]))
with self.assertRaises(TypeError) as raises:
foo(None, None)
self.assertIn("is not iterable", str(raises.exception))
def test_delitem(self):
pyfunc = delitem_usecase
cres = compile_isolated(pyfunc, (), flags=forceobj)
cfunc = cres.entry_point
l = [3, 4, 5]
cfunc(l)
self.assertPreciseEqual(l, [])
with self.assertRaises(TypeError):
cfunc(42)
class TestObjectModeInvalidRewrite(TestCase):
"""
Tests to ensure that rewrite passes didn't affect objmode lowering.
"""
def _ensure_objmode(self, disp):
self.assertTrue(disp.signatures)
self.assertFalse(disp.nopython_signatures)
return disp
def test_static_raise_in_objmode_fallback(self):
"""
Test code based on user submitted issue at
https://github.com/numba/numba/issues/2159
"""
def test0(n):
return n
def test1(n):
if n == 0:
# static raise will fail in objmode if the IR is modified by
# rewrite pass
raise ValueError()
return test0(n) # trigger objmode fallback
compiled = jit(test1)
self.assertEqual(test1(10), compiled(10))
self._ensure_objmode(compiled)
def test_static_setitem_in_objmode_fallback(self):
"""
Test code based on user submitted issue at
https://github.com/numba/numba/issues/2169
"""
def test0(n):
return n
def test(a1, a2):
a1 = np.asarray(a1)
# static setitem here will fail in objmode if the IR is modified by
# rewrite pass
a2[0] = 1
return test0(a1.sum() + a2.sum()) # trigger objmode fallback
compiled = jit(test)
args = np.array([3]), np.array([4])
self.assertEqual(test(*args), compiled(*args))
self._ensure_objmode(compiled)
if __name__ == '__main__':
unittest.main()
```
#### File: numba/tests/true_div_usecase.py
```python
from __future__ import division
# These functions have their own module in order to be compiled with the right
# __future__ flag (and be tested alongside the 2.x legacy division operator).
def truediv_usecase(x, y):
return x / y
def itruediv_usecase(x, y):
x /= y
return x
```
#### File: tests/dtypes/test_generic.py
```python
from warnings import catch_warnings
import numpy as np
import pandas as pd
from pandas.core.dtypes import generic as gt
class TestABCClasses(object):
tuples = [[1, 2, 2], ['red', 'blue', 'red']]
multi_index = pd.MultiIndex.from_arrays(tuples, names=('number', 'color'))
datetime_index = pd.to_datetime(['2000/1/1', '2010/1/1'])
timedelta_index = pd.to_timedelta(np.arange(5), unit='s')
period_index = pd.period_range('2000/1/1', '2010/1/1/', freq='M')
categorical = pd.Categorical([1, 2, 3], categories=[2, 3, 1])
categorical_df = pd.DataFrame({"values": [1, 2, 3]}, index=categorical)
df = pd.DataFrame({'names': ['a', 'b', 'c']}, index=multi_index)
sparse_series = pd.Series([1, 2, 3]).to_sparse()
sparse_array = pd.SparseArray(np.random.randn(10))
def test_abc_types(self):
assert isinstance(pd.Index(['a', 'b', 'c']), gt.ABCIndex)
assert isinstance(pd.Int64Index([1, 2, 3]), gt.ABCInt64Index)
assert isinstance(pd.UInt64Index([1, 2, 3]), gt.ABCUInt64Index)
assert isinstance(pd.Float64Index([1, 2, 3]), gt.ABCFloat64Index)
assert isinstance(self.multi_index, gt.ABCMultiIndex)
assert isinstance(self.datetime_index, gt.ABCDatetimeIndex)
assert isinstance(self.timedelta_index, gt.ABCTimedeltaIndex)
assert isinstance(self.period_index, gt.ABCPeriodIndex)
assert isinstance(self.categorical_df.index, gt.ABCCategoricalIndex)
assert isinstance(pd.Index(['a', 'b', 'c']), gt.ABCIndexClass)
assert isinstance(pd.Int64Index([1, 2, 3]), gt.ABCIndexClass)
assert isinstance(pd.Series([1, 2, 3]), gt.ABCSeries)
assert isinstance(self.df, gt.ABCDataFrame)
with catch_warnings(record=True):
assert isinstance(self.df.to_panel(), gt.ABCPanel)
assert isinstance(self.sparse_series, gt.ABCSparseSeries)
assert isinstance(self.sparse_array, gt.ABCSparseArray)
assert isinstance(self.categorical, gt.ABCCategorical)
assert isinstance(pd.Period('2012', freq='A-DEC'), gt.ABCPeriod)
```
#### File: tests/scalar/test_interval.py
```python
from __future__ import division
import pytest
from pandas import Interval
import pandas.util.testing as tm
class TestInterval(object):
def setup_method(self, method):
self.interval = Interval(0, 1)
def test_properties(self):
assert self.interval.closed == 'right'
assert self.interval.left == 0
assert self.interval.right == 1
assert self.interval.mid == 0.5
def test_repr(self):
assert repr(self.interval) == "Interval(0, 1, closed='right')"
assert str(self.interval) == "(0, 1]"
interval_left = Interval(0, 1, closed='left')
assert repr(interval_left) == "Interval(0, 1, closed='left')"
assert str(interval_left) == "[0, 1)"
def test_contains(self):
assert 0.5 in self.interval
assert 1 in self.interval
assert 0 not in self.interval
pytest.raises(TypeError, lambda: self.interval in self.interval)
interval = Interval(0, 1, closed='both')
assert 0 in interval
assert 1 in interval
interval = Interval(0, 1, closed='neither')
assert 0 not in interval
assert 0.5 in interval
assert 1 not in interval
def test_equal(self):
assert Interval(0, 1) == Interval(0, 1, closed='right')
assert Interval(0, 1) != Interval(0, 1, closed='left')
assert Interval(0, 1) != 0
def test_comparison(self):
with tm.assert_raises_regex(TypeError, 'unorderable types'):
Interval(0, 1) < 2
assert Interval(0, 1) < Interval(1, 2)
assert Interval(0, 1) < Interval(0, 2)
assert Interval(0, 1) < Interval(0.5, 1.5)
assert Interval(0, 1) <= Interval(0, 1)
assert Interval(0, 1) > Interval(-1, 2)
assert Interval(0, 1) >= Interval(0, 1)
def test_hash(self):
# should not raise
hash(self.interval)
def test_math_add(self):
expected = Interval(1, 2)
actual = self.interval + 1
assert expected == actual
expected = Interval(1, 2)
actual = 1 + self.interval
assert expected == actual
actual = self.interval
actual += 1
assert expected == actual
with pytest.raises(TypeError):
self.interval + Interval(1, 2)
with pytest.raises(TypeError):
self.interval + 'foo'
def test_math_sub(self):
expected = Interval(-1, 0)
actual = self.interval - 1
assert expected == actual
actual = self.interval
actual -= 1
assert expected == actual
with pytest.raises(TypeError):
self.interval - Interval(1, 2)
with pytest.raises(TypeError):
self.interval - 'foo'
def test_math_mult(self):
expected = Interval(0, 2)
actual = self.interval * 2
assert expected == actual
expected = Interval(0, 2)
actual = 2 * self.interval
assert expected == actual
actual = self.interval
actual *= 2
assert expected == actual
with pytest.raises(TypeError):
self.interval * Interval(1, 2)
with pytest.raises(TypeError):
self.interval * 'foo'
def test_math_div(self):
expected = Interval(0, 0.5)
actual = self.interval / 2.0
assert expected == actual
actual = self.interval
actual /= 2.0
assert expected == actual
with pytest.raises(TypeError):
self.interval / Interval(1, 2)
with pytest.raises(TypeError):
self.interval / 'foo'
```
#### File: pandas/tests/test_compat.py
```python
from pandas.compat import (range, zip, map, filter, lrange, lzip, lmap,
lfilter, builtins, iterkeys, itervalues, iteritems,
next)
class TestBuiltinIterators(object):
@classmethod
def check_result(cls, actual, expected, lengths):
for (iter_res, list_res), exp, length in zip(actual, expected,
lengths):
assert not isinstance(iter_res, list)
assert isinstance(list_res, list)
iter_res = list(iter_res)
assert len(list_res) == length
assert len(iter_res) == length
assert iter_res == exp
assert list_res == exp
def test_range(self):
actual1 = range(10)
actual2 = lrange(10)
actual = [actual1, actual2],
expected = list(builtins.range(10)),
lengths = 10,
actual1 = range(1, 10, 2)
actual2 = lrange(1, 10, 2)
actual += [actual1, actual2],
lengths += 5,
expected += list(builtins.range(1, 10, 2)),
self.check_result(actual, expected, lengths)
def test_map(self):
func = lambda x, y, z: x + y + z
lst = [builtins.range(10), builtins.range(10), builtins.range(10)]
actual1 = map(func, *lst)
actual2 = lmap(func, *lst)
actual = [actual1, actual2],
expected = list(builtins.map(func, *lst)),
lengths = 10,
self.check_result(actual, expected, lengths)
def test_filter(self):
func = lambda x: x
lst = list(builtins.range(10))
actual1 = filter(func, lst)
actual2 = lfilter(func, lst)
actual = [actual1, actual2],
lengths = 9,
expected = list(builtins.filter(func, lst)),
self.check_result(actual, expected, lengths)
def test_zip(self):
lst = [builtins.range(10), builtins.range(10), builtins.range(10)]
actual = [zip(*lst), lzip(*lst)],
expected = list(builtins.zip(*lst)),
lengths = 10,
self.check_result(actual, expected, lengths)
def test_dict_iterators(self):
assert next(itervalues({1: 2})) == 2
assert next(iterkeys({1: 2})) == 1
assert next(iteritems({1: 2})) == (1, 2)
```
#### File: common-code/LoadTest/LoadTest__CloudGemMetric.py
```python
from __future__ import print_function
from cloud_gem_load_test.service_api_call import ServiceApiCall
from data_generator import DataGenerator
import metric_constant as c
#
# Load Test Transaction Handler registration
#
def add_transaction_handlers(handler_context, transaction_handlers):
service_api_name = c.RES_GEM_NAME + '.ServiceApi'
base_url = handler_context.mappings.get(service_api_name, {}).get('PhysicalResourceId')
if not base_url:
raise RuntimeError('Missing PhysicalResourceId for ' + service_api_name)
transaction_handlers.append(ServiceStatus(base_url))
transaction_handlers.append(ProduceMessage(base_url))
#
# Check for the service status of Cloud Gem Under Test
#
class ServiceStatus(ServiceApiCall):
def __init__(self, base_url):
ServiceApiCall.__init__(self, name=c.RES_GEM_NAME + '.ServiceStatus', method='get', base_url=base_url,
path='/service/status')
#
# Produce Metric Messages
#
class ProduceMessage(ServiceApiCall):
def __init__(self, base_url):
ServiceApiCall.__init__(self, name=c.RES_GEM_NAME + '.ProduceMessage', method='post', base_url=base_url,
path='/producer/produce/message?compression_mode=NoCompression&sensitivity_type=Insensitive&payload_type=JSON')
def build_request(self):
request = ServiceApiCall.build_request(self)
request['body'] = {
'data': build_metric_data()
}
return request
#
# Build the metric data object needed for the metric producer request body
#
def build_metric_data():
print('Building metric event data')
data_generator = DataGenerator()
return data_generator.json(1)
```
#### File: common-code/MetricUtils/compression.py
```python
from io import BytesIO
from abc import ABCMeta, abstractmethod
import metric_constant as c
import os
import gzip
import zlib
import base64
import sqs
import enum_type
def checksum(data):
return zlib.crc32(data)
def b64encode(data):
return base64.b64encode(data)
def b64decode(data):
return base64.b64decode(data)
COMPRESSION_MODE = enum_type.create(NONE="NoCompression", COMPRESS="Compress")
class CompressionClassFactory():
@staticmethod
def instance(name):
        # done this way for performance
if name.lower() == COMPRESSION_MODE.NONE.lower():
return NoCompression()
if name.lower() == COMPRESSION_MODE.COMPRESS.lower():
return Compress()
class AbstractCompression:
__metaclass__ = ABCMeta
@abstractmethod
def compress(self, data, compressionlevel=None):
raise NotImplementedError('subclasses must override compress')
@abstractmethod
def uncompress(self, data):
raise NotImplementedError('subclasses must override uncompress')
@abstractmethod
def extract_message_body(self, message):
raise NotImplementedError('subclasses must override extract_message_body')
@abstractmethod
def add_message_payload(self, params, data):
raise NotImplementedError('subclasses must override add_message_payload')
@property
def identifier(self):
return self.__class__.__name__
def size_of(self, data):
return len(data)
class Compress(AbstractCompression):
def compress(self, data, compressionlevel=3):
bytes = BytesIO()
f = gzip.GzipFile(mode='wb',
compresslevel=compressionlevel,
fileobj=bytes)
f.write(data)
f.close()
return bytes.getvalue()
def uncompress(self, data):
return zlib.decompress(data, 16+15)
def extract_message_body(self, message):
return self.uncompress(message['MessageAttributes']['compressed_payload']['BinaryValue'])
def add_message_payload(self, params, data):
params["MessageBody"] = sqs.empty_body_message()
params["MessageAttributes"]['compressed_payload'] = {
'BinaryValue': data,
'DataType': 'Binary'
}
class NoCompression(AbstractCompression):
def compress(self, data):
return data
def uncompress(self, data):
return data
def extract_message_body(self, message):
return message['Body']
def add_message_payload(self, params, data):
params["MessageBody"]= data
```
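A brief usage sketch for the compression strategies above; it is an assumption-laden example that only exercises the round-trip API and leaves the SQS message handling out:
```python
# Usage sketch (assumption: only compress/uncompress are exercised; the SQS
# message helpers require a real queue and are omitted here).
payload = b'{"events": [1, 2, 3]}' * 100

codec = CompressionClassFactory.instance(COMPRESSION_MODE.COMPRESS)
blob = codec.compress(payload)
assert codec.uncompress(blob) == payload
print(codec.identifier, codec.size_of(blob), "<", codec.size_of(payload))

passthrough = CompressionClassFactory.instance(COMPRESSION_MODE.NONE)
assert passthrough.compress(payload) is payload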
#### File: common-code/MetricUtils/thread_pool.py
```python
from Queue import Queue
from threading import Thread
from worker import Worker
"""
Inspired by <NAME>'s work:
https://www.metachris.com/2016/04/python-threadpool/
"""
class ThreadPool:
def __init__(self, context={}, size=2):
self.tasks = Queue(size)
for num in range(size):
Worker(self.tasks, context)
def add(self, func, *args, **kargs):
self.tasks.put((func, args, kargs))
def wait(self):
self.tasks.join()
```
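A short usage sketch; it assumes the companion `Worker` class follows the conventional pattern of dequeuing `(func, args, kwargs)` tuples, invoking them, and marking each task done so `wait()` returns:
```python
# Usage sketch (assumption: Worker calls func(*args, **kwargs) for each
# queued tuple and marks the task done, so wait() eventually returns).
import time

def fetch(name, delay=0.05):
    time.sleep(delay)
    print("done:", name)

pool = ThreadPool(size=4)
for n in range(8):
    pool.add(fetch, "task-%d" % n, delay=0.05)
pool.wait()  # blocks until every queued task has been processed
```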
#### File: fastparquet/test/test_compression.py
```python
from fastparquet.compression import (compress_data, decompress_data,
compressions, decompressions)
import pytest
@pytest.mark.parametrize('fmt', compressions)
def test_compress_decompress_roundtrip(fmt):
data = b'123' * 1000
compressed = compress_data(data, algorithm=fmt)
if fmt.lower() == 'uncompressed':
assert compressed is data
else:
assert len(compressed) < len(data)
decompressed = decompress_data(compressed, algorithm=fmt)
assert data == decompressed
def test_errors():
with pytest.raises(RuntimeError) as e:
compress_data(b'123', algorithm='not-an-algorithm')
assert 'not-an-algorithm' in str(e)
assert 'gzip' in str(e).lower()
def test_not_installed():
compressions.pop('BROTLI', None)
with pytest.raises(RuntimeError) as e:
compress_data(b'123', algorithm=4)
assert 'brotli' in str(e.value).lower()
```
#### File: fastparquet/test/test_encoding.py
```python
import array
import io
import numpy as np
import struct
import fastparquet.encoding
from fastparquet import parquet_thrift
def test_int32():
"""Test reading bytes containing int32 data."""
assert 999 == fastparquet.encoding.read_plain(
struct.pack(b"<i", 999),
parquet_thrift.Type.INT32, 1)
def test_int64():
"""Test reading bytes containing int64 data."""
assert 999 == fastparquet.encoding.read_plain(
struct.pack(b"<q", 999),
parquet_thrift.Type.INT64, 1)
def test_int96():
"""Test reading bytes containing int96 data."""
assert b'\x00\x00\x00\x00\x00\x00\x00\x00\xe7\x03\x00\x00' == fastparquet.encoding.read_plain(
struct.pack(b"<qi", 0, 999),
parquet_thrift.Type.INT96, 1)
def test_float():
"""Test reading bytes containing float data."""
assert (9.99 - fastparquet.encoding.read_plain(
struct.pack(b"<f", 9.99),
parquet_thrift.Type.FLOAT, 1)) < 0.01
def test_double():
"""Test reading bytes containing double data."""
assert (9.99 - fastparquet.encoding.read_plain(
struct.pack(b"<d", 9.99),
parquet_thrift.Type.DOUBLE, 1)) < 0.01
def test_fixed():
"""Test reading bytes containing fixed bytes data."""
data = b"foobar"
assert data[:3] == fastparquet.encoding.read_plain(
data, parquet_thrift.Type.FIXED_LEN_BYTE_ARRAY, -1, 3)[0]
assert data[3:] == fastparquet.encoding.read_plain(
data, parquet_thrift.Type.FIXED_LEN_BYTE_ARRAY, -1, 3)[1]
def test_boolean():
"""Test reading bytes containing boolean data."""
data = 0b1101
d = struct.pack(b"<i", data)
assert ([True, False, True, True] == fastparquet.encoding.read_plain(
d, parquet_thrift.Type.BOOLEAN, 4)).all(0)
def testFourByteValue():
"""Test reading a run with a single four-byte value."""
fo = fastparquet.encoding.Numpy8(np.fromstring(struct.pack(b"<i", 1 << 30), np.uint8))
o = fastparquet.encoding.Numpy32(np.empty(10, np.uint32))
fastparquet.encoding.read_rle(fo, 2 << 1, 30, o)
assert ([1 << 30] * 2 == o.so_far()).all()
def testSingleByte():
"""Test reading a single byte value."""
fo = fastparquet.encoding.Numpy8(np.fromstring(struct.pack(b"<i", 0x7F), np.uint8))
out = fastparquet.encoding.read_unsigned_var_int(fo)
assert 0x7F == out
def testFourByte():
"""Test reading a four byte value."""
fo = fastparquet.encoding.Numpy8(np.fromstring(struct.pack(b"<BBBB", 0xFF, 0xFF, 0xFF, 0x7F), np.uint8))
out = fastparquet.encoding.read_unsigned_var_int(fo)
assert 0x0FFFFFFF == out
def testFromExample():
"""Test a simple example."""
raw_data_in = [0b10001000, 0b11000110, 0b11111010]
encoded_bitstring = b'\x88\xc6\xfa'
fo = fastparquet.encoding.Numpy8(np.fromstring(encoded_bitstring, np.uint8))
count = 8
o = fastparquet.encoding.Numpy32(np.empty(count, np.uint32))
fastparquet.encoding.read_bitpacked(fo, count, 3, o)
assert (list(range(8)) == o.so_far()).all()
def testWidths():
"""Test all possible widths for a single byte."""
assert 0 == fastparquet.encoding.width_from_max_int(0)
assert 1 == fastparquet.encoding.width_from_max_int(1)
assert 2 == fastparquet.encoding.width_from_max_int(2)
assert 2 == fastparquet.encoding.width_from_max_int(3)
assert 3 == fastparquet.encoding.width_from_max_int(4)
assert 3 == fastparquet.encoding.width_from_max_int(5)
assert 3 == fastparquet.encoding.width_from_max_int(6)
assert 3 == fastparquet.encoding.width_from_max_int(7)
assert 4 == fastparquet.encoding.width_from_max_int(8)
assert 4 == fastparquet.encoding.width_from_max_int(15)
assert 5 == fastparquet.encoding.width_from_max_int(16)
assert 5 == fastparquet.encoding.width_from_max_int(31)
assert 6 == fastparquet.encoding.width_from_max_int(32)
assert 6 == fastparquet.encoding.width_from_max_int(63)
assert 7 == fastparquet.encoding.width_from_max_int(64)
assert 7 == fastparquet.encoding.width_from_max_int(127)
assert 8 == fastparquet.encoding.width_from_max_int(128)
assert 8 == fastparquet.encoding.width_from_max_int(255)
```
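The `testWidths` cases above follow a simple rule: the width needed for a maximum value equals that value's bit length. A quick cross-check, not part of the fastparquet test suite, under the assumption that `width_from_max_int` keeps matching `int.bit_length` for non-negative inputs:
```python
# Cross-check (assumption: width_from_max_int agrees with int.bit_length
# for non-negative inputs, as every case in testWidths suggests).
import fastparquet.encoding

for n in [0, 1, 2, 3, 4, 15, 16, 127, 128, 255]:
    assert fastparquet.encoding.width_from_max_int(n) == n.bit_length()
```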
#### File: fastparquet/test/test_partition_filters_specialstrings.py
```python
import os
import shutil
import pytest
import numpy as np
import pandas as pd
from pandas.tslib import Timestamp
from fastparquet.test.util import tempdir
from fastparquet import write, ParquetFile
import datetime as dt
import string
def frame_symbol_dtTrade_type_strike(days=1 * 252,
start_date=dt.datetime(2005, 1, 1, hour=0, minute=0, second=0),
symbols=['SPY', 'FB', 'TLT'],
numbercolumns=1):
base = start_date
date_list = [base + dt.timedelta(days=x) for x in range(0, days)]
tuple_list = []
for x in symbols:
for y in date_list:
tuple_list.append((x, y.year, y))
index = pd.MultiIndex.from_tuples(tuple_list, names=('symbol', 'year', 'dtTrade'))
np.random.seed(seed=0)
df = pd.DataFrame(np.random.randn(index.size, numbercolumns),
index=index, columns=[x for x in string.ascii_uppercase[0:numbercolumns]])
return df
@pytest.mark.parametrize('tempdir,input_symbols,input_days,file_scheme,input_columns,partitions,filters',
[
(tempdir, ['NOW', 'SPY', 'VIX'], 2*252, 'hive', 2, ['symbol', 'year'], [('symbol', '==', 'SPY')]),
(tempdir, ['now', 'SPY', 'VIX'], 2*252, 'hive', 2, ['symbol', 'year'], [('symbol', '==', 'SPY')]),
(tempdir, ['TODAY', 'SPY', 'VIX'], 2*252, 'hive', 2, ['symbol', 'year'], [('symbol', '==', 'SPY')]),
(tempdir, ['VIX*', 'SPY', 'VIX'], 2*252, 'hive', 2, ['symbol', 'year'], [('symbol', '==', 'SPY')]),
(tempdir, ['QQQ*', 'SPY', 'VIX'], 2*252, 'hive', 2, ['symbol', 'year'], [('symbol', '==', 'SPY')]),
(tempdir, ['QQQ!', 'SPY', 'VIX'], 2*252, 'hive', 2, ['symbol', 'year'], [('symbol', '==', 'SPY')]),
(tempdir, ['Q%QQ', 'SPY', 'VIX'], 2*252, 'hive', 2, ['symbol', 'year'], [('symbol', '==', 'SPY')]),
(tempdir, ['NOW', 'SPY', 'VIX'], 10, 'hive', 2, ['symbol', 'dtTrade'], [('symbol', '==', 'SPY')]),
(tempdir, ['NOW', 'SPY', 'VIX'], 10, 'hive', 2, ['symbol', 'dtTrade'],
[('dtTrade','==','2005-01-02T00:00:00.000000000')]),
(tempdir, ['NOW', 'SPY', 'VIX'], 10, 'hive', 2, ['symbol', 'dtTrade'],
[('dtTrade','==', Timestamp('2005-01-01 00:00:00'))]),
]
)
def test_frame_write_read_verify(tempdir, input_symbols, input_days, file_scheme,
input_columns, partitions, filters):
    #Generate temp directory for parquet files
fdir = str(tempdir)
fname = os.path.join(fdir, 'test')
#Generate Test Input Frame
input_df = frame_symbol_dtTrade_type_strike(days=input_days,
symbols=input_symbols,
numbercolumns=input_columns)
input_df.reset_index(inplace=True)
write(fname, input_df, partition_on=partitions, file_scheme=file_scheme, compression='SNAPPY')
#Read Back Whole Parquet Structure
output_df = ParquetFile(fname).to_pandas()
for col in output_df.columns:
assert col in input_df.columns.values
assert len(input_df) == len(output_df)
#Read with filters
filtered_output_df = ParquetFile(fname).to_pandas(filters=filters)
#Filter Input Frame to Match What Should Be Expected from parquet read
# Handle either string or non-string inputs / works for timestamps
filterStrings = []
for name, operator, value in filters:
if isinstance(value, str):
value = "'{}'".format(value)
else:
value = value.__repr__()
filterStrings.append("{} {} {}".format(name, operator, value))
filters_expression = " and ".join(filterStrings)
filtered_input_df = input_df.query(filters_expression)
# Check to Ensure Columns Match
for col in filtered_output_df.columns:
assert col in filtered_input_df.columns.values
# Check to Ensure Number of Rows Match
assert len(filtered_input_df) == len(filtered_output_df)
# Clean Up
shutil.rmtree(fdir, ignore_errors=True)
```
#### File: llvmlite/binding/dylib.py
```python
from __future__ import absolute_import, print_function
from ctypes import c_void_p, c_char_p, c_bool, POINTER
from . import ffi
from .common import _encode_string
def address_of_symbol(name):
"""
Get the in-process address of symbol named *name*.
An integer is returned, or None if the symbol isn't found.
"""
return ffi.lib.LLVMPY_SearchAddressOfSymbol(_encode_string(name))
def add_symbol(name, address):
"""
Register the *address* of global symbol *name*. This will make
it usable (e.g. callable) from LLVM-compiled functions.
"""
ffi.lib.LLVMPY_AddSymbol(_encode_string(name), c_void_p(address))
def load_library_permanently(filename):
"""
Load an external library
"""
with ffi.OutputString() as outerr:
if ffi.lib.LLVMPY_LoadLibraryPermanently(
_encode_string(filename), outerr):
raise RuntimeError(str(outerr))
# ============================================================================
# FFI
ffi.lib.LLVMPY_AddSymbol.argtypes = [
c_char_p,
c_void_p,
]
ffi.lib.LLVMPY_SearchAddressOfSymbol.argtypes = [c_char_p]
ffi.lib.LLVMPY_SearchAddressOfSymbol.restype = c_void_p
ffi.lib.LLVMPY_LoadLibraryPermanently.argtypes = [c_char_p, POINTER(c_char_p)]
ffi.lib.LLVMPY_LoadLibraryPermanently.restype = c_bool
```
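A hedged sketch of the round trip these helpers provide: registering an in-process address under a name and looking it up again. The alias name is made up for illustration, and loading the current process with `ctypes.CDLL(None)` assumes a Unix-like platform:
```python
# Illustration only: the symbol name "my_strlen_alias" is hypothetical and
# ctypes.CDLL(None) (the current process) assumes a Unix-like system.
import ctypes
from llvmlite import binding as llvm

libc = ctypes.CDLL(None)
addr = ctypes.cast(libc.strlen, ctypes.c_void_p).value

llvm.add_symbol("my_strlen_alias", addr)
assert llvm.address_of_symbol("my_strlen_alias") == addr
```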
#### File: llvmlite/ir/transforms.py
```python
from . import CallInstr
class Visitor(object):
def visit(self, module):
self._module = module
for func in module.functions:
self.visit_Function(func)
def visit_Function(self, func):
self._function = func
for bb in func.blocks:
self.visit_BasicBlock(bb)
def visit_BasicBlock(self, bb):
self._basic_block = bb
for instr in bb.instructions:
self.visit_Instruction(instr)
def visit_Instruction(self, instr):
raise NotImplementedError
@property
def module(self):
return self._module
@property
def function(self):
return self._function
@property
def basic_block(self):
return self._basic_block
class CallVisitor(Visitor):
def visit_Instruction(self, instr):
if isinstance(instr, CallInstr):
self.visit_Call(instr)
def visit_Call(self, instr):
raise NotImplementedError
class ReplaceCalls(CallVisitor):
def __init__(self, orig, repl):
super(ReplaceCalls, self).__init__()
self.orig = orig
self.repl = repl
self.calls = []
def visit_Call(self, instr):
if instr.callee == self.orig:
instr.replace_callee(self.repl)
self.calls.append(instr)
def replace_all_calls(mod, orig, repl):
"""Replace all calls to `orig` to `repl` in module `mod`.
Returns the references to the returned calls
"""
rc = ReplaceCalls(orig, repl)
rc.visit(mod)
return rc.calls
```
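A minimal sketch, not from the llvmlite sources, of `replace_all_calls` applied to a hand-built `llvmlite.ir` module; the function names are placeholders:
```python
# Sketch: build a module where caller() forwards to old_impl(), then rewrite
# every call site so it targets new_impl() instead.
from llvmlite import ir
from llvmlite.ir.transforms import replace_all_calls

int32 = ir.IntType(32)
fnty = ir.FunctionType(int32, [int32])

mod = ir.Module(name="example")
old_fn = ir.Function(mod, fnty, name="old_impl")
new_fn = ir.Function(mod, fnty, name="new_impl")

caller = ir.Function(mod, fnty, name="caller")
builder = ir.IRBuilder(caller.append_basic_block(name="entry"))
builder.ret(builder.call(old_fn, [caller.args[0]]))

patched = replace_all_calls(mod, old_fn, new_fn)
print(len(patched))  # number of call instructions that were rewritten -> 1
```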
#### File: numba/cuda/codegen.py
```python
from llvmlite import binding as ll
from llvmlite.llvmpy import core as lc
from numba.targets.codegen import BaseCPUCodegen, CodeLibrary
from numba import utils
from .cudadrv import nvvm
CUDA_TRIPLE = {32: 'nvptx-nvidia-cuda',
64: 'nvptx64-nvidia-cuda'}
class CUDACodeLibrary(CodeLibrary):
def _optimize_functions(self, ll_module):
pass
def _optimize_final_module(self):
# Run some lightweight optimization to simplify the module.
# This seems to workaround a libnvvm compilation bug (see #1341)
pmb = ll.PassManagerBuilder()
pmb.opt_level = 1
pmb.disable_unit_at_a_time = False
pmb.disable_unroll_loops = True
pmb.loop_vectorize = False
pmb.slp_vectorize = False
pm = ll.ModulePassManager()
pmb.populate(pm)
pm.run(self._final_module)
def _finalize_specific(self):
# Fix global naming
for gv in self._final_module.global_variables:
if '.' in gv.name:
gv.name = gv.name.replace('.', '_')
def get_asm_str(self):
# Return nothing: we can only dump assembler code when it is later
# generated (in numba.cuda.compiler).
return None
class JITCUDACodegen(BaseCPUCodegen):
"""
This codegen implementation for CUDA actually only generates optimized
LLVM IR. Generation of PTX code is done separately (see numba.cuda.compiler).
"""
_library_class = CUDACodeLibrary
def _init(self, llvm_module):
assert list(llvm_module.global_variables) == [], "Module isn't empty"
self._data_layout = nvvm.default_data_layout
self._target_data = ll.create_target_data(self._data_layout)
def _create_empty_module(self, name):
ir_module = lc.Module(name)
ir_module.triple = CUDA_TRIPLE[utils.MACHINE_BITS]
if self._data_layout:
ir_module.data_layout = self._data_layout
return ir_module
def _module_pass_manager(self):
raise NotImplementedError
def _function_pass_manager(self, llvm_module):
raise NotImplementedError
def _add_module(self, module):
pass
```
#### File: numba/cuda/errors.py
```python
from __future__ import print_function, absolute_import
import numbers
class KernelRuntimeError(RuntimeError):
def __init__(self, msg, tid=None, ctaid=None):
self.tid = tid
self.ctaid = ctaid
self.msg = msg
t = ("An exception was raised in thread=%s block=%s\n"
"\t%s")
msg = t % (self.tid, self.ctaid, self.msg)
super(KernelRuntimeError, self).__init__(msg)
def normalize_kernel_dimensions(griddim, blockdim):
"""
Normalize and validate the user-supplied kernel dimensions.
"""
def check_dim(dim, name):
if not isinstance(dim, (tuple, list)):
dim = [dim]
else:
dim = list(dim)
if len(dim) > 3:
raise ValueError('%s must be a sequence of 1, 2 or 3 integers, got %r'
% (name, dim))
for v in dim:
if not isinstance(v, numbers.Integral):
raise TypeError('%s must be a sequence of integers, got %r'
% (name, dim))
while len(dim) < 3:
dim.append(1)
return dim
griddim = check_dim(griddim, 'griddim')
blockdim = check_dim(blockdim, 'blockdim')
return griddim, blockdim
```
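A tiny usage sketch of the helper above (import path taken from the file location shown); both the scalar and the tuple form are padded to three entries:
```python
# Usage sketch: scalar and tuple dimensions are both normalized to 3 entries.
from numba.cuda.errors import normalize_kernel_dimensions

griddim, blockdim = normalize_kernel_dimensions(16, (8, 8))
print(griddim, blockdim)   # [16, 1, 1] [8, 8, 1]

# Invalid shapes fail early, before any kernel launch is attempted:
# normalize_kernel_dimensions((1, 2, 3, 4), 1)  -> ValueError
# normalize_kernel_dimensions(1.5, 1)           -> TypeError
```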
#### File: cuda/simulator/reduction.py
```python
from numba.six.moves import reduce as pyreduce
def Reduce(func):
def reduce_wrapper(seq, res=None, init=0):
r = pyreduce(func, seq, init)
if res is not None:
res[0] = r
return None
else:
return r
return reduce_wrapper
reduce = Reduce
```
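A quick illustration of the simulator's reduce wrapper defined above; the binary function is an arbitrary example:
```python
# Illustration: wrap an ordinary binary function and call it like the CUDA
# simulator's reduce object, with and without an output buffer.
from operator import add

sum_reduce = Reduce(add)
print(sum_reduce([1, 2, 3, 4]))      # 10 (init defaults to 0)

out = [0]
sum_reduce([1, 2, 3, 4], res=out)    # returns None, writes into out[0]
print(out[0])                        # 10
```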
#### File: numba/datamodel/registry.py
```python
from __future__ import print_function, absolute_import
import functools
from .manager import DataModelManager
def register(dmm, typecls):
"""Used as decorator to simplify datamodel registration.
Returns the object being decorated so that chaining is possible.
"""
def wraps(fn):
dmm.register(typecls, fn)
return fn
return wraps
default_manager = DataModelManager()
register_default = functools.partial(register, default_manager)
```
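A hedged sketch of the `register` helper in use; `MyType` and `MyModel` are hypothetical names, and the model below only records its arguments instead of deriving from numba's real model classes:
```python
# Hypothetical example: MyType/MyModel are illustrative names only. Real
# data models derive from numba.datamodel.models; this one just records args.
from numba import types
from numba.datamodel.registry import register_default

class MyType(types.Type):
    def __init__(self):
        super(MyType, self).__init__(name="MyType")

@register_default(MyType)
class MyModel(object):
    def __init__(self, dmm, fe_type):
        self.dmm = dmm
        self.fe_type = fe_type

# register() hands the class back, so the decorated name stays usable.
print(MyModel.__name__)
```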
#### File: hsa/hlc/hlc.py
```python
from __future__ import print_function, absolute_import
import sys
from subprocess import check_call
import tempfile
import os
from collections import namedtuple
from numba import config
from .utils import adapt_llvm_version
from .config import BUILTIN_PATH
_real_check_call = check_call
def check_call(*args, **kwargs):
print('CMD: ' + ';'.join(args), file=sys.stdout)
return _real_check_call(*args, **kwargs)
class CmdLine(object):
CMD_OPT = ("$HSAILBIN/opt "
"-O3 "
# "-gpu "
# "-whole "
"-verify "
"-S "
"-o {fout} "
"{fin}")
CMD_VERIFY = ("$HSAILBIN/opt "
"-verify "
"-S "
"-o {fout} "
"{fin}")
CMD_GEN_HSAIL = ("$HSAILBIN/llc -O2 "
"-march=hsail64 "
"-filetype=asm "
"-o {fout} "
"{fin}")
CMD_GEN_BRIG = ("$HSAILBIN/llc -O2 "
"-march=hsail64 "
"-filetype=obj "
"-o {fout} "
"{fin}")
CMD_LINK_BUILTINS = ("$HSAILBIN/llvm-link "
# "-prelink-opt "
"-S "
"-o {fout} "
"{fin} "
"{lib}")
CMD_LINK_LIBS = ("$HSAILBIN/llvm-link "
# "-prelink-opt "
"-S "
"-o {fout} "
"{fin} ")
def verify(self, ipath, opath):
check_call(self.CMD_VERIFY.format(fout=opath, fin=ipath), shell=True)
def optimize(self, ipath, opath):
check_call(self.CMD_OPT.format(fout=opath, fin=ipath), shell=True)
def generate_hsail(self, ipath, opath):
check_call(self.CMD_GEN_HSAIL.format(fout=opath, fin=ipath), shell=True)
def generate_brig(self, ipath, opath):
check_call(self.CMD_GEN_BRIG.format(fout=opath, fin=ipath), shell=True)
def link_builtins(self, ipath, opath):
cmd = self.CMD_LINK_BUILTINS.format(fout=opath, fin=ipath,
lib=BUILTIN_PATH)
check_call(cmd, shell=True)
def link_libs(self, ipath, libpaths, opath):
cmdline = self.CMD_LINK_LIBS.format(fout=opath, fin=ipath)
cmdline += ' '.join(["{0}".format(lib) for lib in libpaths])
check_call(cmdline, shell=True)
class Module(object):
def __init__(self):
"""
Setup
"""
self._tmpdir = tempfile.mkdtemp()
self._tempfiles = []
self._linkfiles = []
self._cmd = CmdLine()
self._finalized = False
def __del__(self):
return
self.close()
def close(self):
# Remove all temporary files
for afile in self._tempfiles:
os.unlink(afile)
# Remove directory
os.rmdir(self._tmpdir)
def _create_temp_file(self, name, mode='wb'):
path = self._track_temp_file(name)
fobj = open(path, mode=mode)
return fobj, path
def _track_temp_file(self, name):
path = os.path.join(self._tmpdir,
"{0}-{1}".format(len(self._tempfiles), name))
self._tempfiles.append(path)
return path
def _preprocess(self, llvmir):
return adapt_llvm_version(llvmir)
def load_llvm(self, llvmir):
"""
Load LLVM with HSAIL SPIR spec
"""
# Preprocess LLVM IR
        # Because HLC does not handle dots in LLVM variable names
llvmir = self._preprocess(llvmir)
# Create temp file to store the input file
tmp_llvm_ir, fin = self._create_temp_file("dump-llvm-ir")
with tmp_llvm_ir:
tmp_llvm_ir.write(llvmir.encode('ascii'))
# Create temp file for optimization
fout = self._track_temp_file("verified-llvm-ir")
self._cmd.verify(ipath=fin, opath=fout)
if config.DUMP_OPTIMIZED:
with open(fout, 'rb') as fin_opt:
print(fin_opt.read().decode('ascii'))
self._linkfiles.append(fout)
def finalize(self):
"""
Finalize module and return the HSAIL code
"""
assert not self._finalized, "Module finalized already"
# Link dependencies libraries
llvmfile = self._linkfiles[0]
pre_builtin_path = self._track_temp_file("link-dep")
libpaths = self._linkfiles[1:]
self._cmd.link_libs(ipath=llvmfile, libpaths=libpaths,
opath=pre_builtin_path)
# Link library with the builtin modules
linked_path = self._track_temp_file("linked-path")
self._cmd.link_builtins(ipath=pre_builtin_path, opath=linked_path)
# Optimize
opt_path = self._track_temp_file("optimized-llvm-ir")
self._cmd.optimize(ipath=linked_path, opath=opt_path)
if config.DUMP_OPTIMIZED:
with open(opt_path, 'rb') as fin:
print(fin.read().decode('ascii'))
# Finalize the llvm to HSAIL
hsail_path = self._track_temp_file("finalized-hsail")
self._cmd.generate_hsail(ipath=opt_path, opath=hsail_path)
# Finalize the llvm to BRIG
brig_path = self._track_temp_file("finalized-brig")
self._cmd.generate_brig(ipath=opt_path, opath=brig_path)
self._finalized = True
# Read HSAIL
with open(hsail_path, 'rb') as fin:
hsail = fin.read().decode('ascii')
# Read BRIG
with open(brig_path, 'rb') as fin:
brig = fin.read()
if config.DUMP_ASSEMBLY:
print(hsail)
return namedtuple('FinalizerResult', ['hsail', 'brig'])(hsail, brig)
```
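A hedged sketch of driving the `Module` pipeline above end to end; it assumes the `$HSAILBIN` tools and the builtins library referenced by `BUILTIN_PATH` are installed, and the input filename is hypothetical:
```python
# Sketch only: requires the HLC/HSAIL toolchain on disk; "kernel.ll" is a
# hypothetical LLVM IR file used purely for illustration.
with open("kernel.ll") as f:
    llvm_ir = f.read()

m = Module()
try:
    m.load_llvm(llvm_ir)
    result = m.finalize()          # namedtuple with .hsail and .brig
    print(result.hsail[:200])      # finalized HSAIL text
    with open("kernel.brig", "wb") as out:
        out.write(result.brig)     # finalized BRIG binary
finally:
    m.close()
```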
#### File: hsa/hsadrv/error.py
```python
from __future__ import print_function, absolute_import, division
class HsaDriverError(Exception):
pass
class HsaSupportError(ImportError):
pass
class HsaApiError(HsaDriverError):
def __init__(self, code, msg):
self.code = code
super(HsaApiError, self).__init__(msg)
class HsaWarning(UserWarning):
pass
```
#### File: Lib/numba/inline_closurecall.py
```python
from numba import config, ir, ir_utils, utils, prange
import types
from numba.ir_utils import (
mk_unique_var,
next_label,
add_offset_to_labels,
replace_vars,
remove_dels,
remove_dead,
rename_labels,
find_topo_order,
merge_adjacent_blocks,
GuardException,
require,
guard,
get_definition,
find_callname
)
from numba.analysis import compute_cfg_from_blocks
from numba.targets.rangeobj import range_iter_len
from numba.unsafe.ndarray import empty_inferred as unsafe_empty_inferred
import numba.types as nbtypes
import numpy as np
"""
The variable enable_inline_arraycall is only used for testing purposes.
"""
enable_inline_arraycall = True
class InlineClosureCallPass(object):
"""InlineClosureCallPass class looks for direct calls to locally defined
closures, and inlines the body of the closure function to the call site.
"""
def __init__(self, func_ir, flags, run_frontend):
self.func_ir = func_ir
self.flags = flags
self.run_frontend = run_frontend
def run(self):
"""Run inline closure call pass.
"""
modified = False
work_list = list(self.func_ir.blocks.items())
debug_print = _make_debug_print("InlineClosureCallPass")
debug_print("START")
while work_list:
label, block = work_list.pop()
for i in range(len(block.body)):
instr = block.body[i]
if isinstance(instr, ir.Assign):
lhs = instr.target
expr = instr.value
if isinstance(expr, ir.Expr) and expr.op == 'call':
func_def = guard(get_definition, self.func_ir, expr.func)
debug_print("found call to ", expr.func, " def = ", func_def)
if isinstance(func_def, ir.Expr) and func_def.op == "make_function":
new_blocks = self.inline_closure_call(block, i, func_def)
for block in new_blocks:
work_list.append(block)
modified = True
# current block is modified, skip the rest
break
if enable_inline_arraycall:
# Identify loop structure
if modified:
# Need to do some cleanups if closure inlining kicked in
merge_adjacent_blocks(self.func_ir)
cfg = compute_cfg_from_blocks(self.func_ir.blocks)
debug_print("start inline arraycall")
_debug_dump(cfg)
loops = cfg.loops()
sized_loops = [(k, len(loops[k].body)) for k in loops.keys()]
visited = []
# We go over all loops, bigger loops first (outer first)
for k, s in sorted(sized_loops, key=lambda tup: tup[1], reverse=True):
visited.append(k)
if guard(_inline_arraycall, self.func_ir, cfg, visited, loops[k],
self.flags.auto_parallel):
modified = True
if modified:
_fix_nested_array(self.func_ir)
if modified:
remove_dels(self.func_ir.blocks)
            # repeat dead code elimination until nothing further can be
# removed
while (remove_dead(self.func_ir.blocks, self.func_ir.arg_names)):
pass
self.func_ir.blocks = rename_labels(self.func_ir.blocks)
debug_print("END")
def inline_closure_call(self, block, i, callee):
"""Inline the body of `callee` at its callsite (`i`-th instruction of `block`)
"""
scope = block.scope
instr = block.body[i]
call_expr = instr.value
debug_print = _make_debug_print("inline_closure_call")
debug_print("Found closure call: ", instr, " with callee = ", callee)
func_ir = self.func_ir
# first, get the IR of the callee
callee_ir = self.get_ir_of_code(callee.code)
callee_blocks = callee_ir.blocks
# 1. relabel callee_ir by adding an offset
max_label = max(func_ir.blocks.keys())
callee_blocks = add_offset_to_labels(callee_blocks, max_label + 1)
callee_ir.blocks = callee_blocks
min_label = min(callee_blocks.keys())
max_label = max(callee_blocks.keys())
# reset globals in ir_utils before we use it
ir_utils._max_label = max_label
debug_print("After relabel")
_debug_dump(callee_ir)
# 2. rename all local variables in callee_ir with new locals created in func_ir
callee_scopes = _get_all_scopes(callee_blocks)
debug_print("callee_scopes = ", callee_scopes)
# one function should only have one local scope
assert(len(callee_scopes) == 1)
callee_scope = callee_scopes[0]
var_dict = {}
for var in callee_scope.localvars._con.values():
if not (var.name in callee.code.co_freevars):
new_var = scope.define(mk_unique_var(var.name), loc=var.loc)
var_dict[var.name] = new_var
debug_print("var_dict = ", var_dict)
replace_vars(callee_blocks, var_dict)
debug_print("After local var rename")
_debug_dump(callee_ir)
# 3. replace formal parameters with actual arguments
args = list(call_expr.args)
if callee.defaults:
debug_print("defaults = ", callee.defaults)
if isinstance(callee.defaults, tuple): # Python 3.5
args = args + list(callee.defaults)
elif isinstance(callee.defaults, ir.Var) or isinstance(callee.defaults, str):
defaults = func_ir.get_definition(callee.defaults)
assert(isinstance(defaults, ir.Const))
loc = defaults.loc
args = args + [ir.Const(value=v, loc=loc)
for v in defaults.value]
else:
raise NotImplementedError(
"Unsupported defaults to make_function: {}".format(defaults))
_replace_args_with(callee_blocks, args)
debug_print("After arguments rename: ")
_debug_dump(callee_ir)
# 4. replace freevar with actual closure var
if callee.closure:
closure = func_ir.get_definition(callee.closure)
assert(isinstance(closure, ir.Expr)
and closure.op == 'build_tuple')
assert(len(callee.code.co_freevars) == len(closure.items))
debug_print("callee's closure = ", closure)
_replace_freevars(callee_blocks, closure.items)
debug_print("After closure rename")
_debug_dump(callee_ir)
# 5. split caller blocks into two
new_blocks = []
new_block = ir.Block(scope, block.loc)
new_block.body = block.body[i + 1:]
new_label = next_label()
func_ir.blocks[new_label] = new_block
new_blocks.append((new_label, new_block))
block.body = block.body[:i]
block.body.append(ir.Jump(min_label, instr.loc))
# 6. replace Return with assignment to LHS
topo_order = find_topo_order(callee_blocks)
_replace_returns(callee_blocks, instr.target, new_label)
# remove the old definition of instr.target too
if (instr.target.name in func_ir._definitions):
func_ir._definitions[instr.target.name] = []
# 7. insert all new blocks, and add back definitions
for label in topo_order:
# block scope must point to parent's
block = callee_blocks[label]
block.scope = scope
_add_definitions(func_ir, block)
func_ir.blocks[label] = block
new_blocks.append((label, block))
debug_print("After merge in")
_debug_dump(func_ir)
return new_blocks
def get_ir_of_code(self, fcode):
"""
Compile a code object to get its IR.
"""
glbls = self.func_ir.func_id.func.__globals__
nfree = len(fcode.co_freevars)
func_env = "\n".join([" c_%d = None" % i for i in range(nfree)])
func_clo = ",".join(["c_%d" % i for i in range(nfree)])
func_arg = ",".join(["x_%d" % i for i in range(fcode.co_argcount)])
func_text = "def g():\n%s\n def f(%s):\n return (%s)\n return f" % (
func_env, func_arg, func_clo)
loc = {}
exec(func_text, glbls, loc)
# hack parameter name .0 for Python 3 versions < 3.6
if utils.PYVERSION >= (3,) and utils.PYVERSION < (3, 6):
co_varnames = list(fcode.co_varnames)
if co_varnames[0] == ".0":
co_varnames[0] = "implicit0"
fcode = types.CodeType(
fcode.co_argcount,
fcode.co_kwonlyargcount,
fcode.co_nlocals,
fcode.co_stacksize,
fcode.co_flags,
fcode.co_code,
fcode.co_consts,
fcode.co_names,
tuple(co_varnames),
fcode.co_filename,
fcode.co_name,
fcode.co_firstlineno,
fcode.co_lnotab,
fcode.co_freevars,
fcode.co_cellvars)
f = loc['g']()
f.__code__ = fcode
f.__name__ = fcode.co_name
ir = self.run_frontend(f)
return ir
def _make_debug_print(prefix):
def debug_print(*args):
if config.DEBUG_INLINE_CLOSURE:
print(prefix + ": " + "".join(str(x) for x in args))
return debug_print
def _debug_dump(func_ir):
if config.DEBUG_INLINE_CLOSURE:
func_ir.dump()
def _get_all_scopes(blocks):
"""Get all block-local scopes from an IR.
"""
all_scopes = []
for label, block in blocks.items():
if not (block.scope in all_scopes):
all_scopes.append(block.scope)
return all_scopes
def _replace_args_with(blocks, args):
"""
Replace ir.Arg(...) with real arguments from call site
"""
for label, block in blocks.items():
assigns = block.find_insts(ir.Assign)
for stmt in assigns:
if isinstance(stmt.value, ir.Arg):
idx = stmt.value.index
assert(idx < len(args))
stmt.value = args[idx]
def _replace_freevars(blocks, args):
"""
Replace ir.FreeVar(...) with real variables from parent function
"""
for label, block in blocks.items():
assigns = block.find_insts(ir.Assign)
for stmt in assigns:
if isinstance(stmt.value, ir.FreeVar):
idx = stmt.value.index
assert(idx < len(args))
stmt.value = args[idx]
def _replace_returns(blocks, target, return_label):
"""
    Replace each return statement with an assignment to `target`, followed by a jump to `return_label`.
"""
for label, block in blocks.items():
casts = []
for i in range(len(block.body)):
stmt = block.body[i]
if isinstance(stmt, ir.Return):
assert(i + 1 == len(block.body))
block.body[i] = ir.Assign(stmt.value, target, stmt.loc)
block.body.append(ir.Jump(return_label, stmt.loc))
# remove cast of the returned value
for cast in casts:
if cast.target.name == stmt.value.name:
cast.value = cast.value.value
elif isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr) and stmt.value.op == 'cast':
casts.append(stmt)
def _add_definitions(func_ir, block):
"""
Add variable definitions found in a block to parent func_ir.
"""
definitions = func_ir._definitions
assigns = block.find_insts(ir.Assign)
for stmt in assigns:
definitions[stmt.target.name].append(stmt.value)
def _find_arraycall(func_ir, block):
"""Look for statement like "x = numpy.array(y)" or "x[..] = y"
immediately after the closure call that creates list y (the i-th
statement in block). Return the statement index if found, or
raise GuardException.
"""
array_var = None
array_call_index = None
list_var_dead_after_array_call = False
list_var = None
i = 0
while i < len(block.body):
instr = block.body[i]
if isinstance(instr, ir.Del):
# Stop the process if list_var becomes dead
if list_var and array_var and instr.value == list_var.name:
list_var_dead_after_array_call = True
break
pass
elif isinstance(instr, ir.Assign):
# Found array_var = array(list_var)
lhs = instr.target
expr = instr.value
if (guard(find_callname, func_ir, expr) == ('array', 'numpy') and
isinstance(expr.args[0], ir.Var)):
list_var = expr.args[0]
array_var = lhs
array_stmt_index = i
array_kws = dict(expr.kws)
elif (isinstance(instr, ir.SetItem) and
isinstance(instr.value, ir.Var) and
not list_var):
list_var = instr.value
# Found array_var[..] = list_var, the case for nested array
array_var = instr.target
array_def = get_definition(func_ir, array_var)
require(guard(_find_unsafe_empty_inferred, func_ir, array_def))
array_stmt_index = i
array_kws = {}
else:
# Bail out otherwise
break
i = i + 1
# require array_var is found, and list_var is dead after array_call.
require(array_var and list_var_dead_after_array_call)
_make_debug_print("find_array_call")(block.body[array_stmt_index])
return list_var, array_stmt_index, array_kws
def _find_iter_range(func_ir, range_iter_var):
"""Find the iterator's actual range if it is either range(n), or range(m, n),
    otherwise raise GuardException.
"""
debug_print = _make_debug_print("find_iter_range")
range_iter_def = get_definition(func_ir, range_iter_var)
debug_print("range_iter_var = ", range_iter_var, " def = ", range_iter_def)
require(isinstance(range_iter_def, ir.Expr) and range_iter_def.op == 'getiter')
range_var = range_iter_def.value
range_def = get_definition(func_ir, range_var)
debug_print("range_var = ", range_var, " range_def = ", range_def)
require(isinstance(range_def, ir.Expr) and range_def.op == 'call')
func_var = range_def.func
func_def = get_definition(func_ir, func_var)
debug_print("func_var = ", func_var, " func_def = ", func_def)
require(isinstance(func_def, ir.Global) and func_def.value == range)
nargs = len(range_def.args)
if nargs == 1:
stop = get_definition(func_ir, range_def.args[0], lhs_only=True)
return (0, range_def.args[0], func_def)
elif nargs == 2:
start = get_definition(func_ir, range_def.args[0], lhs_only=True)
stop = get_definition(func_ir, range_def.args[1], lhs_only=True)
return (start, stop, func_def)
else:
raise GuardException
def _inline_arraycall(func_ir, cfg, visited, loop, enable_prange=False):
"""Look for array(list) call in the exit block of a given loop, and turn list operations into
array operations in the loop if the following conditions are met:
1. The exit block contains an array call on the list;
2. The list variable is no longer live after array call;
3. The list is created in the loop entry block;
4. The loop is created from an range iterator whose length is known prior to the loop;
5. There is only one list_append operation on the list variable in the loop body;
6. The block that contains list_append dominates the loop head, which ensures list
length is the same as loop length;
If any condition check fails, no modification will be made to the incoming IR.
"""
debug_print = _make_debug_print("inline_arraycall")
# There should only be one loop exit
require(len(loop.exits) == 1)
exit_block = next(iter(loop.exits))
list_var, array_call_index, array_kws = _find_arraycall(func_ir, func_ir.blocks[exit_block])
# check if dtype is present in array call
dtype_def = None
dtype_mod_def = None
if 'dtype' in array_kws:
require(isinstance(array_kws['dtype'], ir.Var))
# We require that dtype argument to be a constant of getattr Expr, and we'll
# remember its definition for later use.
dtype_def = get_definition(func_ir, array_kws['dtype'])
require(isinstance(dtype_def, ir.Expr) and dtype_def.op == 'getattr')
dtype_mod_def = get_definition(func_ir, dtype_def.value)
list_var_def = get_definition(func_ir, list_var)
debug_print("list_var = ", list_var, " def = ", list_var_def)
if isinstance(list_var_def, ir.Expr) and list_var_def.op == 'cast':
list_var_def = get_definition(func_ir, list_var_def.value)
# Check if the definition is a build_list
require(isinstance(list_var_def, ir.Expr) and list_var_def.op == 'build_list')
# Look for list_append in "last" block in loop body, which should be a block that is
# a post-dominator of the loop header.
list_append_stmts = []
for label in loop.body:
# We have to consider blocks of this loop, but not sub-loops.
# To achieve this, we require the set of "in_loops" of "label" to be visited loops.
in_visited_loops = [l.header in visited for l in cfg.in_loops(label)]
if not all(in_visited_loops):
continue
block = func_ir.blocks[label]
debug_print("check loop body block ", label)
for stmt in block.find_insts(ir.Assign):
lhs = stmt.target
expr = stmt.value
if isinstance(expr, ir.Expr) and expr.op == 'call':
func_def = get_definition(func_ir, expr.func)
if isinstance(func_def, ir.Expr) and func_def.op == 'getattr' \
and func_def.attr == 'append':
list_def = get_definition(func_ir, func_def.value)
debug_print("list_def = ", list_def, list_def == list_var_def)
if list_def == list_var_def:
# found matching append call
list_append_stmts.append((label, block, stmt))
# Require only one list_append, otherwise we won't know the indices
require(len(list_append_stmts) == 1)
append_block_label, append_block, append_stmt = list_append_stmts[0]
# Check if append_block (besides loop entry) dominates loop header.
# Since CFG doesn't give us this info without loop entry, we approximate
# by checking if the predecessor set of the header block is the same
# as loop_entries plus append_block, which is certainly more restrictive
# than necessary, and can be relaxed if needed.
preds = set(l for l, b in cfg.predecessors(loop.header))
debug_print("preds = ", preds, (loop.entries | set([append_block_label])))
require(preds == (loop.entries | set([append_block_label])))
# Find iterator in loop header
iter_vars = []
iter_first_vars = []
loop_header = func_ir.blocks[loop.header]
for stmt in loop_header.find_insts(ir.Assign):
expr = stmt.value
if isinstance(expr, ir.Expr):
if expr.op == 'iternext':
iter_def = get_definition(func_ir, expr.value)
debug_print("iter_def = ", iter_def)
iter_vars.append(expr.value)
elif expr.op == 'pair_first':
iter_first_vars.append(stmt.target)
# Require only one iterator in loop header
require(len(iter_vars) == 1 and len(iter_first_vars) == 1)
iter_var = iter_vars[0] # variable that holds the iterator object
iter_first_var = iter_first_vars[0] # variable that holds the value out of iterator
# Final requirement: only one loop entry, and we're going to modify it by:
# 1. replacing the list definition with an array definition;
# 2. adding a counter for the array iteration.
require(len(loop.entries) == 1)
loop_entry = func_ir.blocks[next(iter(loop.entries))]
terminator = loop_entry.terminator
scope = loop_entry.scope
loc = loop_entry.loc
stmts = []
removed = []
def is_removed(val, removed):
if isinstance(val, ir.Var):
for x in removed:
if x.name == val.name:
return True
return False
# Skip list construction and skip terminator, add the rest to stmts
for i in range(len(loop_entry.body) - 1):
stmt = loop_entry.body[i]
if isinstance(stmt, ir.Assign) and (stmt.value == list_def or is_removed(stmt.value, removed)):
removed.append(stmt.target)
else:
stmts.append(stmt)
debug_print("removed variables: ", removed)
# Define an index_var to index the array.
# If the range happens to be single step ranges like range(n), or range(m, n),
# then the index_var correlates to iterator index; otherwise we'll have to
# define a new counter.
range_def = guard(_find_iter_range, func_ir, iter_var)
index_var = scope.make_temp(loc)
if range_def and range_def[0] == 0:
# iterator starts with 0, index_var can just be iter_first_var
index_var = iter_first_var
else:
# index_var = -1 # start the index at -1 since it will be incremented in the loop header
stmts.append(_new_definition(func_ir, index_var, ir.Const(value=-1, loc=loc), loc))
# Insert statement to get the size of the loop iterator
size_var = scope.make_temp(loc)
if range_def:
start, stop, range_func_def = range_def
if start == 0:
size_val = stop
else:
size_val = ir.Expr.binop(fn='-', lhs=stop, rhs=start, loc=loc)
# we can parallelize this loop if enable_prange = True, by changing
# range function from range, to prange.
if enable_prange and isinstance(range_func_def, ir.Global):
range_func_def.name = 'prange'
range_func_def.value = prange
else:
len_func_var = scope.make_temp(loc)
stmts.append(_new_definition(func_ir, len_func_var,
ir.Global('range_iter_len', range_iter_len, loc=loc), loc))
size_val = ir.Expr.call(len_func_var, (iter_var,), (), loc=loc)
stmts.append(_new_definition(func_ir, size_var, size_val, loc))
size_tuple_var = scope.make_temp(loc)
stmts.append(_new_definition(func_ir, size_tuple_var,
ir.Expr.build_tuple(items=[size_var], loc=loc), loc))
array_var = scope.make_temp(loc)
# Insert array allocation
array_var = scope.make_temp(loc)
empty_func = scope.make_temp(loc)
if dtype_def and dtype_mod_def:
# when dtype is present, we'll call empty with dtype
dtype_mod_var = scope.make_temp(loc)
dtype_var = scope.make_temp(loc)
stmts.append(_new_definition(func_ir, dtype_mod_var, dtype_mod_def, loc))
stmts.append(_new_definition(func_ir, dtype_var,
ir.Expr.getattr(dtype_mod_var, dtype_def.attr, loc), loc))
stmts.append(_new_definition(func_ir, empty_func,
ir.Global('empty', np.empty, loc=loc), loc))
array_kws = [('dtype', dtype_var)]
else:
# otherwise we'll call unsafe_empty_inferred
stmts.append(_new_definition(func_ir, empty_func,
ir.Global('unsafe_empty_inferred',
unsafe_empty_inferred, loc=loc), loc))
array_kws = []
# array_var = empty_func(size_tuple_var)
stmts.append(_new_definition(func_ir, array_var,
ir.Expr.call(empty_func, (size_tuple_var,), list(array_kws), loc=loc), loc))
# Add back removed just in case they are used by something else
for var in removed:
stmts.append(_new_definition(func_ir, var, array_var, loc))
# Add back terminator
stmts.append(terminator)
# Modify loop_entry
loop_entry.body = stmts
if range_def:
if range_def[0] != 0:
# when range doesn't start from 0, index_var becomes loop index
# (iter_first_var) minus an offset (range_def[0])
terminator = loop_header.terminator
assert(isinstance(terminator, ir.Branch))
# find the block in the loop body that header jumps to
block_id = terminator.truebr
blk = func_ir.blocks[block_id]
loc = blk.loc
blk.body.insert(0, _new_definition(func_ir, index_var,
ir.Expr.binop(fn='-', lhs=iter_first_var,
rhs=range_def[0], loc=loc),
loc))
else:
# Insert index_var increment to the end of loop header
loc = loop_header.loc
terminator = loop_header.terminator
stmts = loop_header.body[0:-1]
next_index_var = scope.make_temp(loc)
one = scope.make_temp(loc)
# one = 1
stmts.append(_new_definition(func_ir, one,
ir.Const(value=1,loc=loc), loc))
# next_index_var = index_var + 1
stmts.append(_new_definition(func_ir, next_index_var,
ir.Expr.binop(fn='+', lhs=index_var, rhs=one, loc=loc), loc))
# index_var = next_index_var
stmts.append(_new_definition(func_ir, index_var, next_index_var, loc))
stmts.append(terminator)
loop_header.body = stmts
# In append_block, change list_append into array assign
for i in range(len(append_block.body)):
if append_block.body[i] == append_stmt:
debug_print("Replace append with SetItem")
append_block.body[i] = ir.SetItem(target=array_var, index=index_var,
value=append_stmt.value.args[0], loc=append_stmt.loc)
# replace array call, by changing "a = array(b)" to "a = b"
stmt = func_ir.blocks[exit_block].body[array_call_index]
# stmt can be either an array call or a SetItem; we only replace the array call
if isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr):
stmt.value = array_var
func_ir._definitions[stmt.target.name] = [stmt.value]
return True
def _find_unsafe_empty_inferred(func_ir, expr):
unsafe_empty_inferred
require(isinstance(expr, ir.Expr) and expr.op == 'call')
callee = expr.func
callee_def = get_definition(func_ir, callee)
require(isinstance(callee_def, ir.Global))
_make_debug_print("_find_unsafe_empty_inferred")(callee_def.value)
return callee_def.value == unsafe_empty_inferred
def _fix_nested_array(func_ir):
"""Look for assignment like: a[..] = b, where both a and b are numpy arrays, and
try to eliminate array b by expanding a with an extra dimension.
"""
"""
cfg = compute_cfg_from_blocks(func_ir.blocks)
all_loops = list(cfg.loops().values())
def find_nest_level(label):
level = 0
for loop in all_loops:
if label in loop.body:
level += 1
"""
def find_array_def(arr):
"""Find numpy array definition such as
arr = numba.unsafe.ndarray.empty_inferred(...).
If it is arr = b[...], find array definition of b recursively.
"""
arr_def = func_ir.get_definition(arr)
_make_debug_print("find_array_def")(arr, arr_def)
if isinstance(arr_def, ir.Expr):
if guard(_find_unsafe_empty_inferred, func_ir, arr_def):
return arr_def
elif arr_def.op == 'getitem':
return find_array_def(arr_def.value)
raise GuardException
def fix_array_assign(stmt):
"""For assignment like lhs[idx] = rhs, where both lhs and rhs are arrays, do the
following:
1. find the definition of rhs, which has to be a call to numba.unsafe.ndarray.empty_inferred
2. find the source array creation for lhs, insert an extra dimension of size of b.
3. replace the definition of rhs = numba.unsafe.ndarray.empty_inferred(...) with rhs = lhs[idx]
"""
require(isinstance(stmt, ir.SetItem))
require(isinstance(stmt.value, ir.Var))
debug_print = _make_debug_print("fix_array_assign")
debug_print("found SetItem: ", stmt)
lhs = stmt.target
# Find the source array creation of lhs
lhs_def = find_array_def(lhs)
debug_print("found lhs_def: ", lhs_def)
rhs_def = get_definition(func_ir, stmt.value)
debug_print("found rhs_def: ", rhs_def)
require(isinstance(rhs_def, ir.Expr))
if rhs_def.op == 'cast':
rhs_def = get_definition(func_ir, rhs_def.value)
require(isinstance(rhs_def, ir.Expr))
require(_find_unsafe_empty_inferred(func_ir, rhs_def))
# Find the array dimension of rhs
dim_def = get_definition(func_ir, rhs_def.args[0])
require(isinstance(dim_def, ir.Expr) and dim_def.op == 'build_tuple')
debug_print("dim_def = ", dim_def)
extra_dims = [ get_definition(func_ir, x, lhs_only=True) for x in dim_def.items ]
debug_print("extra_dims = ", extra_dims)
# Expand size tuple when creating lhs_def with extra_dims
size_tuple_def = get_definition(func_ir, lhs_def.args[0])
require(isinstance(size_tuple_def, ir.Expr) and size_tuple_def.op == 'build_tuple')
debug_print("size_tuple_def = ", size_tuple_def)
size_tuple_def.items += extra_dims
# In-place modify rhs_def to be getitem
rhs_def.op = 'getitem'
rhs_def.value = get_definition(func_ir, lhs, lhs_only=True)
rhs_def.index = stmt.index
del rhs_def._kws['func']
del rhs_def._kws['args']
del rhs_def._kws['vararg']
del rhs_def._kws['kws']
# success
return True
for label in find_topo_order(func_ir.blocks):
block = func_ir.blocks[label]
for stmt in block.body:
if guard(fix_array_assign, stmt):
block.body.remove(stmt)
def _new_definition(func_ir, var, value, loc):
func_ir._definitions[var.name] = [value]
return ir.Assign(value=value, target=var, loc=loc)
```
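The rewrite above only fires when all of its guards hold: a single `append` call in the loop body, an iterator whose length can be computed up front, and an `array()` call on the finished list. A minimal sketch of the pattern it targets is shown below; the names are illustrative, and whether the transformation actually triggers depends on those guards and on the Numba version in use.

```python
import numpy as np
from numba import njit

@njit
def squares(n):
    out = []                  # becomes a build_list in the IR
    for i in range(n):        # iterator with a statically computable length
        out.append(i * i)     # the single list_append the pass looks for
    return np.array(out)      # the array() call that anchors the rewrite

print(squares(5))             # e.g. [ 0  1  4  9 16]
```

When the rewrite applies, the list is replaced by a pre-allocated array indexed by the counter constructed above, so no intermediate reflected list is materialized.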
#### File: numba/jitclass/boxing.py
```python
from __future__ import print_function, absolute_import
from functools import wraps, partial
from llvmlite import ir
from numba import types, cgutils
from numba.pythonapi import box, unbox, NativeValue
from numba import njit
from numba.six import exec_
from . import _box
_getter_code_template = """
def accessor(__numba_self_):
return __numba_self_.{0}
"""
_setter_code_template = """
def mutator(__numba_self_, __numba_val):
__numba_self_.{0} = __numba_val
"""
_method_code_template = """
def method(__numba_self_, *args):
return __numba_self_.{method}(*args)
"""
def _generate_property(field, template, fname):
"""
Generate a simple function that gets/sets a field of the instance
"""
source = template.format(field)
glbls = {}
exec_(source, glbls)
return njit(glbls[fname])
_generate_getter = partial(_generate_property, template=_getter_code_template,
fname='accessor')
_generate_setter = partial(_generate_property, template=_setter_code_template,
fname='mutator')
def _generate_method(name, func):
"""
Generate a wrapper for calling a method. Note the wrapper will only
accept positional arguments.
"""
source = _method_code_template.format(method=name)
glbls = {}
exec_(source, glbls)
method = njit(glbls['method'])
@wraps(func)
def wrapper(*args, **kwargs):
return method(*args, **kwargs)
return wrapper
_cache_specialized_box = {}
def _specialize_box(typ):
"""
Create a subclass of Box that is specialized to the jitclass.
This function caches the result to avoid code bloat.
"""
# Check cache
if typ in _cache_specialized_box:
return _cache_specialized_box[typ]
dct = {'__slots__': (),
'_numba_type_': typ,
'__doc__': typ.class_type.class_def.__doc__,
}
# Inject attributes as class properties
for field in typ.struct:
getter = _generate_getter(field)
setter = _generate_setter(field)
dct[field] = property(getter, setter)
# Inject properties as class properties
for field, impdct in typ.jitprops.items():
getter = None
setter = None
if 'get' in impdct:
getter = _generate_getter(field)
if 'set' in impdct:
setter = _generate_setter(field)
# get docstring from either the fget or fset
imp = impdct.get('get') or impdct.get('set') or None
doc = getattr(imp, '__doc__', None)
dct[field] = property(getter, setter, doc=doc)
# Inject methods as class members
for name, func in typ.methods.items():
if not (name.startswith('__') and name.endswith('__')):
dct[name] = _generate_method(name, func)
# Create subclass
subcls = type(typ.classname, (_box.Box,), dct)
# Store to cache
_cache_specialized_box[typ] = subcls
# Pre-compile attribute getter.
# Note: This must be done after the "box" class is created because
# compiling the getter requires the "box" class to be defined.
for k, v in dct.items():
if isinstance(v, property):
prop = getattr(subcls, k)
if prop.fget is not None:
fget = prop.fget
fast_fget = fget.compile((typ,))
fget.disable_compile()
setattr(subcls, k,
property(fast_fget, prop.fset, prop.fdel,
doc=prop.__doc__))
return subcls
###############################################################################
# Implement box/unbox for call wrapper
@box(types.ClassInstanceType)
def _box_class_instance(typ, val, c):
meminfo, dataptr = cgutils.unpack_tuple(c.builder, val)
# Create Box instance
box_subclassed = _specialize_box(typ)
# Note: the ``box_subclassed`` is kept alive by the cache
int_addr_boxcls = c.context.get_constant(types.uintp, id(box_subclassed))
box_cls = c.builder.inttoptr(int_addr_boxcls, c.pyapi.pyobj)
box = c.pyapi.call_function_objargs(box_cls, ())
# Initialize Box instance
llvoidptr = ir.IntType(8).as_pointer()
addr_meminfo = c.builder.bitcast(meminfo, llvoidptr)
addr_data = c.builder.bitcast(dataptr, llvoidptr)
def set_member(member_offset, value):
# Access member by byte offset
offset = c.context.get_constant(types.uintp, member_offset)
ptr = cgutils.pointer_add(c.builder, box, offset)
casted = c.builder.bitcast(ptr, llvoidptr.as_pointer())
c.builder.store(value, casted)
set_member(_box.box_meminfoptr_offset, addr_meminfo)
set_member(_box.box_dataptr_offset, addr_data)
return box
@unbox(types.ClassInstanceType)
def _unbox_class_instance(typ, val, c):
def access_member(member_offset):
# Access member by byte offset
offset = c.context.get_constant(types.uintp, member_offset)
llvoidptr = ir.IntType(8).as_pointer()
ptr = cgutils.pointer_add(c.builder, val, offset)
casted = c.builder.bitcast(ptr, llvoidptr.as_pointer())
return c.builder.load(casted)
struct_cls = cgutils.create_struct_proxy(typ)
inst = struct_cls(c.context, c.builder)
# load from Python object
ptr_meminfo = access_member(_box.box_meminfoptr_offset)
ptr_dataptr = access_member(_box.box_dataptr_offset)
# store to native structure
inst.meminfo = c.builder.bitcast(ptr_meminfo, inst.meminfo.type)
inst.data = c.builder.bitcast(ptr_dataptr, inst.data.type)
ret = inst._getvalue()
c.context.nrt.incref(c.builder, typ, ret)
return NativeValue(ret, is_error=c.pyapi.c_api_error())
```
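For context, here is a small usage sketch of the boxing machinery above: when a jitclass instance crosses back into the interpreter it is boxed into the specialized `Box` subclass built by `_specialize_box`, whose properties and method wrappers are produced by `_generate_getter`, `_generate_setter` and `_generate_method`. The import below assumes a Numba version of this era, where `jitclass` is exposed at the top level.

```python
from numba import jitclass, float64

spec = [('x', float64), ('y', float64)]

@jitclass(spec)
class Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def norm2(self):
        return self.x * self.x + self.y * self.y

p = Point(3.0, 4.0)      # the returned object is an instance of the Box subclass
print(p.x, p.y)          # attribute reads go through the generated accessors
p.x = 5.0                # attribute writes go through the generated mutators
print(p.norm2())         # method calls go through the generated wrappers
```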
#### File: numba/runtime/nrtdynmod.py
```python
from __future__ import print_function, absolute_import, division
from numba.config import MACHINE_BITS
from numba import cgutils, types
from llvmlite import ir
# Flag to enable debug print in NRT_incref and NRT_decref
_debug_print = False
_word_type = ir.IntType(MACHINE_BITS)
_pointer_type = ir.PointerType(ir.IntType(8))
_meminfo_struct_type = ir.LiteralStructType([
_word_type, # size_t refct
_pointer_type, # dtor_function dtor
_pointer_type, # void *dtor_info
_pointer_type, # void *data
_word_type, # size_t size
])
incref_decref_ty = ir.FunctionType(ir.VoidType(), [_pointer_type])
meminfo_data_ty = ir.FunctionType(_pointer_type, [_pointer_type])
def _define_nrt_meminfo_data(module):
"""
Implement NRT_MemInfo_data_fast in the module. This allows LLVM
to inline lookup of the data pointer.
"""
fn = module.get_or_insert_function(meminfo_data_ty,
name="NRT_MemInfo_data_fast")
builder = ir.IRBuilder(fn.append_basic_block())
[ptr] = fn.args
struct_ptr = builder.bitcast(ptr, _meminfo_struct_type.as_pointer())
data_ptr = builder.load(cgutils.gep(builder, struct_ptr, 0, 3))
builder.ret(data_ptr)
def _define_nrt_incref(module, atomic_incr):
"""
Implement NRT_incref in the module
"""
fn_incref = module.get_or_insert_function(incref_decref_ty,
name="NRT_incref")
# Cannot inline this for refcount pruning to work
fn_incref.attributes.add('noinline')
builder = ir.IRBuilder(fn_incref.append_basic_block())
[ptr] = fn_incref.args
is_null = builder.icmp_unsigned("==", ptr, cgutils.get_null_value(ptr.type))
with cgutils.if_unlikely(builder, is_null):
builder.ret_void()
if _debug_print:
cgutils.printf(builder, "*** NRT_Incref %zu [%p]\n", builder.load(ptr),
ptr)
builder.call(atomic_incr, [builder.bitcast(ptr, atomic_incr.args[0].type)])
builder.ret_void()
def _define_nrt_decref(module, atomic_decr):
"""
Implement NRT_decref in the module
"""
fn_decref = module.get_or_insert_function(incref_decref_ty,
name="NRT_decref")
# Cannot inline this for refcount pruning to work
fn_decref.attributes.add('noinline')
calldtor = module.add_function(ir.FunctionType(ir.VoidType(), [_pointer_type]),
name="NRT_MemInfo_call_dtor")
builder = ir.IRBuilder(fn_decref.append_basic_block())
[ptr] = fn_decref.args
is_null = builder.icmp_unsigned("==", ptr, cgutils.get_null_value(ptr.type))
with cgutils.if_unlikely(builder, is_null):
builder.ret_void()
if _debug_print:
cgutils.printf(builder, "*** NRT_Decref %zu [%p]\n", builder.load(ptr),
ptr)
newrefct = builder.call(atomic_decr,
[builder.bitcast(ptr, atomic_decr.args[0].type)])
refct_eq_0 = builder.icmp_unsigned("==", newrefct,
ir.Constant(newrefct.type, 0))
with cgutils.if_unlikely(builder, refct_eq_0):
builder.call(calldtor, [ptr])
builder.ret_void()
# Set this to True to measure the overhead of atomic refcounts compared
# to non-atomic.
_disable_atomicity = 0
def _define_atomic_inc_dec(module, op, ordering):
"""Define a llvm function for atomic increment/decrement to the given module
Argument ``op`` is the operation "add"/"sub". Argument ``ordering`` is
the memory ordering. The generated function returns the new value.
"""
ftype = ir.FunctionType(_word_type, [_word_type.as_pointer()])
fn_atomic = ir.Function(module, ftype, name="nrt_atomic_{0}".format(op))
[ptr] = fn_atomic.args
bb = fn_atomic.append_basic_block()
builder = ir.IRBuilder(bb)
ONE = ir.Constant(_word_type, 1)
if not _disable_atomicity:
oldval = builder.atomic_rmw(op, ptr, ONE, ordering=ordering)
# Perform the operation on the old value so that we can pretend returning
# the "new" value.
res = getattr(builder, op)(oldval, ONE)
builder.ret(res)
else:
oldval = builder.load(ptr)
newval = getattr(builder, op)(oldval, ONE)
builder.store(newval, ptr)
builder.ret(oldval)
return fn_atomic
def _define_atomic_cas(module, ordering):
"""Define a llvm function for atomic compare-and-swap.
The generated function is a direct wrapper of the LLVM cmpxchg with the
difference that the a int indicate success (1) or failure (0) is returned
and the last argument is a output pointer for storing the old value.
Note
----
On failure, the generated function behaves like an atomic load. The loaded
value is stored to the last argument.
"""
ftype = ir.FunctionType(ir.IntType(32), [_word_type.as_pointer(),
_word_type, _word_type,
_word_type.as_pointer()])
fn_cas = ir.Function(module, ftype, name="nrt_atomic_cas")
[ptr, cmp, repl, oldptr] = fn_cas.args
bb = fn_cas.append_basic_block()
builder = ir.IRBuilder(bb)
outtup = builder.cmpxchg(ptr, cmp, repl, ordering=ordering)
old, ok = cgutils.unpack_tuple(builder, outtup, 2)
builder.store(old, oldptr)
builder.ret(builder.zext(ok, ftype.return_type))
return fn_cas
def _define_nrt_unresolved_abort(ctx, module):
"""
Defines an abort function for an unresolved symbol.
The function takes no args and will always raise an exception.
It should be safe to call this function with an incorrect number of arguments.
"""
fnty = ctx.call_conv.get_function_type(types.none, ())
fn = ir.Function(module, fnty, name="nrt_unresolved_abort")
bb = fn.append_basic_block()
builder = ir.IRBuilder(bb)
msg = "numba jitted function aborted due to unresolved symbol"
ctx.call_conv.return_user_exc(builder, RuntimeError, (msg,))
return fn
def create_nrt_module(ctx):
"""
Create an IR module defining the LLVM NRT functions.
A (IR module, library) tuple is returned.
"""
codegen = ctx.codegen()
library = codegen.create_library("nrt")
# Implement LLVM module with atomic ops
ir_mod = library.create_ir_module("nrt_module")
atomic_inc = _define_atomic_inc_dec(ir_mod, "add", ordering='monotonic')
atomic_dec = _define_atomic_inc_dec(ir_mod, "sub", ordering='monotonic')
_define_atomic_cas(ir_mod, ordering='monotonic')
_define_nrt_meminfo_data(ir_mod)
_define_nrt_incref(ir_mod, atomic_inc)
_define_nrt_decref(ir_mod, atomic_dec)
_define_nrt_unresolved_abort(ctx, ir_mod)
return ir_mod, library
def compile_nrt_functions(ctx):
"""
Compile all LLVM NRT functions and return a library containing them.
The library is created using the given target context.
"""
ir_mod, library = create_nrt_module(ctx)
library.add_ir_module(ir_mod)
library.finalize()
return library
```
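The functions above are emitted with llvmlite's IR builder. The standalone sketch below mirrors the pattern used in `_define_atomic_inc_dec` — declare a word-sized atomic add and print the resulting IR — without touching Numba's runtime; the module and function names are made up for illustration.

```python
from llvmlite import ir

word = ir.IntType(64)
mod = ir.Module(name="nrt_sketch")
fnty = ir.FunctionType(word, [word.as_pointer()])
fn = ir.Function(mod, fnty, name="atomic_add_sketch")
builder = ir.IRBuilder(fn.append_basic_block())
[ptr] = fn.args
one = ir.Constant(word, 1)
oldval = builder.atomic_rmw("add", ptr, one, ordering="monotonic")
builder.ret(builder.add(oldval, one))  # return the "new" value, as the helper above does
print(mod)                             # dump the textual LLVM IR
```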
#### File: numba/servicelib/service.py
```python
from __future__ import absolute_import, print_function, division
import functools
class Service(object):
def __init__(self, name="unnamed", arg=None):
self.name = name
self.enabled = True
self.arg = arg
self._task = self.process(self.arg)
next(self._task)
def service(self):
"""
Request servicing from the task.
Servicing is skipped if it is disabled through the "enabled"
attribute. While the task is executing, the service is disabled to
avoid recursion.
"""
if self.enabled:
enable = self.enabled
try:
# Prevent recursion
self.enabled = False
next(self._task)
finally:
self.enabled = enable
def process(self, arg):
"""
Override this to implement the service task.
This must be a generator.
Use `yield` to return control.
"""
raise NotImplementedError
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.service()
def after(self, fn):
"""
A decorator for a function. Service is triggered on return.
"""
@functools.wraps(fn)
def wrap(*args, **kws):
with self:
return fn(*args, **kws)
return wrap
# -----------------------------------------------------------------------------
# The rest are for testing
class HelloService(Service):
def process(self, arg):
count = 0
yield
while True:
print("Hello", count)
count += 1
yield
def test():
serv = HelloService("my.hello")
print("1")
serv.service()
print("2")
serv.service()
with serv:
print("3")
@serv.after
def nested():
print("4")
nested()
if __name__ == '__main__':
test()
```
#### File: numba/typeconv/rules.py
```python
from __future__ import print_function, absolute_import
import itertools
from .typeconv import TypeManager, TypeCastingRules
from numba import types
default_type_manager = TypeManager()
def dump_number_rules():
tm = default_type_manager
for a, b in itertools.product(types.number_domain, types.number_domain):
print(a, '->', b, tm.check_compatible(a, b))
def _init_casting_rules(tm):
tcr = TypeCastingRules(tm)
tcr.safe_unsafe(types.boolean, types.int8)
tcr.safe_unsafe(types.boolean, types.uint8)
tcr.promote_unsafe(types.int8, types.int16)
tcr.promote_unsafe(types.uint8, types.uint16)
tcr.promote_unsafe(types.int16, types.int32)
tcr.promote_unsafe(types.uint16, types.uint32)
tcr.promote_unsafe(types.int32, types.int64)
tcr.promote_unsafe(types.uint32, types.uint64)
tcr.safe_unsafe(types.uint8, types.int16)
tcr.safe_unsafe(types.uint16, types.int32)
tcr.safe_unsafe(types.uint32, types.int64)
tcr.safe_unsafe(types.int16, types.float32)
tcr.safe_unsafe(types.int32, types.float64)
tcr.unsafe_unsafe(types.int32, types.float32)
# XXX this is inconsistent with the above; but we want to prefer
# float64 over int64 when typing a heterogeneous operation,
# e.g. `float64 + int64`. Perhaps we need more granularity in the
# conversion kinds.
tcr.safe_unsafe(types.int64, types.float64)
tcr.safe_unsafe(types.uint64, types.float64)
tcr.promote_unsafe(types.float32, types.float64)
tcr.safe(types.float32, types.complex64)
tcr.safe(types.float64, types.complex128)
tcr.promote_unsafe(types.complex64, types.complex128)
# Allow integers to cast to void*
tcr.unsafe_unsafe(types.uintp, types.voidptr)
return tcr
default_casting_rules = _init_casting_rules(default_type_manager)
```
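A quick way to see the effect of the table above is to query the default manager directly, in the spirit of `dump_number_rules` but for a handful of pairs. This relies only on names defined in this module, assuming the `numba.typeconv.rules` import path matches this file's location.

```python
from numba import types
from numba.typeconv.rules import default_type_manager as tm

for a, b in [(types.int32, types.int64),     # registered as a promotion above
             (types.int64, types.float64),   # registered as safe (see the XXX note)
             (types.float64, types.int32)]:  # only the unsafe direction is registered
    print(a, '->', b, tm.check_compatible(a, b))
```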
#### File: numba/types/functions.py
```python
from __future__ import print_function, division, absolute_import
from .abstract import *
from .common import *
class BaseFunction(Callable):
"""
Base type class for some function types.
"""
def __init__(self, template):
if isinstance(template, (list, tuple)):
self.templates = tuple(template)
keys = set(temp.key for temp in self.templates)
if len(keys) != 1:
raise ValueError("incompatible templates: keys = %s"
% (keys,))
self.typing_key, = keys
else:
self.templates = (template,)
self.typing_key = template.key
self._impl_keys = {}
name = "%s(%s)" % (self.__class__.__name__, self.typing_key)
super(BaseFunction, self).__init__(name)
@property
def key(self):
return self.typing_key, self.templates
def augment(self, other):
"""
Augment this function type with the other function types' templates,
so as to support more input types.
"""
if type(other) is type(self) and other.typing_key == self.typing_key:
return type(self)(self.templates + other.templates)
def get_impl_key(self, sig):
"""
Get the implementation key (used by the target context) for the
given signature.
"""
return self._impl_keys[sig.args]
def get_call_type(self, context, args, kws):
for temp_cls in self.templates:
temp = temp_cls(context)
sig = temp.apply(args, kws)
if sig is not None:
self._impl_keys[sig.args] = temp.get_impl_key(sig)
return sig
def get_call_type_with_literals(self, context, args, kws, literals):
for temp_cls in self.templates:
temp = temp_cls(context)
if literals is not None and temp.support_literals:
sig = temp.apply(*literals)
else:
sig = temp.apply(args, kws)
if sig is not None:
self._impl_keys[sig.args] = temp.get_impl_key(sig)
return sig
def get_call_signatures(self):
sigs = []
is_param = False
for temp in self.templates:
sigs += getattr(temp, 'cases', [])
is_param = is_param or hasattr(temp, 'generic')
return sigs, is_param
class Function(BaseFunction, Opaque):
"""
Type class for builtin functions implemented by Numba.
"""
class BoundFunction(Callable, Opaque):
"""
A function with an implicit first argument (denoted as *this* below).
"""
def __init__(self, template, this):
# Create a derived template with an attribute *this*
newcls = type(template.__name__ + '.' + str(this), (template,),
dict(this=this))
self.template = newcls
self.typing_key = self.template.key
self.this = this
name = "%s(%s for %s)" % (self.__class__.__name__,
self.typing_key, self.this)
super(BoundFunction, self).__init__(name)
def unify(self, typingctx, other):
if (isinstance(other, BoundFunction) and
self.typing_key == other.typing_key):
this = typingctx.unify_pairs(self.this, other.this)
if this is not None:
# XXX is it right that both template instances are distinct?
return self.copy(this=this)
def copy(self, this):
return type(self)(self.template, this)
@property
def key(self):
return self.typing_key, self.this
def get_impl_key(self, sig):
"""
Get the implementation key (used by the target context) for the
given signature.
"""
return self.typing_key
def get_call_type(self, context, args, kws):
return self.template(context).apply(args, kws)
def get_call_type_with_literals(self, context, args, kws, literals):
if literals is not None and self.template.support_literals:
return self.template(context).apply(*literals)
else:
return self.get_call_type(context, args, kws)
def get_call_signatures(self):
sigs = getattr(self.template, 'cases', [])
is_param = hasattr(self.template, 'generic')
return sigs, is_param
class WeakType(Type):
"""
Base class for types parametrized by a mortal object, to which only
a weak reference is kept.
"""
def _store_object(self, obj):
self._wr = weakref.ref(obj)
def _get_object(self):
obj = self._wr()
if obj is None:
raise ReferenceError("underlying object has vanished")
return obj
@property
def key(self):
return self._wr
def __eq__(self, other):
if type(self) is type(other):
obj = self._wr()
return obj is not None and obj is other._wr()
def __hash__(self):
return Type.__hash__(self)
class Dispatcher(WeakType, Callable, Dummy):
"""
Type class for @jit-compiled functions.
"""
def __init__(self, dispatcher):
self._store_object(dispatcher)
super(Dispatcher, self).__init__("type(%s)" % dispatcher)
def get_call_type(self, context, args, kws):
"""
Resolve a call to this dispatcher using the given argument types.
A signature is returned and it is ensured that a compiled specialization
is available for it.
"""
template, pysig, args, kws = self.dispatcher.get_call_template(args, kws)
sig = template(context).apply(args, kws)
if sig:
sig.pysig = pysig
return sig
def get_call_signatures(self):
sigs = self.dispatcher.nopython_signatures
return sigs, True
@property
def dispatcher(self):
"""
A strong reference to the underlying numba.dispatcher.Dispatcher instance.
"""
return self._get_object()
def get_overload(self, sig):
"""
Get the compiled overload for the given signature.
"""
return self.dispatcher.get_overload(sig.args)
def get_impl_key(self, sig):
"""
Get the implementation key for the given signature.
"""
return self.get_overload(sig)
class ExternalFunctionPointer(BaseFunction):
"""
A pointer to a native function (e.g. exported via ctypes or cffi).
*get_pointer* is a Python function taking an object
and returning the raw pointer value as an int.
"""
def __init__(self, sig, get_pointer, cconv=None):
from ..typing.templates import (AbstractTemplate, make_concrete_template,
signature)
from . import ffi_forced_object
if sig.return_type == ffi_forced_object:
raise TypeError("Cannot return a pyobject from a external function")
self.sig = sig
self.requires_gil = any(a == ffi_forced_object for a in self.sig.args)
self.get_pointer = get_pointer
self.cconv = cconv
if self.requires_gil:
class GilRequiringDefn(AbstractTemplate):
key = self.sig
def generic(self, args, kws):
if kws:
raise TypeError("does not support keyword arguments")
# Make ffi_forced_object a bottom type to allow any type to be
# cast to it. This is the only place that supports
# ffi_forced_object.
coerced = [actual if formal == ffi_forced_object else formal
for actual, formal
in zip(args, self.key.args)]
return signature(self.key.return_type, *coerced)
template = GilRequiringDefn
else:
template = make_concrete_template("CFuncPtr", sig, [sig])
super(ExternalFunctionPointer, self).__init__(template)
@property
def key(self):
return self.sig, self.cconv, self.get_pointer
class ExternalFunction(Function):
"""
A named native function (resolvable by LLVM) accepting an explicit signature.
For internal use only.
"""
def __init__(self, symbol, sig):
from .. import typing
self.symbol = symbol
self.sig = sig
template = typing.make_concrete_template(symbol, symbol, [sig])
super(ExternalFunction, self).__init__(template)
@property
def key(self):
return self.symbol, self.sig
class NumbaFunction(Function):
"""
A named native function with the Numba calling convention
(resolvable by LLVM).
For internal use only.
"""
def __init__(self, fndesc, sig):
from .. import typing
self.fndesc = fndesc
self.sig = sig
template = typing.make_concrete_template(fndesc.qualname,
fndesc.qualname, [sig])
super(NumbaFunction, self).__init__(template)
@property
def key(self):
return self.fndesc.unique_name, self.sig
class NamedTupleClass(Callable, Opaque):
"""
Type class for namedtuple classes.
"""
def __init__(self, instance_class):
self.instance_class = instance_class
name = "class(%s)" % (instance_class)
super(NamedTupleClass, self).__init__(name)
def get_call_type(self, context, args, kws):
# Overridden by the __call__ constructor resolution in typing.collections
return None
def get_call_signatures(self):
return (), True
@property
def key(self):
return self.instance_class
class NumberClass(Callable, DTypeSpec, Opaque):
"""
Type class for number classes (e.g. "np.float64").
"""
def __init__(self, instance_type):
self.instance_type = instance_type
name = "class(%s)" % (instance_type,)
super(NumberClass, self).__init__(name)
def get_call_type(self, context, args, kws):
# Overridden by the __call__ constructor resolution in typing.builtins
return None
def get_call_signatures(self):
return (), True
@property
def key(self):
return self.instance_type
@property
def dtype(self):
return self.instance_type
class RecursiveCall(Opaque):
"""
Recursive call to a Dispatcher.
"""
_overloads = None
def __init__(self, dispatcher_type):
assert isinstance(dispatcher_type, Dispatcher)
self.dispatcher_type = dispatcher_type
name = "recursive(%s)" % (dispatcher_type,)
super(RecursiveCall, self).__init__(name)
# Initializing for the first time
if self._overloads is None:
self._overloads = {}
@property
def overloads(self):
return self._overloads
@property
def key(self):
return self.dispatcher_type
```
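As a small illustration of the `Dispatcher` type above, which keeps only a weak reference to the underlying dispatcher object, one can wrap a jitted function directly. The snippet uses only `njit` and the `types` namespace and is a sketch rather than a supported public workflow.

```python
from numba import njit, types

@njit
def add_one(x):
    return x + 1

disp_ty = types.Dispatcher(add_one)   # WeakType: stores weakref.ref(add_one)
print(disp_ty)                        # name of the form "type(<dispatcher>)"
print(disp_ty.dispatcher is add_one)  # True while add_one is still alive
```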
#### File: numba/typing/enumdecl.py
```python
from numba import types
from numba.typing.templates import (AbstractTemplate, AttributeTemplate,
signature, Registry)
registry = Registry()
infer = registry.register
infer_global = registry.register_global
infer_getattr = registry.register_attr
@infer_getattr
class EnumAttribute(AttributeTemplate):
key = types.EnumMember
def resolve_value(self, ty):
return ty.dtype
@infer_getattr
class EnumClassAttribute(AttributeTemplate):
key = types.EnumClass
def generic_resolve(self, ty, attr):
"""
Resolve attributes of an enum class as enum members.
"""
if attr in ty.instance_class.__members__:
return ty.member_type
@infer
class EnumClassStaticGetItem(AbstractTemplate):
key = "static_getitem"
def generic(self, args, kws):
enum, idx = args
if (isinstance(enum, types.EnumClass)
and idx in enum.instance_class.__members__):
return enum.member_type
class EnumCompare(AbstractTemplate):
def generic(self, args, kws):
[lhs, rhs] = args
if (isinstance(lhs, types.EnumMember)
and isinstance(rhs, types.EnumMember)
and lhs == rhs):
return signature(types.boolean, lhs, rhs)
@infer
class EnumEq(EnumCompare):
key = '=='
@infer
class EnumNe(EnumCompare):
key = '!='
```
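The templates above are what make enum members usable inside nopython code. A small usage sketch, assuming a Numba version with enum support (which this module implies):

```python
from enum import Enum
from numba import njit

class Color(Enum):
    RED = 1
    GREEN = 2

@njit
def is_red(c):
    return c == Color.RED        # resolved by the EnumEq template

@njit
def red_value():
    return Color['RED'].value    # EnumClassStaticGetItem plus the .value attribute

print(is_red(Color.RED), is_red(Color.GREEN))   # True False
print(red_value())                              # 1
```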
#### File: numpy/core/numeric.py
```python
from __future__ import division, absolute_import, print_function
import collections
import itertools
import operator
import sys
import warnings
import numpy as np
from . import multiarray
from .multiarray import (
_fastCopyAndTranspose as fastCopyAndTranspose, ALLOW_THREADS,
BUFSIZE, CLIP, MAXDIMS, MAY_SHARE_BOUNDS, MAY_SHARE_EXACT, RAISE,
WRAP, arange, array, broadcast, can_cast, compare_chararrays,
concatenate, copyto, count_nonzero, dot, dtype, empty,
empty_like, flatiter, frombuffer, fromfile, fromiter, fromstring,
inner, int_asbuffer, lexsort, matmul, may_share_memory,
min_scalar_type, ndarray, nditer, nested_iters, promote_types,
putmask, result_type, set_numeric_ops, shares_memory, vdot, where,
zeros, normalize_axis_index)
if sys.version_info[0] < 3:
from .multiarray import newbuffer, getbuffer
from . import umath
from .umath import (invert, sin, UFUNC_BUFSIZE_DEFAULT, ERR_IGNORE,
ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT, ERR_LOG,
ERR_DEFAULT, PINF, NAN)
from . import numerictypes
from .numerictypes import longlong, intc, int_, float_, complex_, bool_
from ._internal import TooHardError, AxisError
bitwise_not = invert
ufunc = type(sin)
newaxis = None
if sys.version_info[0] >= 3:
import pickle
basestring = str
import builtins
else:
import cPickle as pickle
import __builtin__ as builtins
loads = pickle.loads
__all__ = [
'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast',
'dtype', 'fromstring', 'fromfile', 'frombuffer', 'int_asbuffer',
'where', 'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose',
'lexsort', 'set_numeric_ops', 'can_cast', 'promote_types',
'min_scalar_type', 'result_type', 'asarray', 'asanyarray',
'ascontiguousarray', 'asfortranarray', 'isfortran', 'empty_like',
'zeros_like', 'ones_like', 'correlate', 'convolve', 'inner', 'dot',
'outer', 'vdot', 'roll',
'rollaxis', 'moveaxis', 'cross', 'tensordot', 'array2string',
'get_printoptions', 'set_printoptions', 'array_repr', 'array_str',
'set_string_function', 'little_endian', 'require', 'fromiter',
'array_equal', 'array_equiv', 'indices', 'fromfunction', 'isclose', 'load',
'loads', 'isscalar', 'binary_repr', 'base_repr', 'ones', 'identity',
'allclose', 'compare_chararrays', 'putmask', 'seterr', 'geterr',
'setbufsize', 'getbufsize', 'seterrcall', 'geterrcall', 'errstate',
'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', 'False_',
'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE',
'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like', 'matmul',
'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT',
'TooHardError', 'AxisError'
]
if sys.version_info[0] < 3:
__all__.extend(['getbuffer', 'newbuffer'])
class ComplexWarning(RuntimeWarning):
"""
The warning raised when casting a complex dtype to a real dtype.
As implemented, casting a complex number to a real discards its imaginary
part, but this behavior may not be what the user actually wants.
"""
pass
def zeros_like(a, dtype=None, order='K', subok=True):
"""
Return an array of zeros with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
.. versionadded:: 1.6.0
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible.
.. versionadded:: 1.6.0
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to True.
Returns
-------
out : ndarray
Array of zeros with the same shape and type as `a`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
zeros : Return a new array setting values to zero.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.zeros_like(x)
array([[0, 0, 0],
[0, 0, 0]])
>>> y = np.arange(3, dtype=np.float)
>>> y
array([ 0., 1., 2.])
>>> np.zeros_like(y)
array([ 0., 0., 0.])
"""
res = empty_like(a, dtype=dtype, order=order, subok=subok)
# needed instead of a 0 to get same result as zeros for string dtypes
z = zeros(1, dtype=res.dtype)
multiarray.copyto(res, z, casting='unsafe')
return res
def ones(shape, dtype=None, order='C'):
"""
Return a new array of given shape and type, filled with ones.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
Returns
-------
out : ndarray
Array of ones with the given shape, dtype, and order.
See Also
--------
zeros, ones_like
Examples
--------
>>> np.ones(5)
array([ 1., 1., 1., 1., 1.])
>>> np.ones((5,), dtype=np.int)
array([1, 1, 1, 1, 1])
>>> np.ones((2, 1))
array([[ 1.],
[ 1.]])
>>> s = (2,2)
>>> np.ones(s)
array([[ 1., 1.],
[ 1., 1.]])
"""
a = empty(shape, dtype, order)
multiarray.copyto(a, 1, casting='unsafe')
return a
def ones_like(a, dtype=None, order='K', subok=True):
"""
Return an array of ones with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
.. versionadded:: 1.6.0
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible.
.. versionadded:: 1.6.0
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to True.
Returns
-------
out : ndarray
Array of ones with the same shape and type as `a`.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
empty_like : Return an empty array with shape and type of input.
zeros : Return a new array setting values to zero.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.ones_like(x)
array([[1, 1, 1],
[1, 1, 1]])
>>> y = np.arange(3, dtype=np.float)
>>> y
array([ 0., 1., 2.])
>>> np.ones_like(y)
array([ 1., 1., 1.])
"""
res = empty_like(a, dtype=dtype, order=order, subok=subok)
multiarray.copyto(res, 1, casting='unsafe')
return res
def full(shape, fill_value, dtype=None, order='C'):
"""
Return a new array of given shape and type, filled with `fill_value`.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
fill_value : scalar
Fill value.
dtype : data-type, optional
The desired data-type for the array. The default, `None`, means
`np.array(fill_value).dtype`.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
Returns
-------
out : ndarray
Array of `fill_value` with the given shape, dtype, and order.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
full_like : Fill an array with shape and type of input.
zeros : Return a new array setting values to zero.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
Examples
--------
>>> np.full((2, 2), np.inf)
array([[ inf, inf],
[ inf, inf]])
>>> np.full((2, 2), 10)
array([[10, 10],
[10, 10]])
"""
if dtype is None:
dtype = array(fill_value).dtype
a = empty(shape, dtype, order)
multiarray.copyto(a, fill_value, casting='unsafe')
return a
def full_like(a, fill_value, dtype=None, order='K', subok=True):
"""
Return a full array with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
fill_value : scalar
Fill value.
dtype : data-type, optional
Overrides the data type of the result.
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible.
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to True.
Returns
-------
out : ndarray
Array of `fill_value` with the same shape and type as `a`.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
zeros : Return a new array setting values to zero.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
full : Fill a new array.
Examples
--------
>>> x = np.arange(6, dtype=np.int)
>>> np.full_like(x, 1)
array([1, 1, 1, 1, 1, 1])
>>> np.full_like(x, 0.1)
array([0, 0, 0, 0, 0, 0])
>>> np.full_like(x, 0.1, dtype=np.double)
array([ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
>>> np.full_like(x, np.nan, dtype=np.double)
array([ nan, nan, nan, nan, nan, nan])
>>> y = np.arange(6, dtype=np.double)
>>> np.full_like(y, 0.1)
array([ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
"""
res = empty_like(a, dtype=dtype, order=order, subok=subok)
multiarray.copyto(res, fill_value, casting='unsafe')
return res
def extend_all(module):
adict = {}
for a in __all__:
adict[a] = 1
try:
mall = getattr(module, '__all__')
except AttributeError:
mall = [k for k in module.__dict__.keys() if not k.startswith('_')]
for a in mall:
if a not in adict:
__all__.append(a)
def count_nonzero(a, axis=None):
"""
Counts the number of non-zero values in the array ``a``.
The word "non-zero" is in reference to the Python 2.x
built-in method ``__nonzero__()`` (renamed ``__bool__()``
in Python 3.x) of Python objects that tests an object's
"truthfulness". For example, any number is considered
truthful if it is nonzero, whereas any string is considered
truthful if it is not the empty string. Thus, this function
(recursively) counts how many elements in ``a`` (and in
sub-arrays thereof) have their ``__nonzero__()`` or ``__bool__()``
method evaluated to ``True``.
Parameters
----------
a : array_like
The array for which to count non-zeros.
axis : int or tuple, optional
Axis or tuple of axes along which to count non-zeros.
Default is None, meaning that non-zeros will be counted
along a flattened version of ``a``.
.. versionadded:: 1.12.0
Returns
-------
count : int or array of int
Number of non-zero values in the array along a given axis.
Otherwise, the total number of non-zero values in the array
is returned.
See Also
--------
nonzero : Return the coordinates of all the non-zero values.
Examples
--------
>>> np.count_nonzero(np.eye(4))
4
>>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]])
5
>>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]], axis=0)
array([1, 1, 1, 1, 1])
>>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]], axis=1)
array([2, 3])
"""
if axis is None or axis == ():
return multiarray.count_nonzero(a)
a = asanyarray(a)
if a.dtype == bool:
return a.sum(axis=axis, dtype=np.intp)
if issubdtype(a.dtype, np.number):
return (a != 0).sum(axis=axis, dtype=np.intp)
if (issubdtype(a.dtype, np.string_) or
issubdtype(a.dtype, np.unicode_)):
nullstr = a.dtype.type('')
return (a != nullstr).sum(axis=axis, dtype=np.intp)
axis = asarray(normalize_axis_tuple(axis, a.ndim))
counts = np.apply_along_axis(multiarray.count_nonzero, axis[0], a)
if axis.size == 1:
return counts.astype(np.intp, copy=False)
else:
# for subsequent axis numbers, that number decreases
# by one in this new 'counts' array if it was larger
# than the first axis upon which 'count_nonzero' was
# applied but remains unchanged if that number was
# smaller than that first axis
#
# this trick enables us to perform counts on object-like
# elements across multiple axes very quickly because integer
# addition is very well optimized
return counts.sum(axis=tuple(axis[1:] - (
axis[1:] > axis[0])), dtype=np.intp)
def asarray(a, dtype=None, order=None):
"""Convert the input to an array.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major (C-style) or
column-major (Fortran-style) memory representation.
Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray with matching dtype and order. If `a` is a
subclass of ndarray, a base class ndarray is returned.
See Also
--------
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
asarray_chkfinite : Similar function which checks input for NaNs and Infs.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array:
>>> a = [1, 2]
>>> np.asarray(a)
array([1, 2])
Existing arrays are not copied:
>>> a = np.array([1, 2])
>>> np.asarray(a) is a
True
If `dtype` is set, array is copied only if dtype does not match:
>>> a = np.array([1, 2], dtype=np.float32)
>>> np.asarray(a, dtype=np.float32) is a
True
>>> np.asarray(a, dtype=np.float64) is a
False
Contrary to `asanyarray`, ndarray subclasses are not passed through:
>>> issubclass(np.matrix, np.ndarray)
True
>>> a = np.matrix([[1, 2]])
>>> np.asarray(a) is a
False
>>> np.asanyarray(a) is a
True
"""
return array(a, dtype, copy=False, order=order)
def asanyarray(a, dtype=None, order=None):
"""Convert the input to an ndarray, but pass ndarray subclasses through.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes scalars, lists, lists of tuples, tuples, tuples of tuples,
tuples of lists, and ndarrays.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major (C-style) or column-major
(Fortran-style) memory representation. Defaults to 'C'.
Returns
-------
out : ndarray or an ndarray subclass
Array interpretation of `a`. If `a` is an ndarray or a subclass
of ndarray, it is returned as-is and no copy is performed.
See Also
--------
asarray : Similar function which always returns ndarrays.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
asarray_chkfinite : Similar function which checks input for NaNs and
Infs.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array:
>>> a = [1, 2]
>>> np.asanyarray(a)
array([1, 2])
Instances of `ndarray` subclasses are passed through as-is:
>>> a = np.matrix([1, 2])
>>> np.asanyarray(a) is a
True
"""
return array(a, dtype, copy=False, order=order, subok=True)
def ascontiguousarray(a, dtype=None):
"""
Return a contiguous array in memory (C order).
Parameters
----------
a : array_like
Input array.
dtype : str or dtype object, optional
Data-type of returned array.
Returns
-------
out : ndarray
Contiguous array of same shape and content as `a`, with type `dtype`
if specified.
See Also
--------
asfortranarray : Convert input to an ndarray with column-major
memory order.
require : Return an ndarray that satisfies requirements.
ndarray.flags : Information about the memory layout of the array.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> np.ascontiguousarray(x, dtype=np.float32)
array([[ 0., 1., 2.],
[ 3., 4., 5.]], dtype=float32)
>>> x.flags['C_CONTIGUOUS']
True
"""
return array(a, dtype, copy=False, order='C', ndmin=1)
def asfortranarray(a, dtype=None):
"""
Return an array laid out in Fortran order in memory.
Parameters
----------
a : array_like
Input array.
dtype : str or dtype object, optional
By default, the data-type is inferred from the input data.
Returns
-------
out : ndarray
The input `a` in Fortran, or column-major, order.
See Also
--------
ascontiguousarray : Convert input to a contiguous (C order) array.
asanyarray : Convert input to an ndarray with either row or
column-major memory order.
require : Return an ndarray that satisfies requirements.
ndarray.flags : Information about the memory layout of the array.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> y = np.asfortranarray(x)
>>> x.flags['F_CONTIGUOUS']
False
>>> y.flags['F_CONTIGUOUS']
True
"""
return array(a, dtype, copy=False, order='F', ndmin=1)
def require(a, dtype=None, requirements=None):
"""
Return an ndarray of the provided type that satisfies requirements.
This function is useful to be sure that an array with the correct flags
is returned for passing to compiled code (perhaps through ctypes).
Parameters
----------
a : array_like
The object to be converted to a type-and-requirement-satisfying array.
dtype : data-type
The required data-type. If None preserve the current dtype. If your
application requires the data to be in native byteorder, include
a byteorder specification as a part of the dtype specification.
requirements : str or list of str
The requirements list can be any of the following
* 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
* 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
* 'ALIGNED' ('A') - ensure a data-type aligned array
* 'WRITEABLE' ('W') - ensure a writable array
* 'OWNDATA' ('O') - ensure an array that owns its own data
* 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass
See Also
--------
asarray : Convert input to an ndarray.
asanyarray : Convert to an ndarray, but pass through ndarray subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfortranarray : Convert input to an ndarray with column-major
memory order.
ndarray.flags : Information about the memory layout of the array.
Notes
-----
The returned array will be guaranteed to have the listed requirements
by making a copy if needed.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> x.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : False
WRITEABLE : True
ALIGNED : True
UPDATEIFCOPY : False
>>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
>>> y.flags
C_CONTIGUOUS : False
F_CONTIGUOUS : True
OWNDATA : True
WRITEABLE : True
ALIGNED : True
UPDATEIFCOPY : False
"""
possible_flags = {'C':'C', 'C_CONTIGUOUS':'C', 'CONTIGUOUS':'C',
'F':'F', 'F_CONTIGUOUS':'F', 'FORTRAN':'F',
'A':'A', 'ALIGNED':'A',
'W':'W', 'WRITEABLE':'W',
'O':'O', 'OWNDATA':'O',
'E':'E', 'ENSUREARRAY':'E'}
if not requirements:
return asanyarray(a, dtype=dtype)
else:
requirements = set(possible_flags[x.upper()] for x in requirements)
if 'E' in requirements:
requirements.remove('E')
subok = False
else:
subok = True
order = 'A'
if requirements >= set(['C', 'F']):
raise ValueError('Cannot specify both "C" and "F" order')
elif 'F' in requirements:
order = 'F'
requirements.remove('F')
elif 'C' in requirements:
order = 'C'
requirements.remove('C')
arr = array(a, dtype=dtype, order=order, copy=False, subok=subok)
for prop in requirements:
if not arr.flags[prop]:
arr = arr.copy(order)
break
return arr
def isfortran(a):
"""
Returns True if the array is Fortran contiguous but *not* C contiguous.
This function is obsolete and, because of changes due to relaxed stride
checking, its return value for the same array may differ for versions
of NumPy >= 1.10.0 and previous versions. If you only want to check if an
array is Fortran contiguous use ``a.flags.f_contiguous`` instead.
Parameters
----------
a : ndarray
Input array.
Examples
--------
np.array allows you to specify whether the array is written in C-contiguous
order (last index varies the fastest), or FORTRAN-contiguous order in
memory (first index varies the fastest).
>>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(a)
False
>>> b = np.array([[1, 2, 3], [4, 5, 6]], order='FORTRAN')
>>> b
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(b)
True
The transpose of a C-ordered array is a FORTRAN-ordered array.
>>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(a)
False
>>> b = a.T
>>> b
array([[1, 4],
[2, 5],
[3, 6]])
>>> np.isfortran(b)
True
C-ordered arrays evaluate as False even if they are also FORTRAN-ordered.
>>> np.isfortran(np.array([1, 2], order='FORTRAN'))
False
"""
return a.flags.fnc
def argwhere(a):
"""
Find the indices of array elements that are non-zero, grouped by element.
Parameters
----------
a : array_like
Input data.
Returns
-------
index_array : ndarray
Indices of elements that are non-zero. Indices are grouped by element.
See Also
--------
where, nonzero
Notes
-----
``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``.
The output of ``argwhere`` is not suitable for indexing arrays.
For this purpose use ``where(a)`` instead.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.argwhere(x>1)
array([[0, 2],
[1, 0],
[1, 1],
[1, 2]])
"""
return transpose(nonzero(a))
def flatnonzero(a):
"""
Return indices that are non-zero in the flattened version of a.
This is equivalent to a.ravel().nonzero()[0].
Parameters
----------
a : ndarray
Input array.
Returns
-------
res : ndarray
Output array, containing the indices of the elements of `a.ravel()`
that are non-zero.
See Also
--------
nonzero : Return the indices of the non-zero elements of the input array.
ravel : Return a 1-D array containing the elements of the input array.
Examples
--------
>>> x = np.arange(-2, 3)
>>> x
array([-2, -1, 0, 1, 2])
>>> np.flatnonzero(x)
array([0, 1, 3, 4])
Use the indices of the non-zero elements as an index array to extract
these elements:
>>> x.ravel()[np.flatnonzero(x)]
array([-2, -1, 1, 2])
"""
return a.ravel().nonzero()[0]
_mode_from_name_dict = {'v': 0,
's': 1,
'f': 2}
def _mode_from_name(mode):
if isinstance(mode, basestring):
return _mode_from_name_dict[mode.lower()[0]]
return mode
def correlate(a, v, mode='valid'):
"""
Cross-correlation of two 1-dimensional sequences.
This function computes the correlation as generally defined in signal
processing texts::
c_{av}[k] = sum_n a[n+k] * conj(v[n])
with a and v sequences being zero-padded where necessary and conj being
the conjugate.
Parameters
----------
a, v : array_like
Input sequences.
mode : {'valid', 'same', 'full'}, optional
Refer to the `convolve` docstring. Note that the default
is 'valid', unlike `convolve`, which uses 'full'.
old_behavior : bool
`old_behavior` was removed in NumPy 1.10. If you need the old
behavior, use `multiarray.correlate`.
Returns
-------
out : ndarray
Discrete cross-correlation of `a` and `v`.
See Also
--------
convolve : Discrete, linear convolution of two one-dimensional sequences.
multiarray.correlate : Old, no conjugate, version of correlate.
Notes
-----
The definition of correlation above is not unique and sometimes correlation
may be defined differently. Another common definition is::
c'_{av}[k] = sum_n a[n] conj(v[n+k])
which is related to ``c_{av}[k]`` by ``c'_{av}[k] = c_{av}[-k]``.
Examples
--------
>>> np.correlate([1, 2, 3], [0, 1, 0.5])
array([ 3.5])
>>> np.correlate([1, 2, 3], [0, 1, 0.5], "same")
array([ 2. , 3.5, 3. ])
>>> np.correlate([1, 2, 3], [0, 1, 0.5], "full")
array([ 0.5, 2. , 3.5, 3. , 0. ])
Using complex sequences:
>>> np.correlate([1+1j, 2, 3-1j], [0, 1, 0.5j], 'full')
array([ 0.5-0.5j, 1.0+0.j , 1.5-1.5j, 3.0-1.j , 0.0+0.j ])
Note that you get the time reversed, complex conjugated result
when the two input sequences change places, i.e.,
``c_{va}[k] = c^{*}_{av}[-k]``:
>>> np.correlate([0, 1, 0.5j], [1+1j, 2, 3-1j], 'full')
array([ 0.0+0.j , 3.0+1.j , 1.5+1.5j, 1.0+0.j , 0.5+0.5j])
"""
mode = _mode_from_name(mode)
return multiarray.correlate2(a, v, mode)
def convolve(a, v, mode='full'):
"""
Returns the discrete, linear convolution of two one-dimensional sequences.
The convolution operator is often seen in signal processing, where it
models the effect of a linear time-invariant system on a signal [1]_. In
probability theory, the sum of two independent random variables is
distributed according to the convolution of their individual
distributions.
If `v` is longer than `a`, the arrays are swapped before computation.
Parameters
----------
a : (N,) array_like
First one-dimensional input array.
v : (M,) array_like
Second one-dimensional input array.
mode : {'full', 'valid', 'same'}, optional
'full':
By default, mode is 'full'. This returns the convolution
at each point of overlap, with an output shape of (N+M-1,). At
the end-points of the convolution, the signals do not overlap
completely, and boundary effects may be seen.
'same':
Mode 'same' returns output of length ``max(M, N)``. Boundary
effects are still visible.
'valid':
Mode 'valid' returns output of length
``max(M, N) - min(M, N) + 1``. The convolution product is only given
for points where the signals overlap completely. Values outside
the signal boundary have no effect.
Returns
-------
out : ndarray
Discrete, linear convolution of `a` and `v`.
See Also
--------
scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier
Transform.
scipy.linalg.toeplitz : Used to construct the convolution operator.
polymul : Polynomial multiplication. Same output as convolve, but also
accepts poly1d objects as input.
Notes
-----
The discrete convolution operation is defined as
.. math:: (a * v)[n] = \\sum_{m = -\\infty}^{\\infty} a[m] v[n - m]
It can be shown that a convolution :math:`x(t) * y(t)` in time/space
is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier
domain, after appropriate padding (padding is necessary to prevent
circular convolution). Since multiplication is more efficient (faster)
than convolution, the function `scipy.signal.fftconvolve` exploits the
FFT to calculate the convolution of large data-sets.
References
----------
.. [1] Wikipedia, "Convolution", http://en.wikipedia.org/wiki/Convolution.
Examples
--------
Note how the convolution operator flips the second array
before "sliding" the two across one another:
>>> np.convolve([1, 2, 3], [0, 1, 0.5])
array([ 0. , 1. , 2.5, 4. , 1.5])
Only return the middle values of the convolution.
Contains boundary effects, where zeros are taken
into account:
>>> np.convolve([1,2,3],[0,1,0.5], 'same')
array([ 1. , 2.5, 4. ])
The two arrays are of the same length, so there
is only one position where they completely overlap:
>>> np.convolve([1,2,3],[0,1,0.5], 'valid')
array([ 2.5])
"""
a, v = array(a, copy=False, ndmin=1), array(v, copy=False, ndmin=1)
if (len(v) > len(a)):
a, v = v, a
if len(a) == 0:
raise ValueError('a cannot be empty')
if len(v) == 0:
raise ValueError('v cannot be empty')
mode = _mode_from_name(mode)
return multiarray.correlate(a, v[::-1], mode)
def outer(a, b, out=None):
"""
Compute the outer product of two vectors.
Given two vectors, ``a = [a0, a1, ..., aM]`` and
``b = [b0, b1, ..., bN]``,
the outer product [1]_ is::
[[a0*b0 a0*b1 ... a0*bN ]
[a1*b0 .
[ ... .
[aM*b0 aM*bN ]]
Parameters
----------
a : (M,) array_like
First input vector. Input is flattened if
not already 1-dimensional.
b : (N,) array_like
Second input vector. Input is flattened if
not already 1-dimensional.
out : (M, N) ndarray, optional
A location where the result is stored
.. versionadded:: 1.9.0
Returns
-------
out : (M, N) ndarray
``out[i, j] = a[i] * b[j]``
See also
--------
inner, einsum
References
----------
.. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd
ed., Baltimore, MD, Johns Hopkins University Press, 1996,
pg. 8.
Examples
--------
Make a (*very* coarse) grid for computing a Mandelbrot set:
>>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
>>> rl
array([[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.]])
>>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,)))
>>> im
array([[ 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j],
[ 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j],
[ 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]])
>>> grid = rl + im
>>> grid
array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j],
[-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j],
[-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j],
[-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j],
[-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]])
An example using a "vector" of letters:
>>> x = np.array(['a', 'b', 'c'], dtype=object)
>>> np.outer(x, [1, 2, 3])
array([[a, aa, aaa],
[b, bb, bbb],
[c, cc, ccc]], dtype=object)
"""
a = asarray(a)
b = asarray(b)
return multiply(a.ravel()[:, newaxis], b.ravel()[newaxis,:], out)
def tensordot(a, b, axes=2):
"""
Compute tensor dot product along specified axes for arrays >= 1-D.
Given two tensors (arrays of dimension greater than or equal to one),
`a` and `b`, and an array_like object containing two array_like
objects, ``(a_axes, b_axes)``, sum the products of `a`'s and `b`'s
elements (components) over the axes specified by ``a_axes`` and
``b_axes``. The third argument can be a single non-negative
integer_like scalar, ``N``; if it is such, then the last ``N``
dimensions of `a` and the first ``N`` dimensions of `b` are summed
over.
Parameters
----------
a, b : array_like, len(shape) >= 1
Tensors to "dot".
axes : int or (2,) array_like
* integer_like
If an int N, sum over the last N axes of `a` and the first N axes
of `b` in order. The sizes of the corresponding axes must match.
* (2,) array_like
Or, a list of axes to be summed over, first sequence applying to `a`,
second to `b`. Both elements array_like must be of the same length.
See Also
--------
dot, einsum
Notes
-----
Three common use cases are:
* ``axes = 0`` : tensor product :math:`a\\otimes b`
* ``axes = 1`` : tensor dot product :math:`a\\cdot b`
* ``axes = 2`` : (default) tensor double contraction :math:`a:b`
When `axes` is integer_like, the sequence for evaluation will be: first
the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and
Nth axis in `b` last.
When there is more than one axis to sum over - and they are not the last
(first) axes of `a` (`b`) - the argument `axes` should consist of
two sequences of the same length, with the first axis to sum over given
first in both sequences, the second axis second, and so forth.
Examples
--------
A "traditional" example:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
>>> c.shape
(5, 2)
>>> c
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> # A slower but equivalent way of computing the same...
>>> d = np.zeros((5,2))
>>> for i in range(5):
... for j in range(2):
... for k in range(3):
... for n in range(4):
... d[i,j] += a[k,n,i] * b[n,k,j]
>>> c == d
array([[ True, True],
[ True, True],
[ True, True],
[ True, True],
[ True, True]], dtype=bool)
An extended example taking advantage of the overloading of + and \\*:
>>> a = np.array(range(1, 9))
>>> a.shape = (2, 2, 2)
>>> A = np.array(('a', 'b', 'c', 'd'), dtype=object)
>>> A.shape = (2, 2)
>>> a; A
array([[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]]])
array([[a, b],
[c, d]], dtype=object)
>>> np.tensordot(a, A) # third argument default is 2 for double-contraction
array([abbcccdddd, aaaaabbbbbbcccccccdddddddd], dtype=object)
>>> np.tensordot(a, A, 1)
array([[[acc, bdd],
[aaacccc, bbbdddd]],
[[aaaaacccccc, bbbbbdddddd],
[aaaaaaacccccccc, bbbbbbbdddddddd]]], dtype=object)
>>> np.tensordot(a, A, 0) # tensor product (result too long to incl.)
array([[[[[a, b],
[c, d]],
...
>>> np.tensordot(a, A, (0, 1))
array([[[abbbbb, cddddd],
[aabbbbbb, ccdddddd]],
[[aaabbbbbbb, cccddddddd],
[aaaabbbbbbbb, ccccdddddddd]]], dtype=object)
>>> np.tensordot(a, A, (2, 1))
array([[[abb, cdd],
[aaabbbb, cccdddd]],
[[aaaaabbbbbb, cccccdddddd],
[aaaaaaabbbbbbbb, cccccccdddddddd]]], dtype=object)
>>> np.tensordot(a, A, ((0, 1), (0, 1)))
array([abbbcccccddddddd, aabbbbccccccdddddddd], dtype=object)
>>> np.tensordot(a, A, ((2, 1), (1, 0)))
array([acccbbdddd, aaaaacccccccbbbbbbdddddddd], dtype=object)
"""
try:
iter(axes)
except:
axes_a = list(range(-axes, 0))
axes_b = list(range(0, axes))
else:
axes_a, axes_b = axes
try:
na = len(axes_a)
axes_a = list(axes_a)
except TypeError:
axes_a = [axes_a]
na = 1
try:
nb = len(axes_b)
axes_b = list(axes_b)
except TypeError:
axes_b = [axes_b]
nb = 1
a, b = asarray(a), asarray(b)
as_ = a.shape
nda = a.ndim
bs = b.shape
ndb = b.ndim
equal = True
if na != nb:
equal = False
else:
for k in range(na):
if as_[axes_a[k]] != bs[axes_b[k]]:
equal = False
break
if axes_a[k] < 0:
axes_a[k] += nda
if axes_b[k] < 0:
axes_b[k] += ndb
if not equal:
raise ValueError("shape-mismatch for sum")
# Move the axes to sum over to the end of "a"
# and to the front of "b"
notin = [k for k in range(nda) if k not in axes_a]
newaxes_a = notin + axes_a
N2 = 1
for axis in axes_a:
N2 *= as_[axis]
newshape_a = (-1, N2)
olda = [as_[axis] for axis in notin]
notin = [k for k in range(ndb) if k not in axes_b]
newaxes_b = axes_b + notin
N2 = 1
for axis in axes_b:
N2 *= bs[axis]
newshape_b = (N2, -1)
oldb = [bs[axis] for axis in notin]
at = a.transpose(newaxes_a).reshape(newshape_a)
bt = b.transpose(newaxes_b).reshape(newshape_b)
res = dot(at, bt)
return res.reshape(olda + oldb)
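# A quick consistency sketch for the reshape-and-dot reduction above (assumes
# the public numpy namespace is importable as np; not part of the original
# implementation):
#   a = np.arange(24.).reshape(2, 3, 4)
#   b = np.arange(12.).reshape(4, 3)
#   np.allclose(np.tensordot(a, b, axes=([2, 1], [0, 1])),
#               np.einsum('ijk,kj->i', a, b))    # -> True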
def roll(a, shift, axis=None):
"""
Roll array elements along a given axis.
Elements that roll beyond the last position are re-introduced at
the first.
Parameters
----------
a : array_like
Input array.
shift : int or tuple of ints
The number of places by which elements are shifted. If a tuple,
then `axis` must be a tuple of the same size, and each of the
given axes is shifted by the corresponding number. If an int
while `axis` is a tuple of ints, then the same value is used for
all given axes.
axis : int or tuple of ints, optional
Axis or axes along which elements are shifted. By default, the
array is flattened before shifting, after which the original
shape is restored.
Returns
-------
res : ndarray
Output array, with the same shape as `a`.
See Also
--------
rollaxis : Roll the specified axis backwards, until it lies in a
given position.
Notes
-----
.. versionadded:: 1.12.0
Supports rolling over multiple dimensions simultaneously.
Examples
--------
>>> x = np.arange(10)
>>> np.roll(x, 2)
array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
>>> x2 = np.reshape(x, (2,5))
>>> x2
array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> np.roll(x2, 1)
array([[9, 0, 1, 2, 3],
[4, 5, 6, 7, 8]])
>>> np.roll(x2, 1, axis=0)
array([[5, 6, 7, 8, 9],
[0, 1, 2, 3, 4]])
>>> np.roll(x2, 1, axis=1)
array([[4, 0, 1, 2, 3],
[9, 5, 6, 7, 8]])
"""
a = asanyarray(a)
if axis is None:
return roll(a.ravel(), shift, 0).reshape(a.shape)
else:
axis = normalize_axis_tuple(axis, a.ndim, allow_duplicate=True)
broadcasted = broadcast(shift, axis)
if broadcasted.ndim > 1:
raise ValueError(
"'shift' and 'axis' should be scalars or 1D sequences")
shifts = {ax: 0 for ax in range(a.ndim)}
for sh, ax in broadcasted:
shifts[ax] += sh
rolls = [((slice(None), slice(None)),)] * a.ndim
for ax, offset in shifts.items():
offset %= a.shape[ax] or 1 # If `a` is empty, nothing matters.
if offset:
# (original, result), (original, result)
rolls[ax] = ((slice(None, -offset), slice(offset, None)),
(slice(-offset, None), slice(None, offset)))
result = empty_like(a)
for indices in itertools.product(*rolls):
arr_index, res_index = zip(*indices)
result[res_index] = a[arr_index]
return result
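# A short sketch of the multi-axis form handled above (added in 1.12; assumes
# the public numpy namespace as np):
#   x2 = np.arange(10).reshape(2, 5)
#   np.roll(x2, (1, 2), axis=(0, 1))
#   -> array([[8, 9, 5, 6, 7],
#             [3, 4, 0, 1, 2]])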
def rollaxis(a, axis, start=0):
"""
Roll the specified axis backwards, until it lies in a given position.
Parameters
----------
a : ndarray
Input array.
axis : int
The axis to roll backwards. The positions of the other axes do not
change relative to one another.
start : int, optional
The axis is rolled until it lies before this position. The default,
0, results in a "complete" roll.
Returns
-------
res : ndarray
For NumPy >= 1.10.0 a view of `a` is always returned. For earlier
NumPy versions a view of `a` is returned only if the order of the
axes is changed, otherwise the input array is returned.
See Also
--------
moveaxis : Move array axes to new positions.
roll : Roll the elements of an array by a number of positions along a
given axis.
Examples
--------
>>> a = np.ones((3,4,5,6))
>>> np.rollaxis(a, 3, 1).shape
(3, 6, 4, 5)
>>> np.rollaxis(a, 2).shape
(5, 3, 4, 6)
>>> np.rollaxis(a, 1, 4).shape
(3, 5, 6, 4)
"""
n = a.ndim
axis = normalize_axis_index(axis, n)
if start < 0:
start += n
msg = "'%s' arg requires %d <= %s < %d, but %d was passed in"
if not (0 <= start < n + 1):
raise AxisError(msg % ('start', -n, 'start', n + 1, start))
if axis < start:
# it's been removed
start -= 1
if axis == start:
return a[...]
axes = list(range(0, n))
axes.remove(axis)
axes.insert(start, axis)
return a.transpose(axes)
def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False):
"""
Normalizes an axis argument into a tuple of non-negative integer axes.
This handles shorthands such as ``1`` and converts them to ``(1,)``,
as well as performing the handling of negative indices covered by
`normalize_axis_index`.
By default, this forbids axes from being specified multiple times.
Used internally by multi-axis-checking logic.
.. versionadded:: 1.13.0
Parameters
----------
axis : int, iterable of int
The un-normalized index or indices of the axis.
ndim : int
The number of dimensions of the array that `axis` should be normalized
against.
argname : str, optional
A prefix to put before the error message, typically the name of the
argument.
allow_duplicate : bool, optional
If False, the default, disallow an axis from being specified twice.
Returns
-------
normalized_axes : tuple of int
The normalized axis index, such that `0 <= normalized_axis < ndim`
Raises
------
AxisError
If any axis provided is out of range
ValueError
If an axis is repeated
See also
--------
normalize_axis_index : normalizing a single scalar axis
"""
try:
axis = [operator.index(axis)]
except TypeError:
axis = tuple(axis)
axis = tuple(normalize_axis_index(ax, ndim, argname) for ax in axis)
if not allow_duplicate and len(set(axis)) != len(axis):
if argname:
raise ValueError('repeated axis in `{}` argument'.format(argname))
else:
raise ValueError('repeated axis')
return axis
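# A minimal usage sketch (results follow directly from the logic above):
#   normalize_axis_tuple(1, 3)        -> (1,)
#   normalize_axis_tuple((-1, 0), 3)  -> (2, 0)
#   normalize_axis_tuple((0, 0), 3)   -> ValueError: repeated axis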
def moveaxis(a, source, destination):
"""
Move axes of an array to new positions.
Other axes remain in their original order.
.. versionadded:: 1.11.0
Parameters
----------
a : np.ndarray
The array whose axes should be reordered.
source : int or sequence of int
Original positions of the axes to move. These must be unique.
destination : int or sequence of int
Destination positions for each of the original axes. These must also be
unique.
Returns
-------
result : np.ndarray
Array with moved axes. This array is a view of the input array.
See Also
--------
transpose: Permute the dimensions of an array.
swapaxes: Interchange two axes of an array.
Examples
--------
>>> x = np.zeros((3, 4, 5))
>>> np.moveaxis(x, 0, -1).shape
(4, 5, 3)
>>> np.moveaxis(x, -1, 0).shape
(5, 3, 4)
These all achieve the same result:
>>> np.transpose(x).shape
(5, 4, 3)
>>> np.swapaxes(x, 0, -1).shape
(5, 4, 3)
>>> np.moveaxis(x, [0, 1], [-1, -2]).shape
(5, 4, 3)
>>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape
(5, 4, 3)
"""
try:
# allow duck-array types if they define transpose
transpose = a.transpose
except AttributeError:
a = asarray(a)
transpose = a.transpose
source = normalize_axis_tuple(source, a.ndim, 'source')
destination = normalize_axis_tuple(destination, a.ndim, 'destination')
if len(source) != len(destination):
raise ValueError('`source` and `destination` arguments must have '
'the same number of elements')
order = [n for n in range(a.ndim) if n not in source]
for dest, src in sorted(zip(destination, source)):
order.insert(dest, src)
result = transpose(order)
return result
# fix hack in scipy which imports this function
def _move_axis_to_0(a, axis):
return rollaxis(a, axis, 0)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
"""
Return the cross product of two (arrays of) vectors.
The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular
to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors
are defined by the last axis of `a` and `b` by default, and these axes
can have dimensions 2 or 3. Where the dimension of either `a` or `b` is
2, the third component of the input vector is assumed to be zero and the
cross product calculated accordingly. In cases where both input vectors
have dimension 2, the z-component of the cross product is returned.
Parameters
----------
a : array_like
Components of the first vector(s).
b : array_like
Components of the second vector(s).
axisa : int, optional
Axis of `a` that defines the vector(s). By default, the last axis.
axisb : int, optional
Axis of `b` that defines the vector(s). By default, the last axis.
axisc : int, optional
Axis of `c` containing the cross product vector(s). Ignored if
both input vectors have dimension 2, as the return is scalar.
By default, the last axis.
axis : int, optional
If defined, the axis of `a`, `b` and `c` that defines the vector(s)
and cross product(s). Overrides `axisa`, `axisb` and `axisc`.
Returns
-------
c : ndarray
Vector cross product(s).
Raises
------
ValueError
When the dimension of the vector(s) in `a` and/or `b` does not
equal 2 or 3.
See Also
--------
inner : Inner product
outer : Outer product.
ix_ : Construct index arrays.
Notes
-----
.. versionadded:: 1.9.0
Supports full broadcasting of the inputs.
Examples
--------
Vector cross-product.
>>> x = [1, 2, 3]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([-3, 6, -3])
One vector with dimension 2.
>>> x = [1, 2]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([12, -6, -3])
Equivalently:
>>> x = [1, 2, 0]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([12, -6, -3])
Both vectors with dimension 2.
>>> x = [1,2]
>>> y = [4,5]
>>> np.cross(x, y)
-3
Multiple vector cross-products. Note that the direction of the cross
product vector is defined by the `right-hand rule`.
>>> x = np.array([[1,2,3], [4,5,6]])
>>> y = np.array([[4,5,6], [1,2,3]])
>>> np.cross(x, y)
array([[-3, 6, -3],
[ 3, -6, 3]])
The orientation of `c` can be changed using the `axisc` keyword.
>>> np.cross(x, y, axisc=0)
array([[-3, 3],
[ 6, -6],
[-3, 3]])
Change the vector definition of `x` and `y` using `axisa` and `axisb`.
>>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]])
>>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]])
>>> np.cross(x, y)
array([[ -6, 12, -6],
[ 0, 0, 0],
[ 6, -12, 6]])
>>> np.cross(x, y, axisa=0, axisb=0)
array([[-24, 48, -24],
[-30, 60, -30],
[-36, 72, -36]])
"""
if axis is not None:
axisa, axisb, axisc = (axis,) * 3
a = asarray(a)
b = asarray(b)
# Check axisa and axisb are within bounds
axisa = normalize_axis_index(axisa, a.ndim, msg_prefix='axisa')
axisb = normalize_axis_index(axisb, b.ndim, msg_prefix='axisb')
# Move working axis to the end of the shape
a = rollaxis(a, axisa, a.ndim)
b = rollaxis(b, axisb, b.ndim)
msg = ("incompatible dimensions for cross product\n"
"(dimension must be 2 or 3)")
if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):
raise ValueError(msg)
# Create the output array
shape = broadcast(a[..., 0], b[..., 0]).shape
if a.shape[-1] == 3 or b.shape[-1] == 3:
shape += (3,)
# Check axisc is within bounds
axisc = normalize_axis_index(axisc, len(shape), msg_prefix='axisc')
dtype = promote_types(a.dtype, b.dtype)
cp = empty(shape, dtype)
# create local aliases for readability
a0 = a[..., 0]
a1 = a[..., 1]
if a.shape[-1] == 3:
a2 = a[..., 2]
b0 = b[..., 0]
b1 = b[..., 1]
if b.shape[-1] == 3:
b2 = b[..., 2]
if cp.ndim != 0 and cp.shape[-1] == 3:
cp0 = cp[..., 0]
cp1 = cp[..., 1]
cp2 = cp[..., 2]
if a.shape[-1] == 2:
if b.shape[-1] == 2:
# a0 * b1 - a1 * b0
multiply(a0, b1, out=cp)
cp -= a1 * b0
return cp
else:
assert b.shape[-1] == 3
# cp0 = a1 * b2 - 0 (a2 = 0)
# cp1 = 0 - a0 * b2 (a2 = 0)
# cp2 = a0 * b1 - a1 * b0
multiply(a1, b2, out=cp0)
multiply(a0, b2, out=cp1)
negative(cp1, out=cp1)
multiply(a0, b1, out=cp2)
cp2 -= a1 * b0
else:
assert a.shape[-1] == 3
if b.shape[-1] == 3:
# cp0 = a1 * b2 - a2 * b1
# cp1 = a2 * b0 - a0 * b2
# cp2 = a0 * b1 - a1 * b0
multiply(a1, b2, out=cp0)
tmp = array(a2 * b1)
cp0 -= tmp
multiply(a2, b0, out=cp1)
multiply(a0, b2, out=tmp)
cp1 -= tmp
multiply(a0, b1, out=cp2)
multiply(a1, b0, out=tmp)
cp2 -= tmp
else:
assert b.shape[-1] == 2
# cp0 = 0 - a2 * b1 (b2 = 0)
# cp1 = a2 * b0 - 0 (b2 = 0)
# cp2 = a0 * b1 - a1 * b0
multiply(a2, b1, out=cp0)
negative(cp0, out=cp0)
multiply(a2, b0, out=cp1)
multiply(a0, b1, out=cp2)
cp2 -= a1 * b0
# This works because we are moving the last axis
return rollaxis(cp, -1, axisc)
# Use numarray's printing function
from .arrayprint import array2string, get_printoptions, set_printoptions
_typelessdata = [int_, float_, complex_]
if issubclass(intc, int):
_typelessdata.append(intc)
if issubclass(longlong, int):
_typelessdata.append(longlong)
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
"""
Return the string representation of an array.
Parameters
----------
arr : ndarray
Input array.
max_line_width : int, optional
The maximum number of columns the string should span. Newline
characters split the string appropriately after array elements.
precision : int, optional
Floating point precision. Default is the current printing precision
(usually 8), which can be altered using `set_printoptions`.
suppress_small : bool, optional
Represent very small numbers as zero, default is False. Very small
is defined by `precision`, if the precision is 8 then
numbers smaller than 5e-9 are represented as zero.
Returns
-------
string : str
The string representation of an array.
See Also
--------
array_str, array2string, set_printoptions
Examples
--------
>>> np.array_repr(np.array([1,2]))
'array([1, 2])'
>>> np.array_repr(np.ma.array([0.]))
'MaskedArray([ 0.])'
>>> np.array_repr(np.array([], np.int32))
'array([], dtype=int32)'
>>> x = np.array([1e-6, 4e-7, 2, 3])
>>> np.array_repr(x, precision=6, suppress_small=True)
'array([ 0.000001, 0. , 2. , 3. ])'
"""
if type(arr) is not ndarray:
class_name = type(arr).__name__
else:
class_name = "array"
if arr.size > 0 or arr.shape == (0,):
lst = array2string(arr, max_line_width, precision, suppress_small,
', ', class_name + "(")
else: # show zero-length shape unless it is (0,)
lst = "[], shape=%s" % (repr(arr.shape),)
skipdtype = (arr.dtype.type in _typelessdata) and arr.size > 0
if skipdtype:
return "%s(%s)" % (class_name, lst)
else:
typename = arr.dtype.name
# Quote typename in the output if it is not a plain alphanumeric name
# (e.g. structured or parametrized dtypes).
if typename and not (typename[0].isalpha() and typename.isalnum()):
typename = "'%s'" % typename
lf = ' '
if issubclass(arr.dtype.type, flexible):
if arr.dtype.names:
typename = "%s" % str(arr.dtype)
else:
typename = "'%s'" % str(arr.dtype)
lf = '\n'+' '*len(class_name + "(")
return "%s(%s,%sdtype=%s)" % (class_name, lst, lf, typename)
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
"""
Return a string representation of the data in an array.
The data in the array is returned as a single string. This function is
similar to `array_repr`, the difference being that `array_repr` also
returns information on the kind of array and its data type.
Parameters
----------
a : ndarray
Input array.
max_line_width : int, optional
Inserts newlines if text is longer than `max_line_width`. The
default is, indirectly, 75.
precision : int, optional
Floating point precision. Default is the current printing precision
(usually 8), which can be altered using `set_printoptions`.
suppress_small : bool, optional
Represent numbers "very close" to zero as zero; default is False.
Very close is defined by precision: if the precision is 8, e.g.,
numbers smaller (in absolute value) than 5e-9 are represented as
zero.
See Also
--------
array2string, array_repr, set_printoptions
Examples
--------
>>> np.array_str(np.arange(3))
'[0 1 2]'
"""
return array2string(a, max_line_width, precision, suppress_small, ' ', "", str)
def set_string_function(f, repr=True):
"""
Set a Python function to be used when pretty printing arrays.
Parameters
----------
f : function or None
Function to be used to pretty print arrays. The function should expect
a single array argument and return a string of the representation of
the array. If None, the function is reset to the default NumPy function
to print arrays.
repr : bool, optional
If True (default), the function for pretty printing (``__repr__``)
is set, if False the function that returns the default string
representation (``__str__``) is set.
See Also
--------
set_printoptions, get_printoptions
Examples
--------
>>> def pprint(arr):
... return 'HA! - What are you going to do now?'
...
>>> np.set_string_function(pprint)
>>> a = np.arange(10)
>>> a
HA! - What are you going to do now?
>>> print(a)
[0 1 2 3 4 5 6 7 8 9]
We can reset the function to the default:
>>> np.set_string_function(None)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
`repr` affects either pretty printing or normal string representation.
Note that ``__repr__`` is still affected by setting ``__str__``
because the width of each array element in the returned string becomes
equal to the length of the result of ``__str__()``.
>>> x = np.arange(4)
>>> np.set_string_function(lambda x:'random', repr=False)
>>> x.__str__()
'random'
>>> x.__repr__()
'array([ 0, 1, 2, 3])'
"""
if f is None:
if repr:
return multiarray.set_string_function(array_repr, 1)
else:
return multiarray.set_string_function(array_str, 0)
else:
return multiarray.set_string_function(f, repr)
set_string_function(array_str, 0)
set_string_function(array_repr, 1)
little_endian = (sys.byteorder == 'little')
def indices(dimensions, dtype=int):
"""
Return an array representing the indices of a grid.
Compute an array where the subarrays contain index values 0,1,...
varying only along the corresponding axis.
Parameters
----------
dimensions : sequence of ints
The shape of the grid.
dtype : dtype, optional
Data type of the result.
Returns
-------
grid : ndarray
The array of grid indices,
``grid.shape = (len(dimensions),) + tuple(dimensions)``.
See Also
--------
mgrid, meshgrid
Notes
-----
The output shape is obtained by prepending the number of dimensions
in front of the tuple of dimensions, i.e. if `dimensions` is a tuple
``(r0, ..., rN-1)`` of length ``N``, the output shape is
``(N,r0,...,rN-1)``.
The subarrays ``grid[k]`` contains the N-D array of indices along the
``k-th`` axis. Explicitly::
grid[k,i0,i1,...,iN-1] = ik
Examples
--------
>>> grid = np.indices((2, 3))
>>> grid.shape
(2, 2, 3)
>>> grid[0] # row indices
array([[0, 0, 0],
[1, 1, 1]])
>>> grid[1] # column indices
array([[0, 1, 2],
[0, 1, 2]])
The indices can be used as an index into an array.
>>> x = np.arange(20).reshape(5, 4)
>>> row, col = np.indices((2, 3))
>>> x[row, col]
array([[0, 1, 2],
[4, 5, 6]])
Note that it would be more straightforward in the above example to
extract the required elements directly with ``x[:2, :3]``.
"""
dimensions = tuple(dimensions)
N = len(dimensions)
shape = (1,)*N
res = empty((N,)+dimensions, dtype=dtype)
for i, dim in enumerate(dimensions):
res[i] = arange(dim, dtype=dtype).reshape(
shape[:i] + (dim,) + shape[i+1:]
)
return res
def fromfunction(function, shape, **kwargs):
"""
Construct an array by executing a function over each coordinate.
The resulting array therefore has a value ``fn(x, y, z)`` at
coordinate ``(x, y, z)``.
Parameters
----------
function : callable
The function is called with N parameters, where N is the rank of
`shape`. Each parameter represents the coordinates of the array
varying along a specific axis. For example, if `shape`
were ``(2, 2)``, then the parameters would be
``array([[0, 0], [1, 1]])`` and ``array([[0, 1], [0, 1]])``
shape : (N,) tuple of ints
Shape of the output array, which also determines the shape of
the coordinate arrays passed to `function`.
dtype : data-type, optional
Data-type of the coordinate arrays passed to `function`.
By default, `dtype` is float.
Returns
-------
fromfunction : any
The result of the call to `function` is passed back directly.
Therefore the shape of `fromfunction` is completely determined by
`function`. If `function` returns a scalar value, the shape of
`fromfunction` would match the `shape` parameter.
See Also
--------
indices, meshgrid
Notes
-----
Keywords other than `dtype` are passed to `function`.
Examples
--------
>>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int)
array([[ True, False, False],
[False, True, False],
[False, False, True]], dtype=bool)
>>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int)
array([[0, 1, 2],
[1, 2, 3],
[2, 3, 4]])
"""
dtype = kwargs.pop('dtype', float)
args = indices(shape, dtype=dtype)
return function(*args, **kwargs)
def isscalar(num):
"""
Returns True if the type of `num` is a scalar type.
Parameters
----------
num : any
Input argument, can be of any type and shape.
Returns
-------
val : bool
True if `num` is a scalar type, False if it is not.
Examples
--------
>>> np.isscalar(3.1)
True
>>> np.isscalar([3.1])
False
>>> np.isscalar(False)
True
"""
if isinstance(num, generic):
return True
else:
return type(num) in ScalarType
def binary_repr(num, width=None):
"""
Return the binary representation of the input number as a string.
For negative numbers, if width is not given, a minus sign is added to the
front. If width is given, the two's complement of the number is
returned, with respect to that width.
In a two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. An N-bit two's-complement
system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
num : int
Only an integer decimal number can be used.
width : int, optional
The length of the returned string if `num` is positive, or the length
of the two's complement if `num` is negative, provided that `width` is
at least a sufficient number of bits for `num` to be represented in the
designated form.
If the `width` value is insufficient, it will be ignored, and `num` will
be returned in binary (`num` > 0) or two's complement (`num` < 0) form
with its width equal to the minimum number of bits needed to represent
the number in the designated form. This behavior is deprecated and will
later raise an error.
.. deprecated:: 1.12.0
Returns
-------
bin : str
Binary representation of `num` or two's complement of `num`.
See Also
--------
base_repr: Return a string representation of a number in the given base
system.
bin: Python's built-in binary representation generator of an integer.
Notes
-----
`binary_repr` is equivalent to using `base_repr` with base 2, but about 25x
faster.
References
----------
.. [1] Wikipedia, "Two's complement",
http://en.wikipedia.org/wiki/Two's_complement
Examples
--------
>>> np.binary_repr(3)
'11'
>>> np.binary_repr(-3)
'-11'
>>> np.binary_repr(3, width=4)
'0011'
The two's complement is returned when the input number is negative and
width is specified:
>>> np.binary_repr(-3, width=3)
'101'
>>> np.binary_repr(-3, width=5)
'11101'
"""
def warn_if_insufficient(width, binwidth):
if width is not None and width < binwidth:
warnings.warn(
"Insufficient bit width provided. This behavior "
"will raise an error in the future.", DeprecationWarning,
stacklevel=3)
if num == 0:
return '0' * (width or 1)
elif num > 0:
binary = bin(num)[2:]
binwidth = len(binary)
outwidth = (binwidth if width is None
else max(binwidth, width))
warn_if_insufficient(width, binwidth)
return binary.zfill(outwidth)
else:
if width is None:
return '-' + bin(-num)[2:]
else:
poswidth = len(bin(-num)[2:])
# See gh-8679: remove extra digit
# for numbers at boundaries.
if 2**(poswidth - 1) == -num:
poswidth -= 1
twocomp = 2**(poswidth + 1) + num
binary = bin(twocomp)[2:]
binwidth = len(binary)
outwidth = max(binwidth, width)
warn_if_insufficient(width, binwidth)
return '1' * (outwidth - binwidth) + binary
def base_repr(number, base=2, padding=0):
"""
Return a string representation of a number in the given base system.
Parameters
----------
number : int
The value to convert. Positive and negative values are handled.
base : int, optional
Convert `number` to the `base` number system. The valid range is 2-36,
the default value is 2.
padding : int, optional
Number of zeros padded on the left. Default is 0 (no padding).
Returns
-------
out : str
String representation of `number` in `base` system.
See Also
--------
binary_repr : Faster version of `base_repr` for base 2.
Examples
--------
>>> np.base_repr(5)
'101'
>>> np.base_repr(6, 5)
'11'
>>> np.base_repr(7, base=5, padding=3)
'00012'
>>> np.base_repr(10, base=16)
'A'
>>> np.base_repr(32, base=16)
'20'
"""
digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if base > len(digits):
raise ValueError("Bases greater than 36 not handled in base_repr.")
elif base < 2:
raise ValueError("Bases less than 2 not handled in base_repr.")
num = abs(number)
res = []
while num:
res.append(digits[num % base])
num //= base
if padding:
res.append('0' * padding)
if number < 0:
res.append('-')
return ''.join(reversed(res or '0'))
def load(file):
"""
Wrapper around cPickle.load which accepts either a file-like object or
a filename.
Note that the NumPy binary format is not based on pickle/cPickle anymore.
For details on the preferred way of loading and saving files, see `load`
and `save`.
See Also
--------
load, save
"""
if isinstance(file, type("")):
file = open(file, "rb")
return pickle.load(file)
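# A minimal sketch of this legacy cPickle-based loader (the filename is a
# placeholder; for the .npy/.npz format use numpy.load instead):
#   obj = load('legacy_pickle.dat')   # accepts a filename or an open file object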
# These are all essentially abbreviations
# These might wind up in a special abbreviations module
def _maketup(descr, val):
dt = dtype(descr)
# Place val in all scalar tuples:
fields = dt.fields
if fields is None:
return val
else:
res = [_maketup(fields[name][0], val) for name in dt.names]
return tuple(res)
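# A short illustration of the helper above (derived from the code; not part of
# the original file):
#   _maketup('i4', 0)                        -> 0        (no fields)
#   _maketup([('x', 'i4'), ('y', 'f8')], 0)  -> (0, 0)
#   _maketup([('p', [('x', 'i4'), ('y', 'i4')])], 1) -> ((1, 1),)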
def identity(n, dtype=None):
"""
Return the identity array.
The identity array is a square array with ones on
the main diagonal.
Parameters
----------
n : int
Number of rows (and columns) in `n` x `n` output.
dtype : data-type, optional
Data-type of the output. Defaults to ``float``.
Returns
-------
out : ndarray
`n` x `n` array with its main diagonal set to one,
and all other elements 0.
Examples
--------
>>> np.identity(3)
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
"""
from numpy import eye
return eye(n, dtype=dtype)
def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns True if two arrays are element-wise equal within a tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
If either array contains one or more NaNs, False is returned.
Infs are treated as equal if they are in the same place and of the same
sign in both arrays.
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in `a` will be
considered equal to NaN's in `b` in the output array.
.. versionadded:: 1.10.0
Returns
-------
allclose : bool
Returns True if the two arrays are equal within the given
tolerance; False otherwise.
See Also
--------
isclose, all, any
Notes
-----
If the following equation is element-wise True, then allclose returns
True.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
The above equation is not symmetric in `a` and `b`, so that
`allclose(a, b)` might be different from `allclose(b, a)` in
some rare cases.
Examples
--------
>>> np.allclose([1e10,1e-7], [1.00001e10,1e-8])
False
>>> np.allclose([1e10,1e-8], [1.00001e10,1e-9])
True
>>> np.allclose([1e10,1e-8], [1.0001e10,1e-9])
False
>>> np.allclose([1.0, np.nan], [1.0, np.nan])
False
>>> np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
True
"""
res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan))
return bool(res)
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within a
tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in `a` will be
considered equal to NaN's in `b` in the output array.
Returns
-------
y : array_like
Returns a boolean array of where `a` and `b` are equal within the
given tolerance. If both `a` and `b` are scalars, returns a single
boolean value.
See Also
--------
allclose
Notes
-----
.. versionadded:: 1.7.0
For finite values, isclose uses the following equation to test whether
two floating point values are equivalent.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
The above equation is not symmetric in `a` and `b`, so that
`isclose(a, b)` might be different from `isclose(b, a)` in
some rare cases.
Examples
--------
>>> np.isclose([1e10,1e-7], [1.00001e10,1e-8])
array([True, False])
>>> np.isclose([1e10,1e-8], [1.00001e10,1e-9])
array([True, True])
>>> np.isclose([1e10,1e-8], [1.0001e10,1e-9])
array([False, True])
>>> np.isclose([1.0, np.nan], [1.0, np.nan])
array([True, False])
>>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
array([True, True])
"""
def within_tol(x, y, atol, rtol):
with errstate(invalid='ignore'):
result = less_equal(abs(x-y), atol + rtol * abs(y))
if isscalar(a) and isscalar(b):
result = bool(result)
return result
x = array(a, copy=False, subok=True, ndmin=1)
y = array(b, copy=False, subok=True, ndmin=1)
# Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
# This will cause casting of x later. Also, make sure to allow subclasses
# (e.g., for numpy.ma).
dt = multiarray.result_type(y, 1.)
y = array(y, dtype=dt, copy=False, subok=True)
xfin = isfinite(x)
yfin = isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = zeros_like(finite, subok=True)
# Because we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * ones_like(cond)
y = y * ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
both_nan = isnan(x) & isnan(y)
cond[both_nan] = both_nan[both_nan]
if isscalar(a) and isscalar(b):
return bool(cond)
else:
return cond
def array_equal(a1, a2):
"""
True if two arrays have the same shape and elements, False otherwise.
Parameters
----------
a1, a2 : array_like
Input arrays.
Returns
-------
b : bool
Returns True if the arrays are equal.
See Also
--------
allclose: Returns True if two arrays are element-wise equal within a
tolerance.
array_equiv: Returns True if input arrays are shape consistent and all
elements equal.
Examples
--------
>>> np.array_equal([1, 2], [1, 2])
True
>>> np.array_equal(np.array([1, 2]), np.array([1, 2]))
True
>>> np.array_equal([1, 2], [1, 2, 3])
False
>>> np.array_equal([1, 2], [1, 4])
False
"""
try:
a1, a2 = asarray(a1), asarray(a2)
except:
return False
if a1.shape != a2.shape:
return False
return bool(asarray(a1 == a2).all())
def array_equiv(a1, a2):
"""
Returns True if input arrays are shape consistent and all elements equal.
Shape consistent means they are either the same shape, or one input array
can be broadcasted to create the same shape as the other one.
Parameters
----------
a1, a2 : array_like
Input arrays.
Returns
-------
out : bool
True if equivalent, False otherwise.
Examples
--------
>>> np.array_equiv([1, 2], [1, 2])
True
>>> np.array_equiv([1, 2], [1, 3])
False
Showing the shape equivalence:
>>> np.array_equiv([1, 2], [[1, 2], [1, 2]])
True
>>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]])
False
>>> np.array_equiv([1, 2], [[1, 2], [1, 3]])
False
"""
try:
a1, a2 = asarray(a1), asarray(a2)
except:
return False
try:
multiarray.broadcast(a1, a2)
except:
return False
return bool(asarray(a1 == a2).all())
_errdict = {"ignore":ERR_IGNORE,
"warn":ERR_WARN,
"raise":ERR_RAISE,
"call":ERR_CALL,
"print":ERR_PRINT,
"log":ERR_LOG}
_errdict_rev = {}
for key in _errdict.keys():
_errdict_rev[_errdict[key]] = key
del key
def seterr(all=None, divide=None, over=None, under=None, invalid=None):
"""
Set how floating-point errors are handled.
Note that operations on integer scalar types (such as `int16`) are
handled like floating point, and are affected by these settings.
Parameters
----------
all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Set treatment for all types of floating-point errors at once:
- ignore: Take no action when the exception occurs.
- warn: Print a `RuntimeWarning` (via the Python `warnings` module).
- raise: Raise a `FloatingPointError`.
- call: Call a function specified using the `seterrcall` function.
- print: Print a warning directly to ``stdout``.
- log: Record error in a Log object specified by `seterrcall`.
The default is not to change the current behavior.
divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for division by zero.
over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for floating-point overflow.
under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for floating-point underflow.
invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for invalid floating-point operation.
Returns
-------
old_settings : dict
Dictionary containing the old settings.
See also
--------
seterrcall : Set a callback function for the 'call' mode.
geterr, geterrcall, errstate
Notes
-----
The floating-point exceptions are defined in the IEEE 754 standard [1]:
- Division by zero: infinite result obtained from finite numbers.
- Overflow: result too large to be expressed.
- Underflow: result so close to zero that some precision
was lost.
- Invalid operation: result is not an expressible number, typically
indicates that a NaN was produced.
.. [1] http://en.wikipedia.org/wiki/IEEE_754
Examples
--------
>>> old_settings = np.seterr(all='ignore') #seterr to known value
>>> np.seterr(over='raise')
{'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore',
'under': 'ignore'}
>>> np.seterr(**old_settings) # reset to default
{'over': 'raise', 'divide': 'ignore', 'invalid': 'ignore', 'under': 'ignore'}
>>> np.int16(32000) * np.int16(3)
30464
>>> old_settings = np.seterr(all='warn', over='raise')
>>> np.int16(32000) * np.int16(3)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: overflow encountered in short_scalars
>>> old_settings = np.seterr(all='print')
>>> np.geterr()
{'over': 'print', 'divide': 'print', 'invalid': 'print', 'under': 'print'}
>>> np.int16(32000) * np.int16(3)
Warning: overflow encountered in short_scalars
30464
"""
pyvals = umath.geterrobj()
old = geterr()
if divide is None:
divide = all or old['divide']
if over is None:
over = all or old['over']
if under is None:
under = all or old['under']
if invalid is None:
invalid = all or old['invalid']
maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) +
(_errdict[over] << SHIFT_OVERFLOW) +
(_errdict[under] << SHIFT_UNDERFLOW) +
(_errdict[invalid] << SHIFT_INVALID))
pyvals[1] = maskvalue
umath.seterrobj(pyvals)
return old
def geterr():
"""
Get the current way of handling floating-point errors.
Returns
-------
res : dict
A dictionary with keys "divide", "over", "under", and "invalid",
whose values are from the strings "ignore", "print", "log", "warn",
"raise", and "call". The keys represent possible floating-point
exceptions, and the values define how these exceptions are handled.
See Also
--------
geterrcall, seterr, seterrcall
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterr()
{'over': 'warn', 'divide': 'warn', 'invalid': 'warn',
'under': 'ignore'}
>>> np.arange(3.) / np.arange(3.)
array([ NaN, 1., 1.])
>>> oldsettings = np.seterr(all='warn', over='raise')
>>> np.geterr()
{'over': 'raise', 'divide': 'warn', 'invalid': 'warn', 'under': 'warn'}
>>> np.arange(3.) / np.arange(3.)
__main__:1: RuntimeWarning: invalid value encountered in divide
array([ NaN, 1., 1.])
"""
maskvalue = umath.geterrobj()[1]
mask = 7
res = {}
val = (maskvalue >> SHIFT_DIVIDEBYZERO) & mask
res['divide'] = _errdict_rev[val]
val = (maskvalue >> SHIFT_OVERFLOW) & mask
res['over'] = _errdict_rev[val]
val = (maskvalue >> SHIFT_UNDERFLOW) & mask
res['under'] = _errdict_rev[val]
val = (maskvalue >> SHIFT_INVALID) & mask
res['invalid'] = _errdict_rev[val]
return res
def setbufsize(size):
"""
Set the size of the buffer used in ufuncs.
Parameters
----------
size : int
Size of buffer.
"""
if size > 10e6:
raise ValueError("Buffer size, %s, is too big." % size)
if size < 5:
raise ValueError("Buffer size, %s, is too small." % size)
if size % 16 != 0:
raise ValueError("Buffer size, %s, is not a multiple of 16." % size)
pyvals = umath.geterrobj()
old = getbufsize()
pyvals[0] = size
umath.seterrobj(pyvals)
return old
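# A minimal usage sketch (assumes the public numpy namespace as np; the old
# buffer size is returned so it can be restored afterwards):
#   old = np.setbufsize(16384)
#   ...                        # run ufunc-heavy work with the larger buffer
#   np.setbufsize(old)         # restore the previous size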
def getbufsize():
"""
Return the size of the buffer used in ufuncs.
Returns
-------
getbufsize : int
Size of ufunc buffer in bytes.
"""
return umath.geterrobj()[0]
def seterrcall(func):
"""
Set the floating-point error callback function or log object.
There are two ways to capture floating-point error messages. The first
is to set the error-handler to 'call', using `seterr`. Then, set
the function to call using this function.
The second is to set the error-handler to 'log', using `seterr`.
Floating-point errors then trigger a call to the 'write' method of
the provided object.
Parameters
----------
func : callable f(err, flag) or object with write method
Function to call upon floating-point errors ('call'-mode) or
object whose 'write' method is used to log such message ('log'-mode).
The call function takes two arguments. The first is a string describing the
type of error (such as "divide by zero", "overflow", "underflow", or "invalid value"),
and the second is the status flag. The flag is a byte, whose four
least-significant bits indicate the type of error, one of "divide", "over",
"under", "invalid"::
[0 0 0 0 divide over under invalid]
In other words, ``flags = divide + 2*over + 4*under + 8*invalid``.
If an object is provided, its write method should take one argument,
a string.
Returns
-------
h : callable, log instance or None
The old error handler.
See Also
--------
seterr, geterr, geterrcall
Examples
--------
Callback upon error:
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> saved_handler = np.seterrcall(err_handler)
>>> save_err = np.seterr(all='call')
>>> np.array([1, 2, 3]) / 0.0
Floating point error (divide by zero), with flag 1
array([ Inf, Inf, Inf])
>>> np.seterrcall(saved_handler)
<function err_handler at 0x...>
>>> np.seterr(**save_err)
{'over': 'call', 'divide': 'call', 'invalid': 'call', 'under': 'call'}
Log error message:
>>> class Log(object):
... def write(self, msg):
... print("LOG: %s" % msg)
...
>>> log = Log()
>>> saved_handler = np.seterrcall(log)
>>> save_err = np.seterr(all='log')
>>> np.array([1, 2, 3]) / 0.0
LOG: Warning: divide by zero encountered in divide
<BLANKLINE>
array([ Inf, Inf, Inf])
>>> np.seterrcall(saved_handler)
<__main__.Log object at 0x...>
>>> np.seterr(**save_err)
{'over': 'log', 'divide': 'log', 'invalid': 'log', 'under': 'log'}
"""
if func is not None and not isinstance(func, collections.Callable):
if not hasattr(func, 'write') or not isinstance(func.write, collections.Callable):
raise ValueError("Only callable can be used as callback")
pyvals = umath.geterrobj()
old = geterrcall()
pyvals[2] = func
umath.seterrobj(pyvals)
return old
def geterrcall():
"""
Return the current callback function used on floating-point errors.
When the error handling for a floating-point error (one of "divide",
"over", "under", or "invalid") is set to 'call' or 'log', the function
that is called or the log instance that is written to is returned by
`geterrcall`. This function or log instance has been set with
`seterrcall`.
Returns
-------
errobj : callable, log instance or None
The current error handler. If no handler was set through `seterrcall`,
``None`` is returned.
See Also
--------
seterrcall, seterr, geterr
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterrcall() # we did not yet set a handler, returns None
>>> oldsettings = np.seterr(all='call')
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
>>> oldhandler = np.seterrcall(err_handler)
>>> np.array([1, 2, 3]) / 0.0
Floating point error (divide by zero), with flag 1
array([ Inf, Inf, Inf])
>>> cur_handler = np.geterrcall()
>>> cur_handler is err_handler
True
"""
return umath.geterrobj()[2]
class _unspecified(object):
pass
_Unspecified = _unspecified()
class errstate(object):
"""
errstate(**kwargs)
Context manager for floating-point error handling.
Using an instance of `errstate` as a context manager allows statements in
that context to execute with a known error handling behavior. Upon entering
the context the error handling is set with `seterr` and `seterrcall`, and
upon exiting it is reset to what it was before.
Parameters
----------
kwargs : {divide, over, under, invalid}
Keyword arguments. The valid keywords are the possible floating-point
exceptions. Each keyword should have a string value that defines the
treatment for the particular error. Possible values are
{'ignore', 'warn', 'raise', 'call', 'print', 'log'}.
See Also
--------
seterr, geterr, seterrcall, geterrcall
Notes
-----
The ``with`` statement was introduced in Python 2.5, and can only be used
there by importing it: ``from __future__ import with_statement``. In
earlier Python versions the ``with`` statement is not available.
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> from __future__ import with_statement # use 'with' in Python 2.5
>>> olderr = np.seterr(all='ignore') # Set error handling to known state.
>>> np.arange(3) / 0.
array([ NaN, Inf, Inf])
>>> with np.errstate(divide='warn'):
... np.arange(3) / 0.
...
__main__:2: RuntimeWarning: divide by zero encountered in divide
array([ NaN, Inf, Inf])
>>> np.sqrt(-1)
nan
>>> with np.errstate(invalid='raise'):
... np.sqrt(-1)
Traceback (most recent call last):
File "<stdin>", line 2, in <module>
FloatingPointError: invalid value encountered in sqrt
Outside the context the error handling behavior has not changed:
>>> np.geterr()
{'over': 'warn', 'divide': 'warn', 'invalid': 'warn',
'under': 'ignore'}
"""
# Note that we don't want to run the above doctests because they will fail
# without a from __future__ import with_statement
def __init__(self, **kwargs):
self.call = kwargs.pop('call', _Unspecified)
self.kwargs = kwargs
def __enter__(self):
self.oldstate = seterr(**self.kwargs)
if self.call is not _Unspecified:
self.oldcall = seterrcall(self.call)
def __exit__(self, *exc_info):
seterr(**self.oldstate)
if self.call is not _Unspecified:
seterrcall(self.oldcall)
def _setdef():
defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT, None]
umath.seterrobj(defval)
# set the default values
_setdef()
Inf = inf = infty = Infinity = PINF
nan = NaN = NAN
False_ = bool_(False)
True_ = bool_(True)
from .umath import *
from .numerictypes import *
from . import fromnumeric
from .fromnumeric import *
extend_all(fromnumeric)
extend_all(umath)
extend_all(numerictypes)
```
#### File: common-code/LoadTest/LoadTest__CloudGemPlayerAccount.py
```python
from cloud_gem_load_test.service_api_call import ServiceApiCall
from random import randint
GEM_NAME = 'CloudGemPlayerAccount'
def add_transaction_handlers(handler_context, transaction_handlers):
service_api_name = GEM_NAME + '.ServiceApi'
base_url = handler_context.mappings.get(service_api_name, {}).get('PhysicalResourceId')
if not base_url:
raise RuntimeError('Missing PhysicalResourceId for ' + service_api_name)
transaction_handlers.append(ServiceStatus(base_url))
transaction_handlers.append(ChangePlayerName(base_url))
transaction_handlers.append(GetAccount(base_url))
class ServiceStatus(ServiceApiCall):
def __init__(self, base_url):
ServiceApiCall.__init__(self, name=GEM_NAME + '.ServiceStatus', method='get', base_url=base_url, path='/service/status')
class GetAccount(ServiceApiCall):
def __init__(self, base_url):
ServiceApiCall.__init__(self, name=GEM_NAME + '.GetAccount', method='get', base_url=base_url, path='/account')
class ChangePlayerName(ServiceApiCall):
def __init__(self, base_url):
ServiceApiCall.__init__(self, name=GEM_NAME + '.ChangePlayerName', method='put', base_url=base_url, path='/account')
def build_request(self):
request = ServiceApiCall.build_request(self)
request['body'] = {'PlayerName': 'Player' + str(randint(0,1000))}
return request
```
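A hedged sketch of how one of the handlers above produces its request; the base URL below is a placeholder, the import path is assumed from the file name, and `ServiceApiCall.build_request` is assumed to return a plain dict that the override then extends:
```python
from LoadTest__CloudGemPlayerAccount import ChangePlayerName

handler = ChangePlayerName('https://example.execute-api.invalid')  # placeholder URL
request = handler.build_request()
# request['body'] is expected to look like {'PlayerName': 'Player<0-1000>'}
```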
#### File: common-code/PollyCommon/text_to_speech_s3.py
```python
import os
import boto3
from botocore.client import Config
def get_s3_client():
if not hasattr(get_s3_client, 'client'):
current_region = os.environ.get('AWS_REGION')
if current_region is None:
raise RuntimeError('AWS region is empty')
configuration = Config(signature_version="s3v4", s3={'addressing_style': 'path'})
get_s3_client.client = boto3.client('s3', region_name=current_region, config=configuration)
return get_s3_client.client
```
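The helper above lazily builds a single SigV4, path-style S3 client per process. A minimal usage sketch; the bucket name, key, and region are placeholders, not values taken from the gem:
```python
import os
from text_to_speech_s3 import get_s3_client

os.environ.setdefault('AWS_REGION', 'us-east-1')  # normally provided by the Lambda runtime

s3 = get_s3_client()
url = s3.generate_presigned_url(
    'get_object',
    Params={'Bucket': 'example-tts-bucket', 'Key': 'speech/example.mp3'},
    ExpiresIn=300)
```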
#### File: AWSIoTPythonSDK/exception/AWSIoTExceptions.py
```python
import AWSIoTPythonSDK.exception.operationTimeoutException as operationTimeoutException
import AWSIoTPythonSDK.exception.operationError as operationError
# Serial Exception
class acceptTimeoutException(Exception):
def __init__(self, msg="Accept Timeout"):
self.message = msg
# MQTT Operation Timeout Exception
class connectTimeoutException(operationTimeoutException.operationTimeoutException):
def __init__(self, msg="Connect Timeout"):
self.message = msg
class disconnectTimeoutException(operationTimeoutException.operationTimeoutException):
def __init__(self, msg="Disconnect Timeout"):
self.message = msg
class publishTimeoutException(operationTimeoutException.operationTimeoutException):
def __init__(self, msg="Publish Timeout"):
self.message = msg
class subscribeTimeoutException(operationTimeoutException.operationTimeoutException):
def __init__(self, msg="Subscribe Timeout"):
self.message = msg
class unsubscribeTimeoutException(operationTimeoutException.operationTimeoutException):
def __init__(self, msg="Unsubscribe Timeout"):
self.message = msg
# MQTT Operation Error
class connectError(operationError.operationError):
def __init__(self, errorCode):
self.message = "Connect Error: " + str(errorCode)
class disconnectError(operationError.operationError):
def __init__(self, errorCode):
self.message = "Disconnect Error: " + str(errorCode)
class publishError(operationError.operationError):
def __init__(self, errorCode):
self.message = "Publish Error: " + str(errorCode)
class publishQueueFullException(operationError.operationError):
def __init__(self):
self.message = "Internal Publish Queue Full"
class publishQueueDisabledException(operationError.operationError):
def __init__(self):
self.message = "Offline publish request dropped because queueing is disabled"
class subscribeError(operationError.operationError):
def __init__(self, errorCode):
self.message = "Subscribe Error: " + str(errorCode)
class subscribeQueueFullException(operationError.operationError):
def __init__(self):
self.message = "Internal Subscribe Queue Full"
class subscribeQueueDisabledException(operationError.operationError):
def __init__(self):
self.message = "Offline subscribe request dropped because queueing is disabled"
class unsubscribeError(operationError.operationError):
def __init__(self, errorCode):
self.message = "Unsubscribe Error: " + str(errorCode)
class unsubscribeQueueFullException(operationError.operationError):
def __init__(self):
self.message = "Internal Unsubscribe Queue Full"
class unsubscribeQueueDisabledException(operationError.operationError):
def __init__(self):
self.message = "Offline unsubscribe request dropped because queueing is disabled"
# Websocket Error
class wssNoKeyInEnvironmentError(operationError.operationError):
def __init__(self):
self.message = "No AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY detected in $ENV."
class wssHandShakeError(operationError.operationError):
def __init__(self):
self.message = "Error in WSS handshake."
# Greengrass Discovery Error
class DiscoveryDataNotFoundException(operationError.operationError):
def __init__(self):
self.message = "No discovery data found"
class DiscoveryTimeoutException(operationTimeoutException.operationTimeoutException):
def __init__(self, message="Discovery request timed out"):
self.message = message
class DiscoveryInvalidRequestException(operationError.operationError):
def __init__(self):
self.message = "Invalid discovery request"
class DiscoveryUnauthorizedException(operationError.operationError):
def __init__(self):
self.message = "Discovery request not authorized"
class DiscoveryThrottlingException(operationError.operationError):
def __init__(self):
self.message = "Too many discovery requests"
class DiscoveryFailure(operationError.operationError):
def __init__(self, message):
self.message = message
```
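All timeout classes above derive from `operationTimeoutException` and all error classes from `operationError`, so callers can catch the two base classes instead of enumerating every subtype. A small, hedged sketch follows; the connect callable is hypothetical.
```python
# Illustrative only: do_connect is a stand-in for whatever call may raise
# the SDK exceptions defined above.
import AWSIoTPythonSDK.exception.operationTimeoutException as operationTimeoutException
import AWSIoTPythonSDK.exception.operationError as operationError

def guarded(do_connect):
    try:
        do_connect()
    except operationTimeoutException.operationTimeoutException as e:
        # Catches connectTimeoutException, publishTimeoutException, etc.
        print('Timed out: ' + e.message)
    except operationError.operationError as e:
        # Catches connectError, publishQueueFullException, etc.
        print('Failed: ' + e.message)
```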
#### File: aws_iot_device_sdk_python/AWSIoTPythonSDK/MQTTLib.py
```python
from AWSIoTPythonSDK.core.util.providers import CertificateCredentialsProvider
from AWSIoTPythonSDK.core.util.providers import IAMCredentialsProvider
from AWSIoTPythonSDK.core.util.providers import EndpointProvider
from AWSIoTPythonSDK.core.protocol.mqtt_core import MqttCore
import AWSIoTPythonSDK.core.shadow.shadowManager as shadowManager
import AWSIoTPythonSDK.core.shadow.deviceShadow as deviceShadow
# Constants
# - Protocol types:
MQTTv3_1 = 3
MQTTv3_1_1 = 4
DROP_OLDEST = 0
DROP_NEWEST = 1
class AWSIoTMQTTClient:
def __init__(self, clientID, protocolType=MQTTv3_1_1, useWebsocket=False, cleanSession=True):
"""
The client class that connects to and accesses AWS IoT over MQTT v3.1/3.1.1.
The following connection types are available:
- TLSv1.2 Mutual Authentication
X.509 certificate-based secured MQTT connection to AWS IoT
- Websocket SigV4
IAM credential-based secured MQTT connection over Websocket to AWS IoT
It provides basic synchronous MQTT operations in the classic MQTT publish-subscribe
model, along with configurations of on-top features:
- Auto reconnect/resubscribe
- Progressive reconnect backoff
- Offline publish requests queueing with draining
**Syntax**
.. code:: python
import AWSIoTPythonSDK.MQTTLib as AWSIoTPyMQTT
# Create an AWS IoT MQTT Client using TLSv1.2 Mutual Authentication
myAWSIoTMQTTClient = AWSIoTPyMQTT.AWSIoTMQTTClient("testIoTPySDK")
# Create an AWS IoT MQTT Client using Websocket SigV4
myAWSIoTMQTTClient = AWSIoTPyMQTT.AWSIoTMQTTClient("testIoTPySDK", useWebsocket=True)
**Parameters**
*clientID* - String that denotes the client identifier used to connect to AWS IoT.
If an empty string is provided, the client id for this connection will be randomly generated
on the server side.
*protocolType* - MQTT version in use for this connection. Could be :code:`AWSIoTPythonSDK.MQTTLib.MQTTv3_1` or :code:`AWSIoTPythonSDK.MQTTLib.MQTTv3_1_1`
*useWebsocket* - Boolean that denotes enabling MQTT over Websocket SigV4 or not.
**Returns**
:code:`AWSIoTPythonSDK.MQTTLib.AWSIoTMQTTClient` object
"""
self._mqtt_core = MqttCore(clientID, cleanSession, protocolType, useWebsocket)
# Configuration APIs
def configureLastWill(self, topic, payload, QoS, retain=False):
"""
**Description**
Used to configure the last will topic, payload and QoS of the client. Should be called before connect.
**Syntax**
.. code:: python
myAWSIoTMQTTClient.configureLastWill("last/Will/Topic", "lastWillPayload", 0)
**Parameters**
*topic* - Topic name that last will publishes to.
*payload* - Payload to publish for last will.
*QoS* - Quality of Service. Could be 0 or 1.
**Returns**
None
"""
self._mqtt_core.configure_last_will(topic, payload, QoS, retain)
def clearLastWill(self):
"""
**Description**
Used to clear the last will configuration that is previously set through configureLastWill.
**Syntax**
.. code:: python
myAWSIoTMQTTClient.clearLastWill()
**Parameter**
None
**Returns**
None
"""
self._mqtt_core.clear_last_will()
def configureEndpoint(self, hostName, portNumber):
"""
**Description**
Used to configure the host name and port number the client tries to connect to. Should be called
before connect.
**Syntax**
.. code:: python
myAWSIoTMQTTClient.configureEndpoint("random.iot.region.amazonaws.com", 8883)
**Parameters**
*hostName* - String that denotes the host name of the user-specific AWS IoT endpoint.
*portNumber* - Integer that denotes the port number to connect to. Could be :code:`8883` for
TLSv1.2 Mutual Authentication or :code:`443` for Websocket SigV4.
**Returns**
None
"""
endpoint_provider = EndpointProvider()
endpoint_provider.set_host(hostName)
endpoint_provider.set_port(portNumber)
self._mqtt_core.configure_endpoint(endpoint_provider)
def configureIAMCredentials(self, AWSAccessKeyID, AWSSecretAccessKey, AWSSessionToken=""):
"""
**Description**
Used to configure/update the custom IAM credentials for Websocket SigV4 connection to
AWS IoT. Should be called before connect.
**Syntax**
.. code:: python
myAWSIoTMQTTClient.configureIAMCredentials(obtainedAccessKeyID, obtainedSecretAccessKey, obtainedSessionToken)
.. note::
Hard-coding credentials into custom script is NOT recommended. Please use AWS Cognito identity service
or other credential provider.
**Parameters**
*AWSAccessKeyID* - AWS Access Key Id from user-specific IAM credentials.
*AWSSecretAccessKey* - AWS Secret Access Key from user-specific IAM credentials.
*AWSSessionToken* - AWS Session Token for temporary authentication from STS.
**Returns**
None
"""
iam_credentials_provider = IAMCredentialsProvider()
iam_credentials_provider.set_access_key_id(AWSAccessKeyID)
iam_credentials_provider.set_secret_access_key(AWSSecretAccessKey)
iam_credentials_provider.set_session_token(AWSSessionToken)
self._mqtt_core.configure_iam_credentials(iam_credentials_provider)
def configureCredentials(self, CAFilePath, KeyPath="", CertificatePath=""): # Should be good for MutualAuth certs config and Websocket rootCA config
"""
**Description**
Used to configure the rootCA, private key and certificate files. Should be called before connect.
**Syntax**
.. code:: python
myAWSIoTMQTTClient.configureCredentials("PATH/TO/ROOT_CA", "PATH/TO/PRIVATE_KEY", "PATH/TO/CERTIFICATE")
**Parameters**
*CAFilePath* - Path to read the root CA file. Required for all connection types.
*KeyPath* - Path to read the private key. Required for X.509 certificate based connection.
*CertificatePath* - Path to read the certificate. Required for X.509 certificate based connection.
**Returns**
None
"""
cert_credentials_provider = CertificateCredentialsProvider()
cert_credentials_provider.set_ca_path(CAFilePath)
cert_credentials_provider.set_key_path(KeyPath)
cert_credentials_provider.set_cert_path(CertificatePath)
self._mqtt_core.configure_cert_credentials(cert_credentials_provider)
def configureAutoReconnectBackoffTime(self, baseReconnectQuietTimeSecond, maxReconnectQuietTimeSecond, stableConnectionTimeSecond):
"""
**Description**
Used to configure the auto-reconnect backoff timing. Should be called before connect.
**Syntax**
.. code:: python
# Configure the auto-reconnect backoff to start with 1 second and use 128 seconds as a maximum back off time.
# Connection over 20 seconds is considered stable and will reset the back off time back to its base.
myAWSIoTMQTTClient.configureAutoReconnectBackoffTime(1, 128, 20)
**Parameters**
*baseReconnectQuietTimeSecond* - The initial back off time to start with, in seconds.
Should be less than the stableConnectionTime.
*maxReconnectQuietTimeSecond* - The maximum back off time, in seconds.
*stableConnectionTimeSecond* - The number of seconds for a connection to last to be considered as stable.
Back off time will be reset to base once the connection is stable.
**Returns**
None
"""
self._mqtt_core.configure_reconnect_back_off(baseReconnectQuietTimeSecond, maxReconnectQuietTimeSecond, stableConnectionTimeSecond)
def configureOfflinePublishQueueing(self, queueSize, dropBehavior=DROP_NEWEST):
"""
**Description**
Used to configure the queue size and drop behavior for the offline requests queueing. Should be
called before connect. Queueable offline requests include publish, subscribe and unsubscribe.
**Syntax**
.. code:: python
import AWSIoTPythonSDK.MQTTLib as AWSIoTPyMQTT
# Configure the offline queue for publish requests to be 20 in size and drop the oldest
# request when the queue is full.
myAWSIoTMQTTClient.configureOfflinePublishQueueing(20, AWSIoTPyMQTT.DROP_OLDEST)
**Parameters**
*queueSize* - Size of the queue for offline publish requests queueing.
If set to 0, the queue is disabled. If set to -1, the queue size is set to be infinite.
*dropBehavior* - the type of drop behavior when the queue is full.
Could be :code:`AWSIoTPythonSDK.core.util.enums.DropBehaviorTypes.DROP_OLDEST` or
:code:`AWSIoTPythonSDK.core.util.enums.DropBehaviorTypes.DROP_NEWEST`.
**Returns**
None
"""
self._mqtt_core.configure_offline_requests_queue(queueSize, dropBehavior)
def configureDrainingFrequency(self, frequencyInHz):
"""
**Description**
Used to configure the draining speed to clear up the queued requests when the connection is back.
Should be called before connect.
**Syntax**
.. code:: python
# Configure the draining speed to be 2 requests/second
myAWSIoTMQTTClient.configureDrainingFrequency(2)
.. note::
Make sure the draining speed is fast enough and faster than the publish rate. Slow draining
could result in an infinite draining process.
**Parameters**
*frequencyInHz* - The draining speed to clear the queued requests, in requests/second.
**Returns**
None
"""
self._mqtt_core.configure_draining_interval_sec(1/float(frequencyInHz))
def configureConnectDisconnectTimeout(self, timeoutSecond):
"""
**Description**
Used to configure the time in seconds to wait for a CONNACK or a disconnect to complete.
Should be called before connect.
**Syntax**
.. code:: python
# Configure connect/disconnect timeout to be 10 seconds
myAWSIoTMQTTClient.configureConnectDisconnectTimeout(10)
**Parameters**
*timeoutSecond* - Time in seconds to wait for a CONNACK or a disconnect to complete.
**Returns**
None
"""
self._mqtt_core.configure_connect_disconnect_timeout_sec(timeoutSecond)
def configureMQTTOperationTimeout(self, timeoutSecond):
"""
**Description**
Used to configure the timeout in seconds for MQTT QoS 1 publish, subscribe and unsubscribe.
Should be called before connect.
**Syntax**
.. code:: python
# Configure MQTT operation timeout to be 5 seconds
myAWSIoTMQTTClient.configureMQTTOperationTimeout(5)
**Parameters**
*timeoutSecond* - Time in seconds to wait for a PUBACK/SUBACK/UNSUBACK.
**Returns**
None
"""
self._mqtt_core.configure_operation_timeout_sec(timeoutSecond)
def configureUsernamePassword(self, username, password=None):
"""
**Description**
Used to configure the username and password used in CONNECT packet.
**Syntax**
.. code:: python
# Configure user name and password
myAWSIoTMQTTClient.configureUsernamePassword("myUsername", "myPassword")
**Parameters**
*username* - Username used in the username field of CONNECT packet.
*password* - Password used in the password field of CONNECT packet.
**Returns**
None
"""
self._mqtt_core.configure_username_password(username, password)
def enableMetricsCollection(self):
"""
**Description**
Used to enable SDK metrics collection. Username field in CONNECT packet will be used to append the SDK name
and SDK version in use and communicate to AWS IoT cloud. This metrics collection is enabled by default.
**Syntax**
.. code:: python
myAWSIoTMQTTClient.enableMetricsCollection()
**Parameters**
None
**Returns**
None
"""
self._mqtt_core.enable_metrics_collection()
def disableMetricsCollection(self):
"""
**Description**
Used to disable SDK metrics collection.
**Syntax**
.. code:: python
myAWSIoTMQTTClient.disableMetricsCollection()
**Parameters**
None
**Returns**
None
"""
self._mqtt_core.disable_metrics_collection()
# MQTT functionality APIs
def connect(self, keepAliveIntervalSecond=600):
"""
**Description**
Connect to AWS IoT, with user-specific keepalive interval configuration.
**Syntax**
.. code:: python
# Connect to AWS IoT with default keepalive set to 600 seconds
myAWSIoTMQTTClient.connect()
# Connect to AWS IoT with keepalive interval set to 1200 seconds
myAWSIoTMQTTClient.connect(1200)
**Parameters**
*keepAliveIntervalSecond* - Time in seconds for interval of sending MQTT ping request.
Default set to 600 seconds.
**Returns**
True if the connect attempt succeeded. False if failed.
"""
self._load_callbacks()
return self._mqtt_core.connect(keepAliveIntervalSecond)
def connectAsync(self, keepAliveIntervalSecond=600, ackCallback=None):
"""
**Description**
Connect asynchronously to AWS IoT, with user-specific keepalive interval configuration and CONNACK callback.
**Syntax**
.. code:: python
# Connect to AWS IoT with default keepalive set to 600 seconds and a custom CONNACK callback
myAWSIoTMQTTClient.connectAsync(ackCallback=my_connack_callback)
# Connect to AWS IoT with keepalive interval set to 1200 seconds and a custom CONNACK callback
myAWSIoTMQTTClient.connectAsync(keepAliveIntervalSecond=1200, ackCallback=myConnackCallback)
**Parameters**
*keepAliveIntervalSecond* - Time in seconds for interval of sending MQTT ping request.
Default set to 600 seconds.
*ackCallback* - Callback to be invoked when the client receives a CONNACK. Should be in form
:code:`customCallback(mid, data)`, where :code:`mid` is the packet id for the connect request
and :code:`data` is the connect result code.
**Returns**
Connect request packet id, for tracking purpose in the corresponding callback.
"""
self._load_callbacks()
return self._mqtt_core.connect_async(keepAliveIntervalSecond, ackCallback)
def _load_callbacks(self):
self._mqtt_core.on_online = self.onOnline
self._mqtt_core.on_offline = self.onOffline
self._mqtt_core.on_message = self.onMessage
def disconnect(self):
"""
**Description**
Disconnect from AWS IoT.
**Syntax**
.. code:: python
myAWSIoTMQTTClient.disconnect()
**Parameters**
None
**Returns**
True if the disconnect attempt succeeded. False if failed.
"""
return self._mqtt_core.disconnect()
def disconnectAsync(self, ackCallback=None):
"""
**Description**
Disconnect asynchronously to AWS IoT.
**Syntax**
.. code:: python
myAWSIoTMQTTClient.disconnectAsync(ackCallback=myDisconnectCallback)
**Parameters**
*ackCallback* - Callback to be invoked when the client finishes sending disconnect and internal clean-up.
Should be in form :code:`customCallback(mid, data)`, where :code:`mid` is the packet id for the disconnect
request and :code:`data` is the disconnect result code.
**Returns**
Disconnect request packet id, for tracking purpose in the corresponding callback.
"""
return self._mqtt_core.disconnect_async(ackCallback)
def publish(self, topic, payload, QoS):
"""
**Description**
Publish a new message to the desired topic with QoS.
**Syntax**
.. code:: python
# Publish a QoS0 message "myPayload" to topic "myTopic"
myAWSIoTMQTTClient.publish("myTopic", "myPayload", 0)
# Publish a QoS1 message "myPayloadWithQos1" to topic "myTopic/sub"
myAWSIoTMQTTClient.publish("myTopic/sub", "myPayloadWithQos1", 1)
**Parameters**
*topic* - Topic name to publish to.
*payload* - Payload to publish.
*QoS* - Quality of Service. Could be 0 or 1.
**Returns**
True if the publish request has been sent to paho. False if the request did not reach paho.
"""
return self._mqtt_core.publish(topic, payload, QoS, False) # Disable retain for publish by now
def publishAsync(self, topic, payload, QoS, ackCallback=None):
"""
**Description**
Publish a new message asynchronously to the desired topic with QoS and PUBACK callback. Note that the ack
callback configuration for a QoS0 publish request will be ignored as there is no PUBACK reception.
**Syntax**
.. code:: python
# Publish a QoS0 message "myPayload" to topic "myTopic"
myAWSIoTMQTTClient.publishAsync("myTopic", "myPayload", 0)
# Publish a QoS1 message "myPayloadWithQos1" to topic "myTopic/sub", with custom PUBACK callback
myAWSIoTMQTTClient.publishAsync("myTopic/sub", "myPayloadWithQos1", 1, ackCallback=myPubackCallback)
**Parameters**
*topic* - Topic name to publish to.
*payload* - Payload to publish.
*QoS* - Quality of Service. Could be 0 or 1.
*ackCallback* - Callback to be invoked when the client receives a PUBACK. Should be in form
:code:`customCallback(mid)`, where :code:`mid` is the packet id for the publish request.
**Returns**
Publish request packet id, for tracking purpose in the corresponding callback.
"""
return self._mqtt_core.publish_async(topic, payload, QoS, False, ackCallback)
def subscribe(self, topic, QoS, callback):
"""
**Description**
Subscribe to the desired topic and register a callback.
**Syntax**
.. code:: python
# Subscribe to "myTopic" with QoS0 and register a callback
myAWSIoTMQTTClient.subscribe("myTopic", 0, customCallback)
# Subscribe to "myTopic/#" with QoS1 and register a callback
myAWSIoTMQTTClient.subscribe("myTopic/#", 1, customCallback)
**Parameters**
*topic* - Topic name or filter to subscribe to.
*QoS* - Quality of Service. Could be 0 or 1.
*callback* - Function to be called when a new message for the subscribed topic
comes in. Should be in form :code:`customCallback(client, userdata, message)`, where
:code:`message` contains :code:`topic` and :code:`payload`. Note that :code:`client` and :code:`userdata` are
here just to be aligned with the underneath Paho callback function signature. These fields are pending to be
deprecated and should not be depended on.
**Returns**
True if the subscribe attempt succeeded. False if failed.
"""
return self._mqtt_core.subscribe(topic, QoS, callback)
def subscribeAsync(self, topic, QoS, ackCallback=None, messageCallback=None):
"""
**Description**
Subscribe to the desired topic and register a message callback with SUBACK callback.
**Syntax**
.. code:: python
# Subscribe to "myTopic" with QoS0, custom SUBACK callback and a message callback
myAWSIoTMQTTClient.subscribeAsync("myTopic", 0, ackCallback=mySubackCallback, messageCallback=customMessageCallback)
# Subscribe to "myTopic/#" with QoS1, custom SUBACK callback and a message callback
myAWSIoTMQTTClient.subscribeAsync("myTopic/#", 1, ackCallback=mySubackCallback, messageCallback=customMessageCallback)
**Parameters**
*topic* - Topic name or filter to subscribe to.
*QoS* - Quality of Service. Could be 0 or 1.
*ackCallback* - Callback to be invoked when the client receives a SUBACK. Should be in form
:code:`customCallback(mid, data)`, where :code:`mid` is the packet id for the subscribe request and
:code:`data` is the granted QoS for this subscription.
*messageCallback* - Function to be called when a new message for the subscribed topic
comes in. Should be in form :code:`customCallback(client, userdata, message)`, where
:code:`message` contains :code:`topic` and :code:`payload`. Note that :code:`client` and :code:`userdata` are
here just to be aligned with the underneath Paho callback function signature. These fields are pending to be
deprecated and should not be depended on.
**Returns**
Subscribe request packet id, for tracking purpose in the corresponding callback.
"""
return self._mqtt_core.subscribe_async(topic, QoS, ackCallback, messageCallback)
def unsubscribe(self, topic):
"""
**Description**
Unsubscribe from the desired topic.
**Syntax**
.. code:: python
myAWSIoTMQTTClient.unsubscribe("myTopic")
**Parameters**
*topic* - Topic name or filter to unsubscribe from.
**Returns**
True if the unsubscribe attempt succeeded. False if failed.
"""
return self._mqtt_core.unsubscribe(topic)
def unsubscribeAsync(self, topic, ackCallback=None):
"""
**Description**
Unsubscribe from the desired topic with UNSUBACK callback.
**Syntax**
.. code:: python
myAWSIoTMQTTClient.unsubscribeAsync("myTopic", ackCallback=myUnsubackCallback)
**Parameters**
*topic* - Topic name or filter to unsubscribe from.
*ackCallback* - Callback to be invoked when the client receives an UNSUBACK. Should be in form
:code:`customCallback(mid)`, where :code:`mid` is the packet id for the unsubscribe request.
**Returns**
Unsubscribe request packet id, for tracking purpose in the corresponding callback.
"""
return self._mqtt_core.unsubscribe_async(topic, ackCallback)
def onOnline(self):
"""
**Description**
Callback that gets called when the client is online. The callback registration should happen before calling
connect/connectAsync.
**Syntax**
.. code:: python
# Register an onOnline callback
myAWSIoTMQTTClient.onOnline = myOnOnlineCallback
**Parameters**
None
**Returns**
None
"""
pass
def onOffline(self):
"""
**Description**
Callback that gets called when the client is offline. The callback registration should happen before calling
connect/connectAsync.
**Syntax**
.. code:: python
# Register an onOffline callback
myAWSIoTMQTTClient.onOffline = myOnOfflineCallback
**Parameters**
None
**Returns**
None
"""
pass
def onMessage(self, message):
"""
**Description**
Callback that gets called when the client receives a new message. The callback registration should happen before
calling connect/connectAsync. This callback, if present, will always be triggered regardless of whether there is
any message callback registered upon the subscribe API call. Its purpose is to aggregate the processing of
received messages in one function.
**Syntax**
.. code:: python
# Register an onMessage callback
myAWSIoTMQTTClient.onMessage = myOnMessageCallback
**Parameters**
*message* - Received MQTT message. It contains the source topic as :code:`message.topic`, and the payload as
:code:`message.payload`.
**Returns**
None
"""
pass
class AWSIoTMQTTShadowClient:
def __init__(self, clientID, protocolType=MQTTv3_1_1, useWebsocket=False, cleanSession=True):
"""
The client class that manages device shadow and accesses its functionality in AWS IoT over MQTT v3.1/3.1.1.
It is built on top of the AWS IoT MQTT Client and exposes device shadow related operations.
It shares the same connection types, synchronous MQTT operations and partial on-top features
with the AWS IoT MQTT Client:
- Auto reconnect/resubscribe
Same as AWS IoT MQTT Client.
- Progressive reconnect backoff
Same as AWS IoT MQTT Client.
- Offline publish requests queueing with draining
Disabled by default. Queueing is not allowed for time-sensitive shadow requests/messages.
**Syntax**
.. code:: python
import AWSIoTPythonSDK.MQTTLib as AWSIoTPyMQTT
# Create an AWS IoT MQTT Shadow Client using TLSv1.2 Mutual Authentication
myAWSIoTMQTTShadowClient = AWSIoTPyMQTT.AWSIoTMQTTShadowClient("testIoTPySDK")
# Create an AWS IoT MQTT Shadow Client using Websocket SigV4
myAWSIoTMQTTShadowClient = AWSIoTPyMQTT.AWSIoTMQTTShadowClient("testIoTPySDK", useWebsocket=True)
**Parameters**
*clientID* - String that denotes the client identifier used to connect to AWS IoT.
If an empty string is provided, the client id for this connection will be randomly generated
on the server side.
*protocolType* - MQTT version in use for this connection. Could be :code:`AWSIoTPythonSDK.MQTTLib.MQTTv3_1` or :code:`AWSIoTPythonSDK.MQTTLib.MQTTv3_1_1`
*useWebsocket* - Boolean that denotes enabling MQTT over Websocket SigV4 or not.
**Returns**
AWSIoTPythonSDK.MQTTLib.AWSIoTMQTTShadowClient object
"""
# AWSIOTMQTTClient instance
self._AWSIoTMQTTClient = AWSIoTMQTTClient(clientID, protocolType, useWebsocket, cleanSession)
# Configure it to disable offline Publish Queueing
self._AWSIoTMQTTClient.configureOfflinePublishQueueing(0) # Disable queueing, no queueing for time-sensitive shadow messages
self._AWSIoTMQTTClient.configureDrainingFrequency(10)
# Now retrieve the configured mqttCore and init a shadowManager instance
self._shadowManager = shadowManager.shadowManager(self._AWSIoTMQTTClient._mqtt_core)
# Configuration APIs
def configureLastWill(self, topic, payload, QoS):
"""
**Description**
Used to configure the last will topic, payload and QoS of the client. Should be called before connect.
**Syntax**
.. code:: python
myAWSIoTMQTTClient.configureLastWill("last/Will/Topic", "lastWillPayload", 0)
**Parameters**
*topic* - Topic name that last will publishes to.
*payload* - Payload to publish for last will.
*QoS* - Quality of Service. Could be 0 or 1.
**Returns**
None
"""
# AWSIoTMQTTClient.configureLastWill(srcTopic, srcPayload, srcQos)
self._AWSIoTMQTTClient.configureLastWill(topic, payload, QoS)
def clearLastWill(self):
"""
**Description**
Used to clear the last will configuration that is previously set through configureLastWill.
**Syntax**
.. code:: python
myAWSIoTShadowMQTTClient.clearLastWill()
**Parameter**
None
**Returns**
None
"""
# AWSIoTMQTTClient.clearLastWill()
self._AWSIoTMQTTClient.clearLastWill()
def configureEndpoint(self, hostName, portNumber):
"""
**Description**
Used to configure the host name and port number the underneath AWS IoT MQTT Client tries to connect to. Should be called
before connect.
**Syntax**
.. code:: python
myAWSIoTMQTTShadowClient.configureEndpoint("random.iot.region.amazonaws.com", 8883)
**Parameters**
*hostName* - String that denotes the host name of the user-specific AWS IoT endpoint.
*portNumber* - Integer that denotes the port number to connect to. Could be :code:`8883` for
TLSv1.2 Mutual Authentication or :code:`443` for Websocket SigV4.
**Returns**
None
"""
# AWSIoTMQTTClient.configureEndpoint
self._AWSIoTMQTTClient.configureEndpoint(hostName, portNumber)
def configureIAMCredentials(self, AWSAccessKeyID, AWSSecretAccessKey, AWSSTSToken=""):
"""
**Description**
Used to configure/update the custom IAM credentials for the underneath AWS IoT MQTT Client
for Websocket SigV4 connection to AWS IoT. Should be called before connect.
**Syntax**
.. code:: python
myAWSIoTMQTTShadowClient.configureIAMCredentials(obtainedAccessKeyID, obtainedSecretAccessKey, obtainedSessionToken)
.. note::
Hard-coding credentials into custom script is NOT recommended. Please use AWS Cognito identity service
or other credential provider.
**Parameters**
*AWSAccessKeyID* - AWS Access Key Id from user-specific IAM credentials.
*AWSSecretAccessKey* - AWS Secret Access Key from user-specific IAM credentials.
*AWSSTSToken* - AWS Session Token for temporary authentication from STS.
**Returns**
None
"""
# AWSIoTMQTTClient.configureIAMCredentials
self._AWSIoTMQTTClient.configureIAMCredentials(AWSAccessKeyID, AWSSecretAccessKey, AWSSTSToken)
def configureCredentials(self, CAFilePath, KeyPath="", CertificatePath=""): # Should be good for MutualAuth and Websocket
"""
**Description**
Used to configure the rootCA, private key and certificate files. Should be called before connect.
**Syntax**
.. code:: python
myAWSIoTMQTTClient.configureCredentials("PATH/TO/ROOT_CA", "PATH/TO/PRIVATE_KEY", "PATH/TO/CERTIFICATE")
**Parameters**
*CAFilePath* - Path to read the root CA file. Required for all connection types.
*KeyPath* - Path to read the private key. Required for X.509 certificate based connection.
*CertificatePath* - Path to read the certificate. Required for X.509 certificate based connection.
**Returns**
None
"""
# AWSIoTMQTTClient.configureCredentials
self._AWSIoTMQTTClient.configureCredentials(CAFilePath, KeyPath, CertificatePath)
def configureAutoReconnectBackoffTime(self, baseReconnectQuietTimeSecond, maxReconnectQuietTimeSecond, stableConnectionTimeSecond):
"""
**Description**
Used to configure the auto-reconnect backoff timing. Should be called before connect.
**Syntax**
.. code:: python
# Configure the auto-reconnect backoff to start with 1 second and use 128 seconds as a maximum back off time.
# Connection over 20 seconds is considered stable and will reset the back off time back to its base.
myAWSIoTMQTTClient.configureAutoReconnectBackoffTime(1, 128, 20)
**Parameters**
*baseReconnectQuietTimeSecond* - The initial back off time to start with, in seconds.
Should be less than the stableConnectionTime.
*maxReconnectQuietTimeSecond* - The maximum back off time, in seconds.
*stableConnectionTimeSecond* - The number of seconds for a connection to last to be considered as stable.
Back off time will be reset to base once the connection is stable.
**Returns**
None
"""
# AWSIoTMQTTClient.configureBackoffTime
self._AWSIoTMQTTClient.configureAutoReconnectBackoffTime(baseReconnectQuietTimeSecond, maxReconnectQuietTimeSecond, stableConnectionTimeSecond)
def configureConnectDisconnectTimeout(self, timeoutSecond):
"""
**Description**
Used to configure the time in seconds to wait for a CONNACK or a disconnect to complete.
Should be called before connect.
**Syntax**
.. code:: python
# Configure connect/disconnect timeout to be 10 seconds
myAWSIoTMQTTShadowClient.configureConnectDisconnectTimeout(10)
**Parameters**
*timeoutSecond* - Time in seconds to wait for a CONNACK or a disconnect to complete.
**Returns**
None
"""
# AWSIoTMQTTClient.configureConnectDisconnectTimeout
self._AWSIoTMQTTClient.configureConnectDisconnectTimeout(timeoutSecond)
def configureMQTTOperationTimeout(self, timeoutSecond):
"""
**Description**
Used to configure the timeout in seconds for MQTT QoS 1 publish, subscribe and unsubscribe.
Should be called before connect.
**Syntax**
.. code:: python
# Configure MQTT operation timeout to be 5 seconds
myAWSIoTMQTTShadowClient.configureMQTTOperationTimeout(5)
**Parameters**
*timeoutSecond* - Time in seconds to wait for a PUBACK/SUBACK/UNSUBACK.
**Returns**
None
"""
# AWSIoTMQTTClient.configureMQTTOperationTimeout
self._AWSIoTMQTTClient.configureMQTTOperationTimeout(timeoutSecond)
def configureUsernamePassword(self, username, password=None):
"""
**Description**
Used to configure the username and password used in CONNECT packet.
**Syntax**
.. code:: python
# Configure user name and password
myAWSIoTMQTTShadowClient.configureUsernamePassword("myUsername", "myPassword")
**Parameters**
*username* - Username used in the username field of CONNECT packet.
*password* - Password used in the password field of CONNECT packet.
**Returns**
None
"""
self._AWSIoTMQTTClient.configureUsernamePassword(username, password)
def enableMetricsCollection(self):
"""
**Description**
Used to enable SDK metrics collection. Username field in CONNECT packet will be used to append the SDK name
and SDK version in use and communicate to AWS IoT cloud. This metrics collection is enabled by default.
**Syntax**
.. code:: python
myAWSIoTMQTTClient.enableMetricsCollection()
**Parameters**
None
**Returns**
None
"""
self._AWSIoTMQTTClient.enableMetricsCollection()
def disableMetricsCollection(self):
"""
**Description**
Used to disable SDK metrics collection.
**Syntax**
.. code:: python
myAWSIoTMQTTClient.disableMetricsCollection()
**Parameters**
None
**Returns**
None
"""
self._AWSIoTMQTTClient.disableMetricsCollection()
# Start the MQTT connection
def connect(self, keepAliveIntervalSecond=600):
"""
**Description**
Connect to AWS IoT, with user-specific keepalive interval configuration.
**Syntax**
.. code:: python
# Connect to AWS IoT with default keepalive set to 600 seconds
myAWSIoTMQTTShadowClient.connect()
# Connect to AWS IoT with keepalive interval set to 1200 seconds
myAWSIoTMQTTShadowClient.connect(1200)
**Parameters**
*keepAliveIntervalSecond* - Time in seconds for interval of sending MQTT ping request.
Default set to 600 seconds.
**Returns**
True if the connect attempt succeeded. False if failed.
"""
self._load_callbacks()
return self._AWSIoTMQTTClient.connect(keepAliveIntervalSecond)
def _load_callbacks(self):
self._AWSIoTMQTTClient.onOnline = self.onOnline
self._AWSIoTMQTTClient.onOffline = self.onOffline
# End the MQTT connection
def disconnect(self):
"""
**Description**
Disconnect from AWS IoT.
**Syntax**
.. code:: python
myAWSIoTMQTTShadowClient.disconnect()
**Parameters**
None
**Returns**
True if the disconnect attempt succeeded. False if failed.
"""
return self._AWSIoTMQTTClient.disconnect()
# Shadow management API
def createShadowHandlerWithName(self, shadowName, isPersistentSubscribe):
"""
**Description**
Create a device shadow handler using the specified shadow name and isPersistentSubscribe.
**Syntax**
.. code:: python
# Create a device shadow handler for shadow named "Bot1", using persistent subscription
Bot1Shadow = myAWSIoTMQTTShadowClient.createShadowHandlerWithName("Bot1", True)
# Create a device shadow handler for shadow named "Bot2", using non-persistent subscription
Bot2Shadow = myAWSIoTMQTTShadowClient.createShadowHandlerWithName("Bot2", False)
**Parameters**
*shadowName* - Name of the device shadow.
*isPersistentSubscribe* - Whether to unsubscribe from shadow response (accepted/rejected) topics
when there is a response. Will subscribe at the first time the shadow request is made and will
not unsubscribe if isPersistentSubscribe is set.
**Returns**
AWSIoTPythonSDK.core.shadow.deviceShadow.deviceShadow object, which exposes the device shadow interface.
"""
# Create and return a deviceShadow instance
return deviceShadow.deviceShadow(shadowName, isPersistentSubscribe, self._shadowManager)
# Shadow APIs are accessible in the deviceShadow instance:
###
# deviceShadow.shadowGet
# deviceShadow.shadowUpdate
# deviceShadow.shadowDelete
# deviceShadow.shadowRegisterDelta
# deviceShadow.shadowUnregisterDelta
# MQTT connection management API
def getMQTTConnection(self):
"""
**Description**
Retrieve the AWS IoT MQTT Client used underneath for shadow operations, making it possible to perform
plain MQTT operations along with shadow operations using the same single connection.
**Syntax**
.. code:: python
# Retrieve the AWS IoT MQTT Client used in the AWS IoT MQTT Shadow Client
thisAWSIoTMQTTClient = myAWSIoTMQTTShadowClient.getMQTTConnection()
# Perform plain MQTT operations using the same connection
thisAWSIoTMQTTClient.publish("Topic", "Payload", 1)
...
**Parameters**
None
**Returns**
AWSIoTPythonSDK.MQTTLib.AWSIoTMQTTClient object
"""
# Return the internal AWSIoTMQTTClient instance
return self._AWSIoTMQTTClient
def onOnline(self):
"""
**Description**
Callback that gets called when the client is online. The callback registration should happen before calling
connect.
**Syntax**
.. code:: python
# Register an onOnline callback
myAWSIoTMQTTShadowClient.onOnline = myOnOnlineCallback
**Parameters**
None
**Returns**
None
"""
pass
def onOffline(self):
"""
**Description**
Callback that gets called when the client is offline. The callback registration should happen before calling
connect.
**Syntax**
.. code:: python
# Register an onOffline callback
myAWSIoTMQTTShadowClient.onOffline = myOnOfflineCallback
**Parameters**
None
**Returns**
None
"""
pass
```
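The docstrings above already show each call in isolation; the sketch below strings the documented configuration and MQTT calls together into one plausible flow. The endpoint, certificate paths, and topic are placeholders, and the keepalive/timeout values are arbitrary choices, not project settings.
```python
# Hedged end-to-end sketch using only methods documented above.
import time
import AWSIoTPythonSDK.MQTTLib as AWSIoTPyMQTT

def custom_callback(client, userdata, message):
    # client/userdata mirror the underlying Paho signature; only topic/payload matter.
    print('Received on ' + message.topic + ': ' + str(message.payload))

myClient = AWSIoTPyMQTT.AWSIoTMQTTClient('exampleClientId')
myClient.configureEndpoint('YOUR_ENDPOINT.iot.us-east-1.amazonaws.com', 8883)
myClient.configureCredentials('path/to/rootCA.pem', 'path/to/private.key', 'path/to/certificate.pem')
myClient.configureAutoReconnectBackoffTime(1, 32, 20)
myClient.configureOfflinePublishQueueing(-1)   # infinite offline queueing
myClient.configureDrainingFrequency(2)         # 2 requests/second when back online
myClient.configureConnectDisconnectTimeout(10)
myClient.configureMQTTOperationTimeout(5)

myClient.connect()
myClient.subscribe('example/topic', 1, custom_callback)
myClient.publish('example/topic', 'hello from the sketch', 1)
time.sleep(2)   # allow the broker to deliver the message back
myClient.disconnect()
```
The shadow variant follows the same pattern: configure an AWSIoTMQTTShadowClient the same way, connect, then call createShadowHandlerWithName to obtain a deviceShadow handle for shadow get/update/delete operations.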
#### File: AWS/project-code/custom_resource.py
```python
import custom_resource_response
import traceback
import os
import imp
import sys
from properties import ValidationError
# This is patched by unit tests
PLUGIN_DIRECTORY_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), 'plugin'))
def handler(event, context):
try:
resource_type = event.get('ResourceType', None)
if resource_type is None:
raise RuntimeError('No ResourceType specified.')
module_name = resource_type.replace('Custom::', '') + 'ResourceHandler'
module = sys.modules.get(module_name, None)
if module is None:
# First check for handler module in same directory as this module,
# if not found, check for module in the resource group provided
# directories.
module_file_name = module_name + '.py'
module_file_path = os.path.join(os.path.dirname(__file__), module_file_name)
if os.path.isfile(module_file_path):
module = __load_module(module_name, os.path.dirname(module_file_path))
elif os.path.isdir(PLUGIN_DIRECTORY_PATH):
plugin_directory_names = [item for item in os.listdir(PLUGIN_DIRECTORY_PATH) if os.path.isdir(os.path.join(PLUGIN_DIRECTORY_PATH, item))]
for plugin_directory_name in plugin_directory_names:
module_file_path = os.path.join(PLUGIN_DIRECTORY_PATH, plugin_directory_name, module_file_name)
if os.path.isfile(module_file_path):
module = __load_module(module_name, os.path.dirname(module_file_path))
break
if module is None:
raise RuntimeError('No handler module found for the {} resource type.'.format(resource_type))
if not hasattr(module, 'handler'):
raise RuntimeError('No handler function found for the {} resource type.'.format(resource_type))
print 'Using {}'.format(module)
module.handler(event, context)
except ValidationError as e:
custom_resource_response.fail(event, context, str(e))
except Exception as e:
print 'Unexpected error occurred when processing event {} with context {}. {}'.format(event, context, traceback.format_exc())
custom_resource_response.fail(event, context, 'Unexpected error occurred. Details can be found in the CloudWatch log group {} stream {}'.format(
context.log_group_name,
context.log_stream_name))
def __load_module(name, path):
imp.acquire_lock()
try:
print 'Loading module {} from {}.'.format(name, path)
sys.path.append(path)
try:
fp, pathname, description = imp.find_module(name, [ path ])
try:
module = imp.load_module(name, fp, pathname, description)
return module
finally:
if fp:
fp.close()
finally:
sys.path.remove(path)
finally:
imp.release_lock()
```
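The dispatcher above resolves a resource type such as `Custom::Foo` to a module named `FooResourceHandler`, found either next to this file or under a plugin directory, and calls its `handler(event, context)` function. A hypothetical handler module sketch follows; the resource type, module name, and response values are invented for illustration.
```python
# Hypothetical ExampleResourceHandler.py for a Custom::Example resource.
import custom_resource_response

def handler(event, context):
    request_type = event['RequestType']   # 'Create', 'Update' or 'Delete'
    physical_resource_id = 'Example:' + event['LogicalResourceId']
    data = {}
    if request_type in ('Create', 'Update'):
        data['Message'] = 'Handled ' + request_type
    # succeed() is the same helper used by the other handlers in this project.
    custom_resource_response.succeed(event, context, data, physical_resource_id)
```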
#### File: AWS/project-code/ResourceGroupConfigurationResourceHandler.py
```python
import properties
import custom_resource_response
import discovery_utils
def handler(event, context):
props = properties.load(event, {
'ConfigurationBucket': properties.String(),
'ConfigurationKey': properties.String(),
'ResourceGroupName': properties.String()})
data = {
'ConfigurationBucket': props.ConfigurationBucket,
'ConfigurationKey': '{}/resource-group/{}'.format(props.ConfigurationKey, props.ResourceGroupName),
'TemplateURL': 'https://s3.amazonaws.com/{}/{}/resource-group/{}/resource-template.json'.format(props.ConfigurationBucket, props.ConfigurationKey, props.ResourceGroupName)
}
physical_resource_id = 'CloudCanvas:LambdaConfiguration:{stack_name}:{resource_group_name}'.format(
stack_name=discovery_utils.get_stack_name_from_stack_arn(event['StackId']),
resource_group_name=props.ResourceGroupName)
custom_resource_response.succeed(event, context, data, physical_resource_id)
```
#### File: project-code/test/test_LambdaConfigurationResourceHandler.py
```python
import unittest
import mock
import boto3
import StringIO
import zipfile
import json
import types
import os
from time import time, sleep
from botocore.exceptions import ClientError
import discovery_utils
import custom_resource_response
TEST_REGION = 'us-east-1'
TEST_PROFILE = 'default'
os.environ['AWS_DEFAULT_REGION'] = TEST_REGION
os.environ['AWS_PROFILE'] = TEST_PROFILE
# import after setting AWS configuration in environment
import LambdaConfigurationResourceHandler
import role_utils
class TestLambdaConfigurationResourceHandler(unittest.TestCase):
event = {}
context = {}
def setUp(self):
reload(LambdaConfigurationResourceHandler) # reset any accumulated state
self.event = {
'ResourceProperties': {
'ConfigurationBucket': 'TestBucket',
'ConfigurationKey': 'TestInputKey',
'FunctionName': 'TestFunction',
'Runtime': 'TestRuntime',
'Settings': {
'TestSettingKey1': 'TestSettingValue1',
'TestSettingKey2': 'TestSettingValue2'
}
},
'StackId': 'arn:aws:cloudformation:TestRegion:TestAccount:stack/TestStack/TestUUID',
'LogicalResourceId': 'TestLogicalResourceId'
}
def test_handler_create(self):
self.event['RequestType'] = 'Create'
expected_data = {
'ConfigurationBucket': 'TestBucket',
'ConfigurationKey': 'TestOutputKey',
'Runtime': 'TestRuntime',
'Role': 'TestRole'
}
expected_physical_id = 'TestStack-TestLogicalResourceId'
with mock.patch.object(custom_resource_response, 'succeed') as mock_custom_resource_response_succeed:
with mock.patch.object(role_utils, 'create_role') as mock_create_role:
mock_create_role.return_value = expected_data['Role']
with mock.patch.object(LambdaConfigurationResourceHandler, '_inject_settings') as mock_inject_settings:
mock_inject_settings.return_value = expected_data['ConfigurationKey']
LambdaConfigurationResourceHandler.handler(self.event, self.context)
mock_custom_resource_response_succeed.assert_called_once_with(
self.event,
self.context,
expected_data,
expected_physical_id)
mock_create_role.assert_called_once_with(
self.event['StackId'],
self.event['LogicalResourceId'],
LambdaConfigurationResourceHandler.POLICY_NAME,
'lambda.amazonaws.com',
LambdaConfigurationResourceHandler.DEFAULT_POLICY_STATEMENTS,
AnyFunction())
mock_inject_settings.assert_called_once_with(
self.event['ResourceProperties']['Settings'],
self.event['ResourceProperties']['Runtime'],
self.event['ResourceProperties']['ConfigurationBucket'],
'{}/lambda-function-code.zip'.format(self.event['ResourceProperties']['ConfigurationKey']),
'TestFunction')
def test_handler_update(self):
self.event['RequestType'] = 'Update'
self.event['PhysicalResourceId'] = 'TestStack-TestLogicalResourceId'
expected_data = {
'ConfigurationBucket': 'TestBucket',
'ConfigurationKey': 'TestKey/lambda-function-code.zip',
'Runtime': 'TestRuntime',
'Role': 'TestRole'
}
expected_physical_id = self.event['PhysicalResourceId']
with mock.patch.object(custom_resource_response, 'succeed') as mock_custom_resource_response_succeed:
with mock.patch.object(role_utils, 'update_role') as mock_update_role:
mock_update_role.return_value = expected_data['Role']
with mock.patch.object(LambdaConfigurationResourceHandler, '_inject_settings') as mock_inject_settings:
mock_inject_settings.return_value = expected_data['ConfigurationKey']
LambdaConfigurationResourceHandler.handler(self.event, self.context)
mock_custom_resource_response_succeed.assert_called_once_with(
self.event,
self.context,
expected_data,
expected_physical_id)
mock_update_role.assert_called_once_with(
self.event['StackId'],
self.event['LogicalResourceId'],
LambdaConfigurationResourceHandler.POLICY_NAME,
LambdaConfigurationResourceHandler.DEFAULT_POLICY_STATEMENTS,
AnyFunction())
mock_inject_settings.assert_called_once_with(
self.event['ResourceProperties']['Settings'],
self.event['ResourceProperties']['Runtime'],
self.event['ResourceProperties']['ConfigurationBucket'],
'{}/lambda-function-code.zip'.format(self.event['ResourceProperties']['ConfigurationKey']),
'TestFunction')
def test_handler_delete(self):
self.event['RequestType'] = 'Delete'
self.event['PhysicalResourceId'] = 'TestStack-TestLogicalResourceId'
expected_data = {}
expected_physical_id = self.event['PhysicalResourceId']
with mock.patch.object(custom_resource_response, 'succeed') as mock_custom_resource_response_succeed:
with mock.patch.object(role_utils, 'delete_role') as mock_delete_role:
LambdaConfigurationResourceHandler.handler(self.event, self.context)
mock_custom_resource_response_succeed.assert_called_once_with(self.event, self.context, expected_data, expected_physical_id)
mock_delete_role.assert_called_once_with(
self.event['StackId'],
self.event['LogicalResourceId'],
LambdaConfigurationResourceHandler.POLICY_NAME)
def test_inject_settings(self):
with mock.patch.object(boto3, 'client') as mock_boto3_client:
zip_content = StringIO.StringIO()
zip_file = zipfile.ZipFile(zip_content, 'w')
zip_file.close()
mock_body = mock.MagicMock()
mock_body.read = mock.MagicMock(return_value=zip_content.getvalue())
mock_s3_client = mock_boto3_client.return_value
mock_s3_client.get_object = mock.MagicMock(return_value={'Body': mock_body})
mock_s3_client.put_object = mock.MagicMock()
reload(LambdaConfigurationResourceHandler) # so it uses mocked methods
settings = self.event['ResourceProperties']['Settings']
runtime = self.event['ResourceProperties']['Runtime']
bucket = self.event['ResourceProperties']['ConfigurationBucket']
input_key = self.event['ResourceProperties']['ConfigurationKey']
function_name = self.event['ResourceProperties']['FunctionName']
mock_injector = mock.MagicMock()
LambdaConfigurationResourceHandler._SETTINGS_INJECTORS[runtime] = mock_injector
output_key = LambdaConfigurationResourceHandler._inject_settings(settings, runtime, bucket, input_key, function_name)
mock_boto3_client.assert_called_with('s3')
mock_s3_client.get_object.assert_called_once_with(Bucket=bucket, Key=input_key)
mock_injector.assert_called_once_with(AnyZipFileObject(), settings)
mock_s3_client.put_object.assert_called_once_with(Bucket='TestBucket', Key=output_key, Body=AnyValidZipFileContent())
def test_inject_settings_python(self):
expected_settings = self.event['ResourceProperties']['Settings']
expected_zip_name = 'CloudCanvas/settings.py'
zip_file = zipfile.ZipFile(StringIO.StringIO(), 'w')
LambdaConfigurationResourceHandler._inject_settings_python(zip_file, expected_settings)
with zip_file.open(expected_zip_name, 'r') as zip_content_file:
globals = {}
exec(zip_content_file.read(), globals)
actual_settings = globals['settings']
self.assertEquals(expected_settings, actual_settings)
def test_inject_settings_nodejs(self):
expected_settings = self.event['ResourceProperties']['Settings']
expected_zip_name = 'CloudCanvas/settings.js'
zip_file = zipfile.ZipFile(StringIO.StringIO(), 'w')
LambdaConfigurationResourceHandler._inject_settings_nodejs(zip_file, expected_settings)
with zip_file.open(expected_zip_name, 'r') as zip_content_file:
content = zip_content_file.read()
print content
self.assertTrue('TestSettingKey1' in content)
self.assertTrue('TestSettingValue1' in content)
self.assertTrue('TestSettingKey2' in content)
self.assertTrue('TestSettingValue2' in content)
# @unittest.skip("integration test disabled")
def test_integration_inject_settings_python(self):
# we need both the s3 client below and the one created by the
# custom resource handler to use this region
boto3.setup_default_session(region_name=TEST_REGION)
reload(LambdaConfigurationResourceHandler) # reset global s3 client object
s3 = boto3.client('s3')
bucket = 'lmbr_aws_settings_test_' + str(int(time() * 1000))
input_key = 'TestKey/lambda-function-code.zip'
output_key = None
s3.create_bucket(Bucket=bucket)
try:
zip_content = StringIO.StringIO()
with zipfile.ZipFile(zip_content, 'w') as zip_file:
zip_file.writestr('InitialName', 'InitialContent')
body = zip_content.getvalue()
s3.put_object(Bucket=bucket, Key=input_key, Body=body)
zip_content.close()
sleep(10) # seconds
expected_settings = self.event['ResourceProperties']['Settings']
runtime = 'python2.7'
function_name = 'TestFunction'
output_key = LambdaConfigurationResourceHandler._inject_settings(expected_settings, runtime, bucket, input_key, function_name)
expected_zip_name = 'CloudCanvas/settings.py'
sleep(10) # seconds
print 'output_key', output_key
print 'bucket', bucket
res = s3.get_object(Bucket=bucket, Key=output_key)
body = res['Body'].read()
zip_content = StringIO.StringIO(body)
with zipfile.ZipFile(zip_content, 'r') as zip_file:
with zip_file.open('InitialName', 'r') as zip_content_file:
actual_zip_content = zip_content_file.read()
self.assertEquals('InitialContent', actual_zip_content)
with zip_file.open(expected_zip_name, 'r') as zip_content_file:
globals = {}
exec(zip_content_file.read(), globals)
actual_settings = globals['settings']
self.assertEquals(expected_settings, actual_settings)
zip_content.close()
finally:
try:
s3.delete_object(Bucket=bucket, Key=input_key)
except Exception as e:
print 'Error when deleting object {} from bucket {}: {}'.format(input_key, bucket, e)
if output_key is not None:
try:
s3.delete_object(Bucket=bucket, Key=output_key)
except Exception as e:
print 'Error when deleting object {} from bucket {}: {}'.format(output_key, bucket, e)
try:
s3.delete_bucket(Bucket=bucket)
except Exception as e:
print 'Error when deleting bucket {}: {}'.format(bucket, e)
# @unittest.skip("integration test disabled")
def test_integration_create_update_delete_role(self):
with mock.patch.object(discovery_utils,'ResourceGroupInfo') as mock_ResourceGroupInfo:
mock_ResourceGroupInfo.return_value.resource_group_name = 'TestGroup'
mock_ResourceGroupInfo.return_value.deployment = mock.MagicMock()
mock_ResourceGroupInfo.return_value.deployment.deployment_name = 'TestDeployment'
mock_ResourceGroupInfo.return_value.deployment.project = mock.MagicMock()
mock_ResourceGroupInfo.return_value.deployment.project.project_name = 'TestProject'
with mock.patch.object(custom_resource_response, 'succeed') as mock_custom_resource_response_succeed:
with mock.patch.object(LambdaConfigurationResourceHandler, '_inject_settings') as mock_inject_settings:
mock_inject_settings.return_value = 'TestOutputConfigurationKey'
stack_arn = self._create_role_test_stack()
self.event['StackId'] = stack_arn
try:
capture_data = CaptureValue()
capture_physical_resource_id = CaptureValue()
# test create
self.event['RequestType'] = 'Create'
LambdaConfigurationResourceHandler.handler(self.event, self.context)
mock_custom_resource_response_succeed.assert_called_once_with(
self.event,
self.context,
capture_data,
capture_physical_resource_id)
created_role_arn = capture_data.value['Role']
self._validate_role(created_role_arn, stack_arn)
# test update
mock_custom_resource_response_succeed.reset_mock()
self.event['RequestType'] = 'Update'
self.event['PhysicalResourceId'] = capture_physical_resource_id.value
LambdaConfigurationResourceHandler.handler(self.event, self.context)
mock_custom_resource_response_succeed.assert_called_once_with(
self.event,
self.context,
capture_data,
capture_physical_resource_id)
updated_role_arn = capture_data.value['Role']
self.assertEquals(created_role_arn, updated_role_arn)
self._validate_role(updated_role_arn, stack_arn)
# test delete
mock_custom_resource_response_succeed.reset_mock()
self.event['RequestType'] = 'Delete'
self.event['PhysicalResourceId'] = capture_physical_resource_id.value
LambdaConfigurationResourceHandler.handler(self.event, self.context)
mock_custom_resource_response_succeed.assert_called_once_with(
self.event,
self.context,
capture_data,
capture_physical_resource_id)
self._validate_role_deleted(created_role_arn)
finally:
# self._delete_role_test_stack(stack_arn)
pass
def _create_role_test_stack(self):
cf = boto3.client('cloudformation', region_name=TEST_REGION)
stack_name = 'lmbr-aws-update-role-test-' + str(int(time() * 1000))
print 'creating stack', stack_name
res = cf.create_stack(
StackName = stack_name,
TemplateBody = self.ROLE_TEST_STACK_TEMPLATE,
Capabilities = [ 'CAPABILITY_IAM' ])
stack_arn = res['StackId']
print 'CreateStack', res
while True:
sleep(5)
res = cf.describe_stacks(StackName=stack_arn)
print 'Checking', res
if res['Stacks'][0]['StackStatus'] != 'CREATE_IN_PROGRESS':
break
self.assertEquals(res['Stacks'][0]['StackStatus'], 'CREATE_COMPLETE')
return stack_arn
def _delete_role_test_stack(self, stack_arn):
print 'deleting stack', stack_arn
cf = boto3.client('cloudformation', region_name=TEST_REGION)
cf.delete_stack(StackName=stack_arn)
def _validate_role(self, role_arn, stack_arn):
iam = boto3.client('iam')
print 'role_arn', role_arn
res = iam.get_role(RoleName=self._get_role_name_from_role_arn(role_arn))
print 'res', res
role = res['Role']
self.assertEquals(role['Path'], '/TestProject/TestDeployment/TestGroup/TestLogicalResourceId/')
cf = boto3.client('cloudformation', region_name=TEST_REGION)
res = cf.describe_stack_resources(StackName=stack_arn)
print res
resources = res['StackResources']
expected_statement = {
'TestTableAccess': {
'Sid': 'TestTableAccess',
'Effect': 'Allow',
'Action': [ 'dynamodb:PutItem' ],
'Resource': self._get_resource_arn(stack_arn, resources, 'TestTable')
},
'TestFunctionAccess': {
'Sid': 'TestFunctionAccess',
'Effect': 'Allow',
'Action': [ 'lambda:InvokeFunction' ],
'Resource': self._get_resource_arn(stack_arn, resources, 'TestFunction')
},
'TestQueueAccess': {
'Sid': 'TestQueueAccess',
'Effect': 'Allow',
'Action': [ 'sqs:SendMessage' ],
'Resource': self._get_resource_arn(stack_arn, resources, 'TestQueue')
},
'TestTopicAccess': {
'Sid': 'TestTopicAccess',
'Effect': 'Allow',
'Action': [ 'sns:Subscribe' ],
'Resource': self._get_resource_arn(stack_arn, resources, 'TestTopic')
},
'TestBucketAccess': {
'Sid': 'TestBucketAccess',
'Effect': 'Allow',
'Action': [ 's3:GetObject', 's3:PutObject' ],
'Resource': self._get_resource_arn(stack_arn, resources, 'TestBucket') + "TestSuffix"
},
'WriteLogs': {
'Sid': 'WriteLogs',
'Action': ['logs:CreateLogGroup', 'logs:CreateLogStream', 'logs:PutLogEvents'],
'Resource': 'arn:aws:logs:*:*:*',
'Effect': 'Allow'
}
}
res = iam.get_role_policy(RoleName=self._get_role_name_from_role_arn(role_arn), PolicyName='FunctionAccess')
print res
actual_policy = res['PolicyDocument']
count = 0
for actual_statement in actual_policy['Statement']:
self.assertEquals(actual_statement, expected_statement.get(actual_statement['Sid'], None))
count += 1
self.assertEquals(count, 6)
def _get_resource_arn(self, stack_arn, resources, name):
arn = None
for resource in resources:
if resource['LogicalResourceId'] == name:
arn = self._make_resource_arn(stack_arn, resource['ResourceType'], resource['PhysicalResourceId'])
self.assertIsNotNone(arn)
return arn
RESOURCE_ARN_PATTERNS = {
'AWS::DynamoDB::Table': 'arn:aws:dynamodb:{region}:{account_id}:table/{resource_name}',
'AWS::Lambda::Function': 'arn:aws:lambda:{region}:{account_id}:function:{resource_name}',
'AWS::SQS::Queue': 'arn:aws:sqs:{region}:{account_id}:{resource_name}',
'AWS::SNS::Topic': 'arn:aws:sns:{region}:{account_id}:{resource_name}',
'AWS::S3::Bucket': 'arn:aws:s3:::{resource_name}'
}
def _make_resource_arn(self, stack_arn, resource_type, resource_name):
pattern = self.RESOURCE_ARN_PATTERNS.get(resource_type, None)
self.assertIsNotNone(pattern)
return pattern.format(
region=TEST_REGION,
account_id=self._get_account_id_from_stack_arn(stack_arn),
resource_name=resource_name)
def _get_account_id_from_stack_arn(self, stack_arn):
# arn:aws:cloudformation:REGION:ACCOUNT:stack/STACK/UUID
return stack_arn.split(':')[4]
def _get_role_name_from_role_arn(self, role_arn):
# arn:aws:iam::ACCOUNT:role/PATH/ROLE_NAME
return role_arn.split('/')[-1]
def _validate_role_deleted(self, role_arn):
iam = boto3.client('iam')
try:
iam.get_role(RoleName=self._get_role_name_from_role_arn(role_arn))
self.assertTrue(False)
except ClientError as e:
self.assertEquals(e.response["Error"]["Code"], "NoSuchEntity")
ROLE_TEST_STACK_TEMPLATE = '''{
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"TestTable": {
"Type": "AWS::DynamoDB::Table",
"Properties": {
"AttributeDefinitions": [
{
"AttributeName": "PlayerId",
"AttributeType": "S"
}
],
"KeySchema": [
{
"AttributeName": "PlayerId",
"KeyType": "HASH"
}
],
"ProvisionedThroughput": {
"ReadCapacityUnits": "1",
"WriteCapacityUnits": "1"
}
},
"Metadata": {
"CloudCanvas": {
"FunctionAccess": [
{
"FunctionName": "TestFunction",
"Action": "dynamodb:PutItem"
}
]
}
}
},
"TestFunctionRole": {
"Type": "AWS::IAM::Role",
"Properties": {
"AssumeRolePolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "sts:AssumeRole",
"Principal": {
"Service": "lambda.amazonaws.com"
}
}
]
},
"Policies": [
{
"PolicyName": "Execution",
"PolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents"
],
"Effect": "Allow",
"Resource": "arn:aws:logs:*:*:*"
}
]
}
}
]
}
},
"TestFunction": {
"Type": "AWS::Lambda::Function",
"Properties": {
"Description": "Implements the custom resources used in this project's templates.",
"Handler": "index.handler",
"Role": { "Fn::GetAtt": [ "TestFunctionRole", "Arn" ] },
"Runtime": "nodejs",
"Code": {
"ZipFile": "exports.handler = function(event, context) { return 'Test'; }"
}
},
"Metadata": {
"CloudCanvas": {
"FunctionAccess": {
"FunctionName": "TestFunction",
"Action": "lambda:InvokeFunction"
}
}
}
},
"TestQueue": {
"Type": "AWS::SQS::Queue",
"Properties": {
},
"Metadata": {
"CloudCanvas": {
"FunctionAccess": [
{
"FunctionName": "TestFunction",
"Action": [ "sqs:SendMessage" ]
}
]
}
}
},
"TestTopic": {
"Type": "AWS::SNS::Topic",
"Properties": {
},
"Metadata": {
"CloudCanvas": {
"FunctionAccess": [
{
"FunctionName": "TestFunction",
"Action": "sns:Subscribe"
}
]
}
}
},
"TestBucket": {
"Type": "AWS::S3::Bucket",
"Properties": {
},
"Metadata": {
"CloudCanvas": {
"FunctionAccess": [
{
"FunctionName": "TestFunction",
"Action": [ "s3:GetObject", "s3:PutObject" ],
"ResourceSuffix": "TestSuffix"
}
]
}
}
}
}
}'''
class AnyZipFileObject(object):
def __init__(self):
pass
def __eq__(self, other):
return isinstance(other, zipfile.ZipFile)
class AnyValidZipFileContent(object):
def __init__(self):
pass
def __eq__(self, other):
zip_file = zipfile.ZipFile(StringIO.StringIO(other))
return zip_file.testzip() is None
class AnyFunction(object):
def __init__(self):
pass
def __eq__(self, other):
return isinstance(other, types.FunctionType)
class CaptureValue(object):
def __init__(self):
self.value = None
def __eq__(self, other):
self.value = other
return True
```
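The helper classes at the end of the test module lean on the fact that mock's assert_called_*_with checks compare arguments with ==: AnyZipFileObject and friends match by type, while CaptureValue records whatever it is compared against. A tiny, hypothetical illustration:
```python
# Illustration only; do_work is an arbitrary mocked call, not project code.
import mock

capture = CaptureValue()
m = mock.MagicMock()
m.do_work('payload-123')
m.do_work.assert_called_once_with(capture)   # __eq__ stores the actual argument
print(capture.value)                          # -> 'payload-123'
```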
#### File: windows/jinja_extensions/error.py
```python
from jinja2 import nodes
from jinja2.ext import Extension
from jinja2.exceptions import TemplateRuntimeError
# ---------------------------------------------------
## Custom tag to support raising exceptions from Jinja templates
class RaiseExtension(Extension):
tags = set(['raise'])
def parse(self, parser):
# the first token is the token that started the tag. In our case we
# only listen to "raise" so this will be a name token with
# "raise" as value. We get the line number so that we can give
# that line number to the nodes we insert.
lineno = next(parser.stream).lineno
# Extract the message from the template
message_node = parser.parse_expression()
return nodes.CallBlock(
self.call_method('_raise', [message_node], lineno=lineno),
[], [], [], lineno=lineno
)
def _raise(self, msg, caller):
raise TemplateRuntimeError(msg)
```
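A short usage sketch for the `raise` tag defined above (the template string and variable name are illustrative only):

```python
# Sketch only: register RaiseExtension (defined above) and trigger the tag.
from jinja2 import Environment
from jinja2.exceptions import TemplateRuntimeError

env = Environment(extensions=[RaiseExtension])
template = env.from_string(
    '{% if not project_name %}{% raise "project_name is required" %}{% endif %}')
try:
    template.render(project_name=None)
except TemplateRuntimeError as e:
    print(e)  # -> project_name is required
```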
#### File: waf-1.7.13/lmbrwaflib/run_test.py
```python
from lmbr_install_context import LmbrInstallContext
from build_configurations import PLATFORM_MAP
class RunUnitTestContext(LmbrInstallContext):
fun = 'run_unit_test'
group_name = 'run_test'
def run_unit_test(self, **kw):
"""
Creates a run test task
"""
self.process_restricted_settings(kw)
if self.is_platform_and_config_valid(**kw):
self(features='unittest_{}'.format(self.platform), group=self.group_name)
for platform_name, platform in PLATFORM_MAP.items():
for configuration in platform.get_configuration_names():
configuration_details = platform.get_configuration(configuration)
if not configuration_details.is_test:
# Skip any non-test configurations
continue
platform_config_key = '{}_{}'.format(platform_name, configuration)
# Create new class to execute run_test command with variant
class_attributes = {
'cmd' : 'run_{}'.format(platform_config_key),
'variant' : platform_config_key,
'platform' : platform_name,
'config' : configuration,
}
subclass = type('{}{}RunUnitTestContext'.format(platform_name.title(), configuration.title()), (RunUnitTestContext,), class_attributes)
```
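For reference, the loop above registers one waf command per test configuration; a tiny illustration of the command name it produces (the platform and configuration values here are hypothetical):

```python
# Illustration only -- mirrors how the 'cmd' attribute is built in the loop above.
platform_name = 'win_x64_vs2017'   # hypothetical platform
configuration = 'debug_test'       # hypothetical test configuration
platform_config_key = '{}_{}'.format(platform_name, configuration)
print('run_{}'.format(platform_config_key))  # -> run_win_x64_vs2017_debug_test
```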
#### File: waf-1.7.13/lmbrwaflib/unit_test_copy_task.py
```python
import pytest
from copy_tasks import preprocess_pathlen_for_windows
@pytest.mark.parametrize(
"input, expected", [
('c:\\blah\\blah\\blah\\blah\\blah\\blah\\blah', 'c:\\blah\\blah\\blah\\blah\\blah\\blah\\blah'), # short enough expect same path
(u'\\\\?\\c:\\blah\\blah\\blah\\blah\\blah\\blah\\blah', u'\\\\?\\c:\\blah\\blah\\blah\\blah\\blah\\blah\\blah'), # unc already, expect same unc path
('c:\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah', u'\\\\?\\c:\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah\\blah'), # too long expect a unc path
])
def test_preprocess_pathlen_for_windows(input, expected):
actual = preprocess_pathlen_for_windows(input)
assert expected == actual
```
#### File: waf-1.7.13/lmbrwaflib/unit_test_cry_utils.py
```python
from waflib import Utils, Errors, Configure
from cry_utils import split_comma_delimited_string, get_waf_host_platform, read_file_list, get_output_folders
import json
import utils
import unit_test
import pytest
import os
@pytest.fixture
def fake_waf_context(tmpdir):
if tmpdir:
base_path = str(tmpdir.realpath())
else:
base_path = None
yield unit_test.FakeContext(base_path)
@pytest.mark.parametrize(
"input_param,expected,enforce_uniqueness",[
pytest.param("a,b,c,d,e,f,b,d,e,g", ['a', 'b', 'c', 'd', 'e', 'f', 'b', 'd', 'e', 'g'], False, id="NoTrimNoEnforceUniqueness"),
pytest.param("a, b, c,d,e,f,b,d,e,g,a ,b,c ", ['a', 'b', 'c', 'd', 'e', 'f', 'b', 'd', 'e', 'g', 'a', 'b', 'c'], False, id="TrimNoEnforceUniqueness"),
pytest.param("a,b,c,d,e,f,b,d,e,g", ['a', 'b', 'c', 'd', 'e', 'f', 'g'], True, id="NoTrimEnforceUniqueness"),
pytest.param("a, b, c,d,e,f,b,d,e,g,a ,b,c ", ['a', 'b', 'c', 'd', 'e', 'f', 'g'], True, id="TrimEnforceUniqueness"),
pytest.param(" , , ,", [], False, id="EmptyCommasEnforceUniqueness"),
pytest.param(" , , ,", [], True, id="EmptyCommasNoEnforceUniqueness"),
pytest.param(" ", [], False, id="BlankStringEnforceUniqueness"),
pytest.param(" ", [], True, id="BlankStringNoEnforceUniqueness"),
pytest.param("", [], False, id="EmptyStringEnforceUniqueness"),
pytest.param("", [], True, id="EmptyStringNoEnforceUniqueness"),
])
def test_SplitCommaDelimitedString_ValidInputPermutations_Success(input_param, expected, enforce_uniqueness):
result = split_comma_delimited_string(input_str=input_param,
enforce_uniqueness=enforce_uniqueness)
assert result == expected
@pytest.fixture
def get_waf_host_platform_runner(mock_versioned_platform):
def _make_get_unversioned_sys_platform():
return mock_versioned_platform
old_func = Utils.unversioned_sys_platform
Utils.unversioned_sys_platform = _make_get_unversioned_sys_platform
yield
Utils.unversioned_sys_platform = old_func
@pytest.mark.parametrize(
"mock_versioned_platform,expected_unversioned_platform",[
pytest.param('win32', 'win_x64', id="get_waf_host_platform()=='win_x64'"),
pytest.param('darwin', 'darwin_x64', id="get_waf_host_platform()=='darwin_x64'"),
pytest.param('linux', 'linux_x64', id="get_waf_host_platform()=='linux_x64'"),
])
def test_GetWafHostPlatform_ValidSupportedPlatforms_Success(mock_versioned_platform, expected_unversioned_platform, fake_waf_context, get_waf_host_platform_runner):
result = get_waf_host_platform(fake_waf_context)
assert expected_unversioned_platform == result
@pytest.mark.parametrize(
"mock_versioned_platform", [
pytest.param('atari2600', id="get_waf_host_platform() [exception]")
]
)
def test_GetWafHostPlatform_UnsupportedPlatform_Exception(mock_versioned_platform, fake_waf_context, get_waf_host_platform_runner):
with pytest.raises(Errors.WafError):
get_waf_host_platform(fake_waf_context)
@pytest.mark.parametrize(
"input_waf_file_list, sample_files, dynamic_globbing, expected_results", [
pytest.param( # Basic recursive for all files No Dynamic Globbing
{ # Input waf_file content
"none": {
"root": [
"**/*"
]
}
},
# Sample Data
["a.cpp",
"b.cpp",
"includes/a.h",
"includes/b.h"],
# Enable dynamic globbing?
False,
# Expected result
{
'none': {
'root': [
'a.cpp',
'b.cpp',
'test.waf_files'
],
'includes': [
'includes/a.h',
'includes/b.h'
]
}
},
# Id
id="BasicRecursiveForAllFiles"),
pytest.param( # Basic recursive for all files Dynamic Globbing
{
"none": {
"root": [
"**/*"
]
}
},
# Sample Data
["a.cpp",
"b.cpp",
"includes/a.h",
"includes/b.h"],
# Enable dynamic globbing?
True,
# Expected result
{
'none': {
'root': [
'a.cpp',
'b.cpp',
'test.waf_files'
],
'includes': [
'includes/a.h',
'includes/b.h'
]
}
},
# Id
id="BasicRecursiveForAllFilesDynamicGlobbing"),
pytest.param( # Recursive search for .cpp/.h only
{
"none": {
"root": [
"**/*.cpp",
"**/*.h"
]
}
},
# Sample Data
["a.cpp",
"b.cpp",
"a.ignore",
"includes/a.h",
"includes/b.h",
"ignore/b.ignore"],
# Enable dynamic globbing?
False,
# Expected result
{
'none': {
'root': [
'a.cpp',
'b.cpp'
],
'includes': [
'includes/a.h',
'includes/b.h'
]
}
},
# Id
id="RecursiveSearchForCppAndHOnly"),
pytest.param( # Search using advanced search pattern and rules
{
"none": {
"root": [
{
"pattern": "**/*",
"excl": "*.waf_files"
}
]
}
},
# Sample Data
["a.cpp",
"b.cpp",
"includes/a.h",
"includes/b.h"],
# Enable dynamic globbing?
False,
# Expected result
{
'none': {
'root': [
'a.cpp',
'b.cpp'
],
'includes': [
'includes/a.h',
'includes/b.h'
]
}
},
# Id
id="SimpleNestedLevelAllExcludeThroughCustomPattern"),
pytest.param( # SingleAndNestedPatterns
{
"none": {
"root": [
"*.cpp",
"*.h"
],
"single": [
"single/*.cpp",
"single/*.h"
],
"nested": [
"nested/**/*.cpp",
"nested/**/*.h"
]
}
},
# Sample Data
["a.cpp",
"b.cpp",
"ignore/a.h",
"ignore/b.h",
"single/s1_a.cpp",
"single/s1_b.cpp",
"single/ignore/s1_a.cpp",
"nested/n1_a.cpp",
"nested/n1_b.cpp",
"nested/include/a.h",
"nested/include/b.h",
"ignore/ignore_a.cpp",
"ignore/ignore_b.cpp"],
# Enable dynamic globbing?
False,
# Expected result
{
'none': {
'nested/nested': [
'nested/n1_a.cpp',
'nested/n1_b.cpp'
],
'nested/nested/include': [
'nested/include/a.h',
'nested/include/b.h'
],
'single': [
'single/s1_a.cpp',
'single/s1_b.cpp'
],
'root': [
'a.cpp',
'b.cpp'
]
}
},
# Id
id="SingleAndNestedLevelsSpecifiedTypes")
]
)
def test_ReadFileList_SimpleGlobPatternNonDynamic_Success(fake_waf_context, mock_globbing_files, tmpdir, input_waf_file_list, sample_files, dynamic_globbing, expected_results):
# Arrange
def _mock_is_option_true(option):
assert option == 'enable_dynamic_file_globbing'
return dynamic_globbing
fake_waf_context.is_option_true = _mock_is_option_true
# Act
try:
old_config_context = Configure.ConfigurationContext
Configure.ConfigurationContext = unit_test.FakeContext
result = read_file_list(fake_waf_context, mock_globbing_files)
finally:
Configure.ConfigurationContext = old_config_context
# Assert
bintemp_path = fake_waf_context.bintemp_node.abspath()
src_code_path = fake_waf_context.path
expected_cached_waf_files = os.path.join(bintemp_path, src_code_path.name, mock_globbing_files)
if not dynamic_globbing:
assert os.path.isfile(expected_cached_waf_files)
cached_waf_file_result = utils.parse_json_file(expected_cached_waf_files)
assert cached_waf_file_result == expected_results
else:
assert not os.path.isfile(expected_cached_waf_files)
assert result == expected_results
@pytest.fixture()
def mock_globbing_files(fake_waf_context, tmpdir, input_waf_file_list, sample_files):
# We need the path relative to the tmpdir in order to write the testing temp data
path_abs = fake_waf_context.path.abspath()
tmpdir_path = str(tmpdir.realpath())
path_rel = os.path.relpath(path_abs, tmpdir_path)
# Create the temp 'test.waf_files'
test_file_waffiles = "test.waf_files"
file_list_path = os.path.normpath(os.path.join(path_rel, test_file_waffiles))
tmpdir.ensure(file_list_path)
waf_file_pypath = tmpdir.join(file_list_path)
json_file_content = json.dumps(input_waf_file_list,
sort_keys=True,
separators=(',', ': '),
indent=4)
waf_file_pypath.write(json_file_content)
# Create the sample files relative to the 'path_rel' in the temp folder, which should be where the test waf_files
# file will reside (and all files are supposed to be relative)
for sample_file in sample_files:
sample_file_target_path = os.path.normpath(os.path.join(path_rel, sample_file))
tmpdir.ensure(sample_file_target_path)
sample_file_pypath = tmpdir.join(sample_file_target_path)
sample_file_pypath.write("self.path = {}".format(sample_file_target_path))
yield test_file_waffiles
tmpdir.remove(ignore_errors=True)
@pytest.mark.parametrize(
"platform,configuration",[
pytest.param('win_x64_vs2017', 'profile')
])
def test_GetOutputFolders_CaseMismatch_Success(platform, configuration, fake_waf_context):
# Retrieve the path of the output folders
for original_path in get_output_folders(fake_waf_context, platform, configuration):
parent_path,dir_name = os.path.split(original_path.abspath())
# Create the directory with the wrong casing
wrong_casing = dir_name.lower()
if wrong_casing == dir_name:
wrong_casing = dir_name.upper()
if os.path.exists(original_path.abspath()):
os.remove(original_path.abspath())
os.makedirs(os.path.join(parent_path, wrong_casing))
# If the original path does not exist, then we have a case sensitive OS and can just pass
if not os.path.exists(original_path.abspath()):
return
# Retrieve the output folders again and verify paths
for verify_path in get_output_folders(fake_waf_context, platform, configuration):
parent_path,dir_name = os.path.split(verify_path.abspath())
assert dir_name in os.listdir(parent_path)
```
#### File: waf-1.7.13/lmbrwaflib/unit_test_incredibuild.py
```python
import utils
import incredibuild
import unit_test
import pytest
import os
@pytest.fixture
def fake_waf_context(tmpdir):
if tmpdir:
base_path = str(tmpdir.realpath())
else:
base_path = None
yield unit_test.FakeContext(base_path)
@pytest.mark.parametrize(
"input_tool_elements,expected_file_hash", [
pytest.param([[]], "d0a710f30220392db25a22f5a69faac0"),
pytest.param([[
'<Tool Filename="value1"/>']
], "9c3d9373d224d3b9024f15e7e2a60afb"),
pytest.param([[
'<Tool Filename="value1"/>',
'<Tool Filename="value2"/>'
]], "344a67cc353c1dd16dda6cd6a6cad446"),
pytest.param([[
'<Tool Filename="value1"/>'
],
[
'<Tool Filename="value1"/>'
]], "ad809694895f173b2b2a054c81f38663"),
])
def test_GenerateIbProfile_Permutations_Success(tmpdir, fake_waf_context, input_tool_elements, expected_file_hash):
def _mock_generator_ib_profile_tool_elements():
return input_tool_elements
fake_waf_context.generate_ib_profile_tool_elements = _mock_generator_ib_profile_tool_elements
tmpdir.ensure('dev/BinTemp', dir=True)
incredibuild.generate_ib_profile_xml(fake_waf_context)
result_profile_xml_target = os.path.join(fake_waf_context.get_bintemp_folder_node().abspath(), 'profile.xml')
actual_hash = utils.calculate_file_hash(result_profile_xml_target)
assert actual_hash == expected_file_hash
def test_GenerateIbProfile_NoOverwrite_Success(fake_waf_context):
original_os_path_isfile = os.path.isfile
original_calculate_string_hash = utils.calculate_string_hash
original_calculate_file_hash = utils.calculate_file_hash
try:
def _mock_generator_ib_profile_tool_elements():
return [[]]
fake_waf_context.generate_ib_profile_tool_elements = _mock_generator_ib_profile_tool_elements
def _mock_is_file(path):
return True
os.path.isfile = _mock_is_file
def _mock_calculate_string_hash(content):
return "HASH"
utils.calculate_string_hash = _mock_calculate_string_hash
def _mock_calculate_file_hash(filepath):
return "HASH"
utils.calculate_file_hash = _mock_calculate_file_hash
incredibuild.generate_ib_profile_xml(fake_waf_context)
result_profile_xml_target = os.path.join(fake_waf_context.get_bintemp_folder_node().abspath(), 'profile.xml')
finally:
os.path.isfile = original_os_path_isfile
utils.calculate_string_hash = original_calculate_string_hash
utils.calculate_file_hash = original_calculate_file_hash
assert not os.path.isfile(result_profile_xml_target)
```
#### File: waf-1.7.13/lmbrwaflib/unit_test_settings_manager.py
```python
import pytest, sys
from waflib import Logs, Errors, Utils
from waflib.Configure import ConfigurationContext
from waflib.Build import BuildContext
import settings_manager
import utils
import json
import os
import copy
class MockSettingsManagerConfigureContext(ConfigurationContext):
""" Mock context class based on ConfigurationContext"""
result_by_attribute = {}
def __init__(self, **kw):
super(ConfigurationContext, self).__init__(**kw)
class MockSettingsManagerBuildContext(BuildContext):
""" Mock context class based on BuildContext"""
result_by_attribute = {}
def __init__(self, **kw):
super(BuildContext, self).__init__(**kw)
@pytest.fixture()
def test_settings_manager_context_for_override_report(is_configure_context, is_option_name, is_option_value, override_settings_attributes):
if is_configure_context:
test_context = MockSettingsManagerConfigureContext(run_dir=sys.executable)
else:
test_context = MockSettingsManagerBuildContext(run_dir=sys.executable)
setattr(test_context,'cmd', 'build_unit_test')
def _stub_check_is_option_true(option_name):
if not is_option_name:
return True
else:
return is_option_value
def _stub_override_settings_report(is_configure, is_build, attribute, default_value, settings_value):
is_value_overridden = default_value != settings_value
test_context.result_by_attribute[attribute] = (is_configure, is_build, is_value_overridden)
setattr(test_context, 'is_option_true', _stub_check_is_option_true)
if override_settings_attributes:
for override_settings_attribute in override_settings_attributes:
report_settings_override_func_name = 'report_settings_{}'.format(override_settings_attribute)
setattr(test_context, report_settings_override_func_name, _stub_override_settings_report)
return test_context
@pytest.fixture()
def mocked_lumberyard_settings(test_default_settings_map, test_settings_map):
original_default_settings_map = settings_manager.LUMBERYARD_SETTINGS.default_settings_map
original_settings_map = settings_manager.LUMBERYARD_SETTINGS.settings_map
settings_manager.LUMBERYARD_SETTINGS.settings_map = test_settings_map
settings_manager.LUMBERYARD_SETTINGS.default_settings_map = test_default_settings_map
yield
settings_manager.LUMBERYARD_SETTINGS.settings_map = original_settings_map
settings_manager.LUMBERYARD_SETTINGS.default_settings_map = original_default_settings_map
RECURSIVE_OPT = 'internal_dont_check_recursive_execution'
@pytest.mark.parametrize(
"is_configure_context, is_option_name, is_option_value, override_settings_attributes, test_default_settings_map, test_settings_map, expected_result_map", [
pytest.param(True, RECURSIVE_OPT, True, [], {}, {}, {}, id="ReportSettingsSkipRecursiveExecution"),
pytest.param(True, RECURSIVE_OPT, False,
[],
{'attr': 'default'},
{'attr': 'default'},
{},
id="ReportSettingsConfigureContextNoOverrideNoReporter"),
pytest.param(True, RECURSIVE_OPT, False,
['attr1'],
{'attr1': 'default', 'attr2': 'default2'},
{'attr1': 'default', 'attr2': 'default2'},
{'attr1': (True, False, False)},
id="ReportSettingsConfigureContextNoOverrideWithSomeReporters"),
pytest.param(True, RECURSIVE_OPT, False,
['attr1', 'attr2'],
{'attr1': 'default', 'attr2': 'default2'},
{'attr1': 'default', 'attr2': 'default2'},
{'attr1': (True, False, False),
'attr2': (True, False, False)},
id="ReportSettingsConfigureContextNoOverrideWithAllReporter"),
pytest.param(True, RECURSIVE_OPT, False,
['attr1', 'attr2'],
{'attr1': 'default', 'attr2': 'default2'},
{'attr1': 'override', 'attr2': 'override2'},
{'attr1': (True, False, True),
'attr2': (True, False, True)},
id="ReportSettingsConfigureContextOverrideWithAllReporter"),
pytest.param(False, RECURSIVE_OPT, False,
['attr1', 'attr2'],
{'attr1': 'default', 'attr2': 'default2'},
{'attr1': 'default', 'attr2': 'default2'},
{'attr1': (False, True, False),
'attr2': (False, True, False)},
id="ReportSettingsBuildContextNoOverrideWithAllReporter"),
])
def test_ReportSettingsOverrides_ValidSettingsScenarios_Success(test_settings_manager_context_for_override_report, mocked_lumberyard_settings, expected_result_map):
settings_manager.report_settings_overrides(test_settings_manager_context_for_override_report)
assert len(test_settings_manager_context_for_override_report.result_by_attribute) == len(expected_result_map)
for expected_key in expected_result_map.keys():
assert expected_key in test_settings_manager_context_for_override_report.result_by_attribute
assert expected_result_map[expected_key] == test_settings_manager_context_for_override_report.result_by_attribute[expected_key]
@pytest.mark.parametrize(
"input_messages", [
pytest.param(["messageA"], id="PrintSettingsOverrideSingleMessage"),
pytest.param(["messageA", "messageB"], id="PrintSettingsOverrideMultipleMessagesUnique"),
pytest.param(["messageA", "messageB", "messageA", "messageB"], id="PrintSettingsOverrideMultipleMessagesDuplicate")
])
def test_PrintSettingsOverrideMessage_PrintVariousMessages_UniqueMessagesPrinted(input_messages):
# Arrange
printed_messages = []
def _stub_log_pprint(color, msg):
printed_messages.append(msg)
old_pprint = Logs.pprint
Logs.pprint = _stub_log_pprint
test_context = MockSettingsManagerConfigureContext(run_dir=sys.executable)
# Act
for input_message in input_messages:
settings_manager.print_settings_override_message(test_context, input_message)
Logs.pprint = old_pprint
# Assert
unique_messages = set(['[SETTINGS] {}'.format(input_message) for input_message in input_messages])
assert len(printed_messages) == len(unique_messages)
for printed_message in printed_messages:
assert printed_message in unique_messages
@pytest.mark.parametrize(
"attribute, default_value, settings_value, expected_printed", [
pytest.param('attr', 'default', 'default', False, id="ReportSettingsValueUnchanged"),
pytest.param('attr', 'default', 'override', True, id="ReportSettingsValueChanged"),
])
def test_DefaultReportSettingsOverride_VariousValues_Success(attribute, default_value, settings_value, expected_printed):
# Arrange
printed_messages = []
def _stub_print_settings_override_message(msg):
printed_messages.append(msg)
test_context = MockSettingsManagerConfigureContext(run_dir=sys.executable)
setattr(test_context, 'print_settings_override_message', _stub_print_settings_override_message)
# Act
settings_manager.default_report_settings_override(test_context, attribute, default_value, settings_value)
# Assert
printed = len(printed_messages)
assert expected_printed == printed
@pytest.mark.parametrize(
"long_form, short_form, cmd_line, expected", [
pytest.param('--use-incredibuild', '-i', 'lmbr_waf build -i True', 'True', id="UseShortFormValid"),
pytest.param('--use-incredibuild', '-i', 'lmbr_waf build --use-incredibuild=True', 'True', id="UseLongFormValid"),
pytest.param('--use-incredibuild', '-i', 'lmbr_waf build -i=True', type(Errors.WafError), id="UseShortFormErrorWithValue"),
pytest.param('--use-incredibuild', '-i', 'lmbr_waf build -i=', type(Errors.WafError), id="UseShortFormErrorWithOutValue"),
pytest.param('--use-incredibuild', '-i', 'lmbr_waf build -i True', 'True', id="UseShortFormValid"),
pytest.param('--use-incredibuild', '-i', 'lmbr_waf build --use-incredibuild=True --foo-arg=False', 'True', id="UseLongFormValidWithTrailingArgs"),
pytest.param('--use-incredibuild', '-i', 'lmbr_waf build -i=True --foo-arg=False', type(Errors.WafError), id="UseShortFormErrorWithValueWithTrailingArgs"),
pytest.param('--use-incredibuild', '-i', 'lmbr_waf build -i= --foo-arg=False', type(Errors.WafError), id="UseShortFormErrorWithOutValueWithTrailingArgs"),
pytest.param('--use-incredibuild', '-i', 'lmbr_waf build --use-incredibuild=', '', id="UseLongFormValidSetToEmpty"),
pytest.param('--use-incredibuild', '-i', 'lmbr_waf build --use-incredibuild= --foo-arg=False', '', id="UseLongFormValidSetToEmptyWithTrailingArgs"),
])
def test_SettingsApplyOptionalOverride_Success(long_form, short_form, cmd_line, expected):
arguments = cmd_line.split()
if isinstance(expected, str):
result = settings_manager.Settings.apply_optional_override(long_form=long_form,
short_form=short_form,
arguments=arguments)
assert expected == result
elif isinstance(expected, type(Errors.WafError)):
with pytest.raises(Errors.WafError):
settings_manager.Settings.apply_optional_override(long_form=long_form,
short_form=short_form,
arguments=arguments)
@pytest.mark.parametrize(
"name, is_monolithic, is_test, is_server, third_party_config, has_base_config, test_default_settings_map, test_settings_map, expected_name", [
pytest.param('test_a', False, False, False, 'debug', False, {},{'output_folder_ext_test_a': 'ext_test_a'}, 'test_a', id="ConfigNoMonolithicNoTestNoServerNoBaseConfig"),
pytest.param('test_b', True, False, False, 'debug', False, {},{'output_folder_ext_test_b': 'ext_test_b'}, 'test_b', id="ConfigMonolithicNoTestNoServerNoBaseConfig"),
pytest.param('test_c', False, True, False, 'release', False, {}, {'output_folder_ext_test_c': 'ext_test_c'}, 'test_c_test', id="ConfigNoMonolithicTestNoServerNoBaseConfig"),
pytest.param('test_d', False, False, True, 'release', False, {}, {'output_folder_ext_test_d': 'ext_test_d'}, 'test_d_dedicated', id="ConfigNoMonolithicNoTestServerNoBaseConfig"),
pytest.param('test_e', False, True, True, 'debug', False, {}, {'output_folder_ext_test_e': 'ext_test_e'}, 'test_e_test_dedicated', id="ConfigNotMonolithicTestServerNoBaseConfig"),
pytest.param('test_f', False, False, False, 'release', True, {}, {'output_folder_ext_test_f': 'ext_test_f'}, 'test_f', id="ConfigNoMonolithicNoTestNoServerBaseConfig")
]
)
def test_ConfigurationSettings_ValidPermutations_Success(mocked_lumberyard_settings, name, is_monolithic, is_test, is_server, third_party_config, has_base_config, test_default_settings_map, test_settings_map, expected_name):
base_config_name = 'base_{}'.format(name)
base_config = settings_manager.ConfigurationSettings(base_config_name, is_monolithic, is_test, third_party_config,None) if has_base_config else None
test_config = settings_manager.ConfigurationSettings(name, is_monolithic, is_test, third_party_config, base_config)
expected_folder_ext = 'ext_{}'.format(name)
assert test_config.name == name
assert test_config.is_monolithic == is_monolithic
assert test_config.third_party_config == third_party_config
config_name = test_config.build_config_name(is_test, is_server)
assert config_name == expected_name
output_ext = test_config.get_output_folder_ext()
assert output_ext == expected_folder_ext
assert not test_config.does_configuration_match("__foo__")
assert test_config.does_configuration_match(name)
if has_base_config:
assert test_config.does_configuration_match(base_config_name)
assert not test_config.does_configuration_match(base_config_name, False)
@pytest.mark.parametrize(
"src_dict, configuration, expected", [
pytest.param( {
"INCLUDES": [
"include_a"
],
"DEFINES": [
"define_a"
]
},
"debug",
{
'debug': {
'INCLUDES': [
'include_a'
],
'DEFINES': [
'define_a'
]
}
},
id="SimpleDebugListEnvironments"),
pytest.param( {
"SPECIAL_NAME_A": "include_a",
"DEFINES": [
"define_a"
]
},
"profile",
{
'profile': {
"SPECIAL_NAME_A": "include_a",
'DEFINES': [
'define_a'
]
}
},
id="SimpleProfileEnvironments"),
pytest.param( {
"?CONDITION1?:INCLUDES": [
"include_a"
],
"@CONDITION2@:DEFINES": [
"define_a"
]
},
"debug",
{
'debug': {
'@CONDITION2@:DEFINES': [
'define_a'
],
'?CONDITION1?:INCLUDES': [
'include_a'
]
}
},
id="ConditionalDebugListEnvironments"),
pytest.param( {
"?CONDITION1?:SPECIAL_NAME_A": "include_a",
"DEFINES": [
"define_a"
]
},
"profile",
{
'profile': {
'?CONDITION1?:SPECIAL_NAME_A': 'include_a',
'DEFINES': [
'define_a'
]
}
},
id="ConditionalSimpleProfileEnvironments"),
]
)
def test_ProcessEnvDictValues_ValidInputs_Success(src_dict, configuration, expected):
env_result = {}
settings_manager.process_env_dict_values(src_dict, configuration, env_result)
assert env_result == expected
@pytest.mark.parametrize(
"src_dict, expected_configurations", [
pytest.param( {
"env": {
"name": "all"
}
},
['_'],
id="SimpleAllEnv"),
pytest.param( {
"env": {
"name": "all"
},
"env/debug": {
"name": "debug"
}
},
['_', 'debug'],
id="AllAndDebugEnv"),
pytest.param( {
"env": {
"name": "all"
},
"env/debug": {
"name": "debug"
},
"settings": {
"name": "dont_include"
}
},
['_', 'debug'],
id="AllAndDebugSkipNonEnv"),
]
)
def test_ProcessEnvDict_ValidInputs_Success(src_dict, expected_configurations):
def _mock_process_env_dict(env_dict, configuration, processed_env_dict):
assert configuration in expected_configurations
if configuration=='_':
check_key = 'env'
else:
check_key = 'env/{}'.format(configuration)
assert src_dict[check_key] == env_dict
old_process_env_dict = settings_manager.process_env_dict_values
settings_manager.process_env_dict_values = _mock_process_env_dict
try:
result = {}
settings_manager.process_env_dict(src_dict, result)
finally:
settings_manager.process_env_dict_values = old_process_env_dict
@pytest.mark.parametrize(
"source_attr_dict, merged_dict, expected_result, expected_warning", [
pytest.param( {
'NEW_SETTING': 'new'
},
{
'OLD_SETTING': 'old'
},
{
'NEW_SETTING': 'new',
'OLD_SETTING': 'old'
},
False,
id="MergeNoOverwrite"),
pytest.param( {
'NEW_SETTING': 'new'
},
{
'NEW_SETTING': 'new'
},
{
'NEW_SETTING': 'new'
},
False,
id="MergeOverwriteNoChange"),
pytest.param( {
'NEW_SETTING': 'new'
},
{
'NEW_SETTING': 'old'
},
{
'NEW_SETTING': 'new'
},
True,
id="MergeOverwrite")
]
)
def test_MergeAttributesGroup_ValidInputs_Success(source_attr_dict, merged_dict, expected_result, expected_warning):
def _mock_log_warn(msg):
assert expected_warning
old_log_warn = Logs.warn
Logs.warn = _mock_log_warn
try:
settings_manager.merge_attributes_group("includes_file", source_attr_dict, merged_dict)
finally:
Logs.warn = old_log_warn
assert merged_dict == expected_result
@pytest.mark.parametrize(
"merge_settings_dict, setting_group_name, settings_value_dicts, expected_result", [
pytest.param( {},
'group',
[],
{
'group': []
},
id="EmptyTest"),
pytest.param( {
'group': []
},
'group',
[],
{
'group': []
},
id="EmptyExistingGroupTest"),
pytest.param( {
},
'group',
[{
"short_form": "-f",
"long_form": "--foo",
"attribute": "foo",
"default_value": "foo",
"description": "Use Foo"
}],
{
'group': [
{
"short_form": "-f",
"long_form": "--foo",
"attribute": "foo",
"default_value": "foo",
"description": "Use Foo"
},
]
},
id="NewGroupItem"),
pytest.param( {
'group': [
{
"short_form": "-n",
"long_form": "--not-foo",
"attribute": "not_foo",
"default_value": "not_foo",
"description": "Use Not Foo"
},
]
},
'group',
[{
"short_form": "-f",
"long_form": "--foo",
"attribute": "foo",
"default_value": "foo",
"description": "Use Foo"
}],
{
'group': [
{
"short_form": "-n",
"long_form": "--not-foo",
"attribute": "not_foo",
"default_value": "not_foo",
"description": "Use Not Foo"
},
{
"short_form": "-f",
"long_form": "--foo",
"attribute": "foo",
"default_value": "foo",
"description": "Use Foo"
},
]
},
id="NewGroupItemExistingGroup"),
pytest.param( {
'group': [
{
"short_form": "-f",
"long_form": "--foo",
"attribute": "foo",
"default_value": "old_foo",
"description": "Use Old Foo"
},
]
},
'group',
[{
"short_form": "-f",
"long_form": "--foo",
"attribute": "foo",
"default_value": "new_foo",
"description": "Use New Foo"
}],
{
'group': [
{
"short_form": "-f",
"long_form": "--foo",
"attribute": "foo",
"default_value": "new_foo",
"description": "Use New Foo"
},
]
},
id="ReplaceExistingGroupItem")
])
def test_MergeSettingsGroup_ValidInputs_Success(merge_settings_dict, setting_group_name, settings_value_dicts, expected_result):
settings_manager.merge_settings_group('includes_file', merge_settings_dict, setting_group_name, settings_value_dicts)
assert merge_settings_dict == expected_result
@pytest.mark.parametrize(
"settings_include_file, include_to_settings_dict, expected_env_dict, expected_settings_dict, expected_attributes_dict", [
pytest.param( 'testpath/test_settings.json',
{
'testpath/test_settings.json': {}
},
{},
{},
{},
id="EmptySettings"),
pytest.param( 'testpath/test_settings.json',
{
'testpath/test_settings.json': {
'env': {
'MYKEY': 'MYVALUE'
}
}
},
{
'env': {
'MYKEY': 'MYVALUE'
}
},
{},
{},
id="ProcessEnv"),
pytest.param( 'testpath/test_settings.json',
{
'testpath/test_settings.json': {
"settings": {
"MYSETTING": "SETTINGVALUE"
},
"env": {
"MYKEY": "MYVALUE"
},
"attributes": {
"MYATTRIBUTE": "ATTRIBUTEVALUE"
}
}
},
{
'env': {
'MYKEY': 'MYVALUE'
}
},
{
'MYSETTING': 'SETTINGVALUE'
},
{
'MYATTRIBUTE': 'ATTRIBUTEVALUE'
},
id="ProcessEnvAttributesSettings"),
pytest.param( 'testpath/test_settings.json',
{
'testpath/include_test_settings.json': {
"settings": {
"MYSETTING": "SETTINGVALUE"
},
"env": {
"MYKEY": "MYVALUE"
},
"attributes": {
"MYATTRIBUTE": "ATTRIBUTEVALUE"
}
},
'testpath/test_settings.json': {
"includes": [
'include_test_settings.json'
]
}
},
{
'env': {
'MYKEY': 'MYVALUE'
}
},
{
'MYSETTING': 'SETTINGVALUE'
},
{
'MYATTRIBUTE': 'ATTRIBUTEVALUE'
},
id="ProcessEnvAttributesSettingsFromInclude"),
]
)
def test_ProcessSettingsIncludeFile_ValidInputs_Success(settings_include_file,
include_to_settings_dict,
expected_env_dict,
expected_settings_dict,
expected_attributes_dict):
def _mock_read_common_config(input_file):
match_input_form = input_file.replace('\\', '/')
assert match_input_form in include_to_settings_dict
return include_to_settings_dict[match_input_form]
def _mock_process_env_dict(settings_include_dict, processed_env_dict):
for env_key, env_value in settings_include_dict.items():
if env_key.startswith('env'):
processed_env_dict[env_key] = env_value
def _mock_merge_settings_group(settings_include_file,
merge_settings_dict,
setting_group_name,
settings_value_dicts):
merge_settings_dict[setting_group_name] = settings_value_dicts
def _mock_merge_attributes_group(settings_include_file,
source_attributes_dict,
merge_attributes_dict):
if source_attributes_dict:
for key, value in source_attributes_dict.items():
merge_attributes_dict[key] = value
processed_env_dict = {}
processed_settings_dict = {}
processed_attributes_dict = {}
old_read_common_config = settings_manager.read_common_config
old_process_env_dict = settings_manager.process_env_dict
old_merge_settings_group = settings_manager.merge_settings_group
old_merge_attributes_group = settings_manager.merge_attributes_group
settings_manager.read_common_config = _mock_read_common_config
settings_manager.process_env_dict = _mock_process_env_dict
settings_manager.merge_settings_group = _mock_merge_settings_group
settings_manager.merge_attributes_group = _mock_merge_attributes_group
try:
settings_manager.process_settings_include_file(settings_include_file,
processed_env_dict,
processed_settings_dict,
processed_attributes_dict)
finally:
settings_manager.read_common_config = old_read_common_config
settings_manager.process_env_dict = old_process_env_dict
settings_manager.merge_settings_group = old_merge_settings_group
settings_manager.merge_attributes_group = old_merge_attributes_group
assert expected_env_dict == processed_env_dict
assert expected_settings_dict == processed_settings_dict
assert expected_attributes_dict == processed_attributes_dict
@pytest.mark.parametrize(
"platform_settings_file, platform_file_to_settings_dict,expected_platform,expected_display_name,expected_hosts,"
"expected_aliases,expected_has_server,expected_has_test,expected_is_monolithic,expected_enabled,"
"expected_additional_modules,expected_needs_java, expected_env_dict, expected_settings_dict, "
"expected_attributes_dict", [
pytest.param( 'testpath/test_settings.json',
{
'testpath/test_settings.json': {
'platform': 'test_platform',
'hosts': 'win32'
}
},
'test_platform', # expected_platform,
'test_platform', # expected_display_name, (default)
'win32', # expected_hosts,
set(), # expected_aliases, (default)
False, # expected_has_server, (default)
False, # expected_has_test, (default)
False, # expected_is_monolithic, (default)
True, # expected_enabled, (default)
[], # expected_additional_modules, (default)
False, # expected_needs_java (default)
{}, # expected_env_dict (default)
{}, # expected_settings_dict (default)
{}, # expected_attributes_dict (default)
id="BasicDefaultSettings"
),
pytest.param( 'testpath/test_settings.json',
{
'testpath/test_settings.json': {
'platform': 'test_platform',
'display_name': 'display_test',
'hosts': 'win32',
'aliases': 'msvc',
'has_server': True,
'has_tests': True,
'is_monolithic': True,
'enabled': False,
'needs_java': True,
'modules': [
'module_test'
],
"settings": {
"MYSETTING": "SETTINGVALUE"
},
"env": {
"MYKEY": "MYVALUE"
},
"attributes": {
"MYATTRIBUTE": "ATTRIBUTEVALUE"
}
}
},
'test_platform', # expected_platform
'display_test', # expected_display_name
'win32', # expected_hosts
{'msvc'}, # expected_aliases
True, # expected_has_server
True, # expected_has_test
True, # expected_is_monolithic
False, # expected_enabled
['module_test'], # expected_additional_modules
True, # expected_needs_java (default)
{'env': {'MYKEY': 'MYVALUE'}}, # expected_env_dict (default)
{'MYSETTING': 'SETTINGVALUE'}, # expected_settings_dict (default)
{'MYATTRIBUTE': 'ATTRIBUTEVALUE'}, # expected_attributes_dict (default)
id="BasicTestingSettings"),
pytest.param( 'testpath/test_settings.json',
{
'testpath/test_settings.json': {
'platform': 'test_platform',
'display_name': 'display_test',
'hosts': 'win32',
'aliases': 'msvc',
'has_server': True,
'has_tests': True,
'is_monolithic': True,
'enabled': False,
'needs_java': True,
'modules': ['module_test'],
'includes': [
'test_includes.json'
]
},
'testpath/test_includes.json': {
"settings": {
"MYSETTING": "SETTINGVALUE"
},
"env": {
"MYKEY": "MYVALUE"
},
"attributes": {
"MYATTRIBUTE": "ATTRIBUTEVALUE"
}
}
},
'test_platform', # expected_platform
'display_test', # expected_display_name
'win32', # expected_hosts
{'msvc'}, # expected_aliases
True, # expected_has_server
True, # expected_has_test
True, # expected_is_monolithic
False, # expected_enabled
['module_test'], # expected_additional_modules
True, # expected_needs_java (default)
{'env': {'MYKEY': 'MYVALUE'}}, # expected_env_dict (default)
{'MYSETTING': 'SETTINGVALUE'}, # expected_settings_dict (default)
{'MYATTRIBUTE': 'ATTRIBUTEVALUE'}, # expected_attributes_dict (default)
id="BasicTestingSettingsFromIncludes")
]
)
def test_PlatformSettings_ValidInputs_Success(platform_settings_file, platform_file_to_settings_dict, expected_platform, expected_display_name,
expected_hosts, expected_aliases, expected_has_server, expected_has_test, expected_is_monolithic,
expected_enabled, expected_additional_modules, expected_needs_java, expected_env_dict,
expected_settings_dict, expected_attributes_dict):
def _mock_parse_json_file(input_file, ignore_comments):
match_input_form = input_file.replace('\\', '/')
assert match_input_form in platform_file_to_settings_dict
return platform_file_to_settings_dict[match_input_form]
def _mock_process_env_dict(settings_include_dict, processed_env_dict):
for env_key, env_value in settings_include_dict.items():
if env_key.startswith('env'):
processed_env_dict[env_key] = env_value
def _mock_merge_settings_group(settings_include_file,
merge_settings_dict,
setting_group_name,
settings_value_dicts):
merge_settings_dict[setting_group_name] = settings_value_dicts
def _mock_merge_attributes_group(settings_include_file,
source_attributes_dict,
merge_attributes_dict):
if source_attributes_dict:
for key, value in source_attributes_dict.items():
merge_attributes_dict[key] = value
old_parse_json_file = utils.parse_json_file
old_process_env_dict = settings_manager.process_env_dict
old_merge_settings_group = settings_manager.merge_settings_group
old_merge_attributes_group = settings_manager.merge_attributes_group
utils.parse_json_file = _mock_parse_json_file
settings_manager.process_env_dict = _mock_process_env_dict
settings_manager.merge_settings_group = _mock_merge_settings_group
settings_manager.merge_attributes_group = _mock_merge_attributes_group
try:
platform_settings = settings_manager.PlatformSettings(platform_settings_file)
finally:
utils.parse_json_file = old_parse_json_file
settings_manager.process_env_dict = old_process_env_dict
settings_manager.merge_settings_group = old_merge_settings_group
settings_manager.merge_attributes_group = old_merge_attributes_group
assert platform_settings.hosts == expected_hosts
assert platform_settings.aliases == expected_aliases
assert platform_settings.has_server == expected_has_server
assert platform_settings.has_test == expected_has_test
assert platform_settings.enabled == expected_enabled
assert platform_settings.additional_modules == expected_additional_modules
assert platform_settings.needs_java == expected_needs_java
assert platform_settings.env_dict == expected_env_dict
assert platform_settings.settings == expected_settings_dict
assert platform_settings.attributes == expected_attributes_dict
@pytest.fixture
def mock_settings(tmpdir, default_settings, user_settings_options, build_configurations, test_platform_name, test_platform_content):
def write_json_sample(input_dict, target_path):
json_file_content = json.dumps(input_dict,
sort_keys=True,
separators=(',', ': '),
indent=4)
target_path.write(json_file_content)
tmpdir_path = str(tmpdir.realpath())
tmpdir.ensure('_WAF_/settings/platforms', dir=True)
write_json_sample(default_settings, tmpdir.join('_WAF_/default_settings.json'))
tmpdir.join('_WAF_/user_settings.options').write(user_settings_options)
write_json_sample(build_configurations, tmpdir.join('_WAF_/settings/build_configurations.json'))
write_json_sample(test_platform_content, tmpdir.join('_WAF_/settings/platforms/platform.{}.json'.format(test_platform_name)))
write_json_sample({
"FileVersion": 1,
"LumberyardVersion": "0.0.0.0",
"LumberyardCopyrightYear": 2019
}, tmpdir.join('engine.json'))
def _mock_get_cwd():
return tmpdir_path
def _mock_unversioned_sys_platform():
return 'win32'
old_get_cwd = os.getcwd
old_unversioned_sys_platform = Utils.unversioned_sys_platform
os.getcwd = _mock_get_cwd
Utils.unversioned_sys_platform = _mock_unversioned_sys_platform
try:
settings = settings_manager.Settings()
finally:
os.getcwd = old_get_cwd
Utils.unversioned_sys_platform = old_unversioned_sys_platform
yield settings
tmpdir.remove(ignore_errors=True)
MIN_DEFAULT_SETTINGS = {
"General": [
{
"long_form": "--has-test-configs",
"attribute": "has_test_configs",
"default_value": "False",
"description": "Has Test Configs"
},
{
"long_form": "--has-server-configs",
"attribute": "has_server_configs",
"default_value": "False",
"description": "Has Server Configs"
}
]
}
def join_with_default(input):
combined_items = input.items() + MIN_DEFAULT_SETTINGS.items()
result = {}
for section_name, section_list in combined_items:
if section_name not in result:
result[section_name] = copy.deepcopy(section_list)
else:
for subsection in section_list:
result[section_name].append(copy.deepcopy(subsection))
return result
BASIC_TEST_CONFIGURATIONS = {
"configurations": {
"debug": {
"is_monolithic": False,
"has_test": True,
"third_party_config": "debug",
"default_output_ext": "Debug"
},
"profile": {
"is_monolithic": False,
"has_test": True,
"third_party_config": "release",
"default_output_ext": ""
},
"performance": {
"is_monolithic": True,
"has_test": False,
"third_party_config": "release",
"default_output_ext": "Performance"
},
"release": {
"is_monolithic": True,
"has_test": False,
"third_party_config": "release",
"default_output_ext": "Release"
}
}
}
@pytest.mark.parametrize(
"default_settings, user_settings_options, build_configurations, test_platform_name, test_platform_content, expected_settings", [
pytest.param( join_with_default({
"General": [
{
"long_form": "--foo",
"attribute": "foo",
"default_value": "Foo",
"description": "Use New Foo"
}
]
}),
'[General]\n'
'foo = Bar\n'
,
BASIC_TEST_CONFIGURATIONS,
'test_platform',
{
'platform': 'test_platform',
'hosts': 'win32',
"attributes": {
"default_folder_name": "BinTest"
}
},
{
'foo': 'Bar',
'out_folder_test_platform': 'BinTest'
}
)
]
)
def test_Settings_BasicTest_Success(tmpdir, mock_settings, default_settings, user_settings_options, build_configurations, test_platform_name, test_platform_content, expected_settings):
for expected_key, expected_value in expected_settings.items():
assert expected_key in mock_settings.settings_map
assert mock_settings.settings_map[expected_key] == expected_value
assert test_platform_name in mock_settings.platform_settings_map
```
#### File: waflib/extras/build_logs.py
```python
import atexit, sys, time, os, shutil, threading
from waflib import Logs, Context
# adding the logs under the build/ directory will clash with the clean/ command
try:
up = os.path.dirname(Context.g_module.__file__)
except AttributeError:
up = '.'
LOGFILE = os.path.join(up, 'logs', '%s.log' % time.strftime('%Y_%m_%d_%H_%M'))
wlock = threading.Lock()
class log_to_file(object):
def __init__(self, stream, fileobj, filename):
self.stream = stream
self.encoding = self.stream.encoding
self.fileobj = fileobj
self.filename = filename
self.is_valid = True
def replace_colors(self, data):
for x in Logs.colors_lst.values():
if isinstance(x, str):
data = data.replace(x, '')
return data
def write(self, data):
try:
wlock.acquire()
self.stream.write(data)
self.stream.flush()
if self.is_valid:
self.fileobj.write(self.replace_colors(data))
finally:
wlock.release()
def fileno(self):
return self.stream.fileno()
def flush(self):
self.stream.flush()
if self.is_valid:
self.fileobj.flush()
def isatty(self):
return self.stream.isatty()
def init(ctx):
global LOGFILE
filename = os.path.abspath(LOGFILE)
try:
os.makedirs(os.path.dirname(os.path.abspath(filename)))
except OSError:
pass
if hasattr(os, 'O_NOINHERIT'):
fd = os.open(LOGFILE, os.O_CREAT | os.O_TRUNC | os.O_WRONLY | os.O_NOINHERIT)
fileobj = os.fdopen(fd, 'w')
else:
fileobj = open(LOGFILE, 'w')
old_stderr = sys.stderr
# sys.stdout has already been replaced, so __stdout__ will be faster
#sys.stdout = log_to_file(sys.stdout, fileobj, filename)
#sys.stderr = log_to_file(sys.stderr, fileobj, filename)
sys.stdout = log_to_file(sys.__stdout__, fileobj, filename)
sys.stderr = log_to_file(sys.__stderr__, fileobj, filename)
# now mess with the logging module...
for x in Logs.log.handlers:
try:
stream = x.stream
except AttributeError:
pass
else:
if id(stream) == id(old_stderr):
x.stream = sys.stderr
def exit_cleanup():
try:
fileobj = sys.stdout.fileobj
except AttributeError:
pass
else:
sys.stdout.is_valid = False
sys.stderr.is_valid = False
fileobj.close()
filename = sys.stdout.filename
Logs.info('Output logged to %r' % filename)
# then copy the log file to "latest.log" if possible
up = os.path.dirname(os.path.abspath(filename))
try:
shutil.copy(filename, os.path.join(up, 'latest.log'))
except OSError:
# this may fail on windows due to processes spawned
#
pass
atexit.register(exit_cleanup)
```
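A minimal sketch of how this extra is typically enabled from a project's top-level wscript (the `tooldir` path is an assumption about where the extras are kept); once loaded, console output is mirrored into `logs/<timestamp>.log` and copied to `logs/latest.log` at exit:

```python
# Hypothetical wscript fragment: load the extra from the init() entry point
# so logging is wired up before any command runs.
def init(ctx):
    ctx.load('build_logs', tooldir='waflib/extras')
```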
#### File: waflib/extras/fc_xlf.py
```python
import re
from waflib import Utils,Errors
from waflib.Tools import fc,fc_config,fc_scan
from waflib.Configure import conf
from waflib.Tools.compiler_fc import fc_compiler
fc_compiler['aix'].insert(0, 'fc_xlf')
@conf
def find_xlf(conf):
"""Find the xlf program (will look in the environment variable 'FC')"""
fc = conf.find_program(['xlf2003_r', 'xlf2003', 'xlf95_r', 'xlf95', 'xlf90_r', 'xlf90', 'xlf_r', 'xlf'], var='FC')
fc = conf.cmd_to_list(fc)
conf.get_xlf_version(fc)
conf.env.FC_NAME='XLF'
@conf
def xlf_flags(conf):
v = conf.env
v['FCDEFINES_ST'] = '-WF,-D%s'
v['FCFLAGS_fcshlib'] = ['-qpic=small']
v['FCFLAGS_DEBUG'] = ['-qhalt=w']
v['LINKFLAGS_fcshlib'] = ['-Wl,-shared']
@conf
def xlf_modifier_platform(conf):
dest_os = conf.env['DEST_OS'] or Utils.unversioned_sys_platform()
xlf_modifier_func = getattr(conf, 'xlf_modifier_' + dest_os, None)
if xlf_modifier_func:
xlf_modifier_func()
@conf
def get_xlf_version(conf, fc):
"""Get the compiler version"""
cmd = fc + ['-qversion']
try:
out, err = conf.cmd_and_log(cmd, output=0)
except Errors.WafError:
conf.fatal('Could not find xlf %r' % cmd)
for v in (r"IBM XL Fortran.* V(?P<major>\d*)\.(?P<minor>\d*)",):
version_re = re.compile(v, re.I).search
match = version_re(out or err)
if match:
k = match.groupdict()
conf.env['FC_VERSION'] = (k['major'], k['minor'])
break
else:
conf.fatal('Could not determine the XLF version.')
def configure(conf):
conf.find_xlf()
conf.find_ar()
conf.fc_flags()
conf.fc_add_flags()
conf.xlf_flags()
conf.xlf_modifier_platform()
```
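A minimal wscript sketch for building Fortran through the generic `compiler_fc` detector (source and target names are placeholders); the `fc_compiler['aix'].insert(0, 'fc_xlf')` registration above makes xlf the first compiler tried on AIX:

```python
# Hypothetical wscript: detection of xlf happens through compiler_fc,
# which in turn calls the configure() entry point defined above.
def options(opt):
    opt.load('compiler_fc')

def configure(conf):
    conf.load('compiler_fc')

def build(bld):
    bld(features='fc fcprogram', source='hello.f90', target='hello')
```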
#### File: waflib/extras/fluid.py
```python
from waflib import Task
from waflib.TaskGen import extension
class fluid(Task.Task):
color = 'BLUE'
ext_out = ['.h']
run_str = '${FLUID} -c -o ${TGT[0].abspath()} -h ${TGT[1].abspath()} ${SRC}'
@extension('.fl')
def fluid(self, node):
"""add the .fl to the source list; the cxx file generated will be compiled when possible"""
cpp = node.change_ext('.cpp')
hpp = node.change_ext('.hpp')
self.create_task('fluid', node, [cpp, hpp])
if 'cxx' in self.features:
self.source.append(cpp)
def configure(conf):
conf.find_program('fluid', var='FLUID')
conf.check_cfg(path='fltk-config', package='', args='--cxxflags --ldflags', uselib_store='FLTK', mandatory=True)
```
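A minimal wscript sketch using the `.fl` hook above (file names are placeholders); because the task generator carries the `cxx` feature, the generated `.cpp` is appended to the sources and compiled:

```python
# Hypothetical wscript: ui.fl is converted to ui.cpp/ui.hpp by the fluid
# task, and ui.cpp is compiled and linked along with main.cpp.
def options(opt):
    opt.load('compiler_cxx')

def configure(conf):
    conf.load('compiler_cxx')
    conf.load('fluid', tooldir='waflib/extras')

def build(bld):
    bld.program(source='main.cpp ui.fl', target='app', use='FLTK')
```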
#### File: waflib/extras/msvcdeps.py
```python
import os
import sys
import tempfile
import threading
from waflib import Context, Errors, Logs, Task, Utils
from waflib.Tools import c_preproc, c, cxx, msvc
from waflib.TaskGen import feature, before_method
lock = threading.Lock()
nodes = {} # Cache the path -> Node lookup
PREPROCESSOR_FLAG = '/showIncludes'
INCLUDE_PATTERN = 'Note: including file:'
# Extensible by outside tools
supported_compilers = ['msvc']
@feature('c', 'cxx')
@before_method('process_source')
def apply_msvcdeps_flags(taskgen):
if taskgen.env.CC_NAME not in supported_compilers:
return
for flag in ('CFLAGS', 'CXXFLAGS'):
if taskgen.env.get_flat(flag).find(PREPROCESSOR_FLAG) < 0:
taskgen.env.append_value(flag, PREPROCESSOR_FLAG)
# Figure out what casing conventions the user's shell used when
# launching Waf
(drive, _) = os.path.splitdrive(taskgen.bld.srcnode.abspath())
taskgen.msvcdeps_drive_lowercase = drive == drive.lower()
def path_to_node(base_node, path, cached_nodes):
# Take the base node and the path and return a node
# Results are cached because searching the node tree is expensive
# The following code is executed by threads, it is not safe, so a lock is needed...
if getattr(path, '__hash__'):
node_lookup_key = (base_node, path)
else:
# Not hashable, assume it is a list and join into a string
node_lookup_key = (base_node, os.path.sep.join(path))
try:
lock.acquire()
node = cached_nodes[node_lookup_key]
except KeyError:
node = base_node.find_resource(path)
cached_nodes[node_lookup_key] = node
finally:
lock.release()
return node
'''
Register a task subclass that has hooks for running our custom
dependency calculations rather than the C/C++ stock c_preproc
method.
'''
def wrap_compiled_task(classname):
derived_class = type(classname, (Task.classes[classname],), {})
def post_run(self):
if self.env.CC_NAME not in supported_compilers:
return super(derived_class, self).post_run()
if getattr(self, 'cached', None):
return Task.Task.post_run(self)
bld = self.generator.bld
unresolved_names = []
resolved_nodes = []
lowercase = self.generator.msvcdeps_drive_lowercase
correct_case_path = bld.path.abspath()
correct_case_path_len = len(correct_case_path)
correct_case_path_norm = os.path.normcase(correct_case_path)
# Dynamically bind to the cache
try:
cached_nodes = bld.cached_nodes
except AttributeError:
cached_nodes = bld.cached_nodes = {}
for path in self.msvcdeps_paths:
node = None
if os.path.isabs(path):
# Force drive letter to match conventions of main source tree
drive, tail = os.path.splitdrive(path)
if os.path.normcase(path[:correct_case_path_len]) == correct_case_path_norm:
# Path is in the sandbox, force it to be correct. MSVC sometimes returns a lowercase path.
path = correct_case_path + path[correct_case_path_len:]
else:
# Check the drive letter
if lowercase and (drive != drive.lower()):
path = drive.lower() + tail
elif (not lowercase) and (drive != drive.upper()):
path = drive.upper() + tail
node = path_to_node(bld.root, path, cached_nodes)
else:
base_node = bld.bldnode
# when calling find_resource, make sure the path does not begin by '..'
path = [k for k in Utils.split_path(path) if k and k != '.']
while path[0] == '..':
path = path[1:]
base_node = base_node.parent
node = path_to_node(base_node, path, cached_nodes)
if not node:
raise ValueError('could not find %r for %r' % (path, self))
else:
if not c_preproc.go_absolute:
if not (node.is_child_of(bld.srcnode) or node.is_child_of(bld.bldnode)):
# System library
Logs.debug('msvcdeps: Ignoring system include %r' % node)
continue
if id(node) == id(self.inputs[0]):
# Self-dependency
continue
resolved_nodes.append(node)
bld.node_deps[self.uid()] = resolved_nodes
bld.raw_deps[self.uid()] = unresolved_names
try:
del self.cache_sig
except:
pass
Task.Task.post_run(self)
def scan(self):
if self.env.CC_NAME not in supported_compilers:
return super(derived_class, self).scan()
resolved_nodes = self.generator.bld.node_deps.get(self.uid(), [])
unresolved_names = []
return (resolved_nodes, unresolved_names)
def sig_implicit_deps(self):
if self.env.CC_NAME not in supported_compilers:
return super(derived_class, self).sig_implicit_deps()
try:
return Task.Task.sig_implicit_deps(self)
except Errors.WafError:
return Utils.SIG_NIL
def exec_response_command(self, cmd, **kw):
# exec_response_command() is only called from inside msvc.py anyway
assert self.env.CC_NAME in supported_compilers
# Only bother adding '/showIncludes' to compile tasks
if isinstance(self, (c.c, cxx.cxx)):
try:
# The Visual Studio IDE adds an environment variable that causes
# the MS compiler to send its textual output directly to the
# debugging window rather than normal stdout/stderr.
#
# This is unrecoverably bad for this tool because it will cause
# all the dependency scanning to see an empty stdout stream and
# assume that the file being compiled uses no headers.
#
# See http://blogs.msdn.com/b/freik/archive/2006/04/05/569025.aspx
#
# Attempting to repair the situation by deleting the offending
# envvar at this point in tool execution will not be good enough--
# its presence poisons the 'waf configure' step earlier. We just
# want to put a sanity check here in order to help developers
# quickly diagnose the issue if an otherwise-good Waf tree
# is then executed inside the MSVS IDE.
assert 'VS_UNICODE_OUTPUT' not in kw['env']
tmp = None
# This block duplicated from Waflib's msvc.py
if sys.platform.startswith('win') and isinstance(cmd, list) and len(' '.join(cmd)) >= 8192:
program = cmd[0]
cmd = [self.quote_response_command(x) for x in cmd]
(fd, tmp) = tempfile.mkstemp()
os.write(fd, '\r\n'.join(i.replace('\\', '\\\\') for i in cmd[1:]).encode())
os.close(fd)
cmd = [program, '@' + tmp]
# ... end duplication
self.msvcdeps_paths = []
kw['env'] = kw.get('env', os.environ.copy())
kw['cwd'] = kw.get('cwd', os.getcwd())
kw['quiet'] = Context.STDOUT
kw['output'] = Context.STDOUT
out = []
try:
raw_out = self.generator.bld.cmd_and_log(cmd, **kw)
ret = 0
except Errors.WafError as e:
raw_out = e.stdout
ret = e.returncode
for line in raw_out.splitlines():
if line.startswith(INCLUDE_PATTERN):
inc_path = line[len(INCLUDE_PATTERN):].strip()
Logs.debug('msvcdeps: Regex matched %s' % inc_path)
self.msvcdeps_paths.append(inc_path)
else:
out.append(line)
# Pipe through the remaining stdout content (not related to /showIncludes)
if self.generator.bld.logger:
self.generator.bld.logger.debug('out: %s' % os.linesep.join(out))
else:
sys.stdout.write(os.linesep.join(out) + os.linesep)
finally:
if tmp:
try:
os.remove(tmp)
except OSError:
pass
return ret
else:
# Use base class's version of this method for linker tasks
return super(derived_class, self).exec_response_command(cmd, **kw)
def can_retrieve_cache(self):
# msvcdeps and netcaching are incompatible, so disable the cache
if self.env.CC_NAME not in supported_compilers:
return super(derived_class, self).can_retrieve_cache()
self.nocache = True # Disable sending the file to the cache
return False
derived_class.post_run = post_run
derived_class.scan = scan
derived_class.sig_implicit_deps = sig_implicit_deps
derived_class.exec_response_command = exec_response_command
derived_class.can_retrieve_cache = can_retrieve_cache
for k in ('c', 'cxx'):
wrap_compiled_task(k)
```
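A minimal sketch of enabling this tool from a wscript (the `tooldir` path is an assumption); it is a no-op unless the detected compiler is in `supported_compilers`:

```python
# Hypothetical wscript fragment: loading msvcdeps replaces waf's c_preproc
# scanning with /showIncludes-based dependency tracking on MSVC builds.
def options(opt):
    opt.load('compiler_cxx')
    opt.load('msvcdeps', tooldir='waflib/extras')

def configure(conf):
    conf.load('compiler_cxx')
    conf.load('msvcdeps', tooldir='waflib/extras')
```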
#### File: waflib/extras/netcache_client.py
```python
import os, socket, time, atexit
from waflib import Task, Logs, Utils, Build, Options, Runner
from waflib.Configure import conf
BUF = 8192 * 16
HEADER_SIZE = 128
MODES = ['PUSH', 'PULL', 'PUSH_PULL']
STALE_TIME = 30 # seconds
GET = 'GET'
PUT = 'PUT'
LST = 'LST'
BYE = 'BYE'
all_sigs_in_cache = (0.0, [])
active_connections = Runner.Queue(0)
def get_connection():
# return a new connection... do not forget to release it!
try:
ret = active_connections.get(block=False)
except Exception:
ret = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ret.connect(Task.net_cache[:2])
return ret
def release_connection(conn, msg=''):
if conn:
active_connections.put(conn)
def close_connection(conn, msg=''):
if conn:
data = '%s,%s' % (BYE, msg)
try:
conn.send(data.ljust(HEADER_SIZE))
except:
pass
try:
conn.close()
except:
pass
def close_all():
while active_connections.qsize():
conn = active_connections.get()
try:
close_connection(conn)
except:
pass
atexit.register(close_all)
def read_header(conn):
cnt = 0
buf = []
while cnt < HEADER_SIZE:
data = conn.recv(HEADER_SIZE - cnt)
if not data:
#import traceback
#traceback.print_stack()
raise ValueError('connection ended when reading a header %r' % buf)
buf.append(data)
cnt += len(data)
return ''.join(buf)
def check_cache(conn, ssig):
"""
List the files on the server; this is an optimization that assumes
concurrent builds are rare
"""
global all_sigs_in_cache
if not STALE_TIME:
return
if time.time() - all_sigs_in_cache[0] > STALE_TIME:
params = (LST,'')
conn.send(','.join(params).ljust(HEADER_SIZE))
# read what is coming back
ret = read_header(conn)
size = int(ret.split(',')[0])
buf = []
cnt = 0
while cnt < size:
data = conn.recv(min(BUF, size-cnt))
if not data:
raise ValueError('connection ended %r %r' % (cnt, size))
buf.append(data)
cnt += len(data)
all_sigs_in_cache = (time.time(), ''.join(buf).split('\n'))
Logs.debug('netcache: server cache has %r entries' % len(all_sigs_in_cache[1]))
if not ssig in all_sigs_in_cache[1]:
raise ValueError('no file %s in cache' % ssig)
class MissingFile(Exception):
pass
def recv_file(conn, ssig, count, p):
check_cache(conn, ssig)
params = (GET, ssig, str(count))
conn.send(','.join(params).ljust(HEADER_SIZE))
data = read_header(conn)
size = int(data.split(',')[0])
if size == -1:
raise MissingFile('no file %s - %s in cache' % (ssig, count))
# get the file, writing immediately
# TODO a tmp file would be better
f = open(p, 'wb')
cnt = 0
while cnt < size:
data = conn.recv(min(BUF, size-cnt))
if not data:
raise ValueError('connection ended %r %r' % (cnt, size))
f.write(data)
cnt += len(data)
f.close()
def put_data(conn, ssig, cnt, p):
#print "pushing %r %r %r" % (ssig, cnt, p)
size = os.stat(p).st_size
params = (PUT, ssig, str(cnt), str(size))
conn.send(','.join(params).ljust(HEADER_SIZE))
f = open(p, 'rb')
cnt = 0
while cnt < size:
r = f.read(min(BUF, size-cnt))
while r:
k = conn.send(r)
if not k:
raise ValueError('connection ended')
cnt += k
r = r[k:]
#def put_data(conn, ssig, cnt, p):
# size = os.stat(p).st_size
# params = (PUT, ssig, str(cnt), str(size))
# conn.send(','.join(params).ljust(HEADER_SIZE))
# conn.send(','*size)
# params = (BYE, 'he')
# conn.send(','.join(params).ljust(HEADER_SIZE))
def can_retrieve_cache(self):
if not Task.net_cache:
return False
if not self.outputs:
return False
if Task.net_cache[-1] == 'PUSH':
return
self.cached = False
cnt = 0
sig = self.signature()
ssig = self.uid().encode('hex') + sig.encode('hex')
conn = None
err = False
try:
try:
conn = get_connection()
for node in self.outputs:
p = node.abspath()
recv_file(conn, ssig, cnt, p)
cnt += 1
except MissingFile as e:
Logs.debug('netcache: file is not in the cache %r' % e)
err = True
except Exception as e:
Logs.debug('netcache: could not get the files %r' % e)
err = True
# broken connection? remove this one
close_connection(conn)
conn = None
finally:
release_connection(conn)
if err:
return False
for node in self.outputs:
node.sig = sig
#if self.generator.bld.progress_bar < 1:
# self.generator.bld.to_log('restoring from cache %r\n' % node.abspath())
self.cached = True
return True
@Utils.run_once
def put_files_cache(self):
if not Task.net_cache:
return
if not self.outputs:
return
if Task.net_cache[-1] == 'PULL':
return
if getattr(self, 'cached', None):
return
#print "called put_files_cache", id(self)
bld = self.generator.bld
sig = self.signature()
ssig = self.uid().encode('hex') + sig.encode('hex')
conn = None
cnt = 0
try:
for node in self.outputs:
# We could re-create the signature of the task with the signature of the outputs
# in practice, this means hashing the output files
# this is unnecessary
try:
if not conn:
conn = get_connection()
put_data(conn, ssig, cnt, node.abspath())
except Exception as e:
Logs.debug("netcache: could not push the files %r" % e)
# broken connection? remove this one
close_connection(conn)
conn = None
cnt += 1
finally:
release_connection(conn)
bld.task_sigs[self.uid()] = self.cache_sig
def hash_env_vars(self, env, vars_lst):
if not env.table:
env = env.parent
if not env:
return Utils.SIG_NIL
idx = str(id(env)) + str(vars_lst)
try:
cache = self.cache_env
except AttributeError:
cache = self.cache_env = {}
else:
try:
return self.cache_env[idx]
except KeyError:
pass
v = str([env[a] for a in vars_lst])
v = v.replace(self.srcnode.abspath().__repr__()[:-1], '')
m = Utils.md5()
m.update(v.encode())
ret = m.digest()
Logs.debug('envhash: %r %r', ret, v)
cache[idx] = ret
return ret
def uid(self):
try:
return self.uid_
except AttributeError:
m = Utils.md5()
src = self.generator.bld.srcnode
up = m.update
up(self.__class__.__name__.encode())
for x in self.inputs + self.outputs:
up(x.path_from(src).encode())
self.uid_ = m.digest()
return self.uid_
@conf
def setup_netcache(ctx, host, port, mode):
Logs.warn('Using the network cache %s, %s, %s' % (host, port, mode))
Task.net_cache = (host, port, mode)
Task.Task.can_retrieve_cache = can_retrieve_cache
Task.Task.put_files_cache = put_files_cache
Task.Task.uid = uid
Build.BuildContext.hash_env_vars = hash_env_vars
ctx.cache_global = Options.cache_global = True
def options(opt):
if not 'NETCACHE' in os.environ:
Logs.warn('the network cache is disabled, set NETCACHE=host:port@mode to enable')
else:
v = os.environ['NETCACHE']
if v in MODES:
host = socket.gethostname()
port = 51200
mode = v
else:
mode = 'PUSH_PULL'
host, port = v.split(':')
			if '@' in port:  # str.find() returns -1 (truthy) when '@' is absent, so test membership instead
				port, mode = port.split('@')
port = int(port)
if not mode in MODES:
opt.fatal('Invalid mode %s not in %r' % (mode, MODES))
setup_netcache(opt, host, port, mode)
```
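For reference, the `NETCACHE` environment variable parsed by `options()` above has the form `host:port@mode`, or just a bare mode name. The following is a standalone sketch of that parsing; the `localhost` default is illustrative only (the real code falls back to `socket.gethostname()` and port 51200).

```python
# Standalone sketch of how options() above interprets NETCACHE.
MODES = ['PUSH', 'PULL', 'PUSH_PULL']

def parse_netcache(value, default_host='localhost', default_port=51200):
    if value in MODES:
        return (default_host, default_port, value)
    mode = 'PUSH_PULL'
    host, port = value.split(':')
    if '@' in port:
        port, mode = port.split('@')
    if mode not in MODES:
        raise ValueError('Invalid mode %s not in %r' % (mode, MODES))
    return (host, int(port), mode)

print(parse_netcache('buildcache:51200@PULL'))  # ('buildcache', 51200, 'PULL')
print(parse_netcache('PUSH_PULL'))              # ('localhost', 51200, 'PUSH_PULL')
```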
#### File: waflib/Tools/compiler_d.py
```python
import os, sys, imp, types
from waflib import Utils, Configure, Options, Logs
def configure(conf):
"""
Try to find a suitable D compiler or raise a :py:class:`waflib.Errors.ConfigurationError`.
"""
for compiler in conf.options.dcheck.split(','):
conf.env.stash()
conf.start_msg('Checking for %r (d compiler)' % compiler)
try:
conf.load(compiler)
except conf.errors.ConfigurationError as e:
conf.env.revert()
conf.end_msg(False)
Logs.debug('compiler_d: %r' % e)
else:
if conf.env.D:
conf.end_msg(conf.env.get_flat('D'))
conf.env['COMPILER_D'] = compiler
break
conf.end_msg(False)
else:
conf.fatal('no suitable d compiler was found')
def options(opt):
"""
Restrict the compiler detection from the command-line::
$ waf configure --check-d-compiler=dmd
"""
d_compiler_opts = opt.add_option_group('D Compiler Options')
d_compiler_opts.add_option('--check-d-compiler', default='gdc,dmd,ldc2', action='store',
help='check for the compiler [Default:gdc,dmd,ldc2]', dest='dcheck')
for d_compiler in ['gdc', 'dmd', 'ldc2']:
opt.load('%s' % d_compiler)
```
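The detection loop in `configure()` above relies on `conf.env.stash()` / `conf.env.revert()` so a failed probe leaves the environment untouched, and on Python's `for ... else` to report when no candidate succeeded. Below is a toy sketch of that control flow; `probe()` is a hypothetical stand-in for `conf.load(compiler)`.

```python
# Toy sketch of the try-each-candidate loop used in configure() above.
def probe(name):
    if name != 'dmd':
        raise RuntimeError('%s not usable' % name)
    return name

def detect(candidates):
    for candidate in candidates:
        try:
            found = probe(candidate)     # may raise, like conf.load()
        except RuntimeError:
            continue                     # analogous to conf.env.revert()
        break                            # success: stop, like the break above
    else:
        raise RuntimeError('no suitable d compiler was found')
    return found

print(detect(['gdc', 'dmd', 'ldc2']))  # dmd
```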
#### File: waflib/Tools/errcheck.py
```python
typos = {
'feature':'features',
'sources':'source',
'targets':'target',
'include':'includes',
'export_include':'export_includes',
'define':'defines',
'importpath':'includes',
'installpath':'install_path',
'iscopy':'is_copy',
}
meths_typos = ['__call__', 'program', 'shlib', 'stlib', 'objects']
from waflib import Logs, Build, Node, Task, TaskGen, ConfigSet, Errors, Utils
import waflib.Tools.ccroot
def check_same_targets(self):
mp = Utils.defaultdict(list)
uids = {}
def check_task(tsk):
if not isinstance(tsk, Task.Task):
return
for node in tsk.outputs:
mp[node].append(tsk)
try:
uids[tsk.uid()].append(tsk)
except KeyError:
uids[tsk.uid()] = [tsk]
for g in self.groups:
for tg in g:
try:
for tsk in tg.tasks:
check_task(tsk)
except AttributeError:
# raised if not a task generator, which should be uncommon
check_task(tg)
dupe = False
for (k, v) in mp.items():
if len(v) > 1:
dupe = True
msg = '* Node %r is created more than once%s. The task generators are:' % (k, Logs.verbose == 1 and " (full message on 'waf -v -v')" or "")
Logs.error(msg)
for x in v:
if Logs.verbose > 1:
Logs.error(' %d. %r' % (1 + v.index(x), x.generator))
else:
Logs.error(' %d. %r in %r' % (1 + v.index(x), x.generator.name, getattr(x.generator, 'path', None)))
if not dupe:
for (k, v) in uids.items():
if len(v) > 1:
Logs.error('* Several tasks use the same identifier. Please check the information on\n http://docs.waf.googlecode.com/git/apidocs_16/Task.html#waflib.Task.Task.uid')
for tsk in v:
Logs.error(' - object %r (%r) defined in %r' % (tsk.__class__.__name__, tsk, tsk.generator))
def check_invalid_constraints(self):
feat = set([])
for x in list(TaskGen.feats.values()):
		feat |= set(x)  # set.union() returns a new set; use |= so the result is kept
for (x, y) in TaskGen.task_gen.prec.items():
feat.add(x)
		feat |= set(y)  # same fix as above: accumulate into feat rather than discarding the union
ext = set([])
for x in TaskGen.task_gen.mappings.values():
ext.add(x.__name__)
invalid = ext & feat
if invalid:
Logs.error('The methods %r have invalid annotations: @extension <-> @feature/@before_method/@after_method' % list(invalid))
# the build scripts have been read, so we can check for invalid after/before attributes on task classes
for cls in list(Task.classes.values()):
for x in ('before', 'after'):
for y in Utils.to_list(getattr(cls, x, [])):
if not Task.classes.get(y, None):
Logs.error('Erroneous order constraint %r=%r on task class %r' % (x, y, cls.__name__))
if getattr(cls, 'rule', None):
Logs.error('Erroneous attribute "rule" on task class %r (rename to "run_str")' % cls.__name__)
def replace(m):
"""
We could add properties, but they would not work in some cases:
bld.program(...) requires 'source' in the attributes
"""
oldcall = getattr(Build.BuildContext, m)
def call(self, *k, **kw):
ret = oldcall(self, *k, **kw)
for x in typos:
if x in kw:
if x == 'iscopy' and 'subst' in getattr(self, 'features', ''):
continue
err = True
Logs.error('Fix the typo %r -> %r on %r' % (x, typos[x], ret))
return ret
setattr(Build.BuildContext, m, call)
def enhance_lib():
"""
modify existing classes and methods
"""
for m in meths_typos:
replace(m)
# catch '..' in ant_glob patterns
def ant_glob(self, *k, **kw):
if k:
lst=Utils.to_list(k[0])
for pat in lst:
if '..' in pat.split('/'):
Logs.error("In ant_glob pattern %r: '..' means 'two dots', not 'parent directory'" % k[0])
if kw.get('remove', True):
try:
if self.is_child_of(self.ctx.bldnode) and not kw.get('quiet', False):
Logs.error('Using ant_glob on the build folder (%r) is dangerous (quiet=True to disable this warning)' % self)
except AttributeError:
pass
return self.old_ant_glob(*k, **kw)
Node.Node.old_ant_glob = Node.Node.ant_glob
Node.Node.ant_glob = ant_glob
# catch conflicting ext_in/ext_out/before/after declarations
old = Task.is_before
def is_before(t1, t2):
ret = old(t1, t2)
if ret and old(t2, t1):
Logs.error('Contradictory order constraints in classes %r %r' % (t1, t2))
return ret
Task.is_before = is_before
# check for bld(feature='cshlib') where no 'c' is given - this can be either a mistake or on purpose
# so we only issue a warning
def check_err_features(self):
lst = self.to_list(self.features)
if 'shlib' in lst:
Logs.error('feature shlib -> cshlib, dshlib or cxxshlib')
for x in ('c', 'cxx', 'd', 'fc'):
if not x in lst and lst and lst[0] in [x+y for y in ('program', 'shlib', 'stlib')]:
Logs.error('%r features is probably missing %r' % (self, x))
TaskGen.feature('*')(check_err_features)
# check for erroneous order constraints
def check_err_order(self):
if not hasattr(self, 'rule') and not 'subst' in Utils.to_list(self.features):
for x in ('before', 'after', 'ext_in', 'ext_out'):
if hasattr(self, x):
Logs.warn('Erroneous order constraint %r on non-rule based task generator %r' % (x, self))
else:
for x in ('before', 'after'):
for y in self.to_list(getattr(self, x, [])):
if not Task.classes.get(y, None):
Logs.error('Erroneous order constraint %s=%r on %r (no such class)' % (x, y, self))
TaskGen.feature('*')(check_err_order)
# check for @extension used with @feature/@before_method/@after_method
def check_compile(self):
check_invalid_constraints(self)
try:
ret = self.orig_compile()
finally:
check_same_targets(self)
return ret
Build.BuildContext.orig_compile = Build.BuildContext.compile
Build.BuildContext.compile = check_compile
# check for invalid build groups #914
def use_rec(self, name, **kw):
try:
y = self.bld.get_tgen_by_name(name)
except Errors.WafError:
pass
else:
idx = self.bld.get_group_idx(self)
odx = self.bld.get_group_idx(y)
if odx > idx:
msg = "Invalid 'use' across build groups:"
if Logs.verbose > 1:
msg += '\n target %r\n uses:\n %r' % (self, y)
else:
msg += " %r uses %r (try 'waf -v -v' for the full error)" % (self.name, name)
raise Errors.WafError(msg)
self.orig_use_rec(name, **kw)
TaskGen.task_gen.orig_use_rec = TaskGen.task_gen.use_rec
TaskGen.task_gen.use_rec = use_rec
# check for env.append
def getattri(self, name, default=None):
if name == 'append' or name == 'add':
raise Errors.WafError('env.append and env.add do not exist: use env.append_value/env.append_unique')
elif name == 'prepend':
raise Errors.WafError('env.prepend does not exist: use env.prepend_value')
if name in self.__slots__:
return object.__getattr__(self, name, default)
else:
return self[name]
ConfigSet.ConfigSet.__getattr__ = getattri
def options(opt):
"""
Add a few methods
"""
enhance_lib()
def configure(conf):
pass
```
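The `typos` table at the top of this module catches keyword slips in user wscripts; for instance `bld.program(sources=...)` is flagged because the correct keyword is `source`. A self-contained illustration of that lookup, using a subset of the same mapping:

```python
# Illustration of the keyword-typo check driven by the `typos` dict above.
typos = {'sources': 'source', 'targets': 'target', 'include': 'includes'}

def report_typos(**kw):
    for wrong, right in typos.items():
        if wrong in kw:
            print('Fix the typo %r -> %r' % (wrong, right))

report_typos(sources='main.c', target='app')   # Fix the typo 'sources' -> 'source'
report_typos(source='main.c', target='app')    # (nothing reported)
```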
#### File: waflib/Tools/intltool.py
```python
import os, re
from waflib import Configure, TaskGen, Task, Utils, Runner, Options, Build, Logs
import waflib.Tools.ccroot
from waflib.TaskGen import feature, before_method
from waflib.Logs import error
@before_method('process_source')
@feature('intltool_in')
def apply_intltool_in_f(self):
"""
Create tasks to translate files by intltool-merge::
def build(bld):
bld(
features = "intltool_in",
podir = "../po",
flags = ["-d", "-q", "-u", "-c"],
source = 'kupfer.desktop.in',
install_path = "${DATADIR}/applications",
)
:param podir: location of the .po files
:type podir: string
:param source: source files to process
:type source: list of string
:param flags: compilation flags ("-quc" by default)
:type flags: list of string
:param install_path: installation path
:type install_path: string
"""
try: self.meths.remove('process_source')
except ValueError: pass
if not self.env.LOCALEDIR:
self.env.LOCALEDIR = self.env.PREFIX + '/share/locale'
for i in self.to_list(self.source):
node = self.path.find_resource(i)
podir = getattr(self, 'podir', 'po')
podirnode = self.path.find_dir(podir)
if not podirnode:
error("could not find the podir %r" % podir)
continue
cache = getattr(self, 'intlcache', '.intlcache')
self.env['INTLCACHE'] = os.path.join(self.path.bldpath(), podir, cache)
self.env['INTLPODIR'] = podirnode.bldpath()
self.env['INTLFLAGS'] = getattr(self, 'flags', ['-q', '-u', '-c'])
task = self.create_task('intltool', node, node.change_ext(''))
inst = getattr(self, 'install_path', '${LOCALEDIR}')
if inst:
self.bld.install_files(inst, task.outputs)
@feature('intltool_po')
def apply_intltool_po(self):
"""
Create tasks to process po files::
def build(bld):
bld(features='intltool_po', appname='myapp', podir='po', install_path="${LOCALEDIR}")
The relevant task generator arguments are:
:param podir: directory of the .po files
:type podir: string
:param appname: name of the application
:type appname: string
:param install_path: installation directory
:type install_path: string
The file LINGUAS must be present in the directory pointed by *podir* and list the translation files to process.
"""
try: self.meths.remove('process_source')
except ValueError: pass
if not self.env.LOCALEDIR:
self.env.LOCALEDIR = self.env.PREFIX + '/share/locale'
appname = getattr(self, 'appname', 'set_your_app_name')
podir = getattr(self, 'podir', '')
inst = getattr(self, 'install_path', '${LOCALEDIR}')
linguas = self.path.find_node(os.path.join(podir, 'LINGUAS'))
if linguas:
# scan LINGUAS file for locales to process
file = open(linguas.abspath())
langs = []
for line in file.readlines():
# ignore lines containing comments
if not line.startswith('#'):
langs += line.split()
file.close()
re_linguas = re.compile('[-a-zA-Z_@.]+')
for lang in langs:
# Make sure that we only process lines which contain locales
if re_linguas.match(lang):
node = self.path.find_resource(os.path.join(podir, re_linguas.match(lang).group() + '.po'))
task = self.create_task('po', node, node.change_ext('.mo'))
if inst:
filename = task.outputs[0].name
(langname, ext) = os.path.splitext(filename)
inst_file = inst + os.sep + langname + os.sep + 'LC_MESSAGES' + os.sep + appname + '.mo'
self.bld.install_as(inst_file, task.outputs[0], chmod=getattr(self, 'chmod', Utils.O644), env=task.env)
else:
		Logs.pprint('RED', "Error: no LINGUAS file found in po directory")
class po(Task.Task):
"""
Compile .po files into .gmo files
"""
run_str = '${MSGFMT} -o ${TGT} ${SRC}'
color = 'BLUE'
class intltool(Task.Task):
"""
Let intltool-merge translate an input file
"""
run_str = '${INTLTOOL} ${INTLFLAGS} ${INTLCACHE} ${INTLPODIR} ${SRC} ${TGT}'
color = 'BLUE'
def configure(conf):
"""
Detect the program *msgfmt* and set *conf.env.MSGFMT*.
Detect the program *intltool-merge* and set *conf.env.INTLTOOL*.
It is possible to set INTLTOOL in the environment, but it must not have spaces in it::
$ INTLTOOL="/path/to/the program/intltool" waf configure
If a C/C++ compiler is present, execute a compilation test to find the header *locale.h*.
"""
conf.find_program('msgfmt', var='MSGFMT')
conf.find_perl_program('intltool-merge', var='INTLTOOL')
prefix = conf.env.PREFIX
datadir = conf.env.DATADIR
if not datadir:
datadir = os.path.join(prefix,'share')
conf.define('LOCALEDIR', os.path.join(datadir, 'locale').replace('\\', '\\\\'))
conf.define('DATADIR', datadir.replace('\\', '\\\\'))
if conf.env.CC or conf.env.CXX:
conf.check(header_name='locale.h')
```
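Both features above are normally driven from a project wscript. The following minimal sketch is assembled from the docstring examples above; the application name, podir and `.desktop` file name are placeholders.

```python
# Minimal wscript sketch combining the two intltool features documented above.
# 'myapp', 'po' and 'myapp.desktop.in' are placeholder names.
def configure(conf):
    conf.load('intltool')

def build(bld):
    bld(features='intltool_in', podir='po', flags=['-d', '-q', '-u', '-c'],
        source='myapp.desktop.in', install_path='${DATADIR}/applications')
    bld(features='intltool_po', appname='myapp', podir='po',
        install_path='${LOCALEDIR}')
```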
#### File: pbr/hooks/commands.py
```python
import os
from setuptools.command import easy_install
from pbr.hooks import base
from pbr import options
from pbr import packaging
class CommandsConfig(base.BaseConfig):
section = 'global'
def __init__(self, config):
super(CommandsConfig, self).__init__(config)
self.commands = self.config.get('commands', "")
def save(self):
self.config['commands'] = self.commands
super(CommandsConfig, self).save()
def add_command(self, command):
self.commands = "%s\n%s" % (self.commands, command)
def hook(self):
self.add_command('pbr.packaging.LocalEggInfo')
self.add_command('pbr.packaging.LocalSDist')
self.add_command('pbr.packaging.LocalInstallScripts')
self.add_command('pbr.packaging.LocalDevelop')
self.add_command('pbr.packaging.LocalRPMVersion')
self.add_command('pbr.packaging.LocalDebVersion')
if os.name != 'nt':
easy_install.get_script_args = packaging.override_get_script_args
if packaging.have_sphinx():
self.add_command('pbr.builddoc.LocalBuildDoc')
self.add_command('pbr.builddoc.LocalBuildLatex')
if os.path.exists('.testr.conf') and packaging.have_testr():
# There is a .testr.conf file. We want to use it.
self.add_command('pbr.packaging.TestrTest')
elif self.config.get('nosetests', False) and packaging.have_nose():
# We seem to still have nose configured
self.add_command('pbr.packaging.NoseTest')
use_egg = options.get_boolean_option(
self.pbr_config, 'use-egg', 'PBR_USE_EGG')
# We always want non-egg install unless explicitly requested
if 'manpages' in self.pbr_config or not use_egg:
self.add_command('pbr.packaging.LocalInstall')
else:
self.add_command('pbr.packaging.InstallWithGit')
```
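The net effect of `hook()` above is simply to accumulate newline-separated command entry points under the `commands` value of the `[global]` section; a tiny illustration of that accumulation, using a plain dict in place of the pbr config object:

```python
# Tiny illustration of how add_command() accumulates the 'commands' value above.
config = {'commands': ''}

def add_command(command):
    config['commands'] = "%s\n%s" % (config['commands'], command)

add_command('pbr.packaging.LocalEggInfo')
add_command('pbr.packaging.LocalSDist')
print(config['commands'].split())  # ['pbr.packaging.LocalEggInfo', 'pbr.packaging.LocalSDist']
```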
#### File: site-packages/PythonMagick/__init__.py
```python
from . import _PythonMagick
class Image(_PythonMagick.Image):
pass
class Blob(_PythonMagick.Blob):
def __init__(self,*args):
if len(args)==1 and type(args[0])==type(''):
_PythonMagick.Blob.__init__(self)
self.update(args[0])
else:
_PythonMagick.Blob.__init__(self,*args)
data=property(_PythonMagick.get_blob_data,_PythonMagick.Blob.update)
class Color(_PythonMagick.Color):
pass
class ColorspaceType(_PythonMagick.ColorspaceType):
pass
class CompositeOperator(_PythonMagick.CompositeOperator):
pass
class CompressionType(_PythonMagick.CompressionType):
pass
class Coordinate(_PythonMagick.Coordinate):
pass
class DecorationType(_PythonMagick.DecorationType):
pass
class DrawableAffine(_PythonMagick.DrawableAffine):
pass
class DrawableArc(_PythonMagick.DrawableArc):
pass
class DrawableBezier(_PythonMagick.DrawableBezier):
pass
class DrawableCircle(_PythonMagick.DrawableCircle):
pass
class DrawableClipPath(_PythonMagick.DrawableClipPath):
pass
class DrawableColor(_PythonMagick.DrawableColor):
pass
class DrawableCompositeImage(_PythonMagick.DrawableCompositeImage):
pass
class DrawableDashArray(_PythonMagick.DrawableDashArray):
pass
class DrawableDashOffset(_PythonMagick.DrawableDashOffset):
pass
class DrawableEllipse(_PythonMagick.DrawableEllipse):
pass
class DrawableFillColor(_PythonMagick.DrawableFillColor):
pass
class DrawableFillOpacity(_PythonMagick.DrawableFillOpacity):
pass
class DrawableFillRule(_PythonMagick.DrawableFillRule):
pass
class DrawableFont(_PythonMagick.DrawableFont):
pass
class DrawableGravity(_PythonMagick.DrawableGravity):
pass
class DrawableLine(_PythonMagick.DrawableLine):
pass
class DrawableMatte(_PythonMagick.DrawableMatte):
pass
class DrawableMiterLimit(_PythonMagick.DrawableMiterLimit):
pass
class DrawablePath(_PythonMagick.DrawablePath):
pass
class DrawablePoint(_PythonMagick.DrawablePoint):
pass
class DrawablePointSize(_PythonMagick.DrawablePointSize):
pass
class DrawablePolygon(_PythonMagick.DrawablePolygon):
pass
class DrawablePolyline(_PythonMagick.DrawablePolyline):
pass
class DrawablePopClipPath(_PythonMagick.DrawablePopClipPath):
pass
class DrawablePopGraphicContext(_PythonMagick.DrawablePopGraphicContext):
pass
class DrawablePopPattern(_PythonMagick.DrawablePopPattern):
pass
class DrawablePushClipPath(_PythonMagick.DrawablePushClipPath):
pass
class DrawablePushGraphicContext(_PythonMagick.DrawablePushGraphicContext):
pass
class DrawablePushPattern(_PythonMagick.DrawablePushPattern):
pass
class DrawableRectangle(_PythonMagick.DrawableRectangle):
pass
class DrawableRotation(_PythonMagick.DrawableRotation):
pass
class DrawableRoundRectangle(_PythonMagick.DrawableRoundRectangle):
pass
class DrawableScaling(_PythonMagick.DrawableScaling):
pass
class DrawableSkewX(_PythonMagick.DrawableSkewX):
pass
class DrawableSkewY(_PythonMagick.DrawableSkewY):
pass
class DrawableStrokeAntialias(_PythonMagick.DrawableStrokeAntialias):
pass
class DrawableStrokeColor(_PythonMagick.DrawableStrokeColor):
pass
class DrawableStrokeLineCap(_PythonMagick.DrawableStrokeLineCap):
pass
class DrawableStrokeLineJoin(_PythonMagick.DrawableStrokeLineJoin):
pass
class DrawableStrokeOpacity(_PythonMagick.DrawableStrokeOpacity):
pass
class DrawableStrokeWidth(_PythonMagick.DrawableStrokeWidth):
pass
class DrawableText(_PythonMagick.DrawableText):
pass
class DrawableTextAntialias(_PythonMagick.DrawableTextAntialias):
pass
class DrawableTextDecoration(_PythonMagick.DrawableTextDecoration):
pass
class DrawableTextUnderColor(_PythonMagick.DrawableTextUnderColor):
pass
class DrawableTranslation(_PythonMagick.DrawableTranslation):
pass
class DrawableViewbox(_PythonMagick.DrawableViewbox):
pass
class Exception(_PythonMagick.Exception):
pass
class FilterTypes(_PythonMagick.FilterTypes):
pass
class Geometry(_PythonMagick.Geometry):
pass
class GravityType(_PythonMagick.GravityType):
pass
class PathArcAbs(_PythonMagick.PathArcAbs):
pass
class PathArcArgs(_PythonMagick.PathArcArgs):
pass
class PathArcRel(_PythonMagick.PathArcRel):
pass
class PathClosePath(_PythonMagick.PathClosePath):
pass
class PathCurvetoAbs(_PythonMagick.PathCurvetoAbs):
pass
class PathCurvetoArgs(_PythonMagick.PathCurvetoArgs):
pass
class PathCurvetoRel(_PythonMagick.PathCurvetoRel):
pass
class PathLinetoAbs(_PythonMagick.PathLinetoAbs):
pass
class PathLinetoHorizontalAbs(_PythonMagick.PathLinetoHorizontalAbs):
pass
class PathLinetoHorizontalRel(_PythonMagick.PathLinetoHorizontalRel):
pass
class PathLinetoRel(_PythonMagick.PathLinetoRel):
pass
class PathLinetoVerticalAbs(_PythonMagick.PathLinetoVerticalAbs):
pass
class PathLinetoVerticalRel(_PythonMagick.PathLinetoVerticalRel):
pass
class PathMovetoAbs(_PythonMagick.PathMovetoAbs):
pass
class PathMovetoRel(_PythonMagick.PathMovetoRel):
pass
class PathQuadraticCurvetoAbs(_PythonMagick.PathQuadraticCurvetoAbs):
pass
class PathQuadraticCurvetoArgs(_PythonMagick.PathQuadraticCurvetoArgs):
pass
class PathQuadraticCurvetoRel(_PythonMagick.PathQuadraticCurvetoRel):
pass
class PathSmoothCurvetoAbs(_PythonMagick.PathSmoothCurvetoAbs):
pass
class PathSmoothCurvetoRel(_PythonMagick.PathSmoothCurvetoRel):
pass
class PathSmoothQuadraticCurvetoAbs(_PythonMagick.PathSmoothQuadraticCurvetoAbs):
pass
class PathSmoothQuadraticCurvetoRel(_PythonMagick.PathSmoothQuadraticCurvetoRel):
pass
class Pixels(_PythonMagick.Pixels):
pass
class TypeMetric(_PythonMagick.TypeMetric):
pass
class VPath(_PythonMagick.VPath):
pass
```
#### File: site-packages/easyprocess/unicodeutil.py
```python
import logging
import shlex
import sys
import unicodedata
log = logging.getLogger(__name__)
PY3 = sys.version_info[0] >= 3
if PY3:
string_types = str,
else:
string_types = basestring,
class EasyProcessUnicodeError(Exception):
pass
def split_command(cmd):
'''
- cmd is string list -> nothing to do
- cmd is string -> split it using shlex
:param cmd: string ('ls -l') or list of strings (['ls','-l'])
:rtype: string list
'''
if not isinstance(cmd, string_types):
# cmd is string list
pass
else:
if not PY3:
# cmd is string
# The shlex module currently does not support Unicode input (in
# 2.x)!
if isinstance(cmd, unicode):
try:
cmd = unicodedata.normalize(
'NFKD', cmd).encode('ascii', 'strict')
except UnicodeEncodeError:
raise EasyProcessUnicodeError('unicode command "%s" can not be processed.' % cmd +
'Use string list instead of string')
log.debug('unicode is normalized')
cmd = shlex.split(cmd)
return cmd
def uniencode(s):
if PY3:
pass
# s=s.encode()
else:
if isinstance(s, unicode):
s = s.encode('utf-8')
return s
def unidecode(s):
if PY3:
s = s.decode()
else:
if isinstance(s, str):
s = s.decode('utf-8', 'ignore')
return s
```
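A quick illustration of `split_command` above: plain strings are tokenized with `shlex`, while lists are passed through unchanged.

```python
# Example behaviour of split_command() defined above (Python 3 path).
print(split_command('ls -l /tmp'))    # ['ls', '-l', '/tmp']
print(split_command(['ls', '-l']))    # ['ls', '-l']  (already a list, returned as-is)
```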
#### File: site-packages/jmespath/visitor.py
```python
import operator
from jmespath import functions
from jmespath.compat import string_type
from numbers import Number
def _equals(x, y):
if _is_special_integer_case(x, y):
return False
else:
return x == y
def _is_special_integer_case(x, y):
# We need to special case comparing 0 or 1 to
# True/False. While normally comparing any
# integer other than 0/1 to True/False will always
# return False. However 0/1 have this:
# >>> 0 == True
# False
# >>> 0 == False
# True
# >>> 1 == True
# True
# >>> 1 == False
# False
#
# Also need to consider that:
# >>> 0 in [True, False]
# True
if x is 0 or x is 1:
return y is True or y is False
elif y is 0 or y is 1:
return x is True or x is False
def _is_comparable(x):
# The spec doesn't officially support string types yet,
# but enough people are relying on this behavior that
# it's been added back. This should eventually become
# part of the official spec.
return _is_actual_number(x) or isinstance(x, string_type)
def _is_actual_number(x):
# We need to handle python's quirkiness with booleans,
# specifically:
#
# >>> isinstance(False, int)
# True
# >>> isinstance(True, int)
# True
if x is True or x is False:
return False
return isinstance(x, Number)
class Options(object):
"""Options to control how a JMESPath function is evaluated."""
def __init__(self, dict_cls=None, custom_functions=None):
#: The class to use when creating a dict. The interpreter
        # may create dictionaries during the evaluation of a JMESPath
# expression. For example, a multi-select hash will
# create a dictionary. By default we use a dict() type.
# You can set this value to change what dict type is used.
# The most common reason you would change this is if you
# want to set a collections.OrderedDict so that you can
        # have predictable key ordering.
self.dict_cls = dict_cls
self.custom_functions = custom_functions
class _Expression(object):
def __init__(self, expression, interpreter):
self.expression = expression
self.interpreter = interpreter
def visit(self, node, *args, **kwargs):
return self.interpreter.visit(node, *args, **kwargs)
class Visitor(object):
def __init__(self):
self._method_cache = {}
def visit(self, node, *args, **kwargs):
node_type = node['type']
method = self._method_cache.get(node_type)
if method is None:
method = getattr(
self, 'visit_%s' % node['type'], self.default_visit)
self._method_cache[node_type] = method
return method(node, *args, **kwargs)
def default_visit(self, node, *args, **kwargs):
raise NotImplementedError("default_visit")
class TreeInterpreter(Visitor):
COMPARATOR_FUNC = {
'eq': _equals,
'ne': lambda x, y: not _equals(x, y),
'lt': operator.lt,
'gt': operator.gt,
'lte': operator.le,
'gte': operator.ge
}
_EQUALITY_OPS = ['eq', 'ne']
MAP_TYPE = dict
def __init__(self, options=None):
super(TreeInterpreter, self).__init__()
self._dict_cls = self.MAP_TYPE
if options is None:
options = Options()
self._options = options
if options.dict_cls is not None:
self._dict_cls = self._options.dict_cls
if options.custom_functions is not None:
self._functions = self._options.custom_functions
else:
self._functions = functions.Functions()
def default_visit(self, node, *args, **kwargs):
raise NotImplementedError(node['type'])
def visit_subexpression(self, node, value):
result = value
for node in node['children']:
result = self.visit(node, result)
return result
def visit_field(self, node, value):
try:
return value.get(node['value'])
except AttributeError:
return None
def visit_comparator(self, node, value):
# Common case: comparator is == or !=
comparator_func = self.COMPARATOR_FUNC[node['value']]
if node['value'] in self._EQUALITY_OPS:
return comparator_func(
self.visit(node['children'][0], value),
self.visit(node['children'][1], value)
)
else:
# Ordering operators are only valid for numbers.
# Evaluating any other type with a comparison operator
# will yield a None value.
left = self.visit(node['children'][0], value)
right = self.visit(node['children'][1], value)
num_types = (int, float)
if not (_is_comparable(left) and
_is_comparable(right)):
return None
return comparator_func(left, right)
def visit_current(self, node, value):
return value
def visit_expref(self, node, value):
return _Expression(node['children'][0], self)
def visit_function_expression(self, node, value):
resolved_args = []
for child in node['children']:
current = self.visit(child, value)
resolved_args.append(current)
return self._functions.call_function(node['value'], resolved_args)
def visit_filter_projection(self, node, value):
base = self.visit(node['children'][0], value)
if not isinstance(base, list):
return None
comparator_node = node['children'][2]
collected = []
for element in base:
if self._is_true(self.visit(comparator_node, element)):
current = self.visit(node['children'][1], element)
if current is not None:
collected.append(current)
return collected
def visit_flatten(self, node, value):
base = self.visit(node['children'][0], value)
if not isinstance(base, list):
# Can't flatten the object if it's not a list.
return None
merged_list = []
for element in base:
if isinstance(element, list):
merged_list.extend(element)
else:
merged_list.append(element)
return merged_list
def visit_identity(self, node, value):
return value
def visit_index(self, node, value):
# Even though we can index strings, we don't
# want to support that.
if not isinstance(value, list):
return None
try:
return value[node['value']]
except IndexError:
return None
def visit_index_expression(self, node, value):
result = value
for node in node['children']:
result = self.visit(node, result)
return result
def visit_slice(self, node, value):
if not isinstance(value, list):
return None
s = slice(*node['children'])
return value[s]
def visit_key_val_pair(self, node, value):
return self.visit(node['children'][0], value)
def visit_literal(self, node, value):
return node['value']
def visit_multi_select_dict(self, node, value):
if value is None:
return None
collected = self._dict_cls()
for child in node['children']:
collected[child['value']] = self.visit(child, value)
return collected
def visit_multi_select_list(self, node, value):
if value is None:
return None
collected = []
for child in node['children']:
collected.append(self.visit(child, value))
return collected
def visit_or_expression(self, node, value):
matched = self.visit(node['children'][0], value)
if self._is_false(matched):
matched = self.visit(node['children'][1], value)
return matched
def visit_and_expression(self, node, value):
matched = self.visit(node['children'][0], value)
if self._is_false(matched):
return matched
return self.visit(node['children'][1], value)
def visit_not_expression(self, node, value):
original_result = self.visit(node['children'][0], value)
if original_result is 0:
# Special case for 0, !0 should be false, not true.
# 0 is not a special cased integer in jmespath.
return False
return not original_result
def visit_pipe(self, node, value):
result = value
for node in node['children']:
result = self.visit(node, result)
return result
def visit_projection(self, node, value):
base = self.visit(node['children'][0], value)
if not isinstance(base, list):
return None
collected = []
for element in base:
current = self.visit(node['children'][1], element)
if current is not None:
collected.append(current)
return collected
def visit_value_projection(self, node, value):
base = self.visit(node['children'][0], value)
try:
base = base.values()
except AttributeError:
return None
collected = []
for element in base:
current = self.visit(node['children'][1], element)
if current is not None:
collected.append(current)
return collected
def _is_false(self, value):
# This looks weird, but we're explicitly using equality checks
# because the truth/false values are different between
# python and jmespath.
return (value == '' or value == [] or value == {} or value is None or
value is False)
def _is_true(self, value):
return not self._is_false(value)
class GraphvizVisitor(Visitor):
def __init__(self):
super(GraphvizVisitor, self).__init__()
self._lines = []
self._count = 1
def visit(self, node, *args, **kwargs):
self._lines.append('digraph AST {')
current = '%s%s' % (node['type'], self._count)
self._count += 1
self._visit(node, current)
self._lines.append('}')
return '\n'.join(self._lines)
def _visit(self, node, current):
self._lines.append('%s [label="%s(%s)"]' % (
current, node['type'], node.get('value', '')))
for child in node.get('children', []):
child_name = '%s%s' % (child['type'], self._count)
self._count += 1
self._lines.append(' %s -> %s' % (current, child_name))
self._visit(child, child_name)
```
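The `_equals` helper near the top of this module exists because JMESPath treats booleans and the integers 0/1 as distinct values, unlike plain Python equality. A short demonstration of the difference, assuming `_equals` from above is in scope:

```python
# Why _equals() special-cases 0/1 against True/False (see the comments above).
print(0 == False)         # True  -- plain Python equality
print(_equals(0, False))  # False -- JMESPath semantics: int and bool are distinct
print(_equals(1, 1))      # True  -- ordinary comparisons are unaffected
```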
#### File: pyscreenshot/plugins/qtgrabwindow.py
```python
from PIL import Image
import sys
PY3 = sys.version_info[0] >= 3
if PY3:
import io
BytesIO = io.BytesIO
else:
import StringIO
BytesIO = StringIO.StringIO
class QtGrabWindow(object):
'''based on: http://stackoverflow.com/questions/69645/take-a-screenshot-via-a-python-script-linux
'''
name = 'pyqt'
childprocess = False
def __init__(self):
import PyQt4
self.PyQt4 = PyQt4
from PyQt4 import QtGui
from PyQt4 import Qt
self.app = None
self.QtGui = QtGui
self.Qt = Qt
def grab_to_buffer(self, buff, file_type='png'):
QApplication = self.PyQt4.QtGui.QApplication
QBuffer = self.PyQt4.Qt.QBuffer
QIODevice = self.PyQt4.Qt.QIODevice
QPixmap = self.PyQt4.QtGui.QPixmap
if not self.app:
self.app = QApplication([])
qbuffer = QBuffer()
qbuffer.open(QIODevice.ReadWrite)
QPixmap.grabWindow(
QApplication.desktop().winId()).save(qbuffer, file_type)
buff.write(qbuffer.data())
qbuffer.close()
# del app
def grab(self, bbox=None):
strio = BytesIO()
self.grab_to_buffer(strio)
strio.seek(0)
im = Image.open(strio)
if bbox:
im = im.crop(bbox)
return im
def grab_to_file(self, filename):
file_type = 'png'
if filename.endswith('.jpeg'):
file_type = 'jpeg'
buff = open(filename, 'wb')
self.grab_to_buffer(buff, file_type)
buff.close()
def backend_version(self):
# TODO:
return 'not implemented'
```
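A usage sketch of the backend above, assuming PyQt4 and Pillow are installed. In normal use pyscreenshot selects a backend for you through its public `grab()` API, so instantiating the class directly is purely illustrative.

```python
# Illustrative direct use of the QtGrabWindow backend defined above
# (requires PyQt4; normally reached via pyscreenshot.grab() instead).
grabber = QtGrabWindow()
image = grabber.grab(bbox=(0, 0, 200, 200))  # PIL.Image cropped to the given box
image.save('region.png')
grabber.grab_to_file('full_screen.png')      # write the whole screen as PNG
```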
#### File: pyxb/binding/generate.py
```python
import sys
import os.path
import logging
import logging.config
import io
import datetime
import errno
import pyxb
import pyxb.xmlschema as xs
from pyxb.utils import utility, templates, six
from pyxb.utils.utility import repr2to3
from pyxb.binding import basis, datatypes, facets
_log = logging.getLogger(__name__)
def PrefixModule (value, text=None):
if text is None:
text = value.__name__
if value.__module__ == datatypes.__name__:
return 'pyxb.binding.datatypes.%s' % (text,)
if value.__module__ == facets.__name__:
return 'pyxb.binding.facets.%s' % (text,)
raise ValueError('No standard name for module of value', value)
class ReferenceLiteral (object):
"""Base class for something that requires fairly complex activity
in order to generate its literal value."""
# Either a STD or a subclass of _Enumeration_mixin, this is the
# class in which the referenced object is a member.
__ownerClass = None
# The value to be used as a literal for this object
__literal = None
def __init__ (self, **kw):
# NB: Pre-extend __init__
self.__ownerClass = kw.get('type_definition')
def setLiteral (self, literal):
self.__literal = literal
return literal
def asLiteral (self):
return self.__literal
def _addTypePrefix (self, text, **kw):
if self.__ownerClass is not None:
text = '%s.%s' % (pythonLiteral(self.__ownerClass, **kw), text)
return text
class ReferenceFacetMember (ReferenceLiteral):
__facetClass = None
def __init__ (self, **kw):
variable = kw.get('variable')
assert (variable is None) or isinstance(variable, facets.Facet)
if variable is not None:
kw.setdefault('type_definition', variable.ownerTypeDefinition())
self.__facetClass = type(variable)
self.__facetClass = kw.get('facet_class', self.__facetClass)
super(ReferenceFacetMember, self).__init__(**kw)
self.setLiteral(self._addTypePrefix('_CF_%s' % (self.__facetClass.Name(),), **kw))
class ReferenceWildcard (ReferenceLiteral):
__wildcard = None
def __init__ (self, wildcard, **kw):
self.__wildcard = wildcard
super(ReferenceWildcard, self).__init__(**kw)
template_map = { }
template_map['Wildcard'] = 'pyxb.binding.content.Wildcard'
if (xs.structures.Wildcard.NC_any == wildcard.namespaceConstraint()):
template_map['nc'] = templates.replaceInText('%{Wildcard}.NC_any', **template_map)
elif isinstance(wildcard.namespaceConstraint(), (set, frozenset)):
namespaces = []
for ns in wildcard.namespaceConstraint():
if ns is None:
namespaces.append(None)
else:
namespaces.append(ns.uri())
template_map['nc'] = 'set([%s])' % (",".join( [ repr2to3(_ns) for _ns in namespaces ]))
else:
assert isinstance(wildcard.namespaceConstraint(), tuple)
ns = wildcard.namespaceConstraint()[1]
if ns is not None:
ns = ns.uri()
template_map['nc'] = templates.replaceInText('(%{Wildcard}.NC_not, %{namespace})', namespace=repr2to3(ns), **template_map)
template_map['pc'] = wildcard.processContents()
self.setLiteral(templates.replaceInText('%{Wildcard}(process_contents=%{Wildcard}.PC_%{pc}, namespace_constraint=%{nc})', **template_map))
class ReferenceSchemaComponent (ReferenceLiteral):
__component = None
def __init__ (self, component, **kw):
self.__component = component
binding_module = kw['binding_module']
super(ReferenceSchemaComponent, self).__init__(**kw)
rv = binding_module.referenceSchemaComponent(component)
self.setLiteral(rv)
class ReferenceNamespace (ReferenceLiteral):
__namespace = None
def __init__ (self, **kw):
self.__namespace = kw['namespace']
binding_module = kw['binding_module']
super(ReferenceNamespace, self).__init__(**kw)
rv = binding_module.referenceNamespace(self.__namespace)
self.setLiteral(rv)
class ReferenceExpandedName (ReferenceLiteral):
__expandedName = None
def __init__ (self, **kw):
self.__expandedName = kw['expanded_name']
super(ReferenceExpandedName, self).__init__(**kw)
self.setLiteral('pyxb.namespace.ExpandedName(%s, %s)' % (pythonLiteral(self.__expandedName.namespace(), **kw), pythonLiteral(self.__expandedName.localName(), **kw)))
class ReferenceFacet (ReferenceLiteral):
__facet = None
def __init__ (self, **kw):
self.__facet = kw['facet']
super(ReferenceFacet, self).__init__(**kw)
self.setLiteral('%s._CF_%s' % (pythonLiteral(self.__facet.ownerTypeDefinition(), **kw), self.__facet.Name()))
class ReferenceEnumerationMember (ReferenceLiteral):
enumerationElement = None
def __init__ (self, **kw):
# NB: Pre-extended __init__
# All we really need is the enumeration element, so we can get
# its tag, and a type definition or datatype, so we can create
# the proper prefix.
# See if we were given a value, from which we can extract the
# other information.
value = kw.get('enum_value')
assert (value is None) or isinstance(value, facets._Enumeration_mixin)
# Must provide facet_instance, or a value from which it can be
# obtained.
facet_instance = kw.get('facet_instance')
if facet_instance is None:
assert isinstance(value, facets._Enumeration_mixin)
facet_instance = value._CF_enumeration
assert isinstance(facet_instance, facets.CF_enumeration)
# Must provide the enumeration_element, or a facet_instance
# and value from which it can be identified.
self.enumerationElement = kw.get('enumeration_element')
if self.enumerationElement is None:
assert value is not None
self.enumerationElement = facet_instance.elementForValue(value)
assert isinstance(self.enumerationElement, facets._EnumerationElement)
assert self.enumerationElement.tag() is not None
# If no type definition was provided, use the value datatype
# for the facet.
kw.setdefault('type_definition', facet_instance.valueDatatype())
super(ReferenceEnumerationMember, self).__init__(**kw)
self.setLiteral(self._addTypePrefix(self.enumerationElement.tag(), **kw))
def pythonLiteral (value, **kw):
# For dictionaries, apply translation to all values (not keys)
if isinstance(value, six.dictionary_type):
return ', '.join([ '%s=%s' % (k, pythonLiteral(v, **kw)) for (k, v) in six.iteritems(value) ])
# For lists, apply translation to all members
if isinstance(value, six.list_type):
return [ pythonLiteral(_v, **kw) for _v in value ]
# ExpandedName is a tuple, but not here
if isinstance(value, pyxb.namespace.ExpandedName):
return pythonLiteral(ReferenceExpandedName(expanded_name=value, **kw))
# For other collection types, do what you do for list
if isinstance(value, (six.tuple_type, set)):
return type(value)(pythonLiteral(list(value), **kw))
# Value is a binding value for which there should be an
# enumeration constant. Return that constant.
if isinstance(value, facets._Enumeration_mixin):
return pythonLiteral(ReferenceEnumerationMember(enum_value=value, **kw))
# Value is an instance of a Python binding, e.g. one of the
# XMLSchema datatypes. Use its value, applying the proper prefix
# for the module.
if isinstance(value, basis.simpleTypeDefinition):
return PrefixModule(value, value.pythonLiteral())
if isinstance(value, pyxb.namespace.Namespace):
return pythonLiteral(ReferenceNamespace(namespace=value, **kw))
if isinstance(value, type):
if issubclass(value, basis.simpleTypeDefinition):
return PrefixModule(value)
if issubclass(value, facets.Facet):
return PrefixModule(value)
if isinstance(value, facets.Facet):
return pythonLiteral(ReferenceFacet(facet=value, **kw))
# Treat pattern elements as their value
if isinstance(value, facets._PatternElement):
return pythonLiteral(value.pattern)
# Treat enumeration elements as their value
if isinstance(value, facets._EnumerationElement):
return pythonLiteral(value.value())
# Wildcards expand to a pyxb.binding.content.Wildcard instance
if isinstance(value, xs.structures.Wildcard):
return pythonLiteral(ReferenceWildcard(value, **kw))
# Schema components have a single name through their lifespan
if isinstance(value, xs.structures._SchemaComponent_mixin):
return pythonLiteral(ReferenceSchemaComponent(value, **kw))
# Other special cases
if isinstance(value, ReferenceLiteral):
return value.asLiteral()
# Represent namespaces by their URI
if isinstance(value, pyxb.namespace.Namespace):
return repr2to3(value.uri())
# Standard Python types, including string types
if isinstance(value, (six.none_type, six.boolean_type, six.float_type, six.integer_types, six.string_types)):
return pyxb.utils.utility.repr2to3(value)
raise Exception('Unexpected literal type %s' % (type(value),))
def _GenerateAutomaton (automaton, template_map, containing_state, lines, **kw):
binding_module = kw['binding_module']
name = utility.PrepareIdentifier('BuildAutomaton', binding_module.uniqueInModule(), protected=True)
au_src = []
au_src.append(templates.replaceInText('''
def %{name} ():
# Remove this helper function from the namespace after it is invoked
global %{name}
del %{name}
import pyxb.utils.fac as fac
''', name=name))
def stateSortKey (st):
if isinstance(st.symbol, xs.structures.ModelGroup):
return st.symbol.facStateSortKey()
return st.symbol[0].facStateSortKey()
def counterConditionSortKey (cc):
return cc.metadata.facStateSortKey()
def updateInstructionSortKey (ui):
return counterConditionSortKey(ui.counterCondition)
def transitionSortKey (xit):
# The destination of a transition is not unique; need to
# differentiate using the update instructions. Which
# themselves should be sorted.
st = xit.consumingState()
# Transitions into/out-of subautomata might not include a
# consuming state. Give those a sort value None.
ssk = None
if st is not None:
ssk = stateSortKey(st)
keys = [ ssk ]
keys.extend(map(updateInstructionSortKey, sorted(xit.updateInstructions, key=updateInstructionSortKey)))
return tuple(keys)
au_src.append(' counters = set()')
counter_map = {}
sorted_counter_conditions = sorted(automaton.counterConditions, key=counterConditionSortKey)
for cc in sorted_counter_conditions:
cc_id = 'cc_%u' % (len(counter_map),)
counter_map[cc] = cc_id
au_src.append(' %s = fac.CounterCondition(min=%s, max=%s, metadata=%r)' % (cc_id, repr2to3(cc.min), repr2to3(cc.max), cc.metadata._location()))
au_src.append(' counters.add(%s)' % (cc_id,))
state_map = {}
au_src.append(' states = []')
sorted_states = sorted(automaton.states, key=stateSortKey)
for st in sorted_states:
st_id = 'st_%u' % (len(state_map),)
state_map[st] = st_id
if st.subAutomata is not None:
au_src.append(' sub_automata = []')
for sa in st.subAutomata:
au_src.append(' sub_automata.append(%s)' % (_GenerateAutomaton(sa, template_map, st_id, lines, **kw),))
if st.finalUpdate is None:
au_src.append(' final_update = None')
else:
au_src.append(' final_update = set()')
for ui in sorted(st.finalUpdate, key=updateInstructionSortKey):
au_src.append(' final_update.add(fac.UpdateInstruction(%s, %r))' % (counter_map[ui.counterCondition], ui.doIncrement))
if isinstance(st.symbol, xs.structures.ModelGroup):
au_src.append(' symbol = %r' % (st.symbol._location(),))
else:
(particle, symbol) = st.symbol
if isinstance(symbol, xs.structures.Wildcard):
au_src.append(templates.replaceInText(' symbol = pyxb.binding.content.WildcardUse(%{wildcard}, %{location})', wildcard=binding_module.literal(symbol, **kw), location=repr2to3(particle._location())))
elif isinstance(symbol, xs.structures.ElementDeclaration):
binding_module.importForDeclaration(symbol)
au_src.append(templates.replaceInText(' symbol = pyxb.binding.content.ElementUse(%{ctd}._UseForTag(%{field_tag}), %{location})', field_tag=binding_module.literal(symbol.expandedName(), **kw), location=repr2to3(particle._location()), **template_map))
au_src.append(' %s = fac.State(symbol, is_initial=%r, final_update=final_update, is_unordered_catenation=%r)' % (st_id, st.isInitial, st.isUnorderedCatenation))
if st.subAutomata is not None:
au_src.append(' %s._set_subAutomata(*sub_automata)' % (st_id,))
au_src.append(' states.append(%s)' % (st_id,))
for st in sorted_states:
au_src.append(' transitions = []')
for xit in sorted(st.transitionSet, key=transitionSortKey):
au_src.append(' transitions.append(fac.Transition(%s, [' % (state_map[xit.destination],))
sorted_ui = sorted(xit.updateInstructions, key=updateInstructionSortKey)
au_src.append(' %s ]))' % (',\n '.join(map(lambda _ui: 'fac.UpdateInstruction(%s, %r)' % (counter_map[_ui.counterCondition], _ui.doIncrement), sorted_ui))))
au_src.append(' %s._set_transitionSet(transitions)' % (state_map[st],))
au_src.append(' return fac.Automaton(states, counters, %r, containing_state=%s)' % (automaton.nullable, containing_state))
lines.extend(au_src)
return '%s()' % (name,)
def GenerateAutomaton (ctd, **kw):
aux = _CTDAuxData.Get(ctd)
binding_module = kw['binding_module']
template_map = { 'ctd' : binding_module.literal(ctd, **kw) }
automaton = aux.automaton
if automaton is None:
return None
lines = []
name = _GenerateAutomaton(automaton, template_map, 'None', lines, **kw)
return (name, lines)
def _useEnumerationTags (td):
if td is None:
return False
assert isinstance(td, xs.structures.SimpleTypeDefinition)
ptd = td.baseTypeDefinition()
python_support = None
# Atomic types that use strings as their representation
if (ptd.VARIETY_atomic == ptd.variety()):
python_support = ptd.primitiveTypeDefinition().pythonSupport()
return issubclass(python_support, six.string_types)
# Derivations from anySimpleType use strings too
if (ptd.VARIETY_absent == ptd.variety()):
return True
# Union types? Yeah, I suppose so. Though this only applies to
# members lifted up into the union.
if (ptd.VARIETY_union == ptd.variety()):
return True
# List types have spaces so no tags.
return False
def GenerateFacets (td, generator, **kw):
binding_module = kw['binding_module']
outf = binding_module.bindingIO()
facet_instances = []
gen_enum_tag = _useEnumerationTags(td)
for (fc, fi) in six.iteritems(td.facets()):
#if (fi is None) or (fi.ownerTypeDefinition() != td):
# continue
if (fi is None) and (fc in td.baseTypeDefinition().facets()):
# Nothing new here
continue
if (fi is not None) and (fi.ownerTypeDefinition() != td):
# Did this one in an ancestor
continue
argset = { }
is_collection = issubclass(fc, facets._CollectionFacet_mixin)
if issubclass(fc, facets._LateDatatype_mixin):
vdt = td
if fc.LateDatatypeBindsSuperclass():
vdt = vdt.baseTypeDefinition()
argset['value_datatype'] = vdt
if fi is not None:
if not is_collection:
argset['value'] = fi.value()
if isinstance(fi, facets.CF_enumeration):
argset['enum_prefix'] = fi.enumPrefix()
facet_var = ReferenceFacetMember(type_definition=td, facet_class=fc, **kw)
outf.write("%s = %s(%s)\n" % binding_module.literal( (facet_var, fc, argset ), **kw))
facet_instances.append(binding_module.literal(facet_var, **kw))
if (fi is not None) and is_collection:
for i in six.iteritems(fi):
if isinstance(i, facets._EnumerationElement):
if isinstance(i.value(), pyxb.namespace.ExpandedName):
enum_config = '%s.addEnumeration(value=%s, tag=%s)' % binding_module.literal( ( facet_var, i.value(), i.tag() ), **kw)
else:
enum_config = '%s.addEnumeration(unicode_value=%s, tag=%s)' % binding_module.literal( ( facet_var, i.unicodeValue(), i.tag() ), **kw)
if gen_enum_tag and (i.tag() is not None):
enum_member = ReferenceEnumerationMember(type_definition=td, facet_instance=fi, enumeration_element=i, **kw)
outf.write("%s = %s\n" % (binding_module.literal(enum_member, **kw), enum_config))
if fi.enumPrefix() is not None:
outf.write("%s_%s = %s\n" % (fi.enumPrefix(), i.tag(), binding_module.literal(enum_member, **kw)))
else:
outf.write("%s\n" % (enum_config,))
if isinstance(i, facets._PatternElement):
outf.write("%s.addPattern(pattern=%s)\n" % binding_module.literal( (facet_var, i.pattern ), **kw))
if gen_enum_tag and (xs.structures.SimpleTypeDefinition.VARIETY_union == td.variety()):
# If the union has enumerations of its own, there's no need to
# inherit anything, because they supersede anything implicitly
# inherited.
fi = td.facets().get(facets.CF_enumeration)
if fi is None:
# Need to expose any enumerations in members up in this class
for mtd in td.memberTypeDefinitions():
if not _useEnumerationTags(mtd):
continue
fi = mtd.facets().get(facets.CF_enumeration)
if fi is None:
continue
for i in six.iteritems(fi):
assert isinstance(i, facets._EnumerationElement)
etd = i.enumeration().ownerTypeDefinition()
enum_member = ReferenceEnumerationMember(type_definition=td, facet_instance=fi, enumeration_element=i, **kw)
outf.write("%-50s%s\n" % ('%s = %s' % binding_module.literal( (enum_member, i.unicodeValue()) ),
'# originally %s.%s' % (binding_module.literal(etd), i.tag())))
if 2 <= len(facet_instances):
map_args = ",\n ".join(facet_instances)
else:
map_args = ','.join(facet_instances)
outf.write("%s._InitializeFacetMap(%s)\n" % (binding_module.literal(td, **kw), map_args))
def _VCAppendAuxInit (vc_source, aux_init, binding_module, kw):
if vc_source.fixed() is not None:
aux_init.append('fixed=True')
aux_init.append('unicode_default=%s' % (binding_module.literal(vc_source.fixed(), **kw),))
elif vc_source.default() is not None:
aux_init.append('unicode_default=%s' % (binding_module.literal(vc_source.default(), **kw),))
# If std is a simple type that requires an enumeration mixin, return the
# corresponding facet; otherwise return None.
def simpleTypeOwnedEnumerationFacet (std):
if not isinstance(std, xs.structures.SimpleTypeDefinition):
return None
enum_facet = std.facets().get(facets.CF_enumeration)
if (enum_facet is not None) and (enum_facet.ownerTypeDefinition() == std):
return enum_facet
return None
def GenerateSTD (std, generator):
binding_module = generator.moduleForComponent(std)
outf = binding_module.bindingIO()
class_keywords = frozenset(basis.simpleTypeDefinition._ReservedSymbols)
class_unique = set()
kw = { }
kw['binding_module'] = binding_module
kw['class_keywords'] = class_keywords
kw['class_unique'] = class_unique
parent_classes = [ binding_module.literal(std.baseTypeDefinition(), **kw) ]
if simpleTypeOwnedEnumerationFacet(std) is not None:
parent_classes.append('pyxb.binding.basis.enumeration_mixin')
template_map = { }
binding_name = template_map['std'] = binding_module.literal(std, **kw)
if (std.expandedName() is not None) and (std.expandedName().localName() != binding_name):
_log.warning('Simple type %s renamed to %s', std.expandedName(), binding_name)
template_map['superclasses'] = ''
if 0 < len(parent_classes):
template_map['superclasses'] = ', '.join(parent_classes)
template_map['expanded_name'] = binding_module.literal(std.expandedName(), **kw)
if std.expandedName() is not None:
template_map['qname'] = six.text_type(std.expandedName())
else:
template_map['qname'] = '[anonymous]'
template_map['namespaceReference'] = binding_module.literal(std.bindingNamespace(), **kw)
template_map['xsd_location'] = repr2to3(std._location())
if std.annotation() is not None:
template_map['documentation'] = std.annotation().asDocString()
template_map['documentation_expr'] = binding_module.literal(std.annotation().text())
else:
template_map['documentation'] = ''
template_map['documentation_expr'] = binding_module.literal(None)
# @todo: Extensions of LIST will be wrong in below
common_template = '''
"""%{documentation}"""
_ExpandedName = %{expanded_name}
_XSDLocation = %{xsd_location}
_Documentation = %{documentation_expr}
'''
if xs.structures.SimpleTypeDefinition.VARIETY_absent == std.variety():
template = '''
# The ur simple type: %{qname}
class %{std} (%{superclasses}):
''' + common_template
if not template_map['documentation']:
template_map['documentation'] = 'The ur simple type.'
elif xs.structures.SimpleTypeDefinition.VARIETY_atomic == std.variety():
template = '''
# Atomic simple type: %{qname}
class %{std} (%{superclasses}):
''' + common_template
if not template_map['documentation']:
template_map['documentation'] = 'An atomic simple type.'
elif xs.structures.SimpleTypeDefinition.VARIETY_list == std.variety():
template = '''
# List simple type: %{qname}
# superclasses %{superclasses}
class %{std} (pyxb.binding.basis.STD_list):
''' + common_template + '''
_ItemType = %{itemtype}
'''
template_map['itemtype'] = binding_module.literal(std.itemTypeDefinition(), **kw)
if not template_map['documentation']:
template_map['documentation'] = templates.replaceInText('Simple type that is a list of %{itemtype}.', **template_map)
elif xs.structures.SimpleTypeDefinition.VARIETY_union == std.variety():
template = '''
# Union simple type: %{qname}
# superclasses %{superclasses}
class %{std} (pyxb.binding.basis.STD_union):
''' + common_template + '''
_MemberTypes = ( %{membertypes}, )
'''
template_map['membertypes'] = ", ".join( [ binding_module.literal(_mt, **kw) for _mt in std.memberTypeDefinitions() ])
if not template_map['documentation']:
template_map['documentation'] = templates.replaceInText('Simple type that is a union of %{membertypes}.', **template_map)
else:
raise pyxb.LogicError("Unhandled STD variety")
outf.write(templates.replaceInText(template, **template_map))
GenerateFacets(std, generator, **kw)
if std.name() is not None:
outf.write(templates.replaceInText("%{namespaceReference}.addCategoryObject('typeBinding', %{localName}, %{std})\n",
localName=binding_module.literal(std.name(), **kw), **template_map))
def elementDeclarationMap (ed, binding_module, **kw):
template_map = { }
template_map['qname'] = six.text_type(ed.expandedName())
template_map['decl_location'] = repr2to3(ed._location())
template_map['namespaceReference'] = binding_module.literal(ed.bindingNamespace(), **kw)
if (ed.SCOPE_global == ed.scope()):
binding_name = template_map['class'] = binding_module.literal(ed, **kw)
if ed.expandedName().localName() != binding_name:
_log.warning('Element %s renamed to %s', ed.expandedName(), binding_name)
template_map['localName'] = binding_module.literal(ed.name(), **kw)
template_map['map_update'] = templates.replaceInText("%{namespaceReference}.addCategoryObject('elementBinding', %{localName}, %{class})", **template_map)
else:
template_map['scope'] = binding_module.literal(ed.scope(), **kw)
if ed.annotation() is not None:
template_map['documentation'] = binding_module.literal(six.text_type(ed.annotation()))
if ed.abstract():
template_map['abstract'] = binding_module.literal(ed.abstract(), **kw)
if ed.nillable():
template_map['nillable'] = binding_module.literal(ed.nillable(), **kw)
if ed.default():
template_map['defaultValue'] = binding_module.literal(ed.default(), **kw)
template_map['typeDefinition'] = binding_module.literal(ed.typeDefinition(), **kw)
if ed.substitutionGroupAffiliation():
template_map['substitution_group'] = binding_module.literal(ed.substitutionGroupAffiliation(), **kw)
aux_init = []
for k in ( 'nillable', 'abstract', 'scope', 'documentation' ):
if k in template_map:
aux_init.append('%s=%s' % (k, template_map[k]))
aux_init.append('location=%s' % (template_map['decl_location'],))
_VCAppendAuxInit(ed, aux_init, binding_module, kw)
template_map['element_aux_init'] = ''
if 0 < len(aux_init):
template_map['element_aux_init'] = ', ' + ', '.join(aux_init)
return template_map
import pyxb.utils.fac
import operator
import functools
# A Symbol in the term tree is a pair consisting of the containing
# particle (for location information) and one of an
# ElementDeclaration, Wildcard, or tuple of sub-term-trees for All
# model groups.
def BuildTermTree (node):
"""Construct a L{FAC term tree<pyxb.utils.fac.Node>} for a L{particle<xs.structures.Particle>}.
This translates the XML schema content model of particles, model
groups, element declarations, and wildcards into a tree expressing
the corresponding content as a regular expression with numerical
constraints.
@param node: An instance of L{xs.structures.Particle}
@return: An instance of L{pyxb.utils.fac.Node}
"""
def _generateTermTree_visitor (node, entered, arg):
"""Helper for constructing a L{FAC term tree<pyxb.utils.fac.Node>}.
This is passed to L{xs.structures.Particle.walkParticleTree}.
@param node: An instance of L{xs.structures._ParticleTree_mixin}
@param entered: C{True} entering an interior tree node, C{False}
leaving an interior tree node, C{None} at a leaf node.
@param arg: A list of pairs C{(particle, terms)} where C{particle}
is the L{xs.structures.Particle} instance containing a list of
L{term trees<pyxb.utils.fac.Node>}.
"""
if entered is None:
(parent_particle, terms) = arg.peekNodeTermPair()
assert isinstance(parent_particle, xs.structures.Particle)
assert isinstance(node, (xs.structures.ElementDeclaration, xs.structures.Wildcard))
node._setFacStateSortKey(arg.nextSequenceNumber())
terms.append(pyxb.utils.fac.Symbol((parent_particle, node)))
elif entered:
node._setFacStateSortKey(arg.nextSequenceNumber())
arg.addNode(node)
else:
(xnode, terms) = arg.popNodeTermPair()
assert xnode == node
(parent_particle, siblings) = arg.peekNodeTermPair()
if 1 == len(terms):
term = terms[0]
# Either node is a Particle, or it's a single-member model
# group. If it's a non-trivial particle we need a
# numerical constraint; if it's a single-member model
# group or a trivial particle we can use the term
# directly.
if isinstance(node, xs.structures.Particle) and ((1 != node.minOccurs()) or (1 != node.maxOccurs())):
term = pyxb.utils.fac.NumericalConstraint(term, node.minOccurs(), node.maxOccurs(), metadata=node)
else:
assert isinstance(parent_particle, xs.structures.Particle), 'unexpected %s' % (parent_particle,)
assert isinstance(node, xs.structures.ModelGroup)
if node.C_CHOICE == node.compositor():
term = pyxb.utils.fac.Choice(*terms, metadata=node)
elif node.C_SEQUENCE == node.compositor():
term = pyxb.utils.fac.Sequence(*terms, metadata=node)
else:
# The quadratic state explosion and need to clone
# terms that results from a naive transformation of
# unordered catenation to choices among sequences of
# nodes and recursively-defined catenation expressions
# is not worth the pain. Create a "symbol" for the
# state and hold the alternatives in it.
assert node.C_ALL == node.compositor()
assert functools.reduce(operator.and_, map(lambda _s: isinstance(_s, pyxb.utils.fac.Node), terms), True)
term = pyxb.utils.fac.All(*terms, metadata=node)
siblings.append(term)
class TermTreeArg (object):
__sequenceNumber = None
__termTreeList = None
__nodeTermPairs = None
def __init__ (self, node):
self.__sequenceNumber = 0
self.__termTreeList = []
self.__nodeTermPairs = [ (node, self.__termTreeList) ]
def termTree (self):
assert 1 == len(self.__nodeTermPairs)
assert 1 == len(self.__termTreeList)
return self.__termTreeList[0]
def peekNodeTermPair (self):
return self.__nodeTermPairs[-1]
def popNodeTermPair (self):
return self.__nodeTermPairs.pop()
def addNode (self, node):
self.__nodeTermPairs.append((node, []))
def nextSequenceNumber (self):
rv = self.__sequenceNumber
self.__sequenceNumber += 1
return rv
assert isinstance(node, xs.structures.Particle)
ttarg = TermTreeArg(node)
node.walkParticleTree(_generateTermTree_visitor, ttarg)
term_tree = ttarg.termTree()
return term_tree
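# Hedged usage sketch: when a complex type's content basis is a particle, the
# term tree and its automaton are obtained as in _CTDAuxData below, e.g.
#   term_tree = BuildTermTree(ctd.contentType()[1])
#   automaton = term_tree.buildAutomaton()
# where ctd is assumed to be an xs.structures.ComplexTypeDefinition with
# element-only or mixed content.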
def BuildPluralityData (term_tree):
"""Walk a term tree to determine which element declarations may
appear multiple times.
The bindings need to use a list for any Python attribute
corresponding to an element declaration that can occur multiple
times in the content model. The number of occurrences is
determined by the occurrence constraints on parent particles and
the compositors of containing model groups. All this information
is available in the term tree used for the content model
automaton.
@param term_tree: A L{FAC term tree<pyxb.utils.fac.Node>}
representing the content model for a complex data type.
@return: Plurality data, as a pair C{(singles, multiples)} where
C{singles} is a set of base L{element
declarations<xs.structures.ElementDeclaration>} that are known to
occur at least once and at most once in a region of the content,
and C{multiples} is a similar set of declarations that are known
to potentially occur more than once."""
def _ttMergeSets (parent, child):
(p1, pm) = parent
(c1, cm) = child
# Anything multiple in the child becomes multiple in the parent.
pm.update(cm)
# Anything independently occurring once in both parent and child
# becomes multiple in the parent.
pm.update(c1.intersection(p1))
# Anything that was single in the parent (child) but is now
# multiple is no longer single.
p1.difference_update(pm)
c1.difference_update(pm)
# Anything that was single in the parent and also single in the
# child is no longer single in the parent.
p1.symmetric_difference_update(c1)
def _ttPrePluralityWalk (node, pos, arg):
# If there are multiple children, create a new list on which they
# will be placed.
if isinstance(node, pyxb.utils.fac.MultiTermNode):
arg.append([])
def _ttPostPluralityWalk (node, pos, arg):
# Initialize a fresh result for this node
singles = set()
multiples = set()
combined = (singles, multiples)
if isinstance(node, pyxb.utils.fac.MultiTermNode):
# Get the list of children, and examine
term_list = arg.pop()
if isinstance(node, pyxb.utils.fac.Choice):
# For choice we aggregate the singles and multiples
# separately.
for (t1, tm) in term_list:
multiples.update(tm)
singles.update(t1)
else:
# For sequence (ordered or not) we merge the children
assert isinstance(node, (pyxb.utils.fac.Sequence, pyxb.utils.fac.All))
for tt in term_list:
_ttMergeSets(combined, tt)
elif isinstance(node, pyxb.utils.fac.Symbol):
(particle, term) = node.metadata
if isinstance(term, xs.structures.ElementDeclaration):
# One instance of the base declaration for the element
singles.add(term.baseDeclaration())
elif isinstance(term, xs.structures.Wildcard):
pass
else:
assert isinstance(term, list)
# For plurality purposes, unordered catenation (an all group) merges like ordered catenation.
for tt in term:
_ttMergeSets(combined, BuildPluralityData(tt))
else:
assert isinstance(node, pyxb.utils.fac.NumericalConstraint)
# Grab the data for the topmost tree and adjust it based on
# occurrence data.
combined = arg[-1].pop()
(singles, multiples) = combined
if 0 == node.max:
# If the node can't match at all, there are no occurrences
# at all
multiples.clear()
singles.clear()
elif 1 == node.max:
# If the node can only match once, what we've got is right
pass
else:
# If the node can match multiple times, there are no
# singles.
multiples.update(singles)
singles.clear()
arg[-1].append(combined)
# Initialize state with an implied parent that currently has no
# children
arg = [[]]
term_tree.walkTermTree(_ttPrePluralityWalk, _ttPostPluralityWalk, arg)
# The result term tree is the single child of that implied parent
assert 1 == len(arg)
arg = arg[0]
assert 1 == len(arg)
return arg[0]
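# Hedged usage sketch: the plurality data drives whether a binding attribute is
# a scalar or a list, e.g.
#   (singles, multiples) = BuildPluralityData(term_tree)
#   is_plural = ed.baseDeclaration() in multiples
# where ed is assumed to be an xs.structures.ElementDeclaration appearing in the
# content model described by term_tree.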
class _CTDAuxData (object):
"""Helper class holding information need in both preparation and generation."""
contentBasis = None
termTree = None
edSingles = None
edMultiples = None
automaton = None
ctd = None
def __init__ (self, ctd):
self.ctd = ctd
ctd.__auxData = self
self.contentBasis = ctd.contentType()[1]
if isinstance(self.contentBasis, xs.structures.Particle):
self.termTree = BuildTermTree(self.contentBasis)
self.automaton = self.termTree.buildAutomaton()
(self.edSingles, self.edMultiples) = BuildPluralityData(self.termTree)
else:
self.edSingles = set()
self.edMultiples = set()
@classmethod
def Create (cls, ctd):
return cls(ctd)
@classmethod
def Get (cls, ctd):
return ctd.__auxData
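# Hedged usage sketch: _CTDAuxData is created during preparation and consulted
# during generation, e.g.
#   _CTDAuxData.Create(ctd)          # in _PrepareComplexTypeDefinition
#   aux = _CTDAuxData.Get(ctd)       # in GenerateCTD
#   elements = aux.edSingles.union(aux.edMultiples)
# Both call sites appear elsewhere in this module; the sketch only restates them.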
def GenerateCTD (ctd, generator, **kw):
binding_module = generator.moduleForComponent(ctd)
outf = binding_module.bindingIO()
prolog_template = None
template_map = { }
binding_name = template_map['ctd'] = binding_module.literal(ctd, **kw)
if (ctd.expandedName() is not None) and (ctd.expandedName().localName() != binding_name):
_log.warning('Complex type %s renamed to %s', ctd.expandedName(), binding_name)
base_type = ctd.baseTypeDefinition()
content_type_tag = ctd._contentTypeTag()
template_map['base_type'] = binding_module.literal(base_type, **kw)
template_map['namespaceReference'] = binding_module.literal(ctd.bindingNamespace(), **kw)
template_map['expanded_name'] = binding_module.literal(ctd.expandedName(), **kw)
if ctd.expandedName() is not None:
template_map['qname'] = six.text_type(ctd.expandedName())
else:
template_map['qname'] = '[anonymous]'
template_map['xsd_location'] = repr2to3(ctd._location())
template_map['simple_base_type'] = binding_module.literal(None, **kw)
template_map['contentTypeTag'] = content_type_tag
template_map['is_abstract'] = repr2to3(not not ctd.abstract())
content_basis = None
if (ctd.CT_SIMPLE == content_type_tag):
content_basis = ctd.contentType()[1]
template_map['simple_base_type'] = binding_module.literal(content_basis, **kw)
elif (ctd.CT_MIXED == content_type_tag):
content_basis = ctd.contentType()[1]
elif (ctd.CT_ELEMENT_ONLY == content_type_tag):
content_basis = ctd.contentType()[1]
if ctd.annotation() is not None:
template_map['documentation'] = ctd.annotation().asDocString()
elif isinstance(ctd.owner(), xs.structures.ElementDeclaration) \
and ctd.owner().annotation() is not None:
template_map['documentation'] = ctd.owner().annotation().asDocString()
else:
template_map['documentation'] = templates.replaceInText("Complex type %{qname} with content type %{contentTypeTag}", **template_map)
prolog_template = '''
# Complex type %{qname} with content type %{contentTypeTag}
class %{ctd} (%{superclass}):
"""%{documentation}"""
_TypeDefinition = %{simple_base_type}
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_%{contentTypeTag}
_Abstract = %{is_abstract}
_ExpandedName = %{expanded_name}
_XSDLocation = %{xsd_location}
'''
# Complex types that inherit from non-ur-type complex types should
# have their base type as their Python superclass, so pre-existing
# elements and attributes can be re-used.
inherits_from_base = True
template_map['superclass'] = binding_module.literal(base_type, **kw)
if ctd._isHierarchyRoot():
inherits_from_base = False
template_map['superclass'] = 'pyxb.binding.basis.complexTypeDefinition'
assert base_type.nameInBinding() is not None
if inherits_from_base:
prolog_template += ''' _ElementMap = %{superclass}._ElementMap.copy()
_AttributeMap = %{superclass}._AttributeMap.copy()
'''
else:
prolog_template += ''' _ElementMap = {}
_AttributeMap = {}
'''
# Support for deconflicting attributes, elements, and reserved symbols
class_keywords = frozenset(basis.complexTypeDefinition._ReservedSymbols)
class_unique = set()
# Deconflict elements first, attributes are lower priority.
# Expectation is that all elements that have the same tag in the
# XML are combined into the same instance member, even if they
# have different types. Determine what name that should be, and
# whether there might be multiple instances of elements of that
# name.
element_uses = []
definitions = []
definitions.append('# Base type is %{base_type}')
# Retain in the ctd the information about the element
# infrastructure, so it can be inherited where appropriate in
# subclasses.
if isinstance(content_basis, xs.structures.Particle):
aux = _CTDAuxData.Get(ctd)
elements = aux.edSingles.union(aux.edMultiples)
outf.postscript().append("\n\n")
for ed in sorted(elements, key=lambda _c: _c.schemaOrderSortKey()):
is_plural = ed in aux.edMultiples
# @todo Detect and account for plurality change between this and base
ef_map = ed._templateMap()
if ed.scope() == ctd:
ef_map.update(elementDeclarationMap(ed, binding_module, **kw))
aux_init = []
ef_map['is_plural'] = repr2to3(is_plural)
element_uses.append(templates.replaceInText('%{use}.name() : %{use}', **ef_map))
if 0 == len(aux_init):
ef_map['aux_init'] = ''
else:
ef_map['aux_init'] = ', ' + ', '.join(aux_init)
ef_map['element_binding'] = utility.PrepareIdentifier('%s_elt' % (ef_map['id'],), class_unique, class_keywords, private=True)
if ed.annotation() is not None:
ef_map['documentation'] = binding_module.literal(six.text_type(ed.annotation()))
else:
ef_map['documentation'] = binding_module.literal(None)
if ed.scope() != ctd:
definitions.append(templates.replaceInText('''
# Element %{id} (%{qname}) inherited from %{decl_type_en}''', decl_type_en=six.text_type(ed.scope().expandedName()), **ef_map))
continue
binding_module.importForDeclaration(ed)
if ed.expandedName().localName() != ef_map['id']:
_log.warning('Element use %s.%s renamed to %s', ctd.expandedName(), ed.expandedName(), ef_map['id'])
definitions.append(templates.replaceInText('''
# Element %{qname} uses Python identifier %{id}
%{use} = pyxb.binding.content.ElementDeclaration(%{name_expr}, '%{id}', '%{key}', %{is_plural}, %{decl_location}, %{aux_init})
''', name_expr=binding_module.literal(ed.expandedName(), **kw), **ef_map))
definitions.append(templates.replaceInText('''
%{inspector} = property(%{use}.value, %{use}.set, None, %{documentation})
''', **ef_map))
outf.postscript().append(templates.replaceInText('''
%{ctd}._AddElement(pyxb.binding.basis.element(%{name_expr}, %{typeDefinition}%{element_aux_init}))
''', name_expr=binding_module.literal(ed.expandedName(), **kw), ctd=template_map['ctd'], **ef_map))
auto_defn = GenerateAutomaton(ctd, binding_module=binding_module, **kw)
if auto_defn is not None:
(automaton_ctor, lines) = auto_defn
if lines:
outf.postscript().append("\n".join(lines))
outf.postscript().append("\n")
outf.postscript().append(templates.replaceInText('%{ctd}._Automaton = %{automaton_ctor}\n', ctd=template_map['ctd'], automaton_ctor=automaton_ctor))
outf.postscript().append("\n")
# Create definitions for all attributes.
attribute_uses = []
# name - String value of expanded name of the attribute (attr_tag, attr_ns)
# name_expr - Python expression for an expanded name identifying the attribute (attr_tag)
# use - Binding variable name holding AttributeUse instance (attr_name)
# id - Python identifier for attribute (python_attr_name)
# key - String used as dictionary key holding instance value of attribute (value_attr_name)
# inspector - Name of the method used for inspection (attr_inspector)
# mutator - Name of the method used for mutation (attr_mutator)
for au in sorted(ctd.attributeUses(), key=lambda _au: _au.attributeDeclaration().schemaOrderSortKey()):
ad = au.attributeDeclaration()
assert isinstance(ad.scope(), xs.structures.ComplexTypeDefinition), 'unexpected scope %s' % (ad.scope(),)
au_map = ad._templateMap()
if ad.scope() != ctd:
definitions.append(templates.replaceInText('''
# Attribute %{id} inherited from %{decl_type_en}''', decl_type_en=six.text_type(ad.scope().expandedName()), **au_map))
continue
assert isinstance(au_map, dict)
aur = au
while aur.restrictionOf() is not None:
aur = aur.restrictionOf()
if au != aur:
au_map = aur.attributeDeclaration()._templateMap().copy()
definitions.append(templates.replaceInText('''
# Attribute %{id} is restricted from parent''', **au_map))
assert ad.typeDefinition() is not None
au_map['attr_type'] = binding_module.literal(ad.typeDefinition(), **kw)
au_map['decl_location'] = repr2to3(ad._location())
au_map['use_location'] = repr2to3(au._location())
vc_source = ad
if au.valueConstraint() is not None:
vc_source = au
aux_init = []
_VCAppendAuxInit(vc_source, aux_init, binding_module, kw)
if au.required():
aux_init.append('required=True')
if au.prohibited():
aux_init.append('prohibited=True')
if 0 == len(aux_init):
au_map['aux_init'] = ''
else:
aux_init.insert(0, '')
au_map['aux_init'] = ', '.join(aux_init)
if ad.annotation() is not None:
au_map['documentation'] = binding_module.literal(six.text_type(ad.annotation()))
else:
au_map['documentation'] = binding_module.literal(None)
binding_module.importForDeclaration(ad)
attribute_uses.append(templates.replaceInText('%{use}.name() : %{use}', **au_map))
if ad.expandedName().localName() != au_map['id']:
_log.warning('Attribute %s.%s renamed to %s', ctd.expandedName(), ad.expandedName(), au_map['id'])
definitions.append(templates.replaceInText('''
# Attribute %{qname} uses Python identifier %{id}
%{use} = pyxb.binding.content.AttributeUse(%{name_expr}, '%{id}', '%{key}', %{attr_type}%{aux_init})
%{use}._DeclarationLocation = %{decl_location}
%{use}._UseLocation = %{use_location}''', name_expr=binding_module.literal(ad.expandedName(), **kw), **au_map))
definitions.append(templates.replaceInText('''
%{inspector} = property(%{use}.value, %{use}.set, None, %{documentation})
''', ctd=template_map['ctd'], **au_map))
if ctd.attributeWildcard() is not None:
definitions.append('_AttributeWildcard = %s' % (binding_module.literal(ctd.attributeWildcard(), **kw),))
if ctd.hasWildcardElement():
definitions.append('_HasWildcardElement = True')
template_map['attribute_uses'] = ",\n ".join(attribute_uses)
template_map['element_uses'] = ",\n ".join(element_uses)
template_map['registration'] = ''
if ctd.name() is not None:
template_map['registration'] = templates.replaceInText("%{namespaceReference}.addCategoryObject('typeBinding', %{localName}, %{ctd})",
localName=binding_module.literal(ctd.name(), **kw), **template_map)
template = ''.join([prolog_template,
" ", "\n ".join(definitions), "\n",
''' _ElementMap.update({
%{element_uses}
})
_AttributeMap.update({
%{attribute_uses}
})
%{registration}
'''])
outf.write(template, **template_map)
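# Illustrative sketch (assumed shape, not verbatim output): for a complex type
# "Address" with one "street" element and a "kind" attribute, GenerateCTD emits
# roughly
#   class Address (pyxb.binding.basis.complexTypeDefinition):
#       __street = pyxb.binding.content.ElementDeclaration(...)
#       street = property(__street.value, __street.set, None, None)
#       __kind = pyxb.binding.content.AttributeUse(...)
#       kind = property(__kind.value, __kind.set, None, None)
#       _ElementMap.update({ __street.name() : __street })
#       _AttributeMap.update({ __kind.name() : __kind })
# plus postscript lines that register the element bindings and the content
# automaton. "Address", "street", and "kind" are placeholder names.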
def GenerateED (ed, generator, **kw):
# Unscoped declarations should never be referenced in the binding.
assert ed._scopeIsGlobal()
binding_module = generator.moduleForComponent(ed)
outf = binding_module.bindingIO()
template_map = elementDeclarationMap(ed, binding_module, **kw)
template_map.setdefault('scope', binding_module.literal(None, **kw))
template_map.setdefault('map_update', '')
binding_module.importForDeclaration(ed)
outf.write(templates.replaceInText('''
%{class} = pyxb.binding.basis.element(%{name_expr}, %{typeDefinition}%{element_aux_init})
%{namespaceReference}.addCategoryObject('elementBinding', %{class}.name().localName(), %{class})
''', name_expr=binding_module.literal(ed.expandedName(), **kw), **template_map))
if ed.substitutionGroupAffiliation() is not None:
outf.postscript().append(templates.replaceInText('''
%{class}._setSubstitutionGroup(%{substitution_group})
''', **template_map))
def _PrepareSimpleTypeDefinition (std, generator, nsm, module_context):
std._templateMap()['_unique'] = nsm.uniqueInClass(std)
if _useEnumerationTags(std):
enum_facet = simpleTypeOwnedEnumerationFacet(std)
if enum_facet is not None:
for ei in six.iteritems(enum_facet):
assert ei.tag() is None, '%s already has a tag' % (ei,)
ei._setTag(utility.PrepareIdentifier(ei.unicodeValue(), nsm.uniqueInClass(std)))
def _PrepareComplexTypeDefinition (ctd, generator, nsm, module_context):
kw = { 'binding_module' : module_context }
ctd._templateMap()['_unique'] = nsm.uniqueInClass(ctd)
aux = _CTDAuxData.Create(ctd)
multiples = aux.edMultiples
for cd in ctd.localScopedDeclarations():
_SetNameWithAccessors(cd, ctd, cd in multiples, module_context, nsm, kw)
def _SetNameWithAccessors (component, container, is_plural, binding_module, nsm, kw):
use_map = component._templateMap()
class_unique = nsm.uniqueInClass(container)
assert isinstance(component, xs.structures._ScopedDeclaration_mixin)
unique_name = utility.PrepareIdentifier(component.expandedName().localName(), class_unique)
use_map['id'] = unique_name
use_map['inspector'] = unique_name
use_map['mutator'] = utility.PrepareIdentifier('set' + unique_name[0].upper() + unique_name[1:], class_unique)
use_map['use'] = utility.MakeUnique('__' + unique_name.strip('_'), class_unique)
assert component._scope() == container
assert component.nameInBinding() is None, 'Use %s but binding name %s for %s' % (use_map['use'], component.nameInBinding(), component.expandedName())
component.setNameInBinding(use_map['use'])
key_name = six.u('%s_%s_%s') % (six.text_type(nsm.namespace()), container.nameInBinding(), component.expandedName())
use_map['key'] = utility.PrepareIdentifier(key_name, class_unique, private=True)
use_map['qname'] = six.text_type(component.expandedName())
if isinstance(component, xs.structures.ElementDeclaration) and is_plural:
use_map['appender'] = utility.PrepareIdentifier('add' + unique_name[0].upper() + unique_name[1:], class_unique)
return use_map
class BindingIO (object):
__prolog = None
__postscript = None
__templateMap = None
__stringIO = None
__bindingFilePath = None
__bindingFile = None
def __init__ (self, binding_module, **kw):
super(BindingIO, self).__init__()
self.__bindingModule = binding_module
self.__bindingFilePath = kw['binding_file_path']
self.__bindingFile = kw['binding_file']
self.__prolog = []
self.__postscript = []
self.__templateMap = kw.copy()
encoding = kw.get('encoding', pyxb._OutputEncoding)
self.__templateMap.update({ 'date' : str(datetime.datetime.now()),
'filePath' : self.__bindingFilePath,
'coding' : encoding,
'binding_module' : binding_module,
'binding_tag' : binding_module.bindingTag(),
'pyxbVersion' : pyxb.__version__,
'pythonVersion' : '.'.join(map(str, sys.version_info))})
self.__stringIO = io.StringIO()
if self.__bindingFile:
prefacet = self.expand('''# %{filePath}
# -*- coding: %{coding} -*-
# PyXB bindings for %{binding_tag}
# Generated %{date} by PyXB version %{pyxbVersion} using Python %{pythonVersion}
%{binding_preface}''', binding_preface=binding_module.bindingPreface())
self.__bindingFile.write(prefacet.encode(encoding))
self.__bindingFile.flush()
def bindingFile (self):
return self.__bindingFile
def expand (self, template, **kw):
tm = self.__templateMap.copy()
tm.update(kw)
return templates.replaceInText(template, **tm)
def write (self, template, **kw):
txt = self.expand(template, **kw)
self.__stringIO.write(txt)
def bindingModule (self):
return self.__bindingModule
__bindingModule = None
def prolog (self):
return self.__prolog
def postscript (self):
return self.__postscript
def literal (self, *args, **kw):
kw.update(self.__templateMap)
return pythonLiteral(*args, **kw)
def contents (self):
rv = self.__prolog
rv.append(self.__stringIO.getvalue())
rv.extend(self.__postscript)
return ''.join(rv)
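# Hedged sketch of the %{key} substitution used throughout this module: the
# BindingIO expand/write helpers delegate to templates.replaceInText, so
#   bio.write('class %{ctd} (%{superclass}):\n', ctd='Address',
#             superclass='pyxb.binding.basis.complexTypeDefinition')
# would append "class Address (pyxb.binding.basis.complexTypeDefinition):" to
# the in-memory buffer later returned by contents(). The name bio is only for
# illustration.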
class _ModuleNaming_mixin (object):
__anonSTDIndex = None
__anonCTDIndex = None
__uniqueInModule = None
__uniqueInClass = None
__referencedFromClass = None
_UniqueInModule = set([ 'pyxb', 'sys' ])
"""Identifiers that are reserved within a module.
Subclasses extend this with the identifiers they add to the
module. Module-level schema-derived identifiers (such as type
definition and element names) are deconflicted from this set and
from each other."""
_ReferencedFromClass = set([ 'pyxb', 'sys' ])
"""Identifiers defined in module that are accessed unqualified from class.
These include standard import module names and globals such as
references to namespaces."""
__ComponentBindingModuleMap = {}
def generator (self):
return self.__generator
__generator = None
def __init__ (self, generator, *args, **kw):
super(_ModuleNaming_mixin, self).__init__(*args, **kw)
self.__generator = generator
assert isinstance(self.__generator, Generator)
self.__anonSTDIndex = 1
self.__anonCTDIndex = 1
self.__components = []
self.__componentNameMap = {}
self.__uniqueInModule = set()
self.__referencedFromClass = self._ReferencedFromClass.copy()
self.__bindingIO = None
self.__importModulePathMap = {}
self.__namespaceDeclarations = []
self.__referencedNamespaces = {}
self.__uniqueInClass = {}
def _importModule (self, module):
assert not isinstance(module, pyxb.namespace.Namespace)
assert isinstance(module, (_ModuleNaming_mixin, pyxb.namespace.archive.ModuleRecord)), 'Unexpected type %s' % (type(module),)
if isinstance(module, NamespaceModule):
if pyxb.namespace.XMLSchema == module.namespace():
return
module = module.moduleRecord()
assert isinstance(module, (pyxb.namespace.archive.ModuleRecord, NamespaceGroupModule))
if not (module in self.__importModulePathMap):
module_path = module.modulePath()
if 'pyxb' == module_path.split('.', 2)[0]:
assert 'pyxb' in self.uniqueInModule()
assert 'pyxb' in self.__referencedFromClass
module_path = None
else:
module_path = utility.PrepareIdentifier('ImportedBinding_' + module_path.replace('.', '_'),
self.uniqueInModule(), protected=True)
self.__referencedFromClass.add(module_path)
self.__importModulePathMap[module] = module_path
def uniqueInClass (self, component):
rv = self.__uniqueInClass.get(component)
if rv is None:
rv = set()
rv.update(self.__referencedFromClass)
if isinstance(component, xs.structures.SimpleTypeDefinition):
rv.update(basis.simpleTypeDefinition._ReservedSymbols)
if simpleTypeOwnedEnumerationFacet(component) is not None:
rv.update(basis.enumeration_mixin._ReservedSymbols)
else:
assert isinstance(component, xs.structures.ComplexTypeDefinition)
if component._isHierarchyRoot():
rv.update(basis.complexTypeDefinition._ReservedSymbols)
else:
base_td = component.baseTypeDefinition()
base_unique = base_td._templateMap().get('_unique')
assert base_unique is not None, 'Base %s of %s has no unique' % (base_td.expandedName(), component.expandedName())
rv.update(base_unique)
self.__uniqueInClass[component] = rv
return rv
__referencedNamespaces = None
def bindingIO (self):
return self.__bindingIO
__moduleUID = None
def moduleUID (self):
if self.__moduleUID is None:
self.__moduleUID = pyxb.utils.utility.HashForText(self._moduleUID_vx())
return self.__moduleUID
def _moduleUID_vx (self):
return str(id(self))
def bindingTag (self):
"""Return a distinct string recorded in the first 4096 bytes of the binding file.
This is used to ensure uniqueness and avoid overwriting data
belonging to a different binding. The return value comprises
the class-specialized L{_bindingTagPrefix_vx} with the
L{moduleUID}.
"""
return '%s:%s' % (self._bindingTagPrefix_vx(), self.moduleUID())
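# For example, a NamespaceModule (whose _bindingTagPrefix_vx, defined later in
# this module, returns 'NM') produces a tag such as 'NM:<hash>', where the hash
# text comes from pyxb.utils.utility.HashForText in moduleUID above.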
def _bindingTagPrefix_vx (self):
raise pyxb.LogicError('Subclass %s does not define _bindingTagPrefix_vx' % (type(self),))
def bindingPreface (self):
"""Return a block of binding text (comment or code) serving as a preface.
Normally this should describe the module contents."""
return self._bindingPreface_vx()
def _bindingPreface_vx (self):
return ''
def moduleContents (self):
template_map = {}
aux_imports = []
for (mr, as_path) in six.iteritems(self.__importModulePathMap):
assert self != mr
if as_path is not None:
aux_imports.append('import %s as %s' % (mr.modulePath(), as_path))
else:
aux_imports.append('import %s' % (mr.modulePath(),))
template_map['aux_imports'] = "\n".join(aux_imports)
template_map['namespace_decls'] = "\n".join(self.__namespaceDeclarations)
template_map['module_uid'] = self.moduleUID()
template_map['generation_uid_expr'] = repr2to3(self.generator().generationUID())
self._finalizeModuleContents_vx(template_map)
return self.__bindingIO.contents()
def modulePath (self):
return self.__modulePath
def _setModulePath (self, path_data):
(binding_file_path, binding_file, module_path) = path_data
self.__bindingFilePath = binding_file_path
self.__bindingFile = binding_file
if module_path is None:
module_path = self.moduleRecord().modulePath()
if module_path is not None:
self.__modulePath = module_path
kw = self._initialBindingTemplateMap()
self.__bindingIO = BindingIO(self, binding_file=binding_file, binding_file_path=binding_file_path, **kw)
__modulePath = None
def pathFromImport (self, module, name):
"""Python code reference to an object in an imported module"""
if isinstance(module, NamespaceModule):
module = module.moduleRecord()
as_path = self.__importModulePathMap[module]
if as_path is None:
as_path = module.modulePath()
return '%s.%s' % (as_path, name)
def bindingFile (self):
return self.__bindingFile
__bindingFile = None
__bindingFilePath = None
def _initializeUniqueInModule (self, unique_in_module):
self.__uniqueInModule = set(unique_in_module)
def uniqueInModule (self):
return self.__uniqueInModule
@classmethod
def BindComponentInModule (cls, component, module):
cls.__ComponentBindingModuleMap[component] = module
return module
@classmethod
def ComponentBindingModule (cls, component):
return cls.__ComponentBindingModuleMap.get(component)
@classmethod
def _RecordModule (cls, module):
cls.__RecordModuleMap[module.moduleRecord()] = module
return module
@classmethod
def _ForRecord (cls, module_record):
return cls.__RecordModuleMap.get(module_record)
__RecordModuleMap = { }
def _bindComponent (self, component):
kw = {}
rv = component.bestNCName()
if rv is None:
if isinstance(component, xs.structures.ComplexTypeDefinition):
rv = utility.PrepareIdentifier('CTD_ANON', self.uniqueInClass(component), protected=True)
elif isinstance(component, xs.structures.SimpleTypeDefinition):
rv = utility.PrepareIdentifier('STD_ANON', self.uniqueInClass(component), protected=True)
else:
assert False
kw['protected'] = True
rv = utility.PrepareIdentifier(rv, self.__uniqueInModule, kw)
assert not component in self.__componentNameMap
self.__components.append(component)
self.__componentNameMap[component] = rv
return rv
def nameInModule (self, component):
return self.__componentNameMap.get(component)
def referenceSchemaComponent (self, component):
origin = component._objectOrigin()
assert origin is not None
module_record = origin.moduleRecord()
assert module_record is not None
if self.generator().generationUID() != module_record.generationUID():
self._importModule(module_record)
return self.pathFromImport(module_record, component.nameInBinding())
component_module = _ModuleNaming_mixin.ComponentBindingModule(component)
assert component_module is not None, 'No binding module for %s from %s in %s as %s' % (component, module_record, self.moduleRecord(), component.nameInBinding())
name = component_module.__componentNameMap.get(component)
if name is None:
assert isinstance(self, NamespaceModule) and (self.namespace() == component.bindingNamespace())
name = component.nameInBinding()
if self != component_module:
self._importModule(component_module)
name = self.pathFromImport(component_module, name)
return name
def _referencedNamespaces (self): return self.__referencedNamespaces
def defineNamespace (self, namespace, name, definition=None, **kw):
rv = self.__referencedNamespaces.get(namespace)
assert rv is None, 'Module %s already has reference to %s' % (self, namespace)
# All module-level namespace declarations are reserved.
# Some may have a protected name. The unprotected name
# shall always begin with 'Namespace'. These names may
# be referenced from class implementations as well.
assert name.startswith('Namespace'), 'unexpected %s naming %s' % (name, namespace)
name = utility.PrepareIdentifier(name, self.__uniqueInModule, **kw)
self.__referencedFromClass.add(name)
if definition is None:
if namespace.isAbsentNamespace():
definition = 'pyxb.namespace.CreateAbsentNamespace()'
else:
definition = 'pyxb.namespace.NamespaceForURI(%s, create_if_missing=True)' % (repr2to3(namespace.uri()),)
self.__namespaceDeclarations.append('%s = %s' % (name, definition))
self.__namespaceDeclarations.append("%s.configureCategories(['typeBinding', 'elementBinding'])" % (name,))
self.__referencedNamespaces[namespace] = name
return name
def referenceNamespace (self, namespace):
rv = self.__referencedNamespaces.get(namespace)
if rv is None:
assert not (isinstance(self, NamespaceModule) and (self.namespace() == namespace))
assert namespace.isBuiltinNamespace() or not namespace.isUndeclaredNamespace()
if namespace.isBuiltinNamespace():
rv = namespace.builtinNamespaceRepresentation()
if rv is None:
# Not the local namespace or a built-in. Give it a
# local name, potentially derived from its prefix.
# Then try to find an existing import that defines the
# namespace. Then define the local name within the
# binding, either as a reference to a namespace
# reachable from an import or by doing a runtime
# lookup from the namespace URI if somehow no provider
# has been imported. (This last case should apply
# only to namespace group modules.)
if namespace.prefix():
nsn = 'Namespace_%s' % (namespace.prefix(),)
else:
nsn = 'Namespace'
nsdef = None
for im in six.iterkeys(self.__importModulePathMap):
if isinstance(im, pyxb.namespace.archive.ModuleRecord):
if im.namespace() == namespace:
nsdef = self.pathFromImport(im, 'Namespace')
break
elif isinstance(im, NamespaceGroupModule):
pass
else:
assert False, 'unexpected import from type %s %s' % (type(im), im,)
# If we failed to identify the namespace in an existing import,
# and this module is not a namespace group which includes the
# namespace as part of its content, something went wrong.
if (nsdef is None) and not (isinstance(self, NamespaceGroupModule) and self.moduleForNamespace(namespace) is not None):
# This can happen if we've got a QName for some namespace for which
# we don't have any information. That's actually OK, so just go
# ahead and define a namespace we can reference.
pass
rv = self.defineNamespace(namespace, nsn, nsdef, protected=True)
assert 0 < len(self.__namespaceDeclarations)
self.__referencedNamespaces[namespace] = rv
return rv
def importForDeclaration (self, decl):
"""Import the binding from which the declaration came.
Figure out where the declaration came from. If it's not part
of this binding, make sure we import the binding associated
with the schema from which it came. We need that, if not for
something in the declaration itself, at least to be able to
get the Namespace for the declaration's name. None of this is
relevant if the declaration has no namespace."""
sdecl = decl
while sdecl._cloneSource() is not None:
sdecl = sdecl._cloneSource()
assert decl.expandedName() == sdecl.expandedName()
ns = decl.expandedName().namespace()
if ns is None:
return
mr = sdecl._objectOrigin().moduleRecord()
if isinstance(self, NamespaceModule):
need_import = self.moduleRecord().modulePath() != mr.modulePath()
elif isinstance(self, NamespaceGroupModule):
need_import = True
for nm in self.namespaceModules():
if nm.moduleRecord().modulePath() == mr.modulePath():
need_import = False
break
else:
raise pyxb.LogicError('Unhandled module naming', self)
if need_import:
self._importModule(mr)
def literal (self, *args, **kw):
return self.__bindingIO.literal(*args, **kw)
def addImportsFrom (self, module):
_log.info('Importing to %s from %s', self, module)
self._importModule(module)
for c in self.__components:
local_name = self.nameInModule(c)
assert local_name is not None
rem_name = module.nameInModule(c)
if rem_name is None:
continue
aux = ''
if local_name != rem_name:
aux = ' as %s' % (local_name,)
self.__bindingIO.write("from %s import %s%s # %s\n" % (module.modulePath(), rem_name, aux, c.expandedName()))
def writeToModuleFile (self):
if self.bindingFile():
self.bindingFile().write(self.moduleContents().encode(pyxb._OutputEncoding))
self.bindingFile().close()
_log.info('Saved binding source to %s', self.__bindingFilePath)
else:
_log.info('No binding file for %s', self)
class NamespaceModule (_ModuleNaming_mixin):
"""This class represents a Python module that holds all the
declarations belonging to a specific namespace."""
def namespace (self):
return self.__namespace
__namespace = None
def moduleRecord (self):
return self.__moduleRecord
__moduleRecord = None
def namespaceGroupModule (self):
return self.__namespaceGroupModule
def setNamespaceGroupModule (self, namespace_group_module):
self.__namespaceGroupModule = namespace_group_module
__namespaceGroupModule = None
_UniqueInModule = _ModuleNaming_mixin._UniqueInModule.copy()
_UniqueInModule.update([ 'CreateFromDOM', 'CreateFromDocument' ])
def namespaceGroupHead (self):
return self.__namespaceGroupHead
__namespaceGroupHead = None
__namespaceGroup = None
def componentsInNamespace (self):
return self.__components
__components = None
@classmethod
def ForComponent (cls, component):
return cls.__ComponentModuleMap.get(component)
__ComponentModuleMap = { }
def _bindingTagPrefix_vx (self):
return 'NM'
def _bindingPreface_vx (self):
ns = self.namespace()
rvl = ['# Namespace %s' % (ns,)]
if ns.prefix() is not None:
rvl.append(' [xmlns:%s]' % (ns.prefix(),))
rvl.append('\n')
return ''.join(rvl)
def _moduleUID_vx (self):
if self.namespace().isAbsentNamespace():
return 'Absent'
return six.text_type(self.namespace())
def namespaceGroupMulti (self):
return 1 < len(self.__namespaceGroup)
def __init__ (self, generator, module_record, mr_scc, components=None, **kw):
super(NamespaceModule, self).__init__(generator, **kw)
self._initializeUniqueInModule(self._UniqueInModule)
self.__moduleRecord = module_record
self.__namespace = self.__moduleRecord.namespace()
self.defineNamespace(self.__namespace, 'Namespace')
self._RecordModule(self)
self.__components = components
# wow! fromkeys actually IS useful!
if self.__components is not None:
self.__ComponentModuleMap.update(dict.fromkeys(self.__components, self))
self.__namespaceBindingNames = {}
self.__componentBindingName = {}
self._setModulePath(generator.modulePathData(self))
def _initialBindingTemplateMap (self):
kw = { 'moduleType' : 'namespace'
, 'targetNamespace' : repr2to3(self.__namespace.uri())
, 'namespaceURI' : self.__namespace.uri()
, 'namespaceReference' : self.referenceNamespace(self.__namespace)
, 'pyxb_version' : repr2to3(pyxb.__version__)
}
return kw
def _finalizeModuleContents_vx (self, template_map):
template_map['_TextType'] = '_six.text_type'
self.bindingIO().prolog().append(self.bindingIO().expand('''
from __future__ import unicode_literals
import pyxb
import pyxb.binding
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
import sys
import pyxb.utils.six as _six
# Unique identifier for bindings created at the same time
_GenerationUID = %{generation_uid_expr}
# Version of PyXB used to generate the bindings
_PyXBVersion = %{pyxb_version}
# Generated bindings are not compatible across PyXB versions
if pyxb.__version__ != _PyXBVersion:
raise pyxb.PyXBVersionError(_PyXBVersion)
# Import bindings for namespaces imported into schema
%{aux_imports}
# NOTE: All namespace declarations are reserved within the binding
%{namespace_decls}
def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
"""Parse the given XML and use the document element to create a
Python instance.
@param xml_text An XML document. This should be data (Python 2
str or Python 3 bytes), or a text (Python 2 unicode or Python 3
str) in the L{pyxb._InputEncoding} encoding.
@keyword default_namespace The L{pyxb.Namespace} instance to use as the
default namespace where there is no default namespace in scope.
If unspecified or C{None}, the namespace of the module containing
this function will be used.
@keyword location_base: An object to be recorded as the base of all
L{pyxb.utils.utility.Location} instances associated with events and
objects handled by the parser. You might pass the URI from which
the document was obtained.
"""
if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
dom = pyxb.utils.domutils.StringToDOM(xml_text)
return CreateFromDOM(dom.documentElement, default_namespace=default_namespace)
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
saxer = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
handler = saxer.getContentHandler()
xmld = xml_text
if isinstance(xmld, %{_TextType}):
xmld = xmld.encode(pyxb._InputEncoding)
saxer.parse(io.BytesIO(xmld))
instance = handler.rootObject()
return instance
def CreateFromDOM (node, default_namespace=None):
"""Create a Python instance from the given DOM node.
The node tag must correspond to an element declaration in this module.
@deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}."""
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)
''', **template_map))
__components = None
__componentBindingName = None
def bindComponent (self, component):
ns_name = self._bindComponent(component)
component.setNameInBinding(ns_name)
binding_module = self
if self.__namespaceGroupModule:
self.__namespaceGroupModule._bindComponent(component)
binding_module = self.__namespaceGroupModule
return _ModuleNaming_mixin.BindComponentInModule(component, binding_module)
def __str__ (self):
return 'NM:%s@%s' % (self.namespace(), self.modulePath())
class NamespaceGroupModule (_ModuleNaming_mixin):
"""This class represents a Python module that holds all the
declarations belonging to a set of namespaces which have
interdependencies."""
def namespaceModules (self):
return self.__namespaceModules
__namespaceModules = None
def moduleForNamespace (self, namespace):
for nm in self.__namespaceModules:
if nm.namespace() == namespace:
return nm
return None
__components = None
__componentBindingName = None
__uniqueInModule = None
__UniqueInGroups = set()
_GroupPrefix = '_group'
def __init__ (self, generator, namespace_modules, **kw):
super(NamespaceGroupModule, self).__init__(generator, **kw)
assert 1 < len(namespace_modules)
self.__namespaceModules = namespace_modules
self.__namespaceGroupHead = namespace_modules[0].namespaceGroupHead()
self._initializeUniqueInModule(self._UniqueInModule)
self._setModulePath(generator.modulePathData(self))
def _initialBindingTemplateMap (self):
kw = { 'moduleType' : 'namespaceGroup' }
return kw
def _bindingTagPrefix_vx (self):
return 'NGM'
def _bindingPreface_vx (self):
rvl = ['# Group contents:\n' ]
for nsm in self.namespaceModules():
rvl.append(nsm.bindingPreface())
rvl.append('\n')
return ''.join(rvl)
def _finalizeModuleContents_vx (self, template_map):
self.bindingIO().prolog().append(self.bindingIO().expand('''
from __future__ import unicode_literals
import pyxb
import pyxb.binding
import pyxb.utils.utility
import pyxb.utils.six as _six
# Unique identifier for bindings created at the same time
_GenerationUID = %{generation_uid_expr}
# Import bindings for schemas in group
%{aux_imports}
# NOTE: All namespace declarations are reserved within the binding
%{namespace_decls}
''', **template_map))
def _moduleUID_vx (self):
nss = []
for nsm in self.namespaceModules():
ns = nsm.namespace()
if ns.isAbsentNamespace():
nss.append('Absent')
else:
nss.append(six.text_type(ns))
nss.sort()
return six.u(';').join(nss)
def __str__ (self):
return 'NGM:%s' % (self.modulePath(),)
def GeneratePython (schema_location=None,
schema_text=None,
namespace=None,
module_prefix_elts=[],
**kw):
generator = Generator(allow_absent_module=True, generate_to_files=False, **kw)
if schema_location is not None:
generator.addSchemaLocation(schema_location)
elif schema_text is not None:
generator.addSchema(schema_text)
modules = generator.bindingModules()
assert 1 == len(modules), '%s produced %d modules: %s' % (namespace, len(modules), six.u(' ').join([ six.text_type(_m) for _m in modules]))
return modules.pop().moduleContents()
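# Hedged usage sketch (assumes the schema yields a single binding module; the
# file names are placeholders). The block is left as comments so nothing runs
# at import time:
#   source = GeneratePython(schema_location='po.xsd')
#   with open('po.py', 'w') as f:
#       f.write(source)
# GeneratePython returns the text of one binding module rather than writing
# files itself.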
import optparse
import re
class Generator (object):
"""Configuration and data for a single binding-generation action."""
_DEFAULT_bindingRoot = '.'
def bindingRoot (self):
"""The directory path into which generated bindings will be written.
@rtype: C{str}"""
return self.__bindingRoot
def setBindingRoot (self, binding_root):
self.__bindingRoot = binding_root
return self
__bindingRoot = None
def __moduleFilePath (self, module_elts, inhibit_extension=False):
if isinstance(module_elts, six.string_types):
module_elts = module_elts.split('.')
else:
module_elts = module_elts[:]
assert 0 < len(module_elts)
if not inhibit_extension:
assert not module_elts[-1].endswith('.py')
module_elts[-1] = '%s.py' % (module_elts[-1],)
return os.path.join(self.bindingRoot(), *module_elts)
def generateToFiles (self):
return self.__generateToFiles
__generateToFiles = None
def modulePathData (self, module):
# file system path to where the bindings are written
# module path from which the bindings are normally imported
# file object into which bindings are written
module_path = None
if isinstance(module, NamespaceModule):
mr = module.moduleRecord()
if mr is None:
return ('/dev/null', None, None)
if self.generationUID() != mr.generationUID():
return ('/dev/null', None, None)
if not self.generateToFiles():
return ('/dev/null', None, None)
if mr.namespace().isBuiltinNamespace() and (not self.allowBuiltinGeneration()):
return ('/dev/null', None, None)
module_path = mr.modulePath()
assert module_path is not None, 'No path specified for module %s' % (mr,)
module_elts = module_path.split('.')
if self.writeForCustomization():
import_file_path = self.__moduleFilePath(module_elts)
module_elts.insert(-1, 'raw')
if not os.path.exists(import_file_path):
raw_module_path = '.'.join(module_elts)
fd = pyxb.utils.utility.OpenOrCreate(import_file_path)
impt = '''# -*- coding: utf-8 -*-
from %s import *
''' % (raw_module_path,)
impd = impt.encode('utf-8')
fd.write(impd)
fd.close()
binding_file_path = self.__moduleFilePath(module_elts)
try:
binding_file = pyxb.utils.utility.OpenOrCreate(binding_file_path, tag=module.moduleUID())
except OSError as e:
if errno.EEXIST == e.errno:
raise pyxb.BindingGenerationError('Target file %s for module %s bindings exists with other content' % (binding_file_path, mr))
raise
elif isinstance(module, NamespaceGroupModule):
if not self.generateToFiles():
raise pyxb.BindingGenerationError('Generation of namespace groups requires generate-to-files')
module_elts = []
if self.modulePrefix():
module_elts.extend(self.modulePrefix().split('.'))
if self.writeForCustomization():
module_elts.append('raw')
in_use = set()
while True:
module_elts.append(pyxb.utils.utility.PrepareIdentifier('nsgroup', in_use, protected=True))
try:
binding_file_path = self.__moduleFilePath(module_elts)
_log.info('Attempting group %s uid %s at %s', module, module.moduleUID(), binding_file_path)
binding_file = pyxb.utils.utility.OpenOrCreate(binding_file_path, tag=module.moduleUID())
break
except OSError as e:
if errno.EEXIST != e.errno:
raise
module_elts.pop()
module_path = '.'.join(module_elts)
else:
assert False
if self.generateToFiles():
for n in range(len(module_elts)-1):
sub_path = self.__moduleFilePath(module_elts[:1+n], inhibit_extension=True)
init_path = os.path.join(sub_path, '__init__.py')
if not os.path.exists(init_path):
open(init_path, 'w')
return (binding_file_path, binding_file, module_path)
def schemaRoot (self):
"""The directory from which entrypoint schemas specified as
relative file paths will be read."""
return self.__schemaRoot
def setSchemaRoot (self, schema_root):
if not schema_root.endswith(os.sep):
schema_root = schema_root + os.sep
self.__schemaRoot = schema_root
return self
__schemaRoot = None
def schemaStrippedPrefix (self):
"""Optional string that is stripped from the beginning of
schemaLocation values before loading from them.
This applies only to the values of schemaLocation attributes
in C{import} and C{include} elements. Its purpose is to
convert absolute schema locations into relative ones to allow
offline processing when all schema are available in a local
directory. See C{schemaRoot}.
"""
return self.__schemaStrippedPrefix
def setSchemaStrippedPrefix (self, schema_stripped_prefix):
self.__schemaStrippedPrefix = schema_stripped_prefix
return self
__schemaStrippedPrefix = None
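# Hedged example (URI and paths are placeholders): with
#   generator.setSchemaStrippedPrefix('http://example.com/schemas/')
#   generator.setSchemaRoot('./schemas/')
# an xs:import whose schemaLocation is http://example.com/schemas/common.xsd
# would typically be read from ./schemas/common.xsd instead of the network.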
def locationPrefixRewriteMap (self):
"""Optional map to rewrite schema locations.
This applies only to the values of schemaLocation attributes
in C{import} and C{include} elements. Its purpose is to
convert remote or absolute schema locations into local or
relative ones to allow offline processing when all schema are
available in a local directory. See C{schemaRoot}.
"""
return self.__locationPrefixRewriteMap
def setLocationPrefixRewriteMap (self, location_prefix_rewrite_map):
self.__locationPrefixRewriteMap.clear()
self.__locationPrefixRewriteMap.update(location_prefix_rewrite_map)
return self
def addLocationPrefixRewrite (self, prefix, substituent):
"""Add a rewrite entry for schema locations.
@param prefix : A text prefix that should be removed from
schema location URIs.
@param substituent : The text prefix that should replace
C{prefix} as a prefix in a schema location URI.
"""
self.__locationPrefixRewriteMap[prefix] = substituent
return self
def argAddLocationPrefixRewrite (self, prefix_rewrite):
"""Add a rewrite entry for schema locations.
Parameter values are strings of the form C{pfx=sub}. The
effect is that a schema location that begins with C{pfx} is
rewritten so that it instead begins with C{sub}."""
try:
(prefix, substituent) = prefix_rewrite.split('=', 1)
except ValueError:
raise ValueError('Prefix rewrite argument must have the form pfx=sub: %r' % (prefix_rewrite,))
self.addLocationPrefixRewrite(prefix, substituent)
__locationPrefixRewriteMap = {}
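# Hedged example of the pfx=sub form accepted from the command line (both URIs
# are placeholders):
#   generator.argAddLocationPrefixRewrite(
#       'http://example.com/schemas/=file:///opt/schemas/')
# rewrites any schemaLocation beginning with the first URI so that it begins
# with the second.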
def schemaLocationList (self):
"""A list of locations from which entrypoint schemas are to be
read.
The values in the list are either URIs, or tuples consisting
of a value and a callable which, when passed the generator
object and the value, will return a
L{pyxb.xmlschema.structures.Schema} instance. See
L{addSchemaLocation}.
See also L{addSchemaLocation} and L{schemas}.
"""
return self.__schemaLocationList
def setSchemaLocationList (self, schema_location_list):
self.__schemaLocationList[:] = []
self.__schemaLocationList.extend(schema_location_list)
return self
def addSchemaLocation (self, schema_location, converter=None):
"""Add the location of an entrypoint schema.
@param schema_location: The location of the schema. This
should be a URL; if the schema location does not have a URL
scheme (e.g., C{http:}), it is assumed to be a file, and if it
is not an absolute path is located relative to the
C{schemaRoot}.
@keyword converter: Optional callable that will be invoked
with the generator instance and the schema location, and is
expected to return a L{pyxb.xmlschema.structures.Schema}
instance. If absent, the contents of the location are
converted directly.
@note: The C{converter} argument derives from WSDL support: we
need to add to the sequence of schema locations a URI of
something that will not parse as a schema, but does have inner
material that can if treated properly. "Treated properly" may
include having the archive path and other namespace
manipulations configured before anything is done to it.
"""
self.__schemaLocationList.append( (schema_location, converter) )
return self
def argAddSchemaLocation (self, schema_location):
"""Add the location of an entrypoint schema. The provided
value should be a URL; if it does not have a URL scheme (e.g.,
C{http:}), it is assumed to be a file, and if it is not an
absolute path is located relative to the C{schemaRoot}."""
self.addSchemaLocation(schema_location)
__schemaLocationList = None
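# Hedged usage sketch: entrypoint schemas are usually registered by location,
#   generator.addSchemaLocation('po.xsd')
#   generator.addSchemaLocation('http://example.com/other.xsd')
# Relative paths are resolved against schemaRoot(); the locations shown are
# placeholders.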
def schemas (self):
"""Schema for which bindings should be generated.
These may be L{Schema<pyxb.xmlschema.structures.Schema>}
instances, or strings; the latter is preferred, and is parsed
into a Schema instance when required.
This is the list of entrypoint schemas for binding generation.
Values in L{schemaLocationList} are read and converted into
schema, then appended to this list. Values from L{moduleList}
are applied starting with the first schema in this list.
"""
return self.__schemas[:]
def setSchemas (self, schemas):
self.__schemas[:] = []
self.__schemas.extend(schemas)
return self
def addSchema (self, schema):
self.__schemas.append(schema)
return self
__schemas = None
def namespaces (self):
"""The set of L{namespaces<pyxb.namespace.Namespace>} for
which bindings will be generated.
This is the set of namespaces read from entrypoint schema,
closed under reference to namespaces defined by schema import.
@rtype: C{set}
"""
return self.__namespaces.copy()
def setNamespaces (self, namespace_set):
self.__namespaces.clear()
self.__namespaces.update(namespace_set)
return self
def addNamespace (self, namespace):
self.__namespaces.add(namespace)
return self
__namespaces = None
def moduleList (self):
"""A list of module names to be applied in order to the namespaces of entrypoint schemas"""
return self.__moduleList[:]
def _setModuleList (self, module_list):
self.__moduleList[:] = []
self.__moduleList.extend(module_list)
return self
def addModuleName (self, module_name):
"""Add a module name corresponding to an entrypoint schema.
The namespace defined by the corresponding schema will be
written to a binding using the given module name, adjusted by
L{modulePrefix}."""
self.__moduleList.append(module_name)
return self
__moduleList = None
def modulePrefix (self):
"""The prefix for binding modules.
The base name for the module holding a binding is taken from
the moduleList, moduleMap, or an XMLNS prefix associated with
the namespace in a containing schema. This value, if present,
is used as a prefix to allow a deeper module hierarchy."""
return self.__modulePrefix
def setModulePrefix (self, module_prefix):
self.__modulePrefix = module_prefix
return self
__modulePrefix = None
def namespaceModuleMap (self):
"""A map from namespace URIs to the module to be used for the
corresponding generated binding.
Module values are adjusted by L{modulePrefix} if that has been
specified.
An entry in this map for a namespace supersedes the module
specified in moduleList if the namespace is defined by an
entrypoint schema.
@return: A reference to the namespace module map.
"""
return self.__namespaceModuleMap
__namespaceModuleMap = None
def archivePath (self):
"""A colon-separated list of paths from which namespace
archives can be read.
The default path is the contents of the C{PYXB_ARCHIVE_PATH}
environment variable, or the standard path configured at
installation time. Any file with the extension C{.wxs} found
in one of these directories is examined to see whether it is a
namespace archive.
"""
return self.__archivePath
def setArchivePath (self, archive_path):
self.__archivePath = archive_path
return self
__archivePath = None
def noLoadNamespaces (self):
"""A frozenset of namespaces that must not be loaded from an archive."""
return frozenset(self.__noLoadNamespaces)
def _setNoLoadNamespaces (self, namespace_set):
"""Record the set of namespaces that should not be loaded from an archive.
The expectation is that any required entities in the namespace
will be defined by loading schema."""
self.__noLoadNamespaces.clear()
self.__noLoadNamespaces.update([ pyxb.namespace.NamespaceInstance(_ns) for _ns in namespace_set ])
def addNoLoadNamespace (self, namespace):
"""Mark that the specified namespace should not be loaded from an archive.
Use this when you are generating bindings for an application
that has a restricted profile of a namespace that would
otherwise be read from an archive. Be aware that this removes
any knowledge of any archive in which this namespace is
present as a non-private member."""
self.__noLoadNamespaces.add(pyxb.namespace.NamespaceInstance(namespace))
__noLoadNamespaces = None
def importAugmentableNamespaces (self):
"""A list of namespaces for which new bindings are allowd."""
return frozenset(self.__importAugmentableNamespaces)
def _setImportAugmentableNamespaces (self, namespace_set):
"""Return the set of namespaces that may be augmented by import directives."""
self.__importAugmentableNamespaces.clear()
self.__importAugmentableNamespaces.update([ pyxb.namespace.NamespaceInstance(_ns) for _ns in namespace_set ])
def addImportAugmentableNamespace (self, namespace):
"""Mark that the specified namespace may be imported by new bindings.
Normally namespaces that are available from archives are
considered to be complete, and schema locations in import
directives are ignored. Use this to indicate that the
bindings being generated import new bindings.
Note that attempts to import schema that contributed to the
archive will only be detected if the archive was generated
from the same schemaLocation URI; if the archive was generated
from a different source component definitions might
conflict."""
self.__importAugmentableNamespaces.add(pyxb.namespace.NamespaceInstance(namespace))
__importAugmentableNamespaces = None
def archiveToFile (self):
"""Optional file into which the archive of namespaces will be written.
Subsequent generation actions can read pre-parsed namespaces
from this file, and therefore reference the bindings that were
built earlier rather than re-generating them.
The file name should normally end with C{.wxs}."""
return self.__archiveToFile
def setArchiveToFile (self, archive_to_file):
self.__archiveToFile = archive_to_file
return self
__archiveToFile = None
def setNamespaceVisibility (self, namespace, visibility):
namespace = pyxb.namespace.NamespaceInstance(namespace)
self.__namespaceVisibilityMap[namespace] = visibility
def _setNamespaceVisibilities (self, public, private):
if public is None:
public = set()
if private is None:
private = set()
self.__namespaceVisibilityMap.clear()
self.__namespaceVisibilityMap.update(dict.fromkeys(public, True))
self.__namespaceVisibilityMap.update(dict.fromkeys(private, False))
def namespaceVisibilityMap (self):
"""Indicates, for specific namespaces, whether their
visibility in the archive should be public or private."""
return self.__namespaceVisibilityMap.copy()
__namespaceVisibilityMap = None
def defaultNamespacePublic (self):
"""Indicates whether unmentioned namespaces will be public or private (default) in the archive.
A namespace is I{mentioned} if it is the target namespace of
an entrypoint schema, or appears in a namespace visibility
specification. I.e., this default applies only to namespaces
that are modified as a result of including some schema, which
is generally a local customization of something.
"""
return self.__defaultNamespacePublic
def setDefaultNamespacePublic (self, default_namespace_public):
self.__defaultNamespacePublic = default_namespace_public
__defaultNamespacePublic = None
def validateChanges (self):
"""Indicates whether the bindings should validate mutations
against the content model."""
return self.__validateChanges
def setValidateChanges (self, validate_changes):
self.__validateChanges = validate_changes
return self
__validateChanges = None
def writeForCustomization (self):
"""Indicates whether the binding Python code should be written into a sub-module for customization.
If enabled, a module C{path.to.namespace} will be written to
the file C{path/to/raw/namespace.py}, so that the file
C{path/to/namespace.py} can import it and override behavior."""
return self.__writeForCustomization
def setWriteForCustomization (self, write_for_customization):
self.__writeForCustomization = write_for_customization
return self
__writeForCustomization = None
def allowAbsentModule (self):
"""Indicates whether the code generator is permitted to
process namespace for which no module path can be determined.
Use this only when generating bindings that will not be
referenced by other bindings."""
return self.__allowAbsentModule
def setAllowAbsentModule (self, allow_absent_module):
self.__allowAbsentModule = allow_absent_module
return self
__allowAbsentModule = None
def allowBuiltinGeneration (self):
"""Indicates whether bindings will be written for namespaces that are built-in to PyXB.
This must be enabled when building bindings for the XML,
XMLSchema instance, and other built-in namespaces. Normally
generation of these namespaces is inhibited lest it produce
inconsistencies."""
return self.__allowBuiltinGeneration
def setAllowBuiltinGeneration (self, allow_builtin_generation):
self.__allowBuiltinGeneration = allow_builtin_generation
return self
__allowBuiltinGeneration = None
def uriContentArchiveDirectory (self):
"""The directory path into which any content retrieved by URI will be written.
This serves as a local cache, and to give you an opportunity
to inspect material retrieved from some other system.
@rtype: C{str}"""
return self.__uriContentArchiveDirectory
def setUriContentArchiveDirectory (self, ucad):
self.__uriContentArchiveDirectory = ucad
__uriContentArchiveDirectory = None
def loggingConfigFile (self):
"""A file provided to L{logging.config.fileConfig} to control log messages.
In the absence of other configuration the Python standard logging infrastructure is used in its
default configuration.
@rtype: C{str}"""
return self.__loggingConfigFile
def setLoggingConfigFile (self, logging_config_file):
self.__loggingConfigFile = logging_config_file
__loggingConfigFile = None
def __init__ (self, *args, **kw):
"""Create a configuration to be used for generating bindings.
Arguments are treated as additions to the schema location list
after all keywords have been processed.
@keyword binding_root: Invokes L{setBindingRoot}
@keyword schema_root: Invokes L{setSchemaRoot}
@keyword schema_stripped_prefix: Invokes L{setSchemaStrippedPrefix}
@keyword location_prefix_rewrite_map: Invokes L{setLocationPrefixRewriteMap}
@keyword schema_location_list: Invokes L{setSchemaLocationList}
@keyword module_list: Invokes L{_setModuleList}
@keyword module_prefix: Invokes L{setModulePrefix}
@keyword archive_path: Invokes L{setArchivePath}
@keyword no_load_namespaces: Invokes L{_setNoLoadNamespaces}
@keyword import_augmentable_namespaces: Invokes L{_setImportAugmentableNamespaces}
@keyword archive_to_file: Invokes L{setArchiveToFile}
@keyword public_namespace: Invokes L{setNamespaceVisibility}
@keyword private_namespace: Invokes L{setNamespaceVisibility}
@keyword default_namespace_public: Invokes L{setDefaultNamespacePublic}
@keyword validate_changes: Invokes L{setValidateChanges}
@keyword namespace_module_map: Initializes L{namespaceModuleMap}
@keyword schemas: Invokes L{setSchemas}
@keyword namespaces: Invokes L{setNamespaces}
@keyword write_for_customization: Invokes L{setWriteForCustomization}
@keyword allow_builtin_generation: Invokes L{setAllowBuiltinGeneration}
@keyword allow_absent_module: Invokes L{setAllowAbsentModule}
@keyword generate_to_files: Sets L{generateToFiles}
@keyword uri_content_archive_directory: Invokes L{setUriContentArchiveDirectory}
@keyword logging_config_file: Invokes L{setLoggingConfigFile}
"""
argv = kw.get('argv')
if argv is not None:
kw = {}
self.__bindingRoot = kw.get('binding_root', self._DEFAULT_bindingRoot)
self.__schemaRoot = kw.get('schema_root', '.')
self.__schemaStrippedPrefix = kw.get('schema_stripped_prefix')
self.__locationPrefixRewriteMap = kw.get('location_prefix_rewrite_map', {})
self.__schemas = []
self.__schemaLocationList = kw.get('schema_location_list', [])[:]
self.__moduleList = kw.get('module_list', [])[:]
self.__modulePrefix = kw.get('module_prefix')
self.__archivePath = kw.get('archive_path', pyxb.namespace.archive.GetArchivePath())
self.__noLoadNamespaces = kw.get('no_load_namespaces', set()).copy()
self.__importAugmentableNamespaces = kw.get('import_augmentable_namespaces', set()).copy()
self.__archiveToFile = kw.get('archive_to_file')
self.__namespaceVisibilityMap = {}
self._setNamespaceVisibilities(kw.get('public_namespaces', set()), kw.get('private_namespaces', set()))
self.__defaultNamespacePublic = kw.get('default_namespace_public', False)
self.__validateChanges = kw.get('validate_changes', True)
self.__namespaceModuleMap = kw.get('namespace_module_map', {}).copy()
self.__schemas = kw.get('schemas', [])[:]
self.__namespaces = set(kw.get('namespaces', []))
self.__writeForCustomization = kw.get('write_for_customization', False)
self.__allowBuiltinGeneration = kw.get('allow_builtin_generation', False)
self.__allowAbsentModule = kw.get('allow_absent_module', False)
self.__generateToFiles = kw.get('generate_to_files', True)
self.__uriContentArchiveDirectory = kw.get('uri_content_archive_directory')
self.__loggingConfigFile = kw.get('logging_config_file')
self.__unnamedModulePaths = set()
if argv is not None:
self.applyOptionValues(*self.optionParser().parse_args(argv))
[ self.addSchemaLocation(_a) for _a in args ]
self.__generationUID = pyxb.utils.utility.UniqueIdentifier()
pyxb.namespace.XML.validateComponentModel()
    __stripSpaces_re = re.compile(r'\s\s\s+')
def __stripSpaces (self, string):
return self.__stripSpaces_re.sub(' ', string)
__OptionSetters = (
('binding_root', setBindingRoot),
('schema_root', setSchemaRoot),
('schema_stripped_prefix', setSchemaStrippedPrefix),
('location_prefix_rewrite', argAddLocationPrefixRewrite),
('schema_location', setSchemaLocationList),
('module', _setModuleList),
('module_prefix', setModulePrefix),
('archive_path', setArchivePath),
('no_load_namespace', _setNoLoadNamespaces),
('import_augmentable_namespace', _setImportAugmentableNamespaces),
('archive_to_file', setArchiveToFile),
('default_namespace_public', setDefaultNamespacePublic),
('validate_changes', setValidateChanges),
('write_for_customization', setWriteForCustomization),
('allow_builtin_generation', setAllowBuiltinGeneration),
('allow_absent_module', setAllowAbsentModule),
('uri_content_archive_directory', setUriContentArchiveDirectory),
('logging_config_file', setLoggingConfigFile)
)
def applyOptionValues (self, options, args=None):
for (tag, method) in self.__OptionSetters:
v = getattr(options, tag)
if v is not None:
method(self, v)
public_namespaces = getattr(options, 'public_namespace')
private_namespaces = getattr(options, 'private_namespace')
self._setNamespaceVisibilities(public_namespaces, private_namespaces)
if args is not None:
self.__schemaLocationList.extend(args)
pyxb.utils.utility.SetLocationPrefixRewriteMap(self.locationPrefixRewriteMap())
if self.__loggingConfigFile is not None:
logging.config.fileConfig(self.__loggingConfigFile)
def setFromCommandLine (self, argv=None):
if argv is None:
argv = sys.argv[1:]
(options, args) = self.optionParser().parse_args(argv)
self.applyOptionValues(options, args)
return self
def generationUID (self):
"""A unique identifier associated with this Generator instance.
This is an instance of L{pyxb.utils.utility.UniqueIdentifier}.
Its associated objects are
L{pyxb.namespace.archive._SchemaOrigin} instances, which
identify schema that contribute to the definition of a
namespace."""
return self.__generationUID
__generationUID = None
def optionParser (self, reset=False):
"""Return an C{optparse.OptionParser} instance tied to this configuration.
@param reset: If C{False} (default), a parser created in a
previous invocation will be returned. If C{True}, any
previous option parser is discarded and a new one created.
@type reset: C{bool}
"""
if reset or (self.__optionParser is None):
parser = optparse.OptionParser(usage="%prog [options] [more schema locations...]",
version='%%prog from PyXB %s' % (pyxb.__version__,),
description='Generate bindings from a set of XML schemas')
group = optparse.OptionGroup(parser, 'Identifying Schema', 'Specify and locate schema for which bindings should be generated.')
group.add_option('--schema-location', '-u', metavar="FILE_or_URL",
action='append',
help=self.__stripSpaces(self.argAddSchemaLocation.__doc__))
group.add_option('--schema-root', metavar="DIRECTORY",
help=self.__stripSpaces(self.schemaRoot.__doc__))
group.add_option('--schema-stripped-prefix', metavar="TEXT", type='string',
help=self.__stripSpaces(self.schemaStrippedPrefix.__doc__))
group.add_option('--location-prefix-rewrite', metavar="TEXT", type='string',
help=self.__stripSpaces(self.argAddLocationPrefixRewrite.__doc__))
group.add_option('--uri-content-archive-directory', metavar="DIRECTORY",
help=self.__stripSpaces(self.uriContentArchiveDirectory.__doc__))
parser.add_option_group(group)
group = optparse.OptionGroup(parser, 'Configuring Bindings', 'Specify where generated bindings should be written, and how they will be accessed from Python.')
group.add_option('--module', '-m', metavar="MODULE",
action='append',
help=self.__stripSpaces(self.addModuleName.__doc__))
group.add_option('--module-prefix', metavar="MODULE",
help=self.__stripSpaces(self.modulePrefix.__doc__))
group.add_option('--binding-root', metavar="DIRECTORY",
help=self.__stripSpaces(self.bindingRoot.__doc__))
group.add_option('-r', '--write-for-customization',
action='store_true', dest='write_for_customization',
help=self.__stripSpaces(self.writeForCustomization.__doc__ + ' This option turns on the feature.'))
group.add_option('--no-write-for-customization',
action='store_false', dest='write_for_customization',
help=self.__stripSpaces(self.writeForCustomization.__doc__ + ' This option turns off the feature (I{default}).'))
parser.add_option_group(group)
group = optparse.OptionGroup(parser, 'Reading Namespace Archives', 'Locating and loading (or inhibiting load of) namespace archives.')
group.add_option('--archive-path', metavar="PATH",
help=self.__stripSpaces(self.archivePath.__doc__))
group.add_option('--import-augmentable-namespace', metavar="URI",
action='append',
help=self.__stripSpaces(self.addImportAugmentableNamespace.__doc__))
group.add_option('--no-load-namespace', metavar="URI",
action='append',
help=self.__stripSpaces(self.addNoLoadNamespace.__doc__))
parser.add_option_group(group)
group = optparse.OptionGroup(parser, 'Writing Namespace Archives', 'Control the location and content of a namespace archive corresponding to a binding generation.')
group.add_option('--archive-to-file', metavar="FILE",
help=self.__stripSpaces(self.archiveToFile.__doc__))
group.add_option('--public-namespace', metavar="URI",
action='append',
help=self.__stripSpaces(self.namespaceVisibilityMap.__doc__ + ' This option adds the namespace as a public archive member.'))
group.add_option('--private-namespace', metavar="URI",
action='append',
help=self.__stripSpaces(self.namespaceVisibilityMap.__doc__ + ' This option adds the namespace as a private archive member.'))
group.add_option('--default-namespace-public',
action="store_true", dest='default_namespace_public',
help=self.__stripSpaces(self.defaultNamespacePublic.__doc__ + ' This option makes the default C{public} (I{default}).'))
group.add_option('--default-namespace-private',
action="store_false", dest='default_namespace_public',
help=self.__stripSpaces(self.defaultNamespacePublic.__doc__ + ' This option makes the default C{private}.'))
parser.add_option_group(group)
group = optparse.OptionGroup(parser, 'Configuring Binding Code Generation', "Control the style and content of the generated bindings. This is not well-supported, and you are advised to pretend these options don't exist.")
group.add_option('--validate-changes',
action='store_true', dest='validate_changes',
help=self.__stripSpaces(self.validateChanges.__doc__ + ' This option turns on validation (default).'))
group.add_option('--no-validate-changes',
action='store_false', dest='validate_changes',
help=self.__stripSpaces(self.validateChanges.__doc__ + ' This option turns off validation.'))
parser.add_option_group(group)
group = optparse.OptionGroup(parser, 'Miscellaneous Options', "Anything else.")
group.add_option('--logging-config-file', metavar="FILE",
help=self.__stripSpaces(self.loggingConfigFile.__doc__))
parser.add_option_group(group)
group = optparse.OptionGroup(parser, 'Maintainer Options', "Don't use these. They don't exist. If they did, they'd do different things at different times, and if you used them you'd probably be sorry.")
group.add_option('--allow-absent-module',
action='store_true', dest='allow_absent_module',
help=self.__stripSpaces(self.allowAbsentModule.__doc__ + ' This option turns on the feature.'))
group.add_option('--no-allow-absent-module',
action='store_false', dest='allow_absent_module',
help=self.__stripSpaces(self.allowAbsentModule.__doc__ + ' This option turns off the feature (default).'))
group.add_option('--allow-builtin-generation',
action='store_true', dest='allow_builtin_generation',
help=self.__stripSpaces(self.allowBuiltinGeneration.__doc__ + ' This option turns on the feature.'))
group.add_option('--no-allow-builtin-generation',
action='store_false', dest='allow_builtin_generation',
help=self.__stripSpaces(self.allowBuiltinGeneration.__doc__ + ' This option turns off the feature (default).'))
parser.add_option_group(group)
self.__optionParser = parser
return self.__optionParser
__optionParser = None
def getCommandLineArgs (self):
"""Return a command line option sequence that could be used to
construct an equivalent configuration.
@note: If you extend the option parser, as is done by
C{pyxbgen}, this may not be able to reconstruct the correct
command line."""
opts = []
module_list = self.moduleList()
schema_list = self.schemaLocationList()
while module_list and schema_list:
ml = module_list.pop(0)
sl = schema_list.pop(0)
if isinstance(sl, tuple):
sl = sl[0]
opts.extend(['--schema-location=' + sl, '--module=' + ml])
for sl in schema_list:
opts.append('--schema-location=' + sl)
if self.schemaRoot() is not None:
opts.append('--schema-root=' + self.schemaRoot())
if self.schemaStrippedPrefix() is not None:
            opts.append('--schema-stripped-prefix=' + self.schemaStrippedPrefix())
        for (pfx, sub) in six.iteritems(self.locationPrefixRewriteMap()):
opts.append('--location-prefix-rewrite=%s=%s' % (pfx, sub))
if self.modulePrefix() is not None:
opts.append('--module-prefix=' + self.modulePrefix())
opts.append('--binding-root=' + self.bindingRoot())
if self.archivePath() is not None:
opts.append('--archive-path=' + self.archivePath())
for ns in self.noLoadNamespaces():
opts.append('--no-load-namespace=' + ns.uri())
for ns in self.importAugmentableNamespaces():
opts.append('--import-augmentable-namespace=' + ns.uri())
if self.archiveToFile() is not None:
opts.append('--archive-to-file=' + self.archiveToFile())
        for (ns, visibility) in six.iteritems(self.namespaceVisibilityMap()):
if visibility:
opts.append('--public-namespace=' + ns.uri())
else:
opts.append('--private-namespace=' + ns.uri())
if self.defaultNamespacePublic():
opts.append('--default-namespace-public')
else:
opts.append('--default-namespace-private')
for (val, opt) in ( (self.validateChanges(), 'validate-changes'),
(self.writeForCustomization(), 'write-for-customization'),
(self.allowAbsentModule(), 'allow-absent-module'),
(self.allowBuiltinGeneration(), 'allow-builtin-generation') ):
if val:
opts.append('--' + opt)
else:
opts.append('--no-' + opt)
if self.uriContentArchiveDirectory() is not None:
            opts.append('--uri-content-archive-directory=' + self.uriContentArchiveDirectory())
return opts
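    # Illustrative round-trip (editor's sketch, not part of the original
    # module): the option sequence returned above is meant to be consumable
    # by setFromCommandLine(), so an equivalent Generator can be rebuilt:
    #
    #   g2 = Generator().setFromCommandLine(g1.getCommandLineArgs())
    #
    # As the docstring notes, drivers that extend the option parser (such as
    # pyxbgen) may not round-trip exactly.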
def normalizeSchemaLocation (self, sl):
ssp = self.schemaStrippedPrefix()
if ssp and sl.startswith(ssp):
sl = sl[len(ssp):]
return pyxb.utils.utility.NormalizeLocation(sl, self.schemaRoot())
def assignModulePath (self, module_record, module_path=None):
"""Provide a Python module path for the module record.
This is the path by which the module bindings associated with
C{module_record} will be imported.
If a path had already been assigned to the module, it is left
in place.
@param module_record: Information about a collection of related bindings
@type module_record: L{pyxb.namespace.archive.ModuleRecord}
@param module_path: Default path to use
@type module_path: C{str}
@return: C{module_record}
"""
if module_record.modulePath() is not None:
return module_record
namespace = module_record.namespace()
if not namespace.isAbsentNamespace():
# Use the namespace prefix from a referencing schema if no other clue was given
if (module_path is None) and not (namespace.prefix() is None):
module_path = namespace.prefix()
# Prefer an existing assignment over a new one
module_path = self.namespaceModuleMap().get(namespace.uri(), module_path)
if (module_path is None) and self.generateToFiles():
module_path = pyxb.utils.utility.MakeUnique('binding', self.__unnamedModulePaths)
if (module_path is not None) and self.modulePrefix(): # non-empty value
# Prepend a configured module prefix
module_path = '.'.join([self.modulePrefix(), module_path])
module_record.setModulePath(module_path)
return module_record
__didResolveExternalSchema = False
def resolveExternalSchema (self):
if self.__didResolveExternalSchema:
return
# Locate all relevant archives and the namespaces they
# provide.
pyxb.namespace.archive.NamespaceArchive.PreLoadArchives(self.archivePath())
# Mark the namespaces we were told not to load. These may be
# namespaces for which we already have bindings in the search
# path, but we want to generate completely new ones.
for ns in self.noLoadNamespaces():
assert isinstance(ns, pyxb.namespace.Namespace)
_log.info("Namespace %s marked not loadable" % (ns,))
ns.markNotLoadable()
# Mark the namespaces that we permit to be extended by import
# statements.
for ns in self.importAugmentableNamespaces():
assert isinstance(ns, pyxb.namespace.Namespace)
_log.info("Namespace %s marked import-augmentable" % (ns,))
ns.setImportAugmentable(True)
# Read all the schema we were told about.
while self.__schemaLocationList:
sl = self.__schemaLocationList.pop(0)
if isinstance(sl, tuple):
(sl, converter) = sl
else:
converter = None
try:
if converter is None:
schema = xs.schema.CreateFromLocation(absolute_schema_location=self.normalizeSchemaLocation(sl),
generation_uid=self.generationUID(),
uri_content_archive_directory=self.uriContentArchiveDirectory())
else:
schema = converter(self, sl)
self.addSchema(schema)
except pyxb.SchemaUniquenessError as e:
_log.info('Skipped redundant translation of %s defining %s', e.schemaLocation(), e.namespace())
self.addSchema(e.existingSchema())
# Assign Python modules to hold bindings for the schema we're
# processing.
for schema in self.__schemas:
if isinstance(schema, six.string_types):
schema = xs.schema.CreateFromDocument(schema, generation_uid=self.generationUID())
origin = schema.originRecord()
assert origin is not None
module_path = None
if self.__moduleList:
module_path = self.__moduleList.pop(0)
self.assignModulePath(origin.moduleRecord(), module_path)
assert schema.targetNamespace() == origin.moduleRecord().namespace()
self.addNamespace(schema.targetNamespace())
self.__didResolveExternalSchema = True
# Discard any existing component information
self.__componentGraph = None
self.__componentOrder = None
def __graphFromComponents (self, components, include_lax):
components = components.copy()
component_graph = pyxb.utils.utility.Graph()
need_visit = components.copy()
bindable_fn = lambda _c: isinstance(_c, xs.structures.ElementDeclaration) or _c.isTypeDefinition()
while 0 < len(need_visit):
c = need_visit.pop()
assert c is not None
assert bindable_fn(c) or include_lax
assert c._objectOrigin() is not None, '%s %s has no origin' % (type(c), c)
component_graph.addNode(c)
br = c.bindingRequires(reset=True, include_lax=include_lax)
for cd in br:
assert bindable_fn(cd) or include_lax, '%s produced %s in requires' % (type(c), type(cd))
if cd._objectOrigin() is None:
assert isinstance(cd, (pyxb.xmlschema.structures.Annotation, pyxb.xmlschema.structures.Wildcard))
continue
if (cd._objectOrigin().moduleRecord() in self.__moduleRecords) and not (cd in components):
components.add(cd)
need_visit.add(cd)
if cd in components:
component_graph.addEdge(c, cd)
return component_graph
def __resolveComponentDependencies (self):
self.resolveExternalSchema()
bindable_fn = lambda _c: isinstance(_c, xs.structures.ElementDeclaration) or _c.isTypeDefinition()
self.__moduleRecords = set()
all_components = set()
for origin in self.generationUID().associatedObjects():
mr = origin.moduleRecord()
if not (mr in self.__moduleRecords):
self.__moduleRecords.add(mr)
mr.completeGenerationAssociations()
all_components.update(origin.originatedObjects())
namespaces = set()
for mr in self.__moduleRecords:
if mr.namespace().isBuiltinNamespace() and not self.allowBuiltinGeneration():
continue
namespaces.add(mr.namespace())
pyxb.namespace.resolution.ResolveSiblingNamespaces(namespaces)
# Mark module visibility. Entry-point namespaces default to
# public.
for ns in self.namespaces():
self.__namespaceVisibilityMap.setdefault(ns, True)
# Generate the graph from all components and descend into lax
# requirements; otherwise we might miss anonymous types hidden
# inside attribute declarations and the like.
component_graph = self.__graphFromComponents(all_components, True)
binding_components = set(filter(bindable_fn, component_graph.nodes()))
for c in binding_components:
assert bindable_fn(c), 'Unexpected %s in binding components' % (type(c),)
c._setBindingNamespace(c._objectOrigin().moduleRecord().namespace())
component_order = []
root_sets = self.__graphFromComponents(binding_components, False).rootSetOrder()
if root_sets is None:
raise pyxb.BindingGenerationError('Unable to partial-order named components')
for rs in root_sets:
component_order.extend(sorted(rs, key=lambda _c: _c.schemaOrderSortKey()))
self.__componentGraph = component_graph
self.__componentOrder = component_order
__moduleRecords = None
__componentGraph = None
__componentOrder = None
def moduleRecords (self):
"""The set of L{pyxb.namespace.archive.ModuleRecord} instances
associated with schema processed in this generation
instance.
These should be in one-to-one correspondence with the
namespaces for which bindings are being generated. Multiple
input schemas may contribute to a single module record; all
material in that record is placed in a single binding file.
"""
if self.__moduleRecords is None:
self.__resolveComponentDependencies()
return self.__moduleRecords
def componentGraph (self):
if self.__componentGraph is None:
self.__resolveComponentDependencies()
return self.__componentGraph
def componentOrder (self):
if self.__componentOrder is None:
self.__resolveComponentDependencies()
return self.__componentOrder
def __generateBindings (self):
# Note that module graph may have fewer nodes than
# self.moduleRecords(), if a module has no components that
# require binding generation.
module_graph = pyxb.utils.utility.Graph()
[ module_graph.addRoot(_mr) for _mr in self.moduleRecords() ]
for (s, t) in self.componentGraph().edges():
module_graph.addEdge(s._objectOrigin().moduleRecord(), t._objectOrigin().moduleRecord())
module_scc_order = module_graph.sccOrder()
record_binding_map = {}
modules = []
nsvm = self.namespaceVisibilityMap()
for mr_scc in module_scc_order:
scc_modules = [ ]
for mr in mr_scc:
mr._setIsPublic(nsvm.get(mr.namespace(), self.defaultNamespacePublic()))
self.assignModulePath(mr)
if (mr.modulePath() is None) and self.generateToFiles():
raise pyxb.BindingGenerationError('No prefix or module name available for %s' % (mr,))
if (not mr.isPublic()) and (mr.modulePath() is not None):
elts = mr.modulePath().split('.')
elts[-1] = '_%s' % (elts[-1],)
mr.setModulePath('.'.join(elts))
nsm = NamespaceModule(self, mr, mr_scc)
record_binding_map[mr] = nsm
scc_modules.append(nsm)
scc_modules.sort(key=lambda _nm: _nm.namespace().uri())
modules.extend(scc_modules)
if 1 < len(mr_scc):
ngm = NamespaceGroupModule(self, scc_modules)
modules.append(ngm)
for nsm in scc_modules:
nsm.setNamespaceGroupModule(ngm)
element_declarations = []
type_definitions = []
for c in self.componentOrder():
if isinstance(c, xs.structures.ElementDeclaration) and c._scopeIsGlobal():
# Only bind elements this pass, so their names get priority in deconfliction
nsm = record_binding_map[c._objectOrigin().moduleRecord()]
nsm.bindComponent(c)
element_declarations.append(c)
elif c.isTypeDefinition():
type_definitions.append(c)
else:
# No binding generation required
pass
simple_type_definitions = []
complex_type_definitions = []
for td in type_definitions:
nsm = record_binding_map[td._objectOrigin().moduleRecord()]
assert nsm is not None, 'No namespace module for %s type %s scope %s namespace %s' % (td.expandedName(), type(td), td._scope(), td.bindingNamespace)
module_context = nsm.bindComponent(td)
assert isinstance(module_context, _ModuleNaming_mixin), 'Unexpected type %s' % (type(module_context),)
if isinstance(td, xs.structures.SimpleTypeDefinition):
_PrepareSimpleTypeDefinition(td, self, nsm, module_context)
simple_type_definitions.append(td)
elif isinstance(td, xs.structures.ComplexTypeDefinition):
_PrepareComplexTypeDefinition(td, self, nsm, module_context)
complex_type_definitions.append(td)
else:
assert False, 'Unexpected component type %s' % (type(td),)
for ngm in modules:
if isinstance(ngm, NamespaceGroupModule):
for m in ngm.namespaceModules():
m.addImportsFrom(ngm)
for std in simple_type_definitions:
GenerateSTD(std, self)
for ctd in complex_type_definitions:
GenerateCTD(ctd, self)
for ed in element_declarations:
GenerateED(ed, self)
self.__bindingModules = modules
__bindingModules = None
def bindingModules (self):
if self.__componentGraph is None:
self.__resolveComponentDependencies()
if self.__bindingModules is None:
self.__generateBindings()
return self.__bindingModules
def writeNamespaceArchive (self):
archive_file = self.archiveToFile()
if archive_file is not None:
ns_archive = pyxb.namespace.archive.NamespaceArchive(generation_uid=self.generationUID())
try:
ns_archive.writeNamespaces(pyxb.utils.utility.OpenOrCreate(archive_file))
                _log.info('Saved parsed schema to %s', archive_file)
except Exception as e:
_log.exception('Failure saving preprocessed schema to %s', archive_file)
#try:
# os.unlink(component_model_file)
#except (OSError, IOError), e:
# pass
if isinstance(e, (AssertionError, AttributeError, TypeError)):
raise
def moduleForComponent (self, component):
return _ModuleNaming_mixin.ComponentBindingModule(component)
```
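The `Generator` class above is normally driven by the `pyxbgen` script, but it can also be configured programmatically. The following is a minimal sketch rather than a complete driver: `po.xsd` and `purchaseorder` are hypothetical placeholders, only methods visible in the source above are used, and writing the generated Python out to files is left to a pyxbgen-style driver.
```python
# Minimal sketch of configuring pyxb.binding.generate.Generator directly.
# 'po.xsd' and 'purchaseorder' are hypothetical; substitute real values.
from pyxb.binding.generate import Generator

gen = Generator(binding_root='bindings',           # directory for generated modules
                schema_location_list=['po.xsd'],   # entrypoint schema location(s)
                module_list=['purchaseorder'])     # Python module for its namespace
# Equivalent pyxbgen-style configuration (note: passing argv ignores keywords):
#   gen = Generator(argv=['--schema-location=po.xsd', '--module=purchaseorder'])
for mr in gen.moduleRecords():                     # one record per bound namespace
    print('%s -> %s' % (mr.namespace(), mr.modulePath()))
gen.writeNamespaceArchive()                        # no-op unless archive_to_file is set
```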
#### File: site-packages/pyxb/exceptions_.py
```python
import pyxb
from pyxb.utils import six
class PyXBException (Exception):
"""Base class for exceptions that indicate a problem that the user should fix."""
"""The arguments passed to the exception constructor."""
_args = None
"""The keywords passed to the exception constructor.
@note: Do not pop values from the keywords array in subclass
constructors that recognize and extract values from them. They
should be kept around so they're accessible generically."""
_kw = None
def __init__ (self, *args, **kw):
"""Create an exception indicating a PyXB-related problem.
If no args are present, a default argument is taken from the
C{message} keyword.
@keyword message : Text to provide the user with information about the problem.
"""
if 0 == len(args) and 'message' in kw:
args = (kw.pop('message'),)
self._args = args
self._kw = kw
super(PyXBException, self).__init__(*args)
if six.PY2:
def _str_from_unicode (self):
return unicode(self).encode(pyxb._OutputEncoding)
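# Illustrative usage (editor's sketch, not part of this module): with no
# positional arguments the 'message' keyword becomes the sole exception
# argument, so str() of the exception yields that text.
#
#   try:
#       raise PyXBException(message='fix the schema location')
#   except PyXBException as e:
#       assert str(e) == 'fix the schema location'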
class PyXBVersionError (PyXBException):
"""Raised on import of a binding generated with a different version of PYXB"""
pass
class DOMGenerationError (PyXBException):
"""A non-validation error encountered converting bindings to DOM."""
pass
@six.unicode_convertible
class UnboundElementError (DOMGenerationError):
"""An instance converting to DOM had no bound element."""
instance = None
"""The binding instance. This is missing an element binding (via
L{pyxb.binding.basis._TypeBinding_mixin._element}) and no
C{element_name} was passed."""
def __init__ (self, instance):
super(UnboundElementError, self).__init__(instance)
self.instance = instance
def __str__ (self):
return six.u('Instance of type %s has no bound element for start tag') % (self.instance._diagnosticName(),)
class SchemaValidationError (PyXBException):
"""Raised when the XML hierarchy does not appear to be valid for an XML schema."""
pass
class NamespaceError (PyXBException):
"""Violation of some rule relevant to XML Namespaces"""
def __init__ (self, namespace, *args, **kw):
PyXBException.__init__(self, *args, **kw)
self.__namespace = namespace
def namespace (self): return self.__namespace
class NamespaceArchiveError (PyXBException):
"""Problem related to namespace archives"""
pass
class SchemaUniquenessError (PyXBException):
"""Raised when somebody tries to create a schema component using a
schema that has already been used in that namespace. Import and
include processing would have avoided this, so somebody asked for
it specifically."""
def __init__ (self, namespace, schema_location, existing_schema, *args, **kw):
super(SchemaUniquenessError, self).__init__(*args, **kw)
self.__namespace = namespace
self.__schemaLocation = schema_location
self.__existingSchema = existing_schema
def namespace (self): return self.__namespace
def schemaLocation (self): return self.__schemaLocation
def existingSchema (self): return self.__existingSchema
class BindingGenerationError (PyXBException):
"""Raised when something goes wrong generating the binding classes"""
pass
class NamespaceUniquenessError (NamespaceError):
"""Raised when an attempt is made to record multiple objects of the same name in the same namespace category."""
pass
class NotInNamespaceError (PyXBException):
'''Raised when a name is referenced that is not defined in the appropriate namespace.'''
__namespace = None
__ncName = None
class QNameResolutionError (NamespaceError):
'''Raised when a QName cannot be associated with a namespace.'''
namespaceContext = None
qname = None
def __init__ (self, message, qname, xmlns_context):
self.qname = qname
self.namespaceContext = xmlns_context
super(QNameResolutionError, self).__init__(message, qname, xmlns_context)
class BadDocumentError (PyXBException):
"""Raised when processing document content and an error is encountered."""
pass
class StructuralBadDocumentError (BadDocumentError):
"""Raised when processing document and the content model is not satisfied."""
@property
def element_use (self):
"""The L{pyxb.binding.content.ElementDeclaration} instance to which the content should conform, if available."""
return self.__elementUse
@property
def container (self):
"""The L{pyxb.binding.basis.complexTypeDefinition} instance to which the content would belong, if available."""
return self.__container
@property
def content (self):
"""The value which could not be reconciled with the content model."""
return self.__content
def __init__ (self, *args, **kw):
"""Raised when processing document and the content model is not satisfied.
@keyword content : The value that could not be reconciled with the content model
@keyword container : Optional binding instance into which the content was to be assigned
@keyword element_use : Optional reference to an element use identifying the element to which the value was to be reconciled
"""
self.__content = kw.pop('content', None)
if args:
self.__content = args[0]
self.__container = kw.pop('container', None)
self.__elementUse = kw.pop('element_use', None)
if self.__content is not None:
if self.__container is not None:
kw.setdefault('message', '%s cannot accept wildcard content %s' % (self.__container._Name(), self.__content))
elif self.__elementUse is not None:
kw.setdefault('message', '%s not consistent with content model for %s' % (self.__content, self.__elementUse))
else:
kw.setdefault('message', six.text_type(self.__content))
BadDocumentError.__init__(self, **kw)
class UnrecognizedDOMRootNodeError (StructuralBadDocumentError):
"""A root DOM node could not be resolved to a schema element"""
node = None
"""The L{xml.dom.Element} instance that could not be recognized"""
def __get_node_name (self):
"""The QName of the L{node} as a L{pyxb.namespace.ExpandedName}"""
import pyxb.namespace
return pyxb.namespace.ExpandedName(self.node.namespaceURI, self.node.localName)
node_name = property(__get_node_name)
def __init__ (self, node):
"""@param node: the value for the L{node} attribute."""
self.node = node
super(UnrecognizedDOMRootNodeError, self).__init__(node)
class ValidationError (PyXBException):
"""Raised when something in the infoset fails to satisfy a content model or attribute requirement.
All validation errors include a L{location} attribute which shows
where in the original XML the problem occurred. The attribute may
be C{None} if the content did not come from an XML document, or
the underlying XML infrastructure did not provide a location.
More refined validation error exception classes add more attributes."""
location = None
"""Where the error occurred in the document being parsed, if
available. This will be C{None}, or an instance of
L{pyxb.utils.utility.Location}."""
def details (self):
"""Provide information describing why validation failed.
In many cases, this is simply the informal string content that
would be obtained through the C{str} built-in function. For
certain errors this method gives more details on what would be
acceptable and where the descriptions can be found in the
original schema.
@return: a string description of validation failure"""
return six.text_type(self)
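# Illustrative usage (editor's sketch; 'po' stands for a hypothetical
# generated binding module): validation failures raised while parsing a
# document carry a location and a details() description.
#
#   import pyxb
#   try:
#       doc = po.CreateFromDocument(xml_text)
#   except pyxb.ValidationError as e:
#       print(e.details())   # explanation of what was expected
#       print(e.location)    # pyxb.utils.utility.Location, or None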
@six.unicode_convertible
class NonElementValidationError (ValidationError):
"""Raised when an element (or a value bound to an element) appears
in context that does not permit an element."""
element = None
"""The content that is not permitted. This may be an element, or
a DOM representation that would have been made into an element had
matters progressed further."""
def __init__ (self, element, location=None):
"""@param element: the value for the L{element} attribute.
@param location: the value for the L{location} attribute.
"""
self.element = element
if (location is None) and isinstance(element, pyxb.utils.utility.Locatable_mixin):
location = element._location()
self.location = location
super(NonElementValidationError, self).__init__(element, location)
def __str__ (self):
import pyxb.binding.basis
import xml.dom
value = ''
boundto = ''
location = ''
if isinstance(self.element, pyxb.binding.basis._TypeBinding_mixin):
eb = self.element._element()
boundto = ''
if eb is not None:
boundto = ' bound to %s' % (eb.name(),)
if isinstance(self.element, pyxb.binding.basis.simpleTypeDefinition):
value = self.element.xsdLiteral()
elif self.element._IsSimpleTypeContent():
value = six.text_type(self.element.value())
else:
value = 'Complex value'
elif isinstance(self.element, xml.dom.Node):
value = 'DOM node %s' % (self.element.nodeName,)
else:
value = '%s type %s' % (six.text_type(self.element), type(self.element))
if self.location is not None:
location = ' at %s' % (self.location,)
return six.u('%s%s not permitted%s') % (value, boundto, location)
class ElementValidationError (ValidationError):
"""Raised when a validation requirement for an element is not satisfied."""
pass
@six.unicode_convertible
class AbstractElementError (ElementValidationError):
"""Attempt to create an instance of an abstract element.
Raised when an element is created and the identified binding is
abstract. Such elements cannot be created directly; instead the
creation must derive from an instance of the abstract element's
substitution group.
Since members of the substitution group self-identify using the
C{substitutionGroup} attribute, there is no general way to find
the set of elements which would be acceptable in place of the
abstract element."""
element = None
"""The abstract L{pyxb.binding.basis.element} in question"""
value = None
"""The value proposed for the L{element}. This is usually going
to be a C{xml.dom.Node} used in the attempt to create the element,
C{None} if the abstract element was invoked without a node, or
another type if
L{pyxb.binding.content.ElementDeclaration.toDOM} is
mis-used."""
def __init__ (self, element, location, value=None):
"""@param element: the value for the L{element} attribute.
@param location: the value for the L{location} attribute.
@param value: the value for the L{value} attribute."""
self.element = element
self.location = location
self.value = value
super(AbstractElementError, self).__init__(element, location, value)
def __str__ (self):
return six.u('Cannot instantiate abstract element %s directly') % (self.element.name(),)
@six.unicode_convertible
class ContentInNilInstanceError (ElementValidationError):
"""Raised when an element that is marked to be nil is assigned content."""
instance = None
"""The binding instance which is xsi:nil"""
content = None
"""The content that was to be assigned to the instance."""
def __init__ (self, instance, content, location=None):
"""@param instance: the value for the L{instance} attribute.
@param content: the value for the L{content} attribute.
@param location: the value for the L{location} attribute. Default taken from C{instance} if possible."""
self.instance = instance
self.content = content
if location is None:
location = self.instance._location()
self.location = location
super(ContentInNilInstanceError, self).__init__(instance, content, location)
def __str__ (self):
from pyxb.namespace.builtin import XMLSchema_instance as XSI
return six.u('%s with %s=true cannot have content') % (self.instance._diagnosticName(), XSI.nil)
class NoNillableSupportError (ElementValidationError):
"""Raised when invoking L{_setIsNil<pyxb.binding.basis._TypeBinding_mixin._setIsNil>} on a type that does not support nillable."""
instance = None
"""The binding instance on which an inappropriate operation was invoked."""
def __init__ (self, instance, location=None):
"""@param instance: the value for the L{instance} attribute.
@param location: the value for the L{location} attribute. Default taken from C{instance} if possible."""
self.instance = instance
if location is None:
location = self.instance._location()
self.location = location
super(NoNillableSupportError, self).__init__(instance, location)
@six.unicode_convertible
class ElementChangeError (ElementValidationError):
"""Attempt to change an element that has a fixed value constraint."""
element = None
"""The L{pyxb.binding.basis.element} that has a fixed value."""
value = None
"""The value that was to be assigned to the element."""
def __init__ (self, element, value, location=None):
"""@param element: the value for the L{element} attribute.
@param value: the value for the L{value} attribute.
@param location: the value for the L{location} attribute. Default taken from C{value} if possible."""
import pyxb.utils.utility
self.element = element
self.value = value
if (location is None) and isinstance(value, pyxb.utils.utility.Locatable_mixin):
location = value._location()
self.location = location
super(ElementChangeError, self).__init__(element, value, location)
def __str__ (self):
return six.u('Value %s for element %s incompatible with fixed content') % (self.value, self.element.name())
class ComplexTypeValidationError (ValidationError):
"""Raised when a validation requirement for a complex type is not satisfied."""
pass
@six.unicode_convertible
class AbstractInstantiationError (ComplexTypeValidationError):
"""Attempt to create an instance of an abstract complex type.
These types are analogous to abstract base classes, and cannot be
created directly. A type should be used that extends the abstract
class.
When an incoming document is missing the xsi:type attribute which
redirects an element with an abstract type to the correct type,
the L{node} attribute is provided so the user can get a clue as to
    where the problem occurred. When this exception is a result of
constructor mis-use in Python code, the traceback will tell you
where the problem lies.
"""
type = None
"""The abstract L{pyxb.binding.basis.complexTypeDefinition} subclass used."""
node = None
"""The L{xml.dom.Element} from which instantiation was attempted, if available."""
def __init__ (self, type, location, node):
"""@param type: the value for the L{type} attribute.
@param location: the value for the L{location} attribute.
@param node: the value for the L{node} attribute."""
self.type = type
self.location = location
self.node = node
super(AbstractInstantiationError, self).__init__(type, location, node)
def __str__ (self):
# If the type is abstract, it has to have a name.
return six.u('Cannot instantiate abstract type %s directly') % (self.type._ExpandedName,)
@six.unicode_convertible
class AttributeOnSimpleTypeError (ComplexTypeValidationError):
"""Attempt made to set an attribute on an element with simple type.
Note that elements with complex type and simple content may have
attributes; elements with simple type must not."""
instance = None
"""The simple type binding instance on which no attributes exist."""
tag = None
"""The name of the proposed attribute."""
value = None
"""The value proposed to be assigned to the non-existent attribute."""
def __init__ (self, instance, tag, value, location=None):
"""@param instance: the value for the L{instance} attribute.
@param tag: the value for the L{tag} attribute.
@param value: the value for the L{value} attribute.
@param location: the value for the L{location} attribute. Default taken from C{instance} if possible."""
self.instance = instance
self.tag = tag
self.value = value
if location is None:
location = self.instance._location()
self.location = location
super(AttributeOnSimpleTypeError, self).__init__(instance, tag, value, location)
def __str__ (self):
return six.u('Simple type %s cannot support attribute %s') % (self.instance._Name(), self.tag)
class ContentValidationError (ComplexTypeValidationError):
"""Violation of a complex type content model."""
pass
@six.unicode_convertible
class ContentNondeterminismExceededError (ContentValidationError):
"""Content validation exceeded the allowed limits of nondeterminism."""
instance = None
"""The binding instance being validated."""
def __init__ (self, instance):
"""@param instance: the value for the L{instance} attribute."""
self.instance = instance
super(ContentNondeterminismExceededError, self).__init__(instance)
def __str__ (self):
return six.u('Nondeterminism exceeded validating %s') % (self.instance._Name(),)
@six.unicode_convertible
class SimpleContentAbsentError (ContentValidationError):
"""An instance with simple content was not provided with a value."""
instance = None
"""The binding instance for which simple content is missing."""
def __init__ (self, instance, location):
"""@param instance: the value for the L{instance} attribute.
@param location: the value for the L{location} attribute."""
self.instance = instance
self.location = location
super(SimpleContentAbsentError, self).__init__(instance, location)
def __str__ (self):
return six.u('Type %s requires content') % (self.instance._Name(),)
@six.unicode_convertible
class ExtraSimpleContentError (ContentValidationError):
"""A complex type with simple content was provided too much content."""
instance = None
"""The binding instance that already has simple content assigned."""
value = None
"""The proposed addition to that simple content."""
def __init__ (self, instance, value, location=None):
"""@param instance: the value for the L{instance} attribute.
@param value: the value for the L{value} attribute.
@param location: the value for the L{location} attribute."""
self.instance = instance
self.value = value
self.location = location
super(ExtraSimpleContentError, self).__init__(instance, value, location)
def __str__ (self):
return six.u('Instance of %s already has simple content value assigned') % (self.instance._Name(),)
@six.unicode_convertible
class NonPluralAppendError (ContentValidationError):
"""Attempt to append to an element which does not accept multiple instances."""
instance = None
"""The binding instance containing the element"""
element_declaration = None
"""The L{pyxb.binding.content.ElementDeclaration} contained in C{instance} that does not accept multiple instances"""
value = None
"""The proposed addition to the element in the instance"""
def __init__ (self, instance, element_declaration, value):
"""@param instance: the value for the L{instance} attribute.
@param element_declaration: the value for the L{element_declaration} attribute.
@param value: the value for the L{value} attribute."""
self.instance = instance
self.element_declaration = element_declaration
self.value = value
super(NonPluralAppendError, self).__init__(instance, element_declaration, value)
def __str__ (self):
return six.u('Instance of %s cannot append to element %s') % (self.instance._Name(), self.element_declaration.name())
@six.unicode_convertible
class MixedContentError (ContentValidationError):
"""Non-element content added to a complex type instance that does not support mixed content."""
instance = None
"""The binding instance."""
value = None
"""The non-element content."""
def __init__ (self, instance, value, location=None):
"""@param instance: the value for the L{instance} attribute.
@param value: the value for the L{value} attribute.
@param location: the value for the L{location} attribute."""
self.instance = instance
self.value = value
self.location = location
super(MixedContentError, self).__init__(instance, value, location)
def __str__ (self):
if self.location is not None:
return six.u('Invalid non-element content at %s') % (self.location,)
return six.u('Invalid non-element content')
@six.unicode_convertible
class UnprocessedKeywordContentError (ContentValidationError):
"""A complex type constructor was provided with keywords that could not be recognized."""
instance = None
"""The binding instance being constructed."""
keywords = None
"""The keywords that could not be recognized. These may have been
intended to be attributes or elements, but cannot be identified as
either."""
def __init__ (self, instance, keywords, location=None):
"""@param instance: the value for the L{instance} attribute.
@param keywords: the value for the L{keywords} attribute.
@param location: the value for the L{location} attribute."""
self.instance = instance
self.keywords = keywords
self.location = location
super(UnprocessedKeywordContentError, self).__init__(instance, keywords, location)
def __str__ (self):
return six.u('Unprocessed keywords instantiating %s: %s') % (self.instance._Name(), ' '.join(six.iterkeys(self.keywords)))
class IncrementalElementContentError (ContentValidationError):
"""Element or element-like content could not be validly associated with an sub-element in the content model.
This exception occurs when content is added to an element during
incremental validation, such as when positional arguments are used
in a constructor or material is appended either explicitly or
through parsing a DOM instance."""
instance = None
"""The binding for which the L{value} could not be associated with an element."""
automaton_configuration = None
"""The L{pyxb.binding.content.AutomatonConfiguration} representing the current state of the L{instance} content."""
value = None
"""The value that could not be associated with allowable content."""
def __init__ (self, instance, automaton_configuration, value, location=None):
"""@param instance: the value for the L{instance} attribute.
@param automaton_configuration: the value for the L{automaton_configuration} attribute.
@param value: the value for the L{value} attribute.
@param location: the value for the L{location} attribute."""
self.instance = instance
self.automaton_configuration = automaton_configuration
self.value = value
self.location = location
super(IncrementalElementContentError, self).__init__(instance, automaton_configuration, value, location)
def _valueDescription (self):
import xml.dom
if isinstance(self.value, pyxb.binding.basis._TypeBinding_mixin):
return self.value._diagnosticName()
if isinstance(self.value, xml.dom.Node):
return self.value.nodeName
return six.text_type(self.value)
@six.unicode_convertible
class UnrecognizedContentError (IncrementalElementContentError):
"""Element or element-like content could not be validly associated with an sub-element in the content model.
This exception occurs when content is added to an element during incremental validation."""
def __str__ (self):
value = self._valueDescription()
acceptable = self.automaton_configuration.acceptableContent()
if 0 == acceptable:
expect = 'no more content'
else:
import pyxb.binding.content
seen = set()
names = []
for u in acceptable:
if isinstance(u, pyxb.binding.content.ElementUse):
n = six.text_type(u.elementBinding().name())
else:
assert isinstance(u, pyxb.binding.content.WildcardUse)
n = 'xs:any'
if not (n in seen):
names.append(n)
seen.add(n)
expect = ' or '.join(names)
location = ''
if self.location is not None:
location = ' at %s' % (self.location,)
return six.u('Invalid content %s%s (expect %s)') % (value, location, expect)
def details (self):
import pyxb.binding.basis
import pyxb.binding.content
i = self.instance
rv = [ ]
if i._element() is not None:
rv.append('The containing element %s is defined at %s.' % (i._element().name(), i._element().xsdLocation()))
rv.append('The containing element type %s is defined at %s' % (self.instance._Name(), six.text_type(self.instance._XSDLocation)))
if self.location is not None:
rv.append('The unrecognized content %s begins at %s' % (self._valueDescription(), self.location))
else:
rv.append('The unrecognized content is %s' % (self._valueDescription(),))
rv.append('The %s automaton %s in an accepting state.' % (self.instance._Name(), self.automaton_configuration.isAccepting() and "is" or "is not"))
if isinstance(self.instance, pyxb.binding.basis.complexTypeDefinition) and self.instance._IsMixed():
rv.append('Character information content would be permitted.')
acceptable = self.automaton_configuration.acceptableContent()
if 0 == len(acceptable):
rv.append('No elements or wildcards would be accepted at this point.')
else:
rv.append('The following element and wildcard content would be accepted:')
rv2 = []
for u in acceptable:
if isinstance(u, pyxb.binding.content.ElementUse):
rv2.append('An element %s per %s' % (u.elementBinding().name(), u.xsdLocation()))
else:
assert isinstance(u, pyxb.binding.content.WildcardUse)
rv2.append('A wildcard per %s' % (u.xsdLocation(),))
rv.append('\t' + '\n\t'.join(rv2))
return '\n'.join(rv)
class BatchElementContentError (ContentValidationError):
"""Element/wildcard content cannot be reconciled with the required content model.
This exception occurs in post-construction validation using a
fresh validating automaton."""
instance = None
"""The binding instance being constructed."""
fac_configuration = None
"""The L{pyxb.utils.fac.Configuration} representing the current state of the L{instance} automaton."""
symbols = None
"""The sequence of symbols that were accepted as content prior to the error."""
symbol_set = None
"""The leftovers from L{pyxb.binding.basis.complexTypeDefinition._symbolSet} that could not be reconciled with the content model."""
def __init__ (self, instance, fac_configuration, symbols, symbol_set):
"""@param instance: the value for the L{instance} attribute.
@param fac_configuration: the value for the L{fac_configuration} attribute.
@param symbols: the value for the L{symbols} attribute.
@param symbol_set: the value for the L{symbol_set} attribute."""
self.instance = instance
self.fac_configuration = fac_configuration
self.symbols = symbols
self.symbol_set = symbol_set
super(BatchElementContentError, self).__init__(instance, fac_configuration, symbols, symbol_set)
def details (self):
import pyxb.binding.basis
import pyxb.binding.content
i = self.instance
rv = [ ]
if i._element() is not None:
rv.append('The containing element %s is defined at %s.' % (i._element().name(), i._element().xsdLocation()))
rv.append('The containing element type %s is defined at %s' % (self.instance._Name(), six.text_type(self.instance._XSDLocation)))
rv.append('The %s automaton %s in an accepting state.' % (self.instance._Name(), self.fac_configuration.isAccepting() and "is" or "is not"))
if self.symbols is None:
rv.append('Any accepted content has been stored in instance')
elif 0 == len(self.symbols):
rv.append('No content has been accepted')
else:
rv.append('The last accepted content was %s' % (self.symbols[-1].value._diagnosticName(),))
if isinstance(self.instance, pyxb.binding.basis.complexTypeDefinition) and self.instance._IsMixed():
rv.append('Character information content would be permitted.')
acceptable = self.fac_configuration.acceptableSymbols()
if 0 == len(acceptable):
rv.append('No elements or wildcards would be accepted at this point.')
else:
rv.append('The following element and wildcard content would be accepted:')
rv2 = []
for u in acceptable:
if isinstance(u, pyxb.binding.content.ElementUse):
rv2.append('An element %s per %s' % (u.elementBinding().name(), u.xsdLocation()))
else:
assert isinstance(u, pyxb.binding.content.WildcardUse)
rv2.append('A wildcard per %s' % (u.xsdLocation(),))
rv.append('\t' + '\n\t'.join(rv2))
if (self.symbol_set is None) or (0 == len(self.symbol_set)):
rv.append('No content remains unconsumed')
else:
rv.append('The following content was not processed by the automaton:')
rv2 = []
for (ed, syms) in six.iteritems(self.symbol_set):
if ed is None:
rv2.append('xs:any (%u instances)' % (len(syms),))
else:
rv2.append('%s (%u instances)' % (ed.name(), len(syms)))
rv.append('\t' + '\n\t'.join(rv2))
return '\n'.join(rv)
class IncompleteElementContentError (BatchElementContentError):
"""Validation of an instance failed to produce an accepting state.
This exception occurs in batch-mode validation."""
pass
class UnprocessedElementContentError (BatchElementContentError):
"""Validation of an instance produced an accepting state but left element material unconsumed.
This exception occurs in batch-mode validation."""
pass
class InvalidPreferredElementContentError (BatchElementContentError):
"""Use of a preferred element led to inability to generate a valid document"""
preferred_symbol = None
"""The element symbol which was not accepted."""
def __init__ (self, instance, fac_configuration, symbols, symbol_set, preferred_symbol):
"""@param instance: the value for the L{instance} attribute.
@param fac_configuration: the value for the L{fac_configuration} attribute.
@param symbols: the value for the L{symbols} attribute.
@param symbol_set: the value for the L{symbol_set} attribute.
@param preferred_symbol: the value for the L{preferred_symbol} attribute.
"""
self.instance = instance
self.fac_configuration = fac_configuration
self.symbols = symbols
self.symbol_set = symbol_set
self.preferred_symbol = preferred_symbol
# Bypass immediate parent so we preserve the last argument
super(BatchElementContentError, self).__init__(instance, fac_configuration, symbols, symbol_set, preferred_symbol)
@six.unicode_convertible
class OrphanElementContentError (ContentValidationError):
"""An element expected to be used in content is not present in the instance.
This exception occurs in batch-mode validation when
L{pyxb.ValidationConfig.contentInfluencesGeneration} applies,
L{pyxb.ValidationConfig.orphanElementInContent} is set to
L{pyxb.ValidationConfig.RAISE_EXCEPTION}, and the content list
includes an element that is not in the binding instance
content.
"""
instance = None
"""The binding instance."""
preferred = None
"""An element value from the L{instance} L{content<pyxb.binding.basis.complexTypeDefinition.content>} list which was not found in the L{instance}."""
def __init__ (self, instance, preferred):
"""@param instance: the value for the L{instance} attribute.
@param preferred: the value for the L{preferred} attribute.
"""
self.instance = instance
self.preferred = preferred
super(OrphanElementContentError, self).__init__(instance, preferred)
def __str__ (self):
return six.u('Preferred content element not found in instance')
@six.unicode_convertible
class SimpleTypeValueError (ValidationError):
"""Raised when a simple type value does not satisfy its constraints."""
type = None
"""The L{pyxb.binding.basis.simpleTypeDefinition} that constrains values."""
value = None
"""The value that violates the constraints of L{type}. In some
cases this is a tuple of arguments passed to a constructor that
failed with a built-in exception likeC{ValueError} or
C{OverflowError}."""
def __init__ (self, type, value, location=None):
"""@param type: the value for the L{type} attribute.
@param value: the value for the L{value} attribute.
@param location: the value for the L{location} attribute. Default taken from C{value} if possible."""
import pyxb.utils.utility
self.type = type
self.value = value
if (location is None) and isinstance(value, pyxb.utils.utility.Locatable_mixin):
location = value._location()
self.location = location
super(SimpleTypeValueError, self).__init__(type, value, location)
def __str__ (self):
import pyxb.binding.basis
if isinstance(self.value, pyxb.binding.basis._TypeBinding_mixin):
return six.u('Type %s cannot be created from %s: %s') % (self.type._Name(), self.value._Name(), self.value)
return six.u('Type %s cannot be created from: %s') % (self.type._Name(), self.value)
@six.unicode_convertible
class SimpleListValueError (SimpleTypeValueError):
"""Raised when a list simple type contains a member that does not satisfy its constraints.
In this case, L{type} is the type of the list, and value
C{type._ItemType} is the type for which the L{value} is
unacceptable."""
def __str__ (self):
return six.u('Member type %s of list type %s cannot accept %s') % (self.type._ItemType._Name(), self.type._Name(), self.value)
@six.unicode_convertible
class SimpleUnionValueError (SimpleTypeValueError):
"""Raised when a union simple type contains a member that does not satisfy its constraints.
In this case, L{type} is the type of the union, and the value
C{type._MemberTypes} is the set of types for which the value is
unacceptable.
The L{value} itself is the tuple of arguments passed to the
constructor for the union."""
def __str__ (self):
return six.u('No memberType of %s can be constructed from %s') % (self.type._Name(), self.value)
@six.unicode_convertible
class SimpleFacetValueError (SimpleTypeValueError):
"""Raised when a simple type value does not satisfy a facet constraint.
This extends L{SimpleTypeValueError} with the L{facet} field which
can be used to determine why the value is unacceptable."""
type = None
"""The L{pyxb.binding.basis.simpleTypeDefinition} that constrains values."""
value = None
"""The value that violates the constraints of L{type}. In some
cases this is a tuple of arguments passed to a constructor that
failed with a built-in exception like C{ValueError} or
C{OverflowError}."""
facet = None
"""The specific facet that is violated by the value."""
def __init__ (self, type, value, facet, location=None):
"""@param type: the value for the L{type} attribute.
@param value: the value for the L{value} attribute.
@param facet: the value for the L{facet} attribute.
@param location: the value for the L{location} attribute. Default taken from C{value} if possible."""
import pyxb.utils.utility
self.type = type
self.value = value
self.facet = facet
if (location is None) and isinstance(value, pyxb.utils.utility.Locatable_mixin):
location = value._location()
self.location = location
# Bypass immediate parent
super(SimpleTypeValueError, self).__init__(type, value, facet)
def __str__ (self):
return six.u('Type %s %s constraint violated by value %s') % (self.type._Name(), self.facet._Name, self.value)
class SimplePluralValueError (SimpleTypeValueError):
"""Raised when context requires a plural value.
Unlike L{SimpleListValueError}, in this case the plurality is
external to C{type}, for example when an element has simple
content and allows multiple occurrences."""
pass
class AttributeValidationError (ValidationError):
"""Raised when an attribute requirement is not satisfied."""
type = None
"""The L{pyxb.binding.basis.complexTypeDefinition} subclass of the instance."""
tag = None
"""The name of the attribute."""
instance = None
"""The binding instance, if available."""
def __init__ (self, type, tag, instance=None, location=None):
"""@param type: the value for the L{type} attribute.
@param tag: the value for the L{tag} attribute.
@param instance: the value for the L{instance} attribute.
@param location: the value for the L{location} attribute. Default taken from C{instance} if possible.
"""
import pyxb.utils.utility as utility
self.type = type
self.tag = tag
self.instance = instance
if (location is None) and isinstance(instance, utility.Locatable_mixin):
location = instance._location()
self.location = location
super(AttributeValidationError, self).__init__(type, tag, instance, location)
class UnrecognizedAttributeError (AttributeValidationError):
"""Attempt to reference an attribute not sanctioned by content model."""
pass
@six.unicode_convertible
class ProhibitedAttributeError (AttributeValidationError):
"""Raised when an attribute that is prohibited is set or referenced in an element."""
def __str__ (self):
return six.u('Attempt to reference prohibited attribute %s in type %s') % (self.tag, self.type)
@six.unicode_convertible
class MissingAttributeError (AttributeValidationError):
"""Raised when an attribute that is required is missing in an element."""
def __str__ (self):
return six.u('Instance of %s lacks required attribute %s') % (self.type, self.tag)
@six.unicode_convertible
class AttributeChangeError (AttributeValidationError):
"""Attempt to change an attribute that has a fixed value constraint."""
def __str__ (self):
return six.u('Cannot change fixed attribute %s in type %s') % (self.tag, self.type)
class BindingError (PyXBException):
"""Raised when the bindings are mis-used.
These are not validation errors, but rather structural errors.
For example, attempts to extract complex content from a type that
requires simple content, or vice versa. """
@six.unicode_convertible
class NotSimpleContentError (BindingError):
"""An operation that requires simple content was invoked on a
complex type instance that does not have simple content."""
instance = None
"""The binding instance which should have had simple content."""
def __init__ (self, instance):
"""@param instance: the binding instance that was mis-used.
This will be available in the L{instance} attribute."""
self.instance = instance
super(BindingError, self).__init__(instance)
pass
def __str__ (self):
return six.u('type %s does not have simple content') % (self.instance._Name(),)
@six.unicode_convertible
class NotComplexContentError (BindingError):
"""An operation that requires a content model was invoked on a
complex type instance that has empty or simple content."""
instance = None
"""The binding instance which should have had a content model."""
def __init__ (self, instance):
"""@param instance: the binding instance that was mis-used.
This will be available in the L{instance} attribute."""
self.instance = instance
super(BindingError, self).__init__(instance)
def __str__ (self):
return six.u('type %s has simple/empty content') % (self.instance._Name(),)
@six.unicode_convertible
class ReservedNameError (BindingError):
"""Reserved name set in binding instance."""
instance = None
"""The binding instance."""
name = None
"""The name that was caught being assigned"""
def __init__ (self, instance, name):
"""@param instance: the value for the L{instance} attribute.
@param name: the value for the L{name} attribute."""
self.instance = instance
self.name = name
super(ReservedNameError, self).__init__(instance, name)
def __str__ (self):
return six.u('%s is a reserved name within %s') % (self.name, self.instance._Name())
class PyXBError (Exception):
"""Base class for exceptions that indicate a problem that the user probably can't fix."""
pass
class UsageError (PyXBError):
"""Raised when the code detects user violation of an API."""
class LogicError (PyXBError):
"""Raised when the code detects an implementation problem."""
class IncompleteImplementationError (LogicError):
"""Raised when required capability has not been implemented.
This is only used where it is reasonable to expect the capability
to be present, such as a feature of XML schema that is not
supported (e.g., the redefine directive)."""
```
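
The exception classes above are normally raised by the binding layer rather than constructed by hand. The short sketch below is illustrative only and is not part of the module above: it assumes a standard PyXB installation in which these classes are re-exported from the top-level `pyxb` package and in which `pyxb.binding.datatypes` provides the built-in XML Schema datatype bindings.

```python
# Illustrative sketch, not part of the module above.
import pyxb
import pyxb.binding.datatypes as xs

try:
    xs.int('not-a-number')  # lexically invalid for xs:int
except pyxb.SimpleFacetValueError as e:
    # Facet violations additionally expose the violated facet.
    print('facet violation in %s: %s' % (e.type._Name(), e.facet))
except pyxb.SimpleTypeValueError as e:
    print('bad value for %s: %r' % (e.type._Name(), e.value))
```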
#### File: pyxb/namespace/resolution.py
```python
import logging
import pyxb
import pyxb.utils.utility
from pyxb.namespace import archive, utility
from pyxb.utils import six
_log = logging.getLogger(__name__)
class _Resolvable_mixin (pyxb.cscRoot):
"""Mix-in indicating that this object may have references to unseen named components.
This class is mixed-in to those XMLSchema components that have a reference
to another component that is identified by a QName. Resolution of that
component may need to be delayed if the definition of the component has
not yet been read.
"""
#_TraceResolution = True
_TraceResolution = False
def isResolved (self):
"""Determine whether this named component is resolved.
Override this in the child class."""
raise NotImplementedError("_Resolvable_mixin.isResolved in %s"% (type(self).__name__,))
def _resolve (self):
"""Perform whatever steps are required to resolve this component.
Resolution is performed in the context of the namespace to which the
component belongs. Invoking this method may fail to complete the
resolution process if the component itself depends on unresolved
components. The sole caller of this should be
L{_NamespaceResolution_mixin.resolveDefinitions}.
This method is permitted (nay, encouraged) to raise an exception if
resolution requires interpreting a QName and the named component
cannot be found.
Override this in the child class. At the start of your implementation, if L{isResolved} is
true, return right away. If something prevents you from completing
resolution, invoke L{self._queueForResolution()} (so it is retried
later) and immediately return self. Prior to leaving after successful
resolution discard any cached dom node by setting C{self.__domNode=None}.
@return: C{self}, whether or not resolution succeeds.
@raise pyxb.SchemaValidationError: if resolution requires a reference to an unknown component
"""
raise NotImplementedError("_Resolvable_mixin._resolve in %s"% (type(self).__name__,))
def _queueForResolution (self, why=None, depends_on=None):
"""Short-hand to requeue an object if the class implements _namespaceContext().
"""
if (why is not None) and self._TraceResolution:
_log.info('Resolution delayed for %s: %s\n\tDepends on: %s', self, why, depends_on)
self._namespaceContext().queueForResolution(self, depends_on)
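# Illustrative sketch only (not part of PyXB): a typical _Resolvable_mixin
# subclass follows the protocol described in _resolve() above roughly as
# follows, where __target is a hypothetical attribute holding the resolved
# referent:
#
#   def isResolved (self):
#       return self.__target is not None
#
#   def _resolve (self):
#       if self.isResolved():
#           return self
#       target = ...look up the referenced component by QName...
#       if (target is None) or not target.isResolved():
#           self._queueForResolution('target not yet resolved', depends_on=target)
#           return self
#       self.__target = target
#       return self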
class _NamespaceResolution_mixin (pyxb.cscRoot):
"""Mix-in that aggregates those aspects of XMLNamespaces relevant to
resolving component references.
"""
# A set of namespaces which some schema imported while processing with
# this namespace as target.
__importedNamespaces = None
# A set of namespaces which appear in namespace declarations of schema
# with this namespace as target.
__referencedNamespaces = None
# A list of Namespace._Resolvable_mixin instances that have yet to be
# resolved.
__unresolvedComponents = None
# A map from Namespace._Resolvable_mixin instances in
# __unresolvedComponents to sets of other unresolved objects on which they
# depend.
__unresolvedDependents = None
def _reset (self):
"""CSC extension to reset fields of a Namespace.
This one handles component-resolution--related data."""
getattr(super(_NamespaceResolution_mixin, self), '_reset', lambda *args, **kw: None)()
self.__unresolvedComponents = []
self.__unresolvedDependents = {}
self.__importedNamespaces = set()
self.__referencedNamespaces = set()
def _getState_csc (self, kw):
kw.update({
'importedNamespaces': self.__importedNamespaces,
'referencedNamespaces': self.__referencedNamespaces,
})
return getattr(super(_NamespaceResolution_mixin, self), '_getState_csc', lambda _kw: _kw)(kw)
def _setState_csc (self, kw):
self.__importedNamespaces = kw['importedNamespaces']
self.__referencedNamespaces = kw['referencedNamespaces']
return getattr(super(_NamespaceResolution_mixin, self), '_setState_csc', lambda _kw: self)(kw)
def importNamespace (self, namespace):
self.__importedNamespaces.add(namespace)
return self
def _referenceNamespace (self, namespace):
self._activate()
self.__referencedNamespaces.add(namespace)
return self
def importedNamespaces (self):
"""Return the set of namespaces which some schema imported while
processing with this namespace as target."""
return frozenset(self.__importedNamespaces)
def _transferReferencedNamespaces (self, module_record):
assert isinstance(module_record, archive.ModuleRecord)
module_record._setReferencedNamespaces(self.__referencedNamespaces)
self.__referencedNamespaces.clear()
def referencedNamespaces (self):
"""Return the set of namespaces which appear in namespace declarations
of schema with this namespace as target."""
return frozenset(self.__referencedNamespaces)
def queueForResolution (self, resolvable, depends_on=None):
"""Invoked to note that a component may have references that will need
to be resolved.
Newly created named components are often unresolved, as are components
which, in the course of resolution, are found to depend on another
unresolved component.
@param resolvable: An instance of L{_Resolvable_mixin} that is later to
be resolved.
@keyword depends_on: C{None}, or an instance of L{_Resolvable_mixin}
which C{resolvable} requires to be resolved in order to resolve
itself.
@return: C{resolvable}
"""
assert isinstance(resolvable, _Resolvable_mixin)
if not resolvable.isResolved():
assert depends_on is None or isinstance(depends_on, _Resolvable_mixin)
self.__unresolvedComponents.append(resolvable)
if depends_on is not None and not depends_on.isResolved():
from pyxb.xmlschema import structures
assert isinstance(depends_on, _Resolvable_mixin)
assert isinstance(depends_on, structures._NamedComponent_mixin)
self.__unresolvedDependents.setdefault(resolvable, set()).add(depends_on)
return resolvable
def needsResolution (self):
"""Return C{True} iff this namespace has not been resolved."""
return self.__unresolvedComponents is not None
def _replaceComponent_csc (self, existing_def, replacement_def):
"""Replace a component definition if present in the list of unresolved components.
"""
try:
index = self.__unresolvedComponents.index(existing_def)
if (replacement_def is None) or (replacement_def in self.__unresolvedComponents):
del self.__unresolvedComponents[index]
else:
assert isinstance(replacement_def, _Resolvable_mixin)
self.__unresolvedComponents[index] = replacement_def
# Rather than assume the replacement depends on the same
# resolvables as the original, just wipe the dependency record:
# it'll get recomputed later if it's still important.
if existing_def in self.__unresolvedDependents:
del self.__unresolvedDependents[existing_def]
except ValueError:
pass
return getattr(super(_NamespaceResolution_mixin, self), '_replaceComponent_csc', lambda *args, **kw: replacement_def)(existing_def, replacement_def)
def resolveDefinitions (self, allow_unresolved=False):
"""Loop until all references within the associated resolvable objects
have been resolved.
This method iterates through all components on the unresolved list,
invoking the _resolve method of each. If the component could not be
resolved in this pass, it is placed back on the list for the next
iteration. If an iteration completes without resolving any of the
unresolved components, a pyxb.NotInNamespaceError exception is raised.
@note: Do not invoke this until all top-level definitions for the
namespace have been provided. The resolution routines are entitled to
raise a validation exception if a reference to an unrecognized
component is encountered.
"""
if not self.needsResolution():
return True
while 0 < len(self.__unresolvedComponents):
# Save the list of unresolved objects, reset the list to capture
# any new objects defined during resolution, and attempt the
# resolution for everything that isn't resolved.
unresolved = self.__unresolvedComponents
self.__unresolvedComponents = []
self.__unresolvedDependents = {}
for resolvable in unresolved:
# Attempt the resolution.
resolvable._resolve()
# Either we resolved it, or we queued it to try again later
assert resolvable.isResolved() or (resolvable in self.__unresolvedComponents), 'Lost resolvable %s' % (resolvable,)
# We only clone things that have scope None. We never
# resolve things that have scope None. Therefore, we
# should never have resolved something that has
# clones.
if (resolvable.isResolved() and (resolvable._clones() is not None)):
assert False
if self.__unresolvedComponents == unresolved:
if allow_unresolved:
return False
# This only happens if we didn't code things right, or
# there is a circular dependency in some named component
# (i.e., the schema designer didn't do things right).
failed_components = []
from pyxb.xmlschema import structures
for d in self.__unresolvedComponents:
if isinstance(d, structures._NamedComponent_mixin):
failed_components.append('%s named %s' % (d.__class__.__name__, d.name()))
else:
failed_components.append('Anonymous %s' % (d.__class__.__name__,))
raise pyxb.NotInNamespaceError('Infinite loop in resolution:\n %s' % ("\n ".join(failed_components),))
# Replace the list of unresolved components with None, so that
# attempts to subsequently add another component fail.
self.__unresolvedComponents = None
self.__unresolvedDependents = None
# NOTE: Dependencies may require that we keep these around for a while
# longer.
#
# Remove the namespace context from everything, since we won't be
# resolving anything else.
self._releaseNamespaceContexts()
return True
def _unresolvedComponents (self):
"""Returns a reference to the list of unresolved components."""
return self.__unresolvedComponents
def _unresolvedDependents (self):
"""Returns a map from unresolved components to sets of components that
must be resolved first."""
return self.__unresolvedDependents
def ResolveSiblingNamespaces (sibling_namespaces):
"""Resolve all components in the sibling_namespaces.
@param sibling_namespaces : A set of namespaces expected to be closed
under dependency."""
for ns in sibling_namespaces:
ns.configureCategories([archive.NamespaceArchive._AnonymousCategory()])
ns.validateComponentModel()
def __keyForCompare (dependency_map):
"""Sort namespaces so dependencies get resolved first.
Uses the trick underlying functools.cmp_to_key(), but optimized for
this special case. The dependency map is incorporated into the class
definition by scope.
"""
class K (object):
def __init__ (self, ns, *args):
self.__ns = ns
# self compares less than other if self.ns is in the dependency set
# of other.ns but not vice-versa.
def __lt__ (self, other):
return ((self.__ns in dependency_map.get(other.__ns, set())) \
and not (other.__ns in dependency_map.get(self.__ns, set())))
# self compares equal to other if their namespaces are either
# mutually dependent or independent.
def __eq__ (self, other):
return (self.__ns in dependency_map.get(other.__ns, set())) == (other.__ns in dependency_map.get(self.__ns, set()))
# All other order metrics are derived.
def __ne__ (self, other):
return not self.__eq__(other)
def __le__ (self, other):
return self.__lt__(other) or self.__eq__(other)
def __gt__ (self, other):
return other.__lt__(self.__ns)
def __ge__ (self, other):
return other.__lt__(self.__ns) or self.__eq__(other)
return K
need_resolved_set = set(sibling_namespaces)
dependency_map = {}
last_state = None
while need_resolved_set:
need_resolved_list = list(need_resolved_set)
if dependency_map:
need_resolved_list.sort(key=__keyForCompare(dependency_map))
need_resolved_set = set()
dependency_map = {}
for ns in need_resolved_list:
if not ns.needsResolution():
continue
if not ns.resolveDefinitions(allow_unresolved=True):
deps = dependency_map.setdefault(ns, set())
for (c, dcs) in six.iteritems(ns._unresolvedDependents()):
for dc in dcs:
dns = dc.expandedName().namespace()
if dns != ns:
deps.add(dns)
_log.info('Holding incomplete resolution %s depending on: %s', ns.uri(), six.u(' ; ').join([ six.text_type(_dns) for _dns in deps ]))
need_resolved_set.add(ns)
# Exception termination check: if we have the same set of incompletely
# resolved namespaces, and each has the same number of unresolved
# components, assume there's a truly unresolvable dependency: either
# due to circularity, or because there was an external namespace that
# was missed from the sibling list.
state = []
for ns in need_resolved_set:
state.append( (ns, len(ns._unresolvedComponents())) )
state = tuple(state)
if last_state == state:
raise pyxb.LogicError('Unexpected external dependency in sibling namespaces: %s' % (six.u('\n ').join( [six.text_type(_ns) for _ns in need_resolved_set ]),))
last_state = state
@six.unicode_convertible
class NamespaceContext (object):
"""Records information associated with namespaces at a DOM node.
"""
def __str__ (self):
rv = [ six.u('NamespaceContext ') ]
if self.defaultNamespace() is not None:
rv.extend([ '(defaultNamespace=', six.text_type(self.defaultNamespace()), ') '])
if self.targetNamespace() is not None:
rv.extend([ '(targetNamespace=', six.text_type(self.targetNamespace()), ') '])
rv.append("\n")
for (pfx, ns) in six.iteritems(self.inScopeNamespaces()):
if pfx is not None:
rv.append(' xmlns:%s=%s' % (pfx, six.text_type(ns)))
return six.u('').join(rv)
__ContextStack = []
@classmethod
def PushContext (cls, ctx):
"""Make C{ctx} the currently active namespace context.
Prior contexts are retained on a LIFO stack."""
assert isinstance(ctx, cls)
cls.__ContextStack.append(ctx)
return ctx
@classmethod
def Current (cls):
"""Access the currently active namespace context.
If no context is active, C{None} is returned. This probably
represents mis-use of the infrastructure (viz., failure to record the
context within which a QName must be resolved)."""
if cls.__ContextStack:
return cls.__ContextStack[-1]
return None
@classmethod
def PopContext (cls):
"""Discard the currently active namespace context, restoring its
predecessor.
The discarded context is returned."""
return cls.__ContextStack.pop()
__TargetNamespaceAttributes = { }
@classmethod
def _AddTargetNamespaceAttribute (cls, expanded_name, attribute_name):
assert expanded_name is not None
cls.__TargetNamespaceAttributes[expanded_name] = attribute_name
@classmethod
def _TargetNamespaceAttribute (cls, expanded_name):
return cls.__TargetNamespaceAttributes.get(expanded_name)
# Support for holding onto referenced namespaces until we have a target
# namespace to give them to.
__pendingReferencedNamespaces = None
def defaultNamespace (self):
"""The default namespace in effect at this node. E.g., C{xmlns="URN:default"}."""
return self.__defaultNamespace
__defaultNamespace = None
def setDefaultNamespace (self, default_namespace):
"""Set the default namespace for the generated document.
Even if invoked post construction, the default namespace will affect
the entire document, as all namespace declarations are placed in the
document root.
@param default_namespace: The namespace to be defined as the default
namespace in the top-level element of the document. May be provided
as a real namespace, or just its URI.
@type default_namespace: L{pyxb.namespace.Namespace} or C{str} or
C{unicode}.
"""
if isinstance(default_namespace, six.string_types):
default_namespace = utility.NamespaceForURI(default_namespace, create_if_missing=True)
if (default_namespace is not None) and default_namespace.isAbsentNamespace():
raise pyxb.UsageError('Default namespace must not be an absent namespace')
self.__defaultNamespace = default_namespace
# If C{True}, this context is within a schema that has no target
# namespace, and we should use the target namespace as a fallback if no
# default namespace is available and no namespace prefix appears on a
# QName. This situation arises when a top-level schema has an absent
# target namespace, or when a schema with an absent target namespace is
# being included into a schema with a non-absent target namespace.
__fallbackToTargetNamespace = False
def targetNamespace (self):
"""The target namespace in effect at this node. Usually from the
C{targetNamespace} attribute. If no namespace is specified for the
schema, an absent namespace was assigned upon creation and will be
returned."""
return self.__targetNamespace
__targetNamespace = None
def inScopeNamespaces (self):
"""Map from prefix strings to L{Namespace} instances associated with those
prefixes. The prefix C{None} identifies the default namespace."""
return self.__inScopeNamespaces
__inScopeNamespaces = None
"""Map from L{Namespace} instances to sets of prefix strings associated
with the namespace. The default namespace is not represented."""
__inScopePrefixes = None
def __removePrefixMap (self, pfx):
ns = self.__inScopeNamespaces.pop(pfx, None)
if ns is not None:
pfxs = self.__inScopePrefixes.get(ns)
if pfxs is not None:
pfxs.discard(pfx)
def __addPrefixMap (self, pfx, ns):
# Any previous assignment must have already been removed
self.__inScopeNamespaces[pfx] = ns
self.__inScopePrefixes.setdefault(ns, set()).add(pfx)
def __clonePrefixMap (self):
self.__inScopeNamespaces = self.__inScopeNamespaces.copy()
isp = {}
for (ns, pfxs) in six.iteritems(self.__inScopePrefixes):
isp[ns] = pfxs.copy()
self.__inScopePrefixes = isp
# Class-scope initial map from prefix to namespace
__InitialScopeNamespaces = None
# Class-scope initial map from namespace to prefix(es)
__InitialScopePrefixes = None
# Instance-specific initial map from prefix to namespace
__initialScopeNamespaces = None
# Instance-specific initial map from namespace to prefix(es)
__initialScopePrefixes = None
@classmethod
def __BuildInitialPrefixMap (cls):
if cls.__InitialScopeNamespaces is not None:
return
from pyxb.namespace import builtin
cls.__InitialScopeNamespaces = builtin._UndeclaredNamespaceMap
cls.__InitialScopePrefixes = {}
for (pfx, ns) in six.iteritems(cls.__InitialScopeNamespaces):
cls.__InitialScopePrefixes.setdefault(ns, set()).add(pfx)
def prefixForNamespace (self, namespace):
"""Return a prefix associated with the given namespace in this
context, or None if the namespace is the default or is not in
scope."""
pfxs = self.__inScopePrefixes.get(namespace)
if pfxs:
return next(iter(pfxs))
return None
@classmethod
def GetNodeContext (cls, node, **kw):
"""Get the L{NamespaceContext} instance that was assigned to the node.
If none has been assigned and keyword parameters are present, create
one treating this as the root node and the keyword parameters as
configuration information (e.g., default_namespace).
@raise pyxb.LogicError: no context is available and the keywords
required to create one were not provided
"""
try:
return node.__namespaceContext
except AttributeError:
return NamespaceContext(node, **kw)
def setNodeContext (self, node):
node.__namespaceContext = self
# Integer counter to help generate unique namespace prefixes
__namespacePrefixCounter = None
def declareNamespace (self, namespace, prefix=None, add_to_map=False):
"""Record the given namespace as one to be used in this document.
@param namespace: The namespace to be associated with the document.
@type namespace: L{pyxb.namespace.Namespace}
@keyword prefix: Optional prefix to be used with this namespace. If
not provided, a unique prefix is generated or a standard prefix is
used, depending on the namespace.
@return: a prefix that may be used with the namespace. If C{prefix}
was C{None} the return value may be a previously-assigned prefix.
@todo: ensure multiple namespaces do not share the same prefix
@todo: provide default prefix in L{pyxb.namespace.Namespace}
"""
if not isinstance(namespace, pyxb.namespace.Namespace):
raise pyxb.UsageError('declareNamespace: must be given a namespace instance')
if namespace.isAbsentNamespace():
raise pyxb.UsageError('declareNamespace: namespace must not be an absent namespace')
if prefix is None:
prefix = namespace.prefix()
if prefix is None:
pfxs = self.__inScopePrefixes.get(namespace)
if pfxs:
prefix = next(iter(pfxs))
while prefix is None:
self.__namespacePrefixCounter += 1
candidate_prefix = 'ns%d' % (self.__namespacePrefixCounter,)
if not (candidate_prefix in self.__inScopeNamespaces):
prefix = candidate_prefix
ns = self.__inScopeNamespaces.get(prefix)
if ns:
if ns != namespace:
raise pyxb.LogicError('Prefix %s is already in use for %s' % (prefix, ns))
return prefix
if not self.__mutableInScopeNamespaces:
self.__clonePrefixMap()
self.__mutableInScopeNamespaces = True
self.__addPrefixMap(prefix, namespace)
return prefix
def processXMLNS (self, prefix, uri):
from pyxb.namespace import builtin
if not self.__mutableInScopeNamespaces:
self.__clonePrefixMap()
self.__mutableInScopeNamespaces = True
if builtin.XML.boundPrefix() == prefix:
# Bound prefix xml is permitted if it's bound to the right URI, or
# if the scope is being left. In neither case is the mapping
# adjusted.
if (uri is None) or builtin.XML.uri() == uri:
return
raise pyxb.LogicError('Cannot manipulate bound prefix xml')
if uri:
if prefix is None:
ns = self.__defaultNamespace = utility.NamespaceForURI(uri, create_if_missing=True)
self.__inScopeNamespaces[None] = self.__defaultNamespace
else:
ns = utility.NamespaceForURI(uri, create_if_missing=True)
self.__removePrefixMap(prefix)
self.__addPrefixMap(prefix, ns)
if self.__targetNamespace:
self.__targetNamespace._referenceNamespace(ns)
else:
self.__pendingReferencedNamespaces.add(ns)
else:
# NB: XMLNS 6.2 says that you can undefine a default
# namespace, but does not say anything explicitly about
# undefining a prefixed namespace. XML-Infoset 2.2
# paragraph 6 implies you can do this, but expat blows up
# if you try it. I don't think it's legal.
if prefix is not None:
raise pyxb.NamespaceError(self, 'Attempt to undefine non-default namespace %s' % (prefix,))
self.__removePrefixMap(prefix)
self.__defaultNamespace = None
def finalizeTargetNamespace (self, tns_uri=None, including_context=None):
if tns_uri is not None:
assert 0 < len(tns_uri)
# Do not prevent overwriting target namespace; need this for WSDL
# files where an embedded schema inadvertently inherits a target
# namespace from its enclosing definitions element. Note that if
# we don't check this here, we do have to check it when schema
# documents are included into parent schema documents.
self.__targetNamespace = utility.NamespaceForURI(tns_uri, create_if_missing=True)
elif self.__targetNamespace is None:
if including_context is not None:
self.__targetNamespace = including_context.targetNamespace()
self.__fallbackToTargetNamespace = True
elif tns_uri is None:
self.__targetNamespace = utility.CreateAbsentNamespace()
else:
self.__targetNamespace = utility.NamespaceForURI(tns_uri, create_if_missing=True)
if self.__pendingReferencedNamespaces is not None:
[ self.__targetNamespace._referenceNamespace(_ns) for _ns in self.__pendingReferencedNamespaces ]
self.__pendingReferencedNamespaces = None
assert self.__targetNamespace is not None
if (not self.__fallbackToTargetNamespace) and self.__targetNamespace.isAbsentNamespace():
self.__fallbackToTargetNamespace = True
def reset (self):
"""Reset this instance to the state it was when created, exclusive of
XMLNS directives passed in a constructor C{dom_node} parameter.
This preserves parent context and constructor-specified prefix maps,
but clears the namespace-prefix mapping of any additions made while
processing namespace directives in DOM nodes, or manually added
post-construction.
The defaultNamespace is also retained."""
self.__inScopeNamespaces = self.__initialScopeNamespaces
self.__inScopePrefixes = self.__initialScopePrefixes
self.__mutableInScopeNamespaces = False
self.__namespacePrefixCounter = 0
def __init__ (self,
dom_node=None,
parent_context=None,
including_context=None,
recurse=True,
default_namespace=None,
target_namespace=None,
in_scope_namespaces=None,
expanded_name=None,
finalize_target_namespace=True): # MUST BE True for WSDL to work with minidom
"""Determine the namespace context that should be associated with the
given node and, optionally, its element children.
Primarily this class maintains a map between namespaces and prefixes
used in QName instances. The initial map comprises the bound prefixes
(C{xml} and C{xmlns}), prefixes inherited from C{parent_context}, and
prefixes passed through the C{in_scope_namespaces}
parameter to the constructor. This map is then augmented by any
namespace declarations present in a passed C{dom_node}. The initial
map prior to augmentation may be restored through the L{reset()}
method.
@param dom_node: The DOM node
@type dom_node: C{xml.dom.Element}
@keyword parent_context: Optional value that specifies the context
associated with C{dom_node}'s parent node. If not provided, only the
C{xml} namespace is in scope.
@type parent_context: L{NamespaceContext}
@keyword recurse: If True (default), create namespace contexts for all
element children of C{dom_node}
@type recurse: C{bool}
@keyword default_namespace: Optional value to set as the default
namespace. Values from C{parent_context} would override this, as
would an C{xmlns} attribute in the C{dom_node}.
@type default_namespace: L{pyxb.namespace.Namespace}
@keyword target_namespace: Optional value to set as the target
namespace. Values from C{parent_context} would override this, as
would a C{targetNamespace} attribute in the C{dom_node}.
@type target_namespace: L{pyxb.namespace.Namespace}
@keyword in_scope_namespaces: Optional value to set as the initial set
of in-scope namespaces. The always-present namespaces are added to
this if necessary.
@type in_scope_namespaces: C{dict} mapping prefix C{string} to L{Namespace}.
"""
from pyxb.namespace import builtin
if dom_node is not None:
try:
assert dom_node.__namespaceContext is None
except AttributeError:
pass
dom_node.__namespaceContext = self
self.__defaultNamespace = default_namespace
self.__targetNamespace = target_namespace
if self.__InitialScopeNamespaces is None:
self.__BuildInitialPrefixMap()
self.__inScopeNamespaces = self.__InitialScopeNamespaces
self.__inScopePrefixes = self.__InitialScopePrefixes
self.__mutableInScopeNamespaces = False
self.__namespacePrefixCounter = 0
if parent_context is not None:
self.__inScopeNamespaces = parent_context.__inScopeNamespaces
self.__inScopePrefixes = parent_context.__inScopePrefixes
if parent_context.__mutableInScopeNamespaces:
self.__clonePrefixMap()
self.__defaultNamespace = parent_context.defaultNamespace()
self.__targetNamespace = parent_context.targetNamespace()
self.__fallbackToTargetNamespace = parent_context.__fallbackToTargetNamespace
if in_scope_namespaces is not None:
self.__clonePrefixMap()
self.__mutableInScopeNamespaces = True
for (pfx, ns) in six.iteritems(in_scope_namespaces):
self.__removePrefixMap(pfx)
self.__addPrefixMap(pfx, ns)
# Record a copy of the initial mapping, exclusive of namespace
# directives from C{dom_node}, so we can reset to that state.
self.__initialScopeNamespaces = self.__inScopeNamespaces
self.__initialScopePrefixes = self.__inScopePrefixes
self.__mutableInScopeNamespaces = False
if self.__targetNamespace is None:
self.__pendingReferencedNamespaces = set()
attribute_map = {}
if dom_node is not None:
if expanded_name is None:
expanded_name = pyxb.namespace.ExpandedName(dom_node)
for ai in range(dom_node.attributes.length):
attr = dom_node.attributes.item(ai)
if builtin.XMLNamespaces.uri() == attr.namespaceURI:
prefix = attr.localName
if 'xmlns' == prefix:
prefix = None
self.processXMLNS(prefix, attr.value)
else:
if attr.namespaceURI is not None:
uri = utility.NamespaceForURI(attr.namespaceURI, create_if_missing=True)
key = pyxb.namespace.ExpandedName(uri, attr.localName)
else:
key = pyxb.namespace.ExpandedName(None, attr.localName)
attribute_map[key] = attr.value
if finalize_target_namespace:
tns_uri = None
tns_attr = self._TargetNamespaceAttribute(expanded_name)
if tns_attr is not None:
tns_uri = attribute_map.get(tns_attr)
self.finalizeTargetNamespace(tns_uri, including_context=including_context)
# Store in each node the in-scope namespaces at that node;
# we'll need them for QName interpretation of attribute
# values.
if (dom_node is not None) and recurse:
from xml.dom import Node
assert Node.ELEMENT_NODE == dom_node.nodeType
for cn in dom_node.childNodes:
if Node.ELEMENT_NODE == cn.nodeType:
NamespaceContext(dom_node=cn, parent_context=self, recurse=True)
def interpretQName (self, name, namespace=None, default_no_namespace=False):
"""Convert the provided name into an L{ExpandedName}, i.e. a tuple of
L{Namespace} and local name.
If the name includes a prefix, that prefix must map to an in-scope
namespace in this context. Absence of a prefix maps to
L{defaultNamespace()}, which must be provided (or defaults to the
target namespace, if that is not absent).
@param name: A QName.
@type name: C{str} or C{unicode}
@param namespace: Optional namespace to use for unqualified names when
there is no default namespace. Note that a defined default namespace,
even if absent, supersedes this value.
@keyword default_no_namespace: If C{False} (default), an NCName in a
context where C{namespace} is C{None} and no default or fallback
namespace can be identified produces an exception. If C{True}, such an
NCName is implicitly placed in no namespace.
@return: An L{ExpandedName} tuple: ( L{Namespace}, C{str} )
@raise pyxb.QNameResolutionError: The prefix is not in scope
@raise pyxb.QNameResolutionError: No prefix is given and the default namespace is absent
"""
if isinstance(name, pyxb.namespace.ExpandedName):
return name
assert isinstance(name, six.string_types)
if 0 <= name.find(':'):
(prefix, local_name) = name.split(':', 1)
assert self.inScopeNamespaces() is not None
namespace = self.inScopeNamespaces().get(prefix)
if namespace is None:
raise pyxb.QNameResolutionError('No namespace declaration for prefix', name, self)
else:
local_name = name
# Context default supersedes caller-provided namespace
if self.defaultNamespace() is not None:
namespace = self.defaultNamespace()
# If there's no default namespace, but there is a fallback
# namespace, use that instead.
if (namespace is None) and self.__fallbackToTargetNamespace:
namespace = self.targetNamespace()
if (namespace is None) and not default_no_namespace:
raise pyxb.QNameResolutionError('NCName with no fallback/default namespace cannot be resolved', name, self)
return pyxb.namespace.ExpandedName(namespace, local_name)
def queueForResolution (self, component, depends_on=None):
"""Forwards to L{queueForResolution()<Namespace.queueForResolution>} in L{targetNamespace()}."""
assert isinstance(component, _Resolvable_mixin)
return self.targetNamespace().queueForResolution(component, depends_on)
## Local Variables:
## fill-column:78
## End:
```
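
A brief usage sketch follows; it is not part of pyxb/namespace/resolution.py and assumes a working PyXB installation. It builds a `NamespaceContext` with an explicit prefix map and resolves both a prefixed and an unprefixed QName with `interpretQName`.

```python
# Illustrative sketch, not part of pyxb/namespace/resolution.py.
from pyxb.namespace import resolution, utility

ex_ns = utility.NamespaceForURI('urn:example', create_if_missing=True)
ctx = resolution.NamespaceContext(default_namespace=ex_ns,
                                  target_namespace=ex_ns,
                                  in_scope_namespaces={'ex': ex_ns})

# Prefixed name: resolved through the in-scope prefix map.
print(ctx.interpretQName('ex:widget'))   # ExpandedName pairing urn:example with 'widget'
# Unprefixed name: falls back to the default namespace.
print(ctx.interpretQName('gadget'))
# An unknown prefix would raise pyxb.QNameResolutionError.
```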
#### File: pyxb/utils/xmlre.py
```python
import re
import logging
import pyxb.utils.unicode
from pyxb.utils import six
_log = logging.getLogger(__name__)
# AllEsc maps all the possible escape codes and wildcards in an XML schema
# regular expression into the corresponding CodePointSet.
_AllEsc = { }
def _InitializeAllEsc ():
"""Set the values in _AllEsc without introducing C{k} and C{v} into
the module."""
_AllEsc.update({ six.u('.'): pyxb.utils.unicode.WildcardEsc })
bs = six.unichr(0x5c)
for k, v in six.iteritems(pyxb.utils.unicode.SingleCharEsc):
_AllEsc[bs + six.text_type(k)] = v
for k, v in six.iteritems(pyxb.utils.unicode.MultiCharEsc):
_AllEsc[bs + six.text_type(k)] = v
for k, v in six.iteritems(pyxb.utils.unicode.catEsc):
_AllEsc[bs + six.text_type(k)] = v
for k, v in six.iteritems(pyxb.utils.unicode.complEsc):
_AllEsc[bs + six.text_type(k)] = v
for k, v in six.iteritems(pyxb.utils.unicode.IsBlockEsc):
_AllEsc[bs + six.text_type(k)] = v
_InitializeAllEsc()
class RegularExpressionError (ValueError):
"""Raised when a regular expression cannot be processed.."""
def __init__ (self, position, description):
self.position = position
ValueError.__init__(self, 'At %d: %s' % (position, description))
_CharClassEsc_re = re.compile(r'\\(?:(?P<cgProp>[pP]{(?P<charProp>[-A-Za-z0-9]+)})|(?P<cgClass>[^pP]))')
def _MatchCharClassEsc(text, position):
"""Parse a U{charClassEsc<http://www.w3.org/TR/xmlschema-2/#nt-charClassEsc>} term.
This is one of:
- U{SingleCharEsc<http://www.w3.org/TR/xmlschema-2/#nt-SingleCharEsc>},
an escaped single character such as C{E{\}n}
- U{MultiCharEsc<http://www.w3.org/TR/xmlschema-2/#nt-MultiCharEsc>},
an escape code that can match a range of characters,
e.g. C{E{\}s} to match certain whitespace characters
- U{catEsc<http://www.w3.org/TR/xmlschema-2/#nt-catEsc>}, the
C{E{\}pE{lb}...E{rb}} Unicode property escapes including
categories and blocks
- U{complEsc<http://www.w3.org/TR/xmlschema-2/#nt-complEsc>},
the C{E{\}PE{lb}...E{rb}} inverted Unicode property escapes
If the parsing fails, throws a RegularExpressionError.
@return: A pair C{(cps, p)} where C{cps} is a
L{pyxb.utils.unicode.CodePointSet} containing the code points
associated with the character class, and C{p} is the text offset
immediately following the escape sequence.
@raise RegularExpressionError: if the expression is syntactically
invalid.
"""
mo = _CharClassEsc_re.match(text, position)
if mo:
escape_code = mo.group(0)
cps = _AllEsc.get(escape_code)
if cps is not None:
return (cps, mo.end())
char_prop = mo.group('charProp')
if char_prop is not None:
if char_prop.startswith('Is'):
raise RegularExpressionError(position, 'Unrecognized Unicode block %s in %s' % (char_prop[2:], escape_code))
raise RegularExpressionError(position, 'Unrecognized character property %s' % (escape_code,))
raise RegularExpressionError(position, 'Unrecognized character class %s' % (escape_code,))
raise RegularExpressionError(position, "Unrecognized escape identifier at %s" % (text[position:],))
def _MatchPosCharGroup(text, position):
'''Parse a U{posCharGroup<http://www.w3.org/TR/xmlschema-2/#nt-posCharGroup>} term.
@return: A tuple C{(cps, fs, p)} where:
- C{cps} is a L{pyxb.utils.unicode.CodePointSet} containing the code points associated with the group;
- C{fs} is a C{bool} that is C{True} if the next character is the C{-} in a U{charClassSub<http://www.w3.org/TR/xmlschema-2/#nt-charClassSub>} and C{False} if the group is not part of a charClassSub;
- C{p} is the text offset immediately following the closing brace.
@raise RegularExpressionError: if the expression is syntactically
invalid.
'''
start_position = position
# DASH is just some unique object, used as a marker.
# It can't be unicode or a CodePointSet.
class DashClass:
pass
DASH = DashClass()
# We tokenize first, then go back and stick the ranges together.
tokens = []
has_following_subtraction = False
while True:
if position >= len(text):
raise RegularExpressionError(position, "Incomplete character class expression, missing closing ']'")
ch = text[position]
if ch == six.u('['):
# Only allowed if this is a subtraction
if not tokens or tokens[-1] is not DASH:
raise RegularExpressionError(position, "'[' character not allowed in character class")
has_following_subtraction = True
# For a character class subtraction, the "-[" are not part of the
# posCharGroup, so undo reading the dash
tokens.pop()
position = position - 1
break
elif ch == six.u(']'):
# End
break
elif ch == six.unichr(0x5c): # backslash
cps, position = _MatchCharClassEsc(text, position)
single_char = cps.asSingleCharacter()
if single_char is not None:
tokens.append(single_char)
else:
tokens.append(cps)
elif ch == six.u('-'):
# We need to distinguish between "-" and "\-". So we use
# DASH for a plain "-", and u"-" for a "\-".
tokens.append(DASH)
position = position + 1
else:
tokens.append(ch)
position = position + 1
if not tokens:
raise RegularExpressionError(position, "Empty character class not allowed")
# At the start or end of the character group, a dash has to be a literal
if tokens[0] is DASH:
tokens[0] = six.u('-')
if tokens[-1] is DASH:
tokens[-1] = six.u('-')
result_cps = pyxb.utils.unicode.CodePointSet()
cur_token = 0
while cur_token < len(tokens):
start = tokens[cur_token]
if cur_token + 2 < len(tokens) and tokens[cur_token + 1] is DASH:
end = tokens[cur_token + 2]
if not isinstance(start, six.text_type) or not isinstance(end, six.text_type):
if start is DASH or end is DASH:
raise RegularExpressionError(start_position, 'Two dashes in a row is not allowed in the middle of a character class.')
raise RegularExpressionError(start_position, 'Dashes must be surrounded by characters, not character class escapes. %r %r' %(start, end))
if start > end:
raise RegularExpressionError(start_position, 'Character ranges must have the lowest character first')
result_cps.add((ord(start), ord(end)))
cur_token = cur_token + 3
else:
if start is DASH:
raise RegularExpressionError(start_position, 'Dash without an initial character')
elif isinstance(start, six.text_type):
result_cps.add(ord(start))
else:
result_cps.extend(start)
cur_token = cur_token + 1
return result_cps, has_following_subtraction, position
def _MatchCharClassExpr(text, position):
'''Parse a U{charClassExpr<http://www.w3.org/TR/xmlschema-2/#nt-charClassExpr>}.
These are XML regular expression classes such as C{[abc]}, C{[a-c]}, C{[^abc]}, or C{[a-z-[q]]}.
@param text: The complete text of the regular expression being
translated. The first character must be the C{[} starting a
character class.
@param position: The offset of the start of the character group.
@return: A pair C{(cps, p)} where C{cps} is a
L{pyxb.utils.unicode.CodePointSet} containing the code points
associated with the property, and C{p} is the text offset
immediately following the closing brace.
@raise RegularExpressionError: if the expression is syntactically
invalid.
'''
if position >= len(text):
raise RegularExpressionError(position, 'Missing character class expression')
if six.u('[') != text[position]:
raise RegularExpressionError(position, "Expected start of character class expression, got '%s'" % (text[position],))
position = position + 1
if position >= len(text):
raise RegularExpressionError(position, 'Missing character class expression')
negated = (text[position] == '^')
if negated:
position = position + 1
result_cps, has_following_subtraction, position = _MatchPosCharGroup(text, position)
if negated:
result_cps = result_cps.negate()
if has_following_subtraction:
assert text[position] == six.u('-')
assert text[position + 1] == six.u('[')
position = position + 1
sub_cps, position = _MatchCharClassExpr(text, position)
result_cps.subtract(sub_cps)
if position >= len(text) or text[position] != six.u(']'):
raise RegularExpressionError(position, "Expected ']' to end character class")
return result_cps, position + 1
def MaybeMatchCharacterClass (text, position):
"""Attempt to match a U{character class expression
<http://www.w3.org/TR/xmlschema-2/#nt-charClassExpr>}.
@param text: The complete text of the regular expression being
translated
@param position: The offset of the start of the potential
expression.
@return: C{None} if C{position} does not begin a character class
expression; otherwise a pair C{(cps, p)} where C{cps} is a
L{pyxb.utils.unicode.CodePointSet} containing the code points associated with
the property, and C{p} is the text offset immediately following
the closing brace."""
if position >= len(text):
return None
c = text[position]
np = position + 1
if '.' == c:
return (pyxb.utils.unicode.WildcardEsc, np)
if '[' == c:
return _MatchCharClassExpr(text, position)
if '\\' == c:
return _MatchCharClassEsc(text, position)
return None
def XMLToPython (pattern):
"""Convert the given pattern to the format required for Python
regular expressions.
@param pattern: A Unicode string defining a pattern consistent
with U{XML regular
expressions<http://www.w3.org/TR/xmlschema-2/index.html#regexs>}.
@return: A Unicode string specifying a Python regular expression
that matches the same language as C{pattern}."""
assert isinstance(pattern, six.text_type)
new_pattern_elts = []
new_pattern_elts.append('^')
position = 0
while position < len(pattern):
cg = MaybeMatchCharacterClass(pattern, position)
if cg is None:
ch = pattern[position]
if ch == six.u('^') or ch == six.u('$'):
# These characters have no special meaning in XSD. But they
# match start and end of string in Python, so they have to
# be escaped.
new_pattern_elts.append(six.unichr(0x5c) + ch)
else:
new_pattern_elts.append(ch)
position += 1
else:
(cps, position) = cg
new_pattern_elts.append(cps.asPattern())
new_pattern_elts.append('$')
return ''.join(new_pattern_elts)
```
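
The translator above can be exercised directly; the sketch below is illustrative only (it assumes PyXB is importable) and shows that `XMLToPython` anchors the translated pattern with `^` and `$`, so the resulting Python regex matches whole strings rather than prefixes.

```python
# Illustrative sketch, not part of pyxb/utils/xmlre.py.
import re
from pyxb.utils.xmlre import XMLToPython

py_pattern = XMLToPython(u'[A-Z]{2}[0-9]+')  # an XSD pattern facet value
rx = re.compile(py_pattern)
print(bool(rx.match(u'AB123')))    # True
print(bool(rx.match(u'AB123x')))   # False: the translation is anchored at both ends
# Escapes such as \d or \p{L} are translated through the tables built by
# _InitializeAllEsc() above.
```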
#### File: special/_precompute/gammainc_asy.py
```python
from __future__ import division, print_function, absolute_import
import os
from scipy.special._precompute.utils import lagrange_inversion
try:
import mpmath as mp
except ImportError:
pass
def compute_a(n):
"""a_k from DLMF 5.11.6"""
a = [mp.sqrt(2)/2]
for k in range(1, n):
ak = a[-1]/k
for j in range(1, len(a)):
ak -= a[j]*a[-j]/(j + 1)
ak /= a[0]*(1 + mp.mpf(1)/(k + 1))
a.append(ak)
return a
def compute_g(n):
"""g_k from DLMF 5.11.3/5.11.5"""
a = compute_a(2*n)
g = []
for k in range(n):
g.append(mp.sqrt(2)*mp.rf(0.5, k)*a[2*k])
return g
def eta(lam):
"""Function from DLMF 8.12.1 shifted to be centered at 0."""
if lam > 0:
return mp.sqrt(2*(lam - mp.log(lam + 1)))
elif lam < 0:
return -mp.sqrt(2*(lam - mp.log(lam + 1)))
else:
return 0
def compute_alpha(n):
"""alpha_n from DLMF 8.12.13"""
coeffs = mp.taylor(eta, 0, n - 1)
return lagrange_inversion(coeffs)
def compute_d(K, N):
"""d_{k, n} from DLMF 8.12.12"""
M = N + 2*K
d0 = [-mp.mpf(1)/3]
alpha = compute_alpha(M + 2)
for n in range(1, M):
d0.append((n + 2)*alpha[n+2])
d = [d0]
g = compute_g(K)
for k in range(1, K):
dk = []
for n in range(M - 2*k):
dk.append((-1)**k*g[k]*d[0][n] + (n + 2)*d[k-1][n+2])
d.append(dk)
for k in range(K):
d[k] = d[k][:N]
return d
header = \
r"""/* This file was automatically generated by _precomp/gammainc.py.
* Do not edit it manually!
*/
#ifndef IGAM_H
#define IGAM_H
#define K {}
#define N {}
double d[K][N] =
{{"""
footer = \
r"""
#endif
"""
def main():
print(__doc__)
K = 25
N = 25
with mp.workdps(50):
d = compute_d(K, N)
fn = os.path.join(os.path.dirname(__file__), '..', 'cephes', 'igam.h')
with open(fn + '.new', 'w') as f:
f.write(header.format(K, N))
for k, row in enumerate(d):
row = map(lambda x: mp.nstr(x, 17, min_fixed=0, max_fixed=0), row)
f.write('{')
f.write(", ".join(row))
if k < K - 1:
f.write('},\n')
else:
f.write('}};\n')
f.write(footer)
os.rename(fn + '.new', fn)
if __name__ == "__main__":
main()
```
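
The sketch below is illustrative only. It assumes mpmath is installed and that the module is importable as `scipy.special._precompute.gammainc_asy` (these precompute scripts live in the SciPy source tree and may not ship with binary installs). It evaluates a small block of the d_{k,n} coefficients, whose leading entry d_{0,0} = -1/3 can be checked by eye.

```python
# Illustrative sketch, not part of gammainc_asy.py.
import mpmath as mp
from scipy.special._precompute.gammainc_asy import compute_d

with mp.workdps(30):
    d = compute_d(3, 4)   # 3 rows in k, 4 coefficients in n per row
for k, row in enumerate(d):
    print(k, [mp.nstr(x, 8) for x in row])   # row 0 starts with -0.33333333
```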
#### File: python2.7/test/test_multibytecodec_support.py
```python
import codecs
import os
import re
import sys
import unittest
from httplib import HTTPException
from test import test_support
from StringIO import StringIO
class TestBase:
encoding = '' # codec name
codec = None # codec tuple (with 4 elements)
tstring = '' # string to test StreamReader
codectests = None # must set. codec test tuple
roundtriptest = 1 # set if roundtrip is possible with unicode
has_iso10646 = 0 # set if this encoding contains whole iso10646 map
xmlcharnametest = None # string to test xmlcharrefreplace
unmappedunicode = u'\udeee' # a unicode code point that is not mapped.
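# Illustrative only (based on how CPython's test_codecencodings_* modules use
# this harness; not part of this file). A concrete codec test typically looks
# like:
#
#   class Test_MyCodec(TestBase, unittest.TestCase):
#       encoding = 'gb2312'
#       tstring = load_teststring('gb2312')   # (native bytes, utf-8 bytes) pair
#       codectests = (
#           # (source, error scheme, expected); expected None means "must raise"
#           ('\xff\xff', 'strict', None),
#       )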
def setUp(self):
if self.codec is None:
self.codec = codecs.lookup(self.encoding)
self.encode = self.codec.encode
self.decode = self.codec.decode
self.reader = self.codec.streamreader
self.writer = self.codec.streamwriter
self.incrementalencoder = self.codec.incrementalencoder
self.incrementaldecoder = self.codec.incrementaldecoder
def test_chunkcoding(self):
for native, utf8 in zip(*[StringIO(f).readlines()
for f in self.tstring]):
u = self.decode(native)[0]
self.assertEqual(u, utf8.decode('utf-8'))
if self.roundtriptest:
self.assertEqual(native, self.encode(u)[0])
def test_errorhandle(self):
for source, scheme, expected in self.codectests:
if isinstance(source, bytes):
func = self.decode
else:
func = self.encode
if expected:
result = func(source, scheme)[0]
if func is self.decode:
self.assertTrue(type(result) is unicode, type(result))
self.assertEqual(result, expected,
'%r.decode(%r, %r)=%r != %r'
% (source, self.encoding, scheme, result,
expected))
else:
self.assertTrue(type(result) is bytes, type(result))
self.assertEqual(result, expected,
'%r.encode(%r, %r)=%r != %r'
% (source, self.encoding, scheme, result,
expected))
else:
self.assertRaises(UnicodeError, func, source, scheme)
def test_xmlcharrefreplace(self):
if self.has_iso10646:
self.skipTest('encoding contains full ISO 10646 map')
s = u"\u0b13\u0b23\u0b60 nd eggs"
self.assertEqual(
self.encode(s, "xmlcharrefreplace")[0],
"ଓଣୠ nd eggs"
)
def test_customreplace_encode(self):
if self.has_iso10646:
self.skipTest('encoding contains full ISO 10646 map')
from htmlentitydefs import codepoint2name
def xmlcharnamereplace(exc):
if not isinstance(exc, UnicodeEncodeError):
raise TypeError("don't know how to handle %r" % exc)
l = []
for c in exc.object[exc.start:exc.end]:
if ord(c) in codepoint2name:
l.append(u"&%s;" % codepoint2name[ord(c)])
else:
l.append(u"&#%d;" % ord(c))
return (u"".join(l), exc.end)
codecs.register_error("test.xmlcharnamereplace", xmlcharnamereplace)
if self.xmlcharnametest:
sin, sout = self.xmlcharnametest
else:
sin = u"\xab\u211c\xbb = \u2329\u1234\u232a"
sout = "«ℜ» = ⟨ሴ⟩"
self.assertEqual(self.encode(sin,
"test.xmlcharnamereplace")[0], sout)
def test_callback_wrong_objects(self):
def myreplace(exc):
return (ret, exc.end)
codecs.register_error("test.cjktest", myreplace)
for ret in ([1, 2, 3], [], None, object(), 'string', ''):
self.assertRaises(TypeError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_callback_long_index(self):
def myreplace(exc):
return (u'x', long(exc.end))
codecs.register_error("test.cjktest", myreplace)
self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh',
'test.cjktest'), ('abcdxefgh', 9))
def myreplace(exc):
return (u'x', sys.maxint + 1)
codecs.register_error("test.cjktest", myreplace)
self.assertRaises(IndexError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_callback_None_index(self):
def myreplace(exc):
return (u'x', None)
codecs.register_error("test.cjktest", myreplace)
self.assertRaises(TypeError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_callback_backward_index(self):
def myreplace(exc):
if myreplace.limit > 0:
myreplace.limit -= 1
return (u'REPLACED', 0)
else:
return (u'TERMINAL', exc.end)
myreplace.limit = 3
codecs.register_error("test.cjktest", myreplace)
self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh',
'test.cjktest'),
('abcdREPLACEDabcdREPLACEDabcdREPLACEDabcdTERMINALefgh', 9))
def test_callback_forward_index(self):
def myreplace(exc):
return (u'REPLACED', exc.end + 2)
codecs.register_error("test.cjktest", myreplace)
self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh',
'test.cjktest'), ('abcdREPLACEDgh', 9))
def test_callback_index_outofbound(self):
def myreplace(exc):
return (u'TERM', 100)
codecs.register_error("test.cjktest", myreplace)
self.assertRaises(IndexError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_incrementalencoder(self):
UTF8Reader = codecs.getreader('utf-8')
for sizehint in [None] + range(1, 33) + \
[64, 128, 256, 512, 1024]:
istream = UTF8Reader(StringIO(self.tstring[1]))
ostream = StringIO()
encoder = self.incrementalencoder()
while 1:
if sizehint is not None:
data = istream.read(sizehint)
else:
data = istream.read()
if not data:
break
e = encoder.encode(data)
ostream.write(e)
self.assertEqual(ostream.getvalue(), self.tstring[0])
def test_incrementaldecoder(self):
UTF8Writer = codecs.getwriter('utf-8')
for sizehint in [None, -1] + range(1, 33) + \
[64, 128, 256, 512, 1024]:
istream = StringIO(self.tstring[0])
ostream = UTF8Writer(StringIO())
decoder = self.incrementaldecoder()
while 1:
data = istream.read(sizehint)
if not data:
break
else:
u = decoder.decode(data)
ostream.write(u)
self.assertEqual(ostream.getvalue(), self.tstring[1])
def test_incrementalencoder_error_callback(self):
inv = self.unmappedunicode
e = self.incrementalencoder()
self.assertRaises(UnicodeEncodeError, e.encode, inv, True)
e.errors = 'ignore'
self.assertEqual(e.encode(inv, True), '')
e.reset()
def tempreplace(exc):
return (u'called', exc.end)
codecs.register_error('test.incremental_error_callback', tempreplace)
e.errors = 'test.incremental_error_callback'
self.assertEqual(e.encode(inv, True), 'called')
# again
e.errors = 'ignore'
self.assertEqual(e.encode(inv, True), '')
def test_streamreader(self):
UTF8Writer = codecs.getwriter('utf-8')
for name in ["read", "readline", "readlines"]:
for sizehint in [None, -1] + range(1, 33) + \
[64, 128, 256, 512, 1024]:
istream = self.reader(StringIO(self.tstring[0]))
ostream = UTF8Writer(StringIO())
func = getattr(istream, name)
while 1:
data = func(sizehint)
if not data:
break
if name == "readlines":
ostream.writelines(data)
else:
ostream.write(data)
self.assertEqual(ostream.getvalue(), self.tstring[1])
def test_streamwriter(self):
readfuncs = ('read', 'readline', 'readlines')
UTF8Reader = codecs.getreader('utf-8')
for name in readfuncs:
for sizehint in [None] + range(1, 33) + \
[64, 128, 256, 512, 1024]:
istream = UTF8Reader(StringIO(self.tstring[1]))
ostream = self.writer(StringIO())
func = getattr(istream, name)
while 1:
if sizehint is not None:
data = func(sizehint)
else:
data = func()
if not data:
break
if name == "readlines":
ostream.writelines(data)
else:
ostream.write(data)
self.assertEqual(ostream.getvalue(), self.tstring[0])
class TestBase_Mapping(unittest.TestCase):
pass_enctest = []
pass_dectest = []
supmaps = []
codectests = []
def __init__(self, *args, **kw):
unittest.TestCase.__init__(self, *args, **kw)
try:
self.open_mapping_file().close() # test it to report the error early
except (IOError, HTTPException):
self.skipTest("Could not retrieve "+self.mapfileurl)
def open_mapping_file(self):
return test_support.open_urlresource(self.mapfileurl)
def test_mapping_file(self):
if self.mapfileurl.endswith('.xml'):
self._test_mapping_file_ucm()
else:
self._test_mapping_file_plain()
def _test_mapping_file_plain(self):
_unichr = lambda c: eval("u'\\U%08x'" % int(c, 16))
unichrs = lambda s: u''.join(_unichr(c) for c in s.split('+'))
urt_wa = {}
with self.open_mapping_file() as f:
for line in f:
if not line:
break
data = line.split('#')[0].strip().split()
if len(data) != 2:
continue
csetval = eval(data[0])
if csetval <= 0x7F:
csetch = chr(csetval & 0xff)
elif csetval >= 0x1000000:
csetch = chr(csetval >> 24) + chr((csetval >> 16) & 0xff) + \
chr((csetval >> 8) & 0xff) + chr(csetval & 0xff)
elif csetval >= 0x10000:
csetch = chr(csetval >> 16) + \
chr((csetval >> 8) & 0xff) + chr(csetval & 0xff)
elif csetval >= 0x100:
csetch = chr(csetval >> 8) + chr(csetval & 0xff)
else:
continue
unich = unichrs(data[1])
if unich == u'\ufffd' or unich in urt_wa:
continue
urt_wa[unich] = csetch
self._testpoint(csetch, unich)
def _test_mapping_file_ucm(self):
with self.open_mapping_file() as f:
ucmdata = f.read()
uc = re.findall('<a u="([A-F0-9]{4})" b="([0-9A-F ]+)"/>', ucmdata)
for uni, coded in uc:
unich = unichr(int(uni, 16))
codech = ''.join(chr(int(c, 16)) for c in coded.split())
self._testpoint(codech, unich)
def test_mapping_supplemental(self):
for mapping in self.supmaps:
self._testpoint(*mapping)
def _testpoint(self, csetch, unich):
if (csetch, unich) not in self.pass_enctest:
try:
self.assertEqual(unich.encode(self.encoding), csetch)
except UnicodeError, exc:
self.fail('Encoding failed while testing %s -> %s: %s' % (
repr(unich), repr(csetch), exc.reason))
if (csetch, unich) not in self.pass_dectest:
try:
self.assertEqual(csetch.decode(self.encoding), unich)
except UnicodeError, exc:
self.fail('Decoding failed while testing %s -> %s: %s' % (
repr(csetch), repr(unich), exc.reason))
def test_errorhandle(self):
for source, scheme, expected in self.codectests:
if isinstance(source, bytes):
func = source.decode
else:
func = source.encode
if expected:
if isinstance(source, bytes):
result = func(self.encoding, scheme)
self.assertTrue(type(result) is unicode, type(result))
self.assertEqual(result, expected,
'%r.decode(%r, %r)=%r != %r'
% (source, self.encoding, scheme, result,
expected))
else:
result = func(self.encoding, scheme)
self.assertTrue(type(result) is bytes, type(result))
self.assertEqual(result, expected,
'%r.encode(%r, %r)=%r != %r'
% (source, self.encoding, scheme, result,
expected))
else:
self.assertRaises(UnicodeError, func, self.encoding, scheme)
def load_teststring(name):
dir = os.path.join(os.path.dirname(__file__), 'cjkencodings')
with open(os.path.join(dir, name + '.txt'), 'rb') as f:
encoded = f.read()
with open(os.path.join(dir, name + '-utf8.txt'), 'rb') as f:
utf8 = f.read()
return encoded, utf8
```
#### File: python2.7/test/test_syntax.py
```python
import re
import unittest
import warnings
from test import test_support
class SyntaxTestCase(unittest.TestCase):
def _check_error(self, code, errtext,
filename="<testcase>", mode="exec", subclass=None):
"""Check that compiling code raises SyntaxError with errtext.
        errtext is a regular expression that must be present in the
        text of the exception raised. If subclass is specified it
is the expected subclass of SyntaxError (e.g. IndentationError).
"""
try:
compile(code, filename, mode)
except SyntaxError, err:
if subclass and not isinstance(err, subclass):
self.fail("SyntaxError is not a %s" % subclass.__name__)
mo = re.search(errtext, str(err))
if mo is None:
self.fail("%s did not contain '%r'" % (err, errtext,))
else:
self.fail("compile() did not raise SyntaxError")
def test_paren_arg_with_default(self):
self._check_error("def f((x)=23): pass",
"parenthesized arg with default")
def test_assign_call(self):
self._check_error("f() = 1", "assign")
def test_assign_del(self):
self._check_error("del f()", "delete")
def test_global_err_then_warn(self):
# Bug tickler: The SyntaxError raised for one global statement
# shouldn't be clobbered by a SyntaxWarning issued for a later one.
source = re.sub('(?m)^ *:', '', """\
:def error(a):
: global a # SyntaxError
:def warning():
: b = 1
: global b # SyntaxWarning
:""")
warnings.filterwarnings(action='ignore', category=SyntaxWarning)
self._check_error(source, "global")
warnings.filters.pop(0)
def test_break_outside_loop(self):
self._check_error("break", "outside loop")
def test_delete_deref(self):
source = re.sub('(?m)^ *:', '', """\
:def foo(x):
: def bar():
: print x
: del x
:""")
self._check_error(source, "nested scope")
def test_unexpected_indent(self):
self._check_error("foo()\n bar()\n", "unexpected indent",
subclass=IndentationError)
def test_no_indent(self):
self._check_error("if 1:\nfoo()", "expected an indented block",
subclass=IndentationError)
def test_bad_outdent(self):
self._check_error("if 1:\n foo()\n bar()",
"unindent does not match .* level",
subclass=IndentationError)
def test_kwargs_last(self):
self._check_error("int(base=10, '2')", "non-keyword arg")
def test_main():
test_support.run_unittest(SyntaxTestCase)
from test import test_syntax
with test_support.check_py3k_warnings(("backquote not supported",
SyntaxWarning)):
test_support.run_doctest(test_syntax, verbosity=True)
if __name__ == "__main__":
test_main()
```
|
{
"source": "JeiKeiLim/integer_calculator",
"score": 3
}
|
#### File: integer_calculator/tests/test_calculator.py
```python
from src.calculator import Adder, Divider, Multiplier, Subtractor
class TestCalculator:
"""Calculator tester class."""
def test_add(self):
"""Adder test function."""
assert Adder().operate(5, 2) == 7
def test_sub(self):
"""Subtractor test function."""
assert Subtractor().operate(5, 2) == 3
def test_mul(self):
"""Multiplier test function."""
assert Multiplier().operate(5, 2) == 10
def test_div(self):
"""Divider test function."""
assert Divider().operate(5, 2) == 2
```
|
{
"source": "JeiKeiLim/torch-kindle",
"score": 3
}
|
#### File: kindle/generator/identity.py
```python
from typing import Any, Dict, List
import numpy as np
from torch import nn
from kindle.generator.base_generator import GeneratorAbstract
class IdentityGenerator(GeneratorAbstract):
"""Identity module."""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
@property
def out_channel(self) -> int:
return self.in_channel
@property
def in_channel(self) -> int:
return self.in_channels[self.from_idx] # type: ignore
@property
def kwargs(self) -> Dict[str, Any]:
return self._get_kwargs(nn.Identity, self.args)
def compute_out_shape(self, size: np.ndarray, repeat: int = 1) -> List[int]:
return list(size)
def __call__(self, repeat: int = 1) -> nn.Module:
return self._get_module(nn.Identity(**self.kwargs))
```
#### File: kindle/generator/mobilevit.py
```python
from typing import Any, Dict, List, Union
import numpy as np
import torch
from torch import nn
from kindle.generator.base_generator import GeneratorAbstract
class MobileViTBlockGenerator(GeneratorAbstract):
"""MobileViTBlock generator."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def in_channel(self) -> int:
"""Get in channel size."""
# error: Value of type "Optional[List[int]]" is not indexable
return self.in_channels[self.from_idx] # type: ignore
@property
def out_channel(self) -> int:
"""Get out channel size."""
return self.in_channel
def compute_out_shape(
self, size: Union[list, np.ndarray], repeat: int = 1
) -> List[int]:
"""Compute output shape."""
with torch.no_grad():
module: nn.Module = self(repeat=repeat)
module_out: torch.Tensor = module(torch.zeros([1, *list(size)]))
return list(module_out.shape[-3:])
@property
def base_module(self) -> nn.Module:
"""Returns module class from kindle.common_modules based on the class name."""
return getattr(__import__("kindle.modules", fromlist=[""]), self.name)
@property
def kwargs(self) -> Dict[str, Any]:
args = [self.in_channel, *self.args]
kwargs = self._get_kwargs(self.base_module, args)
return kwargs
def __call__(self, repeat: int = 1):
module = self.base_module(**self.kwargs)
return self._get_module(module)
```
#### File: kindle/modules/mobilevit.py
```python
from typing import Tuple, Union
import torch
from einops import rearrange
from torch import nn
from kindle.modules.activation import Activation
from kindle.modules.conv import Conv
class PreNorm(nn.Module):
"""Pre-normalization layer."""
def __init__(self, channels: int, layer: nn.Module) -> None:
"""Initialize PreNorm module.
Args:
channels: number of channels to normalize.
layer: layer module to pre-norm.
"""
super().__init__()
self.norm = nn.LayerNorm(channels)
self.layer = layer
def forward(self, x, **kwargs):
"""Normalize input and forward."""
return self.layer(self.norm(x), **kwargs)
class FeedForward(nn.Module):
"""FeedForward module for Transformer."""
def __init__(
self,
channels: int,
hidden_channels: int,
dropout: float = 0.0,
activation: Union[str, None] = "SiLU",
) -> None:
"""Initialize FeedForward module.
Args:
channels: input channels
hidden_channels: hidden channels
dropout: dropout probability.
activation: Name of the activation to use in the middle of Linear modules.
"""
super().__init__()
self.net = nn.Sequential(
nn.Linear(channels, hidden_channels),
Activation(activation)(),
nn.Dropout(dropout),
nn.Linear(hidden_channels, channels),
nn.Dropout(dropout),
)
def forward(self, x):
"""Forward module."""
return self.net(x)
class Attention(nn.Module):
"""Attention module for Transformer."""
def __init__(
self,
channels: int,
heads: int = 8,
channels_head: int = 64,
dropout: float = 0.0,
) -> None:
"""Initialize Attention module.
Args:
channels: input channels.
heads: number of heads in multi-head attention.
            channels_head: number of channels to use in heads.
dropout: dropout probability.
"""
super().__init__()
hidden_channels = channels_head * heads
project_out = not (heads == 1 and channels_head == channels)
self.heads = heads
self.scale = channels_head ** -0.5
self.attend = nn.Softmax(dim=-1)
self.to_qkv = nn.Linear(channels, hidden_channels * 3, bias=False)
self.to_out = (
nn.Sequential(nn.Linear(hidden_channels, channels), nn.Dropout(dropout))
if project_out
else nn.Identity()
)
def forward(self, x):
"""Forward attention module."""
qkv = self.to_qkv(x).chunk(3, dim=-1)
q, k, v = map( # pylint: disable=invalid-name
lambda t: rearrange(t, "b p n (h d) -> b p h n d", h=self.heads), qkv
)
dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale
attn = self.attend(dots)
out = torch.matmul(attn, v)
out = rearrange(out, "b p h n d -> b p n (h d)")
return self.to_out(out)
class Transformer(nn.Module):
"""Transformer module for MobileViTBlock."""
def __init__(
self,
channels: int,
depth: int,
heads: int,
channels_head: int,
channels_mlp: int,
dropout: float = 0.0,
activation: Union[str, None] = "SiLU",
) -> None:
"""Initialize Transformer module.
Args:
channels: input channels.
depth: depth of the transformer.
heads: number of heads to use in multi-head attention.
            channels_head: number of channels to use in heads.
            channels_mlp: number of channels to use in MLP.
dropout: dropout probability.
activation: Name of the activation to use in the middle of Linear modules.
"""
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(
nn.ModuleList(
[
PreNorm(
channels, Attention(channels, heads, channels_head, dropout)
),
PreNorm(
channels,
FeedForward(
channels, channels_mlp, dropout, activation=activation
),
),
]
)
)
def forward(self, x):
"""Forward Transformer layer."""
for attention, feed_forward in self.layers:
x = attention(x) + x
x = feed_forward(x) + x
return x
class MobileViTBlock(nn.Module):
"""Mobile ViT Block (https://arxiv.org/pdf/2110.02178.pdf)."""
def __init__(
self,
in_channels: int,
conv_channels: int,
mlp_channels: int,
depth: int,
kernel_size: int = 3,
patch_size: Union[int, Tuple[int, int]] = 2,
dropout: float = 0.0,
activation: Union[str, None] = "SiLU",
) -> None:
"""Initialize Mobile ViT Block.
Args:
in_channels: number of incoming channels
conv_channels: number of channels to use in convolution.
mlp_channels: number of channels to use in MLP.
depth: depth of the transformer.
            kernel_size: kernel size in nxn convolution.
            patch_size: height and width of the patches the feature map is split into.
dropout: dropout probability.
activation: Name of the activation to use in the middle of Linear modules.
"""
super().__init__()
self.patch_w, self.patch_h = (
(patch_size, patch_size) if isinstance(patch_size, int) else patch_size
)
self.conv1_nxn = Conv(
in_channels, in_channels, kernel_size, activation=activation
)
self.conv2_1x1 = Conv(in_channels, conv_channels, 1, activation=activation)
self.transformer = Transformer(
conv_channels, depth, 1, 32, mlp_channels, dropout=dropout
)
self.conv3_1x1 = Conv(conv_channels, in_channels, 1, activation=activation)
self.conv4_nxn = Conv(
2 * in_channels, in_channels, kernel_size, activation=activation
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Forward MobileViTBlock."""
y = x.clone()
x = self.conv1_nxn(x)
x = self.conv2_1x1(x)
_, _, h, w = x.shape # pylint: disable=invalid-name
x = rearrange(
x,
"b d (h ph) (w pw) -> b (ph pw) (h w) d",
ph=self.patch_h,
pw=self.patch_w,
)
x = self.transformer(x)
x = rearrange(
x,
"b (ph pw) (h w) d -> b d (h ph) (w pw)",
h=h // self.patch_h,
w=w // self.patch_w,
ph=self.patch_h,
pw=self.patch_w,
)
x = self.conv3_1x1(x)
x = torch.cat((x, y), 1)
x = self.conv4_nxn(x)
return x
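# A minimal usage sketch (illustrative only, not part of the upstream module): it checks that
# MobileViTBlock keeps the input's channel count and spatial size. The 64/96/192 channel
# numbers and the 32x32 input are arbitrary picks, and the shape claim assumes the kindle
# Conv wrapper pads to "same" with stride 1, as its autopad helper suggests.
if __name__ == "__main__":
    block = MobileViTBlock(in_channels=64, conv_channels=96, mlp_channels=192, depth=2)
    demo_in = torch.rand(1, 64, 32, 32)  # H and W must be divisible by patch_size (default 2)
    demo_out = block(demo_in)
    print(demo_in.shape, "->", demo_out.shape)  # expected: torch.Size([1, 64, 32, 32]) for both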
```
#### File: kindle/utils/torch_utils.py
```python
import math
from typing import List, Tuple, Union
import numpy as np
import torch
from torch import nn
from torch.utils.data.sampler import SubsetRandomSampler
def split_dataset_index(
n_data: int, split_ratio: float = 0.1
) -> Tuple[SubsetRandomSampler, SubsetRandomSampler]:
"""Split dataset indices with split_ratio.
Args:
n_data: number of total data
split_ratio: split ratio (0.0 ~ 1.0)
Returns:
SubsetRandomSampler ({split_ratio} ~ 1.0)
SubsetRandomSampler (0 ~ {split_ratio})
"""
indices = np.arange(n_data)
split = int(split_ratio * indices.shape[0])
train_idx = indices[split:]
valid_idx = indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
return train_sampler, valid_sampler
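# Worked example (illustrative comment, not in the upstream module): with n_data=100 and
# split_ratio=0.2, the helper above returns a train sampler over indices 20..99 (80 items)
# and a validation sampler over indices 0..19 (20 items); both can be handed to
# torch.utils.data.DataLoader via its sampler= argument.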
def make_divisible(n_channel: Union[int, float], divisor: int = 8) -> int:
"""Convert {n_channel} to divisible by {divisor}
Args:
n_channel: number of channels.
divisor: divisor to be used.
Returns:
Ex) n_channel=22, divisor=8
ceil(22/8) * 8 = 24
"""
return int(math.ceil(n_channel / divisor) * divisor)
def autopad(
kernel_size: Union[int, List[int]], padding: Union[int, None] = None
) -> Union[int, List[int]]:
"""Auto padding calculation for pad='same' in TensorFlow."""
# Pad to 'same'
if isinstance(kernel_size, int):
kernel_size = [kernel_size]
return padding or [x // 2 for x in kernel_size]
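# Worked example (illustrative comment, not in the upstream module):
#   autopad(3) -> [1]          # 3x3 kernel: pad 1 keeps the spatial size ("same")
#   autopad([3, 5]) -> [1, 2]  # per-dimension padding for an asymmetric kernel
#   autopad(3, 2) -> 2         # a non-zero explicit padding is passed through unchanged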
def count_model_params(
model: torch.nn.Module,
) -> int:
"""Count model's parameters."""
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def fuse_conv_and_batch_norm(conv: nn.Conv2d, batch_norm: nn.BatchNorm2d) -> nn.Conv2d:
"""Fuse convolution and batchnorm layers.
https://tehnokv.com/posts/fusing-batchnorm-and-conv/
Args:
conv: convolution module.
batch_norm: Batch normalization module directly connected to the conv module.
Return:
Fused conv with batch norm.
"""
fused_conv = (
nn.Conv2d(
conv.in_channels,
conv.out_channels,
kernel_size=conv.kernel_size, # type: ignore
stride=conv.stride, # type: ignore
padding=conv.padding, # type: ignore
groups=conv.groups,
bias=True,
)
.requires_grad_(False)
.to(conv.weight.device)
)
# Fusing weight
w_conv = conv.weight.clone().view(conv.out_channels, -1)
w_batch_norm = torch.diag(
batch_norm.weight.div(
torch.sqrt(batch_norm.eps + batch_norm.running_var) # type: ignore
)
)
fused_conv.weight.copy_(
torch.mm(w_batch_norm, w_conv).view(fused_conv.weight.size())
)
# Fusing bias
if conv.bias is None:
b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device)
else:
b_conv = conv.bias
b_batch_norm = batch_norm.bias - batch_norm.weight.mul(
batch_norm.running_mean # type: ignore
).div(
torch.sqrt(batch_norm.running_var + batch_norm.eps) # type: ignore
)
fused_conv.bias.copy_( # type: ignore
torch.mm(w_batch_norm, b_conv.reshape(-1, 1)).reshape(-1) + b_batch_norm
)
return fused_conv
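# A minimal, self-contained check of the fusion helper above (illustrative only, not part of
# the upstream module). Layer sizes are arbitrary; the BatchNorm is put in eval mode so the
# running statistics baked into the fused convolution are the ones the reference forward
# pass actually uses.
if __name__ == "__main__":
    conv = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=False)
    batch_norm = nn.BatchNorm2d(8)
    conv.eval()
    batch_norm.eval()
    fused = fuse_conv_and_batch_norm(conv, batch_norm)
    x = torch.rand(1, 3, 16, 16)
    print(torch.allclose(batch_norm(conv(x)), fused(x), atol=1e-6))  # expected: True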
```
#### File: torch-kindle/tests/test_yolo_head.py
```python
import os
import random
import torch
from torch import nn
from kindle.model import YOLOModel
def test_yolo_head_initialize_bias_class_probability(n_test: int = 5):
for n in range(n_test):
model = YOLOModel(
os.path.join("tests", "test_configs", "yolo_sample.yaml"), verbose=n == 0
)
class_probability = random.random()
n_object_per_image = (random.randint(4, 16), random.randint(320, 1024))
model.initialize_biases(
class_probability=class_probability, n_object_per_image=n_object_per_image
)
class_prob = torch.log(
torch.tensor(class_probability / (model.model[-1].n_classes - 0.99))
)
for i in range(model.model[-1].n_layers):
class_bias = model.model[-1].conv[i].bias.view(3, -1)[:, 5:].mean(0)
assert torch.isclose(class_bias, class_prob, rtol=0.1).sum() == 10
for i in range(model.model[-1].n_layers):
obj_bias = model.model[-1].conv[i].bias.view(3, -1)[:, 4].mean()
obj_log = torch.log(
n_object_per_image[0]
/ (n_object_per_image[1] / model.model[-1].stride[i]) ** 2
)
assert torch.isclose(obj_bias, obj_log, rtol=0.1)
def test_yolo_head_initialize_bias(n_test: int = 5):
for n in range(n_test):
model = YOLOModel(
os.path.join("tests", "test_configs", "yolo_sample.yaml"), verbose=n == 0
)
class_frequency = torch.randint(100, 5000, (10,))
n_object_per_image = (random.randint(4, 16), random.randint(320, 1024))
model.initialize_biases(
class_frequency=class_frequency, n_object_per_image=n_object_per_image
)
freq_log = torch.log(class_frequency / class_frequency.sum())
for i in range(model.model[-1].n_layers):
class_bias = model.model[-1].conv[i].bias.view(3, -1)[:, 5:].mean(0)
assert torch.isclose(class_bias, freq_log, rtol=0.1).sum() == 10
for i in range(model.model[-1].n_layers):
obj_bias = model.model[-1].conv[i].bias.view(3, -1)[:, 4].mean()
obj_log = torch.log(
n_object_per_image[0]
/ (n_object_per_image[1] / model.model[-1].stride[i]) ** 2
)
assert torch.isclose(obj_bias, obj_log, rtol=0.1)
def test_yolo_model_v2():
model = YOLOModel(
os.path.join("tests", "test_configs", "yolo_samplev2.yaml"),
verbose=True,
init_bias=True,
)
in_tensor = torch.rand((1, 3, 480, 380))
out_tensor = model(in_tensor)
assert out_tensor[0].shape == (1, 3, 60, 48, 15)
assert out_tensor[1].shape == (1, 3, 30, 24, 15)
assert out_tensor[2].shape == (1, 3, 15, 12, 15)
profiler = model.profile(n_run=1)
n_param = profiler.get_parameter_numbers()
n_macs = profiler.get_macs()
assert n_param == 7087815
assert n_macs == 1316596800
def test_yolo_head():
model = YOLOModel(
os.path.join("tests", "test_configs", "yolo_sample.yaml"), verbose=True
)
model.initialize_biases()
in_tensor = torch.rand((1, 3, 480, 380))
out_tensor = model(in_tensor)
assert out_tensor[0].shape == (1, 3, 60, 48, 15)
assert out_tensor[1].shape == (1, 3, 30, 24, 15)
assert out_tensor[2].shape == (1, 3, 15, 12, 15)
model.eval()
out_tensor = model(in_tensor)
assert out_tensor[0].shape == (1, 11340, 15)
assert out_tensor[1][0].shape == (1, 3, 60, 48, 15)
assert out_tensor[1][1].shape == (1, 3, 30, 24, 15)
assert out_tensor[1][2].shape == (1, 3, 15, 12, 15)
profiler = model.profile(n_run=1)
n_param = profiler.get_parameter_numbers()
n_macs = profiler.get_macs()
assert (
n_param == 7279367
), f"Number of parameter is not matched! Current: {n_param:,d}"
assert n_macs == 1350937664, f"Number of MACs is not matched! Current: {n_macs}"
def test_yolo_head_fused():
model = YOLOModel(
os.path.join("tests", "test_configs", "yolo_sample.yaml"), verbose=True
)
model.initialize_biases()
in_tensor = torch.rand((1, 3, 480, 380))
model.eval()
out_tensor = model(in_tensor)
model.fuse().eval()
out_tensor_fused = model(in_tensor)
assert torch.all(torch.isclose(out_tensor[0], out_tensor_fused[0], rtol=1e-6))
for i in range(model.model[-1].n_layers):
assert torch.all(
torch.isclose(out_tensor[1][i], out_tensor_fused[1][i], rtol=1e-6)
)
def test_yolo_head_xyxy():
model = YOLOModel(
os.path.join("tests", "test_configs", "yolo_samplev2.yaml"),
verbose=True,
init_bias=True,
)
in_tensor = torch.rand((1, 3, 480, 380))
model.eval()
model.model[-1].out_xyxy = False
out_tensor_xywh = model(in_tensor)
model.model[-1].out_xyxy = True
out_tensor_xyxy = model(in_tensor)
xywh = out_tensor_xywh[0][0, :, :4]
xyxy = out_tensor_xyxy[0][0, :, :4]
x1y1_from_xywh = xywh[:, :2] - (xywh[:, 2:4] / 2)
x2y2_from_xywh = xywh[:, :2] + (xywh[:, 2:4] / 2)
wh_from_xyxy = xyxy[:, 2:] - xyxy[:, :2]
cxcy_from_xyxy = xyxy[:, :2] + (wh_from_xyxy / 2)
assert torch.isclose(torch.cat((x1y1_from_xywh, x2y2_from_xywh), -1), xyxy).all()
assert torch.isclose(torch.cat((cxcy_from_xyxy, wh_from_xyxy), -1), xywh).all()
def test_yolo_head_export():
model = YOLOModel(
os.path.join("tests", "test_configs", "yolo_samplev2.yaml"),
verbose=True,
init_bias=True,
)
in_tensor = torch.rand((1, 3, 480, 380))
model.eval()
out_tensor = model(in_tensor)
model.export(verbose=True)
out_tensor_export = model(in_tensor)
assert torch.all(torch.isclose(out_tensor[0], out_tensor_export[0], rtol=1e-6))
def test_yolo_model_v3():
device = (
torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
)
model = YOLOModel(
os.path.join("tests", "test_configs", "yolo_samplev3.yaml"),
verbose=True,
init_bias=True,
).to(device)
in_tensor = torch.rand((1, 3, 512, 512)).to(device)
out_tensor = model(in_tensor)
assert out_tensor[0].shape == (1, 3, 64, 64, 15)
assert out_tensor[1].shape == (1, 3, 32, 32, 15)
assert out_tensor[2].shape == (1, 3, 16, 16, 15)
profiler = model.profile(n_run=100)
n_param = profiler.get_parameter_numbers()
n_macs = profiler.get_macs()
assert n_param == 7087815
assert n_macs == 1316629568
def test_yolo_model_mobilevit():
device = (
torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
)
model = YOLOModel(
os.path.join("tests", "test_configs", "yolo_mobilevit_sample.yaml"),
verbose=True,
init_bias=True,
).to(device)
in_tensor = torch.rand((1, 3, 512, 512)).to(device)
out_tensor = model(in_tensor)
assert out_tensor[0].shape == (1, 3, 64, 64, 15)
assert out_tensor[1].shape == (1, 3, 32, 32, 15)
assert out_tensor[2].shape == (1, 3, 16, 16, 15)
profiler = model.profile(n_run=100)
n_param = profiler.get_parameter_numbers()
n_macs = profiler.get_macs()
assert n_param == 4910455
assert n_macs == 1747833760
if __name__ == "__main__":
# test_yolo_head()
# test_yolo_head_initialize_bias()
# test_yolo_head_initialize_bias_class_probability()
# test_yolo_head_fused()
# test_yolo_model_v2()
# test_yolo_head_xyxy()
# test_yolo_head_export()
test_yolo_model_v3()
test_yolo_model_mobilevit()
```
|
{
"source": "jeikerxiao/pythonStudy",
"score": 3
}
|
#### File: meiwenProject/spiders/neihanSpider.py
```python
import scrapy
from scrapy.http import Request
from meiwenProject.items import MeiwenprojectItem
from lxml import etree
class NeihanspiderSpider(scrapy.Spider):
name = 'neihanSpider'
allowed_domains = ['neihanshequ.com']
start_urls = ['http://neihanshequ.com/']
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36"
}
def start_requests(self):
# for id in range(1, 10, 1):
url = 'http://neihanshequ.com'
yield Request(url, headers=self.headers, callback=self.parse)
def parse(self, response):
items = response.xpath("//ul[@id='detail-list']/li")
for item in items:
save_item = MeiwenprojectItem()
save_item['author'] = item.xpath(".//div[@class='name-time-wrapper left']/span[@class='name']/text()").extract_first()
save_item['date'] = item.xpath(".//span[@class='time']/text()").extract_first()
save_item['content'] = item.xpath(".//h1[@class='title']/p/text()").extract_first()
yield save_item
pass
```
#### File: meizituSpider/spiders/meizituSpider.py
```python
import scrapy
from scrapy.spiders import CrawlSpider, Spider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.selector import Selector
from meizituSpider.items import MeizituspiderItem
class MeizituSpider(CrawlSpider):
name = "meizituSpider"
allowed_domains = ["meizitu.com"]
start_urls = ['http://www.meizitu.com/a/list_1_1.html']
rules = [
Rule(LinkExtractor(allow='http://www.meizitu.com/a/list_1_\d\.html'), callback='parse_item', follow=True)
]
def parse_item(self, response):
sel = Selector(response)
for_xijie = sel.xpath('//ul[@class="wp-list clearfix"]/li')
for yige in for_xijie:
xijieurl = yige.xpath('.//a[1]/@href').extract()[0]
request = scrapy.Request(xijieurl, callback=self.parse_xijie)
yield request
def parse_xijie(self, response):
sel = Selector(response)
item = MeizituspiderItem()
rawdate1 = sel.xpath('//div[@class="month_Year"]/text()').extract()[0]
rawdate2 = sel.xpath('//div[@class="day"]/text()').extract()[0]
date = rawdate1[-4:] + '-' + rawdate1[:2] + '-' + rawdate2
title = sel.xpath('//div[@class="metaRight"]/h2/a/text()').extract()[0]
for_pic = sel.xpath('//div[@id="picture"]//img')
for yige in for_pic:
item['date'] = date
item['title'] = title
item['image_urls'] = [yige.xpath('./@src').extract()[0]]
yield item
```
#### File: movieSpider/pipeLine/ExcelPipeline.py
```python
import openpyxl
class ExcelPipeline(object):
def __init__(self):
self.file_name = 'movieSpider/download/movies.xlsx'
self.cur_row = 2
self.creatwb(wbname=self.file_name)
        # Write the header information
title = ['rank', 'name', 'alias', 'rating_num', 'quote', 'url']
sheet_name = 'movies'
self.write_header(headers=title, sheetname=sheet_name, wbname=self.file_name)
def process_item(self, item, spider):
        # Write the item data
self.savetoexcel(item, wbname=self.file_name)
return item
    # Create a new Excel workbook
def creatwb(self, wbname):
wb = openpyxl.Workbook()
wb.save(filename=wbname)
    # Write the table header row
def write_header(self, headers, sheetname, wbname):
wb = openpyxl.load_workbook(filename=wbname)
sheet = wb.active
sheet.title = sheetname
field = 1
for field in range(1, len(headers) + 1):
_ = sheet.cell(row=1, column=field, value=str(headers[field - 1]))
wb.save(filename=wbname)
    # Write the item data into the Excel file
def savetoexcel(self, item, wbname):
wb = openpyxl.load_workbook(filename=wbname)
sheet = wb.active
sheet.cell(row=self.cur_row, column=1, value=str(item['rank']))
sheet.cell(row=self.cur_row, column=2, value=str(item['name']))
sheet.cell(row=self.cur_row, column=3, value=str(item['alias']))
sheet.cell(row=self.cur_row, column=4, value=str(item['rating_num']))
sheet.cell(row=self.cur_row, column=5, value=str(item['quote']))
sheet.cell(row=self.cur_row, column=6, value=str(item['url']))
self.cur_row += 1
wb.save(filename=wbname)
```
#### File: ximaProject/ximaProject/pipelines.py
```python
import codecs
import json
class XimaprojectPipeline(object):
def process_item(self, item, spider):
return item
# 'coolscrapy.piplines.TxtPipeline': 300 must be set in settings.py
class TxtPipeline(object):
def __init__(self):
self.file = codecs.open('./data/logs.txt', 'w', encoding='utf-8')
def process_item(self, item, spider):
        # Append the corresponding data to the already-opened file
if type(item['title']) == str and type(item['nickname']) == str:
self.file.write('------------------------------' + '\n')
self.file.write('标题:' + item['title'] + '\n')
self.file.write('昵称:' + item['nickname'] + '\n')
self.file.write('音频路径:' + item['play_path'] + '\n')
self.file.write('图片路径:' + item['cover_url'] + '\n')
self.file.write('创建时间:' + item['formatted_created_at'] + '\n')
return item
def spider_closed(self, spider):
self.file.close()
# The following pipeline saves items in JSON format; 'coolscrapy.pipelines.JsonPipeline': 200 must be set in settings
class JsonPipeline(object):
def __init__(self):
self.file = codecs.open('./data/logs.json', 'w', encoding='utf-8')
def process_item(self, item, spider):
if type(item['title']) == str and type(item['nickname']) == str:
line = json.dumps(dict(item), ensure_ascii=False) + "\n"
self.file.write(line)
return item
def spider_closed(self, spider):
self.file.close()
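# A hedged sketch of the settings.py registration the comments above refer to (illustrative
# only; the dotted paths are inferred from this package's name and the priorities are the
# ones mentioned in the comments, so the project's real settings may differ):
#
#   ITEM_PIPELINES = {
#       'ximaProject.pipelines.JsonPipeline': 200,
#       'ximaProject.pipelines.TxtPipeline': 300,
#   }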
```
|
{
"source": "Jeilef/battlebot",
"score": 2
}
|
#### File: Jeilef/battlebot/Battleground.py
```python
import math
import random
from PyQt5 import QtGui, QtCore
from PyQt5.QtCore import QPoint
from PyQt5.QtGui import QPainter, QPen, QColor
from PyQt5.QtWidgets import QWidget
import orderedset
from orderedset import OrderedSet
class Battleground(QWidget):
def __init__(self, cells, size, parent):
super().__init__(parent)
self.colors = ["black", "red", "orange", "blue"]
self.painter = QPainter(self)
brush = QtGui.QBrush(QtCore.Qt.black)
self.painter.setBrush(brush)
self.selected_fighter = 0
self.fighters = OrderedSet([])
self.ground_size = size
self.cell_size = size // cells
self.setMinimumSize(size, size)
self.setMaximumSize(size, size)
self.cells = cells
self.draw_grid()
def color_codes(self):
return list(map(lambda x: QColor(x).name(), self.colors))
def fight_finished(self, life_values):
health_totals = {}
for idx, f in enumerate(self.fighters):
health_totals.setdefault(f.team, 0)
health_totals[f.team] += life_values[idx][-1]
return min(health_totals.values()) <= 0
def paintEvent(self, event):
self.painter.begin(self)
self.draw_grid()
self.painter.end()
def draw_grid(self):
for x_coord in range(0, self.ground_size + 1, self.ground_size // self.cells):
self.painter.drawLine(x_coord, 0, x_coord, self.ground_size)
for y_coord in range(0, self.ground_size + 1, self.ground_size // self.cells):
self.painter.drawLine(0, y_coord, self.ground_size, y_coord)
half_cell_size = (self.ground_size // (self.cells * 2))
for idx, f in enumerate(self.fighters):
pen = QPen()
pen.setColor(QColor(self.colors[f.num]))
pen.setWidth(1)
self.painter.setPen(pen)
self.painter.drawText(f.x_pos,
f.y_pos,
half_cell_size, half_cell_size, QtCore.Qt.AlignCenter, str(idx))
def mousePressEvent(self, mouse_event: QtGui.QMouseEvent) -> None:
x_cell = (mouse_event.x() // self.cells) * self.cells
y_cell = (mouse_event.y() // self.cells) * self.cells
quarter_cell_size = (self.ground_size // (self.cells * 4))
x_coord = x_cell + quarter_cell_size
y_coord = y_cell + quarter_cell_size
list(self.fighters)[self.selected_fighter].x_pos = x_coord
list(self.fighters)[self.selected_fighter].y_pos = y_coord
self.update()
def find_target(self, fighter, f_idx, live_values):
min_dist = self.ground_size * 2
t_idx = 0
for idx, target in enumerate(self.fighters):
distance = math.sqrt(math.pow(fighter.x_pos - target.x_pos, 2) +
math.pow(fighter.y_pos - target.y_pos, 2))
if f_idx != idx and target.team != fighter.team and distance < min_dist and live_values[idx][-1] > 0:
t_idx = idx
min_dist = distance
return t_idx
def battle_round(self, live_values, waffen, fight_round):
for idx, fighter in enumerate(self.fighters):
if live_values[idx][-1] <= 0:
continue
t_idx = self.find_target(fighter, idx, live_values)
target = self.fighters[t_idx]
distance = math.sqrt(math.pow(fighter.x_pos - target.x_pos, 2) + math.pow(fighter.y_pos - target.y_pos, 2))
table_value = "".join(filter(lambda x: x.isdigit(), waffen[fighter.waffe1_fighter][9]))
reichweite = int(table_value) if table_value else 1
if distance // self.cell_size <= reichweite:
self.fighter_attacks(fight_round, fighter, live_values, t_idx, target)
else:
self.fighter_moves(fight_round, fighter, live_values, t_idx, target)
# fill up life of all fighters that were not hit
max_len = max(live_values, key=len)
for lv in live_values:
while len(lv) < len(max_len):
lv.append(lv[-1])
def fighter_moves(self, fight_round, fighter, live_values, t_idx, target):
movement = fighter.flinkheit * self.cell_size
old_x_pos = fighter.x_pos
if target.x_pos < fighter.x_pos - self.cell_size:
fighter.x_pos = fighter.x_pos - min(movement,
fighter.x_pos - target.x_pos + self.cell_size)
elif target.x_pos > fighter.x_pos + self.cell_size:
fighter.x_pos = fighter.x_pos + min(movement,
target.x_pos - fighter.x_pos - self.cell_size)
movement -= abs(old_x_pos - fighter.x_pos)
if target.y_pos < fighter.y_pos - self.cell_size:
fighter.y_pos = fighter.y_pos - min(movement,
fighter.y_pos - target.y_pos + self.cell_size)
elif target.y_pos > fighter.y_pos + self.cell_size:
fighter.y_pos = fighter.y_pos + min(movement,
target.y_pos - fighter.y_pos - self.cell_size)
if len(live_values[t_idx]) == fight_round:
live_values[t_idx].append(live_values[t_idx][-1])
def fighter_attacks(self, fight_round, fighter, live_values, t_idx, target):
dmg = fighter.angriff()
if random.random() < float(target.dodge_chance):
dmg = target.ausweichen(dmg)
else:
dmg = target.blocken(dmg)
if len(live_values[t_idx]) > fight_round:
live_values[t_idx][-1] -= max(0, dmg)
else:
live_values[t_idx].append(live_values[t_idx][-1] - max(0, dmg))
```
|
{
"source": "Jeilef/FoSA",
"score": 2
}
|
#### File: ex3/3_1_de_facto_coupling_matrix/3_1_de_facto_coupling_matrix.py
```python
import argparse
import subprocess
import numpy as np
import matplotlib.pyplot as plt
def compute_de_facto_graph(output_file):
history = subprocess.run(["git", "log", "--format=%ae?%ct", "--name-only"], capture_output=True, text=True)
history = history.stdout.split("\n")
history = [ele for ele in history if ele != '']
de_facto_dict = {}
current_key = []
authors = set()
files = set()
for line in history:
if "?" in line:
split_line = line.split("?")
split_line[1] = int(split_line[1])/60/60//24 + 0.5
current_key = tuple(split_line)
de_facto_dict[current_key] = []
authors.add(split_line[0])
else:
de_facto_dict[current_key].append(line)
files.add(line)
de_facto_graph = [[0 for i in range(len(files))] for j in range(len(files))]
files = list(files)
for f_mail, f_time in de_facto_dict:
for s_mail, s_time in de_facto_dict:
            if f_mail == s_mail and f_time != s_time and np.abs(f_time - s_time) <= 3.5:  # within 3.5 days (timestamps above are converted to days)
f_files = de_facto_dict[f_mail, f_time]
s_files = de_facto_dict[s_mail, s_time]
for f_file in f_files:
f_index = files.index(f_file)
for s_file in s_files:
s_index = files.index(s_file)
if s_index != f_index:
de_facto_graph[f_index][s_index] += 1
fig, ax = plt.subplots()
im = ax.imshow(de_facto_graph, cmap='YlGn')
ax.set_xticks(np.arange(len(files)))
ax.set_yticks(np.arange(len(files)))
# ... and label them with the respective list entries
fontsize = max(1, min(9, 200//len(files)))
ax.set_xticklabels(files, fontsize=fontsize)
ax.set_yticklabels(files, fontsize=fontsize)
ax.tick_params(top=True, bottom=False, labeltop=True, labelbottom=False)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=90, ha="left",
rotation_mode="anchor")
fig.tight_layout()
print("showing file")
#plt.show()
fig.set_size_inches(20,20)
plt.savefig(output_file, bbox_inches='tight')
if __name__ == "__main__":
parser = argparse.ArgumentParser('XYZ')
parser.add_argument('output', type=str, help='filename of output file (pdf)', nargs="?", default='result.pdf')
args = parser.parse_args()
compute_de_facto_graph(args.output)
```
#### File: ex3/3_2_commit_similarity_plot/3_2_commit_similarity_plot.py
```python
import subprocess, argparse, re
from collections import Counter
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import numpy as np
import random
random.seed(43)
# This requires matplotlib version 3.1.0
def compare_commits(output_file):
history = subprocess.run(["git", "log", "-p", "--format=?%ae"], capture_output=True, text=True)
history = history.stdout.split("\n")
history = [ele for ele in history if ele != ""]
author_history = [ele[1:] for ele in history if ele[0] == '?']
words_history = [ele.lower() for ele in history if ele[:2] == '+ ']
words = re.findall(r'\w+', " ".join(words_history))
most_common_words = [w for w,c in Counter(words).most_common(256)]
current_commit = ''
commits = []
for line in history:
if line[0] != "?" and line[:2] == '+ ':
current_commit += line
elif line[0] == "?":
line_words = re.findall(r'\w+', current_commit)
bag_of_words_vector = [0] * len(most_common_words)
for w in line_words:
if w in most_common_words:
index = most_common_words.index(w)
bag_of_words_vector[index] += 1
commits.append(bag_of_words_vector)
current_commit = ""
pca = PCA(n_components=2, random_state=43)
reduced_commits = pca.fit_transform(commits)
unique_authors = np.unique(author_history)
c = np.random.rand(len(unique_authors))
fig, ax = plt.subplots()
author_colors = []
for u_author in author_history:
a_index = list(unique_authors).index(u_author)
author_colors.append(c[a_index])
xs = []
ys = []
for x, y in reduced_commits:
xs.append(x)
ys.append(y)
scatter = ax.scatter(xs, ys, c=author_colors, s=1)
handles, labels = scatter.legend_elements(num=len(unique_authors))
plt.legend(handles, unique_authors, loc='center left', bbox_to_anchor=(1, 0.2))
fig.set_size_inches(20, 10)
plt.tight_layout()
plt.savefig(output_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser('XYZ')
parser.add_argument('output', type=str, help='filename of output file (pdf)',nargs="?" , default='result.pdf')
args = parser.parse_args()
compare_commits(args.output)
```
#### File: ex4/4_1_complexity_prediction/4_1_complexity_prediction.py
```python
import argparse
from sklearn.linear_model import LinearRegression
import pandas as pd
def train_model(train_dataset, train_columns, predict_columns):
data = pd.read_csv(train_dataset, sep=";")
data = data.fillna(0)
X = data[train_columns.split(";")]
Y = data[predict_columns.split(";")]
return LinearRegression().fit(X.to_numpy(), Y.to_numpy())
def model_predict(model, predict_dataset, train_columns):
data = pd.read_csv(predict_dataset, sep=";")
files = data[["filename"]].to_numpy()
prediction_data = data[train_columns.split(";")]
prediction = model.predict(prediction_data.to_numpy())
for f, p in zip(files, prediction):
print(str(f[0]) + ";" + str(p[0]))
if __name__ == "__main__":
parser = argparse.ArgumentParser('FSA')
parser.add_argument('--training-columns', type=str, help='columns that should be used for training')
parser.add_argument('--prediction-column', type=str, help='columns that should be predicted')
parser.add_argument('--train', type=str, help='training dataset', default='training-data.csv')
parser.add_argument('--predict', type=str, help='prediction dataset', default='prediction-data.csv')
args = parser.parse_args()
model = train_model(args.train, args.training_columns, args.prediction_column)
model_predict(model, args.predict, args.training_columns)
```
|
{
"source": "Jeilef/qrs_compare",
"score": 2
}
|
#### File: qrs_compare/src/application.py
```python
import json
import os
from flask import Flask, render_template, flash, request, redirect
from algorithm_evaluation import evaluate_algorithm, read_evaluated_algorithms, read_single_algorithm_results
from algorithm_store import AlgorithmStore
from docker_execution import setup_docker
from util.util import BEAT_CODE_DESCRIPTIONS
UPLOAD_FOLDER = "algorithms"
if not os.path.exists(UPLOAD_FOLDER):
os.mkdir(UPLOAD_FOLDER)
ALLOWED_EXTENSIONS = {'zip'}
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.secret_key = b'<KEY>'
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def root(metrics=None):
if request.method == 'POST':
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
alg_store = AlgorithmStore.for_new_alg(file, app.config['UPLOAD_FOLDER'])
setup_msg = setup_docker(alg_store)
evaluate_algorithm(alg_store)
print(setup_msg)
# return redirect(url_for('uploaded_file', filename="filename"))
ms = read_evaluated_algorithms()
print(ms)
return render_template('root.html', metrics=json.dumps(ms).replace("'", '"'))
@app.route('/uploads/<filename>')
def uploaded_file(filename):
return "upload successful of " + filename
@app.route('/reeval')
def reeval(metrics=None):
alg_stores = AlgorithmStore.for_all_existing(app.config['UPLOAD_FOLDER'])
for alg_store in alg_stores:
evaluate_algorithm(alg_store)
ms = read_evaluated_algorithms()
return render_template('root.html', metrics=json.dumps(ms).replace("'", '"'))
@app.route('/details/<algname>')
def details(algname, metrics=None, code_mapping=None):
metric_values = read_single_algorithm_results(algname)
return render_template('details.html', metrics=json.dumps(metric_values).replace("'", '"'),
code_mapping=json.dumps(BEAT_CODE_DESCRIPTIONS).replace("'", '"'))
```
#### File: src/util/util.py
```python
import bisect
from itertools import chain, combinations
BEAT_CODE_DESCRIPTIONS = {
"N": "Normal beat",
"L": "Left bundle branch block beat",
"R": "Right bundle branch block beat",
"B": "Bundle branch block beat (unspecified)",
"A": "Atrial premature beat",
"a": "Aberrated atrial premature beat",
"J": "Nodal (junctional) premature beat",
"S": "Supraventricular premature or ectopic beat (atrial or nodal)",
"V": "Premature ventricular contraction",
"r": "R-on-T premature ventricular contraction",
"F": "Fusion of ventricular and normal beat",
"e": "Atrial escape beat",
"j": "Nodal (junctional) escape beat",
"n": "Supraventricular escape beat (atrial or nodal)",
"E": "Ventricular escape beat",
"f": "Fusion of paced and normal beat",
"Q": "Unclassifiable beat"
}
def get_closest(a, x):
"""
:param a: sorted array
:param x: value
:return: closest value to x in a
"""
if len(a) == 0:
return -1
idx = bisect.bisect_left(a, x)
if idx == len(a):
return a[-1]
if idx == 0:
return a[0]
if abs(x - a[idx - 1]) < abs(x - a[idx]):
return a[idx - 1]
else:
return a[idx]
def mapper_beat_code_to_desciption(code):
return BEAT_CODE_DESCRIPTIONS[code]
def powerset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
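# A small, self-contained sanity check of the helpers above (illustrative only, not part of
# the original project); the sample values are arbitrary.
if __name__ == "__main__":
    assert get_closest([1, 5, 10], 7) == 5    # 7 is nearer to 5 than to 10
    assert get_closest([1, 5, 10], 12) == 10  # values past the end clamp to the last entry
    assert get_closest([], 3) == -1           # an empty array is signalled with -1
    print(list(powerset([1, 2])))             # [(), (1,), (2,), (1, 2)]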
```
#### File: qrs_compare/test/data_generation.py
```python
import os
import re
import time
from functools import reduce
import numpy as np
import concurrent.futures as cf
import sys
import wfdb
from func_timeout import func_timeout, FunctionTimedOut
sys.path.append("../src")
from algorithm_evaluation import read_ann_files, evaluate_algorithm_with_metric, create_evaluation_format, \
match_for_metric_on_data_part
from algorithm_store import AlgorithmStore
from data_handling.data_docker_setup import ECGData
from metrics.adapted_fixed_window_metric import AdTP, AdFP, AdFN, AdTN
from metrics.metric import MeanError, join
from util.easy_algorithm_execution import algs_with_name
class MockAlgStore(AlgorithmStore):
def __init__(self, pred_alg_dir, gt_alg_dir="../comparison_data/annotations"):
self.pred_alg_dir = pred_alg_dir
self.gt_alg_dir = gt_alg_dir
def groundtruth_dir(self):
return self.gt_alg_dir
def prediction_dir(self):
return self.pred_alg_dir
def create_all_data(base_path="../comparison_data_large_slices"):
ecg = ECGData(base_path + "/signal",
base_path + "/annotations", min_noise=1, max_noise=1, num_noise=1)
ecg.read_noise_data()
ecg.__records_per_beat_type__ = 100000
ecg.__splice_size_start__ = 15
ecg.__splice_size_end__ = 16
ecg.__splice_size_step_size__ = 2
ecg.__stepping_beat_position__ = False
ecg.__num_noises__ = 3
ecg.setup_evaluation_data()
print("failed writes: ", ecg.failed_writes)
def generate_predictions(base_save_path, comp_data_path="../comparison_data_noise/"):
p = re.compile('.*N_[0-9].[0-9]+[a-zA-Z_]*_[0-9]+.*')
for dirpath, subdir, filenames in os.walk(comp_data_path + "signal"):
print("Scanning: ", dirpath)
if p.match(dirpath) and "RECORDS" in filenames:
print(dirpath, "confirmed")
with open(os.path.join(dirpath, "RECORDS"), 'r') as f:
records = list(f.readlines())
for r in records:
print("Processing", r)
ann_file_name = os.path.join(comp_data_path + "annotations", r.rstrip('\n'))
ann_file_exists = os.path.exists(ann_file_name + '.atr') and os.path.isfile(
ann_file_name + '.atr')
rec_file_name = os.path.join(dirpath, r.rstrip('\n'))
rec_file_exists = os.path.exists(rec_file_name + '.dat') and os.path.isfile(
rec_file_name + '.dat')
print("Exist check", ann_file_name, ann_file_exists, rec_file_name, rec_file_exists)
if ann_file_exists and rec_file_exists:
try:
sample, meta = func_timeout(5, wfdb.rdsamp, args=(rec_file_name,), kwargs={"channels": [0]})
except:
print("Failed reading sample", r)
continue
# sample, meta = wfdb.rdsamp(rec_file_name, channels=[0])
rec_name = rec_file_name.split(os.sep)[-1]
# 150 is a restriction for some qrs detectors
if len(sample) > 150:
for alg_name, alg_func in algs_with_name().items():
print(alg_name)
save_path = base_save_path + alg_name
os.makedirs(save_path, exist_ok=True)
alg_func(meta, rec_name, save_path, sample)
print("Done", alg_name)
def generate_metric_values(prediction_dir, splice_save_dir):
alg_splice_size = {}
metrics = [MeanError, AdTP, AdFP, AdTN, AdFN]
for alg_name, alg_func in algs_with_name().items():
mas = MockAlgStore(prediction_dir + alg_name)
alg_splice_size[alg_name] = {}
print(alg_name)
# need to be adapted to the available values
for noise_level in np.linspace(0, 2, 5):
noise_str = str(noise_level).replace(".", "-")
alg_splice_size[alg_name][noise_level] = {}
for splice_size in range(3, 11, 2):
reg = "[a-zA-Z]_{}_{}.*_[0-9]+\\.atr".format(noise_str, splice_size)
loaded_data = read_ann_files(mas, reg)
alg_splice_size[alg_name][noise_level][splice_size] = {}
with cf.ProcessPoolExecutor(max_workers=len(metrics)) as pool:
met_per_beats = list(pool.map(evaluate_algorithm_with_metric, metrics,
list([loaded_data] * len(metrics))))
for met_idx, met_per_beat in enumerate(met_per_beats):
reduced_metric = reduce(join, met_per_beat.values())
alg_splice_size[alg_name][noise_level][splice_size][
metrics[met_idx].__abbrev__] = reduced_metric.compute()
print(alg_splice_size)
for alg in alg_splice_size:
os.makedirs(splice_save_dir, exist_ok=True)
for noise_level, noise_vals in alg_splice_size[alg].items():
write_strs = {}
for spli, vals in noise_vals.items():
for metric, metric_value in vals.items():
write_strs.setdefault(metric, "")
write_strs[metric] += "{} {}\n".format(spli, metric_value)
for metrics_abbrev, write_str in write_strs.items():
with open(splice_save_dir + "/{}-{}-{}.dat".format(metrics_abbrev, alg, noise_level),
"w") as splice_file:
splice_file.write(write_str)
def generate_predictions_with_metrics(comp_data_path="../comparison_data_noise/",
metric_path="data/latex_data/direct-metrics",
regex='.*[a-zA-Z]_[0-9].[0-9]+[a-zA-Z_]*_[0-9]+.*'):
p = re.compile(regex)
metrics = [MeanError, AdTP, AdFP, AdTN, AdFN]
os.makedirs(metric_path, exist_ok=True)
for dirpath, subdir, filenames in os.walk(comp_data_path + "signal"):
# print("Scanning: ", dirpath)
if p.match(dirpath) and "RECORDS" in filenames:
# print(dirpath, "confirmed")
with open(os.path.join(dirpath, "RECORDS"), 'r') as f:
records = list(f.readlines())
current_folder = dirpath.split(os.sep)[-1]
typed_metrics = {}
for r in records:
# print("Processing", r)
ann_file_name = os.path.join(comp_data_path + "annotations", r.rstrip('\n'))
ann_file_exists = os.path.exists(ann_file_name + '.atr') and os.path.isfile(
ann_file_name + '.atr')
rec_file_name = os.path.join(dirpath, r.rstrip('\n'))
rec_file_exists = os.path.exists(rec_file_name + '.dat') and os.path.isfile(
rec_file_name + '.dat')
if ann_file_exists and rec_file_exists:
try:
annotation = wfdb.rdann(ann_file_name, 'atr')
sample, meta = wfdb.rdsamp(rec_file_name, channels=[0])
except:
# print("Failed reading sample", r)
continue
# sample, meta = wfdb.rdsamp(rec_file_name, channels=[0])
rec_name = rec_file_name.split(os.sep)[-1]
# 150 is a restriction for some qrs detectors
if len(sample) > 150:
# print("Generating Metrics")
for alg_name, alg_func in algs_with_name().items():
typed_metrics.setdefault(alg_name, {})
metric_file_name = metric_path + "/{}--{}--{}.dat".format(current_folder, alg_name,
metrics[-1].__abbrev__)
if os.path.isfile(metric_file_name):
# print("exists")
continue
typed_metrics.setdefault(alg_name, {})
# print(alg_name, "start")
if alg_name == "xqrs":
# needed because xqrs sometimes hangs itself
r_peaks = func_timeout(5, alg_func, args=(meta, rec_name, "", sample),
kwargs={"save": False})
else:
r_peaks = alg_func(meta, rec_name, "", sample, save=False)
# print(alg_name, "end")
if len(r_peaks) == 0:
eval_tuple = [annotation.sample[1]], [annotation.symbol[1]], [], meta['fs'], r[0]
else:
eval_tuple = create_evaluation_format(annotation.symbol[1], annotation.sample,
r[0], r_peaks, meta['fs'])
for m_idx, m in enumerate(metrics):
beat_type, metric = match_for_metric_on_data_part(eval_tuple, m)
typed_metrics[alg_name].setdefault(m_idx, []).append(metric)
# print("Saving Metrics per folder", current_folder)
for alg_name in typed_metrics:
for metric_idx in typed_metrics[alg_name]:
combined_metric = reduce(join, typed_metrics[alg_name][metric_idx])
with open(metric_path + "/{}--{}--{}.dat".format(current_folder, alg_name, combined_metric.__abbrev__),
"w") as splice_file:
splice_file.write(str(combined_metric.compute()))
if __name__ == "__main__":
base_path = "only-mit-bih"
# generate_predictions("data/algorithm_prediction/", "../comparison_data/")
create_all_data(base_path)
generate_predictions_with_metrics(base_path, "data/latex_data/only-mit-bih", '.*[a-zA-Z]_[0-9.]+[a-zA-Z_]*[0-9_.]+.*')
```
#### File: qrs_compare/test/test_data_manipulation.py
```python
import os
import unittest
import numpy as np
import wfdb
class TestManipData(unittest.TestCase):
def setUp(self) -> None:
self.mitdb = "/mnt/dsets/physionet/mitdb/1.0.0/"
def test_save_read_sample(self):
record_name = self.mitdb + '100'
sig, fields = wfdb.rdsamp(record_name, channels=[0])
print(sig)
sig = list(map(lambda x: list(x), list(sig)))
sig = np.array(sig)
wfdb.wrsamp('m_100', fields['fs'], fields['units'], fields['sig_name'], sig,
comments=fields['comments'], base_date=fields['base_date'],
base_time=fields['base_time'], write_dir='data/samples/')
def test_save_read_record(self):
record_name = self.mitdb + '100'
record_read = wfdb.rdrecord(record_name, physical=False)
record_read.wrsamp(write_dir='data/samples/', expanded=False)
def test_manip_save_record(self):
record_name = self.mitdb + '102'
record_read = wfdb.rdrecord(record_name, physical=False)
record_read.d_signal += 1
record_read.wrsamp(write_dir='data/samples/', expanded=False)
def test_save_annotation(self):
wfdb.wrann("test_record_save_ann", 'atr', np.array([1]), np.array(["N"]), fs=360,
write_dir="data/ann")
a = [1, 2, 4, 4, 5, 6, 7, 8, 1, 5, 3, 5, 6, 6]
a = list(map(lambda x: list([x]), a))
wfdb.wrsamp("test_record_save_ann", 360, ["mV"], ["I"], np.array(a, np.float64),
comments=None, base_date=None, base_time=None,
write_dir="data/ann")
wfdb.rdann("data/ann/test_record_save_ann_fs", "atr")
def test_fs_is_kept_when_saving(self):
wfdb.wrann("test_record_save_ann_fs", 'atr', np.array([1]), np.array(["N"]), fs=360,
write_dir="data/ann")
ann = wfdb.rdann("data/ann/test_record_save_ann_fs", "atr")
self.assertEqual(360, ann.fs)
```
#### File: qrs_compare/test/test_fixed_window_metric.py
```python
import unittest
from metrics.fixed_window_classification_metric import FixedWindow
class TestFixedWindowMetric(unittest.TestCase):
def test_perfect_prediction_no_tol(self):
hw = FixedWindow()
tp, fp, tn, fn = hw.match_classification_annotations([10], ["n"], [10], 1)
self.assertEqual(1, tp)
self.assertEqual(0, fp)
self.assertEqual(2, tn)
self.assertEqual(0, fn)
def test_almost_perfect_prediction_no_tol(self):
hw = FixedWindow()
tp, fp, tn, fn = hw.match_classification_annotations([10], ["n"], [9], 2)
self.assertEqual(1, tp)
self.assertEqual(0, fp)
self.assertEqual(2, tn)
self.assertEqual(0, fn)
def test_almost_perfect_prediction_no_tol_larger(self):
hw = FixedWindow()
tp, fp, tn, fn = hw.match_classification_annotations([10], ["n"], [11], 2)
self.assertEqual(1, tp)
self.assertEqual(0, fp)
self.assertEqual(2, tn)
self.assertEqual(0, fn)
def test_no_prediction(self):
hw = FixedWindow()
tp, fp, tn, fn = hw.match_classification_annotations([10], ["n"], [0], 2)
self.assertEqual(0, tp)
self.assertEqual(1, fp)
self.assertEqual(1, tn)
self.assertEqual(1, fn)
def test_higher_right_and_wrong_prediction(self):
hw = FixedWindow()
tp, fp, tn, fn = hw.match_classification_annotations([10], ["n"], [0, 11], 2)
self.assertEqual(1, tp)
self.assertEqual(1, fp)
self.assertEqual(1, tn)
self.assertEqual(0, fn)
def test_lower_right_and_wrong_prediction(self):
hw = FixedWindow()
tp, fp, tn, fn = hw.match_classification_annotations([10], ["n"], [0, 9], 2)
self.assertEqual(1, tp)
self.assertEqual(1, fp)
self.assertEqual(1, tn)
self.assertEqual(0, fn)
def test_two_right_and_wrong_prediction(self):
hw = FixedWindow()
tp, fp, tn, fn = hw.match_classification_annotations([10], ["n"], [0, 9, 11], 2)
self.assertEqual(1, tp)
self.assertEqual(1, fp)
self.assertEqual(1, tn)
self.assertEqual(0, fn)
def test_lower_right_and_two_lower_wrong_prediction(self):
hw = FixedWindow()
tp, fp, tn, fn = hw.match_classification_annotations([10], ["n"], [0, 5, 9, 11], 2)
self.assertEqual(1, tp)
self.assertEqual(1, fp)
self.assertEqual(1, tn)
self.assertEqual(0, fn)
def test_multiple_beats_two_right_predictions(self):
hw = FixedWindow()
tp, fp, tn, fn = hw.match_classification_annotations([10, 20], ["n"], [10, 20], 2)
self.assertEqual(2, tp)
self.assertEqual(0, fp)
self.assertEqual(3, tn)
self.assertEqual(0, fn)
def test_multiple_beats_two_right_predictions_three_missed(self):
hw = FixedWindow()
tp, fp, tn, fn = hw.match_classification_annotations([10, 20], ["n"], [5, 10, 15, 20, 25], 2)
self.assertEqual(2, tp)
self.assertEqual(3, fp)
self.assertEqual(0, tn)
self.assertEqual(0, fn)
```
|
{
"source": "jeimynoriega/uip-prog3",
"score": 4
}
|
#### File: uip-prog3/Laboratorios/menu.py
```python
def menu(principal,*opc):
opciones = {}
validacion = False
while validacion!=True:
print(" MENU".center(45, "-"))
print("\n\t\t -%s- \n"%(principal.upper().title())) # titulo centrado
for i in range (len(opc)):
print("\t",i+1,"- ",str(opc[i]).capitalize()) # tabulado con primera letra en mayuscula
opciones[i+1]=opc[i]
print("".center(45, "-"))
eleccion = int(input("Esciba la opcion a Realizar --> "))
if eleccion in opciones:
validacion= True
else:
print("Opcion incorrecta ")
validacion=False
return eleccion
# Example
# men=("uno","dos","tres","cuarto","cinco")
# menu("principal",*men)
```
|
{
"source": "jeinarsson/app-template",
"score": 3
}
|
#### File: project/db/initial_setup.py
```python
from sqlalchemy import create_engine
import project.db as db
import os
from project.db.models import *
def create_from_empty(database_url = None):
import project.db.models
if database_url is None:
database_url = os.environ['DATABASE_URL']
engine = create_engine(database_url)
# create schema
print('Initializing DB in\n{}'.format(database_url))
db.Base.metadata.drop_all(bind=engine)
db.Base.metadata.create_all(bind=engine, checkfirst=True)
print('Adding some initial data.')
# add any initial data
s = db.make_session(database_url)
p = Poem(author='<NAME>')
text = [
"'Tis better to have loved and lost",
"Than never to have loved at all."
]
for l in text:
p.lines.append(PoemLine(text=l))
s.add(p)
s.commit()
print('Done.')
```
#### File: project/db/__init__.py
```python
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
def make_www_session(app):
engine = create_engine(app.config['DATABASE_URL'])
db_session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
Base.query = db_session.query_property()
return db_session
def make_session(database_url = None):
if database_url is None:
from os import environ
database_url = environ['DATABASE_URL']
engine = create_engine(database_url)
sm = sessionmaker(bind=engine)
return sm()
```
#### File: project/www/routes.py
```python
from flask import render_template, request, escape, redirect, abort, jsonify
from project.www import app, db_session
from project.db.models import *
##
## routes
##
@app.route("/")
def index():
poems = db_session.query(Poem).all()
return render_template('index.html', title="Project", poems=poems)
@app.route("/api/")
def api_index():
poems = db_session.query(Poem).all()
result = [{
'author': poem.author,
'lines': [line.text for line in poem.lines]
} for poem in poems ]
return jsonify(result)
```
|
{
"source": "jeinarsson/josync",
"score": 2
}
|
#### File: jeinarsson/josync/jobs.py
```python
import utils
import json
import os
import logging
import subprocess as sp
import re
import datetime
logger = logging.getLogger(__name__)
class Job(object):
"""Parent class for backup jobs."""
def __init__(self,params):
super(Job, self).__init__()
logger.debug("Entering Job constructor.")
try:
self.target = params['target']
self.raw_sources = params['sources']
except KeyError as e:
raise utils.JobDescriptionKeyError(e.message)
try:
self.global_excludes = params['global_excludes']
except KeyError:
self.global_excludes = []
self.rsync_base_options = ['--stats','--chmod=ugo=rwX','--compress']
if not utils.config['is_pythonw']:
self.rsync_base_options += ['--verbose']
target_drive, target_path = os.path.splitdrive(self.target)
if utils.is_net_drive(target_drive):
unc = utils.net_drives[target_drive]
self.target = unc + target_path
logger.debug("Replacing target drive {} with UNC path {}".format(target_drive, unc))
self.cygtarget = utils.get_cygwin_path(self.target)
if not os.path.isdir(self.target):
raise utils.TargetNotFoundError(self.target)
self.sources = {}
for s in self.raw_sources:
drive, path = os.path.splitdrive(s['path'])
if utils.is_net_drive(drive):
logger.warning("The source path {} is a mounted net drive (ignoring source).".format(drive+path))
elif not os.path.isdir(drive+path):
logger.warning("The source directory {} does not exist (ignoring source).".format(drive+path))
else:
relative_source = {
'path': path,
'excludes': []
}
if 'excludes' in s:
relative_source['excludes'] = s['excludes']
if drive in self.sources:
self.sources[drive].append(relative_source)
else:
self.sources[drive] = [relative_source]
self.stats = {}
def run(self):
raise NotImplementedError("Run method of job was not implemented.")
def excludes_to_options(self,excludes):
"""Convert a list of strings to a list of exclude options to rsync.
:param excludes: List of excludes.
"""
options = []
for excl in excludes:
options.append("--exclude={}".format(excl))
return options
def run_rsync(self):
rsync_options = self.rsync_base_options + self.rsync_options
rsync_process = utils.Rsync(self.rsync_source,self.rsync_target,rsync_options)
rsync_process.wait()
if rsync_process.returncode != 0:
# Appropriate exception type?
raise IOError("rsync returned with exit code {}.".format(rsync_process.returncode))
else:
logger.info("rsync finished successfully.")
        # Parse rsync stats output, typically find the numbers in lines like:
# Number of files: 211009
# Number of files transferred: 410
# Total file size: 903119614118 bytes
# Total transferred file size: 9046197739 bytes
pattern_dict = {
"num_files": re.compile("Number of files:\s+(\d+)"),
"files_transferred": re.compile("Number of files transferred:\s+(\d+)"),
"tot_file_size": re.compile("Total file size:\s+(\d+)"),
"file_size_transferred": re.compile("Total transferred file size:\s+(\d+)")
}
for line in rsync_process.output_buffer:
for key,pattern in pattern_dict.items():
match = pattern.match(line)
if match:
value = float(match.group(1))
if key in self.stats:
self.stats[key] += value
else:
self.stats[key] = value
class BaseSyncJob(Job):
"""Base class for sync-type jobs."""
def __init__(self,params):
super(BaseSyncJob, self).__init__(params)
self.rsync_base_options += ['--archive']
def run(self):
"""Run rsync to sync one or more sources with one target directory."""
self.rsync_base_options += self.excludes_to_options(self.global_excludes)
for drive,sources in self.sources.items():
logger.info("Backing up sources on {}".format(drive))
with utils.volume_shadow(drive) as shadow_root:
for s in sources:
logger.info("Backing up {}{} to {}".format(drive,s['path'],self.target))
logger.debug("Drive root is found at {} and source path is {}.".format(shadow_root,s['path']))
drive_letter = drive[0]
self.rsync_source = '{}/./{}{}'.format(
utils.get_cygwin_path(shadow_root),
drive_letter,
utils.get_cygwin_path(s['path']))
self.rsync_target = self.cygtarget
self.rsync_options = self.excludes_to_options(s['excludes'])
self.run_rsync()
class SyncJob(BaseSyncJob):
"""Simple backup syncing multiple sources to a target directory with full tree structure."""
def __init__(self,params):
super(SyncJob, self).__init__(params)
logger.debug("SyncJob constructor.")
# Delete option (also excluded) to keep up-to-date with sources
# Relative option to create directory tree at target
self.rsync_base_options += ['--delete','--delete-excluded','--relative']
class AdditiveJob(BaseSyncJob):
"""Updating target with new files from sources."""
def __init__(self,params):
super(AdditiveJob, self).__init__(params)
logger.debug("AdditiveJob constructor.")
        # self.sources maps drives to lists of source dicts, so iterate the lists, not the keys
        for drive_sources in self.sources.values():
            for s in drive_sources:
                s['path'] += '/'
# enumerate all possible job types and their constructors
job_types = {
'sync': SyncJob,
'add': AdditiveJob
}
def create_job_from_file(job_file):
"""Creates a job from a JSON job specification.
:param job_file: Path to job file.
:type job_file: str
:returns: Job object of specified type.
"""
logger.info("Creating Job from {}.".format(job_file))
with open(job_file) as f:
params = json.loads(f.read())
try:
if not params['type'] in job_types:
raise utils.JobDescriptionValueError('Job type {} is not valid.'.format(params['type']))
except KeyError as e:
raise utils.JobDescriptionKeyError(e.message)
params['job_file'] = job_file
return job_types[params['type']](params)
```
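For context, `create_job_from_file` above only needs a small JSON document with the keys read in `Job.__init__`. A minimal sketch of such a job description, written with the standard library; every path and exclude pattern below is a placeholder rather than anything shipped with josync:
```python
import json

# Hypothetical job description. 'type' must be a key of job_types ('sync' or 'add');
# 'target', 'sources' and the optional excludes mirror what Job.__init__ reads.
job_spec = {
    "type": "sync",
    "target": "Z:\\backups",                     # placeholder target directory
    "global_excludes": ["*.tmp"],                # optional, applied to every source
    "sources": [
        {
            "path": "C:\\Users\\me\\Documents",  # placeholder source directory
            "excludes": ["node_modules/"]        # optional per-source excludes
        }
    ]
}

with open("example_job.json", "w") as f:
    json.dump(job_spec, f, indent=2)

# create_job_from_file("example_job.json") would then return a SyncJob, assuming the
# josync modules (jobs, utils) are importable and the target directory exists.
```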
|
{
"source": "jeintos/SCHiRM",
"score": 2
}
|
#### File: jeintos/SCHiRM/experiments.py
```python
import numpy as np
import pandas as pd
import schirm_tools as schirm
import matplotlib.pyplot as plt
def run_testing_with_simulated_data(M,Mact_temp,n_tests,job_id,run_name,
data_path,data_file_name,result_path):
if Mact_temp == 0:
Mact = None
else:
Mact = Mact_temp
# load model and determine prior parameters
stan_model = schirm.define_model(compile_model = False)
prior_params = {'beta0_prior_std' :10.0,
'beta_prior_std' :1.0,
'log_mux_prior_mean':-1.64,
'log_mux_prior_std' :1.84,
'alpha_prior_std' :0.15}
# simulation setup
alpha = 0.3
    c_true = np.loadtxt(data_path+data_file_name+'_norm_consts.txt') # cell sizes are taken from real data
N = len(c_true) # number of cells
bounds = {'beta0':np.array([-1,1]),'beta':np.array([-1,1])} # bounds for simulated regression coefficients
average_expression_prior = np.loadtxt(data_path+'average_expression_mean_and_std.txt')
# bounds for truncated normal distribution from which the average expression levels are drawn
lb = np.log(0.5)
ub = np.log(100)
# load estimated normalization constants
norm_consts_est = np.loadtxt(data_path+'jittered_norm_consts.txt')
# matrices for convergence diagnostics
PSRF = np.zeros((n_tests,2*M + 3))
np.random.seed(123*job_id) # random seed job_id based
for i in range(n_tests):
# sample input genes from D and for X and log_CX
log_mux_true = schirm.sim_truncnormal_dist(average_expression_prior[0],average_expression_prior[1],lb,ub,M)
mux_true = np.exp(log_mux_true)
X, log_CX = schirm.sim_indep_data(mux_true,alpha,c_true,N)
# sample beta0 and beta and simulate target
beta0, beta = schirm.sim_beta(M,bounds,Mact)
y, log_cy = schirm.sim_output_data(log_CX,c_true,alpha,beta0,beta,N)
# hierarchical regression
fit = schirm.run_inference(X,y,prior_params,norm_consts_est,stan_model)
result_p1, result_p2 = schirm.fit2table(fit,'',M)
PSRF[i,:] = fit.summary()['summary'][(M+1)*N:,-1]
# run other methods
result_reg_p1, result_reg_p2 = schirm.run_standard_reg(X,y,norm_consts_est)
# combine results
temp1 = pd.concat([pd.DataFrame(data={'beta_true' : beta}),
pd.DataFrame(data={'mux_true' : mux_true}),
result_p1,
result_reg_p1], axis=1)
temp2 = pd.concat([pd.DataFrame(index=np.array([0]),data={'beta0_true' : beta0}),
result_p2,
result_reg_p2], axis=1)
if i > 0:
df1 = df1.append(temp1, ignore_index=True)
df2 = df2.append(temp2, ignore_index=True)
else:
df1 = temp1
df2 = temp2
print(str(100*(i+1)/n_tests)+'% finished.')
df1.to_csv(result_path+run_name+'res_' + str(M) + '_' + str(Mact_temp) + '_' + str(job_id) + '_p1' + '.csv')
df2.to_csv(result_path+run_name+'res_' + str(M) + '_' + str(Mact_temp) + '_' + str(job_id) + '_p2' + '.csv')
np.savetxt(result_path+run_name+'rhat_' + str(M) + '_' + str(Mact_temp) + '_' + str(job_id) + '.txt', PSRF)
def run_testing_with_real_data(M,Mact_temp,n_tests,job_id,run_name,
data_path,data_file_name,result_path):
if Mact_temp == 0:
Mact = None
else:
Mact = Mact_temp
# load data and normalization constants
df = pd.read_csv(data_path + data_file_name + '.csv',index_col = 0)
gene_names = list(df.columns.values)
norm_consts_est = np.loadtxt(data_path+'GSM1599500_K562_cells_norm_consts.txt')
N = len(norm_consts_est)
# list of cell cycle genes
cell_cyc_genes =['AURKA','E2F5','CCNE1','CDC25A','CDC6','CDKN1A','CDKN3','E2F1','MCM2','MCM6','NPAT','PCNA','BRCA1','BRCA2','CCNG2','CDKN2C','DHFR','PLK1','MSH2','NASP','RRM1','RRM2','TYMS','CCNA2','CCNF','CENPF','TOP2A','BIRC5','BUB1','BUB1B','CCNB1','CCNB2','CDC20','CDC25B','CDC25C','CDKN2D','CENPA','CKS2']
# remove cell cycle genes from the full gene list
for name in cell_cyc_genes:
gene_names.remove(name)
# load model
stan_model = schirm.define_model(compile_model = False)
prior_params = {'beta0_prior_std' :10.0,
'beta_prior_std' :1.0,
'log_mux_prior_mean':-1.64,
'log_mux_prior_std' :1.84,
'alpha_prior_std' :0.15}
# matrices for convergence diagnostics
PSRF = np.zeros((n_tests,2*(M) + 3))
np.random.seed(100*job_id) # job_id based -> different subsets for individual jobs
for i in range(n_tests):
#
if Mact == None:
if M%2 == 0:
Mact = int(M/2)
else:
Mact = int(np.ceil(M/2)) - np.random.randint(2)
target_index = np.random.choice(len(cell_cyc_genes), 1, replace=False)
true_indices = np.random.choice(len(cell_cyc_genes), Mact, replace=False)
false_indices = np.random.choice(len(gene_names), M - Mact, replace=False)
target_name = cell_cyc_genes[target_index[0]]
true_driver_names = list( cell_cyc_genes[xxx] for xxx in true_indices )
false_driver_names = list( gene_names[xxx] for xxx in false_indices )
X = df[true_driver_names+false_driver_names].as_matrix()
y = df[target_name].as_matrix()
beta = np.zeros(M)
beta[0:Mact] = 1
# hierarchical regression
fit = schirm.run_inference(X,y,prior_params,norm_consts_est,stan_model)
result_p1, result_p2 = schirm.fit2table(fit,'',M)
PSRF[i,:] = fit.summary()['summary'][(M+1)*N:,-1]
# run other methods
result_reg_p1, result_reg_p2 = schirm.run_standard_reg(X,y,norm_consts_est)
# combine results
temp1 = pd.concat([pd.DataFrame(data={'beta_true' : beta}),
result_p1,
result_reg_p1], axis=1)
temp2 = pd.concat([result_p2,
result_reg_p2], axis=1)
if i > 0:
df1 = df1.append(temp1, ignore_index=True)
df2 = df2.append(temp2, ignore_index=True)
else:
df1 = temp1
df2 = temp2
print(str(100*(i+1)/n_tests)+'% finished.')
df1.to_csv(result_path+run_name+'res_' + str(M) + '_' + str(Mact_temp) + '_' + str(job_id) + '_p1' + '.csv')
df2.to_csv(result_path+run_name+'res_' + str(M) + '_' + str(Mact_temp) + '_' + str(job_id) + '_p2' + '.csv')
np.savetxt(result_path+run_name+'rhat_' + str(M) + '_' + str(Mact_temp) + '_' + str(job_id) + '.txt', PSRF)
def combine_results(M,Mact_temp,n_jobs,run_name,result_path,result_path_comb):
job_id = 1
df1 = pd.read_csv(result_path+run_name+'res_' + str(M) + '_' + str(Mact_temp) + '_' + str(job_id) + '_p1' + '.csv',index_col=0)
df2 = pd.read_csv(result_path+run_name+'res_' + str(M) + '_' + str(Mact_temp) + '_' + str(job_id) + '_p2' + '.csv',index_col=0)
rhat = np.loadtxt(result_path+run_name+'rhat_'+ str(M) + '_' + str(Mact_temp) + '_' + str(job_id) + '.txt')
for job_id in range(2,n_jobs+1):
temp1 = pd.read_csv(result_path+run_name+'res_' + str(M) + '_' + str(Mact_temp) + '_' + str(job_id) + '_p1' + '.csv',index_col=0)
temp2 = pd.read_csv(result_path+run_name+'res_' + str(M) + '_' + str(Mact_temp) + '_' + str(job_id) + '_p2' + '.csv',index_col=0)
rhat_temp = np.loadtxt(result_path+run_name+'rhat_'+ str(M) + '_' + str(Mact_temp) + '_' + str(job_id) + '.txt')
# append
df1 = df1.append(temp1, ignore_index=True)
df2 = df2.append(temp2, ignore_index=True)
rhat = np.append(rhat,rhat_temp,axis=0)
# save
df1.to_csv(result_path_comb+run_name+'res_' + str(M) + '_' + str(Mact_temp) + '_p1' + '.csv')
df2.to_csv(result_path_comb+run_name+'res_' + str(M) + '_' + str(Mact_temp) + '_p2' + '.csv')
np.savetxt(result_path_comb+run_name+'rhat_' + str(M) + '_' + str(Mact_temp) + '.txt', rhat)
def illust_AUC_summary(MMM,Mact,run_name1,run_name2,result_path_comb):
run_name = run_name1
n = len(MMM)
AUC = np.zeros((n,7))
names = ['SCHiRMp','SCHiRMpm','OLS', 'LASSO','ENET','RIDGE','PCC']
plt.subplot(2,1,1)
for i in range(n):
M = MMM[i]
Mact_temp = Mact[i]
df = pd.read_csv(result_path_comb+run_name+'res_' + str(M) + '_' + str(Mact_temp) + '_p1' + '.csv',index_col=0)
th = 0
y_true = np.abs(df['beta_true'].as_matrix()) > th
for j, name in enumerate(names):
score = np.abs(df[name].as_matrix())
AUC[i,j] = schirm.roc_analysis(y_true, score, name, illust = False)
for j, name in enumerate(names):
plt.plot(range(1,len(MMM)+1),AUC[:,j],'-o',label = name)
tick_names = []
for i in range(len(MMM)):
tick_names.append(str(MMM[i]) + '('+str(Mact[i]) + ')')
plt.xticks(range(1,len(MMM)+1),tick_names)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.xlabel('Number of inputs (number of active inputs)')
plt.ylabel('AUC')
plt.title('Simulated data')
########
run_name = run_name2
n = len(MMM)
AUC = np.zeros((n,7))
plt.subplot(2,1,2)
for i in range(n):
M = MMM[i]
Mact_temp = Mact[i]
df = pd.read_csv(result_path_comb+run_name+'res_' + str(M) + '_' + str(Mact_temp) + '_p1' + '.csv',index_col=0)
th = 0
y_true = np.abs(df['beta_true'].as_matrix()) > th
for j, name in enumerate(names):
score = np.abs(df[name].as_matrix())
AUC[i,j] = schirm.roc_analysis(y_true, score, name, illust = False)
for j, name in enumerate(names):
plt.plot(range(1,len(MMM)+1),AUC[:,j],'-o',label = name)
tick_names = []
for i in range(len(MMM)):
tick_names.append(str(MMM[i]) + '('+str(Mact[i]) + ')')
plt.xticks(range(1,len(MMM)+1),tick_names)
plt.xlabel('Number of inputs (number of cell cycle genes)')
plt.ylabel('AUC')
plt.title('Real data')
def illust_errors(Mall,Mact_all,run_name,result_path_comb):
n = len(Mall)
names = ['SCHiRMpm','OLS', 'LASSO','ENET','RIDGE']
names_show = ['SCHiRM','OLS', 'LASSO','ENET','RIDGE']
# regression coefficients
for i in range(n):
M = Mall[i]
Mact_temp = Mact_all[i]
df = pd.read_csv(result_path_comb+run_name+'res_' + str(M) + '_' + str(Mact_temp) + '_p1' + '.csv',index_col=0)
true_vals = df['beta_true'].as_matrix()
for j, name in enumerate(names):
error = np.abs(df[name].as_matrix() - true_vals)**2
plt.subplot(5,2,2*j+1)
ind = np.argsort(true_vals)
plt.scatter(true_vals[ind],error[ind],c='teal',alpha = 0.05)
plt.title(names_show[j])
if j == 2:
plt.ylabel('Squared error')
if j < 4:
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
else:
plt.xlabel('True coefficient')
# intercept
names = ['beta0_mean','beta0_ols', 'beta0_las', 'beta0_ene', 'beta0_rid']
for i in range(n):
M = Mall[i]
Mact_temp = Mact_all[i]
df = pd.read_csv(result_path_comb+run_name+'res_' + str(M) + '_' + str(Mact_temp) + '_p2' + '.csv',index_col=0)
true_vals = df['beta0_true'].as_matrix()
for j, name in enumerate(names):
error = np.abs(df[name].as_matrix() - true_vals)**2
plt.subplot(5,2,2*j+2)
ind = np.argsort(true_vals)
plt.scatter(true_vals[ind],error[ind],c='teal',alpha = 0.05)
if j < 4:
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
else:
plt.xlabel('True intercept')
def param_est_fig(run_name,result_path_comb):
n_rows = 4
n_cols = 5
#Mact= np.array([3,4,4,4,4])
#MMM = np.array([7,8,12,16,20])
Mact= np.array([1,1,2,2,3])
MMM = np.array([2,3,4,5,6])
n_tests = len(MMM)
fig = plt.figure(figsize=(2*n_cols,8))
for i in range(n_tests):
M = MMM[i]
Mact_temp = Mact[i]
df1 = pd.read_csv(result_path_comb+run_name+'res_' + str(M) + '_' + str(Mact_temp) + '_p1' + '.csv',index_col=0)
df2 = pd.read_csv(result_path_comb+run_name+'res_' + str(M) + '_' + str(Mact_temp) + '_p2' + '.csv',index_col=0)
# true betas vs est
plt.subplot(n_rows,n_cols,i+1)
plt.plot([-1,1],[-1,1],'k')
plt.scatter(df1['beta_true'].as_matrix(),df1['SCHiRMpm'].as_matrix(),s = 0.2)
plt.title('M = ' + str(MMM[i]))
if i == 0:
plt.xlabel('True $\\beta$')
plt.ylabel('Est. $\\beta$')
# true beta0 vs est
plt.subplot(n_rows,n_cols,i + n_cols + 1)
plt.plot([-1,1],[-1,1],'k')
plt.scatter(df2['beta0_true'].as_matrix(),df2['beta0_mean'].as_matrix(),s = 0.2)
if i == 0:
plt.xlabel('True $\\beta_0$')
plt.ylabel('Est. $\\beta_0$')
# true mu vs est
plt.subplot(n_rows,n_cols,i + 2*n_cols + 1)
plt.plot([0,100],[0,100],'k')
plt.scatter(df1['mux_true'].as_matrix(),df1['mux_mean'].as_matrix(),s = 0.6)
if i == 0:
plt.xlabel('True $\mu$')
plt.ylabel('Est. $\mu$')
plt.subplot(n_rows,n_cols,i + 3*n_cols + 1)
plt.hist(df2['alpha_mean'].as_matrix())
if i == 0:
plt.xlabel('$\\alpha$')
plt.ylabel('Freq.')
```
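The three stages above (per-job runs, result combination, plotting) are meant to be chained. A minimal sketch of the combining step, assuming `experiments.py` and its `schirm_tools` dependency are importable and that the per-job files already exist under `result_path`; the sizes, run name and directories are placeholders:
```python
import experiments

M, Mact, n_jobs = 4, 2, 10              # placeholder experiment dimensions
run_name = 'sim_'                       # placeholder run prefix
result_path = 'results/jobs/'           # where run_testing_* wrote per-job files
result_path_comb = 'results/combined/'  # where the merged csv/txt files should go

# Merge the per-job outputs into single files for this (M, Mact) setting
experiments.combine_results(M, Mact, n_jobs, run_name, result_path, result_path_comb)
```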
|
{
"source": "jeiros/BP-Hack",
"score": 3
}
|
#### File: BP-Hack/Copilot/polly.py
```python
from boto3 import Session
from botocore.exceptions import BotoCoreError, ClientError
from contextlib import closing
import os
import sys
import subprocess
from tempfile import gettempdir
import audio
# Create a client using the credentials and region defined in the [adminuser]
# section of the AWS credentials file (~/.aws/credentials).
session = Session(profile_name="default", region_name='eu-west-1')
polly = session.client("polly")
id_n = 0
def polly_play(input_str):
global id_n
id_n = id_n + 1
try:
# Request speech synthesis
response = polly.synthesize_speech(Text=input_str, OutputFormat="mp3",
VoiceId="Kendra")
except (BotoCoreError, ClientError) as error:
# The service returned an error, exit gracefully
print(error)
sys.exit(-1)
# Access the audio stream from the response
if "AudioStream" in response:
# Note: Closing the stream is important as the service throttles on the
# number of parallel connections. Here we are using contextlib.closing to
# ensure the close method of the stream object will be called automatically
# at the end of the with statement's scope.
with closing(response["AudioStream"]) as stream:
output = os.path.join(gettempdir(), "speech"+str(id_n)+".mp3")
try:
# Open a file for writing the output as a binary stream
with open(output, "wb") as file:
file.write(stream.read())
file.close()
except IOError as error:
# Could not write to file, exit gracefully
print(error)
sys.exit(-1)
else:
# The response didn't contain audio data, exit gracefully
print("Could not stream audio")
sys.exit(-1)
# Play the audio using python
audio.play_mp3(output, 1.0)
```
#### File: BP-Hack/full_system/main (copy).py
```python
from blinks import *
import cv2
import dlib
if conf.is_raspi:
import picamera
from utils import *
def main():
# construct the argument parse and parse the arguments
counter_asleep = {}
COUNTER = 0
TOTAL = 0
frames_count = 0
# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(conf.shape_predictor)
# vs = cv2.VideoCapture(1)
# for i in range(200):
if conf.is_raspi:
camera = picamera.PiCamera()
stream = picamera.array.PiRGBArray(camera)
else:
stream = cv2.VideoCapture(1)
no_faces = True
detected_frames = 0
while no_faces == True:
ret, frame = stream.read()
img, eyes, faces = face_recognition(frame)
print(faces)
if len(faces) != 0:
detected_frames += 1
else:
detected_frames = 0
if detected_frames > 5:
no_faces = False
while 1:
frame, frames_count, counter_asleep, ASLEEP = initiate_asleep_detector(frames_count, counter_asleep, detector, predictor, stream)
# show the frame
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# Check response for the server
response = False #
if response is True and ASLEEP:
            print('Asleep !!!!! ')
            send = True  # send_trigger()
if response:
break
if conf.is_raspi:
stream.seek(0)
stream.truncate()
# do a bit of cleanup
cv2.destroyAllWindows()
# vs.stop()
# Entry point of the script
if __name__ == "__main__":
main()
```
|
{
"source": "jeis2497052/rentals-assisted-rates-validator",
"score": 2
}
|
#### File: jeis2497052/rentals-assisted-rates-validator/AssistedRateSpecTest.py
```python
import collections
import datetime
import hashlib
import hmac
import json
import logging
import unittest
import uuid
import pytz
import requests
POSSIBLE_CURRENCIES = {'AUD', 'CAD', 'CHF', 'EUR', 'GBP', 'SEK', 'THB', 'USD'}
MIN_STAY_VIOLATION = 'MIN_STAY_VIOLATION'
TURNOVER_VIOLATION = 'TURNOVER_VIOLATION'
DATE_RANGE_UNAVAILABLE = 'DATE_RANGE_UNAVAILABLE'
VIOLATION_CODES = {MIN_STAY_VIOLATION,
TURNOVER_VIOLATION, DATE_RANGE_UNAVAILABLE}
TURNOVER_DAYS = {'MONDAY', 'TUESDAY', 'WEDNESDAY',
'THURSDAY', 'FRIDAY', 'SATURDAY', 'SUNDAY'}
ERROR_REASONS = {'PROPERTY_INACTIVE', 'DATE_RANGE_INVALID',
'PARTY_SIZE_INVALID', 'RATE_UNAVAILABLE', 'OTHER'}
# TODO: Update the following variables to match your system
SECRET_KEY = '<KEY>'
BASE_URL = 'https://example.com'
PATH = '/path/to/your/endpoint'
EXTERNAL_LISTING_REFERENCE = 'abc123'
EXTERNAL_ACCOUNT_REFERENCE = 'xyz123'
CLIENT_NAME = 'tripadvisor-vr'
TIMESTAMP_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
SIGNATURE_FORMAT = "VRS-HMAC-SHA512 timestamp={timestamp}, client={client}, signature={signature}"
QUERY_STRING_FORMAT = 'guests={guests}&externalListingReference={external_listing_reference}&externalAccountReference={external_account_reference}&arrival={arrival}&departure={departure}&requestId={request_id}'
logging.basicConfig(
format='%(asctime)s %(levelname)s %(funcName)s %(message)s',
level=logging.INFO
)
QueryParameters = collections.namedtuple(
'QueryParameters',
[
'guests',
'external_listing_reference',
'external_account_reference',
'arrival',
'departure',
]
)
# TODO: Update the following test inputs to match your system
# Comment out a top-level key, value pair to skip that particular test
TEST_CASES = {
'successful_response': QueryParameters(
guests=7,
external_listing_reference=EXTERNAL_LISTING_REFERENCE,
external_account_reference=EXTERNAL_ACCOUNT_REFERENCE,
arrival='2018-07-01',
departure='2018-08-01',
),
'min_stay_violation': QueryParameters(
guests=16,
external_listing_reference=EXTERNAL_LISTING_REFERENCE,
external_account_reference=EXTERNAL_ACCOUNT_REFERENCE,
arrival='2018-08-01',
departure='2018-08-05',
),
'date_range_unavailable_violation': QueryParameters(
guests=17,
external_listing_reference=EXTERNAL_LISTING_REFERENCE,
external_account_reference=EXTERNAL_ACCOUNT_REFERENCE,
arrival='2018-08-01',
departure='2018-08-02',
),
'turnday_violation': QueryParameters(
guests=18,
external_listing_reference=EXTERNAL_LISTING_REFERENCE,
external_account_reference=EXTERNAL_ACCOUNT_REFERENCE,
arrival='2018-08-02',
departure='2018-08-03',
),
'property_inactive_error': QueryParameters(
guests=10,
external_listing_reference=EXTERNAL_LISTING_REFERENCE,
external_account_reference=EXTERNAL_ACCOUNT_REFERENCE,
arrival='2018-08-03',
departure='2018-08-04',
),
'date_range_invalid_error': QueryParameters(
guests=11,
external_listing_reference=EXTERNAL_LISTING_REFERENCE,
external_account_reference=EXTERNAL_ACCOUNT_REFERENCE,
arrival='2018-08-03',
departure='2018-08-04',
),
'party_size_invalid_error': QueryParameters(
guests=12,
external_listing_reference=EXTERNAL_LISTING_REFERENCE,
external_account_reference=EXTERNAL_ACCOUNT_REFERENCE,
arrival='2018-08-03',
departure='2018-08-04',
),
'other_error': QueryParameters(
guests=13,
external_listing_reference=EXTERNAL_LISTING_REFERENCE,
external_account_reference=EXTERNAL_ACCOUNT_REFERENCE,
arrival='2018-08-03',
departure='2018-08-04',
),
}
class AssistedRateSpecTest(unittest.TestCase):
"""Class AssistedRatespecTest"""
s = requests.Session()
def _send_request(self, request):
"""
:type request: requests.PreparedRequest
:rtype: tuple[requests.Response, dict|None]
:return: tuple[response, response body as dict, if present]
"""
response = self.s.send(request)
try:
body = response.json()
except ValueError:
body = None
if response.status_code == 200:
self.validate_200_response(body)
elif response.status_code == 400:
self.validate_400_response(body)
else:
raise RuntimeError('Unexpected HTTP response code')
return response, body
def validate_200_response(self, body):
"""validate_200_response"""
self.assertIn('details', body)
details = body['details']
self.assertIn('baseRate', details)
self.assertGreater(details['baseRate']['amount'], 0)
self.assertIn(details['baseRate']['currency'], POSSIBLE_CURRENCIES)
self.assertIn('tax', details)
self.assertGreaterEqual(details['tax']['amount'], 0)
self.assertIn(details['tax']['currency'], POSSIBLE_CURRENCIES)
if 'deposit' in details:
self.assertGreater(details['deposit']['amount'], 0)
self.assertIn(details['deposit']['currency'], POSSIBLE_CURRENCIES)
if 'customFees' in details:
self.assertGreaterEqual(len(details['customFees']), 1)
for custom_fee in details['customFees']:
self.assertGreaterEqual(len(custom_fee['name']), 1)
self.assertLessEqual(len(custom_fee['name']), 255)
self.assertGreater(custom_fee['rate']['amount'], 0)
self.assertIn(custom_fee['rate']
['currency'], POSSIBLE_CURRENCIES)
self.assertEqual(
{'baseRate', 'tax', 'deposit', 'customFees'} | set(details.keys()),
{'baseRate', 'tax', 'deposit', 'customFees'}
)
if 'eligibility' in body:
self.assertIn('tripViolations', body['eligibility'])
self.assertEqual(set(body['eligibility'].keys()), {
'tripViolations'})
trip_violations = body['eligibility']['tripViolations']
self.assertGreaterEqual(len(trip_violations), 1)
self.assertEqual(
len(trip_violations),
len(set([trip_violation['violationCode']
for trip_violation in trip_violations]))
)
for trip_violation in trip_violations:
self.assertIn(trip_violation['violationCode'], VIOLATION_CODES)
if trip_violation['violationCode'] == TURNOVER_VIOLATION:
self.assertEqual(set(trip_violation.keys()), {
'violationCode', 'turnover'})
self.assertIn(trip_violation['turnover'], TURNOVER_DAYS)
elif trip_violation['violationCode'] == MIN_STAY_VIOLATION:
self.assertEqual(set(trip_violation.keys()), {
'violationCode', 'minStay'})
self.assertIsInstance(trip_violation['minStay'], int)
self.assertGreater(trip_violation['minStay'], 1)
else:
self.assertEqual(set(trip_violation.keys()), {
'violationCode'})
def validate_400_response(self, body):
"""validate_400_response"""
self.assertIn('errors', body)
errors = body['errors']
self.assertGreaterEqual(len(errors), 1)
for error in errors:
self.assertEqual(
{'reason', 'description'} | set(error.keys()),
{'reason', 'description'}
)
self.assertIn('reason', error)
self.assertIn(error['reason'], ERROR_REASONS)
if 'description' in error:
self.assertGreaterEqual(len(error['description']), 1)
self.assertLessEqual(len(error['description']), 255)
self.assertEqual(
len(errors),
len(set([e['reason'] for e in errors]))
)
@unittest.skipIf('successful_response' not in TEST_CASES, 'Test case not implemented')
def test_successful_response(self):
"""test_successful_response"""
response, body = self._send_request(
_get_request(TEST_CASES['successful_response']))
self.assertEqual(response.status_code, 200)
@unittest.skipIf('min_stay_violation' not in TEST_CASES, 'Test case not implemented')
def test_min_stay_violation(self):
"""test_min_stay_violation"""
response, body = self._send_request(
_get_request(TEST_CASES['min_stay_violation']))
self.assertEqual(response.status_code, 200)
min_stay_violations = [
v for v in body['eligibility']['tripViolations']
if v['violationCode'] == 'MIN_STAY_VIOLATION'
]
self.assertEqual(len(min_stay_violations), 1)
@unittest.skipIf('date_range_unavailable_violation' not in TEST_CASES, 'Test case not implemented')
def test_date_range_unavailable(self):
"""test_data_range_unavailable"""
response, body = self._send_request(_get_request(
TEST_CASES['date_range_unavailable_violation']))
self.assertEqual(response.status_code, 200)
date_range_unavailable_violations = [
v for v in body['eligibility']['tripViolations']
if v['violationCode'] == 'DATE_RANGE_UNAVAILABLE'
]
self.assertEqual(len(date_range_unavailable_violations), 1)
@unittest.skipIf('turnday_violation' not in TEST_CASES, 'Test case not implemented')
def test_turnday(self):
"""test_turnday"""
response, body = self._send_request(
_get_request(TEST_CASES['turnday_violation']))
self.assertEqual(response.status_code, 200)
turnover_violations = [
v for v in body['eligibility']['tripViolations']
if v['violationCode'] == 'TURNOVER_VIOLATION'
]
self.assertEqual(len(turnover_violations), 1)
@unittest.skipIf('property_inactive_error' not in TEST_CASES, 'Test case not implemented')
def test_property_inactive_error(self):
"""test_property_inactive_error"""
response, body = self._send_request(
_get_request(TEST_CASES['property_inactive_error']))
self.assertEqual(response.status_code, 400)
self.assertIn('errors', body)
property_inactive_errors = [
v for v in body['errors']
if v['reason'] == 'PROPERTY_INACTIVE'
]
self.assertEqual(len(property_inactive_errors), 1)
@unittest.skipIf('date_range_invalid_error' not in TEST_CASES, 'Test case not implemented')
def test_date_range_invalid_error(self):
"""test_data_range_invalid_error"""
response, body = self._send_request(
_get_request(TEST_CASES['date_range_invalid_error']))
self.assertEqual(response.status_code, 400)
self.assertIn('errors', body)
property_inactive_errors = [
v for v in body['errors']
if v['reason'] == 'DATE_RANGE_INVALID'
]
self.assertEqual(len(property_inactive_errors), 1)
@unittest.skipIf('party_size_invalid_error' not in TEST_CASES, 'Test case not implemented')
def test_party_size_invalid_error(self):
"""test_party_size_invalid_error"""
response, body = self._send_request(
_get_request(TEST_CASES['party_size_invalid_error']))
self.assertEqual(response.status_code, 400)
self.assertIn('errors', body)
property_inactive_errors = [
v for v in body['errors']
if v['reason'] == 'PARTY_SIZE_INVALID'
]
self.assertEqual(len(property_inactive_errors), 1)
@unittest.skipIf('other_error' not in TEST_CASES, 'Test case not implemented')
def test_other_error(self):
"""test_other_error"""
response, body = self._send_request(
_get_request(TEST_CASES['other_error']))
self.assertEqual(response.status_code, 400)
self.assertIn('errors', body)
property_inactive_errors = [
v for v in body['errors']
if v['reason'] == 'OTHER'
]
self.assertGreaterEqual(len(property_inactive_errors), 1)
def _get_request(query_parameters):
now = datetime.datetime.now(tz=pytz.UTC)
body = ''
    query_string = QUERY_STRING_FORMAT.format(
        guests=query_parameters.guests,
        external_listing_reference=query_parameters.external_listing_reference,
        # QUERY_STRING_FORMAT also requires external_account_reference; omitting it raises KeyError
        external_account_reference=query_parameters.external_account_reference,
        arrival=query_parameters.arrival,
        departure=query_parameters.departure,
        request_id=uuid.uuid4()
    )
r_r = requests.Request(
'GET',
"{}{}?{}".format(BASE_URL, PATH, query_string),
)
signature = SIGNATURE_FORMAT.format(
timestamp=now.strftime(TIMESTAMP_FORMAT),
client=CLIENT_NAME,
signature=_get_signature(
r_r.method,
PATH,
query_string,
now,
body,
)
)
r_r.headers['Authorization'] = signature
logging.info(
"Request {}".format(json.dumps({
'url': r_r.url,
'method': r_r.method,
'path': PATH,
'query_string': query_string,
'body': body,
'timestamp': now.strftime(TIMESTAMP_FORMAT),
'client': CLIENT_NAME,
'secret': SECRET_KEY,
'signature': signature,
}))
)
return r_r.prepare()
def _get_signature(
method,
path,
query_string,
timestamp,
body
):
canonical_request = '\n'.join([
method,
path,
query_string,
timestamp.strftime(TIMESTAMP_FORMAT),
hashlib.sha512(body.encode('utf-8')).hexdigest()
])
canonical_request_hash = hashlib.sha512(
canonical_request.encode('utf-8')).hexdigest()
return hmac.new(SECRET_KEY.encode('utf-8'), canonical_request_hash.encode('utf-8'), hashlib.sha512).hexdigest()
if __name__ == '__main__':
unittest.main()
```
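On the other side of this contract, the partner endpoint has to recompute the same signature before trusting a request. A minimal sketch of that check, mirroring the canonical-request construction in `_get_signature`; the function name and argument layout are illustrative, not part of the published spec:
```python
import hashlib
import hmac


def verify_signature(secret_key, method, path, query_string, timestamp, body, received_signature):
    """Recompute the HMAC the same way _get_signature does and compare in constant time.
    timestamp is the already-formatted string taken from the Authorization header."""
    canonical_request = '\n'.join([
        method,
        path,
        query_string,
        timestamp,
        hashlib.sha512(body.encode('utf-8')).hexdigest()
    ])
    canonical_request_hash = hashlib.sha512(canonical_request.encode('utf-8')).hexdigest()
    expected = hmac.new(secret_key.encode('utf-8'),
                        canonical_request_hash.encode('utf-8'),
                        hashlib.sha512).hexdigest()
    return hmac.compare_digest(expected, received_signature)
```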
|
{
"source": "jeisenma/traceSelectionInMaya",
"score": 2
}
|
#### File: traceSelectionInMaya/scripts/traceSelectTool.py
```python
import maya.cmds as mc
import maya.mel as mm
from Plane import * # for interaction plane calculations
from Trajectory import * # for building motion trajectory curves
import buildMotionTraces as bmt
import sys, time
debug = 0
class TraceSelection:
def __init__( self, xformRoots, traceableObjs ):
self.xformRoots = xformRoots # all rigs
self.traceableObjs = traceableObjs # the list of joints to pay attention to (ignore the rest)
self.substeps = 1 # precision settings
self.dragDensity = 0.2
self.selectedMotions = {} # a dictionary to hold the selected motions for each root
self.motionPathsVisible = {} # a dictionary to remember whether (or not) the motion paths for a root were visible when the interaction started
self.forceReload = False # force the re-definition of the tool?
# for keeping track of time when mousePressed (when selection started)
self.startTime = 0
# make an empty trajectory dictionary
self.trace = {}
self.nearestRoot = None
self.nearestPath = None
self.interactionPlane = None
# measure the fit-one truck distance for the camera
mc.select( xformRoots[0] )
mc.viewFit()
self.viewFitDistance = self.CameraToPopDist()
# zoom back out to view all
mc.select(clear=True)
mc.viewFit()
def TraceGesturePress( self ):
""" Procedure called on press """
if debug>0: print("begin PRESS")
# Clean up: if there are any locators or groups in the scene, delete them
if( len(mc.ls("locator*")) > 0 ):
mc.delete(mc.ls("locator*"))
if( len(mc.ls("*Grp")) > 0 ):
mc.delete(mc.ls("*Grp"))
# find the position of the mouse click
pressPosition = Vector( mc.draggerContext( 'TraceGesture', query=True, anchorPoint=True) )
camPos = self.CameraPosition()
viewAxis = self.CameraViewAxis()
closestObj2Cam = self.FindNearestObjectToCamera()
self.interactionPlane = Plane(viewAxis,closestObj2Cam)
pressPosition = self.FindRayPlaneIntersect( camPos, pressPosition, self.interactionPlane )
if debug > 0:
mc.group(n="traceGrp",empty=True)
loc = mc.spaceLocator(p=pressPosition)
mc.parent(loc,"traceGrp")
for root in self.xformRoots:
# remember whether (or not) the motion paths for a root were visible when the interaction started
self.motionPathsVisible[root] = mc.getAttr("%s_MotionTraces.visibility"%root)
# set up all the traces
self.trace[root].Clear()
# set the trace normal to the viewing normal of the camera
self.trace[root].normal = self.interactionPlane.normal #Vector( mc.xform(mc.lookThru(q=True), q=True, m=True)[8:11] )
self.trace[root].planePt = self.interactionPlane.point #closestObj2Cam
self.nearestRoot, rootDist = self.FindNearestRoot( pressPosition, self.interactionPlane )
mc.setAttr("%s_MotionTraces.visibility"%self.nearestRoot, 1) # vis the new display layer
# make a group to hold the trace locators
if debug > 0:
mc.group(name="trace%sGrp"%self.nearestRoot,empty=True)
loc = mc.spaceLocator(p=pressPosition)
mc.parent(loc,"trace%sGrp"%self.nearestRoot)
# reset the trace
self.trace[self.nearestRoot].Clear()
# start the timer
self.startTime = time.time()
# set the trace normal to the viewing normal of the camera
self.trace[self.nearestRoot].normal = Vector( mc.xform(mc.lookThru(q=True), q=True, m=True)[8:11] )
self.trace[self.nearestRoot].planePt = closestObj2Cam
# add the initial click position to the trace
self.trace[self.nearestRoot].AddPoint( pressPosition )
self.nearestPath, pathDist = self.FindNearestMotionPath( pressPosition, self.nearestRoot )
if not mc.draggerContext( 'TraceGesture', query=True, modifier=True) == "ctrl":
self.trace[self.nearestRoot].SetUpDTWs()
mc.refresh(currentView=True,force=True)
if debug>0: print("end PRESS")
def TraceGestureDrag( self ):
""" Procedure called on drag """
# find the current position of the mouse drag
if debug>0: print("begin DRAG")
if not mc.draggerContext( 'TraceGesture', query=True, modifier=True) == 'shift':
if self.nearestRoot in self.selectedMotions.keys():
for sm in self.selectedMotions[ self.nearestRoot ]:
if mc.objExists(sm):
mc.delete( sm )
del self.selectedMotions[ self.nearestRoot ]
currentCam = mc.lookThru(q=True)
camPos = Vector(mc.xform(currentCam,q=True,t=True))
dragPosition = Vector( mc.draggerContext( 'TraceGesture', query=True, dragPoint=True) )
dragPosition = self.FindRayPlaneIntersect( camPos, dragPosition, Plane(self.trace[self.nearestRoot].normal,self.trace[self.nearestRoot].planePt) )
# find the last recorded drag position
lastDragPosition = self.trace[self.nearestRoot].points[ sorted(self.trace[self.nearestRoot].points.keys())[-1] ]
# find the drag distance
dragDist = (dragPosition-lastDragPosition).mag()
# if far enough away from last drag position, add a new trace point and re-solve the DTWs
if dragDist > self.dragDensity :
self.trace[self.nearestRoot].AddPoint( dragPosition )
if debug > 0:
loc = mc.spaceLocator(p=dragPosition)
mc.parent(loc,"traceGrp")
if self.trace[self.nearestRoot].timespan and len(self.trace[self.nearestRoot].points) > 4*self.substeps and \
not mc.draggerContext( 'TraceGesture', query=True, modifier=True) == "ctrl":
if debug > 0: print "DTW solved to timespan of ",trace[nearestRoot].timespan
mc.currentTime( self.trace[self.nearestRoot].timespan[1] )
if dragDist > self.dragDensity:
mc.refresh(currentView=True,force=True)
elif dragDist > self.dragDensity:
if debug > 0: print "No DTW, attempting closest path... point..."
self.ScrubToNearestTimeOnPath( dragPosition, self.nearestRoot, self.nearestPath )
if debug>0: print("end DRAG")
def TraceGestureRelease( self ):
""" when the mouse is released, find the matching joint trajectory """
if debug>0: print("begin RELEASE")
releasePosition = Vector( mc.draggerContext( 'TraceGesture', query=True, dragPoint=True) ).projectToPlane( self.trace[self.nearestRoot].normal, planePt=self.trace[self.nearestRoot].planePt )
if debug>0: print "release! ", releasePosition
theTrace = self.trace[self.nearestRoot]
selectedMotion = None
if theTrace.closestJoint and theTrace.timespan and (theTrace.timespan[1]-theTrace.timespan[0]) > 1 and \
not mc.draggerContext( 'TraceGesture', query=True, modifier=True) == "ctrl":
theDTW = theTrace.dtws[theTrace.closest]
if debug > 0:
print "closest = ", theTrace.closestJoint, theTrace.timespan
if not mc.objExists("DTW_Y"):
mc.group(n="DTW_Y",empty=True)
for pt in theDTW.Y:
loc = mc.spaceLocator(p=pt)
mc.parent(loc,"DTW_Y")
## ghostJoint(trace[nearestRoot].closestJoint,trace[nearestRoot].timespan)
# Build the motion curve and store it's name in the selectedMotions dictionary
duration = [ int(theTrace.timespan[0]), int(theTrace.timespan[1]+1) ]
keyframes = [ theTrace.searchList[theTrace.closestJoint].points[frame] for frame in range(duration[0],duration[1]) ]
selectedMotion = bmt.CurveMotionTrace( theTrace.closestJoint, keys=keyframes ) #duration=theTrace.timespan )
else:
self.ScrubToNearestTimeOnPath( releasePosition, self.nearestRoot, self.nearestPath )
cam2pop = self.CameraToPopDist()
path = theTrace.searchList[self.nearestPath]
pointKey = path.ClosestTimeTo(releasePosition)
closestPoint = path.points[pointKey]
closestPtOnPath = closestPoint.projectToPlane( theTrace.normal, planePt=theTrace.planePt )
mouse2path = (releasePosition - closestPtOnPath).mag()
# if motion paths are visible and no drag happened
# and releasePosition is very close to the path,
# then select the whole motion path
if self.motionPathsVisible[self.nearestRoot] and mouse2path < 0.3:
# Build the motion curve and store it's name in the selectedMotions dictionary
duration = [ mc.playbackOptions(q=True,min=True),mc.playbackOptions(q=True,max=True)+1 ]
keyframes = [ theTrace.searchList[self.nearestPath].points[frame] for frame in range(duration[0],duration[1]) ]
selectedMotion = bmt.CurveMotionTrace( self.nearestPath, keys=keyframes ) #duration=[mc.playbackOptions(q=True,min=True),mc.playbackOptions(q=True,max=True)] )
# if not scrubbing
if not mc.draggerContext( 'TraceGesture', query=True, modifier=True) == "ctrl" and \
cam2pop >= self.viewFitDistance: # if trucked out, and mouse is clicked (w/ no drag)
mc.select(self.nearestRoot) # zoom in on the nearest root for a better view
mc.viewFit(fitFactor=2.5) # the invisible parts of the roots can artificially enlarge the BB, so truck in a little extra
mc.select(clear=True)
if selectedMotion:
selectedMotionCurve = selectedMotion.construct(self.nearestPath)
if not self.nearestRoot in self.selectedMotions.keys():
self.selectedMotions[self.nearestRoot] = []
mc.setAttr("%s_MotionTraces.visibility"%self.nearestRoot, 0)
selectedMotionCurve = mc.rename(selectedMotionCurve, "%s_selection%d"%(self.nearestPath,len(self.selectedMotions[self.nearestRoot])))
self.selectedMotions[self.nearestRoot].append( selectedMotionCurve )
self.AddCustomAttr( selectedMotionCurve, "isTraceSelection", True )
self.AddCustomAttr( selectedMotionCurve, "interactTime", time.time()-self.startTime )
self.AddCustomAttr( selectedMotionCurve, "startFrame", duration[0] )
self.AddCustomAttr( selectedMotionCurve, "endFrame", duration[1] )
mc.select(cl=True)
## # select the related keyframes
## if theTrace.closestJoint:
## if debug > 0: print "anim channels are", self.GetAnimChans( mc.listRelatives( theTrace.closestJoint, parent=True ) )
## mc.selectKey( clear=True )
## jointParent = mc.listRelatives( theTrace.closestJoint, parent=True )[0]
## for channel in self.GetAnimChans( jointParent ):
## mc.selectKey( jointParent, time=(theTrace.timespan[0],theTrace.timespan[1]), attribute=channel.split(jointParent)[1].lstrip('_'), add=True )
def GetAnimChans( self, joint ):
""" Given a joint name, it finds the attached animation channels """
mc.select(joint)
atls = mc.listConnections(type="animCurveTL")
atus = mc.listConnections(type="animCurveTU")
atas = mc.listConnections(type="animCurveTA")
atts = mc.listConnections(type="animCurveTT")
chans = []
if atls:
chans.extend(atls)
if atus:
chans.extend(atus)
if atas:
chans.extend(atas)
if atts:
chans.extend(atts)
return chans
def AddCustomAttr( self, object, attrName, attrVal ):
""" Adds a new attribute to an object and assigns the given name and value """
typ = str(type(attrVal)).split('\'')[1]
mc.select(object)
if typ == 'str':
mc.addAttr( longName=attrName, dataType='string' )
mc.setAttr( "%s.%s"%(object, attrName), attrVal, type="string")
elif typ == 'int':
mc.addAttr( longName=attrName, attributeType='long' )
mc.setAttr( "%s.%s"%(object, attrName), attrVal )
else:
mc.addAttr( longName=attrName, attributeType=typ )
mc.setAttr( "%s.%s"%(object, attrName), attrVal )
def CameraToPopDist( self ):
camPos = Vector(mc.xform(mc.lookThru(q=True),q=True,t=True))
if self.nearestRoot:
popPos = Vector(mc.objectCenter(self.nearestRoot))
else:
popPos = self.FindNearestObjectToCamera()
return (camPos-popPos).mag()
def ghostJoint( self, joint, framespan ):
""" ghosts a given joint for a given span of frames """
mc.setAttr( "%s.ghosting"%joint, 1)
mc.setAttr( "%s.ghostingControl"%joint, 1)
frameList = range(framespan[0],framespan[1])
mc.setAttr( "%s.ghostFrames"%joint, frameList, type="Int32Array" )
def ToggleAllMotionPathsVisibility( self ):
""" shows/hides all the motion paths """
allVizs = [mc.getAttr("%s.visibility"%group) for group in mc.ls(type="transform") if group.endswith("_MotionTraces")]
if not all(allVizs):
for group in mc.ls(type="transform"):
if group.endswith("_MotionTraces"):
mc.setAttr("%s.visibility"%group, 1)
else:
for group in mc.ls(type="transform"):
if group.endswith("_MotionTraces"):
mc.setAttr("%s.visibility"%group, 0)
def FindRayPlaneIntersect( self, source, dest, plane ):
""" given a source and destination (location) and a plane definition,
return the point along a line between the source and the destination
that intersects the given plane """
return plane.intersectWithRay(source,dest)
def CameraViewAxis( self ):
""" return the Z-axis of the lookThru camera """
currentCam = mc.lookThru(q=True)
return Vector( mc.xform(currentCam, q=True, m=True)[8:11] )
def CameraPosition( self ):
""" return the Z-axis of the lookThru camera """
currentCam = mc.lookThru(q=True)
return Vector(mc.xform(currentCam,q=True,t=True))
def FindNearestObjectToCamera( self ):
camPos = self.CameraPosition()
minDist = float("inf")
closestObj = Vector()
for obj in self.xformRoots:
objPos = Vector(mc.objectCenter(obj))
dist = ( camPos - objPos ).mag()
if dist < minDist:
minDist = dist
closestObj = objPos
return closestObj
def FindNearestRoot( self, mousePos, plane ):
""" find the root nearest to the mouse """
nearest = None
minDist = float("inf")
for root in self.xformRoots: #trueCenterMatchString):
path, dist = self.FindNearestMotionPath( mousePos, root, plane=plane)
if minDist > dist:
minDist = dist
nearest = root
if debug > 0: print "nearest root is ", nearest
return nearest, minDist
def FindNearestMotionPath( self, mousePos, root, plane=None ):
""" Finds the motion path of the given root that is nearest the given mouse position """
minDist = float("inf")
for joint in sorted(self.trace[root].searchList.keys()):
closestPoint = self.trace[root].searchList[joint].ClosestPointTo( mousePos, plane=plane ) #DistanceTo( mousePos, plane=plane )
dist = (mousePos - closestPoint.projectToPlane(self.interactionPlane.normal,planePt=self.interactionPlane.point)).mag()
if minDist > dist:
minDist = dist
nearestPath = joint
if debug>0: print "FindNearestMotionPath found nearest path: ", nearestPath
return nearestPath, minDist
def ScrubToNearestTimeOnPath( self, mousePos, root, motionPath ):
""" Given the name of a joint (motion path), find the nearest
point in time to the mouse location and change the current
playback time to match """
path = self.trace[root].searchList[motionPath]
frame = path.ClosestTimeTo( mousePos, plane=self.interactionPlane )
if debug>0: print "ScrubToNearestTimeOnPath setting time to: ", frame
mc.currentTime( frame )
def DrawJointMotionPaths( self, roots ):
""" Gathers points for each joint of each root and build motion paths (optimized to only cycle thru timeline once) """
# prep the data structure
keypoints = {}
for root in roots:
keypoints[root] = {}
#.split(':')[-1]
joints = [j for j in mc.listRelatives( root, allDescendents=True ) if j in self.traceableObjs] # TODO: just get the nearby joints by projecting them all onto the viewing plane and finding the distance
for j in joints:
keypoints[root][j] = []
# cycle through the timeline and record data
for t in range(int(mc.playbackOptions(q=True,minTime=True)), int(mc.playbackOptions(q=True,maxTime=True))+1):
mc.currentTime(t)
for root in roots:
#.split(':')[-1]
joints = [j for j in mc.listRelatives( root, allDescendents=True ) if j in self.traceableObjs]
for j in joints:
keypoints[root][j].append( mc.xform( j, q=True, ws=True, translation=True ) )
# use the data to build motion curves
cols = [9,12,13,14,15,17,18,23,29,31] # color indices for the display layers
for root in roots:
joints = [j for j in mc.listRelatives( root, allDescendents=True ) if j in self.traceableObjs]
if len(joints) > 0:
traceGroup = mc.group(n="%s_MotionTraces"%root,empty=True)
curves = []
for num, j in enumerate(joints):
curve = bmt.CurveMotionTrace( j, keys=keypoints[root][j] )
curveGeom = curve.construct("%s_trace"%j)
curves.append( curveGeom ) # add the motion paths to the trace's search list and set up the DTWs
displayLayerName = "%s_MotionPaths"%j#.split(':')[-1]
if not mc.objExists(displayLayerName):
mc.createDisplayLayer(name=displayLayerName)
mc.setAttr("%s.color"%displayLayerName, cols[num])
mc.editDisplayLayerMembers( displayLayerName, curveGeom )
else:
objs = mc.editDisplayLayerMembers(displayLayerName, query=True )
if objs:
objs.append( curveGeom )
else:
objs = [curveGeom]
mc.editDisplayLayerMembers( displayLayerName, objs )
mc.parent(curves, traceGroup)
mc.parent(traceGroup, root)
mc.select(cl=True)
def LoadJointMotionPaths( self, roots ):
""" prep the data structure that holds the motion paths """
animPaths = {}
for root in roots:
animPaths[root] = {}
self.trace[root] = Trajectory("%sTrace"%root)
# find all nearby joints
joints = [j for j in mc.listRelatives( root, allDescendents=True ) if j in self.traceableObjs] # TODO: just get the nearby joints by projecting them all onto the viewing plane and finding the distance
# get the motion path of each nearby joint
if len(joints) > 0:
for j in joints:
animPaths[root][j] = Trajectory( "%s_path"%j )
if debug > 0:
mc.group(name="%sGrp"%j,empty=True)
startFrame = mc.playbackOptions(q=True,minTime=True)
endFrame = mc.playbackOptions(q=True,maxTime=True)+1
for t in [float(x)/self.substeps+startFrame for x in range(0, int(endFrame-startFrame)*self.substeps)]:
mc.currentTime(t)
for root in roots:
joints = [j for j in mc.listRelatives( root, allDescendents=True ) if j in self.traceableObjs]
for j in joints:
point = Vector( mc.xform(j, q=True, ws=True, t=True) )
animPaths[root][j].AddPoint( point, t )
if debug > 0:
loc = mc.spaceLocator(p=point)
mc.parent(loc,"%sGrp"%j)
self.trace[root].SetSearchList( animPaths[root] )
def findXformRoot( jointRoot ):
""" Returns the transform root of a given jointRoot """
for parent in mc.listRelatives( jointRoot, allParents=True ):
if not mc.listRelatives( parent, parent=True ):
return parent
return jointRoot
def findIKhandles():
""" Returns a list of all IK handles in the scene """
return mc.ls(type="ikHandle")
def findRoots():
""" Returns a list that contains the root joint for all joint heirarchies in the scene """
roots = []
for joint in mc.ls(type="joint"):
if( mc.nodeType(mc.listRelatives(joint,parent=True)) != "joint" ):
roots.append(joint)
return roots
def findEndEffectorJoints( root ):
""" Returns a list of the leaf joints in a given rig (specified by the root joint).
Ignores leaf joints that have an incoming IK handle connection. """
leafs = []
for joint in mc.listRelatives(root, allDescendents=True, type="joint"):
if( not( mc.listRelatives(joint, type="joint", children=True) ) and # if joint has no joint children
not( 'ikEffector' in [mc.nodeType(n) for n in mc.listConnections(joint)] ) ): # if joint has no incoming IK connection
leafs.append( joint )
return leafs
def findRootsFromTraceables(traceables):
jointRoots = []
xformRoots = []
for thing in traceables:
itr = thing
pitr = mc.listRelatives( itr, parent=True )
while pitr:
itr = pitr[0]
pitr = mc.listRelatives( itr, parent=True )
if mc.nodeType(itr) == "joint" and (not pitr or mc.nodeType(pitr) != "joint") and not itr in jointRoots:
jointRoots.append( itr )
if not itr in xformRoots:
xformRoots.append( itr )
return jointRoots, xformRoots
def autoFindTraceables():
jointRoots = findRoots()
xformRoots = [findXformRoot(r) for r in jointRoots]
# find the important parts of each rig -- so that trajectories can be built for them
traceableObjs = []
traceableObjs.append( jointRoots ) # joint roots of each joint hierarchy
traceableObjs.append( xformRoots ) # DAG roots of each joint hierarchy (topmost transform)
traceableObjs.extend( findIKhandles() ) # all IK handles in the scene
for root in jointRoots:
traceableObjs.extend( findEndEffectorJoints(root) ) # all end-effector joints under the root
return jointRoots, xformRoots, traceableObjs
def main( traceables=None ):
global traceSelect
if(traceables):
traceableObjs = traceables
jointRoots, xformRoots = findRootsFromTraceables(traceables)
else:
jointRoots, xformRoots, traceableObjs = autoFindTraceables()
# if nothing traceable is in the scene, give up
if len(xformRoots) == 0:
mc.warning("no trace-able objects (e.g. joints or IK handles) are in your scene.")
return None
# otherwise, keep going...
traceSelect = TraceSelection( xformRoots, traceableObjs )
# Define draggerContext with press and drag procedures
if not traceSelect.forceReload and mc.draggerContext( 'TraceGesture', exists=True ) :
mc.draggerContext( 'TraceGesture', edit=True, space='world',
pressCommand='traceSelect.TraceGesturePress()',
dragCommand='traceSelect.TraceGestureDrag()',
releaseCommand='traceSelect.TraceGestureRelease()',
cursor='default')
else:
mc.draggerContext( 'TraceGesture', space='world',
pressCommand='traceSelect.TraceGesturePress()',
dragCommand='traceSelect.TraceGestureDrag()',
releaseCommand='traceSelect.TraceGestureRelease()',
cursor='default')
if len(mc.ls('*_MotionPaths')) == 0:
# if first time script has been called on this scene
traceSelect.DrawJointMotionPaths(xformRoots)
traceSelect.LoadJointMotionPaths(xformRoots)
mc.setToolTo('TraceGesture')
return traceSelect
if __name__ == "__main__":
main()
```
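The module above is meant to be driven from Maya's script editor rather than run standalone. A minimal sketch, assuming the scripts directory is on Maya's Python path; the control names in the commented call are placeholders:
```python
# Run inside Maya's script editor (Python tab)
import traceSelectTool

# Let the tool auto-discover joint roots, IK handles and end-effector joints
traceSelect = traceSelectTool.main()

# ...or restrict tracing to specific objects (placeholder names)
# traceSelect = traceSelectTool.main(traceables=['L_wrist_ctrl', 'R_foot_ikHandle'])
```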
#### File: traceSelectionInMaya/scripts/Vector.py
```python
from random import uniform as _VectorUniform # be careful here to not import on top of
from math import sqrt as _VectorSqrt # other imports that may already exist
from math import acos as _VectorAcos
class Vector(list):
""" Vector class: 3D vector storage and operations """
def __init__(self, x=0, y=0, z=0):
""" Constructor -- you can either pass in a
Vector or three separate values or a list
with three values """
try:
list.__init__(self, [x.x, x.y, x.z])
self.x = x.x
self.y = x.y
self.z = x.z
except:
try:
list.__init__(self, x)
self.x = x[0]
self.y = x[1]
self.z = x[2]
except:
list.__init__(self, [x, y, z])
self.x = x
self.y = y
self.z = z
def asList(self):
""" Returns the vector as a list """
return [self.x, self.y, self.z]
def mag(self):
""" Returns the length of the vector. """
return _VectorSqrt(self.dot(self))
def norm(self):
""" Returns a normalized version of the vector. """
return self*(1.0/self.mag())
def distTo(self, other):
""" Returns the length of the vector between this point and another. """
return (other-self).mag()
def angleBetween(self,other):
""" Returns the angle between this vector and another (radians) """
if(self.mag() == 0 or other.mag() == 0):
return 0
else:
#return _VectorAcos(min(1,max(0,self.dot(other)/(self.mag()*other.mag()))))
return _VectorAcos(min(1,max(-1,self.dot(other)/(self.mag()*other.mag()))))
def random(self, hi=0.0, lo=1.0):
""" Assigns random values [hi,lo] to the vector components. """
self.x = _VectorUniform(hi,lo)
self.y = _VectorUniform(hi,lo)
self.z = _VectorUniform(hi,lo)
def add(self, other):
""" Adds the other vector to myself. """
self.x += other.x
self.y += other.y
self.z += other.z
def __len__(self):
""" Returns the length -- always 3 """
return 3
def __add__(a,b):
""" Returns the addition of two vectors. """
result = Vector(a.x,a.y,a.z)
result.add(b)
return result
def sub(self, other):
""" Subtracts the other vector from myself. """
self.x -= other.x
self.y -= other.y
self.z -= other.z
def __sub__(a,b):
""" Returns the subtraction of two vectors. """
result = Vector(a.x,a.y,a.z)
result.sub(b)
return result
def __neg__(a):
""" Returns the negation of a vector. """
result = Vector(a.x,a.y,a.z)
result.mult(-1)
return result
def mult(self, factor):
""" Multiplies my values by a factor. """
self.x *= factor
self.y *= factor
self.z *= factor
def dot(self, other):
""" Returns the dot product between another vector and myself. """
return self.x*other.x + self.y*other.y + self.z*other.z
def __div__(self, factor):
""" divides each element in this vector by the given factor """
result = Vector(self)
result *= 1.0/factor
return result
def __mul__(self, other):
""" If two vectors are provided, returns the dot product.
If a vector and a number are provided, returns the
multiplication of the two. """
result = Vector(self)
try:
return result.dot(other)
except:
result.mult(other)
return result
def __rmul__(self,other):
""" If two vectors are provided, returns the dot product.
If a vector and a number are provided, returns the
multiplication of the two. """
result = Vector(self)
try:
return result.dot(other)
except:
result.mult(other)
return result
def power(self, factor):
""" Raise each of my values to a power specified by factor """
self.x = self.x**factor
self.y = self.y**factor
self.z = self.z**factor
def cross(self, other):
""" Returns the cross product of myself with the other vector. """
return Vector(self.y*other.z - other.y*self.z,
self.z*other.x - other.z*self.x,
self.x*other.y - other.x*self.y)
def projectToPlane(self, normal, planePt=None):
""" projects this point onto an origin-intersecting plane with
the given normal """
temp = Vector(self)
normal = normal.norm() # Make sure normal is normalized
if planePt:
length = (temp-planePt).dot(normal) #/(normal*normal) # Find the length along the normal from the point
return temp - normal*length # to plane intersecting the origin
else:
length = (temp).dot(normal) #/(normal*normal) # Find the length along the normal from the point
return temp - normal*length # to plane intersecting the origin
def __pow__(a,b):
""" If two vectors are provided, returns the cross product.
If a vector and a number are provided, returns the
vector raised to a power specified by the number. """
result = Vector(a.x,a.y,a.z)
try:
return result.cross(b)
except:
result.power(b)
return result
def __getitem__(self, index):
""" Returns the value corresponding to a numerical index:
0 -> x, 1 -> y, 2 -> z """
if(index == 0):
return self.x
elif(index == 1):
return self.y
elif(index == 2):
return self.z
else:
raise Exception("Index %d is out of bounds: Vector class only has valid indices 0-2"%index)
def __setitem__(self, index, value):
""" Sets the value corresponding to a numerical index:
0 -> x, 1 -> y, 2 -> z """
if(index == 0):
self.x = value
elif(index == 1):
self.y = value
elif(index == 2):
self.z = value
else:
raise Exception("Index %d is out of bounds: Vector class only has valid indices 0-2"%index)
def __repr__(self):
""" So we can call print on a vector object """
return "< %.3f, %.3f, %.3f >"%(self.x, self.y, self.z)
```
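For orientation, here is a minimal usage sketch of the `Vector` class above. The `from Vector import Vector` path is an assumption about how the module sits on disk, and the expected values follow directly from the operator overloads defined in the class.
```python
# Minimal usage sketch for the Vector class above (import path is an assumption).
from Vector import Vector

a = Vector(1, 0, 0)
b = Vector(0, 1, 0)

print(a + b)                  # < 1.000, 1.000, 0.000 >
print(a * b)                  # 0 -- __mul__ falls back to the dot product for two vectors
print(a ** b)                 # < 0.000, 0.000, 1.000 > -- __pow__ gives the cross product
print(a.angleBetween(b))      # ~1.5708 radians (90 degrees)
print(Vector(3, 4, 0).mag())  # 5.0
```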
|
{
"source": "JeisonJHA/Plugins-Development",
"score": 3
}
|
#### File: JeisonJHA/Plugins-Development/delphiIDE.py
```python
import sublime_plugin
class MethodDeclaration(object):
"""docstring for MethodDeclaration"""
def __init__(self):
self._methodclass = None
self.has_implementation = False
self.has_interface = False
@property
def has_implementation(self):
return self._has_implementation
@has_implementation.setter
def has_implementation(self, value):
self._has_implementation = value
@property
def has_interface(self):
return self._has_interface
@has_interface.setter
def has_interface(self, value):
self._has_interface = value
@property
def methodname(self):
return self._methodname
@methodname.setter
def methodname(self, value):
self._methodname = value
@property
def methodregion(self):
return self._methodregion
@methodregion.setter
def methodregion(self, value):
self._methodregion = value
@property
def visibility(self):
return self._visibility
@visibility.setter
def visibility(self, value):
self._visibility = value
@property
def params(self):
return self._params
@params.setter
def params(self, value):
self._params = value
@property
def methodclass(self):
return self._methodclass
@methodclass.setter
def methodclass(self, value):
self._methodclass = value
class ClassDeclaration(object):
"""docstring for ClassDeclaration"""
@property
def classname(self):
return self._classname
@classname.setter
def classname(self, value):
self._classname = value
@property
def classregion(self):
return self._classregion
@classregion.setter
def classregion(self, value):
self._classregion = value
@property
def privateregion(self):
return self._privateregion
@privateregion.setter
def privateregion(self, value):
self._privateregion = value
@property
def protectedregion(self):
return self._protectedregion
@protectedregion.setter
def protectedregion(self, value):
self._protectedregion = value
@property
def publicregion(self):
return self._publicregion
@publicregion.setter
def publicregion(self, value):
self._publicregion = value
@property
def publishedregion(self):
return self._publishedregion
@publishedregion.setter
def publishedregion(self, value):
self._publishedregion = value
class DelphiIdeCommand(sublime_plugin.TextCommand):
# // { "keys": ["ctrl+shift+x"], "command": "delphi_ide", "args": {"teste": "delphimethodnav"}}
# view.window().run_command('show_panel',
# args={"panel": 'output.find_results', "toggle": True})
def run(self, edit, teste):
print('teste[0]:%s' % teste)
method = None
try:
method = getattr(self, teste)
except AttributeError:
raise NotImplementedError("Class `{}` does not implement `{}`".
format(self.__class__.__name__,
teste))
method()
def delphimethodnav(self):
print('vai doido')
def getMethodInformation(self):
view = self.view
cursor_region = view.sel()[0]
cursor_pt = view.sel()[0].begin()
if not view.match_selector(cursor_pt,
'function.implementation.delphi'):
# exit because it is not in a method
return None
def params(region):
params_region = view.find_by_selector(
'meta.function.parameters.delphi')
param_name_region = view.find_by_selector(
'variable.parameter.function.delphi')
params_region_filt = [
s for s in params_region if region.contains(s)]
params_region_filt = [
s for s in param_name_region if
params_region_filt[0].contains(s)]
return params_region_filt
def paramsFromRegion(region):
try:
params_region_filt = params(region)
x = [view.substr(x) for x in params_region_filt]
return x
except:
return []
def getFunctionName():
functionname = view.find_by_selector('entity.name.function')
functionnamefiltered = [
n for n in functionname if method.methodregion[0].contains(n)]
return view.substr(functionnamefiltered[0])
# has_implementation
# has_interface
# methodname
# methodregion
# visibility
# params
# methodclass
method = MethodDeclaration()
selector = view.find_by_selector
method.methodregion = [r for r in selector('meta.function.delphi')
if cursor_region.intersects(r)]
method.methodname = getFunctionName()
        method.params = paramsFromRegion(method.methodregion[0])
return method
def getClassInformation(self):
pass
```
#### File: JeisonJHA/Plugins-Development/syncronizemethoddeclaration.py
```python
import sublime_plugin
from . import *
class syncronizemethoddeclaration(sublime_plugin.TextCommand):
def run(self, edit):
print("Syncronize method")
self.syncronizemethod(edit)
def syncronizemethod(self, edit):
classe = GetClassName(self.view)
if classe:
classe += '.'
method_type = GetMethodType(self.view)
method, region_method = GetName(self.view, True)
regions = None
parameters = GetParametersName(self.view, region_method)
if parameters:
parameters = "".join(parameters)
regions = GetMethodRegionsWithParams(self.view, method, parameters)
if (regions is None) or len(regions) == 1:
regions = self.GetMethodRegions(self.view, method)
parameters = GetParameters(self.view, region_method)
method_with_params = not (parameters is None)
method_type = GetMethodType(self.view)
function_return = ''
if (method_type == 'function') or (method_type == 'class function'):
function_return, dummy = GetReturnType(
self.view, regions[0], method_with_params)
regions[0] = sublime.Region(regions[0].begin(),
dummy.end())
function_return, dummy = GetReturnType(
self.view, regions[1], method_with_params)
regions[1] = sublime.Region(regions[1].begin(),
dummy.end())
function_return = ': ' + function_return
if method_with_params:
parameters = '(' + parameters + ')'
else:
parameters = ''
method_declaration = method_type + ' ' + classe + \
method + parameters + function_return + ';'
self.view.replace(edit, regions[1], method_declaration)
method_declaration = ' ' + method_type + \
' ' + method + parameters + function_return + ';'
self.view.replace(edit, regions[0], method_declaration)
def GetMethodRegions(self, view, method):
result = []
for region in view.sel():
region_row, region_col = view.rowcol(region.begin())
function_regions = view.find_by_selector('meta.function')
if function_regions:
for r in (function_regions):
pos = 0
lines = view.substr(r).splitlines()
if (len(lines[0].split(".")) > 1 and
lines[0].split(".")[1] == ''):
pos = 1
name = clean_name.sub('', lines[pos])
s = name.strip()
if (len(s.split(".")) > 1):
s = s.split(".")[1]
s = s.split("(")[0]
if s.find(':') >= 0:
s = s.split(":")[0]
if s.find(';') >= 0:
s = s.split(";")[0]
if s == method:
result.append(r)
return result
```
#### File: JeisonJHA/Plugins-Development/tryfinally.py
```python
import sublime_plugin
import re
class tryfinally(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
selection_region = view.sel()[0]
word_region = view.word(selection_region)
word = view.substr(word_region).strip()
        word = re.sub(r'[=:(){}\s]', '', word)
for selectedReg in view.sel():
line, regionColumn = view.rowcol(selectedReg.begin())
linha = line + 1
nav_line = linha - 1
nav_pt = view.text_point(nav_line, 0)
view.set_viewport_position(view.text_to_layout(nav_pt))
qtdLinha = 0
ponto = view.text_point(line, 0)
texto_linha = view.substr(view.line(ponto))
if texto_linha.find('.') > 0:
if len(texto_linha) > 15:
while texto_linha.find(';') < 0:
ponto = view.text_point(line, 0)
texto_linha = view.substr(view.line(ponto))
if texto_linha.find(';') < 0:
line = line + 1
qtdLinha = qtdLinha + 1
if (qtdLinha == 6):
print('muito grande')
break
pass
fn_line = line + 1
pt = view.text_point(fn_line, 0)
view.insert(edit, pt, '\n' + 'try' + '\n')
linha_selecao = fn_line + 2
pt = view.text_point(linha_selecao, 0)
view.insert(edit, pt, ' ' + view.substr(word_region) + ' \n')
linha_finally = linha_selecao + 1 + qtdLinha
pt = view.text_point(linha_finally, 0)
view.insert(edit, pt, 'finally' + '\n')
linha_freeandnil = linha_finally + 1
pt = view.text_point(linha_freeandnil, 0)
if word.find('.') >= 0:
word = word.split(".")[0]
view.insert(edit, pt, ' FreeAndNil(' + word + ');' + '\n')
linha_end = linha_freeandnil + 1
pt = view.text_point(linha_end, 0)
view.insert(edit, pt, 'end;' + '\n')
```
|
{
"source": "jeisonroa1/FisicaSemiconductoresPython",
"score": 3
}
|
#### File: FisicaSemiconductoresPython/Schodinger/Coef_TR_Escalon.py
```python
import numpy as np
import scipy.constants as sc
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.figure import Figure
from matplotlib.widgets import RadioButtons, Slider
from matplotlib import colors
from Tkinter import *
# Potential value in eV:
Vo = 1
xmin=0; xmax=Vo+2; ymin=-0.5; ymax=2.2; xval=0.05; yval =2.0;
# Particle energy values in eV:
En0=np.linspace(xmin,Vo-0.00001,1000)
En1=np.linspace(Vo+0.00001,xmax,1000)
x0=[Vo,Vo]
y0=[-0.2,1.2]
def fx1(En0):
return En0-En0
def fx2(En1):
return 4*np.sqrt(En1**2-En1*Vo)/((En1**(1/2.0)+np.sqrt(En1-Vo))**2)
fig, ax = plt.subplots()
plt.subplots_adjust(left=0.1, bottom=0.16, right=0.9, top=0.75)
rax = plt.axes([0.1, 0.82, 0.8, 0.15], axisbg='white')
radio = RadioButtons(rax, ('Coeficiente de Transmicion (T)', 'Coeficiente de Reflexion (R)',
'Coeficiente de Transmicion (T) y Reflexion (R)'))
axdot = plt.axes([0.1, 0.03, 0.8, 0.04], axisbg='gray')
sdot = Slider(axdot, "$E \\ [e\cdot V ]$", 0, xmax, valinit=0)
plt.subplot(111)
plt.xlabel("$Energia \ \ (E) \ \ [e\cdot V ]$")
plt.ylabel(r'$Coeficiente \ \ de \ \ Transmision \ \ (T)$')
plt.title("$Coeficiente \ \ de \ \ Transmision \ \ (T) \ \ para \ \ un \ \ escalon \ \ de \ \ potencial $")
plt.axis([xmin,xmax,ymin,ymax])
# Plot the transmission coefficient
t0, = plt.plot(En0, fx1(En0), lw=1, color='blue', label="$T = 0, \\ \\ E < V_o$")
t1, = plt.plot(En1, fx2(En1), lw=1, color='blue', label="$T = 4 k_1 k_2 /(k_1 + k_2)^2, \\ \\ E < V_o$")
legend = plt.legend(loc=1, prop={'size':10})
puntox = ax.text(xval,yval, 'Nivel de Energia (E) [eV] = %f' % 0)
puntot = ax.text(xval,yval-0.2, 'Coef. de Transmicion (T) = %f'% 0)
##puntor = ax.text(1.7,-0.1, 'Coef. de Reflexion = %f'% 0.001)
## Transmission coefficient marker point
pt, = ax.plot(0, fx1(0), 'o',color='blue')
#Barrera
b1, = plt.plot(x0, y0, lw=1, c='black', ls='--')
xano1=((Vo-xmin)/2.0)-0.1
xano2= Vo+((xmax-Vo)/2.0)-0.1
ax.annotate("$V_o$", xy=(Vo-0.04,-0.4), color='black',
horizontalalignment='left',
verticalalignment='up',
)
ax.annotate("$E > V_o$", xy=(xano2,-0.4), color='black',
horizontalalignment='left',
verticalalignment='up',
)
ax.annotate("$E < V_o$", xy=(xano1,-0.4), color='black',
horizontalalignment='left',
verticalalignment='up',
)
band_g = 0
def sel_graf(label):
global band_g, pt, pr
global puntox, puntot, puntor, band, xmin,xmax,ymin,ymax
sdot.reset()
plt.cla()
plt.axis([xmin,xmax,ymin,ymax])
    # Potential step (barrier)
b1, = plt.plot(x0, y0, lw=1, c='black', ls='--')
puntox = ax.text(xval,yval, 'Nivel de Energia (E) [eV] = %f' %0)
sel_coef = {'Coeficiente de Transmicion (T)': 0,
'Coeficiente de Reflexion (R)': 1,
'Coeficiente de Transmicion (T) y Reflexion (R)': 2}
label1 = sel_coef[label]
if label1 == 0:
band_g = 0
        # Plot the transmission coefficient
t0, = plt.plot(En0, fx1(En0), lw=1, color='blue', label="$T = 0, \\ \\ E < V_o$")
t1, = plt.plot(En1, fx2(En1), lw=1, color='blue', label="$T = 4 k_1 k_2 /(k_1 + k_2)^2, \\ \\ E > V_o$")
        # Plot the transmission coefficient marker point
pt, = ax.plot(xmin, fx1(xmin), 'o',color='blue')
plt.xlabel("$Energia \ \ (E) \ \ [e\cdot V ]$")
plt.ylabel("$Coeficiente \ \ de \ \ Transmision \ \ (T)$")
plt.title("$Coeficiente \ \ de \ \ Transmision \ \ (T) \ \ para \ \ un \ \ escalon \ \ de \ \ potencial $")
puntot = ax.text(xval,yval-0.2, 'Coef. de Transmicion (T) = %f'% 0)
elif label1 == 1:
band_g = 1
        # Plot the reflection coefficient
r0, = plt.plot(En0, 1-fx1(En0), lw=1, color='red', label="$R = 1, \\ \\ E < V_o$")
r1, = plt.plot(En1, 1-fx2(En1), lw=1, color='red', label="$R = (k_1 - k_2)^2/(k_1 + k_2)^2, \\ \\ E > V_o$")
        # Plot the reflection coefficient marker point
pr, = ax.plot(xmin, 1-fx1(xmin), 'o',color='red')
plt.xlabel("$Energia \ \ (E) \ \ [e\cdot V ]$")
plt.ylabel("$Coeficiente \ \ de \ \ Reflexion \ \ (R)$")
plt.title("$Coeficiente \ \ de \ \ Reflexion \ \ (R) \ \ para \ \ un \ \ escalon \ \ de \ \ potencial $")
puntor = ax.text(xval,yval-0.2, 'Coef. de Reflexion (R)= %f'% 1)
elif label1 == 2:
band_g = 2
        # Plot the transmission coefficient marker point
pt, = ax.plot(xmin, fx1(xmin), 'o',color='blue')
        # Plot the reflection coefficient marker point
pr, = ax.plot(xmin, 1-fx1(xmin), 'o',color='red')
        # Plot the transmission and reflection coefficients
t0, = plt.plot(En0, fx1(En0), lw=1, color='blue', label="$T = 0, \\ \\ E < V_o$")
t1, = plt.plot(En1, fx2(En1), lw=1, color='blue', label="$T = 4 k_1 k_2 /(k_1 + k_2)^2, \\ \\ E > V_o$")
r0, = plt.plot(En0, 1-fx1(En0), lw=1, color='red', label="$R = 1, \\ \\ E < V_o$")
r1, = plt.plot(En1, 1-fx2(En1), lw=1, color='red', label="$R = (k_1 - k_2)^2/(k_1 + k_2)^2, \\ \\ E > V_o$")
puntot = ax.text(xval,yval-0.2, 'Coef. de Transmicion (T) = %f'% 0)
puntor = ax.text(xval,yval-0.4, 'Coef. de Reflexion (R)= %f'% 1)
plt.xlabel("$Energia \ \ (E) \ \ [e\cdot V ]$")
plt.ylabel("$Coeficiente \ \ de \ \ Transmision \ \ (T) \ \ y \ \ Reflexion \ \ (R)$")
plt.title("$Coeficiente \ \ de \ \ Transmision \ \ (T) \ \ y \ \ Reflexion \ \ (R) \ \ para \ \
un \ \ escalon \ \ de \ \ potencial $")
ax.annotate("$V_o$", xy=(Vo-0.04,-0.4), color='black',
horizontalalignment='left',
verticalalignment='up',
)
ax.annotate("$E > V_o$", xy=(xano2,-0.4), color='black',
horizontalalignment='left',
verticalalignment='up',
)
ax.annotate("$E < V_o$", xy=(xano1,-0.4), color='black',
horizontalalignment='left',
verticalalignment='up',
)
legend = plt.legend(loc=1, prop={'size':10})
plt.ylim(ymin,ymax)
plt.xlim(xmin,xmax)
plt.draw()
print band_g
radio.on_clicked(sel_graf)
def graf_p(val):
global puntox, puntot, puntor, band
dot = sdot.val
if band_g==0:
if dot<=Vo:
pt.set_xdata(dot)
pt.set_ydata(fx1(dot))
t = fx1(dot)
elif dot>Vo:
pt.set_xdata(dot)
pt.set_ydata(fx2(dot))
t = fx2(dot)
puntox.remove()
puntot.remove()
puntox = ax.text(xval,yval, 'Nivel de Energia (E) [eV] = %f' % dot)
puntot = ax.text(xval,yval-0.2, 'Coef. de Transmicion (T) = %f'% t)
elif band_g==1:
if dot<=Vo:
pr.set_xdata(dot)
pr.set_ydata(1-fx1(dot))
r = 1-fx1(dot)
elif dot>Vo:
pr.set_xdata(dot)
pr.set_ydata(1-fx2(dot))
r = 1-fx2(dot)
puntox.remove()
puntor.remove()
puntox = ax.text(xval,yval, 'Nivel de Energia (E) [eV] = %f' % dot)
puntor = ax.text(xval,yval-0.2, 'Coef. de Reflexion (R) = %f'% r)
elif band_g==2:
if dot<=Vo:
pt.set_xdata(dot)
pt.set_ydata(fx1(dot))
pr.set_xdata(dot)
pr.set_ydata(1-fx1(dot))
t = fx1(dot)
r = 1-fx1(dot)
elif dot>Vo:
pt.set_xdata(dot)
pt.set_ydata(fx2(dot))
pr.set_xdata(dot)
pr.set_ydata(1-fx2(dot))
t = fx2(dot)
r = 1-fx2(dot)
puntox.remove()
puntot.remove()
puntor.remove()
puntox = ax.text(xval,yval, 'Nivel de Energia (E) [eV] = %f' % dot)
puntot = ax.text(xval,yval-0.2, 'Coef. de Transmicion (T) = %f'% t)
puntor = ax.text(xval,yval-0.4, 'Coef. de Reflexion (R)= %f'% r)
else:
pass
fig.canvas.draw()
sdot.on_changed(graf_p)
plt.show()
```
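For reference, the curves returned by `fx1` and `fx2` above are the textbook transmission and reflection coefficients for a potential step of height V_o; the following is only a restatement of the formulas already given in the plot legends, written out with the wavenumbers made explicit.
```latex
% Step potential of height V_o (same formulas as the plot legends above)
E < V_o:\quad T = 0, \qquad R = 1
E > V_o:\quad T = \frac{4 k_1 k_2}{(k_1 + k_2)^2}
              = \frac{4\sqrt{E(E - V_o)}}{\bigl(\sqrt{E} + \sqrt{E - V_o}\bigr)^2},
        \qquad R = \frac{(k_1 - k_2)^2}{(k_1 + k_2)^2} = 1 - T,
\quad\text{with } k_1 = \sqrt{2mE}/\hbar,\ \ k_2 = \sqrt{2m(E - V_o)}/\hbar .
```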
|
{
"source": "jeisonsrz/Curso-Deep-Learning-CreaTIC",
"score": 3
}
|
#### File: deep_api/deep/context_model_init.py
```python
from keras.models import load_model
import tensorflow as tf
import os
# I made a mistake here earlier; that is why the model was not working
dirname = os.path.dirname(__file__)
# Configure the paths for the different models to use here
model = os.path.join(dirname, '../../save/models/MLP')
weights = os.path.join(dirname, '../../save/weights/weights.mlp.hdf5')
def load_model_from_path(path_model,path_weigth):
loaded_model = load_model(path_model)
loaded_model.load_weights(path_weigth)
return loaded_model
def load_all_models():
graph = tf.get_default_graph()
nn_models_dict = dict()
nn_models_dict = {
'MLP' : load_model_from_path(model, weights)
        # Add the saved models to this dictionary
}
return nn_models_dict, graph
```
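A brief sketch of how the loader above is typically consumed from an API handler; the view code is not part of this file, and the input shape expected by the `'MLP'` model is an assumption.
```python
# Hypothetical consumer of load_all_models() (not part of the original file).
import numpy as np

nn_models_dict, graph = load_all_models()

def predict_mlp(features):
    # features: 2-D array shaped like the MLP's training input (assumed shape)
    with graph.as_default():          # reuse the graph captured at load time
        return nn_models_dict['MLP'].predict(np.asarray(features))
```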
#### File: Curso-Deep-Learning-CreaTIC/experiments/3_evaluator.py
```python
from sklearn.metrics import confusion_matrix
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn import svm, datasets
import itertools
import ipdb
import sys, os
sys.path.append(os.getcwd())
class Evaluator():
def __init__(self):
return
def save_confussion_matrix(self, y_test, y_pred, class_names, normalize=True,
file_path='./saved/results/confussion_matrix.png',
cmap=plt.cm.Blues, title=None):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
cm = confusion_matrix(y_test, y_pred)
plt.figure()
if normalize:
cm = cm.astype('float') / (cm.sum(axis=1)[:, np.newaxis])
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.colorbar()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names, rotation=45)
plt.yticks(tick_marks, class_names)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
#plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
if title:
plt.title(title)
plt.savefig(file_path)
plt.close()
return
def save_precision(self, training_precision, validation_precision,
file_path='./saved/precision/precision.png',
title=None):
"""
This function plots together the precision curves for training and validation steps.
"""
plt.figure()
x = [step[0] for step in training_precision]
y = [step[1] for step in training_precision]
plt.plot(x, y)
x = [step[0] for step in validation_precision]
y = [step[1] for step in validation_precision]
plt.plot(x, y)
plt.axis([0, 2000, 0, 1])
#plt.tight_layout()
plt.xlabel('Number of steps')
plt.ylabel('Precision')
if title:
plt.title(title)
plt.legend(['Precision @ Training', 'Precision @ Validation'], loc='lower right')
plt.savefig(file_path)
plt.close()
def save_loss(self, loss_values, file_path='./saved/precision/loss.png',
title=None):
"""
        This function plots the training loss curve recorded during training.
"""
plt.figure()
x = [step[0] for step in loss_values]
y = [step[1] for step in loss_values]
plt.plot(x, y)
plt.axis([0, 2000, 0, max(y)])
#plt.tight_layout()
plt.xlabel('Number of steps')
plt.ylabel('Loss value')
if title:
plt.title(title)
plt.legend(['Loss @ Training'], loc='upper right')
plt.savefig(file_path)
plt.close()
def save_model_comparison(self, results, file_path='./saved/precision/precision_comparison.png'):
models = sorted(results.keys())
average_precision = [results[model] for model in models]
# create barplot using matplotlib
plt.bar(models, average_precision)
plt.xlabel('Model names')
plt.ylabel('Average precision')
plt.title('Precision comparison between models')
plt.savefig(file_path)
plt.close()
def print_accuracy_sunnary(self, y_test, y_pred, class_names,
file_path='./classification_report.txt'):
text = classification_report(y_test, y_pred, target_names=class_names)
with open(file_path, 'w') as file:
print(text, file=file)
return
```
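As a quick illustration (not part of the original experiments), the class above can be driven with a toy label/prediction pair; the class names and output path below are made up.
```python
# Toy example for the Evaluator above; labels, class names and paths are illustrative.
import numpy as np

ev = Evaluator()
y_test = np.array([0, 1, 2, 2, 1, 0])
y_pred = np.array([0, 2, 2, 2, 1, 0])
ev.save_confussion_matrix(y_test, y_pred,
                          class_names=['cat', 'dog', 'bird'],
                          file_path='./confusion_matrix.png',
                          title='Toy confusion matrix')
```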
#### File: Curso-Deep-Learning-CreaTIC/models/07_convolution_net.py
```python
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.core import Dropout
from keras.layers.core import Flatten
from keras.layers.core import Dense
from keras.layers import Input
from keras.models import Model
from keras.regularizers import *
def get_model():
Input_1 = Input(shape=(28, 28, 1))
Conv2D_6 = Conv2D(activation= 'relu' ,nb_col= 3,nb_filter= 32,nb_row= 3,border_mode= 'same' )(Input_1)
MaxPooling2D_4 = MaxPooling2D(strides= (2,2),border_mode= 'same' ,pool_size= (2,2))(Conv2D_6)
Dropout_1 = Dropout(p= 0.3)(MaxPooling2D_4)
Conv2D_8 = Conv2D(nb_col= 3,nb_filter= 64,nb_row= 3,border_mode= 'same' ,init= 'glorot_normal' ,activation= 'relu' )(Dropout_1)
MaxPooling2D_6 = MaxPooling2D(strides= (2,2),border_mode= 'same' ,pool_size= (2,2))(Conv2D_8)
Dropout_2 = Dropout(p= 0.3)(MaxPooling2D_6)
Conv2D_9 = Conv2D(nb_col= 3,nb_filter= 128,nb_row= 3,border_mode= 'same' ,init= 'glorot_normal' ,activation= 'relu' )(Dropout_2)
MaxPooling2D_7 = MaxPooling2D(border_mode= 'same' ,pool_size= (2,2))(Conv2D_9)
Flatten_2 = Flatten()(MaxPooling2D_7)
Dropout_3 = Dropout(p= 0.3)(Flatten_2)
Dense_4 = Dense(activation= 'relu' ,init= 'glorot_normal' ,output_dim= 625)(Dropout_3)
Dropout_4 = Dropout(p= 0.5)(Dense_4)
Dense_5 = Dense(activation= 'softmax' ,output_dim= 10)(Dropout_4)
return Model([Input_1],[Dense_5])
from keras.optimizers import *
def get_optimizer():
return Adadelta()
def get_loss_function():
return 'categorical_crossentropy'
def get_batch_size():
return 64
def get_num_epoch():
return 10
def get_data_config():
return {"mapping": {"Image": {"port": "InputPort0", "type": "Image"}, "Digit Label": {"port": "OutputPort0", "type": "Categorical"}}, "samples": {"split": 1, "validation": 14000, "test": 14000, "training": 42000}, "dataset": {"samples": 70000, "type": "public", "name": "mnist"}, "datasetLoadOption": "batch", "numPorts": 1}
```
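The file only exposes accessor functions; a driver along the following lines would be needed to actually train the network. This is a hypothetical sketch: it assumes a Keras version that still accepts the legacy layer kwargs used in `get_model()` (`nb_filter`, `border_mode`, ...) and MNIST preprocessing matching `get_data_config()`.
```python
# Hypothetical training driver (not included in the repo) for the accessors above.
import numpy as np
from keras.datasets import mnist

(x_train, y_train), _ = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1).astype('float32') / 255.0
y_train = np.eye(10)[y_train]        # one-hot targets for the softmax(10) output

model = get_model()
model.compile(optimizer=get_optimizer(), loss=get_loss_function(), metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=get_batch_size(), epochs=get_num_epoch())
```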
|
{
"source": "Jeisse/content-type-identifier",
"score": 3
}
|
#### File: Jeisse/content-type-identifier/MediaType_test.py
```python
import unittest
from src.media_identifier import identifier
class TestIdentifier(unittest.TestCase):
    def test_get_type_when_file_name_valid(self):
media = identifier.MediaType()
jpeg_file = "test.jpeg"
png_file = "test.png"
self.assertEqual(media.get_type(jpeg_file), identifier.MediaType.jpeg)
self.assertEqual(media.get_type(png_file), identifier.MediaType.png)
def test_get_type_when_file_has_substring(self):
media = identifier.MediaType()
xlsx_file = "bhbdhbhdb.xlsx"
tiff_file = "njnjnsj.tiff"
self.assertEqual(media.get_type(xlsx_file), identifier.MediaType.xlsx)
self.assertNotEqual(media.get_type(xlsx_file), identifier.MediaType.xls)
self.assertEqual(media.get_type(tiff_file), identifier.MediaType.tiff)
self.assertNotEqual(media.get_type(tiff_file), identifier.MediaType.tif)
def test_get_type_when_file_name_has_string_that_match_media_types_but_are_not_at_the_end(self):
media = identifier.MediaType()
file = "bhbd.pnghbhdb"
file2 = ".jpeg.png"
self.assertEqual(media.get_type(file), "No identified")
self.assertEqual(media.get_type(file2), identifier.MediaType.png)
def test_get_type_when_does_not_identify_file(self):
media = identifier.MediaType()
        not_identified = "hshsh"
        self.assertEqual(media.get_type(not_identified), "No identified")
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "Jeitto/cnabera",
"score": 3
}
|
#### File: cnabera/cnabera/format_rules.py
```python
from collections import namedtuple
from datetime import datetime
def _truncate_value(value, length):
return str(value)[:length]
def return_the_same_entry(value, **kwargs):
return value
def complete_with_space_the_right(value, **kwargs):
truncated_value = _truncate_value(value, kwargs['length'])
return truncated_value.ljust(kwargs['length'])
def complete_with_space_the_left(value, **kwargs):
truncated_value = _truncate_value(value, kwargs['length'])
return truncated_value.rjust(kwargs['length'])
def complete_with_zero_the_left(value, **kwargs):
truncated_value = _truncate_value(value, kwargs['length'])
return truncated_value.zfill(kwargs['length'])
def get_date_as_dd_mm_yy(date: datetime, **kwargs):
return date.strftime('%d%m%y')
def get_string_space_by_length(value, **kwargs):
return ' ' * kwargs['length']
def transform_float_to_int_with_zero_left(value, **kwargs):
convert_result = int(value * 100)
return complete_with_zero_the_left(convert_result, **kwargs)
Rules = namedtuple('Rules', 'position_init position_end length method_formatter')
class BaseFormatRules:
"""
Example data:
id_registry = Rules(0, 0, 1, return_the_same_entry)
"""
...
class FormatRulesHeaders(BaseFormatRules):
...
class FormatRulesTransaction(BaseFormatRules):
...
class FormatRulesFooter(BaseFormatRules):
...
class GenFile:
LENGTH = 444
formatter_headers = FormatRulesHeaders
formatter_transactions = FormatRulesTransaction
formatter_footer = FormatRulesFooter
line_number = 1
def run_rules_format(self, formatter, basestring, data):
for key, value in data.items():
rules = getattr(formatter, key)
current_value = str(rules.method_formatter(value, **rules._asdict()))
before_pointer_cut = basestring[:rules.position_init]
after_pointer_cut = basestring[rules.position_end + 1:]
basestring = f'{before_pointer_cut}{current_value}{after_pointer_cut}'
return basestring
def do_header(self, data):
basestring = " " * self.LENGTH
data["sequence_registry"] = self.line_number
self.line_number += 1
return self.run_rules_format(self.formatter_headers, basestring, data)
def do_line_transaction(self, line):
basestring = " " * self.LENGTH
line["sequential_number"] = self.line_number
self.line_number += 1
return self.run_rules_format(self.formatter_transactions, basestring, line)
def do_transaction(self, data):
lines = []
for line in data:
lines.append(self.do_line_transaction(line))
return lines
def do_footer(self, data):
basestring = " " * self.LENGTH
data["sequential_number"] = self.line_number
return self.run_rules_format(self.formatter_footer, basestring, data)
def execute(self, file_name, header_data, transaction_data, footer_data):
line_header = self.do_header(header_data)
lines_transactions = self.do_transaction(transaction_data)
line_footer = self.do_footer(footer_data)
with open(file_name, 'w') as file:
file.write(f'{line_header}\n')
for line_data in lines_transactions:
file.write(f'{line_data}\n')
file.write(line_footer)
```
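The `BaseFormatRules` docstring only hints at how fields are declared, so here is a hypothetical end-to-end sketch of the rules engine above; the field names, positions and file name are illustrative and do not correspond to a real CNAB 444 layout.
```python
# Illustrative use of GenFile and the Rules namedtuple (field layout is made up).
class MyHeaderRules(BaseFormatRules):
    id_registry = Rules(0, 0, 1, return_the_same_entry)
    company_name = Rules(1, 30, 30, complete_with_space_the_right)
    sequence_registry = Rules(438, 443, 6, complete_with_zero_the_left)

class MyTransactionRules(BaseFormatRules):
    id_registry = Rules(0, 0, 1, return_the_same_entry)
    amount = Rules(1, 13, 13, transform_float_to_int_with_zero_left)
    sequential_number = Rules(438, 443, 6, complete_with_zero_the_left)

class MyFooterRules(BaseFormatRules):
    id_registry = Rules(0, 0, 1, return_the_same_entry)
    sequential_number = Rules(438, 443, 6, complete_with_zero_the_left)

class MyGenFile(GenFile):
    formatter_headers = MyHeaderRules
    formatter_transactions = MyTransactionRules
    formatter_footer = MyFooterRules

MyGenFile().execute(
    'remittance.rem',
    header_data={'id_registry': 0, 'company_name': 'ACME LTDA'},
    transaction_data=[{'id_registry': 1, 'amount': 150.75}],
    footer_data={'id_registry': 9},
)
```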
|
{
"source": "Jeiyoon/emoji",
"score": 2
}
|
#### File: Jeiyoon/emoji/utils.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import torch
from pathlib import Path
from tensorflow import keras
import numpy as np
from konlpy.tag import Twitter
from collections import Counter
from threading import Thread
import six
from torch import nn
class Config:
def __init__(self, json_path):
with open(json_path, mode='r') as io:
params = json.loads(io.read())
self.__dict__.update(params)
def save(self, json_path):
with open(json_path, mode='w') as io:
json.dump(self.__dict__, io, indent=4)
def update(self, json_path):
with open(json_path, mode='r') as io:
params = json.loads(io.read())
self.__dict__.update(params)
@property
def dict(self):
return self.__dict__
class CheckpointManager:
def __init__(self, model_dir):
if not isinstance(model_dir, Path):
model_dir = Path(model_dir)
self._model_dir = model_dir
def save_checkpoint(self, state, filename):
torch.save(state, self._model_dir / filename)
def load_checkpoint(self, filename):
state = torch.load(self._model_dir / filename, map_location=torch.device('cpu'))
return state
class SummaryManager:
def __init__(self, model_dir):
if not isinstance(model_dir, Path):
model_dir = Path(model_dir)
self._model_dir = model_dir
self._summary = {}
def save(self, filename):
with open(self._model_dir / filename, mode='w') as io:
json.dump(self._summary, io, indent=4)
def load(self, filename):
with open(self._model_dir / filename, mode='r') as io:
metric = json.loads(io.read())
self.update(metric)
def update(self, summary):
self._summary.update(summary)
def reset(self):
self._summary = {}
@property
def summary(self):
return self._summary
class Vocabulary(object):
"""Vocab Class"""
def __init__(self, token_to_idx=None):
self.token_to_idx = {}
self.idx_to_token = {}
self.idx = 0
self.PAD = self.padding_token = "[PAD]"
self.START_TOKEN = "<S>"
self.END_TOKEN = "<T>"
self.UNK = "[UNK]"
self.CLS = "[CLS]"
self.MASK = "[MASK]"
self.SEP = "[SEP]"
self.SEG_A = "[SEG_A]"
self.SEG_B = "[SEG_B]"
self.NUM = "<num>"
self.cls_token = self.CLS
self.sep_token = self.SEP
self.special_tokens = [self.PAD,
self.START_TOKEN,
self.END_TOKEN,
self.UNK,
self.CLS,
self.MASK,
self.SEP,
self.SEG_A,
self.SEG_B,
self.NUM]
self.init_vocab()
if token_to_idx is not None:
self.token_to_idx = token_to_idx
self.idx_to_token = {v: k for k, v in token_to_idx.items()}
self.idx = len(token_to_idx) - 1
# if pad token in token_to_idx dict, get pad_id
if self.PAD in self.token_to_idx:
self.PAD_ID = self.transform_token2idx(self.PAD)
else:
self.PAD_ID = 0
def init_vocab(self):
for special_token in self.special_tokens:
self.add_token(special_token)
self.PAD_ID = self.transform_token2idx(self.PAD)
def __len__(self):
return len(self.token_to_idx)
def to_indices(self, tokens):
return [self.transform_token2idx(X_token) for X_token in tokens]
def add_token(self, token):
if not token in self.token_to_idx:
self.token_to_idx[token] = self.idx
self.idx_to_token[self.idx] = token
self.idx += 1
def transform_token2idx(self, token, show_oov=False):
try:
return self.token_to_idx[token]
except:
if show_oov is True:
print("key error: " + str(token))
token = self.UNK
return self.token_to_idx[token]
def transform_idx2token(self, idx):
try:
return self.idx_to_token[idx]
except:
print("key error: " + str(idx))
idx = self.token_to_idx[self.UNK]
return self.idx_to_token[idx]
def build_vocab(self, list_of_str, threshold=1, vocab_save_path="./data_in/token_vocab.json",
split_fn=Twitter().morphs):
"""Build a token vocab"""
def do_concurrent_tagging(start, end, text_list, counter):
for i, text in enumerate(text_list[start:end]):
text = text.strip()
text = text.lower()
try:
tokens_ko = split_fn(text)
# tokens_ko = [str(pos[0]) + '/' + str(pos[1]) for pos in tokens_ko]
counter.update(tokens_ko)
if i % 1000 == 0:
print("[%d/%d (total: %d)] Tokenized input text." % (
start + i, start + len(text_list[start:end]), len(text_list)))
except Exception as e: # OOM, Parsing Error
print(e)
continue
counter = Counter()
num_thread = 4
thread_list = []
num_list_of_str = len(list_of_str)
for i in range(num_thread):
thread_list.append(Thread(target=do_concurrent_tagging, args=(
int(i * num_list_of_str / num_thread), int((i + 1) * num_list_of_str / num_thread), list_of_str,
counter)))
for thread in thread_list:
thread.start()
for thread in thread_list:
thread.join()
# vocab_report
print(counter.most_common(10)) # print most common tokens
tokens = [token for token, cnt in counter.items() if cnt >= threshold]
for i, token in enumerate(tokens):
self.add_token(str(token))
print("len(self.token_to_idx): ", len(self.token_to_idx))
import json
with open(vocab_save_path, 'w', encoding='utf-8') as f:
json.dump(self.token_to_idx, f, ensure_ascii=False, indent=4)
return self.token_to_idx
def keras_pad_fn(token_ids_batch, maxlen, pad_id=0, padding='post', truncating='post'):
padded_token_ids_batch = pad_sequences(token_ids_batch,
value=pad_id, # vocab.transform_token2idx(PAD),
padding=padding,
truncating=truncating,
maxlen=maxlen)
return padded_token_ids_batch
def pad_sequences(sequences, maxlen=None, dtype='int32',
padding='pre', truncating='pre', value=0.):
"""Pads sequences to the same length.
This function transforms a list of
`num_samples` sequences (lists of integers)
into a 2D Numpy array of shape `(num_samples, num_timesteps)`.
`num_timesteps` is either the `maxlen` argument if provided,
or the length of the longest sequence otherwise.
Sequences that are shorter than `num_timesteps`
are padded with `value` at the end.
Sequences longer than `num_timesteps` are truncated
so that they fit the desired length.
The position where padding or truncation happens is determined by
the arguments `padding` and `truncating`, respectively.
Pre-padding is the default.
# Arguments
sequences: List of lists, where each element is a sequence.
maxlen: Int, maximum length of all sequences.
dtype: Type of the output sequences.
To pad sequences with variable length strings, you can use `object`.
padding: String, 'pre' or 'post':
pad either before or after each sequence.
truncating: String, 'pre' or 'post':
remove values from sequences larger than
`maxlen`, either at the beginning or at the end of the sequences.
value: Float or String, padding value.
# Returns
x: Numpy array with shape `(len(sequences), maxlen)`
# Raises
ValueError: In case of invalid values for `truncating` or `padding`,
or in case of invalid shape for a `sequences` entry.
"""
if not hasattr(sequences, '__len__'):
raise ValueError('`sequences` must be iterable.')
num_samples = len(sequences)
lengths = []
for x in sequences:
try:
lengths.append(len(x))
except TypeError:
raise ValueError('`sequences` must be a list of iterables. '
'Found non-iterable: ' + str(x))
if maxlen is None:
maxlen = np.max(lengths)
# take the sample shape from the first non empty sequence
# checking for consistency in the main loop below.
sample_shape = tuple()
for s in sequences:
if len(s) > 0:
sample_shape = np.asarray(s).shape[1:]
break
is_dtype_str = np.issubdtype(dtype, np.str_) or np.issubdtype(dtype, np.unicode_)
if isinstance(value, six.string_types) and dtype != object and not is_dtype_str:
raise ValueError("`dtype` {} is not compatible with `value`'s type: {}\n"
"You should set `dtype=object` for variable length strings."
.format(dtype, type(value)))
x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype)
for idx, s in enumerate(sequences):
if not len(s):
continue # empty list/array was found
if truncating == 'pre':
trunc = s[-maxlen:]
elif truncating == 'post':
trunc = s[:maxlen]
else:
raise ValueError('Truncating type "%s" '
'not understood' % truncating)
# check `trunc` has expected shape
trunc = np.asarray(trunc, dtype=dtype)
if trunc.shape[1:] != sample_shape:
raise ValueError('Shape of sample %s of sequence at position %s '
'is different from expected shape %s' %
(trunc.shape[1:], idx, sample_shape))
if padding == 'post':
x[idx, :len(trunc)] = trunc
elif padding == 'pre':
x[idx, -len(trunc):] = trunc
else:
raise ValueError('Padding type "%s" not understood' % padding)
return x
class Tokenizer:
""" Tokenizer class"""
def __init__(self, vocab, split_fn, pad_fn, maxlen):
self._vocab = vocab
self._split = split_fn
self._pad = pad_fn
self._maxlen = maxlen
# def split(self, string: str) -> list[str]:
def split(self, string):
tokens = self._split(string)
return tokens
# def transform(self, list_of_tokens: list[str]) -> list[int]:
def transform(self, tokens):
indices = self._vocab.to_indices(tokens)
pad_indices = self._pad(indices, pad_id=0, maxlen=self._maxlen) if self._pad else indices
return pad_indices
# def split_and_transform(self, string: str) -> list[int]:
def split_and_transform(self, string):
return self.transform(self.split(string))
@property
def vocab(self):
return self._vocab
def list_of_tokens_to_list_of_token_ids(self, X_token_batch):
X_ids_batch = []
for X_tokens in X_token_batch:
X_ids_batch.append([self._vocab.transform_token2idx(X_token) for X_token in X_tokens])
return X_ids_batch
def list_of_string_to_list_of_tokens(self, X_str_batch):
X_token_batch = [self._split(X_str) for X_str in X_str_batch]
return X_token_batch
def list_of_string_to_list_token_ids(self, X_str_batch):
X_token_batch = self.list_of_string_to_list_of_tokens(X_str_batch)
X_ids_batch = self.list_of_tokens_to_list_of_token_ids(X_token_batch)
return X_ids_batch
def list_of_string_to_arr_of_pad_token_ids(self, X_str_batch, add_start_end_token=False):
X_token_batch = self.list_of_string_to_list_of_tokens(X_str_batch)
# print("X_token_batch: ", X_token_batch)
if add_start_end_token is True:
return self.add_start_end_token_with_pad(X_token_batch)
else:
X_ids_batch = self.list_of_tokens_to_list_of_token_ids(X_token_batch)
pad_X_ids_batch = self._pad(X_ids_batch, pad_id=self._vocab.PAD_ID, maxlen=self._maxlen)
return pad_X_ids_batch
def list_of_tokens_to_list_of_cls_sep_token_ids(self, X_token_batch):
X_ids_batch = []
for X_tokens in X_token_batch:
X_tokens = [self._vocab.cls_token] + X_tokens + [self._vocab.sep_token]
X_ids_batch.append([self._vocab.transform_token2idx(X_token) for X_token in X_tokens])
return X_ids_batch
def list_of_string_to_arr_of_cls_sep_pad_token_ids(self, X_str_batch):
X_token_batch = self.list_of_string_to_list_of_tokens(X_str_batch)
X_ids_batch = self.list_of_tokens_to_list_of_cls_sep_token_ids(X_token_batch)
pad_X_ids_batch = self._pad(X_ids_batch, pad_id=self._vocab.PAD_ID, maxlen=self._maxlen)
return pad_X_ids_batch
def list_of_string_to_list_of_cls_sep_token_ids(self, X_str_batch):
X_token_batch = self.list_of_string_to_list_of_tokens(X_str_batch)
X_ids_batch = self.list_of_tokens_to_list_of_cls_sep_token_ids(X_token_batch)
return X_ids_batch
def add_start_end_token_with_pad(self, X_token_batch):
dec_input_token_batch = [[self._vocab.START_TOKEN] + X_token for X_token in X_token_batch]
dec_output_token_batch = [X_token + [self._vocab.END_TOKEN] for X_token in X_token_batch]
dec_input_token_batch = self.list_of_tokens_to_list_of_token_ids(dec_input_token_batch)
pad_dec_input_ids_batch = self._pad(dec_input_token_batch, pad_id=self._vocab.PAD_ID, maxlen=self._maxlen)
dec_output_ids_batch = self.list_of_tokens_to_list_of_token_ids(dec_output_token_batch)
pad_dec_output_ids_batch = self._pad(dec_output_ids_batch, pad_id=self._vocab.PAD_ID, maxlen=self._maxlen)
return pad_dec_input_ids_batch, pad_dec_output_ids_batch
def decode_token_ids(self, token_ids_batch):
list_of_token_batch = []
for token_ids in token_ids_batch:
token_token = [self._vocab.transform_idx2token(token_id) for token_id in token_ids]
# token_token = [self._vocab[token_id] for token_id in token_ids]
list_of_token_batch.append(token_token)
return list_of_token_batch
class BERTClassifier(nn.Module):
def __init__(self,
bert,
hidden_size = 768,
num_classes = 7,
dr_rate = None,
params = None):
super(BERTClassifier, self).__init__()
self.bert = bert
self.dr_rate = dr_rate
self.classifier = nn.Linear(hidden_size, num_classes)
if dr_rate:
self.dropout = nn.Dropout(p=dr_rate)
def gen_attention_mask(self, token_ids, valid_length):
attention_mask = torch.zeros_like(token_ids)
for i, v in enumerate(valid_length):
attention_mask[i][:v] = 1
return attention_mask.float()
def forward(self, token_ids, valid_length, segment_ids):
attention_mask = self.gen_attention_mask(token_ids, valid_length)
_, pooler = self.bert(input_ids=token_ids, token_type_ids=segment_ids.long(), attention_mask=attention_mask.float().to(token_ids.device))
if self.dr_rate:
out = self.dropout(pooler)
else:
out = pooler
return self.classifier(out)
```
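A small usage sketch of the `Vocabulary`/`Tokenizer` pair defined above; it assumes `konlpy` (and a Java runtime for the `Twitter` tagger) is available, and the sentences and output path are placeholders.
```python
# Illustrative tokenization round-trip (sentences and paths are placeholders).
from konlpy.tag import Twitter

vocab = Vocabulary()
tokenizer = Tokenizer(vocab=vocab, split_fn=Twitter().morphs,
                      pad_fn=keras_pad_fn, maxlen=32)

sentences = ["오늘 기분이 너무 좋아", "이 영화 정말 별로야"]
vocab.build_vocab(sentences, threshold=1, vocab_save_path="./token_vocab.json")

token_ids = tokenizer.list_of_string_to_arr_of_pad_token_ids(sentences)
print(token_ids.shape)   # (2, 32) -- padded with vocab.PAD_ID
```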
|
{
"source": "jeizenga/vgrna-project-scripts",
"score": 3
}
|
#### File: vgrna-project-scripts/python/calc_allele_rpvg_expression.py
```python
import sys
import os
import subprocess
import pickle
import gzip
from Bio.Seq import Seq
from Bio import SeqIO
from utils import *
def parse_hst_info(filename):
hst_info = {}
hst_file = gzip.open(filename, "rb")
for line in hst_file:
line_split = line.split("\t")
assert(len(line_split) == 5)
if line_split[0] == "Name":
continue
if not line_split[2] in hst_info:
hst_info[line_split[2]] = []
hst_info[line_split[2]].append((line_split[0], [line_split[4].split(",")[0].split("_")[i] for i in [2,4]]))
hst_file.close()
return hst_info
def parse_genome(filename):
genome = {}
for record in SeqIO.parse(filename, "fasta"):
if not record.id in genome:
genome[record.id] = ""
genome[record.id] += str(record.seq)
return genome
def parse_rpvg_haplotypes(filename):
rpvg_haps = {}
rpvg_file = gzip.open(filename, "rb")
for line in rpvg_file:
line_split = line.split("\t")
assert(len(line_split) >= 4)
if line_split[0] == "Name1" or line_split[0] == "Name_1":
continue
if not line_split[0] in rpvg_haps:
rpvg_haps[line_split[0]] = {}
assert(not line_split[1] in rpvg_haps[line_split[0]])
rpvg_haps[line_split[0]][line_split[1]] = float(line_split[3])
if line_split[0] != line_split[1]:
if not line_split[1] in rpvg_haps:
rpvg_haps[line_split[1]] = {}
assert(not line_split[0] in rpvg_haps[line_split[1]])
rpvg_haps[line_split[1]][line_split[0]] = float(line_split[3])
rpvg_file.close()
return rpvg_haps
def parse_rpvg_expression(filename):
rpvg_exp = {}
rpvg_file = gzip.open(filename, "rb")
for line in rpvg_file:
line_split = line.split("\t")
assert(len(line_split) == 7)
if line_split[0] == "Name":
continue
assert(not line_split[0] in rpvg_exp)
if float(line_split[6]) > 0:
rpvg_exp[line_split[0]] = float(line_split[6])
rpvg_file.close()
return rpvg_exp
printScriptHeader()
if len(sys.argv) != 7:
print("Usage: python calc_allele_rpvg_expression.py <variant_vcf_gz_name> <hst_input_gz_name> <genome_fasta_file> <rpvg_haplotypes_gz_name> <rpvg_expression_gz_name> <output_file_name>\n")
sys.exit(1)
hst_info = parse_hst_info(sys.argv[2])
print(len(hst_info))
genome = parse_genome(sys.argv[3])
print(len(genome))
rpvg_haps = parse_rpvg_haplotypes(sys.argv[4])
print(len(rpvg_haps))
rpvg_exp = parse_rpvg_expression(sys.argv[5])
print(len(rpvg_exp))
out_file = open(sys.argv[6], "w")
out_file.write("Chrom\tPosition\tAlleleNum\tAlleleType\tAlleleLength\tHomopolymerLength\tNumTandemRepeats\tProbability\tExpression\n")
variant_file = gzip.open(sys.argv[1], "rb")
sample_names = {}
for line in variant_file:
line_split = line.split("\t")
line_split[-1] = line_split[-1].strip()
if line_split[0] == "#CHROM":
assert(len(line_split) >= 10)
for i in range(9, len(line_split)):
sample_names[line_split[i]] = i
continue
if line_split[0][0] == "#":
continue
assert(len(line_split) >= 10)
alt_alleles = line_split[4].split(",")
allele_prob = [0.0 for x in range(1 + len(alt_alleles))]
allele_exp = [0.0 for x in range(1 + len(alt_alleles))]
transcripts = [x.split("=")[1].split(",") for x in line_split[7].split(";") if x.split("=")[0] == "TRANSCIPTS"]
assert(len(transcripts) == 1)
for transcript in transcripts[0]:
added_diplotypes = [{} for x in range(1 + len(alt_alleles))]
if transcript in hst_info:
for hst in hst_info[transcript]:
gt = line_split[sample_names[hst[1][0]]]
assert(not "/" in gt)
allele = gt.split("|")[int(hst[1][1])]
if allele != ".":
if hst[0] in rpvg_haps:
for key, value in rpvg_haps[hst[0]].items():
if not (hst[0], key) in added_diplotypes[int(allele)] and not (key, hst[0]) in added_diplotypes[int(allele)]:
added_diplotypes[int(allele)][(hst[0], key)] = ""
allele_prob[int(allele)] += value
if hst[0] in rpvg_exp:
allele_exp[int(allele)] += rpvg_exp[hst[0]]
if len(transcripts[0]) > 0:
for i in range(len(allele_prob)):
allele_prob[i] = allele_prob[i] / len(transcripts[0])
for i in range(len(allele_exp)):
if allele_exp[i] > 0:
assert(allele_prob[i] > 0)
max_hp_length = calcMaxHomopolymerLength(genome[line_split[0]], int(line_split[1]) - 1)
out_file.write(line_split[0] + "\t" + line_split[1] + "\t0\tRef\t0\t" + str(max_hp_length) + "\t0\t" + str(allele_prob[0]) + "\t" + str(allele_exp[0]) + "\n")
for i in range(len(alt_alleles)):
allele_type_length = getAlleleTypeLength(line_split[3], alt_alleles[i])
assert(allele_type_length[0] != "Ref")
max_num_tr = calcMaxNumTandemRepeats(genome[line_split[0]], int(line_split[1]) - 1, line_split[3], alt_alleles[i])
out_file.write(line_split[0] + "\t" + line_split[1] + "\t" + str(i + 1) + "\t" + allele_type_length[0] + "\t" + str(allele_type_length[1]) + "\t" + str(max_hp_length) + "\t" + str(max_num_tr) + "\t" + str(allele_prob[i + 1]) + "\t" + str(allele_exp[i + 1]) + "\n")
variant_file.close()
out_file.close()
print("Done")
```
#### File: vgrna-project-scripts/python/convert_salmon_bootstraps.py
```python
import sys
import os
import subprocess
import gzip
import struct
from utils import *
def parse_hst_names(filename):
hst_names = []
names_file = gzip.open(filename, "rb")
for line in names_file:
assert(len(hst_names) == 0)
hst_names = line.decode().strip().split("\t")
names_file.close()
return hst_names
def parse_hst_lengths(filename):
hst_lengths = {}
lengths_file = gzip.open(filename, "rb")
for line in lengths_file:
line_split = line.decode().split("\t")
assert(len(line_split) == 5)
if line_split[0] == "Name":
continue
assert(not line_split[0] in hst_lengths)
hst_lengths[line_split[0]] = (int(line_split[1]), float(line_split[2]))
lengths_file.close()
return hst_lengths
printScriptHeader()
if len(sys.argv) != 5:
print("Usage: python convert_salmon_bootstraps.py <bootstraps_gz_name> <names_tsv_gz_name> <em_quant_gz_name> <output_fil_name>\n")
sys.exit(1)
hst_names = parse_hst_names(sys.argv[2])
print(len(hst_names))
hst_lengths = parse_hst_lengths(sys.argv[3])
print(len(hst_lengths))
boot_struct = struct.Struct('@' + 'd' * len(hst_names))
sum_boot_values = [0] * len(hst_names)
num_zero_boot_values = [0] * len(hst_names)
num_boot_samples = 0
with gzip.open(sys.argv[1], "rb") as boot_file:
while True:
try:
boot_values = boot_struct.unpack_from(boot_file.read(boot_struct.size))
assert(len(sum_boot_values) == len(boot_values))
for i in range(len(sum_boot_values)):
sum_boot_values[i] += float(boot_values[i])
if float(boot_values[i]) < 10**-4:
num_zero_boot_values[i] += 1
num_boot_samples += 1
except:
break
boot_file.close()
print(num_boot_samples)
mean_boot_count = [0] * len(hst_names)
mean_boot_tpm = [0] * len(hst_names)
for i in range(len(sum_boot_values)):
mean_boot_count[i] = sum_boot_values[i] / num_boot_samples
mean_boot_tpm[i] = mean_boot_count[i] / hst_lengths[hst_names[i]][1]
assert(len(hst_names) == len(mean_boot_count))
assert(len(hst_names) == len(mean_boot_tpm))
assert(len(hst_names) == len(num_zero_boot_values))
sum_mean_boot_tpm = sum(mean_boot_tpm)
out_file = open(sys.argv[4], "w")
out_file.write("Name\tLength\tEffectiveLength\tTPM\tNumReads\tFracNonZero\n")
for i in range(len(hst_names)):
out_str = hst_names[i] + "\t" + str(hst_lengths[hst_names[i]][0]) + "\t" + str(hst_lengths[hst_names[i]][1]) + "\t" + str(10**6 * mean_boot_tpm[i] / sum_mean_boot_tpm) + "\t" + str(mean_boot_count[i]) + "\t" + str(1 - num_zero_boot_values[i] / num_boot_samples) + "\n"
out_file.write(out_str)
out_file.close()
print("Done")
```
|
{
"source": "jejay/baselines",
"score": 2
}
|
#### File: baselines/ddpg/models.py
```python
import tensorflow as tf
import tensorflow.contrib as tc
import numpy as np
class Model(object):
def __init__(self, name):
self.name = name
@property
def vars(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)
@property
def trainable_vars(self):
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
@property
def perturbable_vars(self):
return [var for var in self.trainable_vars if 'LayerNorm' not in var.name]
class WeightSharingActor(Model):
def __init__(self, specification, name='weight-sharing-actor', layer_norm=True):
super(WeightSharingActor, self).__init__(name=name)
specifications = {
'Ant-v2-shallow': {
'commander': {
'hidden_layers': 1,
'units': 128
},
'controllers': [
{
'name': 'leg',
'hidden_layers': 0,
'units': 32,
'action_indice_groups': [[0, 1], [2, 3], [4, 5], [6, 7]]
}
]
},
'Ant-v2-deep': {
'commander': {
'hidden_layers': 0,
'units': 128
},
'controllers': [
{
'name': 'leg',
'hidden_layers': 1,
'units': 32,
'action_indice_groups': [[0, 1], [2, 3], [4, 5], [6, 7]]
}
]
},
'Ant-v2-balanced': {
'commander': {
'hidden_layers': 1,
'units': 128
},
'controllers': [
{
'name': 'leg',
'hidden_layers': 1,
'units': 32,
'action_indice_groups': [[0, 1], [2, 3], [4, 5], [6, 7]]
}
]
},
'Ant-v2-balancedbottleneck': {
'commander': {
'hidden_layers': 1,
'units': 128
},
'controllers': [
{
'name': 'leg',
'hidden_layers': 1,
'units': 32,
'action_indice_groups': [[0, 1], [2, 3], [4, 5], [6, 7]]
}
]
},
'Ant-v2-balancedbottlebottleneck': {
'commander': {
'hidden_layers': 1,
'units': 128
},
'controllers': [
{
'name': 'leg',
'hidden_layers': 1,
'units': 32,
'action_indice_groups': [[0, 1], [2, 3], [4, 5], [6, 7]]
}
]
},
'Ant-v2-balancedbottlebottlebottleneck': {
'commander': {
'hidden_layers': 1,
'units': 128
},
'controllers': [
{
'name': 'leg',
'hidden_layers': 1,
'units': 32,
'action_indice_groups': [[0, 1], [2, 3], [4, 5], [6, 7]]
}
]
},
'Walker2d-v2-shallow': {
'commander': {
'hidden_layers': 1,
'units': 128
},
'controllers': [
{
'name': 'leg',
'hidden_layers': 0,
'units': 64,
'action_indice_groups': [[0, 1, 2], [3, 4, 5]]
}
]
},
'Walker2d-v2-deep': {
'commander': {
'hidden_layers': 0,
'units': 128
},
'controllers': [
{
'name': 'leg',
'hidden_layers': 1,
'units': 64,
'action_indice_groups': [[0, 1, 2], [3, 4, 5]]
}
]
},
'Walker2d-v2-balanced': {
'commander': {
'hidden_layers': 1,
'units': 128
},
'controllers': [
{
'name': 'leg',
'hidden_layers': 1,
'units': 64,
'action_indice_groups': [[0, 1, 2], [3, 4, 5]]
}
]
},
'HalfCheetah-v2-shallow': {
'commander': {
'hidden_layers': 1,
'units': 128
},
'controllers': [
{
'name': 'leg',
'hidden_layers': 0,
'units': 64,
'action_indice_groups': [[0, 1, 2], [3, 4, 5]]
}
]
},
'HalfCheetah-v2-deep': {
'commander': {
'hidden_layers': 0,
'units': 128
},
'controllers': [
{
'name': 'leg',
'hidden_layers': 1,
'units': 64,
'action_indice_groups': [[0, 1, 2], [3, 4, 5]]
}
]
},
'HalfCheetah-v2-balanced': {
'commander': {
'hidden_layers': 1,
'units': 128
},
'controllers': [
{
'name': 'leg',
'hidden_layers': 1,
'units': 64,
'action_indice_groups': [[0, 1, 2], [3, 4, 5]]
}
]
},
'Humanoid-v2-shallow': {
'commander': {
'hidden_layers': 1,
'units': 128
},
'controllers': [
{
'name': 'abdomen',
'hidden_layers': 0,
'units': 16,
'action_indice_groups': [[0, 1, 2]]
},
{
'name': 'hip-knee',
'hidden_layers': 0,
'units': 32,
'action_indice_groups': [[3, 4, 5, 6], [7, 8, 9, 10]]
},
{
'name': 'arm',
'hidden_layers': 0,
'units': 24,
'action_indice_groups': [[11, 12, 13], [14, 15, 16]]
}
]
},
'Humanoid-v2-deep': {
'commander': {
'hidden_layers': 0,
'units': 128
},
'controllers': [
{
'name': 'abdomen',
'hidden_layers': 1,
'units': 16,
'action_indice_groups': [[0, 1, 2]]
},
{
'name': 'hip-knee',
'hidden_layers': 1,
'units': 32,
'action_indice_groups': [[3, 4, 5, 6], [7, 8, 9, 10]]
},
{
'name': 'arm',
'hidden_layers': 1,
'units': 24,
'action_indice_groups': [[11, 12, 13], [14, 15, 16]]
}
]
},
'Humanoid-v2-balanced': {
'commander': {
'hidden_layers': 1,
'units': 128
},
'controllers': [
{
'name': 'abdomen',
'hidden_layers': 1,
'units': 16,
'action_indice_groups': [[0, 1, 2]]
},
{
'name': 'hip-knee',
'hidden_layers': 1,
'units': 32,
'action_indice_groups': [[3, 4, 5, 6], [7, 8, 9, 10]]
},
{
'name': 'arm',
'hidden_layers': 1,
'units': 24,
'action_indice_groups': [[11, 12, 13], [14, 15, 16]]
}
]
},
'Humanoid-v2-halfmirrored': {
'commander': {
'hidden_layers': 1,
'units': 128
},
'controllers': [
{
'name': 'body-half',
'hidden_layers': 0,
'units': 64,
'action_indice_groups': [[0, 1, 2, 3, 4, 5, 6, 11, 12, 13], [0, 1, 2, 7, 8, 9, 10, 14, 15, 16]]
}
]
},
'Humanoid-v2-fullmirrored': {
'commander': {
'hidden_layers': 0,
'units': 128
},
'controllers': [
{
'name': 'body-half',
'hidden_layers': 1,
'units': 64,
'action_indice_groups': [[0, 1, 2, 3, 4, 5, 6, 11, 12, 13], [0, 1, 2, 7, 8, 9, 10, 14, 15, 16]]
}
]
}
}
self.spec_name = specification
self.specification = specifications[specification]
self.layer_norm = layer_norm
self.indices = []
for controller in self.specification['controllers']:
controller['output_length'] = len(controller['action_indice_groups'][0])
for action_indice_group in controller['action_indice_groups']:
assert controller['output_length'] == len(action_indice_group), \
"Controller %r has an action_indice_group length mismatch. All groups should be the same length." % controller
self.indices += action_indice_group
for i in range(max(self.indices)):
assert i in self.indices, \
"Action index %r not found." % i
self.nb_actions = max(self.indices) + 1
def __call__(self, obs, reuse=False):
with tf.variable_scope(self.name) as scope:
if reuse:
scope.reuse_variables()
x = obs
for i in range(self.specification['commander']['hidden_layers']):
x = tf.layers.dense(x, self.specification['commander']['units'])
if self.layer_norm:
x = tc.layers.layer_norm(x, center=True, scale=True)
x = tf.nn.relu(x)
output = tf.zeros(shape=[1, self.nb_actions])
for controller in self.specification['controllers']:
branch_units = controller['units']
if self.spec_name == 'Ant-v2-balancedbottlebottlebottleneck':
branch_units = 2
if self.spec_name == 'Ant-v2-balancedbottlebottleneck':
branch_units = 4
if self.spec_name == 'Ant-v2-balancedbottleneck':
branch_units = 8
for aig_idx, action_indice_group in enumerate(controller['action_indice_groups']):
with tf.variable_scope(controller['name']+'-branch-'+str(aig_idx)):
# This layer splits the controllers. Weights can not be shared here.
x_ = tf.layers.dense(x, branch_units)
if self.layer_norm:
x_ = tc.layers.layer_norm(x_, center=True, scale=True)
x_ = tf.nn.relu(x_)
with tf.variable_scope(controller['name']) as controller_scope:
# Starting variable/weights sharing if we are in the second or higher action index group
if aig_idx > 0:
controller_scope.reuse_variables()
for i in range(controller['hidden_layers']):
# controllers hidden layer
x_ = tf.layers.dense(x_, controller['units'],
name='hidden-layer-'+str(i))
if self.layer_norm:
x_ = tc.layers.layer_norm(x_, center=True, scale=True)
x_ = tf.nn.relu(x_)
#controllers output layer
x_ = tf.layers.dense(x_, controller['output_length'],
kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3),
name='final-layer')
output_projection = np.zeros((controller['output_length'], self.nb_actions))
for controller_output_index, action_index in enumerate(action_indice_group):
output_projection[controller_output_index, action_index] = 1/self.indices.count(action_index)
output_projection = tf.convert_to_tensor(output_projection, dtype=tf.float32)
x_ = tf.tensordot(x_, output_projection, axes = 1)
output = tf.add(output, x_)
output = tf.nn.tanh(output)
return output
class Actor(Model):
def __init__(self, nb_actions, name='actor', layer_norm=True):
super(Actor, self).__init__(name=name)
self.nb_actions = nb_actions
self.layer_norm = layer_norm
def __call__(self, obs, reuse=False):
with tf.variable_scope(self.name) as scope:
if reuse:
scope.reuse_variables()
x = obs
x = tf.layers.dense(x, 128)
if self.layer_norm:
x = tc.layers.layer_norm(x, center=True, scale=True)
x = tf.nn.relu(x)
x = tf.layers.dense(x, 128)
if self.layer_norm:
x = tc.layers.layer_norm(x, center=True, scale=True)
x = tf.nn.relu(x)
x = tf.layers.dense(x, self.nb_actions, kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3))
x = tf.nn.tanh(x)
return x
class Critic(Model):
def __init__(self, name='critic', layer_norm=True):
super(Critic, self).__init__(name=name)
self.layer_norm = layer_norm
def __call__(self, obs, action, reuse=False):
with tf.variable_scope(self.name) as scope:
if reuse:
scope.reuse_variables()
x = obs
x = tf.layers.dense(x, 64)
if self.layer_norm:
x = tc.layers.layer_norm(x, center=True, scale=True)
x = tf.nn.relu(x)
x = tf.concat([x, action], axis=-1)
x = tf.layers.dense(x, 64)
if self.layer_norm:
x = tc.layers.layer_norm(x, center=True, scale=True)
x = tf.nn.relu(x)
x = tf.layers.dense(x, 1, kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3))
return x
@property
def output_vars(self):
output_vars = [var for var in self.trainable_vars if 'output' in var.name]
return output_vars
```
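A note on the branch-merging step above: `output_projection` maps each controller branch's outputs onto the full action vector, and dividing by `self.indices.count(action_index)` averages the contributions when several branches drive the same action index. A minimal NumPy sketch of just that projection step (standalone, with made-up sizes and indices, not taken from the model above) is:
```python
import numpy as np

# Hypothetical setup: 4 actions; one branch emits 2 outputs that map to action
# indices [1, 3]; `indices` lists every action index used by any branch, so a
# repeated entry means the contributions to that action are averaged.
nb_actions = 4
action_indice_group = [1, 3]
indices = [0, 1, 1, 2, 3]  # action 1 is driven by two branches

projection = np.zeros((len(action_indice_group), nb_actions))
for out_idx, action_idx in enumerate(action_indice_group):
    projection[out_idx, action_idx] = 1.0 / indices.count(action_idx)

branch_output = np.array([0.5, -0.2])      # this branch's raw outputs
contribution = branch_output @ projection  # shape (nb_actions,)
print(contribution)                        # [ 0.    0.25  0.   -0.2 ]
```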
|
{
"source": "jejay/RoboRL",
"score": 2
}
|
#### File: roborl/ddpg/ddpg.py
```python
import os
import numpy as np
import torch
import torch.optim as optim
from torch.autograd import Variable
from random_process import OrnsteinUhlenbeckProcess
from memory import ReplayMemory
use_cuda = torch.cuda.is_available()
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
Tensor = FloatTensor
class DDPG:
def __init__(self, env, actor_model, critic_model, memory=10000, batch_size=64, gamma=0.99,
tau=0.001, actor_lr=1e-4, critic_lr=1e-3, critic_decay=1e-2, ou_theta=0.15,
ou_sigma=0.2, render=None, evaluate=None, save_path=None, save_every=10,
render_every=10, train_per_step=True):
self.env = env
self.actor = actor_model
self.actor_target = actor_model.clone()
self.critic = critic_model
self.critic_target = critic_model.clone()
if use_cuda:
for net in [self.actor, self.actor_target, self.critic, self.critic_target]:
net.cuda()
self.memory = ReplayMemory(memory)
self.batch_size = batch_size
self.gamma = gamma
self.tau = tau
self.random_process = OrnsteinUhlenbeckProcess(env.action_space.shape[0],
theta=ou_theta, sigma=ou_sigma)
self.optim_critic = optim.Adam(self.critic.parameters(), lr=critic_lr,
weight_decay=critic_decay)
self.optim_actor = optim.Adam(self.actor.parameters(), lr=actor_lr)
self.render = render
self.render_every = render_every
self.evaluate = evaluate
self.save_path = save_path
self.save_every = save_every
self.train_per_step = train_per_step
def update(self, target, source):
zipped = zip(target.parameters(), source.parameters())
for target_param, source_param in zipped:
updated_param = target_param.data * (1 - self.tau) + \
source_param.data * self.tau
target_param.data.copy_(updated_param)
def train_models(self):
if len(self.memory) < self.batch_size:
return None, None
mini_batch = self.memory.sample_batch(self.batch_size)
critic_loss = self.train_critic(mini_batch)
actor_loss = self.train_actor(mini_batch)
self.update(self.actor_target, self.actor)
self.update(self.critic_target, self.critic)
return critic_loss.data[0], actor_loss.data[0]
def mse(self, inputs, targets):
return torch.mean((inputs - targets)**2)
def train_critic(self, batch):
# forward pass
pred_actions = self.actor_target(batch.next_states)
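            # NOTE: batch.done is used below as a continuation mask; this assumes the
            # replay memory yields 0 (or False) for terminal transitions so that the
            # bootstrap term vanishes at episode end.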
target_q = batch.rewards + batch.done * self.critic_target([batch.next_states, pred_actions]) * self.gamma
pred_q = self.critic([batch.states, batch.actions])
# backward pass
loss = self.mse(pred_q, target_q)
self.optim_critic.zero_grad()
loss.backward(retain_graph=True)
for param in self.critic.parameters():
param.grad.data.clamp_(-1, 1)
self.optim_critic.step()
return loss
def train_actor(self, batch):
# forward pass
pred_mu = self.actor(batch.states)
pred_q = self.critic([batch.states, pred_mu])
# backward pass
loss = -pred_q.mean()
self.optim_actor.zero_grad()
loss.backward()
# for param in self.actor.parameters():
# param.grad.data.clamp_(-1, 1)
self.optim_actor.step()
return loss
def prep_state(self, s):
return Variable(torch.from_numpy(s).float().unsqueeze(0))
def select_action(self, state, exploration=True):
if use_cuda:
state = state.cuda()
self.actor.eval()
action = self.actor(state)
self.actor.train()
if exploration:
noise = Variable(torch.from_numpy(self.random_process.sample()).float())
if use_cuda:
noise = noise.cuda()
action = action + noise
return action
def step(self, action):
next_state, reward, done, _ = self.env.step(action.data.cpu().numpy()[0])
next_state = self.prep_state(next_state)
reward = FloatTensor([reward])
return next_state, reward, done
def warmup(self, num_steps):
overall_step = 0
while overall_step <= num_steps:
done = False
state = self.prep_state(self.env.reset())
self.random_process.reset()
while not done:
overall_step += 1
action = self.select_action(state)
next_state, reward, done = self.step(action)
self.memory.add(state, action, reward, next_state, done)
state = next_state
def train(self, num_steps):
running_reward = None
reward_sums = []
losses = []
overall_step = 0
episode_number = 0
while overall_step <= num_steps:
episode_number += 1
done = False
state = self.prep_state(self.env.reset())
reward_sum = 0
self.random_process.reset()
while not done:
overall_step += 1
action = self.select_action(state)
next_state, reward, done = self.step(action)
self.memory.add(state, action, reward, next_state, done)
state = next_state
reward_sum += reward[0]
if self.train_per_step:
losses.append(self.train_models())
if not self.train_per_step:
losses.append(self.train_models())
render_this_episode = self.render and (episode_number % self.render_every == 0)
evaluation_reward = self.run(render=render_this_episode)
reward_sums.append((reward_sum, evaluation_reward))
if self.save_path is not None and (episode_number % self.save_every == 0):
self.save_models(self.save_path)
self.save_results(self.save_path, losses, reward_sums)
running_reward = reward_sum if running_reward is None else running_reward * 0.99 + reward_sum * 0.01
print('episode: {} steps: {} running train reward: {:.4f} eval reward: {:.4f}'.format(episode_number, overall_step,
running_reward, evaluation_reward))
if self.save_path is not None:
self.save_models(self.save_path)
self.save_results(self.save_path, losses, reward_sums)
return reward_sums, losses
def run(self, render=True):
state = self.env.reset()
done = False
reward_sum = 0
while not done:
if render:
self.env.render()
action = self.select_action(self.prep_state(state),
exploration=False)
state, reward, done, _ = self.env.step(action.data.cpu().numpy()[0])
reward_sum += reward
return reward_sum
def save_models(self, path):
self.actor.save(path)
self.critic.save(path)
def save_results(self, path, losses, rewards):
losses = np.array([l for l in losses if l[0] is not None])
rewards = np.array(rewards)
np.savetxt(os.path.join(path, 'losses.csv'), losses, delimiter=',', header='critic,actor', comments='')
np.savetxt(os.path.join(path, 'rewards.csv'), rewards, delimiter=',', header='train,evaluation', comments='')
```
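The `update` method above performs the soft (Polyak) target update used by DDPG, i.e. target <- tau * source + (1 - tau) * target. A self-contained sketch of that rule on two tiny networks (the tau value is an assumption matching the class default above, not read from any config) is:
```python
import torch.nn as nn

tau = 0.001  # assumed; same as the DDPG constructor default above

source = nn.Linear(4, 2)
target = nn.Linear(4, 2)
target.load_state_dict(source.state_dict())  # start the target as an exact copy

def soft_update(target, source, tau):
    # blend each target parameter towards its source counterpart
    for t_param, s_param in zip(target.parameters(), source.parameters()):
        t_param.data.copy_(t_param.data * (1.0 - tau) + s_param.data * tau)

soft_update(target, source, tau)
```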
#### File: roborl/ddpg/models.py
```python
import os
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
class MyModule(nn.Module):
def clone(self):
assert self.args
model_clone = self.__class__(*self.args)
model_clone.load_state_dict(self.state_dict())
model_clone.train(False)
return model_clone
@classmethod
def load(cls, filename):
with open('%s-args.pkl' % filename, 'rb') as f:
args = pickle.load(f)
model = cls(*args)
dict_filename = '%s.model' % filename
model.load_state_dict(torch.load(dict_filename, map_location=lambda storage, loc: storage))
return model
def save(self, path, filename):
args_file = os.path.join(path, '%s-args.pkl' % filename)
with open(args_file, 'wb') as f:
pickle.dump(self.args, f)
torch.save(self.state_dict(), os.path.join(path, '%s.model' % filename))
class Actor(MyModule):
def __init__(self, n_states, n_actions, n_hidden, use_batch_norm=False):
super().__init__()
self.args = (n_states, n_actions, n_hidden, use_batch_norm)
self.use_batch_norm = use_batch_norm
self.lin1 = nn.Linear(n_states, n_hidden)
self.lin2 = nn.Linear(n_hidden, n_hidden)
self.lin3 = nn.Linear(n_hidden, n_actions)
if self.use_batch_norm:
self.bn_1 = nn.BatchNorm1d(n_hidden)
self.bn_2 = nn.BatchNorm1d(n_hidden)
self.init_weights()
def init_weights(self):
for l in [self.lin1, self.lin2, self.lin3]:
nn.init.xavier_uniform(l.weight)
def save(self, path):
super().save(path, 'actor')
def forward(self, x):
x = self.lin1(x)
if self.use_batch_norm:
x = self.bn_1(x)
x = F.relu(x)
x = self.lin2(x)
if self.use_batch_norm:
            x = self.bn_2(x)  # use the second batch-norm layer here; bn_1 already normalised the lin1 output
x = F.relu(x)
x = F.tanh(self.lin3(x))
return x
class SharedControllerActor(MyModule):
def __init__(self, n_states, controller_conf, controller_list, n_hidden, use_batch_norm=False):
"""
constructs a policy network with locally connected controllers
that can share weights
Args:
n_states: number of states that are the input to the policy
            controller_conf: dictionary with configs for low-level controllers
controller_list: list of controller names, if one name appears multiple times
then these controllers share weights
n_hidden: number of hidden units in the fully connected layer
>> # example controller conf:
>> controller_conf = {
'leg': {
'actions': 4,
'hidden': 50
}
'arm': {
'actions': 3,
'hidden': 50
}
}
>> # example controller list:
>> controller_list = ['arm', 'arm', 'leg', 'leg']
"""
super().__init__()
self.args = (n_states, controller_conf, controller_list, n_hidden, use_batch_norm)
self.use_batch_norm = use_batch_norm
self.lin1 = nn.Linear(n_states, n_hidden)
self.lin2 = nn.Linear(n_hidden, n_hidden)
self.controller_inputs, self.controller = self.create_controllers(controller_conf, controller_list, n_hidden)
self.controller_list = controller_list
if use_batch_norm:
self.bn_1 = nn.BatchNorm1d(n_hidden)
self.bn_2 = nn.BatchNorm1d(n_hidden)
self.controller_input_bns = self.controller_bn(self.controller_inputs)
self.init_weights()
def create_controllers(self, controller_conf, controller_list, n_hidden):
shared_controller = {}
for name, conf in controller_conf.items():
# TODO: create arbitrary subnet based on conf
            l = nn.Linear(conf['hidden'], conf['actions'])
self.add_module(name, l)
shared_controller[name] = l
controller_inputs = []
for i, name in enumerate(controller_list):
n_output = controller_conf[name]['hidden']
l = nn.Linear(n_hidden, n_output)
self.add_module('controller_input_%d' % i, l)
controller_inputs.append(l)
return controller_inputs, shared_controller
def controller_bn(self, controller_inputs):
controller_input_bns = []
for i, input_layer in enumerate(controller_inputs):
bn = nn.BatchNorm1d(input_layer.out_features)
self.add_module('controller_input_bn_%d' % i, bn)
controller_input_bns.append(bn)
return controller_input_bns
def init_weights(self):
for l in [self.lin1, self.lin2, *self.controller_inputs, *self.controller.values()]:
nn.init.xavier_uniform(l.weight)
def save(self, path):
super().save(path, 'actor-shared')
def forward(self, x):
x = self.lin1(x)
if self.use_batch_norm:
x = self.bn_1(x)
x = F.relu(x)
x = self.lin2(x)
if self.use_batch_norm:
x = self.bn_2(x)
x = F.relu(x)
outs = []
i = 0
for name, input_layer in zip(self.controller_list, self.controller_inputs):
xc = input_layer(x)
if self.use_batch_norm:
xc = self.controller_input_bns[i](xc)
i += 1
            xc = F.relu(xc)  # activate before feeding the shared controller head
outs.append(self.controller[name](xc))
out = torch.cat(outs, 1)
return F.tanh(out)
class Critic(MyModule):
def __init__(self, n_states, n_actions, n_hidden, use_batch_norm=False):
super().__init__()
self.args = (n_states, n_actions, n_hidden, use_batch_norm)
self.use_batch_norm = use_batch_norm
self.lin_states = nn.Linear(n_states, n_hidden)
self.lin1 = nn.Linear(n_hidden + n_actions, n_hidden)
self.lin2 = nn.Linear(n_hidden, 1)
if self.use_batch_norm:
self.bn_states = nn.BatchNorm1d(n_hidden)
self.bn_1 = nn.BatchNorm1d(n_hidden)
self.init_weights()
def init_weights(self):
for l in [self.lin_states, self.lin1, self.lin2]:
nn.init.xavier_uniform(l.weight)
def save(self, path):
super().save(path, 'critic')
def forward(self, x):
s, a = x
s = self.lin_states(s)
if self.use_batch_norm:
s = self.bn_states(s)
states_hidden = F.relu(s)
x = self.lin1(torch.cat([states_hidden, a], 1))
if self.use_batch_norm:
x = self.bn_1(x)
x = F.relu(x)
x = self.lin2(x)
return x
```
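Building on the `controller_conf`/`controller_list` scheme described in the `SharedControllerActor` docstring, a hedged usage sketch (observation size, batch size and hidden width are illustrative choices, not taken from a real environment; it assumes the class above is in scope) could look like:
```python
import torch

# Illustrative configuration: two 'arm' controllers share weights, two 'leg'
# controllers share weights; total action size = 2*3 + 2*4 = 14.
controller_conf = {
    'leg': {'actions': 4, 'hidden': 50},
    'arm': {'actions': 3, 'hidden': 50},
}
controller_list = ['arm', 'arm', 'leg', 'leg']

n_states = 27  # hypothetical observation size
actor = SharedControllerActor(n_states, controller_conf, controller_list,
                              n_hidden=128, use_batch_norm=False)
obs = torch.randn(8, n_states)  # batch of 8 observations
actions = actor(obs)            # -> shape (8, 14), values squashed by tanh
```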
|
{
"source": "jejeking/onearth_bckup",
"score": 3
}
|
#### File: src/mrfgen/test_mrfgen.py
```python
import os
import unittest
import subprocess
import filecmp
import urllib
import shutil
import datetime
from osgeo import gdal
year = datetime.datetime.now().strftime('%Y')
doy = int(datetime.datetime.now().strftime('%j'))-1
def run_command(cmd):
"""
Runs the provided command on the terminal.
Arguments:
cmd -- the command to be executed.
"""
print '\nRunning command: ' + cmd
process = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
process.wait()
for output in process.stdout:
print output.strip()
for error in process.stderr:
print error.strip()
raise Exception(error.strip())
class TestMRFGeneration(unittest.TestCase):
def setUp(self):
self.dirpath = os.path.dirname(__file__)
self.test_config = self.dirpath + "/test/mrfgen_test_config.xml"
self.input_dir = self.dirpath + "/test/input_dir/"
self.output_dir = self.dirpath + "/test/output_dir"
self.working_dir = self.dirpath + "/test/working_dir"
self.logfile_dir = self.dirpath + "/test/logfile_dir"
self.output_mrf = self.output_dir+ "/MYR4ODLOLLDY2014277_.mrf"
self.output_img = self.output_dir+ "/MYR4ODLOLLDY2014277_.png"
self.compare_img = self.dirpath + "/test/test_comp1.png"
if not os.path.exists(self.input_dir):
os.makedirs(self.input_dir)
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
if not os.path.exists(self.working_dir):
os.makedirs(self.working_dir)
if not os.path.exists(self.logfile_dir):
os.makedirs(self.logfile_dir)
# download tile
# image_url = "http://lance2.modaps.eosdis.nasa.gov/imagery/elements/MODIS/MYR4ODLOLLDY/%s/MYR4ODLOLLDY_global_%s%s_10km.png" % (year,year,doy)
# world_url = "http://lance2.modaps.eosdis.nasa.gov/imagery/elements/MODIS/MYR4ODLOLLDY/%s/MYR4ODLOLLDY_global_%s%s_10km.pgw" % (year,year,doy)
# image_name = self.input_dir + image_url.split('/')[-1]
# world_name = self.input_dir + world_url.split('/')[-1]
# print "Downloading", image_url
# image_file=urllib.URLopener()
# image_file.retrieve(image_url,image_name)
# print "Downloading", world_url
# world_file=urllib.URLopener()
# world_file.retrieve(world_url,world_name)
#generate MRF
run_command("python mrfgen.py -c " + self.test_config)
def test_generate_mrf(self):
# Check MRF generation succeeded
self.assertTrue(os.path.isfile(self.output_mrf), "MRF generation failed")
# Read MRF
dataset = gdal.Open(self.output_mrf)
driver = dataset.GetDriver()
print 'Driver:', str(driver.LongName)
self.assertEqual(str(driver.LongName), "Meta Raster Format", "Driver is not Meta Raster Format")
print 'Files:', ' '.join(dataset.GetFileList())
self.assertEqual(len(dataset.GetFileList()),3,"MRF does not contain triplet")
print 'Projection:', str(dataset.GetProjection())
self.assertEqual(str(dataset.GetProjection()),'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]')
print 'Size: ',dataset.RasterXSize,'x',dataset.RasterYSize, 'x',dataset.RasterCount
self.assertEqual(dataset.RasterXSize, 4096, "Size does not match")
self.assertEqual(dataset.RasterYSize, 2048, "Size does not match")
self.assertEqual(dataset.RasterCount, 1, "Size does not match")
geotransform = dataset.GetGeoTransform()
print 'Origin: (',geotransform[0], ',',geotransform[3],')'
self.assertEqual(geotransform[0], -180.0, "Origin does not match")
self.assertEqual(geotransform[3], 90.0, "Origin does not match")
print 'Pixel Size: (',geotransform[1], ',',geotransform[5],')'
self.assertEqual(geotransform[1], 0.087890625, "Pixel size does not match")
self.assertEqual(geotransform[5], -0.087890625, "Pixel size does not match")
band = dataset.GetRasterBand(1)
print 'Overviews:', band.GetOverviewCount()
self.assertEqual(band.GetOverviewCount(), 3, "Overview count does not match")
print 'Colors:', band.GetRasterColorTable().GetCount()
self.assertEqual(band.GetRasterColorTable().GetCount(), 256, "Color count does not match")
for x in range(0, 255):
color = band.GetRasterColorTable().GetColorEntry(x)
print color
if x == 0:
self.assertEqual(str(color), '(220, 220, 255, 0)', "Color does not match")
if x == 1:
self.assertEqual(str(color), '(0, 0, 0, 255)', "Color does not match")
# Convert and compare MRF
mrf = gdal.Open(self.output_mrf)
driver = gdal.GetDriverByName("PNG")
img = driver.CreateCopy(self.output_img, mrf, 0 )
print 'Generated: ' + ' '.join(img.GetFileList())
print 'Size: ',img.RasterXSize,'x',img.RasterYSize, 'x',img.RasterCount
self.assertEqual(img.RasterXSize, dataset.RasterXSize, "Size does not match")
self.assertEqual(img.RasterYSize, dataset.RasterYSize, "Size does not match")
self.assertEqual(img.RasterCount, dataset.RasterCount, "Size does not match")
print "Comparing: " + self.output_img + " to " + self.compare_img
self.assertTrue(filecmp.cmp(self.output_img, self.compare_img), "Output image does not match")
img = None
mrf = None
print "\n***Test Case Passed***\n"
def tearDown(self):
shutil.rmtree(self.working_dir)
shutil.rmtree(self.logfile_dir)
shutil.rmtree(self.output_dir)
class TestMRFGeneration_polar(unittest.TestCase):
def setUp(self):
self.dirpath = os.path.dirname(__file__)
self.test_config = self.dirpath + "/test/mrfgen_test_config2.xml"
self.input_dir = self.dirpath + "/test/input_dir/"
self.output_dir = self.dirpath + "/test/output_dir"
self.working_dir = self.dirpath + "/test/working_dir"
self.logfile_dir = self.dirpath + "/test/logfile_dir"
self.output_mrf = self.output_dir+ "/MORCR143ARDY2014203_.mrf"
self.output_img = self.output_dir+ "/MORCR143ARDY2014203_.jpg"
self.compare_img = self.dirpath + "/test/test_comp2.jpg"
if not os.path.exists(self.input_dir):
os.makedirs(self.input_dir)
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
if not os.path.exists(self.working_dir):
os.makedirs(self.working_dir)
if not os.path.exists(self.logfile_dir):
os.makedirs(self.logfile_dir)
# download tiles
for r in range(0,8):
for c in range(0,8):
try:
image_url = "http://lance2.modaps.eosdis.nasa.gov/imagery/subsets/Arctic_r%02dc%02d/%s%03d/Arctic_r%02dc%02d.%s%03d.aqua.250m.jpg" % (r,c,year,doy,r,c,year,doy)
world_url = "http://lance2.modaps.eosdis.nasa.gov/imagery/subsets/Arctic_r%02dc%02d/%s%03d/Arctic_r%02dc%02d.%s%03d.aqua.250m.jgw" % (r,c,year,doy,r,c,year,doy)
image_name = self.input_dir + image_url.split('/')[-1]
world_name = self.input_dir + world_url.split('/')[-1]
print "Downloading", image_url
image_file=urllib.URLopener()
image_file.retrieve(image_url,image_name)
print "Downloading", world_url
world_file=urllib.URLopener()
world_file.retrieve(world_url,world_name)
except Exception,e:
print str(e)
#generate MRF
run_command("python mrfgen.py -c " + self.test_config)
def test_generate_mrf_polar(self):
# Check MRF generation succeeded
self.assertTrue(os.path.isfile(self.output_mrf), "MRF generation failed")
# Read MRF
dataset = gdal.Open(self.output_mrf)
driver = dataset.GetDriver()
print 'Driver:', str(driver.LongName)
self.assertEqual(str(driver.LongName), "Meta Raster Format", "Driver is not Meta Raster Format")
print 'Files:', ' '.join(dataset.GetFileList())
self.assertEqual(len(dataset.GetFileList()),3,"MRF does not contain triplet")
print 'Projection:', str(dataset.GetProjection())
self.assertEqual(str(dataset.GetProjection()),'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]')
print 'Size: ',dataset.RasterXSize,'x',dataset.RasterYSize, 'x',dataset.RasterCount
self.assertEqual(dataset.RasterXSize, 4096, "Size does not match")
self.assertEqual(dataset.RasterYSize, 4096, "Size does not match")
self.assertEqual(dataset.RasterCount, 3, "Size does not match")
geotransform = dataset.GetGeoTransform()
print 'Origin: (',geotransform[0], ',',geotransform[3],')'
self.assertEqual(geotransform[0], -4194304, "Origin does not match")
self.assertEqual(geotransform[3], 4194304, "Origin does not match")
print 'Pixel Size: (',geotransform[1], ',',geotransform[5],')'
self.assertEqual(int(geotransform[1]), 2048, "Pixel size does not match")
self.assertEqual(int(geotransform[5]), -2048, "Pixel size does not match")
band = dataset.GetRasterBand(1)
print 'Overviews:', band.GetOverviewCount()
self.assertEqual(band.GetOverviewCount(), 3, "Overview count does not match")
# Convert and compare MRF
mrf = gdal.Open(self.output_mrf)
driver = gdal.GetDriverByName("JPEG")
img = driver.CreateCopy(self.output_img, mrf, 0 )
print 'Generated: ' + ' '.join(img.GetFileList())
print 'Size: ',img.RasterXSize,'x',img.RasterYSize, 'x',img.RasterCount
self.assertEqual(img.RasterXSize, dataset.RasterXSize, "Size does not match")
self.assertEqual(img.RasterYSize, dataset.RasterYSize, "Size does not match")
self.assertEqual(img.RasterCount, dataset.RasterCount, "Size does not match")
# filesize = os.path.getsize(self.output_img)
# print "Comparing file size: " + self.output_img + " " + str(filesize) + " bytes"
# self.assertEqual(filesize, 3891603, "Output image does not match")
img = None
mrf = None
print "\n***Test Case Passed***\n"
def tearDown(self):
shutil.rmtree(self.input_dir)
shutil.rmtree(self.working_dir)
shutil.rmtree(self.logfile_dir)
shutil.rmtree(self.output_dir)
class TestMRFGeneration_mercator(unittest.TestCase):
def setUp(self):
self.dirpath = os.path.dirname(__file__)
self.test_config = self.dirpath + "/test/mrfgen_test_config3.xml"
self.output_dir = self.dirpath + "/test/output_dir"
self.working_dir = self.dirpath + "/test/working_dir"
self.logfile_dir = self.dirpath + "/test/logfile_dir"
self.output_mrf = self.output_dir+ "/BlueMarbleSmall2014237_.mrf"
self.output_img = self.output_dir+ "/BlueMarbleSmall2014237_.png"
self.compare_img = self.dirpath + "/test/test_comp3.png"
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
if not os.path.exists(self.working_dir):
os.makedirs(self.working_dir)
if not os.path.exists(self.logfile_dir):
os.makedirs(self.logfile_dir)
#generate MRF
run_command("python mrfgen.py -c " + self.test_config)
def test_generate_mrf(self):
# Check MRF generation succeeded
self.assertTrue(os.path.isfile(self.output_mrf), "MRF generation failed")
# Read MRF
dataset = gdal.Open(self.output_mrf)
driver = dataset.GetDriver()
print 'Driver:', str(driver.LongName)
self.assertEqual(str(driver.LongName), "Meta Raster Format", "Driver is not Meta Raster Format")
print 'Files:', ' '.join(dataset.GetFileList())
self.assertEqual(len(dataset.GetFileList()),3,"MRF does not contain triplet")
print 'Projection:', str(dataset.GetProjection())
self.assertEqual(str(dataset.GetProjection()),'PROJCS["WGS 84 / Pseudo-Mercator",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],PROJECTION["Mercator_1SP"],PARAMETER["central_meridian",0],PARAMETER["scale_factor",1],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["X",EAST],AXIS["Y",NORTH],EXTENSION["PROJ4","+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs"],AUTHORITY["EPSG","3857"]]')
print 'Size: ',dataset.RasterXSize,'x',dataset.RasterYSize, 'x',dataset.RasterCount
self.assertEqual(dataset.RasterXSize, 1024, "Size does not match")
self.assertEqual(dataset.RasterYSize, 1024, "Size does not match")
self.assertEqual(dataset.RasterCount, 3, "Size does not match")
geotransform = dataset.GetGeoTransform()
print 'Origin: (',geotransform[0], ',',geotransform[3],')'
self.assertEqual(geotransform[0], -20037508.34, "Origin does not match")
self.assertEqual(geotransform[3], 20037508.34, "Origin does not match")
print 'Pixel Size: (',geotransform[1], ',',geotransform[5],')'
self.assertEqual(str(geotransform[1]), '39135.7584766', "Pixel size does not match")
self.assertEqual(str(geotransform[5]), '-39135.7584766', "Pixel size does not match")
band = dataset.GetRasterBand(1)
print 'Overviews:', band.GetOverviewCount()
self.assertEqual(band.GetOverviewCount(), 2, "Overview count does not match")
# Convert and compare MRF
mrf = gdal.Open(self.output_mrf)
driver = gdal.GetDriverByName("PNG")
img = driver.CreateCopy(self.output_img, mrf, 0 )
print 'Generated: ' + ' '.join(img.GetFileList())
print 'Size: ',img.RasterXSize,'x',img.RasterYSize, 'x',img.RasterCount
self.assertEqual(img.RasterXSize, dataset.RasterXSize, "Size does not match")
self.assertEqual(img.RasterYSize, dataset.RasterYSize, "Size does not match")
self.assertEqual(img.RasterCount, dataset.RasterCount, "Size does not match")
print "Comparing: " + self.output_img + " to " + self.compare_img
self.assertTrue(filecmp.cmp(self.output_img, self.compare_img), "Output image does not match")
img = None
mrf = None
print "\n***Test Case Passed***\n"
def tearDown(self):
shutil.rmtree(self.working_dir)
shutil.rmtree(self.logfile_dir)
shutil.rmtree(self.output_dir)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jejemaes/jejemaes-server",
"score": 2
}
|
#### File: jejemaes-server/cloud_tools/__init__.py
```python
from collections import OrderedDict
import os
_commands = OrderedDict()
def command(func):
_commands[func.__name__] = func
return func
HERE = os.path.dirname(os.path.realpath(__file__))
DIR_FILES = HERE + '/../cloud_files/'
DIR_TEMPLATE = HERE + '/../tmpl/'
DIR_SCRIPTS = HERE + '/../scripts/'
DIR_RESOURCE = HERE + '/../resources/'
from . import server_lemp
from . import server_odoo
```
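The `command` decorator above builds a simple ordered command registry keyed by function name. A self-contained sketch of the pattern (standalone, reproducing the decorator rather than importing the repo, since importing `cloud_tools` also pulls in its server submodules) is:
```python
from collections import OrderedDict

_commands = OrderedDict()

def command(func):
    # register the function under its own name, preserving declaration order
    _commands[func.__name__] = func
    return func

@command
def hello():
    return 'hello'

print(list(_commands))       # ['hello']
print(_commands['hello']())  # hello
```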
#### File: jejemaes/jejemaes-server/fabfile.py
```python
import ConfigParser
import io
import json
import os
import re
from fabric.api import task, env, run, cd, sudo, put, hide, hosts, local, get, execute
from fabric.colors import red, yellow, green, blue, white
from fabric.utils import abort, puts, fastprint, warn
from fabric.context_managers import warn_only, settings, shell_env
from fabric.contrib.files import exists, upload_template
import fabtools
import fabtools.require
from functools import wraps
HERE = os.path.dirname(os.path.realpath(__file__))
LOCAL_DIR_RESOURCES = HERE + '/resources/'
DIR_SCRIPT = '/root/cloud/'
SERV_DIR_CLOUD_FILES = '/root/cloud/setup/cloud_files'
SERV_DIR_CLOUD_SETUP = '/root/cloud/setup'
SERV_DIR_RESOURCES = '/root/cloud/setup/resources'
SERV_DIR_FILES = '/root/cloud/setup/cloud_files'
CONFIG = {}
ODOO_USER = os.environ.get('ODOO_USER', 'odoo')
if not re.match(r'^[a-z_]+$', ODOO_USER):
abort('%r is not alphabetical' % (ODOO_USER,))
ODOO_DIR_HOME = '/home/%s' % ODOO_USER
ODOO_DIR_SRC = ODOO_DIR_HOME + '/src'
ODOO_DEFAULT_VERSION = '11.0'
# those github repo should be versionned as the odoo community repo (same branch nickname)
ODOO_REPO_DIR_MAP = {
'odoo': 'https://github.com/odoo/odoo.git',
'jejemaes': 'https://github.com/jejemaes/odoo-custom-addons.git',
}
# ----------------------------------------------------------
# Script Tools
# ----------------------------------------------------------
def as_(user):
def deco(fun):
@wraps(fun)
def wrapper(*args, **kwargs):
current_user = env.user
if current_user != user:
warn('force using user ' + user)
env.user = user
try:
return fun(*args, **kwargs)
finally:
env.user = current_user
return wrapper
return deco
def _auto_load_config_file():
config_values = {}
file_path = "config.ini"
if os.path.exists(file_path):
# Load the configuration file
with open(file_path) as f:
sample_config = f.read()
config = ConfigParser.RawConfigParser(allow_no_value=True)
config.readfp(io.BytesIO(sample_config))
for section in config.sections():
for option in config.options(section):
config_values['%s_%s' % (section, option)] = config.get(section, option)
return config_values
CONFIG = _auto_load_config_file()
def _get_config(section_prefix):
result = {}
for option in CONFIG:
if option.startswith(section_prefix):
result[option] = CONFIG.get(option)
return result
def has_systemd():
with hide('output', 'running', 'warnings'):
with settings(warn_only=True):
res = run('command -v systemctl')
return res.return_code == 0
def _validate_domain(domain):
regex = re.compile(
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
return re.match(regex, domain) is not None
def slugify(value):
name = re.sub('[^\w\s-]', '', value).strip().lower()
return name
def domain2database(domain):
temp_domain = domain[4:] if domain.startswith('www.') else domain
slug_name = slugify(temp_domain)
return slug_name
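# Illustrative example (not from the original code): domain2database('www.Example.com') -> 'examplecom'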
# fabtools issue: https://github.com/fabtools/fabtools/issues/4
# checking the existance of a user will always return True as a warning
# "/bin/bash: /root/.bash_profile: Permission denied\r\ncould not
# change directory to "/root" is returned, and is evaluated to True.
# This method patch it.
def pg_user_exists(name):
with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
res = fabtools.postgres._run_as_pg('''psql -t -A -c "SELECT count(*) FROM pg_user WHERE usename='%(name)s';"''' % locals())
return '1' in res
# ----------------------------------------------------------
# Git Utils
# ----------------------------------------------------------
def git_clone(path, gh_url, directory, branch_name=False):
""" Clone a github repo into a given directory
    :param path: absolute path of the directory in which to create the new repo
    :param gh_url: HTTPS github url of the repo to clone (e.g.: https://github.com/jejemaes/jejemaes-server.git)
    :param directory: name of directory that will contain the code
    :param branch_name: branch name (in the github repo). If given, only this branch will be fetched. Otherwise, only the
        primary branch of the github repo is fetched.
"""
with cd(path):
if not fabtools.files.is_dir(directory):
if branch_name:
run('git clone -q --branch {0} --single-branch {1} {2}'.format(branch_name, gh_url, directory))
else:
run('git clone -q --single-branch {0} {1}'.format(gh_url, directory))
return True
return False
def git_update_directory(path):
""" Update code from git for the given git directory
:param path: absolute path of the git directory to update
"""
if not fabtools.files.is_dir(path):
puts(red('Setup directory {0} not found'.format(path)))
return
puts(blue('Update setup directory {0}'.format(path)))
with cd(path):
sudo('git fetch --quiet --prune')
sudo('git rebase --quiet --autostash')
with hide('status', 'commands'):
no_conflicts = sudo('git diff --diff-filter=U --no-patch --exit-code').succeeded
puts(blue('Update of cloud scripts done !'))
return no_conflicts
# ----------------------------------------------------------
# Deployment / Setup for Odoo and LEMP server
# ----------------------------------------------------------
@task
@as_('root')
def deploy(server=False):
if not server:
_setup_common_packages()
_setup_server_scripts()
# Odoo Server Deploy
if not server or server == 'odoo':
_setup_odoo_packages()
_setup_odoo_user()
run('geoipupdate')
# LEMP Server Deploy
if not server or server == 'lemp':
_setup_lemp_server()
_setup_rsync_files(server)
if not server:
fabtools.require.service.stopped('postgresql')
_setup_postgres()
fabtools.require.service.restarted('postgresql')
setup_metabase()
def _setup_common_packages():
""" Method to install common packages """
debs = """
debconf-utils
git
htop
jq
nginx
python-pip
postgresql
postgresql-contrib
rsync
vim
""".split()
fabtools.deb.update_index()
run("apt-get upgrade -y --force-yes")
fabtools.deb.install(debs, options=['--force-yes', '--ignore-missing'])
python_pip = """
docopt
fabric
fabtools
mako
sh
suds
""".split()
fabtools.python.install(python_pip, upgrade=True)
def _setup_server_scripts():
""" Checkout the scripts and setup the /root/src directory """
# checkout or pull
sudo('mkdir -p ' + DIR_SCRIPT)
with cd(DIR_SCRIPT):
if not fabtools.files.is_dir('setup'):
sudo('git clone https://github.com/jejemaes/jejemaes-server.git setup')
else:
git_update_directory(DIR_SCRIPT + 'setup')
def _setup_rsync_files(server=False):
""" Synchronize files from the setup repo to the real server configuration, in order to set services, ... as it should be. """
# nginx config
sudo('rsync -rtlE %s/etc/nginx/nginx.conf /etc/nginx/nginx.conf' % (SERV_DIR_CLOUD_FILES,))
# postgres config
sudo("find /etc/postgresql -name 'postgresql.local.conf' -type l -delete")
sudo("find /etc/postgresql -name 'main' -type d -exec touch '{}/postgresql.local.conf' ';' -exec chown postgres:postgres '{}/postgresql.local.conf' ';'")
sudo('rsync -rtlE %s/etc/postgresql /etc/postgresql' % (SERV_DIR_CLOUD_FILES,))
# make cloud meta tool executable
sudo("mkdir -p %s/log" % (SERV_DIR_CLOUD_SETUP,))
sudo("chmod 775 %s/cloud-meta" % (SERV_DIR_CLOUD_SETUP,))
if not server or server == 'odoo':
run('rsync -rtlE %s/odoo/ %s' % (SERV_DIR_CLOUD_FILES, ODOO_DIR_HOME))
sudo("chown -R {0}:{0} {1}/bin".format(ODOO_USER, ODOO_DIR_HOME))
run('rsync -rtlE %s/etc/sudoers.d/ /etc/sudoers.d/' % (SERV_DIR_CLOUD_FILES,))
run('chmod 440 /etc/sudoers.d/*')
def _setup_postgres(version='9.5'):
""" Setup postgres databse user and root and odoo roles """
datadir = '/home/postgres/%s/main' % version
if not fabtools.files.is_dir(datadir):
fabtools.require.directory('/home/postgres/%s/main' % version)
run('chown -R postgres:postgres /home/postgres')
sudo('/usr/lib/postgresql/%s/bin/initdb --locale=en_US.UTF-8 --lc-collate=C %s' % (version, datadir), user='postgres')
fabtools.service.start('postgresql')
if not pg_user_exists('root'):
fabtools.postgres.create_user('root', 'root', superuser=True)
if not pg_user_exists('odoo'):
fabtools.postgres.create_user('odoo', 'odoo', superuser=True)
        sudo('''psql -tAq -d postgres -c "ALTER USER odoo CREATEDB;"''', user='postgres')
@task
@as_('root')
def setup_metabase():
""" Create or update schema of `meta` database. Only root should access it since this is the cloud user. """
META = 'meta'
with settings(sudo_user=env.user):
if not fabtools.postgres.database_exists(META):
fabtools.postgres.create_database(META, owner='root')
with shell_env(PGOPTIONS='--client-min-messages=warning'):
sudo('psql -Xq -d {0} -f {1}/metabase.sql'.format(META, SERV_DIR_RESOURCES))
@as_('root')
def _setup_odoo_packages():
""" Install/Update debian and python packages needed for Odoo Server """
codename = sudo('lsb_release -cs').strip()
uninstall = """mlocate xinetd locate wkhtmltopdf whoopsie""".split()
# local packages repo
sio = io.BytesIO(b"deb http://nightly.openerp.com/deb/%s ./" % codename)
put(sio, '/etc/apt/sources.list.d/odoo.list')
sio = io.BytesIO(b"Package: nginx\nPin: origin nightly.openerp.com\nPin-Priority: 1001")
put(sio, '/etc/apt/preferences.d/odoo')
run('add-apt-repository -y ppa:maxmind/ppa') # for geoipupdate
base_debs = """
curl
fabric
file
geoipupdate
git
graphviz
htop
jq
less
libdbd-pg-perl
libev-dev
libevent-dev
libfreetype6-dev
libjpeg8-dev
libpq-dev
libsasl2-dev
libtiff-dev
libwww-perl
libxml2-dev
libxslt1-dev
lsb-release
lsof
make
mosh
ncdu
npm
p7zip-full
pg-activity
rsync
sudo
tree
unzip
uptimed
vim
wkhtmltox
zip
""".split()
fabtools.deb.uninstall(uninstall, purge=True, options=['--quiet'])
fabtools.deb.update_index()
run("apt-get upgrade -y --force-yes")
p3_debs = """
python3-dev
python3-babel
python3-dateutil
python3-decorator
python3-docopt
python3-docutils
python3-feedparser
python3-geoip
python3-gevent
python3-html2text
python3-jinja2
python3-lxml
python3-mako
python3-markdown
python3-matplotlib
python3-mock
python3-ofxparse
python3-openid
python3-passlib
python3-pil
python3-pip
python3-psutil
python3-psycopg2
python3-pydot
python3-pyparsing
python3-pypdf2
python3-reportlab
python3-requests
python3-setproctitle
python3-simplejson
python3-tz
python3-unittest2
python3-vatnumber
python3-werkzeug
python3-xlrd
python3-xlsxwriter
python3-yaml
""".split()
p3_pips = """
fabtools
geoip2
num2words==0.5.4
phonenumbers
psycogreen
python-slugify
suds-jurko
vobject
xlwt
""".split()
debs = base_debs + p3_debs
fabtools.deb.install(debs, options=['--force-yes', '--ignore-missing'])
# NOTE libevent-dev is required by gevent. /!\ version 1.0 of gevent will require libev-dev (and cython)
# run('pip install cython -e git://github.com/surfly/[email protected]#egg=gevent')
# fabtools.python.install(python_pip, upgrade=False)
run("pip3 install -q {}".format(' '.join(p3_pips)))
# Nodejs
run("ln -sf /usr/bin/nodejs /usr/bin/node")
run("npm install -g less less-plugin-clean-css")
def _setup_odoo_user():
if not fabtools.user.exists(ODOO_USER):
if ODOO_USER != 'odoo':
abort('user %r does not exists' % ODOO_USER)
return
fabtools.user.create('odoo', create_home=True, shell='/bin/bash')
sudo('mkdir -p {0}/log'.format(ODOO_DIR_HOME))
sudo('mkdir -p {0}/src'.format(ODOO_DIR_HOME))
sudo('mkdir -p {0}/bin'.format(ODOO_DIR_HOME))
sudo("chown -R {0}:{0} {1}/log".format(ODOO_USER, ODOO_DIR_HOME))
sudo("chown -R {0}:{0} {1}/src".format(ODOO_USER, ODOO_DIR_HOME))
sudo("chown -R {0}:{0} {1}/bin".format(ODOO_USER, ODOO_DIR_HOME))
@task
def setup_odoo_services(): #TODO JEM: not sure this is usefull
_setup_rsync_files('odoo')
if not fabtools.systemd.is_running('nginx'):
fabtools.systemd.start('nginx')
fabtools.systemd.enable('nginx')
# ----------------------------------------------------------
# LEMP server
# ----------------------------------------------------------
@as_('root')
def _setup_lemp_server():
with cd('/root/cloud/setup'):
sudo('./cloud-setup lemp setup -v')
@task
@as_('root')
def update_lemp_server():
with cd('/root/cloud/setup'):
sudo('./cloud-setup lemp update -v')
#TODO: define default env with mysql root credentials
@task
@as_('root')
def lemp_create_account(domain, user, password):
group = user
home_dir = '/home/%s' % (user,)
# create unix group
if not fabtools.group.exists(group):
fabtools.group.create(group)
# create unix user
if not fabtools.user.exists(user):
fabtools.user.create(user, group=group, home=home_dir, shell='/bin/bash')
# create php fpm and nginx files, and restart services
with cd('/root/cloud/setup'):
sudo('./cloud-setup lemp newsite -d {dns} -u {user} -g {group} -v'.format(dns=domain, user=user, group=user))
fabtools.service.restart('php7.0-fpm')
fabtools.service.restart('nginx')
# create mysql user and database
if not fabtools.mysql.user_exists(user):
fabtools.mysql.create_user(user, password)
if not fabtools.mysql.database_exists(user):
fabtools.mysql.create_database(user, owner=user)
# FTP SQL entries
unix_group_id = fabtools.utils.run_as_root("id -g %s" % (user,))
unix_user_id = fabtools.utils.run_as_root("id -u %s" % (user,))
query = """INSERT IGNORE INTO meta.ftpgroup (groupname, gid, members) VALUES ("%s", %s, "%s");""" % (user, unix_group_id, user)
puts(fabtools.mysql.query(query))
query = """INSERT IGNORE INTO meta.ftpuser (userid, passwd, uid, gid, homedir, shell, count, accessed, modified) VALUES ("%s", "%s", %s, %s, "%s", "/sbin/nologin", 0, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP);""" % (user, password, unix_user_id, unix_group_id, home_dir)
puts(fabtools.mysql.query(query))
# ----------------------------------------------------------
# Odoo Server
# ----------------------------------------------------------
@task
def odoo_checkout(branch_nick):
""" Odoo: deploy (fetch sources, create service, ....) or update code base of given branch (fetch sources, restart service, ....) """
_odoo_fetch_sources(branch_nick)
if not _odoo_is_service_running(branch_nick): # then create it !
_odoo_sync_filestore()
sudo('{0}/cloud-meta odoo-add-version {1}'.format(SERV_DIR_CLOUD_SETUP, branch_nick))
_odoo_create_initd(branch_nick)
else: # simply restart service after source code checkout
_odoo_service_action(branch_nick, 'restart')
def _odoo_fetch_sources(branch_nick):
""" Fetch source in src/<version> or update sources """
for directory, repo_url in ODOO_REPO_DIR_MAP.items():
current_path = ODOO_DIR_SRC + '/' + directory + '/'
if fabtools.files.is_dir(current_path + branch_nick):
git_update_directory(current_path + branch_nick)
else:
sudo('mkdir -p {0}'.format(current_path))
result = git_clone(current_path, repo_url, branch_nick, branch_nick)
if result:
puts(blue('Odoo %s sources fetched !' % (branch_nick,)))
else:
puts(red('Error when fetching Odoo %s sources !' % (branch_nick,)))
run("chown -R {0}:{0} {1}".format(ODOO_USER, current_path))
def _odoo_sync_filestore():
# create a filestore directory and symlink from ~/.local to /home/odoo : one filestore per database
sudo('mkdir -p {0}/filestore'.format(ODOO_DIR_HOME))
# resymlink
product = 'Odoo' # hardcoded (was 'OpenERP' in 8.0)
sudo('mkdir -p {0}/.local/share/{1}/sessions'.format(ODOO_DIR_HOME, product))
sudo('ln -sfT {0}/filestore {0}/.local/share/{1}/filestore'.format(ODOO_DIR_HOME, product))
def _odoo_branch2service(branch):
"""returns a tuple (service_name, service_path)"""
service_name = 'openerp-' + branch
if has_systemd():
service_path = '/etc/systemd/system/{0}.service'.format(service_name)
else:
service_path = '/etc/init.d/{0}'.format(service_name)
return (service_name, service_path)
@as_('root')
def _odoo_create_initd(branch):
""" create the initd file for the service """
ctx = {
'branch': branch
}
sudo('ln -sf {0}/bin/openerp {0}/bin/openerp-{1}'.format(ODOO_DIR_HOME, branch))
def _upload_template(template, target, mode):
upload_template(os.path.join(LOCAL_DIR_RESOURCES, template), target, ctx, backup=False, mode=mode)
service_name, service_path = _odoo_branch2service(branch)
if has_systemd():
# systemd
_upload_template('unit_openerp.tpl', service_path, '0644')
run('systemctl daemon-reload')
fabtools.systemd.enable(service_name)
else:
# SysV init
_upload_template('initd_openerp.tpl', service_path, '0755')
run('update-rc.d {0} defaults'.format(service_name))
def _odoo_is_service_running(branch_nick):
""" check if the service of given branch exists and is running """
service_name, service_path = _odoo_branch2service(branch_nick)
if fabtools.files.is_file(service_path):
return fabtools.systemd.is_running(service_name)
return False
def _odoo_service_action(branch_nick, action):
service_name, service_path = _odoo_branch2service(branch_nick)
if not fabtools.files.is_file(service_path):
puts(yellow('service {0} missing: skipping'.format(branch_nick)))
else:
if has_systemd():
getattr(fabtools.systemd, action)(service_name)
else:
run('{0} {1}'.format(service_path, action))
@task
def odoo_db_add(domain, branch_nick, dbname=False):
if not _validate_domain(domain):
raise Exception("Given domain is not correct. Got '%s'." % (domain,))
if not dbname:
dbname = domain2database(domain)
# insert meta entry
sudo('{0}/cloud-meta odoo-add-database {1} {2} {3}'.format(SERV_DIR_CLOUD_SETUP, domain, branch_nick, dbname))
# create nginx file
odoo_create_nginx_config(dbname)
@task
def odoo_create_nginx_config(dbname):
""" Create the nginx config file and restart nginx service
:param dbname: name of the database to (re)generate the config file
"""
dbinfo_str = sudo('{0}/cloud-meta odoo-get-info {1}'.format(SERV_DIR_CLOUD_SETUP, dbname))
dbinfo = json.loads(dbinfo_str)
# create nginx file
upload_template(os.path.join(LOCAL_DIR_RESOURCES, 'nginx_openerp.tpl'), '/etc/nginx/sites-availables/%s' % (dbname,), dbinfo, backup=False, mode='0644')
run("ln -sfT /etc/nginx/sites-availables/%s /etc/nginx/sites-enabled/%s" % (dbname, dbname))
# restart nginx
if fabtools.systemd.is_running('nginx'):
fabtools.systemd.restart('nginx')
else:
fabtools.systemd.start('nginx')
```
|
{
"source": "jejen-juanda/TTMediaBot",
"score": 2
}
|
#### File: bot/connectors/tt_player_connector.py
```python
import logging
from threading import Thread
import time
from bot.player import State
from bot import vars
class TTPlayerConnector(Thread):
def __init__(self, player, ttclient):
super().__init__(daemon=True)
self.name = 'TTPlayerConnector'
self.player = player
self.ttclient = ttclient
def run(self):
last_player_state = State.Stopped
last_track_meta = {'name': None, 'url': None}
self._close = False
while not self._close:
try:
if self.player.state != last_player_state:
last_player_state = self.player.state
if self.player.state == State.Playing:
self.ttclient.enable_voice_transmission()
last_track_meta = self.player.track.get_meta()
if self.player.track.name:
self.ttclient.change_status_text(_('Playing: {track_name}').format(track_name=self.player.track.name))
else:
self.ttclient.change_status_text(_('Playing: {stream_url}').format(stream_url=self.player.track.url))
elif self.player.state == State.Stopped:
self.ttclient.disable_voice_transmission()
self.ttclient.change_status_text('')
elif self.player.state == State.Paused:
self.ttclient.disable_voice_transmission()
if self.player.track.name:
self.ttclient.change_status_text(_('Paused: {track_name}').format(track_name=self.player.track.name))
else:
self.ttclient.change_status_text(_('Paused: {stream_url}').format(stream_url=self.player.track.url))
if self.player.track.get_meta() != last_track_meta and last_player_state != State.Stopped:
last_track_meta = self.player.track.get_meta()
self.ttclient.change_status_text('{state}: {name}'.format(state=self.ttclient.status.split(':')[0], name=self.player.track.name))
except Exception as e:
                logging.error(e, exc_info=True)
time.sleep(vars.loop_timeout)
def close(self):
self._close = True
```
|
{
"source": "jejenone/peeringdb",
"score": 2
}
|
#### File: management/commands/pdb_api_test.py
```python
import copy
import unittest
import uuid
import random
import re
import time
import datetime
from types import NoneType
from twentyc.rpc import RestClient, PermissionDeniedException, InvalidRequestException, NotFoundException
from django_namespace_perms.constants import PERM_READ, PERM_UPDATE, PERM_CREATE, PERM_DELETE
from django.core.management.base import BaseCommand
from django.contrib.auth.models import Group
from django.conf import settings
from django.db.utils import IntegrityError
from rest_framework import serializers
from peeringdb_server.models import (
REFTAG_MAP, QUEUE_ENABLED, User, Organization, Network, InternetExchange,
Facility, NetworkContact, NetworkIXLan, NetworkFacility, IXLan,
IXLanPrefix, InternetExchangeFacility)
from peeringdb_server.serializers import REFTAG_MAP as REFTAG_MAP_SLZ
START_TIMESTAMP = time.time()
SHARED = {}
NUMERIC_TESTS = {
"lt": "Less",
"lte": "LessEqual",
"gt": "Greater",
"gte": "GreaterEqual",
"": "Equal"
}
DATETIME = datetime.datetime.now()
DATE = DATETIME.date()
DATE_YDAY = DATE - datetime.timedelta(days=1)
DATE_TMRW = DATE - datetime.timedelta(days=-1)
DATES = {
"today": (DATE, DATE.strftime("%Y-%m-%d")),
"yesterday": (DATE_YDAY, DATE_YDAY.strftime("%Y-%m-%d")),
"tomorrow": (DATE_TMRW, DATE_TMRW.strftime("%Y-%m-%d"))
}
# entity names
ORG_RW = "API Test Organization RW"
ORG_RW_PENDING = "%s:Pending" % ORG_RW
ORG_R = "API Test Organization R"
NET_R = "%s:Network" % ORG_R
NET_R_PENDING = "%s:Pending" % NET_R
NET_R_DELETED = "%s:Deleted" % NET_R
IX_R = "%s:Exchange" % ORG_R
FAC_R = "%s:Facility" % ORG_R
# user specs
USER = {"user": "api_test", "password": "<PASSWORD>"}
USER_ORG_ADMIN = {"user": "api_test_org_admin", "password": "<PASSWORD>"}
USER_ORG_MEMBER = {"user": "api_test_org_member", "password": "<PASSWORD>"}
USER_CRUD = {
"delete": {
"user": "api_test_crud_delete",
"password": "<PASSWORD>"
},
"update": {
"user": "api_test_crud_update",
"password": "<PASSWORD>"
},
"create": {
"user": "api_test_crud_create",
"password": "<PASSWORD>"
}
}
# server location
URL = settings.API_URL
# common
CITY = "Chicago"
COUNTRY = "US"
CONTINENT = "North America"
PHONE = "12345"
WEBSITE = "http://www.test.apitest"
STATE = "IL"
ZIPCODE = "1-2345"
NOTE = "This is a test entry made by a script to test out the API"
EMAIL = "<EMAIL>"
VERBOSE = False
class TestJSON(unittest.TestCase):
rest_client = RestClient
PREFIX_COUNT = 110
IP4_COUNT = 1
IP6_COUNT = 1
@classmethod
def get_ip6(cls):
r = u"fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:%d" % cls.IP6_COUNT
cls.IP6_COUNT += 1
return r
@classmethod
def get_ip4(cls):
r = u"1.1.1.%d" % cls.IP4_COUNT
cls.IP4_COUNT += 1
return r
@classmethod
def get_prefix4(cls):
r = u"206.41.{}.0/24".format(cls.PREFIX_COUNT)
cls.PREFIX_COUNT += 1
return r
def setUp(self):
self.db_guest = self.rest_client(URL, verbose=VERBOSE)
self.db_user = self.rest_client(URL, verbose=VERBOSE, **USER)
self.db_org_member = self.rest_client(URL, verbose=VERBOSE,
**USER_ORG_MEMBER)
self.db_org_admin = self.rest_client(URL, verbose=VERBOSE,
**USER_ORG_ADMIN)
for p, specs in USER_CRUD.items():
setattr(self, "db_crud_%s" % p,
self.rest_client(URL, verbose=VERBOSE, **specs))
def all_dbs(self, exclude=[]):
return [
db
for db in [
self.db_guest, self.db_org_member, self.db_user,
self.db_org_admin, self.db_crud_create, self.db_crud_delete,
self.db_crud_update
] if db not in exclude
]
def readonly_dbs(self, exclude=[]):
return [
db for db in [self.db_guest, self.db_org_member, self.db_user]
if db not in exclude
]
##########################################################################
@classmethod
def make_data_org(self, **kwargs):
data = {
"name": self.make_name("Test"),
"website": WEBSITE,
"notes": NOTE,
"address1": "address",
"address2": "address",
"city": CITY,
"country": COUNTRY,
"state": "state",
"zipcode": "12345"
}
data.update(**kwargs)
return data
##########################################################################
@classmethod
def make_data_ix(self, **kwargs):
data = {
"name": self.make_name("Test"),
"org_id": SHARED["org_rw_ok"].id,
"name_long": self.make_name("Test Long Name"),
"city": CITY,
"country": COUNTRY,
"region_continent": CONTINENT,
"media": "Ethernet",
"notes": NOTE,
"proto_unicast": True,
"proto_multicast": False,
"proto_ipv6": True,
"website": WEBSITE,
"url_stats": "%s/stats" % WEBSITE,
"tech_email": EMAIL,
"tech_phone": PHONE,
"policy_email": EMAIL,
"policy_phone": PHONE
}
data.update(**kwargs)
return data
##########################################################################
@classmethod
def make_data_fac(self, **kwargs):
data = {
"name": self.make_name("Test"),
"org_id": SHARED["org_rw_ok"].id,
"website": WEBSITE,
"clli": str(uuid.uuid4())[:6].upper(),
"rencode": str(uuid.uuid4())[:6].upper(),
"npanxx": "000-111",
"latitude": None,
"longitude": None,
"notes": NOTE
}
data.update(**kwargs)
return data
##########################################################################
@classmethod
def make_data_net(self, **kwargs):
try:
asn = Network.objects.order_by("-asn").first().asn
except AttributeError:
asn = 90000000
if asn < 90000000:
asn = 90000000
else:
asn = asn + 1
data = {
"name": self.make_name("Test"),
"org_id": SHARED["org_rw_ok"].id,
"aka": self.make_name("Also known as"),
"asn": asn,
"website": WEBSITE,
"irr_as_set": "AS-XX-XXXXXX",
"info_type": "NSP",
"info_prefixes4": 11000,
"info_prefixes6": 12000,
"info_traffic": "1 Tbps+",
"info_ratio": "Mostly Outbound",
"info_scope": "Global",
"info_unicast": True,
"info_multicast": False,
"info_ipv6": True,
"notes": NOTE,
"policy_url": "%s/policy" % WEBSITE,
"policy_general": "Restrictive",
"policy_locations": "Required - International",
"policy_ratio": True,
"policy_contracts": "Required"
}
data.update(**kwargs)
return data
##########################################################################
@classmethod
def make_data_poc(self, **kwargs):
data = {
"net_id": 1,
"role": "Technical",
"visible": "Private",
"name": "NOC",
"phone": PHONE,
"email": EMAIL,
"url": WEBSITE
}
data.update(**kwargs)
return data
##########################################################################
@classmethod
def make_data_ixlan(self, **kwargs):
data = {
"ix_id": 1,
"name": self.make_name("Test"),
"descr": NOTE,
"mtu": 12345,
"dot1q_support": False,
"rs_asn": 12345,
"arp_sponge": None
}
data.update(**kwargs)
return data
##########################################################################
@classmethod
def make_data_ixpfx(self, **kwargs):
data = {
"ixlan_id": SHARED["ixlan_r_ok"].id,
"protocol": "IPv4",
"prefix": "10.%d.10.0/23" % (self.PREFIX_COUNT + 1)
}
if "prefix" not in kwargs:
self.PREFIX_COUNT += 1
data.update(**kwargs)
return data
##########################################################################
@classmethod
def make_data_netixlan(self, rename={}, **kwargs):
data = {
"net_id": SHARED["net_r_ok"].id,
"ixlan_id": SHARED["ixlan_r_ok"].id,
"notes": NOTE,
"speed": 30000,
"asn": 12345,
"ipaddr4": self.get_ip4(),
"ipaddr6": self.get_ip6()
}
data.update(**kwargs)
for k, v in rename.items():
data[v] = data[k]
del data[k]
return data
##########################################################################
@classmethod
def make_name(self, name):
return "api-test:%s:%s" % (name, uuid.uuid4())
##########################################################################
@classmethod
def serializer_related_fields(cls, serializer_class):
"""
Returns declared relation fields on the provided serializer class
Returned value will be a tuple in which the first item is a list of
field names for primary key related fields and the second item is a list
        of field names for related sets
"""
pk_rel = []
nested_rel = []
for name, fld in serializer_class._declared_fields.items():
if type(fld) == serializers.PrimaryKeyRelatedField:
pk_rel.append(name[:-3])
elif isinstance(fld, serializers.ListSerializer):
nested_rel.append((name, fld.child))
return (pk_rel, nested_rel)
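    # Illustrative (hypothetical field names): for a serializer declaring `org_id` as a
    # PrimaryKeyRelatedField and `poc_set` as a ListSerializer, this returns
    # (["org"], [("poc_set", <child serializer>)]).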
##########################################################################
def assert_handleref_integrity(self, data):
"""
here we assert the integrity of a handleref (which is
the base of all the models exposed on the api)
we do this by making sure all of the handleref fields
exist in the data
"""
self.assertIn("status", data)
# self.assertIn("version", data)
self.assertIn("id", data)
self.assertIn("created", data)
self.assertIn("updated", data)
self.assertNotEqual("created", None)
##########################################################################
def assert_data_integrity(self, data, typ, ignore=[]):
if hasattr(self, "make_data_%s" % typ):
msg = "data integrity failed on key '%s'"
func = getattr(self, "make_data_%s" % typ)
for k, v in func().items():
if k in ignore:
continue
if type(v) in [str, unicode]:
self.assertIn(
type(data.get(k)),
[str, unicode], msg=msg % k)
elif type(v) in [int, long]:
self.assertIn(type(data.get(k)), [int, long], msg=msg % k)
else:
self.assertEqual(type(v), type(data.get(k)), msg=msg % k)
##########################################################################
def assert_get_single(self, data):
self.assertEqual(len(data), 1)
return data[0]
##########################################################################
def assert_get_forbidden(self, db, typ, id):
with self.assertRaises(PermissionDeniedException) as cm:
db.get(typ, id)
##########################################################################
def assert_get_handleref(self, db, typ, id):
data = self.assert_get_single(db.get(typ, id))
self.assert_handleref_integrity(data)
self.assert_data_integrity(data, typ)
return data
##########################################################################
def assert_existing_fields(self, a, b, ignore={}):
for k, v in a.items():
if ignore and k in ignore:
continue
if k in ["suggest"]:
continue
self.assertEqual(v, b.get(k))
##########################################################################
def assert_delete(self, db, typ, test_success=None, test_failure=None):
if test_success:
db.rm(typ, test_success)
with self.assertRaises(NotFoundException) as cm:
self.assert_get_handleref(db, typ, test_success)
if test_failure:
with self.assertRaises(PermissionDeniedException) as cm:
db.rm(typ, test_failure)
try:
self.assert_get_handleref(db, typ, test_failure)
except PermissionDeniedException:
pass
##########################################################################
def assert_create(self, db, typ, data, test_failures=None,
test_success=True, **kwargs):
if test_success:
r_data = self.assert_get_single(
db.create(typ, data, return_response=True).get("data"))
self.assert_existing_fields(data, r_data,
ignore=kwargs.get("ignore"))
self.assertGreater(r_data.get("id"), 0)
status_checked = False
for model in QUEUE_ENABLED:
if hasattr(model, "handleref") and model.handleref.tag == typ:
self.assertEqual(r_data.get("status"), "pending")
status_checked = True
if not status_checked:
self.assertEqual(r_data.get("status"), "ok")
else:
r_data = {}
# if test_failures is set we want to test fail conditions
if test_failures:
# we test fail because of invalid data
if "invalid" in test_failures:
data_invalid = copy.copy(data)
for k, v in test_failures["invalid"].items():
data_invalid[k] = v
with self.assertRaises(InvalidRequestException) as inst:
r = db.create(typ, data_invalid, return_response=True)
for k, v in test_failures["invalid"].items():
self.assertIn(k, r.keys())
self.assertEqual("400 Unknown", str(inst.exception[1]))
# we test fail because of parent entity status
if "status" in test_failures:
data_status = copy.copy(data)
for k, v in test_failures["status"].items():
data_status[k] = v
with self.assertRaises(InvalidRequestException) as inst_status:
r = db.create(typ, data_status, return_response=True)
self.assertIn("not yet been approved",
str(inst_status.exception))
# we test fail because of permissions
if "perms" in test_failures:
data_perms = copy.copy(data)
for k, v in test_failures["perms"].items():
data_perms[k] = v
with self.assertRaises(PermissionDeniedException) as inst:
db.create(typ, data_perms, return_response=True)
return r_data
##########################################################################
def assert_create_status_failure(self, db, typ, data):
"""
        Wrapper around assert_create that asserts creation fails because
        the parent entity's status has not yet been approved
"""
self.assert_create(db, typ, data, test_failures={"status": {}},
test_success=False)
##########################################################################
def assert_update(self, db, typ, id, data, test_failures=False,
test_success=True):
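        # `test_failures` may contain the keys "invalid", "perms" and
        # "readonly"; overrides under "invalid" and "perms" should make the
        # update raise, while overrides under "readonly" should be silently
        # ignored by the update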
if test_success:
orig = self.assert_get_handleref(db, typ, id)
orig.update(**data)
else:
orig = {"id": id}
orig.update(**data)
for k, v in orig.items():
if k[-3:] == "_id" and k[:-3] in orig:
del orig[k[:-3]]
if test_success:
db.update(typ, **orig)
u_data = self.assert_get_handleref(db, typ, id)
if type(test_success) == list:
for test in test_success:
if test and callable(test):
test(data, u_data)
else:
# self.assertGreater(u_data["version"], orig["version"])
for k, v in data.items():
self.assertEqual(u_data.get(k), v)
# if test_failures is set we want to test fail conditions
if test_failures:
            # test that the update fails because of invalid data
if "invalid" in test_failures:
data_invalid = copy.copy(orig)
for k, v in test_failures["invalid"].items():
data_invalid[k] = v
with self.assertRaises(InvalidRequestException) as inst:
db.update(typ, **data_invalid)
self.assertEqual("400 Unknown", str(inst.exception[1]))
            # test that the update fails because of missing permissions
if "perms" in test_failures:
data_perms = copy.copy(orig)
for k, v in test_failures["perms"].items():
data_perms[k] = v
with self.assertRaises(PermissionDeniedException) as inst:
db.update(typ, **data_perms)
            # test that updates to readonly fields are silently ignored
if "readonly" in test_failures:
data_ro = copy.copy(orig)
b_data = self.assert_get_handleref(db, typ, id)
data_ro.update(**test_failures["readonly"])
db.update(typ, **data_ro)
u_data = self.assert_get_handleref(db, typ, id)
for k, v in test_failures["readonly"].items():
self.assertEqual(u_data.get(k), b_data.get(k))
##########################################################################
def assert_list_filter_related(self, target, rel, fld="id", valid=None,
valid_m=None):
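        # assert that the `target` listing endpoint can be filtered by the
        # related object `rel` on field `fld`, both via an exact match and
        # via an __in lookup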
#if not valid:
# valid = [o.id for k, o in SHARED.items() if type(
# o) != int and k.find("%s_" % target) == 0]
if fld != "id":
qfld = "_%s" % fld
else:
qfld = fld
ids = [
getattr(SHARED["%s_r_ok" % rel], fld),
getattr(SHARED["%s_rw_ok" % rel], fld)
]
kwargs_s = {
"%s_%s" % (rel, qfld): getattr(SHARED["%s_r_ok" % rel], fld)
}
kwargs_m = {
"%s_%s__in" % (rel, qfld): ",".join([str(id) for id in ids])
}
if hasattr(REFTAG_MAP[target], "%s" % rel):
valid_s = [
r.id
for r in REFTAG_MAP[target].objects.filter(**kwargs_s)
.filter(status="ok")
]
valid_m = [
r.id
for r in REFTAG_MAP[target]
.objects.filter(**{
"%s_%s__in" % (rel, qfld): ids
}).filter(status="ok")
]
elif target == "poc":
valid_s = [SHARED["%s_r_ok_public" % target].id]
valid_m = [
SHARED["%s_r_ok_public" % target].id,
SHARED["%s_rw_ok_public" % target].id
]
else:
valid_s = [SHARED["%s_r_ok" % target].id]
valid_m = [
SHARED["%s_r_ok" % target].id, SHARED["%s_rw_ok" % target].id
]
# exact
data = self.db_guest.all(target, **kwargs_s)
self.assertGreater(len(data), 0)
for row in data:
self.assert_data_integrity(row, target)
self.assertIn(row["id"], valid_s)
# in
data = self.db_guest.all(target, **kwargs_m)
self.assertGreater(len(data), 0)
for row in data:
self.assert_data_integrity(row, target)
self.assertIn(row["id"], valid_m)
##########################################################################
def assert_related_depth(self, obj, serializer_class, r_depth, t_depth,
note_tag, typ="listing", list_exclude=[]):
"""
        Assert the data integrity of structures within a result that have
        been expanded via the depth parameter
"""
        # get all the relationship properties declared in the serializer
pk_flds, n_flds = self.serializer_related_fields(serializer_class)
# some tag so we can track where the assertions fail since this will
# be doing nested checks
note_tag = "%s(%d/%d)" % (note_tag, r_depth, t_depth)
# first check that the provided object is not None, as this should
# never be the case
self.assertNotEqual(type(obj), NoneType, msg=note_tag)
# single primary key relation fields
for pk_fld in pk_flds:
            # the serializer has marked this field to be excluded from the
            # serialized data, so don't check for it
if pk_fld in list_exclude:
continue
if typ == "listing":
# in listing mode, depth should never expand pk relations
self.assertEqual(
obj.get(pk_fld), None, msg="PK Relation %s %s" % (note_tag,
pk_fld))
else:
# in single get mode, expand everything as long as we are at
# a relative depth greater than 1
if r_depth >= 1:
self.assert_related_depth(
obj.get(pk_fld), REFTAG_MAP_SLZ.get(pk_fld),
r_depth - 1, t_depth, "%s.%s" % (note_tag,
pk_fld), typ=typ)
else:
self.assertIn(
type(obj.get(pk_fld)),
[int, long, NoneType],
msg="PK Relation %s %s" % (note_tag, pk_fld))
# nested set relations
for n_fld, n_fld_cls in n_flds:
if r_depth > 1:
# sets should be expanded to objects
self.assertIn(n_fld, obj,
msg="Nested set existing (dN) %s %s" % (note_tag,
n_fld))
# make sure set exists and is of the correct type
self.assertEqual(
type(obj[n_fld]), list,
msg="Nested set list type (dN) %s %s" % (note_tag, n_fld))
# assert further depth expansions on all expanded objects in
# the set
for row in obj[n_fld]:
self.assert_related_depth(
row, n_fld_cls, r_depth - 2, t_depth, "%s.%s" %
(note_tag, n_fld), typ=typ, list_exclude=getattr(
n_fld_cls.Meta, "list_exclude", []))
elif r_depth == 1:
# sets should be expanded to ids
self.assertIn(n_fld, obj,
msg="Nested set existing (d1) %s %s" % (note_tag,
n_fld))
# make sure set exists and is of the correct type
self.assertEqual(
type(obj[n_fld]), list,
msg="Nested set list type (d1) %s %s" % (note_tag, n_fld))
                # make sure all values in the set are of type int or long
for row in obj[n_fld]:
self.assertIn(
type(row),
[long, int],
msg="Nested set containing ids (d1) %s %s" % (note_tag,
n_fld))
else:
# sets should not exist
                self.assertNotIn(n_fld, obj,
                                 msg="Nested set not existing (d0) %s %s" %
                                 (note_tag, n_fld))
##########################################################################
# TESTS WITH USER THAT IS NOT A MEMBER OF AN ORGANIZATION
##########################################################################
def test_user_001_GET_org(self):
self.assert_get_handleref(self.db_user, "org", SHARED["org_r_ok"].id)
##########################################################################
def test_user_001_GET_net(self):
data = self.assert_get_handleref(self.db_user, "net",
SHARED["net_r_ok"].id)
self.assertNotEqual(len(data.get("poc_set")), 0)
##########################################################################
def test_user_001_GET_ix(self):
self.assert_get_handleref(self.db_user, "ix", SHARED["ix_r_ok"].id)
##########################################################################
def test_user_001_GET_fac(self):
self.assert_get_handleref(self.db_user, "fac", SHARED["fac_r_ok"].id)
##########################################################################
def test_user_001_GET_fac_netcount(self):
data = self.assert_get_handleref(self.db_user, "fac",
SHARED["fac_r_ok"].id)
self.assertEqual(data.get("net_count"), 1)
##########################################################################
def test_user_001_GET_poc_public(self):
self.assert_get_handleref(self.db_user, "poc",
SHARED["poc_r_ok_public"].id)
##########################################################################
def test_user_001_GET_poc_users(self):
self.assert_get_handleref(self.db_user, "poc",
SHARED["poc_r_ok_users"].id)
##########################################################################
def test_user_001_GET_poc_private(self):
self.assert_get_forbidden(self.db_user, "poc",
SHARED["poc_r_ok_private"].id)
##########################################################################
def test_user_001_GET_nefac(self):
self.assert_get_handleref(self.db_user, "netfac",
SHARED["netfac_r_ok"].id)
##########################################################################
def test_user_001_GET_netixlan(self):
self.assert_get_handleref(self.db_user, "netixlan",
SHARED["netixlan_r_ok"].id)
##########################################################################
def test_user_001_GET_ixfac(self):
self.assert_get_handleref(self.db_user, "ixfac",
SHARED["ixfac_r_ok"].id)
##########################################################################
def test_user_001_GET_ixlan(self):
self.assert_get_handleref(self.db_user, "ixlan",
SHARED["ixlan_r_ok"].id)
##########################################################################
def test_user_001_GET_ixpfx(self):
self.assert_get_handleref(self.db_user, "ixpfx",
SHARED["ixpfx_r_ok"].id)
##########################################################################
def test_user_005_list_poc(self):
data = self.db_guest.all("poc", limit=1000)
for row in data:
self.assertIn(row.get("visible"), ["Users", "Public"])
data = self.db_guest.all("poc", visible="Private", limit=100)
self.assertEqual(0, len(data))
##########################################################################
# TESTS WITH USER THAT IS ORGANIZATION MEMBER
##########################################################################
def test_org_member_001_GET_poc_public(self):
self.assert_get_handleref(self.db_org_member, "poc",
SHARED["poc_r_ok_public"].id)
##########################################################################
def test_org_member_001_GET_poc_users(self):
self.assert_get_handleref(self.db_org_member, "poc",
SHARED["poc_r_ok_users"].id)
##########################################################################
def test_org_member_001_GET_poc_private(self):
self.assert_get_handleref(self.db_org_member, "poc",
SHARED["poc_r_ok_private"].id)
##########################################################################
# TESTS WITH USER THAT IS ORGANIZATION ADMINISTRATOR
##########################################################################
##########################################################################
def test_org_admin_001_GET_poc_public(self):
self.assert_get_handleref(self.db_org_admin, "poc",
SHARED["poc_r_ok_public"].id)
##########################################################################
def test_org_admin_001_GET_poc_users(self):
self.assert_get_handleref(self.db_org_admin, "poc",
SHARED["poc_r_ok_users"].id)
##########################################################################
def test_org_admin_001_GET_poc_private(self):
# org admin is admin of rw org, so trying to access the private poc of the r org
# should still be forbidden
self.assert_get_forbidden(self.db_org_admin, "poc",
SHARED["poc_r_ok_private"].id)
##########################################################################
def test_org_admin_002_POST_PUT_DELETE_ix(self):
data = self.make_data_ix(prefix=self.get_prefix4())
r_data = self.assert_create(
self.db_org_admin,
"ix",
data,
ignore=["prefix"],
test_failures={
"invalid": {
"prefix": self.get_prefix4(),
"name": ""
},
"perms": {
"prefix": self.get_prefix4(),
                    # need to set name again so it doesn't fail unique validation
"name": self.make_name("Test"),
                    # set org to an organization the user doesn't have perms to
"org_id": SHARED["org_r_ok"].id
},
"status": {
                    # need to set name again so it doesn't fail unique validation
"prefix": self.get_prefix4(),
"name": self.make_name("Test"),
"org_id": SHARED["org_rwp"].id
}
})
SHARED["ix_id"] = r_data.get("id")
self.assert_update(self.db_org_admin, "ix", SHARED["ix_id"],
{"name": self.make_name("Test")}, test_failures={
"invalid": {
"name": ""
},
"perms": {
"id": SHARED["ix_r_ok"].id
}
})
self.assert_delete(self.db_org_admin, "ix",
test_success=SHARED["ix_id"],
test_failure=SHARED["ix_r_ok"].id)
self.assert_create(
self.db_org_admin, "ix", data, test_success=False, test_failures={
"invalid": {
"prefix": self.get_prefix4(),
"policy_email": "",
"tech_email": ""
},
})
self.assert_create(self.db_org_admin, "ix", data, test_success=False,
test_failures={
"invalid": {
"prefix": ""
},
})
##########################################################################
def test_org_admin_002_POST_PUT_DELETE_fac(self):
data = self.make_data_fac()
r_data = self.assert_create(
self.db_org_admin,
"fac",
data,
test_failures={
"invalid": {
"name": ""
},
"perms": {
                    # need to set name again so it doesn't fail unique validation
"name": self.make_name("Test"),
                    # set org to an organization the user doesn't have perms to
"org_id": SHARED["org_r_ok"].id
},
"status": {
"name": self.make_name("Test"),
"org_id": SHARED["org_rwp"].id
}
})
SHARED["fac_id"] = r_data.get("id")
self.assert_update(
self.db_org_admin,
"fac",
SHARED["fac_id"],
{"name": self.make_name("Test")},
test_failures={
"invalid": {
"name": ""
},
"perms": {
"id": SHARED["fac_r_ok"].id
},
"readonly": {
"latitude": 1, #this should not take as it is read only
"longitude": 1 #this should not take as it is read only
}
},
)
self.assert_delete(self.db_org_admin, "fac",
test_success=SHARED["fac_id"],
test_failure=SHARED["fac_r_ok"].id)
##########################################################################
def test_org_admin_002_POST_PUT_DELETE_net(self):
data = self.make_data_net(asn=9000900)
r_data = self.assert_create(
self.db_org_admin,
"net",
data,
test_failures={
"invalid": {
"name": ""
},
"perms": {
                    # need to set name again so it doesn't fail unique validation
"name": self.make_name("Test"),
"asn": data["asn"] + 1,
                    # set org to an organization the user doesn't have perms to
"org_id": SHARED["org_r_ok"].id
},
"status": {
"org_id": SHARED["org_rwp"].id,
"asn": data["asn"] + 1,
"name": self.make_name("Test")
}
})
SHARED["net_id"] = r_data.get("id")
self.assert_update(self.db_org_admin, "net", SHARED["net_id"],
{"name": self.make_name("Test")}, test_failures={
"invalid": {
"name": ""
},
"perms": {
"id": SHARED["net_r_ok"].id
}
})
self.assert_delete(self.db_org_admin, "net",
test_success=SHARED["net_id"],
test_failure=SHARED["net_r_ok"].id)
# Test RiR not found failure
r_data = self.assert_create(
self.db_org_admin, "net", data,
test_failures={"invalid": {
"asn": 9999999
}}, test_success=False)
##########################################################################
def test_org_admin_002_PUT_net_write_only_fields(self):
"""
        check that certain fields which are allowed to be set via the
        api, but are not supposed to be rendered in the api data,
        work correctly
"""
def test_write_only_fields_missing(orig, updated):
            assert "allow_ixp_update" not in updated
net = SHARED["net_rw_ok"]
self.assertEqual(net.allow_ixp_update, False)
self.assert_update(self.db_org_admin, "net", net.id,
{"allow_ixp_update": True},
test_success=[test_write_only_fields_missing])
net.refresh_from_db()
self.assertEqual(net.allow_ixp_update, True)
##########################################################################
def test_org_admin_002_POST_PUT_DELETE_netfac(self):
data = {
"net_id": SHARED["net_rw_ok"].id,
"fac_id": SHARED["fac_rw_ok"].id,
"local_asn": 12345
}
r_data = self.assert_create(
self.db_org_admin,
"netfac",
data,
test_failures={
"invalid": {
"net_id": ""
},
"perms": {
                    # set network to one the user doesn't have perms to
"net_id": SHARED["net_r_ok"].id
},
"status": {
"net_id": SHARED["net_rw_pending"].id,
"fac_id": SHARED["fac_rw_pending"].id,
}
})
SHARED["netfac_id"] = r_data.get("id")
self.assert_update(self.db_org_admin, "netfac", SHARED["netfac_id"],
{"local_asn": random.randint(999, 9999)},
test_failures={
"invalid": {
"fac_id": ""
},
"perms": {
"net_id": SHARED["net_r_ok"].id
}
})
self.assert_delete(self.db_org_admin, "netfac",
test_success=SHARED["netfac_id"],
test_failure=SHARED["netfac_r_ok"].id)
# re-create deleted netfac
r_data = self.assert_create(self.db_org_admin, "netfac", data)
# re-delete
self.assert_delete(self.db_org_admin, "netfac",
test_success=SHARED["netfac_id"])
##########################################################################
def test_org_admin_002_POST_PUT_DELETE_poc(self):
data = self.make_data_poc(net_id=SHARED["net_rw_ok"].id)
r_data = self.assert_create(
self.db_org_admin,
"poc",
data,
test_failures={
"invalid": {
"net_id": ""
},
"perms": {
                    # set network to one the user doesn't have perms to
"net_id": SHARED["net_r_ok"].id
},
"status": {
"net_id": SHARED["net_rw_pending"].id
}
})
SHARED["poc_id"] = r_data.get("id")
self.assert_update(self.db_org_admin, "poc", SHARED["poc_id"],
{"role": "Sales"}, test_failures={
"invalid": {
"role": "NOPE"
},
"perms": {
"net_id": SHARED["net_r_ok"].id
}
})
self.assert_delete(self.db_org_admin, "poc",
test_success=SHARED["poc_id"],
test_failure=SHARED["poc_r_ok_users"].id)
##########################################################################
def test_org_admin_002_POST_PUT_DELETE_ixlan(self):
data = self.make_data_ixlan(ix_id=SHARED["ix_rw_ok"].id)
r_data = self.assert_create(
self.db_org_admin, "ixlan", data, test_failures={
"invalid": {
"ix_id": ""
},
"perms": {
"ix_id": SHARED["ix_r_ok"].id
},
"status": {
"ix_id": SHARED["ix_rw_pending"].id
}
})
SHARED["ixlan_id"] = r_data["id"]
self.assert_update(self.db_org_admin, "ixlan", SHARED["ixlan_id"],
{"name": self.make_name("Test")}, test_failures={
"invalid": {
"mtu": "NEEDS TO BE INT"
},
"perms": {
"ix_id": SHARED["ix_r_ok"].id
}
})
self.assert_delete(self.db_org_admin, "ixlan",
test_success=SHARED["ixlan_id"],
test_failure=SHARED["ixlan_r_ok"].id)
##########################################################################
def test_org_admin_002_POST_PUT_DELETE_ixpfx(self):
data = self.make_data_ixpfx(ixlan_id=SHARED["ixlan_rw_ok"].id,
prefix="192.168.3.11/25")
r_data = self.assert_create(
self.db_org_admin, "ixpfx", data, test_failures={
"invalid": {
"prefix": "127.0.0.0/8"
},
"perms": {
"prefix": "192.168.127.12/24",
"ixlan_id": SHARED["ixlan_r_ok"].id
},
"status": {
"prefix": "192.168.127.12/24",
"ixlan_id": SHARED["ixlan_rw_pending"].id
}
})
SHARED["ixpfx_id"] = r_data["id"]
#self.assert_create(self.db_org_admin, "ixpfx", data, test_failures={
# "invalid": {
# "prefix": "206.126.236.0/25"
# },
#}, test_success=False)
self.assert_update(self.db_org_admin, "ixpfx", SHARED["ixpfx_id"],
{"prefix": "192.168.3.11/24"}, test_failures={
"invalid": {
"prefix": "NEEDS TO BE VALID PREFIX"
},
"perms": {
"ixlan_id": SHARED["ixlan_r_ok"].id
}
})
self.assert_delete(self.db_org_admin, "ixpfx",
test_success=SHARED["ixpfx_id"],
test_failure=SHARED["ixpfx_r_ok"].id)
# re-create deleted ixpfx
r_data = self.assert_create(self.db_org_admin, "ixpfx", data)
# re-delete
self.assert_delete(self.db_org_admin, "ixpfx",
test_success=SHARED["ixpfx_id"])
        # re-creating a deleted ixpfx that we don't have write permissions to
        # should fail
pfx = IXLanPrefix.objects.create(ixlan=SHARED["ixlan_r_ok"],
prefix=u"192.168.127.12/24",
protocol="IPv4")
pfx.delete()
data.update(prefix="192.168.127.12/24")
r_data = self.assert_create(self.db_org_admin, "ixpfx", data,
test_failures={"invalid": {
}}, test_success=False)
# make sure protocols are validated
r_data = self.assert_create(self.db_org_admin, "ixpfx", data,
test_failures={
"invalid": {
"prefix": "192.168.3.11/24",
"protocol": "IPv6"
},
}, test_success=False)
##########################################################################
def test_org_admin_002_POST_PUT_DELETE_netixlan(self):
data = self.make_data_netixlan(net_id=SHARED["net_rw_ok"].id,
ixlan_id=SHARED["ixlan_rw_ok"].id)
r_data = self.assert_create(
self.db_org_admin,
"netixlan",
data,
test_failures={
"invalid": {
"ipaddr4": u"a b c"
},
"perms": {
                    # set network to one the user doesn't have perms to
"ipaddr4": self.get_ip4(),
"ipaddr6": self.get_ip6(),
"net_id": SHARED["net_r_ok"].id
}
})
SHARED["netixlan_id"] = r_data.get("id")
self.assert_update(self.db_org_admin, "netixlan",
SHARED["netixlan_id"], {"speed": 2000},
test_failures={
"invalid": {
"ipaddr4": "NEEDS TO BE VALID IP"
},
"perms": {
"net_id": SHARED["net_r_ok"].id
}
})
self.assert_delete(self.db_org_admin, "netixlan",
test_success=SHARED["netixlan_id"],
test_failure=SHARED["netixlan_r_ok"].id)
##########################################################################
def test_org_admin_002_POST_PUT_DELETE_ixfac(self):
data = {
"fac_id": SHARED["fac_rw2_ok"].id,
"ix_id": SHARED["ix_rw2_ok"].id
}
r_data = self.assert_create(
self.db_org_admin,
"ixfac",
data,
test_failures={
"invalid": {
"ix_id": ""
},
"perms": {
                    # set exchange to one the user doesn't have perms to
"ix_id": SHARED["ix_r_ok"].id
},
"status": {
"fac_id": SHARED["fac_rw2_pending"].id,
"ix_id": SHARED["ix_rw2_pending"].id
}
})
SHARED["ixfac_id"] = r_data.get("id")
self.assert_update(self.db_org_admin, "ixfac", SHARED["ixfac_id"],
{"fac_id": SHARED["fac_r2_ok"].id}, test_failures={
"invalid": {
"fac_id": ""
},
"perms": {
"ix_id": SHARED["ix_r_ok"].id
}
})
self.assert_delete(self.db_org_admin, "ixfac",
test_success=SHARED["ixfac_id"],
test_failure=SHARED["ixfac_r_ok"].id)
##########################################################################
def test_org_admin_003_PUT_org(self):
self.assert_update(self.db_org_admin, "org", SHARED["org_rw_ok"].id,
{"name": self.make_name("Test")}, test_failures={
"invalid": {
"name": ""
},
"perms": {
"id": SHARED["org_r_ok"].id
}
})
##########################################################################
def test_zz_org_admin_004_DELETE_org(self):
self.assert_delete(self.db_org_admin, "org",
test_success=SHARED["org_rw_ok"].id,
test_failure=SHARED["org_r_ok"].id)
##########################################################################
# GUEST TESTS
##########################################################################
def test_guest_001_GET_org(self):
self.assert_get_handleref(self.db_guest, "org", SHARED["org_r_ok"].id)
##########################################################################
def test_guest_001_GET_net(self):
data = self.assert_get_handleref(self.db_guest, "net",
SHARED["net_r_ok"].id)
for poc in data.get("poc_set"):
self.assertEqual(poc["visible"], "Public")
##########################################################################
def __test_guest_001_GET_asn(self):
"""
ASN endpoint is currently disabled
"""
return
self.assert_get_handleref(self.db_guest, "asn", SHARED["net_r_ok"].asn)
with self.assertRaises(InvalidRequestException) as inst:
self.assert_get_handleref(self.db_guest, "asn",
"%s[" % SHARED["net_r_ok"].asn)
##########################################################################
def test_guest_001_GET_ix(self):
self.assert_get_handleref(self.db_guest, "ix", SHARED["ix_r_ok"].id)
##########################################################################
def test_guest_001_GET_fac(self):
self.assert_get_handleref(self.db_guest, "fac", SHARED["fac_r_ok"].id)
##########################################################################
def test_guest_001_GET_poc_private(self):
self.assert_get_forbidden(self.db_guest, "poc",
SHARED["poc_r_ok_private"].id)
##########################################################################
def test_guest_001_GET_poc_users(self):
self.assert_get_forbidden(self.db_guest, "poc",
SHARED["poc_r_ok_users"].id)
##########################################################################
def test_guest_001_GET_poc_public(self):
self.assert_get_handleref(self.db_guest, "poc",
SHARED["poc_r_ok_public"].id)
##########################################################################
def test_guest_001_GET_nefac(self):
self.assert_get_handleref(self.db_guest, "netfac",
SHARED["netfac_r_ok"].id)
##########################################################################
def test_guest_001_GET_netixlan(self):
self.assert_get_handleref(self.db_guest, "netixlan",
SHARED["netixlan_r_ok"].id)
##########################################################################
def test_guest_001_GET_ixfac(self):
self.assert_get_handleref(self.db_guest, "ixfac",
SHARED["ixfac_r_ok"].id)
##########################################################################
def test_guest_001_GET_ixlan(self):
self.assert_get_handleref(self.db_guest, "ixlan",
SHARED["ixlan_r_ok"].id)
##########################################################################
def test_guest_001_GET_ixpfx(self):
self.assert_get_handleref(self.db_guest, "ixpfx",
SHARED["ixpfx_r_ok"].id)
##########################################################################
def test_guest_001_GET_list_404(self):
for tag in REFTAG_MAP:
with self.assertRaises(NotFoundException) as inst:
data = self.db_guest.all(tag, limit=1, id=99999999)
if tag == "net":
with self.assertRaises(NotFoundException) as inst:
data = self.db_guest.all(tag, limit=1, asn=99999999999)
for tag in REFTAG_MAP:
if tag == "poc":
data = self.db_guest.all(tag, id=SHARED["poc_r_ok_public"].id)
else:
data = self.db_guest.all(tag, id=SHARED["%s_r_ok" % tag].id)
self.assertEqual(len(data), 1)
self.assert_handleref_integrity(data[0])
##########################################################################
def test_guest_005_list_all(self):
data = self.db_guest.all("org")
self.assertGreater(len(data), 1)
self.assert_handleref_integrity(data[0])
self.assert_data_integrity(data[0], "org")
##########################################################################
def test_guest_005_list_all_tags(self):
for tag in REFTAG_MAP:
if tag == "poc":
continue
data = self.db_guest.all(tag, limit=10)
self.assertLess(len(data), 11)
self.assert_handleref_integrity(data[0])
data = self.db_guest.all("poc", limit=10, visible="Public")
self.assertLess(len(data), 11)
self.assert_handleref_integrity(data[0])
##########################################################################
def test_org_admin_005_list(self):
for tag in REFTAG_MAP:
data = self.db_org_admin.all(tag, limit=10)
self.assertLess(len(data), 11)
self.assert_handleref_integrity(data[0])
for row in data:
self.assertEqual(row["status"], "ok")
##########################################################################
def test_guest_005_fields_filter(self):
data = self.db_guest.all("org", limit=10, fields=",".join(
["name", "status"]))
self.assertGreater(len(data), 0)
for row in data:
self.assertEqual(sorted(row.keys()), sorted([u"name", u"status"]))
data = self.db_guest.get("org", 1, fields=",".join(["name", "status"]))
self.assertGreater(len(data), 0)
self.assertEqual(sorted(data[0].keys()), sorted([u"name", u"status"]))
##########################################################################
def test_guest_005_list_limit(self):
data = self.db_guest.all("org", limit=10)
self.assertEqual(len(data), 10)
self.assert_handleref_integrity(data[0])
self.assert_data_integrity(data[0], "org")
##########################################################################
def test_guest_005_list_pagination(self):
n = 1
for i in range(0, 10):
data = self.db_guest.all("org", skip=i * 10, limit=10)
for row in data:
self.assertEqual(row.get("id"), n)
n += 1
##########################################################################
def test_guest_005_list_since(self):
data = self.db_guest.all("net", since=int(START_TIMESTAMP) - 10,
status="deleted")
self.assertEqual(len(data), 2)
self.assert_handleref_integrity(data[0])
self.assert_data_integrity(data[0], "net")
##########################################################################
def test_guest_005_get_depth_all(self):
"""
        Test single-object GET on all endpoints with all valid depths.
        This also asserts data structure integrity for objects expanded
        by the depth parameter
"""
for depth in [0, 1, 2, 3, 4]:
for tag, slz in REFTAG_MAP_SLZ.items():
note_tag = "(%s %s)" % (tag, depth)
if tag == "poc":
o = SHARED["%s_r_ok_public" % tag]
else:
o = SHARED["%s_r_ok" % tag]
data = self.db_guest.get(tag, o.id, depth=depth)
self.assertEqual(len(data), 1, msg="Data length %s" % note_tag)
pk_flds, n_flds = self.serializer_related_fields(slz)
obj = data[0]
self.assert_related_depth(obj, slz, depth, depth, note_tag,
typ="single")
##########################################################################
def test_guest_005_list_depth_all(self):
"""
        Test multiple-object (listing) GET on all endpoints with all valid depths.
        This also asserts data structure integrity for objects expanded
        by the depth parameter
"""
for depth in [0, 1, 2, 3]:
for tag, slz in REFTAG_MAP_SLZ.items():
note_tag = "(%s %s)" % (tag, depth)
if tag == "poc":
o = SHARED["%s_r_ok_public" % tag]
else:
o = SHARED["%s_r_ok" % tag]
data = self.db_guest.all(tag, id=o.id, depth=depth)
self.assertEqual(len(data), 1, msg="Data length %s" % note_tag)
pk_flds, n_flds = self.serializer_related_fields(slz)
obj = data[0]
self.assert_related_depth(obj, slz, depth, depth, note_tag,
typ="listing")
##########################################################################
def test_guest_005_list_depth_not_set(self):
data = self.db_guest.all("org", id=SHARED["org_r_ok"].id)
self.assertEqual(data[0].get("net_set"), None)
##########################################################################
def test_guest_005_list_depth_0(self):
data = self.db_guest.all("org", id=SHARED["org_r_ok"].id, depth=0)
self.assertEqual(data[0].get("net_set"), None)
##########################################################################
def test_guest_005_list_depth_1(self):
data = self.db_guest.all("org", id=SHARED["org_r_ok"].id, depth=1)
self.assertEqual(len(data[0].get("net_set")), 3)
self.assertEqual(data[0].get("net_set")[0], SHARED["net_r_ok"].id)
self.assertEqual(data[0].get("net_set")[1], SHARED["net_r2_ok"].id)
self.assertEqual(data[0].get("net_set")[2], SHARED["net_r3_ok"].id)
#############################################################################
def test_guest_005_list_depth_2(self):
data = self.db_guest.all("org", id=SHARED["org_r_ok"].id, depth=2)
self.assertEqual(len(data[0].get("net_set")), 3)
obj = data[0].get("net_set")[0]
self.assertEqual(obj.get("id"), SHARED["net_r_ok"].id)
self.assert_data_integrity(obj, "net", ignore=["org_id"])
#############################################################################
def test_guest_005_list_depth_3(self):
data = self.db_guest.all("org", id=SHARED["org_r_ok"].id, depth=3)
self.assertEqual(len(data[0].get("net_set")), 3)
obj = data[0].get("net_set")[0]
self.assertEqual(obj.get("id"), SHARED["net_r_ok"].id)
self.assert_data_integrity(obj, "net", ignore=["org_id"])
obj = obj.get("netfac_set")
self.assertEqual(len(obj), 1)
self.assertEqual(obj[0], SHARED["netfac_r_ok"].id)
##########################################################################
def test_guest_005_list_filter_dates_numeric(self):
for flt, ass in NUMERIC_TESTS.items():
for fld in ["created", "updated"]:
if flt in ["gt", "gte"]:
DATE = DATES["yesterday"]
elif flt in ["lt"]:
DATE = DATES["tomorrow"]
else:
DATE = DATES["today"]
if flt:
kwargs = {"%s__%s" % (fld, flt): DATE[1]}
else:
kwargs = {fld: DATE[1]}
data = self.db_guest.all("fac", limit=10, **kwargs)
self.assertGreater(
len(data), 0, msg="%s_%s - data length assertion" % (fld,
flt))
for row in data:
self.assert_data_integrity(row, "fac")
try:
dt = datetime.datetime.strptime(
row[fld], "%Y-%m-%dT%H:%M:%SZ").date()
except ValueError:
dt = datetime.datetime.strptime(
row[fld], "%Y-%m-%dT%H:%M:%S.%fZ").date()
fnc = getattr(self, "assert%s" % ass)
fnc(dt, DATE[0],
msg="%s__%s: %s, %s" % (fld, flt, row[fld], DATE[1]))
##########################################################################
def test_guest_005_list_filter_numeric(self):
data = self.db_guest.all("net", asn=SHARED["net_r_ok"].asn)
self.assertEqual(len(data), 1)
self.assert_data_integrity(data[0], "net")
self.assertEqual(data[0]["asn"], SHARED["net_r_ok"].asn)
##########################################################################
def test_guest_005_list_filter_numeric_lte(self):
data = self.db_guest.all("fac", id__lte=SHARED["fac_rw_ok"].id)
self.assertGreater(len(data), 0)
self.assert_data_integrity(data[0], "fac")
for fac in data:
self.assertLessEqual(long(fac["id"]), SHARED["fac_rw_ok"].id)
##########################################################################
def test_guest_005_list_filter_numeric_lt(self):
data = self.db_guest.all("fac", id__lt=SHARED["fac_rw_ok"].id)
self.assertGreater(len(data), 0)
self.assert_data_integrity(data[0], "fac")
for fac in data:
self.assertLess(long(fac["id"]), SHARED["fac_rw_ok"].id)
##########################################################################
def test_guest_005_list_filter_numeric_gte(self):
data = self.db_guest.all("fac", id__gte=SHARED["fac_r_ok"].id)
self.assertGreater(len(data), 0)
self.assert_data_integrity(data[0], "fac")
for fac in data:
self.assertGreaterEqual(long(fac["id"]), SHARED["fac_r_ok"].id)
##########################################################################
def test_guest_005_list_filter_numeric_gt(self):
data = self.db_guest.all("fac", id__gt=SHARED["fac_r_ok"].id)
self.assertGreater(len(data), 0)
self.assert_data_integrity(data[0], "fac")
for fac in data:
self.assertGreater(long(fac["id"]), SHARED["fac_r_ok"].id)
##########################################################################
def test_guest_005_list_filter_numeric_in(self):
ids = [SHARED["fac_r_ok"].id, SHARED["fac_rw_ok"].id]
data = self.db_guest.all("fac", id__in="%s,%s" % tuple(ids))
self.assertEqual(len(data), len(ids))
self.assert_data_integrity(data[0], "fac")
for fac in data:
self.assertIn(long(fac["id"]), ids)
##########################################################################
def test_guest_005_list_filter_string(self):
data = self.db_guest.all("ix", name=SHARED["ix_r_ok"].name)
self.assertEqual(len(data), 1)
self.assert_data_integrity(data[0], "ix")
self.assertEqual(data[0]["name"], SHARED["ix_r_ok"].name)
##########################################################################
def test_guest_005_list_filter_string_contains(self):
token = SHARED["ix_r_ok"].name[3:5]
data = self.db_guest.all("ix", name__contains=token.lower())
self.assertGreater(len(data), 0)
self.assert_data_integrity(data[0], "ix")
for ix in data:
self.assertIn(token, ix["name"])
##########################################################################
def test_guest_005_list_filter_string_startswith(self):
token = SHARED["ix_r_ok"].name[0:5]
data = self.db_guest.all("ix", name__startswith=token.lower())
self.assertGreater(len(data), 0)
self.assert_data_integrity(data[0], "ix")
for ix in data:
self.assertEqual(ix["name"][:5], token)
##########################################################################
def test_guest_005_list_filter_string_in(self):
        names = ["API Test:IX:RW:ok", "API Test:IX:R:ok"]
        data = self.db_guest.all("ix", name__in="%s,%s" % tuple(names))
        self.assertGreater(len(data), 0)
        self.assert_data_integrity(data[0], "ix")
        for ix in data:
            self.assertIn(ix["name"], names)
##########################################################################
def test_guest_005_list_filter_relation_basic(self):
data = self.db_guest.all("ix", org_id=SHARED["ix_r_ok"].org_id)
self.assertEqual(len(data), 3)
self.assert_data_integrity(data[0], "ix")
self.assertEqual(data[0]["org_id"], SHARED["ix_r_ok"].org_id)
##########################################################################
def test_guest_005_list_filter_relation_basic_2(self):
data = self.db_guest.all("ix", org=SHARED["ix_r_ok"].org_id)
self.assertEqual(len(data), 3)
self.assert_data_integrity(data[0], "ix")
self.assertEqual(data[0]["org_id"], SHARED["ix_r_ok"].org_id)
##########################################################################
def test_guest_005_list_filter_relation_fld_xl(self):
data = self.db_guest.all("netixlan", net_id__lt=4)
for row in data:
self.assertLess(row["net_id"], 4)
##########################################################################
def test_guest_005_list_filter_relation_nested(self):
data = self.db_user.all("poc", net__asn=SHARED["net_r_ok"].asn)
self.assertEqual(len(data), 2)
for row in data:
self.assertEqual(row.get("net_id"), SHARED["net_r_ok"].id)
##########################################################################
def test_guest_005_list_poc(self):
data = self.db_guest.all("poc", limit=100)
for row in data:
self.assertEqual(row.get("visible"), "Public")
data = self.db_guest.all("poc", visible__in="Private,Users", limit=100)
self.assertEqual(0, len(data))
##########################################################################
def test_guest_005_list_filter_net_related(self):
self.assert_list_filter_related("net", "ix")
self.assert_list_filter_related("net", "ixlan")
self.assert_list_filter_related("net", "netixlan")
self.assert_list_filter_related("net", "netfac")
self.assert_list_filter_related("net", "fac")
self.assert_list_filter_related("net", "org")
##########################################################################
def test_guest_005_list_filter_net_not_ix(self):
ix = SHARED["ix_r_ok"]
data_a = self.db_guest.all("net", ix=ix.id)
data_b = self.db_guest.all("net", not_ix=ix.id)
self.assertGreater(len(data_a), 0)
self.assertGreater(len(data_b), 0)
for row_b in data_b:
for row_a in data_a:
self.assertNotEqual(row_a["id"], row_b["id"])
##########################################################################
def test_guest_005_list_filter_net_not_fac(self):
fac = SHARED["fac_r_ok"]
data_a = self.db_guest.all("net", fac=fac.id)
data_b = self.db_guest.all("net", not_fac=fac.id)
self.assertGreater(len(data_a), 0)
self.assertGreater(len(data_b), 0)
for row_b in data_b:
for row_a in data_a:
self.assertNotEqual(row_a["id"], row_b["id"])
##########################################################################
def test_guest_005_list_filter_ixpfx_related(self):
self.assert_list_filter_related("ixpfx", "ix")
self.assert_list_filter_related("ixpfx", "ixlan")
##########################################################################
def test_guest_005_list_filter_ix_related(self):
self.assert_list_filter_related("ix", "ixlan")
self.assert_list_filter_related("ix", "ixfac")
self.assert_list_filter_related("ix", "fac")
self.assert_list_filter_related("ix", "net")
self.assert_list_filter_related("ix", "net", "asn")
self.assert_list_filter_related("ix", "org")
##########################################################################
def test_guest_005_list_filter_ix_ipblock(self):
prefix = str(SHARED["ixpfx_r_ok"].prefix)[:-3]
data = self.db_guest.all("ix", ipblock=prefix)
self.assertGreater(len(data), 0)
for row in data:
self.assertEqual(row["id"], SHARED["ix_r_ok"].id)
##########################################################################
def test_guest_005_list_filter_ix_name_search(self):
data = self.db_guest.all("ix", name_search=SHARED["ix_r_ok"].name)
self.assertEqual(len(data), 1)
for row in data:
self.assertEqual(row["id"], SHARED["ix_r_ok"].id)
data = self.db_guest.all("ix", name_search=SHARED["ix_r_ok"].name_long)
self.assertEqual(len(data), 1)
for row in data:
self.assertEqual(row["id"], SHARED["ix_r_ok"].id)
##########################################################################
def test_guest_005_list_filter_ix_asn_overlap(self):
# create three test networks
networks = [
Network.objects.create(status="ok", **self.make_data_net())
for i in range(0, 3)
]
# create two test exchanges
exchanges = [
InternetExchange.objects.create(status="ok", **self.make_data_ix())
for i in range(0, 2)
]
# create ixlan at each exchange
ixlans = [
IXLan.objects.create(status="ok",
**self.make_data_ixlan(ix_id=ix.id))
for ix in exchanges
]
# all three networks peer at first exchange
for net in networks:
NetworkIXLan.objects.create(network=net, ixlan=ixlans[0],
status="ok", asn=net.asn, speed=0)
# only the first two networks peer at second exchange
for net in networks[:2]:
NetworkIXLan.objects.create(network=net, ixlan=ixlans[1],
status="ok", asn=net.asn, speed=0)
# do test queries
# query #1 - test overlapping exchanges for all 3 asns - should return first ix
data = self.db_guest.all("ix", asn_overlap=",".join(
[str(net.asn) for net in networks]))
self.assertEqual(len(data), 1)
self.assertEqual(data[0]["id"], exchanges[0].id)
# query #2 - test overlapping exchanges for first 2 asns - should return both ixs
data = self.db_guest.all("ix", asn_overlap=",".join(
[str(net.asn) for net in networks[:2]]))
self.assertEqual(len(data), 2)
for row in data:
self.assertIn(row["id"], [ix.id for ix in exchanges])
# query #3 - should error when only passing one asn
with self.assertRaises(InvalidRequestException) as inst:
self.db_guest.all("ix", asn_overlap=networks[0].asn)
# query #4 - should error when passing too many asns
with self.assertRaises(InvalidRequestException):
self.db_guest.all("ix", asn_overlap=",".join(
[str(i) for i in range(0, 30)]))
# clean up data
for net in networks:
net.delete(hard=True)
for ix in exchanges:
ix.delete(hard=True)
##########################################################################
def test_guest_005_list_filter_fac_related(self):
self.assert_list_filter_related("fac", "ix")
self.assert_list_filter_related("fac", "net")
##########################################################################
def test_guest_005_list_filter_fac_org_name(self):
data = self.db_guest.all("fac", org_name=SHARED["org_r_ok"].name[2:10])
for row in data:
self.assertEqual(data[0]["org_id"], SHARED["org_r_ok"].id)
self.assert_data_integrity(data[0], "fac")
##########################################################################
def test_guest_005_list_filter_fac_net_count(self):
data = self.db_guest.all("fac", net_count=1)
for row in data:
self.assert_data_integrity(row, "fac")
self.assertEqual(row["net_count"], 1)
data = self.db_guest.all("fac", net_count=0)
for row in data:
self.assert_data_integrity(row, "fac")
self.assertEqual(row["net_count"], 0)
data = self.db_guest.all("fac", net_count__lt=1)
for row in data:
self.assert_data_integrity(row, "fac")
self.assertEqual(row["net_count"], 0)
data = self.db_guest.all("fac", net_count__gt=0)
for row in data:
self.assert_data_integrity(row, "fac")
self.assertGreater(row["net_count"], 0)
##########################################################################
def test_guest_005_list_filter_fac_asn_overlap(self):
# create three test networks
networks = [
Network.objects.create(status="ok", **self.make_data_net())
for i in range(0, 3)
]
# create two test facilities
facilities = [
Facility.objects.create(status="ok", **self.make_data_fac())
for i in range(0, 2)
]
# all three networks peer at first facility
for net in networks:
NetworkFacility.objects.create(network=net, facility=facilities[0],
status="ok")
# only the first two networks peer at second facility
for net in networks[:2]:
NetworkFacility.objects.create(network=net, facility=facilities[1],
status="ok")
# do test queries
# query #1 - test overlapping facilities for all 3 asns - should return first facility
data = self.db_guest.all("fac", asn_overlap=",".join(
[str(net.asn) for net in networks]))
self.assertEqual(len(data), 1)
self.assertEqual(data[0]["id"], facilities[0].id)
# query #2 - test overlapping facilities for first 2 asns - should return both facs
data = self.db_guest.all("fac", asn_overlap=",".join(
[str(net.asn) for net in networks[:2]]))
self.assertEqual(len(data), 2)
for row in data:
            self.assertIn(row["id"], [fac.id for fac in facilities])
# query #3 - should error when only passing one asn
with self.assertRaises(InvalidRequestException):
self.db_guest.all("fac", asn_overlap=networks[0].asn)
# query #4 - should error when passing too many asns
with self.assertRaises(InvalidRequestException):
self.db_guest.all("fac", asn_overlap=",".join(
[str(i) for i in range(0, 30)]))
# clean up data
for net in networks:
net.delete(hard=True)
for fac in facilities:
fac.delete(hard=True)
##########################################################################
def test_guest_005_list_filter_netixlan_related(self):
self.assert_list_filter_related("netixlan", "net")
self.assert_list_filter_related("netixlan", "ixlan")
self.assert_list_filter_related("netixlan", "ix")
##########################################################################
def test_guest_005_list_filter_netixlan_related_name(self):
data = self.db_guest.all("netixlan", name=SHARED["ix_rw_ok"].name)
self.assertEqual(len(data), 1)
self.assert_data_integrity(data[0], "netixlan")
##########################################################################
def test_guest_005_list_filter_netfac_related(self):
self.assert_list_filter_related("netfac", "net")
self.assert_list_filter_related("netfac", "fac")
##########################################################################
def test_guest_005_list_filter_netfac_related_name(self):
data = self.db_guest.all("netfac", name=SHARED["fac_rw_ok"].name)
self.assertEqual(len(data), 1)
self.assert_data_integrity(data[0], "netfac")
##########################################################################
def test_guest_005_list_filter_netfac_related_city(self):
data = self.db_guest.all("netfac", city=SHARED["fac_rw_ok"].city)
self.assertEqual(len(data), 2)
self.assert_data_integrity(data[0], "netfac")
##########################################################################
def test_guest_005_list_filter_netfac_related_country(self):
data = self.db_guest.all("netfac", country=SHARED["fac_rw_ok"].country)
self.assertEqual(len(data), 2)
self.assert_data_integrity(data[0], "netfac")
##########################################################################
def test_guest_005_list_filter_ixlan_related(self):
self.assert_list_filter_related("ixlan", "ix")
##########################################################################
def test_guest_005_list_filter_ixfac_related(self):
self.assert_list_filter_related("ixfac", "fac")
self.assert_list_filter_related("ixfac", "ix")
##########################################################################
def test_guest_005_list_filter_poc_related(self):
self.assert_list_filter_related("poc", "net")
return
data = self.db_guest.all("poc", net_id=SHARED["net_r_ok"].id)
self.assertGreater(len(data), 0)
for row in data:
self.assert_data_integrity(row, "poc")
self.assertEqual(row["net_id"], SHARED["net_r_ok"].id)
##########################################################################
def test_guest_005_list_skip(self):
data = self.db_guest.all("org", skip=0, limit=20)
self.assertEqual(len(data), 20)
target = data[10]
data = self.db_guest.all("org", skip=10, limit=20)
self.assertEqual(len(data), 20)
comp = data[0]
self.assertEqual(target, comp)
##########################################################################
# READONLY PERMISSION TESTS
# These tests assert that the readonly users cannot write anything
##########################################################################
##########################################################################
def test_readonly_users_003_PUT_org(self):
for db in self.readonly_dbs():
self.assert_update(db, "org", SHARED["org_r_ok"].id, {},
test_success=False, test_failures={
"perms": {}
})
##########################################################################
def test_readonly_users_002_POST_ix(self):
for db in self.readonly_dbs():
self.assert_create(db, "ix",
self.make_data_ix(prefix=self.get_prefix4()),
test_failures={"perms": {}}, test_success=False)
##########################################################################
def test_readonly_users_003_PUT_ix(self):
for db in self.readonly_dbs():
self.assert_update(db, "ix", SHARED["ix_r_ok"].id, {},
test_success=False, test_failures={
"perms": {}
})
##########################################################################
def test_readonly_users_004_DELETE_ix(self):
for db in self.readonly_dbs():
self.assert_delete(db, "ix", test_success=False,
test_failure=SHARED["ix_r_ok"].id)
##########################################################################
def test_readonly_users_002_POST_fac(self):
for db in self.readonly_dbs():
self.assert_create(db, "fac", self.make_data_fac(),
test_failures={"perms": {}}, test_success=False)
##########################################################################
def test_readonly_users_003_PUT_fac(self):
for db in self.readonly_dbs():
self.assert_update(db, "fac", SHARED["fac_r_ok"].id, {},
test_success=False, test_failures={
"perms": {}
})
##########################################################################
def test_readonly_users_004_DELETE_fac(self):
for db in self.readonly_dbs():
self.assert_delete(db, "fac", test_success=False,
test_failure=SHARED["fac_r_ok"].id)
##########################################################################
def test_readonly_users_002_POST_netfac(self):
for db in self.readonly_dbs():
self.assert_create(
db, "netfac", {
"net_id": SHARED["net_r_ok"].id,
"fac_id": SHARED["fac_r2_ok"].id,
"local_asn": 12345
}, test_failures={"perms": {}}, test_success=False)
##########################################################################
def test_readonly_users_003_PUT_netfac(self):
for db in self.readonly_dbs():
self.assert_update(db, "netfac", SHARED["netfac_r_ok"].id, {},
test_success=False, test_failures={
"perms": {}
})
##########################################################################
def test_readonly_users_004_DELETE_netfac(self):
for db in self.readonly_dbs():
self.assert_delete(db, "netfac", test_success=False,
test_failure=SHARED["netfac_r_ok"].id)
##########################################################################
def test_readonly_users_002_POST_ixfac(self):
for db in self.readonly_dbs():
self.assert_create(db, "ixfac", {
"ix_id": SHARED["ix_r_ok"].id,
"fac_id": SHARED["fac_r2_ok"].id
}, test_failures={"perms": {}}, test_success=False)
##########################################################################
def test_readonly_users_003_PUT_ixfac(self):
for db in self.readonly_dbs():
self.assert_update(db, "ixfac", SHARED["ixfac_r_ok"].id, {},
test_success=False, test_failures={
"perms": {}
})
##########################################################################
def test_readonly_users_004_DELETE_ixfac(self):
for db in self.readonly_dbs():
self.assert_delete(db, "ixfac", test_success=False,
test_failure=SHARED["ixfac_r_ok"].id)
##########################################################################
def test_readonly_users_002_POST_poc(self):
for db in self.readonly_dbs():
self.assert_create(
db, "poc", self.make_data_poc(net_id=SHARED["net_rw_ok"].id),
test_failures={"perms": {}}, test_success=False)
##########################################################################
def test_readonly_users_003_PUT_poc(self):
for db in self.readonly_dbs(exclude=[self.db_user]):
self.assert_update(db, "poc", SHARED["poc_r_ok_public"].id, {},
test_success=False, test_failures={
"perms": {}
})
self.assert_update(db, "poc", SHARED["poc_r_ok_private"].id, {},
test_success=False, test_failures={
"perms": {}
})
self.assert_update(db, "poc", SHARED["poc_r_ok_users"].id, {},
test_success=False, test_failures={
"perms": {}
})
##########################################################################
def test_readonly_users_004_DELETE_poc(self):
for db in self.readonly_dbs():
self.assert_delete(db, "poc", test_success=False,
test_failure=SHARED["poc_r_ok_public"].id)
self.assert_delete(db, "poc", test_success=False,
test_failure=SHARED["poc_r_ok_private"].id)
self.assert_delete(db, "poc", test_success=False,
test_failure=SHARED["poc_r_ok_users"].id)
##########################################################################
def test_readonly_users_002_POST_ixlan(self):
for db in self.readonly_dbs():
self.assert_create(db, "ixlan", self.make_data_ixlan(),
test_failures={"perms": {}}, test_success=False)
##########################################################################
def test_readonly_users_003_PUT_ixlan(self):
for db in self.readonly_dbs():
self.assert_update(db, "ixlan", SHARED["ixlan_r_ok"].id, {},
test_success=False, test_failures={
"perms": {}
})
##########################################################################
def test_readonly_users_004_DELETE_ixlan(self):
for db in self.readonly_dbs():
self.assert_delete(db, "ixlan", test_success=False,
test_failure=SHARED["ixlan_r_ok"].id)
##########################################################################
def test_readonly_users_002_POST_ixpfx(self):
for db in self.readonly_dbs():
self.assert_create(db, "ixpfx",
self.make_data_ixpfx(prefix="200.100.200.0/22"),
test_failures={"perms": {}}, test_success=False)
##########################################################################
def test_readonly_users_003_PUT_ixpfx(self):
for db in self.readonly_dbs():
self.assert_update(db, "ixpfx", SHARED["ixpfx_r_ok"].id, {},
test_success=False, test_failures={
"perms": {}
})
##########################################################################
def test_readonly_users_004_DELETE_ixpfx(self):
for db in self.readonly_dbs():
self.assert_delete(db, "ixpfx", test_success=False,
test_failure=SHARED["ixpfx_r_ok"].id)
##########################################################################
def test_readonly_users_002_POST_netixlan(self):
for db in self.readonly_dbs():
self.assert_create(db, "netixlan", self.make_data_netixlan(),
test_failures={"perms": {}}, test_success=False)
##########################################################################
def test_readonly_users_003_PUT_netixlan(self):
for db in self.readonly_dbs():
self.assert_update(db, "netixlan", SHARED["netixlan_r_ok"].id, {},
test_success=False, test_failures={
"perms": {}
})
##########################################################################
def test_readonly_users_004_DELETE_netixlan(self):
for db in self.readonly_dbs():
self.assert_delete(db, "netixlan", test_success=False,
test_failure=SHARED["netixlan_r_ok"].id)
##########################################################################
def test_readonly_users_004_DELETE_org(self):
for db in self.readonly_dbs():
self.assert_delete(db, "org", test_success=False,
test_failure=SHARED["org_r_ok"].id)
##########################################################################
# CRUD PERMISSION TESTS
##########################################################################
def test_z_crud_002_create(self):
# user with create perms should be allowed to create a new poc under net_rw3_ok
# but not under net_rw2_ok
self.assert_create(self.db_crud_create, "poc",
self.make_data_poc(net_id=SHARED["net_rw3_ok"].id),
test_failures={
"perms": {
"net_id": SHARED["net_rw2_ok"].id
}
})
# user with create perms should not be able to create an ixlan under
        # ix_rw3_ok
self.assert_create(self.db_crud_create, "ixlan",
self.make_data_ixlan(ix_id=SHARED["ix_rw3_ok"].id),
test_failures={"perms": {}}, test_success=False)
# other crud test users should not be able to create a new poc under
# net_rw3_ok
for p in ["delete", "update"]:
self.assert_create(
getattr(self, "db_crud_%s" % p), "poc",
self.make_data_poc(net_id=SHARED["net_rw3_ok"].id),
test_failures={"perms": {}}, test_success=False)
def test_z_crud_003_update(self):
# user with update perms should be allowed to update net_rw3_ok
# but not net_rw2_ok
self.assert_update(self.db_crud_update, "net", SHARED["net_rw3_ok"].id,
{"name": self.make_name("Test")}, test_failures={
"perms": {
"id": SHARED["net_rw2_ok"].id
}
})
# user with update perms should not be allowed to update ix_rw3_ok
self.assert_update(self.db_crud_update, "ix", SHARED["ix_rw3_ok"].id,
{"name": self.make_name("Test")},
test_failures={"perms": {}}, test_success=False)
# other crud test users should not be able to update net_rw3_ok
for p in ["delete", "create"]:
self.assert_update(
getattr(self, "db_crud_%s" % p), "net",
SHARED["net_rw3_ok"].id, {"name": self.make_name("Test")},
test_failures={"perms": {}}, test_success=False)
def test_z_crud_004_delete(self):
# other crud test users should not be able to delete net_rw3_ok
for p in ["update", "create"]:
self.assert_delete(
getattr(self, "db_crud_%s" % p),
"net",
test_success=False,
test_failure=SHARED["net_rw3_ok"].id,
)
        # user with delete perms should be allowed to delete net_rw3_ok
# but not net_rw2_ok
self.assert_delete(self.db_crud_delete, "net", SHARED["net_rw3_ok"].id,
test_failure=SHARED["net_rw2_ok"].id)
# user with delete perms should not be allowed to delete ix_rw3_ok
self.assert_delete(
self.db_crud_delete,
"ix",
test_success=False,
test_failure=SHARED["ix_rw3_ok"].id,
)
##########################################################################
# MISC TESTS
##########################################################################
def test_z_misc_002_dupe_netixlan_ip(self):
        # test that adding duplicate netixlan ips is impossible
A = SHARED["netixlan_rw_ok"]
self.assert_create(self.db_org_admin, "netixlan",
self.make_data_netixlan(ixlan_id=A.ixlan_id,
net_id=A.network_id),
test_success=False, test_failures={
"invalid": {
"ipaddr4": unicode(A.ipaddr4)
}
})
self.assert_create(self.db_org_admin, "netixlan",
self.make_data_netixlan(
ixlan_id=A.ixlan_id,
net_id=A.network_id,
), test_success=False, test_failures={
"invalid": {
"ipaddr6": unicode(A.ipaddr6)
}
})
def test_z_misc_002_dupe_name_update(self):
# test that changing the name of entity A (status=ok)
        # to the name of entity B (status=deleted) raises the appropriate
# unique key error and does not undelete entity B
A = SHARED["fac_rw_dupe_ok"]
B = SHARED["fac_rw_dupe_deleted"]
self.assertEqual(A.status, "ok")
self.assertEqual(B.status, "deleted")
self.assert_update(self.db_org_admin, "fac", A.id, {}, test_failures={
"invalid": {
"name": B.name
}
})
B.refresh_from_db()
self.assertEqual(B.status, "deleted")
def test_z_misc_001_org_create(self):
# no one should be allowed to create an org via the api
# at this point in time
for db in self.all_dbs():
self.assert_create(db, "org",
self.make_data_org(name=self.make_name("Test")),
test_success=False, test_failures={
"perms": {}
})
def test_z_misc_001_suggest_net(self):
# test network suggestions
data = self.make_data_net(
asn=9000901, org_id=settings.SUGGEST_ENTITY_ORG, suggest=True)
r_data = self.assert_create(self.db_user, "net", data)
self.assertEqual(r_data["org_id"], settings.SUGGEST_ENTITY_ORG)
self.assertEqual(r_data["status"], "pending")
net = Network.objects.get(id=r_data["id"])
self.assertEqual(net.org_id, settings.SUGGEST_ENTITY_ORG)
data = self.make_data_net(
asn=9000902, org_id=settings.SUGGEST_ENTITY_ORG, suggest=True)
r_data = self.assert_create(self.db_guest, "net", data,
test_success=False, test_failures={
"perms": {}
})
def test_z_misc_001_suggest_fac(self):
# test facility suggestions
data = self.make_data_fac(org_id=settings.SUGGEST_ENTITY_ORG,
suggest=True)
r_data = self.assert_create(self.db_user, "fac", data)
self.assertEqual(r_data["org_id"], settings.SUGGEST_ENTITY_ORG)
self.assertEqual(r_data["status"], "pending")
fac = Facility.objects.get(id=r_data["id"])
self.assertEqual(fac.org_id, settings.SUGGEST_ENTITY_ORG)
data = self.make_data_fac(org_id=settings.SUGGEST_ENTITY_ORG,
suggest=True)
r_data = self.assert_create(self.db_guest, "fac", data,
test_success=False, test_failures={
"perms": {}
})
def test_z_misc_001_suggest_ix(self):
# test exchange suggestions
data = self.make_data_ix(org_id=settings.SUGGEST_ENTITY_ORG,
suggest=True, prefix=self.get_prefix4())
r_data = self.assert_create(self.db_user, "ix", data,
ignore=["prefix", "suggest"])
self.assertEqual(r_data["org_id"], settings.SUGGEST_ENTITY_ORG)
self.assertEqual(r_data["status"], "pending")
ix = InternetExchange.objects.get(id=r_data["id"])
self.assertEqual(ix.org_id, settings.SUGGEST_ENTITY_ORG)
data = self.make_data_ix(org_id=settings.SUGGEST_ENTITY_ORG,
suggest=True, prefix=self.get_prefix4())
r_data = self.assert_create(self.db_guest, "ix", data, ignore=[
"prefix", "suggest"
], test_success=False, test_failures={
"perms": {}
})
def test_z_misc_001_suggest_outside_of_post(self):
# The `suggest` keyword should only be allowed for
# `POST` events
for reftag in ["ix", "fac", "net"]:
ent = SHARED["{}_rw_ok".format(reftag)]
org_id = ent.org_id
self.assert_update(self.db_org_admin, reftag, ent.id,
{"notes": "bla"}, test_failures={
"invalid": {
"suggest": True
}
})
ent.refresh_from_db()
self.assertEqual(ent.org_id, org_id)
def test_z_misc_001_fac_address_geocode(self):
# test that facility gets marked for geocode sync after address field
# change
fac = SHARED["fac_rw_ok"]
fac.geocode_status = True
fac.save()
self.assert_update(self.db_org_admin, "fac", fac.id, {
"address1": "This is a test"
})
fac.refresh_from_db()
self.assertEqual(fac.geocode_status, False)
# reset geocode status
fac.geocode_status = True
fac.save()
# test that facility does NOT get marked for geocode sync after non relevant
# fields are changed
self.assert_update(self.db_org_admin, "fac", fac.id, {
"website": "http://example.com",
"name": fac.name + " Geocode Test"
})
fac.refresh_from_db()
self.assertEqual(fac.geocode_status, True)
class Command(BaseCommand):
help = "This runs the api test harness. All write ops are performed under an organization specifically made for testing, so running to against a prod environment should be fine in theory."
def add_arguments(self, parser):
parser.add_argument("--only", help="only run this test", dest="only")
parser.add_argument("--setup",
help="runs api test setup (user, org create) only",
dest="setup", action="store_true")
@classmethod
def log(cls, msg):
print msg
@classmethod
def create_entity(cls, model, prefix="rw", unset=[], key_suffix=None,
name_suffix=None, **kwargs):
tag = model.handleref.tag
status = kwargs.get("status", "ok")
name = "API Test:%s:%s:%s" % (tag.upper(), prefix.upper(), status)
if name_suffix:
name = "%s%s" % (name, name_suffix)
data = {"status": status}
if tag in ["ix", "net", "fac", "org"]:
data["name"] = name
data.update(**kwargs)
try:
obj = model.objects.get(**data)
cls.log(
"%s with status '%s' for %s testing already exists, skipping!"
% (tag.upper(), status, prefix.upper()))
except model.DoesNotExist:
fn = getattr(TestJSON, "make_data_%s" % tag, None)
if fn:
data = fn(**data)
for k in unset:
if k in data:
del data[k]
obj = model.objects.create(**data)
cls.log("%s with status '%s' for %s testing created! (%s)" %
(tag.upper(), status, prefix.upper(), obj.updated))
id = "%s_%s_%s" % (tag, prefix, status)
if key_suffix:
id = "%s_%s" % (id, key_suffix)
SHARED[id] = obj
return obj
@classmethod
def create_user(cls, USER):
try:
user = User.objects.get(username=USER.get("user"))
cls.log("USER '%s' already exists, skipping!" % USER.get("user"))
user.groups.clear()
user.userpermission_set.all().delete()
except User.DoesNotExist:
user = User.objects.create(username=USER.get("user"))
user.set_password(USER.get("password"))
user.save()
cls.log("USER '%s' created!" % USER.get("user"))
return user
@classmethod
def prepare(cls, *args, **options):
cls.log("Running setup for API testing...")
memberGroup = Group.objects.get(name="user")
# create API test user
user = cls.create_user(USER)
memberGroup.user_set.add(user)
# create API test user org member
user_org_member = cls.create_user(USER_ORG_MEMBER)
memberGroup.user_set.add(user_org_member)
        # create API test user org admin
user_org_admin = cls.create_user(USER_ORG_ADMIN)
memberGroup.user_set.add(user_org_admin)
# create API test user for crud testing
crud_users = {}
for p, specs in USER_CRUD.items():
crud_user = cls.create_user(specs)
crud_users[p] = crud_user
memberGroup.user_set.add(crud_user)
# see if we need to create extra organizations (to fill up the
# database)
extra_orgs = getattr(cls, "create_extra_orgs", 0)
i = 0
while i < extra_orgs:
cls.create_entity(Organization, prefix="r_%d" % i, status="ok")
i += 1
# create API test organization (read & write)
try:
org_rw = Organization.objects.get(name=ORG_RW)
cls.log("ORG for WRITE testing already exists, skipping!")
except Organization.DoesNotExist:
org_rw = Organization.objects.create(status="ok", name=ORG_RW)
cls.log("ORG for WRITE testing created!")
org_rw.admin_usergroup.user_set.add(user_org_admin)
for crud_user in crud_users.values():
org_rw.usergroup.user_set.add(crud_user)
SHARED["org_id"] = org_rw.id
SHARED["org_rw"] = SHARED["org_rw_ok"] = org_rw
# create API test organization (read & write) - status pending
try:
org_rwp = Organization.objects.get(name=ORG_RW_PENDING)
cls.log(
"ORG for WRITE testing (with status pending) already exists, skipping!"
)
except Organization.DoesNotExist:
org_rwp = Organization.objects.create(status="pending",
name=ORG_RW_PENDING)
cls.log("ORG for WRITE testing (with status pending) created!")
org_rwp.admin_usergroup.user_set.add(user_org_admin)
SHARED["org_rwp"] = SHARED["org_rw_pending"] = org_rwp
# create API test organization (read only)
try:
org_r = Organization.objects.get(name=ORG_R)
cls.log("ORG for READONLY testing already exists, skipping!")
except Organization.DoesNotExist:
org_r = Organization.objects.create(name=ORG_R, status="ok")
cls.log("ORG for READONLY testing created!")
org_r.usergroup.user_set.add(user_org_member)
SHARED["org_r"] = SHARED["org_r_ok"] = org_r
cls.create_entity(Organization, prefix="r", status="pending")
# create API test network (for status "deleted" tests)
try:
net_rd = Network.objects.get(name=NET_R_DELETED, org_id=org_r.id)
cls.log(
"NET for status 'deleted' testing already exists, skipping!")
except Network.DoesNotExist:
net_rd = Network.objects.create(**TestJSON.make_data_net(
name=NET_R_DELETED, org_id=org_r.id))
cls.log("NET for status 'deleted' testing created!")
net_rd.delete()
SHARED["net_rd"] = net_rd
# create various entities for rw testing
for model in [Network, Facility, InternetExchange]:
for status in ["ok", "pending"]:
for prefix in ["r", "rw"]:
cls.create_entity(model, status=status, prefix=prefix,
org_id=SHARED["org_%s_%s" % (prefix,
status)].id)
cls.create_entity(
model, status=status, prefix="%s2" % prefix,
org_id=SHARED["org_%s_%s" % (prefix, status)].id)
cls.create_entity(
model, status=status, prefix="%s3" % prefix,
org_id=SHARED["org_%s_%s" % (prefix, status)].id)
# create entities for duplicate validation testing
for model in [Network, Facility, InternetExchange]:
cls.create_entity(model, status="deleted", prefix="rw_dupe",
name_suffix=" DUPE",
org_id=SHARED["org_rw_ok"].id)
cls.create_entity(model, status="ok", prefix="rw_dupe",
name_suffix=" DUPE !",
org_id=SHARED["org_rw_ok"].id)
for status in ["ok", "pending"]:
for prefix in ["r", "rw"]:
cls.create_entity(IXLan, status=status, prefix=prefix,
ix_id=SHARED["ix_%s_%s" % (prefix,
status)].id)
cls.create_entity(
IXLanPrefix,
status=status,
prefix=prefix,
ixlan_id=SHARED["ixlan_%s_%s" % (prefix, status)].id,
)
cls.create_entity(
InternetExchangeFacility, status=status, prefix=prefix,
facility_id=SHARED["fac_%s_%s" % (prefix, status)].id,
ix_id=SHARED["ix_%s_%s" % (prefix, status)].id)
cls.create_entity(
NetworkFacility, status=status, prefix=prefix, unset=[
"net_id"
], facility_id=SHARED["fac_%s_%s" % (prefix, status)].id,
network_id=SHARED["net_%s_%s" % (prefix, status)].id)
cls.create_entity(
NetworkIXLan, status=status, prefix=prefix, unset=[
"net_id"
], ixlan_id=SHARED["ixlan_%s_%s" % (prefix, status)].id,
network_id=SHARED["net_%s_%s" % (prefix, status)].id)
for v in ["Private", "Users", "Public"]:
cls.create_entity(NetworkContact, status=status,
prefix=prefix, visible=v,
network_id=SHARED["net_%s_%s" %
(prefix, status)].id,
unset=["net_id"], key_suffix=v.lower())
# set up permissions for crud permission tests
crud_users["delete"].userpermission_set.create(
namespace=SHARED["net_rw3_ok"].nsp_namespace,
permissions=PERM_READ | PERM_DELETE)
crud_users["create"].userpermission_set.create(
namespace=SHARED["net_rw3_ok"].nsp_namespace,
permissions=PERM_READ | PERM_CREATE)
crud_users["update"].userpermission_set.create(
namespace=SHARED["net_rw3_ok"].nsp_namespace,
permissions=PERM_READ | PERM_UPDATE)
# undelete in case they got flagged as deleted
for name, obj in SHARED.items():
if hasattr(
obj, "status"
) and obj.status == "deleted" and obj != net_rd and getattr(
obj, "name", "").find("DUPE") == -1:
obj.status = "ok"
obj.save()
Organization.objects.create(name="Suggested Entitites", status="ok",
id=settings.SUGGEST_ENTITY_ORG)
cls.log("Setup for API testing completed!")
@classmethod
def cleanup(cls, *args, **options):
cls.log("Cleaning up...")
deleted = 0
for k, obj in SHARED.items():
if hasattr(obj, "delete"):
# print "HARD deleting ", obj
try:
obj.delete(hard=True)
deleted += 1
except AssertionError:
pass
elif k[-3:] == "_id":
reftag = re.match("^(.+)_id$", k).group(1)
cls = REFTAG_MAP.get(reftag)
if cls:
try:
inst = cls.objects.get(id=obj)
# print "HARD deleting ",inst
deleted += 1
inst.delete()
except cls.DoesNotExist:
pass
print "Deleted", deleted, "objects"
def handle(self, *args, **options):
try:
self.prepare()
except IntegrityError, inst:
print inst
self.cleanup()
print "Cleaned up after inegrity error, please try again .."
return
if options['setup']:
return
if not options['only']:
suite = unittest.TestLoader().loadTestsFromTestCase(TestJSON)
else:
only = options["only"].split(",")
funcs = []
for key in vars(TestJSON).keys():
for o in only:
if key[:5] == "test_" and key.find(o) > -1:
funcs.append(
"peeringdb_server.management.commands.pdb_api_test.TestJSON.%s"
% key)
funcs = sorted(funcs)
suite = unittest.TestLoader().loadTestsFromNames(funcs)
unittest.TextTestRunner(verbosity=2).run(suite)
self.cleanup()
```
#### File: peeringdb/tests/test_api_cache.py
```python
import pytest
import json
import tempfile
import os
import datetime
import re
from django.test import TestCase
from django.contrib.auth.models import Group
from django.core.management import call_command
from django.conf import settings
import peeringdb_server.models as models
import peeringdb_server.management.commands.pdb_api_test as api_test
import test_api as api_tests
import django_namespace_perms as nsp
def setup_module(module):
api_tests.setup_module(module)
def teardown_module(module):
api_tests.teardown_module(module)
class APICacheTests(TestCase, api_test.TestJSON, api_test.Command):
"""
Runs the api test after generating cache files and enabling
api cache
You can find the logic / definition of those tests in
    peeringdb_server.management.commands.pdb_api_test
This simply extends the command and testcase defined for it
but uses a special RestClient that sends requests to the
rest_framework testing api instead of a live server.
"""
# we want to use this rest-client for our requests
rest_client = api_tests.DummyRestClient
# The db will be empty and at least one of the tests
# requires there to be >100 organizations in the database
# this tells the test to create them
create_extra_orgs = 110
@classmethod
def setUpTestData(cls):
# create user and guest group
guest_group = Group.objects.create(name="guest")
user_group = Group.objects.create(name="user")
guest_user = models.User.objects.create_user(
"guest", "guest@localhost", "guest")
guest_group.user_set.add(guest_user)
nsp.models.GroupPermission.objects.create(
group=guest_group, namespace="peeringdb.organization",
permissions=0x01)
nsp.models.GroupPermission.objects.create(
group=user_group, namespace="peeringdb.organization",
permissions=0x01)
nsp.models.GroupPermission.objects.create(
group=user_group, namespace="peeringdb.organization.{}".format(
settings.SUGGEST_ENTITY_ORG), permissions=0x04)
nsp.models.GroupPermission.objects.create(
group=user_group,
namespace="peeringdb.organization.*.network.*.poc_set.users",
permissions=0x01)
# prepare api test data
cls.prepare()
settings.API_CACHE_ROOT = tempfile.mkdtemp()
settings.API_CACHE_LOG = os.path.join(settings.API_CACHE_ROOT,
"log.log")
super_user = models.User.objects.create_user(
"admin", "admin@localhost", "admin")
super_user.is_superuser = True
super_user.is_staff = True
super_user.save()
# generate cache files
now = datetime.datetime.now() + datetime.timedelta(days=1)
call_command("pdb_api_cache", date=now.strftime("%Y%m%d"))
def setUp(self):
settings.API_CACHE_ALL_LIMITS = True
settings.API_CACHE_ENABLED = True
super(APICacheTests, self).setUp()
def tearDown(self):
settings.API_CACHE_ALL_LIMITS = False
settings.API_CACHE_ENABLED = False
super(APICacheTests, self).tearDown()
```
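The class above swaps the live HTTP transport for `api_tests.DummyRestClient`, which routes requests through django-rest-framework's test machinery instead of a running server. The snippet below is a minimal sketch of that idea, not the project's actual client: the method names, the `/api` prefix and the return shape are assumptions made only for illustration.
```python
# Hedged sketch of an in-process "rest client" built on DRF's APIClient.
# The real DummyRestClient lives in test_api.py; names and the /api prefix are assumptions.
from rest_framework.test import APIClient
class InProcessRestClient(object):
    def __init__(self, user=None, password=None):
        self.client = APIClient()
        if user:
            # authenticate against the test database instead of a live server
            self.client.login(username=user, password=password)
    def all(self, reftag, **params):
        # GET /api/<reftag> through the in-process URL router
        resp = self.client.get("/api/%s" % reftag, params)
        return resp.status_code, resp.data
    def create(self, reftag, data):
        # POST /api/<reftag> with a JSON body
        resp = self.client.post("/api/%s" % reftag, data, format="json")
        return resp.status_code, resp.data
```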
#### File: peeringdb/tests/test_inet.py
```python
from peeringdb_server.inet import RdapLookup, RdapNotFoundError
import pytest
def test_rdap_asn_lookup(rdap):
asn = rdap.get_asn(63311)
assert asn.raw
assert asn.name
assert asn.emails
assert asn.org_name
assert asn.org_address
def test_rdap_asn_lookup_not_found(rdap):
with pytest.raises(RdapNotFoundError):
rdap.get_asn(65535)
def test_rdap_asn_lookup_not_found_unallocated(rdap):
with pytest.raises(RdapNotFoundError):
rdap.get_asn(9999999)
def test_mocker(rdap):
with pytest.RequestsData("rdap"):
asn = rdap.get_asn(63311)
@pytest.RequestsData("rdap")
def test_arin0(rdap):
asn = rdap.get_asn(63311)
assert asn.emails == ['<EMAIL>']
def test_recurse_contacts(rdap):
asn = rdap.get_asn(3333)
assert rdap == asn._rdapc
assert len(asn.emails) > 1
assert len(rdap.history) > len(asn.emails)
```
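The tests above rely on an `rdap` fixture that is not defined in this file (it comes from the project's conftest). Below is a minimal sketch of such a fixture, assuming `RdapLookup` can be instantiated with its defaults; check the real conftest for the authoritative definition.
```python
# Hypothetical conftest.py fixture; the no-argument constructor is an assumption.
import pytest
from peeringdb_server.inet import RdapLookup
@pytest.fixture
def rdap():
    return RdapLookup()
```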
|
{
"source": "jejer/iptables-inspector",
"score": 3
}
|
#### File: cfgparser/matches/addrtype.py
```python
class Addrtype(object):
def __init__(self, raw):
self.src_type = ""
self.invert_src_type = False
self.dst_type = ""
self.invert_dst_type = False
fields = raw.split()
for i in range(len(fields)):
if fields[i] == "--src-type":
self.src_type = fields[i+1]
if fields[i-1] == "!":
self.invert_src_type = True
if fields[i] == "--dst-type":
self.dst_type = fields[i+1]
if fields[i-1] == "!":
self.invert_dst_type = True
def match(self, packet, runner):
if self.src_type == "LOCAL" and (not self.invert_src_type) and (not runner.localhost_ip(packet.source)):
return False
if self.src_type == "LOCAL" and self.invert_src_type and runner.localhost_ip(packet.source):
return False
if self.dst_type == "LOCAL" and (not self.invert_dst_type) and (not runner.localhost_ip(packet.dest)):
return False
if self.dst_type == "LOCAL" and self.invert_dst_type and runner.localhost_ip(packet.dest):
return False
return True
```
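A hedged usage sketch of the match above: the `_Packet` and `_Runner` classes below are stand-ins that model only the attributes `Addrtype.match` touches, and the import path is inferred from the file layout shown here.
```python
import ipaddress
from cfgparser.matches.addrtype import Addrtype  # path inferred from the repo layout
class _Packet(object):
    def __init__(self, source, dest):
        self.source = ipaddress.ip_address(source)
        self.dest = ipaddress.ip_address(dest)
class _Runner(object):
    def __init__(self, local_ips):
        self._local = {ipaddress.ip_address(ip) for ip in local_ips}
    def localhost_ip(self, ip):
        return ip in self._local
# "! --src-type LOCAL --dst-type LOCAL": source must NOT be local, dest must be local
rule = Addrtype("-m addrtype ! --src-type LOCAL --dst-type LOCAL")
packet = _Packet("10.0.0.7", "127.0.0.1")
print(rule.match(packet, _Runner(["127.0.0.1"])))  # True
```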
#### File: cfgparser/matches/comment.py
```python
class Comment(object):
def __init__(self, raw):
self.comment = raw[21:]
def match(self, packet, runner):
return True
```
#### File: cfgparser/targets/dnat.py
```python
import ipaddress
from colorama import Fore
# http://ipset.netfilter.org/iptables-extensions.man.html#lbCV
class DNAT(object):
def __init__(self, raw):
if ":" in raw.split()[3]:
self.ip = raw.split()[3].split(":")[0]
self.port = int(raw.split()[3].split(":")[1])
else:
self.ip = raw.split()[3]
self.port = 0
def process(self, packet, runner):
        original_ip, original_port = packet.dest, packet.dport
        packet.dest = ipaddress.ip_address(self.ip)
        if self.port:
            packet.dport = self.port
        print(Fore.RED + "TARGET DNAT: " + str(original_ip) + ":" + str(original_port) + " => " + str(packet.dest) + ":" + str(packet.dport))
return "ACCEPT"
```
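A hedged usage sketch: `_Packet` below models only the fields `DNAT.process` touches, and the import path is inferred from the file layout shown here.
```python
from cfgparser.targets.dnat import DNAT  # path inferred from the repo layout
class _Packet(object):
    def __init__(self, dest, dport):
        self.dest = dest
        self.dport = dport
packet = _Packet("192.0.2.10", 8080)
# "--to-destination 10.0.0.5:80" ends up as raw.split()[3] == "10.0.0.5:80"
DNAT("-j DNAT --to-destination 10.0.0.5:80").process(packet, None)
print(packet.dest, packet.dport)  # 10.0.0.5 80
```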
#### File: cfgparser/targets/mark.py
```python
from colorama import Fore
# http://ipset.netfilter.org/iptables-extensions.man.html#lbDE
class Mark(object):
def __init__(self, raw):
self.op = raw.split()[2]
if "/" in raw.split()[3]:
self.value = int(raw.split()[3].split("/")[0], 0)
self.mask = int(raw.split()[3].split("/")[1], 0)
else:
self.value = int(raw.split()[3], 0)
self.mask = int("0xFFFFFFFF", 0)
def process(self, packet, runner):
original_mark = packet.nfmark
if self.op == "--set-xmark":
packet.nfmark = (packet.nfmark & (self.mask ^ 0xFFFFFFFF)) ^ self.value
if self.op == "--set-mark":
packet.nfmark = (packet.nfmark & (self.mask ^ 0xFFFFFFFF)) | self.value
packet.stack_next_rule()
print(Fore.RED + "TARGET MARK: " + hex(original_mark) + " => " + hex(packet.nfmark))
return "CONTINUE"
```
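A hedged worked example of the two mark operations implemented above: `--set-xmark v/m` clears the bits selected by the mask and XORs the value in, while `--set-mark v/m` clears them and ORs the value in. The `_Packet` stand-in and the import path are assumptions for illustration.
```python
from cfgparser.targets.mark import Mark  # path inferred from the repo layout
class _Packet(object):
    def __init__(self, nfmark):
        self.nfmark = nfmark
    def stack_next_rule(self):
        pass  # stub; the real Packet tracks rule traversal state here
packet = _Packet(0x12345678)
# clear the low byte (mask 0xff), then XOR in 0x4
Mark("-j MARK --set-xmark 0x4/0xff").process(packet, None)
print(hex(packet.nfmark))  # 0x12345604
```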
|
{
"source": "jejikenwogu/integrations-core",
"score": 2
}
|
#### File: postgres/tests/test_unit.py
```python
import mock
import psycopg2
import pytest
from mock import MagicMock
from pytest import fail
from semver import VersionInfo
from six import iteritems
from datadog_checks.postgres import util
from .common import SCHEMA_NAME
pytestmark = pytest.mark.unit
def test_get_instance_metrics_lt_92(integration_check, pg_instance):
"""
    check output when < 9.2
"""
pg_instance['collect_database_size_metrics'] = False
check = integration_check(pg_instance)
res = check.metrics_cache.get_instance_metrics(VersionInfo(9, 1, 0))
assert res['metrics'] == util.COMMON_METRICS
def test_get_instance_metrics_92(integration_check, pg_instance):
"""
    check output when 9.2+
"""
pg_instance['collect_database_size_metrics'] = False
check = integration_check(pg_instance)
res = check.metrics_cache.get_instance_metrics(VersionInfo(9, 2, 0))
assert res['metrics'] == dict(util.COMMON_METRICS, **util.NEWER_92_METRICS)
def test_get_instance_metrics_state(integration_check, pg_instance):
"""
Ensure data is consistent when the function is called more than once
"""
pg_instance['collect_database_size_metrics'] = False
check = integration_check(pg_instance)
res = check.metrics_cache.get_instance_metrics(VersionInfo(9, 2, 0))
assert res['metrics'] == dict(util.COMMON_METRICS, **util.NEWER_92_METRICS)
res = check.metrics_cache.get_instance_metrics('foo') # metrics were cached so this shouldn't be called
assert res['metrics'] == dict(util.COMMON_METRICS, **util.NEWER_92_METRICS)
def test_get_instance_metrics_database_size_metrics(integration_check, pg_instance):
"""
Test the function behaves correctly when `database_size_metrics` is passed
"""
pg_instance['collect_default_database'] = True
pg_instance['collect_database_size_metrics'] = False
check = integration_check(pg_instance)
expected = util.COMMON_METRICS
expected.update(util.NEWER_92_METRICS)
expected.update(util.DATABASE_SIZE_METRICS)
res = check.metrics_cache.get_instance_metrics(VersionInfo(9, 2, 0))
assert res['metrics'] == expected
def test_get_instance_with_default(check):
"""
Test the contents of the query string with different `collect_default_db` values
"""
version = VersionInfo(9, 2, 0)
res = check.metrics_cache.get_instance_metrics(version)
assert " AND psd.datname not ilike 'postgres'" in res['query']
check._config.collect_default_db = True
res = check.metrics_cache.get_instance_metrics(version)
assert " AND psd.datname not ilike 'postgres'" not in res['query']
def test_malformed_get_custom_queries(check):
"""
    Test early-exit conditions for _collect_custom_queries()
"""
check.log = MagicMock()
db = MagicMock()
check.db = db
check._config.custom_queries = [{}]
# Make sure 'metric_prefix' is defined
check._collect_custom_queries([])
check.log.error.assert_called_once_with("custom query field `metric_prefix` is required")
check.log.reset_mock()
# Make sure 'query' is defined
malformed_custom_query = {'metric_prefix': 'postgresql'}
check._config.custom_queries = [malformed_custom_query]
check._collect_custom_queries([])
check.log.error.assert_called_once_with(
"custom query field `query` is required for metric_prefix `%s`", malformed_custom_query['metric_prefix']
)
check.log.reset_mock()
# Make sure 'columns' is defined
malformed_custom_query['query'] = 'SELECT num FROM sometable'
check._collect_custom_queries([])
check.log.error.assert_called_once_with(
"custom query field `columns` is required for metric_prefix `%s`", malformed_custom_query['metric_prefix']
)
check.log.reset_mock()
# Make sure we gracefully handle an error while performing custom queries
malformed_custom_query_column = {}
malformed_custom_query['columns'] = [malformed_custom_query_column]
db.cursor().execute.side_effect = psycopg2.ProgrammingError('FOO')
check._collect_custom_queries([])
check.log.error.assert_called_once_with(
"Error executing query for metric_prefix %s: %s", malformed_custom_query['metric_prefix'], 'FOO'
)
check.log.reset_mock()
# Make sure the number of columns defined is the same as the number of columns return by the query
malformed_custom_query_column = {}
malformed_custom_query['columns'] = [malformed_custom_query_column]
query_return = ['num', 1337]
db.cursor().execute.side_effect = None
db.cursor().__iter__.return_value = iter([query_return])
check._collect_custom_queries([])
check.log.error.assert_called_once_with(
"query result for metric_prefix %s: expected %s columns, got %s",
malformed_custom_query['metric_prefix'],
len(malformed_custom_query['columns']),
len(query_return),
)
check.log.reset_mock()
# Make sure the query does not return an empty result
db.cursor().__iter__.return_value = iter([[]])
check._collect_custom_queries([])
check.log.debug.assert_called_with(
"query result for metric_prefix %s: returned an empty result", malformed_custom_query['metric_prefix']
)
check.log.reset_mock()
# Make sure 'name' is defined in each column
malformed_custom_query_column['some_key'] = 'some value'
db.cursor().__iter__.return_value = iter([[1337]])
check._collect_custom_queries([])
check.log.error.assert_called_once_with(
"column field `name` is required for metric_prefix `%s`", malformed_custom_query['metric_prefix']
)
check.log.reset_mock()
# Make sure 'type' is defined in each column
malformed_custom_query_column['name'] = 'num'
db.cursor().__iter__.return_value = iter([[1337]])
check._collect_custom_queries([])
check.log.error.assert_called_once_with(
"column field `type` is required for column `%s` of metric_prefix `%s`",
malformed_custom_query_column['name'],
malformed_custom_query['metric_prefix'],
)
check.log.reset_mock()
# Make sure 'type' is a valid metric type
malformed_custom_query_column['type'] = 'invalid_type'
db.cursor().__iter__.return_value = iter([[1337]])
check._collect_custom_queries([])
check.log.error.assert_called_once_with(
"invalid submission method `%s` for column `%s` of metric_prefix `%s`",
malformed_custom_query_column['type'],
malformed_custom_query_column['name'],
malformed_custom_query['metric_prefix'],
)
check.log.reset_mock()
# Make sure we're only collecting numeric value metrics
malformed_custom_query_column['type'] = 'gauge'
query_return = MagicMock()
query_return.__float__.side_effect = ValueError('Mocked exception')
db.cursor().__iter__.return_value = iter([[query_return]])
check._collect_custom_queries([])
check.log.error.assert_called_once_with(
"non-numeric value `%s` for metric column `%s` of metric_prefix `%s`",
query_return,
malformed_custom_query_column['name'],
malformed_custom_query['metric_prefix'],
)
@pytest.mark.parametrize(
'test_case, params',
[
('9.6.2', {'version.major': '9', 'version.minor': '6', 'version.patch': '2'}),
('10.0', {'version.major': '10', 'version.minor': '0', 'version.patch': '0'}),
(
'11nightly3',
{'version.major': '11', 'version.minor': '0', 'version.patch': '0', 'version.release': 'nightly.3'},
),
],
)
def test_version_metadata(check, test_case, params):
check.check_id = 'test:123'
with mock.patch('datadog_checks.base.stubs.datadog_agent.set_check_metadata') as m:
check.set_metadata('version', test_case)
for name, value in iteritems(params):
m.assert_any_call('test:123', name, value)
m.assert_any_call('test:123', 'version.scheme', 'semver')
m.assert_any_call('test:123', 'version.raw', test_case)
@pytest.mark.usefixtures('mock_cursor_for_replica_stats')
def test_replication_stats(aggregator, integration_check, pg_instance):
check = integration_check(pg_instance)
check.check(pg_instance)
base_tags = ['foo:bar', 'server:localhost', 'port:5432']
app1_tags = base_tags + ['wal_sync_state:async', 'wal_state:streaming', 'wal_app_name:app1']
app2_tags = base_tags + ['wal_sync_state:sync', 'wal_state:backup', 'wal_app_name:app2']
aggregator.assert_metric('postgresql.db.count', 0, base_tags)
for suffix in ('wal_write_lag', 'wal_flush_lag', 'wal_replay_lag'):
metric_name = 'postgresql.replication.{}'.format(suffix)
aggregator.assert_metric(metric_name, 12, app1_tags)
aggregator.assert_metric(metric_name, 13, app2_tags)
aggregator.assert_all_metrics_covered()
def test_relation_filter():
relations_config = {'breed': {'relation_name': 'breed', 'schemas': ['public']}}
query_filter = util.build_relations_filter(relations_config, SCHEMA_NAME)
assert query_filter == "( relname = 'breed' AND schemaname = ANY(array['public']::text[]) )"
def test_relation_filter_no_schemas():
relations_config = {'persons': {'relation_name': 'persons', 'schemas': [util.ALL_SCHEMAS]}}
query_filter = util.build_relations_filter(relations_config, SCHEMA_NAME)
assert query_filter == "( relname = 'persons' )"
def test_relation_filter_regex():
relations_config = {'persons': {'relation_regex': 'b.*', 'schemas': [util.ALL_SCHEMAS]}}
query_filter = util.build_relations_filter(relations_config, SCHEMA_NAME)
assert query_filter == "( relname ~ 'b.*' )"
def test_query_timeout_connection_string(aggregator, integration_check, pg_instance):
pg_instance['password'] = ''
pg_instance['query_timeout'] = 1000
check = integration_check(pg_instance)
try:
check._connect()
except psycopg2.ProgrammingError as e:
fail(str(e))
except psycopg2.OperationalError:
# could not connect to server because there is no server running
pass
```
|
{
"source": "jejimenez/invetronic",
"score": 3
}
|
#### File: invetronic/authentication/models.py
```python
from django.contrib.auth.models import AbstractUser, PermissionsMixin
from django.db import models
from django.contrib.auth.models import BaseUserManager
from inventario.models import Company
# Create your models here.
TIPO_USUARIO = (
('TECNICO','Técnico'),
('CLIENTE_ADMIN','Cliente Administrador'),
('CLIENTE','Cliente'),
)
class UserManager(BaseUserManager):
def create_superuser(self,username, password, **kwargs):
user = self.model(username=username)
user.set_password(password)
user.is_admin = True
user.is_staff = True
user.is_superuser = True
user.company_id = 1
user.save()
return user
class User(AbstractUser):
company = models.ForeignKey(Company, on_delete=models.CASCADE, verbose_name="empresa", related_name='user_empresa', null=True, blank=True)
objects = UserManager()
USERNAME_FIELD ='username'
tipo_usuario = models.CharField(max_length=100,choices=TIPO_USUARIO,blank=True,null=True,verbose_name="tipo_usuario")
```
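Since `User` replaces Django's default user model, the project has to point `AUTH_USER_MODEL` at it in settings; the app label below is inferred from the file path and may differ in the actual project.
```python
# settings.py (sketch)
AUTH_USER_MODEL = 'authentication.User'
```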
#### File: invetronic/authentication/views.py
```python
from django.shortcuts import render
from django.views.generic import TemplateView
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from common import utils
# Create your views here.
class IndexView(TemplateView):
template_name = "index.html"
def get_context_data(self, **kwargs):
context = super(IndexView, self).get_context_data(**kwargs)
context['page_title'] = 'Dashboard'
context['breadcrumbs'] = utils.get_breadcrumbs_from_url(self.request.get_full_path())
return context
@method_decorator(login_required)
#@login_required()
def dispatch(self, *args, **kwargs):
return super(IndexView, self).dispatch(*args, **kwargs)
```
#### File: invetronic/inventario/admin.py
```python
from django.contrib import admin
# Register your models here.
from .models import Machine, Company, HardwareComponent, HardwareComponentType, SoftwareComponent, SoftwareComponentType, Shared, Printer
from django.contrib.admin.widgets import AdminFileWidget
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from django.contrib import admin
class AdminImageWidget(AdminFileWidget):
def render(self, name, value, attrs=None):
output = []
if value and getattr(value, "url", None):
image_url = value.url
file_name = str(value)
output.append(u'<a href="%s" target="_blank"><img src="%s" alt="%s" /></a>' % \
(image_url, image_url, file_name))
#output.append(super(AdminFileWidget, self).render(name, value, attrs))
return mark_safe(u''.join(output))
class ImageWidgetAdmin(admin.ModelAdmin):
image_fields = []
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.name in self.image_fields:
request = kwargs.pop("request", None)
kwargs['widget'] = AdminImageWidget
return db_field.formfield(**kwargs)
return super(ImageWidgetAdmin, self).formfield_for_dbfield(db_field, **kwargs)
class SoftwareComponentInLine(admin.TabularInline):
model = SoftwareComponent
#exclude = ('machine',)
fields = ('component_type', 'name', 'release','compilation','bits','features',)
def get_extra (self, request, obj=None, **kwargs):
"""Dynamically sets the number of extra forms. 0 if the related object
already exists or the extra configuration otherwise."""
if obj:
# Don't add any extra forms if the related object already exists.
return 0
return self.extra
def formfield_for_foreignkey(self, db_field, request, **kwargs):
person = kwargs.pop('obj', None)
if db_field.name == 'component_type':
kwargs['queryset'] = SoftwareComponentType.objects.filter(status='O')
return super(SoftwareComponentInLine, self).formfield_for_foreignkey(db_field, request, **kwargs)
class HardwareComponentInLine(admin.TabularInline):
model = HardwareComponent
#exclude = ('machine',)
fields = ('component_type', 'brand', 'model','serie','size','features',)
my_id_for_formfield = None
def get_extra (self, request, obj=None, **kwargs):
"""Dynamically sets the number of extra forms. 0 if the related object
already exists or the extra configuration otherwise."""
if obj:
# Don't add any extra forms if the related object already exists.
return 0
return self.extra
def formfield_for_foreignkey(self, db_field, request, **kwargs):
person = kwargs.pop('obj', None)
if db_field.name == 'component_type':
kwargs['queryset'] = HardwareComponentType.objects.filter(status='O')
return super(HardwareComponentInLine, self).formfield_for_foreignkey(db_field, request, **kwargs)
"""
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
print(db_field)
field = super(HardwareComponentInLine, self).formfield_for_foreignkey(db_field, request, **kwargs)
#if request._obj_ is not None:
# field.queryset = field.queryset.filter(building__exact = request._obj_)
#else:
# field.queryset = field.queryset.none()
field.queryset = field.queryset.filter(status = 'O')
return field"""
class SharedInLine(admin.TabularInline):
model = Shared
#exclude = ('machine',)
fields = ('description',)
my_id_for_formfield = None
def get_extra (self, request, obj=None, **kwargs):
"""Dynamically sets the number of extra forms. 0 if the related object
already exists or the extra configuration otherwise."""
if obj:
# Don't add any extra forms if the related object already exists.
return 0
return self.extra
class PrinterInLine(admin.TabularInline):
model = Printer
#exclude = ('machine',)
fields = ('description',)
my_id_for_formfield = None
def get_extra (self, request, obj=None, **kwargs):
"""Dynamically sets the number of extra forms. 0 if the related object
already exists or the extra configuration otherwise."""
if obj:
# Don't add any extra forms if the related object already exists.
return 0
return self.extra
class MachineAdmin(admin.ModelAdmin):
# A template for a customized change view:
readonly_fields = ('machine_sequence',)
change_form_template = 'admin/machine/change_form.html'
list_display = ('name','company','ip','mac_address','os',)
inlines = [
HardwareComponentInLine,SoftwareComponentInLine,SharedInLine,PrinterInLine
]
class CompanyAdmin(ImageWidgetAdmin):
# A template for a customized change view:
image_fields = ['photo_thumbnail1',]
#exclude = ('photo_thumbnail1',)
list_display = ('id','name', 'nit', 'dv', 'address','telephone','cellphone','legal_representative')
#def render_change_form(self, request, context, *args, **kwargs):
#context['adminform'].form.fields['someField'].help_text = "Ir a Editar " + str(context['original'].anyFunction()) + " (descarta cambios)"
#print('rendering')
#print(vars(kwargs['obj']))
#if kwargs['obj'].photo:
# print(kwargs['obj'].photo.url)
#if kwargs['obj'].photo_thumbnail1:
# print(kwargs['obj'].photo_thumbnail1.url)
#return super(CompanyAdmin, self).render_change_form(request, context, args, kwargs)
admin.site.register(Machine, MachineAdmin)
admin.site.register(Company, CompanyAdmin)
admin.site.register(HardwareComponentType)
admin.site.register(SoftwareComponentType)
```
#### File: invetronic/inventario/models.py
```python
from django.db import models
# Create your models here.
from django.db.models.signals import post_save, post_delete, pre_save
from PIL import Image as Img
import io, sys, os, errno, re
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.forms import fields
from django.core.validators import RegexValidator
from common.utils import validate_file_extension
#from io import BytesIO
# MAC Address field
MAC_RE = r'^([0-9a-fA-F]{2}([:-]?|$)){6}$'
mac_re = re.compile(MAC_RE)
BUG_STATUS = (
('A','Activo'),
('I','Inactivo'),
)
TIPO_DOCUMENTO = (
('NIT','NIT'),
('CC','Cédula Ciudadanía'),
)
class MACAddressFormField(fields.RegexField):
default_error_messages = {
'invalid': _(u'Enter a valid MAC address.'),
}
def __init__(self, *args, **kwargs):
super(MACAddressFormField, self).__init__(mac_re, *args, **kwargs)
class MACAddressField(models.Field):
empty_strings_allowed = False
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 17
super(MACAddressField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return "CharField"
def formfield(self, **kwargs):
defaults = {'form_class': MACAddressFormField}
defaults.update(kwargs)
return super(MACAddressField, self).formfield(**defaults)
class Company(models.Model):
class Meta:
verbose_name = ("Empresa")
numericregex = RegexValidator(r'^[0-9]*$', 'Solo acepta caractéres numéricos.')
name = models.CharField(max_length=500, verbose_name="Nombre o Razón Social",blank=False,null=False,)
tipo_documento = models.CharField(max_length=10,choices=TIPO_DOCUMENTO,blank=False,null=False,verbose_name="tipo documento")
nit = models.CharField(max_length=15,validators=[numericregex], verbose_name="no documento",blank=False,null=False,)
dv = models.CharField(max_length=1,validators=[numericregex], verbose_name="dv",blank=True,null=True,)
city = models.CharField(max_length=100, verbose_name="ciudad",blank=False,null=False,)
address = models.CharField(max_length=500, verbose_name="dirección de domicilio principal", blank=False, null=False)
telephone = models.CharField(max_length=20, verbose_name="teléfono", blank=False, null=False)
cellphone = models.CharField(max_length=20, verbose_name="celular", blank=True, null=True)
email1 = models.CharField(max_length=100, verbose_name="correo electrónico", blank=False, null=False)
email2 = models.CharField(max_length=100, verbose_name="correo electrónico (Alternativo)", blank=True, null=True)
legal_representative = models.CharField(max_length=500, verbose_name="representante legal", blank=True, null=True)
email_legal_representative = models.CharField(max_length=500, verbose_name="correo electrónico representante legal", blank=True, null=True)
status = models.CharField(max_length=1,choices=BUG_STATUS,default='O',verbose_name="estado", blank=True, null=True)
photo = models.ImageField(upload_to='company/img', verbose_name="imagen",validators=[validate_file_extension], blank=True, null=True)
photo_thumbnail1 = models.ImageField(upload_to='company/img', verbose_name="imagen 250x250", blank=True, null=True)
def __str__(self):
return self.name
def __unicode__(self):
return self.name
def __init__(self, *args, **kwargs):
super(Company, self).__init__(*args, **kwargs)
self.old_photo = self.photo
self.old_photo_thumbnail1 = self.photo_thumbnail1
def save(self, *args, **kwargs):
if self.photo:
self.photo.seek(0)
bytes_img = io.BytesIO(self.photo.read())
image = Img.open(bytes_img)
image.thumbnail((250,250), Img.ANTIALIAS)
output = io.BytesIO()
#print(image.format)
image.save(output, format=image.format, quality=75)
output.seek(0)
self.photo_thumbnail1= InMemoryUploadedFile(output,'ImageField', "%s_thumbnail1.jpg" %self.photo.name, 'image/jpeg', sys.getsizeof(output), None)
if self.pk:
try:
os.remove(settings.MEDIA_ROOT+(self.old_photo.name or ""))
except OSError as e:
if e.errno != errno.ENOENT:
None
else:
print(e)
try:
os.remove(settings.MEDIA_ROOT+(self.old_photo_thumbnail1.name or ""))
except OSError as e:
if e.errno != errno.ENOENT:
None
else:
print(e)
super(Company, self).save(*args, **kwargs)
@property
def photo_url(self):
print(self.photo)
if self.photo and hasattr(self.photo, 'url'):
print(self.photo.url)
return self.photo.url
#@property
#def photo_tag(self):
# return mark_safe('<img src="/directory/%s" width="150" height="150" />' % (self.photo))
#@classmethod
#def create(self):
# book = cls(title=title)
# # do something with the book
# return book
class Machine(models.Model):
class Meta:
verbose_name = ("Máquina")
company = models.ForeignKey(Company, on_delete=models.CASCADE, verbose_name="empresa", related_name='empresa')
name = models.CharField(max_length=200, verbose_name="nombre")
ip = models.GenericIPAddressField(blank=True, null=True)
mac_address = MACAddressField(verbose_name="mac address", blank=True, null=True)
#shared = models.CharField(max_length=200, verbose_name="compartido", blank=True, null=True)
#printers = models.CharField(max_length=200, verbose_name="impresoras", blank=True, null=True)
os = models.CharField(max_length=200, verbose_name="sistema operativo", blank=True, null=True)
machine_sequence = models.CharField(max_length=200, verbose_name="número de secuencia", blank=True, null=True)
buyed_date = models.DateField(verbose_name="fecha de compra",blank=True, null=True)
supplier = models.CharField(max_length=200, verbose_name="proveedor", blank=True, null=True)
warranty_months = models.PositiveSmallIntegerField(verbose_name="meses garantía", blank=True, null=True)
creation_time = models.DateTimeField(auto_now_add=True,verbose_name="fecha de creacion",blank=False, null=False)
status = models.CharField(max_length=1,choices=BUG_STATUS,default='O',verbose_name="estado", blank=True, null=True)
def __str__(self):
return self.company.name+" "+self.name
def __unicode__(self):
return self.name
"""
def save(self, *args, **kwargs):
print('okkkkk')
print(self.__dict__)
return super(Machine, self).save( *args, **kwargs)
@staticmethod
def pre_save(sender, instance, **kwargs):
print('ok')
delattr(instance, "shajred")
print(instance.__dict__)
pre_save.connect(Machine.pre_save, Machine, dispatch_uid="inventario.models.Machine")
"""
class HardwareComponentType(models.Model):
class Meta:
verbose_name_plural = ("Tipos de Componente Hardware")
verbose_name = ("Tipo de Componente Hardware")
name = models.CharField(max_length=200, verbose_name="nombre")
status = models.CharField(max_length=1,choices=BUG_STATUS,default='O',verbose_name="estado", blank=True, null=True)
def __str__(self):
return self.name
class HardwareComponent(models.Model):
class Meta:
verbose_name_plural = ("Componentes de Hardware")
verbose_name = ("Componente de Hardware")
machine = models.ForeignKey(Machine, on_delete=models.CASCADE, verbose_name="máquina", related_name='hw_machine')
component_type = models.ForeignKey(HardwareComponentType, on_delete=models.CASCADE, verbose_name="tipo_componente", related_name='hw_tipo_componente')
brand = models.CharField(max_length=200, verbose_name="marca", null=True, blank=True)
model = models.CharField(max_length=200, verbose_name="modelo", null=True, blank=True)
serie = models.CharField(max_length=200, verbose_name="serie", null=True, blank=True)
size = models.CharField(max_length=200, verbose_name="tamaño", null=True, blank=True)
features = models.CharField(max_length=500, verbose_name="características", null=True, blank=True)
status = models.CharField(max_length=1,choices=BUG_STATUS,default='O',verbose_name="estado", blank=True, null=True)
def __str__(self):
return self.machine.name + " - " +self.component_type.name
class SoftwareComponentType(models.Model):
class Meta:
verbose_name_plural = ("Tipos de Componente Software")
verbose_name = ("Tipo de Componente Software")
name = models.CharField(max_length=200, verbose_name="nombre")
status = models.CharField(max_length=1,choices=BUG_STATUS,default='O',verbose_name="estado", blank=True, null=True)
def __str__(self):
return self.name
class SoftwareComponent(models.Model):
class Meta:
verbose_name_plural = ("Componentes de Software")
verbose_name = ("Componente de Software")
machine = models.ForeignKey(Machine, on_delete=models.CASCADE, verbose_name="máquina", related_name='sw_machine')
component_type = models.ForeignKey(SoftwareComponentType, on_delete=models.CASCADE, verbose_name="tipo_componente", related_name='sw_tipo_componente')
name = models.CharField(max_length=200, verbose_name="nombre", null=True, blank=True)
release = models.CharField(max_length=200, verbose_name="version", null=True, blank=True)
compilation = models.CharField(max_length=200, verbose_name="compilación", null=True, blank=True)
bits = models.CharField(max_length=200, verbose_name="bits", null=True, blank=True)
features = models.CharField(max_length=500, verbose_name="características", null=True, blank=True)
status = models.CharField(max_length=1,choices=BUG_STATUS,default='O',verbose_name="estado", blank=True, null=True)
def __str__(self):
return self.machine.name + " - " +self.component_type.name
class Shared(models.Model):
class Meta:
verbose_name_plural = ("Recursos Compartidos")
verbose_name = ("Recurso Compartido")
def __str__(self):
return " - "
machine = models.ForeignKey(Machine, on_delete=models.CASCADE, verbose_name="máquina", related_name='shared_machine')
status = models.CharField(max_length=1,choices=BUG_STATUS,default='O',verbose_name="estado", blank=True, null=True)
description = models.CharField(max_length=200, verbose_name="descripción")
class Printer(models.Model):
class Meta:
verbose_name_plural = ("Impresoras")
verbose_name = ("Impresora")
def __str__(self):
return " - "
machine = models.ForeignKey(Machine, on_delete=models.CASCADE, verbose_name="máquina", related_name='printer_machine')
status = models.CharField(max_length=1,choices=BUG_STATUS,default='A',verbose_name="estado", blank=True, null=True)
description = models.CharField(max_length=200, verbose_name="descripción")
```
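The thumbnail logic inside `Company.save()` can be read in isolation as the following sketch (a hypothetical helper, not part of the project), which mirrors the Pillow + `InMemoryUploadedFile` pattern used above.
```python
import io
import sys
from PIL import Image
from django.core.files.uploadedfile import InMemoryUploadedFile
def make_thumbnail(django_file, size=(250, 250)):
    """Return an in-memory thumbnail of an uploaded image field."""
    django_file.seek(0)
    image = Image.open(io.BytesIO(django_file.read()))
    fmt = image.format or 'JPEG'  # remember the format before resizing
    image.thumbnail(size, Image.ANTIALIAS)
    output = io.BytesIO()
    image.save(output, format=fmt, quality=75)
    output.seek(0)
    return InMemoryUploadedFile(output, 'ImageField',
                                "%s_thumbnail1.jpg" % django_file.name,
                                'image/jpeg', sys.getsizeof(output), None)
```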
#### File: invetronic/inventario/views.py
```python
from django.shortcuts import render
from io import BytesIO
from django.http import HttpResponse
from django.core import serializers
from django.forms.models import model_to_dict
from django.views.generic.list import ListView
from django.views.generic import DetailView
from django.utils import timezone
from templated_docs_adecuated import fill_template
from templated_docs_adecuated.http import FileResponse
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from inventario.models import Machine, HardwareComponent, SoftwareComponent, Shared, Printer
from authentication.models import TIPO_USUARIO
from PIL import Image as Img
from common import utils
def machine_detail_view(request, pk_machine):
#print(request.user.username)
#print(request.user.first_name)
#print(request.user.last_name)
#print(request.user.email)
machine = Machine.objects.select_related('company').get(pk=pk_machine)
company = machine.company
# fetching hardware components to append in context file
component = HardwareComponent.objects.filter(machine=machine)
component_context = []
i = 1
for comp in component:
component_context.append({'component_type':comp.component_type or "", 'brand':comp.brand or "", 'model':comp.model or "",
'serie':comp.serie or "", 'size':comp.size or "", 'features':comp.features or ""})
# fetching software components to append in context file
component = SoftwareComponent.objects.filter(machine=machine)
swcomponent_context = []
i = 1
for comp in component:
swcomponent_context.append({'component_type':comp.component_type or "", 'name':comp.name or "", 'release':comp.release or "",
'bits':comp.bits or "", 'compilation':comp.compilation or "", 'features':comp.features or ""})
# fetching shared to append in context file
component = Shared.objects.filter(machine=machine)
shared_context = []
i = 1
for comp in component:
shared_context.append({'description':comp.description or ""})
# fetching shared to append in context file
component = Printer.objects.filter(machine=machine)
printer_context = []
i = 1
for comp in component:
printer_context.append({'description':comp.description or ""})
doctype = 'pdf'
machine_context = model_to_dict(machine)
company_context = model_to_dict(company,
fields = ['id','name','photo_thumbnail1','address','telephone'])
company_context['name'] = str.upper(company_context['name'])
#context = {}
context = machine_context
context['company'] = company_context
#context = {**machine_context, 'company':{**company_context}}
if request.user.is_authenticated:
context['user'] = {'label':'Usuario', 'name':request.user.first_name+" "+request.user.last_name}
else:
context['user'] = {'label':'', 'name':''}
context['components'] = component_context
context['swcomponents'] = swcomponent_context
context['shared'] = shared_context
context['printer'] = printer_context
#print(context)
filename = fill_template(
'inventario/machine_detail2.odt', context,
output_format=doctype)
visible_filename = 'machine.{}'.format(doctype)
return FileResponse(filename, visible_filename)
class MachineListView(ListView):
model = Machine
def get_context_data(self, **kwargs):
context = super(MachineListView, self).get_context_data(**kwargs)
context['now'] = timezone.now()
context['page_title'] = 'Inventario'
context['breadcrumbs'] = utils.get_breadcrumbs_from_url(self.request.get_full_path())
if self.request.user.is_superuser:
context['object_list'] = Machine.objects.all()
elif self.request.user.tipo_usuario == 'CLIENTE' or self.request.user.tipo_usuario == 'CLIENTE_ADMIN':
context['object_list'] = Machine.objects.filter(company=self.request.user.company.pk)
        elif self.request.user.tipo_usuario == 'TECNICO': # logic to look up the machines assigned to the technician's tickets still needs to be implemented
context['object_list'] = Machine.objects.filter(company=self.request.user.company.pk)
else:
context['object_list'] = []
#print(self.request.__dict__)
return context
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(MachineListView, self).dispatch(*args, **kwargs)
class MachineDetail(DetailView):
model = Machine
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super(MachineDetail, self).get_context_data(**kwargs)
context['page_title'] = 'Detalle Máquina'
context['breadcrumbs'] = utils.get_breadcrumbs_from_url(self.request.get_full_path())
# Add in a QuerySet of all the books
context['SoftwareComponent'] = SoftwareComponent.objects.filter(machine=self.object.pk)
context['HardwareComponent'] = HardwareComponent.objects.filter(machine=self.object.pk)
return context
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(MachineDetail, self).dispatch(*args, **kwargs)
```
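A hedged sketch of how these views could be wired into `urls.py`; the patterns and route names below are assumptions, not taken from the project.
```python
from django.conf.urls import url
from inventario import views
urlpatterns = [
    url(r'^machines/$', views.MachineListView.as_view(), name='machine_list'),
    url(r'^machines/(?P<pk>\d+)/$', views.MachineDetail.as_view(), name='machine_detail'),
    url(r'^machines/(?P<pk_machine>\d+)/pdf/$', views.machine_detail_view, name='machine_pdf'),
]
```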
|
{
"source": "jejimenezm/T2GEORES",
"score": 3
}
|
#### File: T2GEORES/T2GEORES/output.py
```python
import numpy as np
import shutil
import os
import csv
import pandas as pd
import sys
import sqlite3
import subprocess
import json
import datetime
import re
from T2GEORES import formats as formats
def write_PT_from_t2output(input_dictionary):
"""It writes the parameter for every block from every well from the last output file of TOUGH2 simulation
Parameters
----------
input_dictionary : dictionary
        Dictionary containing the path and name of the database under keyword 'db_path' and lists of wells under the keywords 'WELLS', 'MAKE_UP_WELLS' and 'NOT_PRODUCING_WELL'
Returns
-------
file
        {well_name}_PT.dat : file containing the information for every block of the well
Attention
---------
    The layers and well blocks need to be in the database
Examples
--------
>>> write_PT_from_t2output(input_dictionary)
"""
wells=[]
for key in ['WELLS','MAKE_UP_WELLS','NOT_PRODUCING_WELL']:
try:
for well in input_dictionary[key]:
wells.append(well)
except KeyError:
pass
db_path=input_dictionary['db_path']
t2_output_file="../model/t2/t2.out"
if os.path.isfile(t2_output_file):
pass
else:
return "Theres is not t2.out file on t2/t2.out"
#Extracts all the times the line 'OUTPUT DATA AFTER' was printed
output_headers=[]
with open(t2_output_file,'r') as t2_file:
t2_output_array = t2_file.readlines()
for line_i, line in enumerate(t2_output_array):
if "OUTPUT DATA AFTER" in line.rstrip():
output_headers.append(line_i)
conn=sqlite3.connect(db_path)
c=conn.cursor()
#Create a dictionary containing all the blocks in a well
blocks_wells={}
wells_data={}
data_layer=pd.read_sql_query("SELECT correlative FROM layers ORDER BY middle DESC;",conn)
for name in sorted(wells):
wells_data[name]=""
data_block=pd.read_sql_query("SELECT blockcorr FROM t2wellblock WHERE well='%s' ORDER BY blockcorr;"%name,conn)
if len(data_block)>0:
for n in data_layer['correlative'].values:
blocks_wells[n+data_block['blockcorr'].values[0]]=name
#Select the last line of TOUGH2 output file
for line in t2_output_array[output_headers[-1]:-1]:
for block in blocks_wells:
if block in line.split() and len(line.split())==12: #11
wells_data[blocks_wells[block]]+="%s\n"%(','.join(line.split()))
#Writes an output file for every well
for well in wells_data:
file_out=open("../output/PT/txt/%s_PT.dat"%(well), "w")
file_out.write("ELEM,INDEX,P,T,SG,SW,X(WAT1),X(WAT2),PCAP,DG,DW,LOG(PERM)\n")
file_out.write(wells_data[well])
file_out.close()
conn.close()
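# A hedged example of the ``input_dictionary`` the functions in this module expect
# (keys taken from the docstrings above; the path and well names are hypothetical):
#
#   input_dictionary = {
#       'db_path': '../input/model.db',
#       'WELLS': ['WELL-1', 'WELL-2'],
#       'MAKE_UP_WELLS': [],
#       'NOT_PRODUCING_WELL': [],
#   }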
def from_sav_to_json(sav_version='sav1'):
"""It writes a json file with temperature and pressure data from the specified .sav for every block including coordinates
Parameters
----------
sav_version : str
Extension of sav file, i.e. sav, sav1, sav2, etc.
Returns
-------
file
        PT_json_from_sav.json : on ../output/PT/json/
Attention
---------
The ELEME.json file needs to be updated
Examples
--------
>>> from_sav_to_json(sav_version='sav1')
"""
output_sav_file="../model/t2/t2.%s"%sav_version
#Generates a dictionary with block as keyword and x,y,z coordinates
with open('../mesh/ELEME.json') as file:
blocks_position=json.load(file)
eleme_dict={}
for block in blocks_position:
eleme_dict[block]=[blocks_position[block]['X'],blocks_position[block]['Y'],blocks_position[block]['Z']]
#Creates a string from .sav file
savfile=open(output_sav_file,"r")
savstring=[]
for linesav in savfile:
savstring.append(linesav.rstrip())
savfile.close()
#Stores Pressure and Temperature on the dictionary
if os.path.isfile(output_sav_file):
t2_sav_file=open(output_sav_file, "r")
contidion_found=False
for t2_sav_line in t2_sav_file:
if t2_sav_line[0:5] in eleme_dict.keys():
contidion_found=True
block=t2_sav_line[0:5]
continue
if contidion_found:
eleme_dict[block].extend([float(i) for i in t2_sav_line.split()])
contidion_found=False
t2_sav_file.close()
eleme_pd=pd.DataFrame.from_dict(eleme_dict,orient='index', columns=['X', 'Y','Z','P','T'])
eleme_pd.to_json("../output/PT/json/PT_json_from_sav.json",orient="index",indent=2)
def PTjson_to_sqlite(input_dictionary,source='t2',):
"""It stores the defined json file into the database on the table t2PTout
Parameters
----------
    input_dictionary : dictionary
        Dictionary containing the path and name of the database under keyword 'db_path'.
    source : str
        It can be 't2' or 'sav'
    Examples
    --------
    >>> PTjson_to_sqlite(input_dictionary,source='t2')
"""
db_path=input_dictionary['db_path']
conn=sqlite3.connect(db_path)
c=conn.cursor()
if source=="t2":
if os.path.isfile("../output/PT/json/PT_json.txt"):
with open("../output/PT/json/PT_json.txt") as f:
data=json.load(f)
for element in sorted(data):
try:
q="INSERT INTO t2PTout(blockcorr,x,y,z,'index',P,T,SG,SW,X1,X2,PCAP,DG,DW) \
VALUES ('%s',%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"%(element, data[element]['X'],data[element]['Y'],data[element]['Z'],data[element]['INDEX'],\
data[element]['P'],data[element]['T'],data[element]['SG'],\
data[element]['SW'],data[element]['X(WAT1)'],data[element]['X(WAT2)'],\
data[element]['PCAP'],data[element]['DG'],data[element]['DW'])
c.execute(q)
except sqlite3.IntegrityError:
q="UPDATE t2PTout SET \
x=%s, \
y=%s, \
z=%s, \
'index'=%s, \
P=%s, \
T=%s, \
SG=%s, \
SW=%s, \
X1=%s, \
X2=%s, \
PCAP=%s, \
DG=%s, \
DW=%s \
WHERE blockcorr='%s'"%( data[element]['X'],data[element]['Y'],data[element]['Z'],data[element]['INDEX'],\
data[element]['P'],data[element]['T'],data[element]['SG'],\
data[element]['SW'],data[element]['X(WAT1)'],data[element]['X(WAT2)'],\
data[element]['PCAP'],data[element]['DG'],data[element]['DW'],element)
c.execute(q)
conn.commit()
elif source=="sav":
if os.path.isfile("../output/PT/json/PT_json_from_sav.txt"):
with open("../output/PT/json/PT_json_from_sav.txt") as f:
data=json.load(f)
for element in sorted(data):
try:
q="INSERT INTO t2PTout(blockcorr,x,y,z,P,T) \
VALUES ('%s',%s,%s,%s,%s,%s)"%(element,\
data[element]['X'],data[element]['Y'],data[element]['Z'],\
data[element]['P'],data[element]['T'])
c.execute(q)
except sqlite3.IntegrityError:
q="UPDATE t2PTout SET \
x=%s, \
y=%s, \
z=%s, \
P=%s, \
T=%s \
WHERE blockcorr='%s'"%(data[element]['ELEM'],\
data[element]['X'],data[element]['Y'],data[element]['Z'],\
data[element]['P'],data[element]['T'],data[element]['ELEM'])
c.execute(q)
conn.commit()
conn.close()
def write_PT_of_wells_from_t2output_in_time(input_dictionary):
"""It generates file containing the evolution of every block on every well
Extrae la evolucion de los bloques relacionados de todos los pozos en la direccion "../output/PT/evol"
Parameters
----------
input_dictionary : dictionary
        Dictionary containing the path and name of the database under keyword 'db_path' and lists of wells under the keywords 'WELLS', 'MAKE_UP_WELLS' and 'NOT_PRODUCING_WELL'
Returns
-------
file
        {well}_PT_{layer}_evol.dat : on ../output/PT/evol
Examples
--------
>>> write_PT_of_wells_from_t2output_in_time(input_dictionary)
"""
db_path=input_dictionary['db_path']
wells=[]
for key in ['WELLS','MAKE_UP_WELLS','NOT_PRODUCING_WELL']:
try:
for well in input_dictionary[key]:
wells.append(well)
except KeyError:
pass
conn=sqlite3.connect(db_path)
c=conn.cursor()
blocks_wells={}
blocks_data={}
#Generates a dictionary with the blocks on a well as a keyword
data_layer=pd.read_sql_query("SELECT correlative FROM layers ORDER BY middle DESC;",conn)
for name in sorted(wells):
data_block=pd.read_sql_query("SELECT blockcorr FROM t2wellblock WHERE well='%s' ORDER BY blockcorr;"%name,conn)
if len(data_block)>0:
for n in data_layer['correlative'].values:
blocks_wells[n+data_block['blockcorr'].values[0]]=name
blocks_data[n+data_block['blockcorr'].values[0]]=""
#Read the TOUGH2 output file line by line and store the information whenever a block from the list generated in the previous step appears on the line.
output_t2_file="../model/t2/t2.out"
if os.path.isfile(output_t2_file):
t2_file=open(output_t2_file, "r")
for t2_line in t2_file:
if "OUTPUT DATA AFTER" in t2_line:
time=t2_line.rstrip().split(" ")[-2]
if len(t2_line.split())==11 and t2_line.split()[0] in blocks_data.keys():
t2_array=t2_line.rstrip().split(" ")
data_list= list(filter(None, t2_array))
data_list.append(time)
blocks_data[t2_line.split()[0]]+=','.join(data_list)+'\n'
t2_file.close()
else:
sys.exit("The file %s or directory do not exist"%output_t2_file)
#Store the data from the dictionary in files
for block in blocks_data:
evol_file_out=open("../output/PT/evol/%s_PT_%s_evol.dat"%(blocks_wells[block],block[0]), "w")
evol_file_out.write("ELEM,INDEX,P,T,SG,SW,X(WAT1),X(WAT2),PCAP,DG,DW,TIME\n")
evol_file_out.write(blocks_data[block])
evol_file_out.close()
conn.close()
def gen_evol(input_dictionary):
"""It generates an output file containing flow and flowing enthalpy for each GEN element. As a convention the library it is suggested to use GEN for any source/sink that is not a well
Parameters
----------
input_dictionary : dictionary
Dictionary contaning the path and name of database on keyword 'db_path'.
Returns
-------
file
{GEN}_{BLOCK}_{NICKNAME}.txt : on ../output/mh/txt
"""
db_path=input_dictionary['db_path']
#List the GEN elements
conn=sqlite3.connect(db_path)
c=conn.cursor()
data_source=pd.read_sql_query("SELECT well,blockcorr,source_nickname FROM t2wellsource WHERE source_nickname LIKE'GEN*' ORDER BY source_nickname;",conn)
final_t2=""
output_fi_file="../model/t2/t2.out"
#Initialize a dictionary containing the file path and name.
dictionary_files={}
for n in range(len(data_source)):
well=data_source['well'][n]
blockcorr=data_source['blockcorr'][n]
source=data_source['source_nickname'][n]
dictionary_files[well]={'filename':"../output/mh/txt/%s_%s_%s_evol_mh.dat"%(well,blockcorr,source),'file_container':"",'blockcorr':blockcorr,'source':source}
dictionary_files[well]['file_container']+="ELEMENT,SOURCEINDEX,GENERATION RATE,ENTHALPY,X1,X2,FF(GAS),FF(AQ.),P(WB),TIME\n"
#It reads the TOUGH2 output file line by line and stores the data from each GEN element
output_t2_file="../model/t2/t2.out"
if os.path.isfile(output_t2_file):
t2_file=open(output_t2_file, "r")
for t2_line in t2_file:
if "OUTPUT DATA AFTER" in t2_line:
time=t2_line.rstrip().split(" ")[-2]
for well in dictionary_files:
if dictionary_files[well]['blockcorr'] in t2_line and dictionary_files[well]['source'] in t2_line:
t2_array=t2_line.rstrip().split(" ")
str_list = list(filter(None, t2_array))
str_list.append(time)
dictionary_files[well]['file_container']+=','.join(str_list)+'\n'
t2_file.close()
else:
sys.exit("The file %s or directory do not exist"%output_t2_file)
#Creates a file for every GEN element
for well in dictionary_files:
t2_file_out=open(dictionary_files[well]['filename'], "w")
t2_file_out.write(dictionary_files[well]['file_container'])
t2_file_out.close()
conn.close()
def src_evol(input_dictionary):
"""It generates an output file containing flow and flowing enthalpy for each SRC element. As a convention the library it is suggested to use SRC for any source/sink that is a well
Parameters
----------
input_dictionary : dictionary
Dictionary contaning the path and name of database on keyword 'db_path'.
Returns
-------
file
{GEN}_{BLOCK}_{NICKNAME}.txt : on ../output/mh/txt
"""
#See comment for gen_evol
db_path=input_dictionary['db_path']
conn=sqlite3.connect(db_path)
c=conn.cursor()
data_source=pd.read_sql_query("SELECT well,blockcorr,source_nickname FROM t2wellsource WHERE source_nickname NOT LIKE'GEN*' ORDER BY source_nickname;",conn)
final_t2=""
output_fi_file="../model/t2/t2.out"
dictionary_files={}
for n in range(len(data_source)):
well=data_source['well'][n]
blockcorr=data_source['blockcorr'][n]
source=data_source['source_nickname'][n]
dictionary_files[well]={'filename':"../output/mh/txt/%s_%s_%s_evol_mh.dat"%(well,blockcorr,source),'file_container':"",'blockcorr':blockcorr,'source':source}
dictionary_files[well]['file_container']+="ELEMENT,SOURCEINDEX,GENERATION RATE,ENTHALPY,X1,X2,FF(GAS),FF(AQ.),P(WB),TIME\n"
output_t2_file="../model/t2/t2.out"
if os.path.isfile(output_t2_file):
t2_file=open(output_t2_file, "r")
for t2_line in t2_file:
if "OUTPUT DATA AFTER" in t2_line:
time=t2_line.rstrip().split(" ")[-2]
for well in dictionary_files:
if dictionary_files[well]['blockcorr'] in t2_line and dictionary_files[well]['source'] in t2_line:
t2_array=t2_line.rstrip().split(" ")
str_list = list(filter(None, t2_array))
str_list.append(time)
dictionary_files[well]['file_container']+=','.join(str_list)+'\n'
t2_file.close()
else:
sys.exit("The file %s or directory do not exist"%output_t2_file)
for well in dictionary_files:
t2_file_out=open(dictionary_files[well]['filename'], "w")
t2_file_out.write(dictionary_files[well]['file_container'])
t2_file_out.close()
conn.close()
def write_PT_from_t2output_from_prod(input_dictionary,sav_version='sav1'):
"""It writes a pressure and temperature comming from block on every well in the specified .sav file.
Parameters
----------
input_dictionary : dictionary
Dictionary contaning the path and name of database on keyword 'db_path'.
sav_version : str
Extension of sav file, i.e. sav, sav1, sav2, etc.
Returns
-------
file
{well_name}_PT.txt: it contains the pressure and temperature information for every well
"""
db_path=input_dictionary['db_path']
wells=[]
for key in ['WELLS','MAKE_UP_WELLS','NOT_PRODUCING_WELL']:
try:
for well in input_dictionary[key]:
wells.append(well)
except KeyError:
pass
conn=sqlite3.connect(db_path)
c=conn.cursor()
#Listing all the blocks and assigning each one to a well
blocks_wells={}
wells_data={}
blocks_dictionary={}
data_layer=pd.read_sql_query("SELECT correlative FROM layers ORDER BY middle DESC;",conn)
for name in sorted(wells):
wells_data[name]=""
data_block=pd.read_sql_query("SELECT blockcorr FROM t2wellblock WHERE well='%s' ORDER BY blockcorr;"%name,conn)
if len(data_block)>0:
for n in data_layer['correlative'].values:
blocks_wells[n+data_block['blockcorr'].values[0]]=name
output_sav_file="../model/t2/t2.%s"%sav_version
if os.path.isfile(output_sav_file):
t2_sav_file=open(output_sav_file, "r")
condition_found=False
for t2_sav_line in t2_sav_file:
if t2_sav_line[0:5] in blocks_wells.keys():
condition_found=True
well=blocks_wells[t2_sav_line[0:5]]
block=t2_sav_line[0:5]
continue
if condition_found:
wells_data[well]+="%s%s"%(block,','.join(t2_sav_line.split(" ")))
condition_found=False
t2_sav_file.close()
for well in wells_data:
file_out=open("../output/PT/txt/%s_PT.dat"%(well), "w")
file_out.write("ELEM,P,T\n")
file_out.write(wells_data[well])
file_out.close()
conn.close()
def extract_csv_from_t2out(json_output=False):
"""It writes the parameter for every block from the last output file of TOUGH2 simulation on csv or json
Parameters
----------
json_output : bool
If True, a json file is saved on ../output/PT/json/
Returns
-------
file
PT.csv: on ../output/PT/csv/
Attention
---------
The file ELEME.json needs to be updated
"""
eleme_dict={}
ELEME_file='../mesh/ELEME.json'
if os.path.isfile(ELEME_file):
with open(ELEME_file) as file:
blocks_position=json.load(file)
for block in blocks_position:
eleme_dict[block]=[blocks_position[block]['X'],blocks_position[block]['Y'],blocks_position[block]['Z']]
else:
return "The file %s does not exist"%ELEME_file
last=""
if os.path.isfile("../model/t2/t2.out"):
t2file=open("../model/t2/t2.out","r")
else:
return "Theres is not t2.out file on t2/t2.out"
cnt=0
t2string=[]
#It finds the last section where OUTPUT DATA AFTER was printed and uses it to know where to start extracting data. It also converts every line of the file into an element of an array
for linet2 in t2file:
cnt+=1
t2string.append(linet2.rstrip())
if "OUTPUT DATA AFTER" in linet2.rstrip():
last=linet2.rstrip().split(",")
line=cnt
t2file.close()
high_iteration=[int(s) for s in last[0].split() if s.isdigit()]
for elementx in eleme_dict:
cnt2=0
for lineout in t2string[line+cnt2:-1]:
if " @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"==lineout:
cnt2+=1
elif cnt2>2:
break
elif elementx in lineout:
lineselect=lineout.split()
eleme_dict[elementx].extend(lineselect)
break
csv_columns=['X','Y','Z','ELEM','INDEX','P','T','SG','SW','X(WAT1)','X(WAT2)','PCAP','DG','DW']
if json_output:
eleme_pd=pd.DataFrame.from_dict(eleme_dict,orient='index',columns=csv_columns)
dtype= {'X':'float',
'Y':'float',
'Z':'float',
'ELEM':'str',
'INDEX':'float',
'P':'float',
'T':'float',
'SG':'float',
'SW':'float',
'X(WAT1)':'float',
'X(WAT2)':'float',
'PCAP':'float',
'DG':'float',
'DW':'float'}
eleme_pd= eleme_pd.astype(dtype)
eleme_pd.to_json("../output/PT/json/PT_json.txt",orient="index",indent=2)
with open("../output/PT/csv/PT.csv",'w') as file:
file.write(','.join(csv_columns))
file.write('\n')
for key in eleme_dict.keys():
string=""
for d in eleme_dict[key]:
string+="%s,"%(d)
file.write(string[0:-1])
file.write('\n')
file.close()
def t2_to_json(itime=None,save_full=False):
"""It creates a severals or a single json file from the output file from the TOUGH2 run
Parameters
----------
itime: float
It defines a time at which the parameters from the blocks are extracted into a json file. Must be in the same units as the TOUGH2 output file (days, seconds, etc.)
save_full: bool
If True, it creates a single output json file
Returns
-------
files
t2_ouput_{time}.json: on ../output/PT/json/evol/
file
t2_output: on ../output/PT/json/
Attention
---------
When t2_output is saved, the resulting file could be too large for the system to handle
Examples
--------
>>> t2_to_json()
"""
block__json_file='../mesh/ELEME.json'
if os.path.isfile(block__json_file):
with open('../mesh/ELEME.json') as file:
blocks=json.load(file)
else:
sys.exit("The file %s or directory do not exist, run ELEM_to_json from regeo_mesh"%output_t2_file)
parameters={0:"P",1:"T",2:"SG",3:"SW",4:"X(WAT1)",5:"X(WAT2)",6:"PCAP",7:"DG",8:"DW"}
#Creates a json file for every output time on the TOUGH2 output file
cnt=0
output_t2_file="../model/t2/t2.out"
if os.path.isfile(output_t2_file):
t2_file=open(output_t2_file, "r")
for t2_line in t2_file:
if "OUTPUT DATA AFTER" in t2_line:
if cnt!=0:
if itime==None or (itime!=None and float(time)==float(itime)):
t2_pd=pd.DataFrame.from_dict(data_dictionary,orient='index')
t2_pd.to_json('../output/PT/json/evol/t2_output_%s.json'%time,orient="index",indent=2)
if itime!=None and float(time)==float(itime):
break
cnt+=1
time=t2_line.rstrip().split(" ")[-2]
data_dictionary={}
data_dictionary[time]={}
if len(t2_line.split())==12 and t2_line.split()[0] in blocks.keys():
t2_array=t2_line.rstrip().split(" ")
data_list= list(filter(None, t2_array))
data_dictionary[time][data_list[0]]={}
for i in parameters:
data_dictionary[time][data_list[0]][parameters[i]]=float(data_list[i+2])
t2_file.close()
else:
sys.exit("The file %s or directory do not exist"%output_t2_file)
if save_full:
#It generates a dictionary with time as key and json file output as value
src='../output/PT/json/evol/'
src_files = os.listdir(src)
files_dictionary={}
for file_name in src_files:
time=file_name.split("_")[2].split(".j")[0]
full_file_name = os.path.join(src, file_name)
files_dictionary[time]=full_file_name
#It generates a single json output file; for a large model it can be too large for the system memory
t2_json='../output/PT/json/t2_output.json'
t2_output_file=open(t2_json,'w')
t2_output_file.write("{\n")
for time in sorted(files_dictionary):
file=open(files_dictionary[time],'r')
lines=file.readlines()[1:-2]
for line in lines:
t2_output_file.write(line)
t2_output_file.write(",\n")
t2_output_file.write("}")
t2_output_file.close()
def it2COF(input_dictionary):
"""Extracts the COF value for each block on the OBSERVATION section of the inverse file
Parameters
----------
input_dictionary : dictionary
Contains the name of the iTOUGH2 file
Returns
-------
file
COF_PT.json: on the output/PT/json/ folder
Attention
---------
It might change based on the iTOUGH2 version
Examples
--------
>>> it2COF(input_dictionary)
"""
it2_output_file="../model/t2/%s.out"%input_dictionary['iTOUGH2_file']
if os.path.isfile(it2_output_file):
pass
else:
return "Theres is not %.out file on t2/%s.out"%(input_dictionary['iTOUGH2_file'],input_dictionary['iTOUGH2_file'])
#First and last line in between the data
compare1 = " DATASET DATAPOINTS MEAN MEDIAN STD. DEV. AVE. DEV. SKEWNESS KURTOSIS M/S DWA COF"
compare2 = " - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -"
#Empty dataframe
data = pd.DataFrame(columns=["DATASET","DATAPOINTS","MEAN","MEDIAN","STD. DEV.","AVE. DEV.","SKEWNESS","KURTOSIS","M/S","DWA","COF"])
#Extracts the data based on compare1 and compare2
output_headers = []
with open(it2_output_file,'r') as t2_file:
it2_output_array = t2_file.readlines()
save = False
for line_i, line in enumerate(it2_output_array):
if compare1 in line.rstrip():
save = True
elif compare2 in line.rstrip():
save = False
if save:
output_headers.append(it2_output_array[line_i+1])
#Stores the data into the appropriate column
output_headers=output_headers[1:-1]
for values in output_headers:
data_i = values.split()
data = data.append({"DATASET":data_i[0],
"DATAPOINTS":data_i[2],
"MEAN":data_i[3],
"MEDIAN":data_i[4],
"STD. DEV.":data_i[5],
"AVE. DEV.":data_i[6],
"SKEWNESS":data_i[7],
"KURTOSIS":data_i[8],
"M/S":data_i[9],
"DWA":data_i[10],
"COF":data_i[12]},ignore_index = True)
data.to_json("../output/PT/json/COF_PT.json",indent=2)
def it2DATASET(input_dictionary):
"""Extracts the OBSERVATION dataset on the the inverse file
Parameters
----------
input_dictionary : dictionary
Contains the name of the iTOUGH2 file
Returns
-------
file
it2_PT.json: on the output/PT/json/ folder
Attention
---------
It might change based on the iTOUGH2 version
Examples
--------
>>> it2DATASET(input_dictionary)
"""
it2_output_file="../model/t2/%s.out"%input_dictionary['iTOUGH2_file']
if os.path.isfile(it2_output_file):
pass
else:
return "Theres is not %s.out file on t2/%s.out"%(input_dictionary['iTOUGH2_file'],input_dictionary['iTOUGH2_file'])
#First and last line in between the data
compare1="# OBSERVATION AT TIME [sec] MEASURED COMPUTED RESIDUAL WEIGHT C.O.F [%] STD. DEV. Yi Wi DWi +/-"
compare2=" Residual Plots"
data = pd.DataFrame(columns=["NUMBER","OBSERVATION","TIME","MEASURED","COMPUTED","RESIDUAL","WEIGHT","C.O.F","STD.DEV"])
#Extracts the data based on compare1 and compare2
output_headers=[]
with open(it2_output_file,'r') as t2_file:
it2_output_array = t2_file.readlines()
save = False
for line_i, line in enumerate(it2_output_array):
if compare1 in line.rstrip():
save = True
elif compare2 in line.rstrip():
save = False
if save:
output_headers.append(it2_output_array[line_i+1])
#Stores the data into the corresponding column
output_headers=output_headers[1:-3][::2]
for values in output_headers:
data_i = values.split()
data = data.append({"NUMBER":data_i[0],
"OBSERVATION":data_i[1],
"TIME":data_i[2],
"MEASURED":data_i[3],
"COMPUTED":data_i[4],
"RESIDUAL":data_i[5],
"WEIGHT":data_i[6],
"C.O.F":data_i[7],
"STD":data_i[8]},ignore_index = True)
data.to_json("../output/PT/json/it2_PT.json",indent=2)
def it2OBJF(input_dictionary):
"""Extracts the value of the objective function from the output inverse file and append it with the current time
Parameters
----------
input_dictionary : dictionary
Contains the name of the iTOUGH2 file
Returns
-------
file
OBJ.json: on the output/PT/json/ folder
Attention
---------
It might change based on the iTOUGH2 version
Examples
--------
>>> it2OBJF(input_dictionary)
"""
it2_output_file="../model/t2/%s.out"%input_dictionary['iTOUGH2_file']
if os.path.isfile(it2_output_file):
pass
else:
return "Theres is not %s.out file on t2/%s.out"%(input_dictionary['iTOUGH2_file'],input_dictionary['iTOUGH2_file'])
#First and last line in between the data
compare1 = "Objective Function"
compare2 = "=================================================================================================================================="
#Extracts the data based on compare1 and compare2
cnt=0
output_headers=[]
with open(it2_output_file,'r') as t2_file:
it2_output_array = t2_file.readlines()
save = False
for line_i, line in enumerate(it2_output_array):
if compare1 in line.rstrip():
cnt+=1
save = True
elif compare2 in line.rstrip():
save = False
if cnt == 2 and save:
output_headers.append(it2_output_array[line_i+1])
OBJ_file = "../output/PT/json/OBJ.json"
OBJ = pd.read_json(OBJ_file)
#Add the current time to the file
OBJ.loc[len(OBJ.index)] = [datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S'), float(output_headers[2][1:-4].split()[6])]
OBJ.to_json(OBJ_file,indent=2)
#not documented
def normal_directions(input_dictionary,block,block2):
"""
not documented
"""
conn=sqlite3.connect(input_dictionary['db_path'])
points=pd.read_sql_query("SELECT x1,y1,x2,y2,ELEME1,ELEME2 FROM segment WHERE ELEME1='%s' AND ELEME2='%s';"%(block,block2),conn)
conn.close()
if block[0]!=block2[0]:
vectors=[]
if (formats.formats_t2['LAYERS'][block[0]]-formats.formats_t2['LAYERS'][block2[0]])>0:
duz=-1
else:
duz=1
dux=0
duy=0
points=pd.DataFrame([[dux,duy,duz]],columns=['dux','duy','duz'])
elif block[0]==block2[0]:
points['dx']=points['x2']-points['x1']
points['dy']=points['y2']-points['y1']
points['r']=(points['dx']**2+points['dy']**2)**0.5
points['ux']=points['dx']/points['r']
points['uy']=points['dy']/points['r']
points['dux']=-points['uy']
points['duy']=points['ux']
points['duz']=0
return points
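#Illustrative sketch (added for clarity; not part of the original module): for the horizontal
#case above, the function builds the unit vector along the shared segment and rotates it 90
#degrees, so (dux,duy) is normal to the connection face. The helper below is hypothetical and
#the numbers are made up only to show the arithmetic.
def _normal_direction_example():
    dx, dy = 3.0, 4.0            #segment from (x1,y1) to (x2,y2)
    r = (dx**2 + dy**2)**0.5     #segment length, here 5.0
    ux, uy = dx/r, dy/r          #unit vector along the segment
    dux, duy = -uy, ux           #rotated 90 degrees: the face normal
    return dux, duy              #(-0.8, 0.6)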
def output_flows(input_dictionary):
"""
Reads the standard TOUGH2 .out file and stores the flow data into the database, including the model version and timestamp.
"""
conn=sqlite3.connect(input_dictionary['db_path'])
elements=pd.read_sql_query("SELECT DISTINCT ELEME FROM ELEME WHERE model_version=%d;"%(input_dictionary['model_version']),conn)
elements_list=elements['ELEME'].values.tolist()
conn.close()
column_names=['ELEME1', 'ELEME2', 'INDEX','FHEAT','FLOH','FLOF','FLOG','FLOAQ','FLOWTR2','VELG','VELAQ','TURB_COEFF','model_time','model_version','model_output_timestamp']
time_now=datetime.datetime.now()
output_t2_file="../model/t2/t2.out"
bulk_data=[]
allow=False
allowed_line=1E50
if os.path.isfile(output_t2_file):
t2_file=open(output_t2_file, "r")
for n,t2_line in enumerate(t2_file):
if "OUTPUT DATA AFTER" in t2_line:
time=t2_line.rstrip().split(" ")[-2]
fix_data=[time,input_dictionary['model_version'],time_now]
allow=True
if t2_line==" (W) (J/KG) (KG/S) (KG/S) (KG/S) (KG/S) (M/S) (M/S) (1/M)\n":
allowed_line=n+2
if t2_line==" ELEMENT SOURCE INDEX GENERATION RATE ENTHALPY X1 X2 FF(GAS) FF(AQ.) P(WB)\n":
allow=False
allowed_line=1E50
if allow and n>=allowed_line:
content=t2_line.split()
if len(content)==12 and content[0]!='ELEM1':
data_list= list(filter(None, content))
data_list.extend(fix_data)
bulk_data.append(data_list)
"""
Previous logic
#if len(t2_line.split())==12 and elements.ELEME.str.count(t2_line.split()[0]).sum()==1 and elements.ELEME.str.count(t2_line.split()[1]).sum()==1:
#if len(t2_line.split())==12 and bool(re.match("^[A-Z][A-Z][0-9][0-9][0-9]$",str(t2_line.split()[0]))) and bool(re.match("^[A-Z][A-Z][0-9][0-9][0-9]$",str(t2_line.split()[1]))): #3000s
if len(t2_line.split())==12 and len(t2_line.split()[0])==5 and not(any(y in t2_line.split() for y in ['ELEM1','GENER'] )): #2950s #t2_line.split()[0] in elements_list and t2_line.split()[1] in elements_list: #3500s
#print(t2_line.split())
t2_array=t2_line.rstrip().split(" ")
data_list= list(filter(None, t2_array))
data_list.extend(fix_data)
data_line={}
for i,name in enumerate(column_names):
try:
data_line[name]=float(data_list[i])
except (ValueError,TypeError):
data_line[name]=data_list[i]
flows_df=flows_df.append(data_line,ignore_index=True)
"""
flows_df = pd.DataFrame(bulk_data,columns = column_names)
t2_file.close()
#Writing the dataframe to the database
conn=sqlite3.connect(input_dictionary['db_path'])
flows_df.to_sql('t2FLOWSout',if_exists='append',con=conn,index=False)
conn.close()
else:
sys.exit("The file %s or directory do not exist"%output_t2_file)
def flow_direction(input_dictionary):
"""
not documented
"""
conn=sqlite3.connect(input_dictionary['db_path'])
elements=pd.read_sql_query("SELECT DISTINCT ELEME FROM ELEME WHERE model_version=%d;"%(input_dictionary['model_version']),conn)
elements_list=elements['ELEME'].values.tolist()
flows_dict={}
for element in elements_list:
#print("PRE DIRECTIONS",datetime.datetime.now())
directions=normal_directions(input_dictionary,element)
#print("AFTER DIRECTIONS",datetime.datetime.now())
#Direction 1
#print("PRE query flow",datetime.datetime.now())
flow_data=pd.read_sql_query("SELECT * FROM t2FLOWSout WHERE ELEME1='%s'AND model_time=-3359500000.0"%element,conn)
#print("AFTER query flow",datetime.datetime.now())
flow_data['flow_x']=0
flow_data['flow_y']=0
#print("PRE FIRST LOOP",datetime.datetime.now())
for index, row in flow_data.iterrows(): #horizontal
if row['ELEME2'][0]!=row['ELEME1'][0]:
#Establish the direction of the flow
if (formats.formats_t2['LAYERS'][row['ELEME1'][0]]-formats.formats_t2['LAYERS'][row['ELEME2'][0]])>0:
uy=-1
else:
uy=1
#It is necessary to check if the right position of ELEME1 and ELEME2 exists
if element==row['ELEME1']:
flow_data.loc[(flow_data['ELEME1']==row['ELEME1']) & (flow_data['ELEME2']==row['ELEME2']),"flow_y"]=uy*row['FLOF']
else:
flow_data.loc[(flow_data['ELEME1']==row['ELEME2']) & (flow_data['ELEME2']==row['ELEME1']),"flow_y"]=-uy*row['FLOF']
else:
ux=float(directions.loc[ (directions['ELEME1']==row['ELEME1']) & (directions['ELEME2']==row['ELEME2'])]['dux'])
uy=float(directions.loc[ (directions['ELEME1']==row['ELEME1']) & (directions['ELEME2']==row['ELEME2'])]['duy'])
flux=float(flow_data.loc[(flow_data['ELEME1']==row['ELEME1']) & (flow_data['ELEME2']==row['ELEME2'])]['FLOF'])
flow_data.loc[(flow_data['ELEME1']==row['ELEME1']) & (flow_data['ELEME2']==row['ELEME2']),"flow_x"]=ux*flux
flow_data.loc[(flow_data['ELEME1']==row['ELEME1']) & (flow_data['ELEME2']==row['ELEME2']),"flow_y"]=uy*flux
#print("AFTER FIRST LOOP",datetime.datetime.now())
#Direction 2
flow_data2=pd.read_sql_query("SELECT * FROM t2FLOWSout WHERE ELEME2='%s' AND model_time=-3359500000.0"%element,conn)
flow_data=flow_data.append(flow_data2)
#print("PRE SECOND LOOP",datetime.datetime.now())
for index, row in flow_data.iterrows(): #horizontal
if row['ELEME2'][0]!=row['ELEME1'][0]:
#Establish the direction of the flow
if (formats.formats_t2['LAYERS'][row['ELEME1'][0]]-formats.formats_t2['LAYERS'][row['ELEME2'][0]])>0:
uy=-1
else:
uy=1
#It is necessary to check if the right position of ELEME1 and ELEME2 exists
if element==row['ELEME1']:
flow_data.loc[(flow_data['ELEME1']==row['ELEME1']) & (flow_data['ELEME2']==row['ELEME2']),"flow_y"]=uy*row['FLOF']
else:
flow_data.loc[(flow_data['ELEME1']==row['ELEME2']) & (flow_data['ELEME2']==row['ELEME1']),"flow_y"]=-uy*row['FLOF']
else:
ux=float(directions.loc[ (directions['ELEME1']==row['ELEME1']) & (directions['ELEME2']==row['ELEME2'])]['dux'])
uy=float(directions.loc[ (directions['ELEME1']==row['ELEME1']) & (directions['ELEME2']==row['ELEME2'])]['duy'])
flux=float(flow_data.loc[(flow_data['ELEME1']==row['ELEME1']) & (flow_data['ELEME2']==row['ELEME2'])]['FLOF'])
flow_data.loc[(flow_data['ELEME1']==row['ELEME1']) & (flow_data['ELEME2']==row['ELEME2']),"flow_x"]=ux*flux
flow_data.loc[(flow_data['ELEME1']==row['ELEME1']) & (flow_data['ELEME2']==row['ELEME2']),"flow_y"]=uy*flux
#print("AFTER SECOND LOOP",datetime.datetime.now())
#print("PRE CALCS",datetime.datetime.now())
sum_flowx=flow_data['flow_x'].sum()
sum_flowy=flow_data['flow_y'].sum()
flows_dict[element]=[sum_flowx,sum_flowy,(sum_flowx**2+sum_flowy**2)**0.5]
#print("AFTER CALCS",datetime.datetime.now())
#print(element,flows_dict[element])
flows=pd.DataFrame.from_dict(flows_dict,orient='index',columns=['flow_x','flow_y','flow_mag'])
#left_side=flow_data.loc[flow_data['ELEME1']==element]
#right_side=flow_data.loc[flow_data['ELEME2']==element]
flows.to_csv('directions.csv')
conn.close()
def flow_direction2(input_dictionary):
"""
not documented
"""
conn=sqlite3.connect(input_dictionary['db_path'])
elements=pd.read_sql_query("SELECT DISTINCT ELEME FROM ELEME WHERE model_version=%d;"%(input_dictionary['model_version']),conn)
elements_list=elements['ELEME'].values.tolist()
flows_dict={}
flow_data=pd.read_sql_query("SELECT * FROM t2FLOWSout WHERE model_time=-3359500000.0",conn)
flow_data['x_flow']=flow_data.apply(lambda row : normal_directions(input_dictionary,row['ELEME1'],row['ELEME2'])['dux']*row['FLOF'],axis=1)
flow_data['y_flow']=flow_data.apply(lambda row : normal_directions(input_dictionary,row['ELEME1'],row['ELEME2'])['duy']*row['FLOF'],axis=1)
flow_data['z_flow']=flow_data.apply(lambda row : normal_directions(input_dictionary,row['ELEME1'],row['ELEME2'])['duz']*row['FLOF'],axis=1)
#[Finished in 905.5s]
elements=pd.read_sql_query("SELECT DISTINCT ELEME FROM ELEME WHERE model_version=%d;"%(input_dictionary['model_version']),conn)
elements_list=elements['ELEME'].values.tolist()
bulk_data=[]
time_now=datetime.datetime.now()
time=-3359500000.0
for element in elements_list:
x=flow_data.loc[flow_data['ELEME1']==element,'x_flow'].sum()-flow_data.loc[flow_data['ELEME2']==element,'x_flow'].sum()
y=flow_data.loc[flow_data['ELEME1']==element,'y_flow'].sum()-flow_data.loc[flow_data['ELEME2']==element,'y_flow'].sum()
z=flow_data.loc[flow_data['ELEME1']==element,'z_flow'].sum()-flow_data.loc[flow_data['ELEME2']==element,'z_flow'].sum()
bulk_data.append([element,x,y,z,time,input_dictionary['model_version'],time_now])
column_names=['ELEME','FLOF_x','FLOF_y','FLOF_z','model_time','model_version','model_output_timestamp']
flows_df = pd.DataFrame(bulk_data,columns = column_names)
#Writing the dataframe into the database
conn=sqlite3.connect(input_dictionary['db_path'])
flows_df.to_sql('t2FLOWVectors',if_exists='append',con=conn,index=False)
conn.close()
```
|
{
"source": "Jejinketsu/API_Workload",
"score": 3
}
|
#### File: server/database/connectionToMySQL.py
```python
import mysql.connector
from mysql.connector import Error
import json
class DataBaseMySql(object):
def __init__(self):
file = open('database/db_auth_mysql.json')
auth = json.load(file)
file.close()
self.host = auth['host']
self.usuario = auth['user']
self.db = auth['db']
self.password = auth['password']
self.conexao = None
self.cursor = None
def connect(self):
self.conexao = mysql.connector.connect(host=self.host, db=self.db, user=self.usuario, passwd=self.password)
self.cursor = self.conexao.cursor()
def disconnect(self):
self.conexao.close()
def select(self, fields, tables, where=None):
query = "SELECT " + fields + " FROM " + tables
if(where):
query = query + " WHERE " + where
query = query + ';'
print('query: ', query)
self.cursor.execute(query)
return self.cursor.fetchall()
def insert(self, archive):
sql_insert_blob_query ="""INSERT INTO workload(dados) VALUES (%s);"""
insert_blob_tuple = (archive,)
self.cursor.execute(sql_insert_blob_query, insert_blob_tuple)
self.conexao.commit()
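# Hedged usage sketch (added; not part of the original repository). It assumes a valid
# database/db_auth_mysql.json and an existing `workload` table with an integer `id` and a
# blob `dados` column, mirroring the commented Postgres test at the end of this package.
# db = DataBaseMySql()
# db.connect()
# rows = db.select("id, dados", "workload", "id = 1")
# db.insert(b'example payload')
# db.disconnect()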
```
#### File: server/database/connectionToPostgres.py
```python
import psycopg2
import json
class DataBasePostGres(object):
def __init__(self):
file = open('database/db_auth_postgres.json')
auth = json.load(file)
file.close()
self.host = auth['host']
self.usuario = auth['user']
self.db = auth['db']
self.password = auth['password']
self.conexao = None
self.cursor = None
def connect(self):
self.conexao = psycopg2.connect(host=self.host, database=self.db, user=self.usuario, password=self.password)
self.cursor = self.conexao.cursor()
def disconnect(self):
self.conexao.close()
def insert(self, archive):
self.cursor.execute("""INSERT INTO workload(dado) VALUES(%s);""", (archive,))
self.conexao.commit()
"""
teste = DataBasePostGres()
teste.connect()
bytes = b'teste2'
teste.insert(bytes)
teste.disconnect()
"""
```
|
{
"source": "jejjohnson/2019_rbig_rs",
"score": 2
}
|
#### File: data/climate/cmip5.py
```python
import cdsapi
import xarray as xr
from typing import Union
from src.data.climate.amip import DataLoader as DLAMIP
from src.data.climate.rcp import DataLoader as DLRCP
DATA_DIR = "/home/emmanuel/projects/2020_rbig_rs/data/climate/raw/"
xr_types = Union[xr.Dataset, xr.DataArray]
def get_cmip5_model(cmip_model: str, variable: str, model: str = "amip") -> xr_types:
if model == "amip":
loader = DLAMIP()
ds = loader.load_amip_data(cmip_model)[variable]
elif model == "rcp":
loader = DLRCP()
ds = loader.load_rcp_data(cmip_model)[variable]
else:
raise ValueError("Unrecognized model:", model)
ds.attrs["model_id"] = cmip_model
return ds
def main():
get_data()
return None
if __name__ == "__main__":
main()
```
#### File: data/climate/era5.py
```python
import cdsapi
import xarray as xr
DATA_DIR = "/home/emmanuel/projects/2020_rbig_rs/data/climate/raw/"
ERA5_PATH = f"/home/emmanuel/projects/2020_rbig_rs/data/climate/raw/era5/"
def get_era5_data():
era5_data = xr.open_mfdataset(f"{ERA5_PATH}*.nc", combine="by_coords")
era5_data = era5_data.rename({"msl": "mslp", "latitude": "lat", "longitude": "lon"})
era5_data.attrs["model_id"] = "era5"
era5_data = era5_data.rename({"mslp": "psl"})
return era5_data
def get_verify_data():
c = cdsapi.Client()
c.retrieve(
"reanalysis-era5-single-levels-monthly-means",
{
"product_type": "monthly_averaged_reanalysis",
"variable": ["mean_sea_level_pressure", "surface_pressure"],
"year": [
"1979",
"1980",
"1981",
"1982",
"1983",
"1984",
"1985",
"1986",
"1987",
"1988",
"1989",
"1990",
"1991",
"1992",
"1993",
"1994",
"1995",
"1996",
"1997",
"1998",
"1999",
"2000",
"2001",
"2002",
"2003",
"2004",
"2005",
"2006",
"2007",
"2008",
"2009",
"2010",
"2011",
"2012",
"2013",
"2014",
"2015",
"2016",
"2017",
"2018",
"2019",
],
"month": [
"01",
"02",
"03",
"04",
"05",
"06",
"07",
"08",
"09",
"10",
"11",
"12",
],
"time": "00:00",
"format": "netcdf",
},
f"{DATA_DIR}ERA5.nc",
)
return None
def main():
get_verify_data()
return None
if __name__ == "__main__":
main()
```
#### File: data/climate/loader.py
```python
import pandas as pd
from typing import List, Optional
class DataLoader:
def __init__(self, data="amip"):
self.data = data
def load_amip_data(self):
pass
class ResultsLoader:
def __init__(self, filenames: List[str]):
self.filenames = filenames
def load_dataframes(self, filenames: Optional[List[str]] = None):
if filenames is None:
filenames = self.filenames
results = pd.DataFrame()
for ifile in filenames:
# append results
new = pd.read_csv(f"{ifile}", index_col=[0])
# print(new.head())
results = results.append(new, ignore_index=True)
# results = results.drop()
return results
```
#### File: data/climate/rcp.py
```python
import os
import cdsapi
import xarray as xr
from typing import Type, Union
from zipfile import ZipFile, BadZipFile
from src.data.climate.era5 import get_era5_data
from src.data.climate.ncep import get_ncep_data
DATA_DIR = "/home/emmanuel/projects/2020_rbig_rs/data/climate/raw/rcp/"
c = cdsapi.Client()
xr_types = Union[xr.Dataset, xr.DataArray]
def get_base_model(base_model: str, variable: str) -> xr_types:
if base_model == "era5":
ds = get_era5_data()[variable]
elif base_model == "ncep":
ds = get_ncep_data()[variable]
else:
raise ValueError("Unrecognized base model:", base_model)
ds.attrs["model_id"] = base_model
return ds
class DataDownloader:
def __init__(self):
pass
def download_all(self) -> None:
# Russian Model
download_inmcm4()
self.zip_2_netcdf("inmcm4")
# Austrailian Model
download_access1_0()
self.zip_2_netcdf("access1_0")
# Austrailian Model
download_access1_3()
self.zip_2_netcdf("access1_3")
# French Model II
download_ipsl_cm5a_lr()
self.zip_2_netcdf("ipsl_cm5a_lr")
# French Model III
download_ipsl_cm5a_mr()
self.zip_2_netcdf("ipsl_cm5a_mr")
# German Model I
download_mpi_esm_lr()
self.zip_2_netcdf("mpi_esm_lr")
# German Model II
download_mpi_esm_mr()
self.zip_2_netcdf("mpi_esm_mr")
# Norweigian Model I
download_noresm1_m()
self.zip_2_netcdf("noresm1_m")
def zip_2_netcdf(self, name=str) -> None:
# Unzip files
print(f"{DATA_DIR}{name}/{name}.zip")
try:
with ZipFile(f"{DATA_DIR}{name}/{name}.zip", "r") as zipObj:
# Extract all the contents of zip file in current directory
zipObj.extractall(f"{DATA_DIR}{name}/")
print("Removing file:", f"{DATA_DIR}{name}/")
os.remove(f"{DATA_DIR}{name}/{name}.zip")
except BadZipFile:
# rename to netcdf
print("Already nc...")
print("Changing name: \n", f"{DATA_DIR}{name}/{name}.nc")
os.rename(f"{DATA_DIR}{name}/{name}.zip", f"{DATA_DIR}{name}/{name}.nc")
class DataLoader:
def __init__(self):
pass
def load_rcp_data(self, data: str = "inmcm4") -> Union[xr.DataArray, xr.Dataset]:
"""Loads AMIP models previously downloaded.
Available Models
----------------
* inmcm4
* access1_0
* bcc_csm1_1
* bcc_csm1_1_m
* bnu_esm
* cnrm_cm5
* giss_e2_r
* ipsl_cm5a_lr
* ipsl_cm5a_mr
* ipsl_cm5b_lr
* mpi_esm_lr
* mpi_esm_mr
* noresm1_m
"""
if data in [
"inmcm4",
"access1_0",
"access1_3",
"bcc_csm1_1_m",
"ipsl_cm5a_lr",
"ipsl_cm5a_mr",
"mpi_esm_lr",
"mpi_esm_mr",
"noresm1_m",
]:
return xr.open_mfdataset(f"{DATA_DIR}{data}/*.nc", combine="by_coords")
else:
raise ValueError(f"Unrecognized dataset:", data)
# inmcm4 (INM, Russia)
def download_inmcm4():
name = "inmcm4"
make_directory(DATA_DIR, name)
c.retrieve(
"projections-cmip5-monthly-single-levels",
{
"ensemble_member": "r1i1p1",
"format": "zip",
"experiment": "rcp_8_5",
"variable": "mean_sea_level_pressure",
"period": ["200601-210012"],
"model": "inmcm4",
},
f"{DATA_DIR}{name}/inmcm4.zip",
)
# ACCESS1-0 (BoM-CSIRO, Australia)
def download_access1_0():
name = "access1_0"
make_directory(DATA_DIR, name)
c.retrieve(
"projections-cmip5-monthly-single-levels",
{
"ensemble_member": "r1i1p1",
"format": "zip",
"experiment": "rcp_8_5",
"variable": "mean_sea_level_pressure",
"period": "200601-210012",
"model": "access1_0",
},
f"{DATA_DIR}{name}/access1_0.zip",
)
# ACCESS1-3 (BoM-CSIRO, Australia)
def download_access1_3():
name = "access1_3"
make_directory(DATA_DIR, name)
c.retrieve(
"projections-cmip5-monthly-single-levels",
{
"ensemble_member": "r1i1p1",
"format": "zip",
"experiment": "rcp_8_5",
"variable": "mean_sea_level_pressure",
"period": "200601-210012",
"model": "access1_3",
},
f"{DATA_DIR}{name}/access1_3.zip",
)
# bcc-csm1-1 (BCC, China)
def download_bcc_csm1_1_m():
name = "bcc_csm1_1_m"
make_directory(DATA_DIR, name)
c.retrieve(
"projections-cmip5-monthly-single-levels",
{
"ensemble_member": "r1i1p1",
"format": "zip",
"experiment": "rcp_8_5",
"variable": "mean_sea_level_pressure",
"period": "200601-210012",
"model": "bnu_esm",
},
f"{DATA_DIR}{name}/bcc_csm1_1_m.zip",
)
# IPSL-CM5A-MR (IPSL, France)
def download_ipsl_cm5a_mr():
name = "ipsl_cm5a_mr"
make_directory(DATA_DIR, name)
c.retrieve(
"projections-cmip5-monthly-single-levels",
{
"ensemble_member": "r1i1p1",
"format": "zip",
"experiment": "rcp_8_5",
"variable": "mean_sea_level_pressure",
"period": "200601-210012",
"model": "ipsl_cm5a_mr",
},
f"{DATA_DIR}{name}/ipsl_cm5a_mr.zip",
)
# IPSL-CM5A-LR (IPSL, France)
def download_ipsl_cm5a_lr():
name = "ipsl_cm5a_lr"
make_directory(DATA_DIR, name)
c.retrieve(
"projections-cmip5-monthly-single-levels",
{
"ensemble_member": "r1i1p1",
"format": "zip",
"experiment": "rcp_8_5",
"variable": "mean_sea_level_pressure",
"period": "200601-210012",
"model": "ipsl_cm5b_lr",
},
f"{DATA_DIR}{name}/ipsl_cm5a_lr.zip",
)
# MPI-ESM-LR (MPI, Germany)
def download_mpi_esm_lr():
name = "mpi_esm_lr"
make_directory(DATA_DIR, name)
c.retrieve(
"projections-cmip5-monthly-single-levels",
{
"ensemble_member": "r1i1p1",
"format": "zip",
"experiment": "rcp_8_5",
"variable": "mean_sea_level_pressure",
"period": "200601-210012",
"model": "mpi_esm_lr",
},
f"{DATA_DIR}{name}/mpi_esm_lr.zip",
)
# MPI-ESM-MR (MPI, Germany)
def download_mpi_esm_mr():
name = "mpi_esm_mr"
make_directory(DATA_DIR, name)
c.retrieve(
"projections-cmip5-monthly-single-levels",
{
"ensemble_member": "r1i1p1",
"format": "zip",
"experiment": "rcp_8_5",
"variable": "mean_sea_level_pressure",
"period": "200601-210012",
"model": "mpi_esm_mr",
},
f"{DATA_DIR}{name}/mpi_esm_mr.zip",
)
# NorESM1-M (NCC, Norway)
def download_noresm1_m():
name = "noresm1_m"
make_directory(DATA_DIR, name)
c.retrieve(
"projections-cmip5-monthly-single-levels",
{
"ensemble_member": "r1i1p1",
"format": "zip",
"experiment": "rcp_8_5",
"variable": "mean_sea_level_pressure",
"period": "200601-210012",
"model": "noresm1_m",
},
f"{DATA_DIR}{name}/noresm1_m.zip",
)
def make_directory(directory: str, name: str):
if not os.path.exists(directory + name):
os.makedirs(directory + name)
```
#### File: data/drought/loader.py
```python
import sys
sys.path.insert(0, "/home/emmanuel/code/py_esdc")
# standard packages
import xarray as xr
import pandas as pd
import numpy as np
# esdc tools
from esdc.subset import select_pixel
from esdc.shape import ShapeFileExtract, rasterize
from esdc.transform import DensityCubes
DATA_PATH = "/media/disk/databases/DROUGHT/conus/"
class DataLoader:
def __init__(self):
pass
def load_data(self, region="conus", sampling="14D"):
# load cube
drought_cube = xr.open_dataset(f"{DATA_PATH}AD_{region}_{sampling}.nc")
return drought_cube
```
#### File: experiments/spatemp/info_sub.py
```python
import sys, os
from pyprojroot import here
root = here(project_files=[".here"])
sys.path.append(str(here()))
from typing import Dict, Tuple, Optional, Union, Any
from collections import namedtuple
from sklearn.utils import gen_batches
from joblib import Parallel, delayed
import pathlib
import argparse
import pandas as pd
from tqdm import tqdm
import numpy as np
import time
import joblib
import xarray as xr
# Experiment Functions
from src.data.esdc import get_dataset
from src.features.temporal import select_period, TimePeriod, remove_climatology
from src.features.spatial import select_region, get_spain, get_europe
from sklearn.preprocessing import StandardScaler
from src.models.density import get_rbig_model
from src.models.utils import parallel_predictions
from src.experiments.utils import dict_product, run_parallel_step
from src.features.density import get_density_cubes
from src.features.preprocessing import (
standardizer_data,
get_reference_cube,
get_common_indices,
)
from sklearn.utils import check_random_state
import logging
logging.basicConfig(
level=logging.INFO,
stream=sys.stdout,
format=f"%(asctime)s: %(levelname)s: %(message)s",
)
logger = logging.getLogger()
# logger.setLevel(logging.INFO)
SPATEMP = namedtuple("SPATEMP", ["spatial", "temporal", "dimensions"])
RNG = check_random_state(123)
RES_PATH = pathlib.Path(str(root)).joinpath("data/spa_temp/info_earth")
def get_parameters(args) -> Dict:
parameters = {}
# ======================
# Variable
# ======================
if args.variable == "gpp":
parameters["variable"] = ["gross_primary_productivity"]
elif args.variable == "rm":
parameters["variable"] = ["root_moisture"]
elif args.variable == "lst":
parameters["variable"] = ["land_surface_temperature"]
elif args.variable == "lai":
parameters["variable"] = ["leaf_area_index"]
elif args.variable == "precip":
parameters["variable"] = ["precipitation"]
else:
raise ValueError("Unrecognized variable")
# ======================
# Region
# ======================
if args.region == "spain":
parameters["region"] = get_spain()
elif args.region == "europe":
parameters["region"] = get_europe()
elif args.region == "world":
parameters["region"] = ["world"]
else:
raise ValueError("Unrecognized region")
# ======================
# Period
# ======================
if args.period == "2010":
parameters["period"] = TimePeriod(name="2010", start="Jan-2010", end="Dec-2010")
elif args.period == "2002_2010":
parameters["period"] = TimePeriod(
name="2002_2010", start="Jan-2002", end="Dec-2010"
)
parameters["spatial"] = args.spatial
parameters["temporal"] = args.temporal
parameters["subsample"] = args.subsample
return parameters
def experiment_step(
params: Dict, smoke_test: bool = False, subsample: Optional[int] = None
) -> Union[Any, Any, Any, Any]:
# ======================
# experiment - Data
# ======================
# Get DataCube
logging.info(f"Loading '{params['variable']}' variable")
datacube = get_dataset(params["variable"])
# subset datacube (spatially)
try:
logging.info(f"Selecting region '{params['region'].name}'")
datacube = select_region(xr_data=datacube, bbox=params["region"])[
params["variable"]
]
except:
logging.info(f"Selecting region 'world'")
#
logging.info("Removing climatology...")
datacube, _ = remove_climatology(datacube)
# subset datacube (temporally)
logging.info(f"Selecting temporal period: '{params['period'].name}'")
datacube = select_period(xr_data=datacube, period=params["period"])
# get density cubes
logging.info(
f"Getting density cubes: S: {params['spatial']}, T: {params['temporal']}"
)
if isinstance(datacube, xr.Dataset):
# print(type(datacube))
datacube = datacube[params["variable"][0]]
density_cube_df = get_density_cubes(
data=datacube, spatial=params["spatial"], temporal=params["temporal"],
)
logging.info(f"Total data: {density_cube_df.shape}")
if smoke_test:
density_cube_df = density_cube_df.iloc[:1_000]
logging.info(f"Total data (smoke-test): {density_cube_df.shape}")
# # standardize data
logging.info(f"Standardizing data...")
x_transformer = StandardScaler().fit(density_cube_df.values)
density_cube_df_norm = pd.DataFrame(
data=x_transformer.transform(density_cube_df.values),
columns=density_cube_df.columns.values,
index=density_cube_df.index,
)
# =========================
# Model - Gaussianization
# =========================
# Gaussianize the data
logging.info(f"Gaussianizing data...")
t0 = time.time()
rbig_model = get_rbig_model(
X=density_cube_df_norm.values, subsample=params["subsample"]
)
t1 = time.time() - t0
logging.info(f"Time Taken: {t1:.2f} secs")
# get the probability estimates
logging.info(f"Getting probability estimates...")
t0 = time.time()
# add noise
prob_inputs = density_cube_df_norm.values + 1e-1 * RNG.rand(
*density_cube_df_norm.values.shape
)
logging.info(f"Parallel predictions...")
X_prob = parallel_predictions(
X=prob_inputs,
func=rbig_model.predict_proba,
batchsize=10_000,
n_jobs=-1,
verbose=1,
)
t1 = time.time() - t0
logging.info(f"Time Taken: {t1:.2f} secs")
X_prob = pd.DataFrame(data=X_prob, index=density_cube_df_norm.index,)
logging.info(f"Computing Mean...")
X_prob = X_prob.groupby(level=["lat", "lon"]).mean()
return rbig_model, x_transformer, X_prob, density_cube_df
def main(args):
logging.info("Getting parameters...")
parameters = get_parameters(args)
logging.info("Getting save path...")
save_name = (
f"{args.save}_"
f"{args.region}_"
f"{args.variable}_"
f"{args.period}_"
f"s{args.subsample}_"
f"d{args.spatial}{args.spatial}{args.temporal}"
)
if args.smoke_test:
logging.info("Starting smoke test...")
smoke_test = True
else:
smoke_test = False
rbig_model, x_transformer, X_prob_df, density_cube_df = experiment_step(
params=parameters, smoke_test=smoke_test, subsample=args.subsample
)
# ======================
# SAVING
# ======================
# # Model + Transform
# logging.info(f"Saving rbig model and transformer...")
# model = {"rbig": rbig_model, "x_transform": x_transformer, "parameters": parameters}
# joblib.dump(model, RES_PATH.joinpath(f"models/{save_name}.joblib"))
# Data
logging.info(f"Saving data...")
with open(RES_PATH.joinpath(f"cubes/{save_name}.csv"), "w") as f:
density_cube_df.to_csv(f, header=True)
# Probabilities
logging.info(f"Saving estimated probabilities...")
with open(RES_PATH.joinpath(f"probs/{save_name}.csv"), "w") as f:
X_prob_df.to_csv(f, header=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Arguments for GP experiment.")
parser.add_argument(
"--res", default="low", type=str, help="Resolution for datacube"
)
parser.add_argument(
"-v", "--variable", default="rm", type=str, help="Variable to use"
)
parser.add_argument(
"-s", "--save", default="v0", type=str, help="Save name for experiment."
)
parser.add_argument(
"--njobs", type=int, default=-1, help="number of processes in parallel",
)
parser.add_argument(
"--subsample", type=int, default=200_000, help="subset points to take"
)
parser.add_argument(
"--region", type=str, default="spain", help="Region to be Gaussianized"
)
parser.add_argument(
"--temporal", type=int, default=1, help="Number of temporal dimensions",
)
parser.add_argument(
"--spatial", type=int, default=1, help="Number of spatial dimensions"
)
parser.add_argument(
"--period", type=str, default="2010", help="Period to do the Gaussianization"
)
parser.add_argument("-sm", "--smoke_test", action="store_true")
main(parser.parse_args())
```
#### File: features/spatemp/build_features.py
```python
import xarray as xr
import numpy as np
import pandas as pd
from typing import Tuple, Optional, List, Union
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
def select_region(
ds: Union[xr.DataArray, xr.Dataset], region: str = "europe"
) -> Union[xr.DataArray, xr.Dataset]:
if region == "europe":
return ds.sel(lat=slice(71.5, 35.5), lon=slice(-18.0, 60.0))
else:
raise ValueError("Unrecognized region:", region)
def normalize_inputs(
Xtrain: Union[np.ndarray, pd.DataFrame], Xtest: Union[np.ndarray, pd.DataFrame]
) -> Tuple[np.ndarray, np.ndarray]:
# normalize inputs
x_normalizer = StandardScaler(with_mean=True, with_std=False)
xtrain_norm = x_normalizer.fit_transform(Xtrain)
xtest_norm = x_normalizer.transform(Xtest)
# # remove mean outputs
# y_normalizer = StandardScaler(with_std=False)
# ytrain_norm = y_normalizer.fit_transform(ytrain)
# ytest_norm = y_normalizer.transform(ytest)
return xtrain_norm, xtest_norm
def normalize_outputs(
Ytrain: Union[np.ndarray, pd.DataFrame], Ytest: Union[np.ndarray, pd.DataFrame]
) -> Tuple[np.ndarray, np.ndarray]:
# remove mean outputs
y_normalizer = StandardScaler(with_std=False)
ytrain_norm = y_normalizer.fit_transform(Ytrain)
ytest_norm = y_normalizer.transform(Ytest)
return ytrain_norm, ytest_norm
```
#### File: models/information/ensemble.py
```python
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils import gen_batches, resample
from sklearn.model_selection import train_test_split
from typing import Optional, Tuple
from sklearn.utils import shuffle
class Ensemble:
def __init__(self):
pass
def _fit(self, X: np.ndarray) -> BaseEstimator:
pass
def _fit_ensemble(self, X: np.ndarray, n_models: int = 10) -> float:
raise NotImplementedError
class Batch:
"""Abstract class to be used to estimate scores in batches.
Parameters
----------
batch_size : int, default = 1_000
batch size
min_batch_size : int, default = 100
the minimum batch size to be used for the indices generator
shuffle : bool, default = True
option to shuffle the data before doing batches
random_state : int, default = None
the random seed when doing the shuffling if option chosen
summary : str, default = 'mean'
the way to summarize the scores {'mean', 'median'}
Attributes
----------
raw_scores : np.ndarray
the raw batchsize scores
batch_score : float
the final score after the summary stat (e.g. mean)
"""
def __init__(
self,
batch_size: int = 1_000,
min_batch_size: int = 100,
shuffle: bool = True,
random_state: int = 123,
summary: str = "mean",
):
self.batch_size = batch_size
self.min_batch_size = min_batch_size
self.random_state = random_state
self.shuffle = shuffle
self.summary = summary
def _fit(self, X: np.ndarray, y: Optional[np.ndarray] = None):
"""
IT method to fit to batches. Must be implemented by the user.
"""
pass
def _fit_batches(self, X: np.ndarray, Y: Optional[np.ndarray] = None) -> float:
"""
Fits models to inherited class
Parameters
----------
X : np.ndarray
The data to be fit.
y : np.ndarray
The second dataset to be fit
Returns
-------
batch_score : float
the score after the summary
"""
it_measure = list()
# Shuffle dataset
if self.shuffle:
if Y is not None:
X, Y = shuffle(X, Y, random_state=self.random_state)
else:
X = shuffle(X, random_state=self.random_state)
# batch scores
for idx in gen_batches(X.shape[0], self.batch_size, self.min_batch_size):
if Y is not None:
it_measure.append(self._fit(X[idx], Y[idx]))
else:
it_measure.append(self._fit(X[idx]))
# save raw scores
self.raw_scores = it_measure
# return summary score
if self.summary == "mean":
self.batch_score = np.mean(it_measure)
elif self.summary == "median":
self.batch_score = np.median(it_measure)
else:
raise ValueError("Unrecognized summarizer: {}".format(self.summary))
return self.batch_score
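# Hedged sketch (added; not part of the original module): a minimal subclass showing how
# Batch is meant to be used. ExampleCorrBatch and its per-batch score are hypothetical;
# any estimator implementing _fit and calling _fit_batches follows the same pattern.
class ExampleCorrBatch(Batch):
    """Toy batched estimator: absolute Pearson correlation per batch, then summarized."""

    def _fit(self, X: np.ndarray, y: Optional[np.ndarray] = None) -> float:
        # score a single batch; here the correlation between the first columns of X and y
        return float(np.abs(np.corrcoef(X[:, 0], y[:, 0])[0, 1]))

    def score(self, X: np.ndarray, y: np.ndarray) -> float:
        # mean (or median) of the per-batch scores, as configured in Batch
        return self._fit_batches(X, y)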
class BootStrap:
def __init__(self, n_iterations=100):
self.n_iterations = n_iterations
def _fit(self, X: np.ndarray) -> BaseEstimator:
pass
def run_bootstrap(
self,
X: np.ndarray,
y: Optional[np.ndarray] = None,
sample_size: Optional[int] = 1_000,
) -> None:
raw_scores = list()
if sample_size is not None:
n_samples = min(X.shape[0], sample_size)
else:
n_samples = X.shape[0]
for i in range(self.n_iterations):
if y is None:
X_sample = resample(X, n_samples=n_samples)
raw_scores.append(self._fit(X_sample))
else:
X_sample, Y_sample = resample(X, y, n_samples=n_samples)
raw_scores.append(self._fit(X_sample, Y_sample))
self.raw_scores = raw_scores
return np.mean(raw_scores)
def ci(self, p: float) -> Tuple[float, float]:
"""
Return 2-sided symmetric confidence interval specified
by p.
"""
u_pval = (1 + p) / 2.0
l_pval = 1 - u_pval
l_indx = int(np.floor(self.n_iterations * l_pval))
u_indx = int(np.floor(self.n_iterations * u_pval))
sorted_scores = np.sort(self.raw_scores)
return sorted_scores[l_indx], sorted_scores[u_indx]
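# Hedged usage sketch (added; not part of the original module): a subclass implements _fit
# with the statistic of interest, then run_bootstrap and ci give a point estimate and a
# percentile interval. The names below are hypothetical.
# class MeanBoot(BootStrap):
#     def _fit(self, X):
#         return float(X.mean())
# boot = MeanBoot(n_iterations=1000)
# estimate = boot.run_bootstrap(X, sample_size=500)
# low, high = boot.ci(0.95)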
```
#### File: src/models/train_models.py
```python
from typing import Dict, Optional
import pandas as pd
from src.models.similarity import rv_coefficient, rbig_it_measures, cka_coefficient
from src.models.baseline import train_rf_model, train_rf_model, train_ridge_lr_model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from src.features import Metrics
import numpy as np
from sklearn.utils import check_random_state
def get_similarity_scores(
X_ref: pd.DataFrame, Y_compare: pd.DataFrame, verbose: int = 0
) -> Dict:
# RV Coefficient
rv_results = rv_coefficient(X_ref, Y_compare)
# CKA Coefficient
cka_results = cka_coefficient(X_ref, Y_compare)
# RBIG Coefficient
rbig_results = rbig_it_measures(X_ref, Y_compare, verbose=verbose)
results = {
**rv_results,
**cka_results,
**rbig_results,
}
return results
def get_regression_models(X: np.ndarray, y: np.ndarray, subsample: int = 10_000):
subsample = np.minimum(X.shape[0], subsample)
if subsample is not None:
rng = check_random_state(123)
idx = rng.permutation(X.shape[0])[:subsample]
X = X[idx, :]
y = y[idx, :]
random_state = 123
xtrain, xtest, ytrain, ytest = train_test_split(
X, y, train_size=0.8, random_state=random_state
)
# normalize inputs
x_normalizer = StandardScaler(with_mean=True, with_std=False)
xtrain_norm = x_normalizer.fit_transform(xtrain)
xtest_norm = x_normalizer.transform(xtest)
# remove mean outputs
y_normalizer = StandardScaler(with_std=False)
ytrain_norm = y_normalizer.fit_transform(ytrain)
ytest_norm = y_normalizer.transform(ytest)
# linear regresion model
rlr_model = train_ridge_lr_model(xtrain_norm, ytrain_norm)
ypred = rlr_model.predict(xtest_norm)
# get statistics
rlr_metrics = Metrics().get_all(ypred, ytest_norm, "rlr")
# RF Model
rf_model = train_rf_model(xtrain_norm, ytrain_norm)
ypred = rf_model.predict(xtest_norm)
# get statistics
rf_metrics = Metrics().get_all(ypred, ytest_norm, "rf")
results = {**rlr_metrics, **rf_metrics}
return results
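# Hedged usage sketch (added; not part of the original file): X_ref and Y_cmp are assumed to
# be aligned (n_samples, n_features) dataframes/arrays of the two fields being compared.
# sim_scores = get_similarity_scores(X_ref, Y_cmp)
# reg_scores = get_regression_models(X_ref.values, Y_cmp.values, subsample=10_000)
# results = {**sim_scores, **reg_scores}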
```
#### File: src/models/utils.py
```python
from typing import Tuple, Optional, Callable
from sklearn.utils import check_random_state
import numpy as np
from sklearn.utils import gen_batches, check_array
from joblib import Parallel, delayed
def subset_indices(
X: np.ndarray, subsample: Optional[int] = None, random_state: int = 123,
) -> Tuple[np.ndarray, np.ndarray]:
if subsample is not None and subsample < X.shape[0]:
rng = check_random_state(random_state)
indices = np.arange(X.shape[0])
subset_indices = rng.permutation(indices)[:subsample]
X = X[subset_indices, :]
return X
def parallel_predictions(
X: np.ndarray, func: Callable, batchsize: int = 10_000, n_jobs: int = 1, verbose=0
) -> np.ndarray:
"""Function to do parallel predictions
Primary use was for predictions but any function will do with
one inputs.
Parameters
----------
X : np.ndarray, (n_samples, n_features)
input data to be predicted
func : Callable
the callable function
batchsize : int, default=10_000
the size of the batches
n_jobs : int, default=1
the number of jobs
verbose : int, default=0
the verbosity of the parallel predictions
"""
X = check_array(X, ensure_2d=True)
# get indices slices
slices = list(gen_batches(X.shape[0], batchsize))
# delayed jobs function for predictions
jobs = (delayed(func)(X[islice, :]) for islice in slices)
# parallel function
parallel = Parallel(verbose=verbose, n_jobs=n_jobs)
# do parallel predictions
results = parallel(jobs)
# return as array of inputs
# print(len(results))
# print(results[0].shape)
results = np.concatenate(results, 0).reshape(-1, 1)
# print(results.shape)
msg = f"Sizes don't match: {results.shape}=/={X.shape}"
assert results.shape[0] == X.shape[0], msg
return results
if __name__ == "__main__":
X = np.random.randn(1_000, 1)
def f(x):
return x
X_ = parallel_predictions(X, func=f, n_jobs=2, batchsize=100)
np.testing.assert_array_almost_equal(X, X_)
```
#### File: visualization/drought/analysis.py
```python
import xarray as xr
import matplotlib.pyplot as plt
import cartopy
import cartopy.crs as ccrs
plt.style.use(["fivethirtyeight", "seaborn-poster"])
def plot_mean_time(da: xr.DataArray) -> None:
fig = plt.figure(figsize=(10, 10))
ax = plt.axes(projection=ccrs.PlateCarree())
da.mean(dim="time").plot(
ax=ax,
transform=ccrs.PlateCarree(),
cmap="RdBu_r",
robust=True,
cbar_kwargs={"shrink": 0.5},
)
ax.set_title("Land Surface Temperature")
ax.coastlines()
plt.show()
```
#### File: spatemp/info_earth/demo_spain.py
```python
import sys, os
from pyprojroot import here
from typing import Optional
import argparse
root = here(project_files=[".here"])
sys.path.append(str(here()))
import pathlib
# standard python packages
import xarray as xr
# NUMPY SETTINGS
import numpy as np
np.set_printoptions(precision=3, suppress=True)
# MATPLOTLIB Settings
import matplotlib as mpl
import matplotlib.pyplot as plt
import cartopy
import cartopy.crs as ccrs
# SEABORN SETTINGS
import seaborn as sns
sns.set_context(context="talk", font_scale=0.7)
# sns.set(rc={'figure.figsize': (12, 9.)})
# sns.set_style("whitegrid")
# PANDAS SETTINGS
import pandas as pd
pd.set_option("display.max_rows", 120)
pd.set_option("display.max_columns", 120)
RES_PATH = pathlib.Path(str(root)).joinpath("data/spa_temp/info_earth")
FIG_PATH = pathlib.Path(str(root)).joinpath("reports/figures/spa_temp/demos/infoearth/")
def plot_map(xr_data, measure: str, save_name: Optional[str] = None):
fig, ax = plt.subplots()
if measure == "probs":
xr_data.probs.mean(dim="time").plot(
ax=ax,
vmin=0,
robust=True,
cmap="Reds",
cbar_kwargs={"label": "Probability"},
)
elif measure == "info":
xr_data.shannon_info.mean(dim="time").plot(
ax=ax,
vmin=0,
robust=True,
cmap="Reds",
cbar_kwargs={"label": "Shannon Information"},
)
else:
raise ValueError(f"Unrecognized measure: {measure}")
ax.set(xlabel="Longitude", ylabel="Latitude")
plt.tight_layout()
if save_name:
fig.savefig(
FIG_PATH.joinpath(
f"{save_name.split('_')[0]}/{measure}_maps_{save_name}.png"
)
)
plt.close()
def plot_ts(xr_data, measure: str, save_name: Optional[str] = None):
fig, ax = plt.subplots()
if measure == "probs":
xr_data.probs.mean(dim=["lon", "lat"]).plot.line(
ax=ax, color="black", linewidth=3
)
ylabel = "Probability"
elif measure == "info":
xr_data.shannon_info.mean(dim=["lon", "lat"]).plot.line(
ax=ax, color="black", linewidth=3
)
ylabel = "Shannon Information"
else:
raise ValueError(f"Unrecognized measure: {measure}")
ax.set(xlabel="Time", ylabel=ylabel)
ax.legend(["Mean Predictions"])
plt.tight_layout()
if save_name:
fig.savefig(
FIG_PATH.joinpath(f"{save_name.split('_')[0]}/{measure}_ts_{save_name}.png")
)
plt.close()
def plot_ts_error(xr_data, measure: str, save_name: Optional[str] = None):
if measure == "probs":
predictions = xr_data.probs.mean(dim=["lat", "lon"])
std = xr_data.probs.std(dim=["lat", "lon"])
ylabel = "Probabilities"
elif measure == "info":
predictions = xr_data.shannon_info.mean(dim=["lat", "lon"])
std = xr_data.shannon_info.std(dim=["lat", "lon"])
ylabel = "Shannon Information"
else:
raise ValueError(f"Unrecognized measure: {measure}")
fig, ax = plt.subplots()
ax.plot(xr_data.coords["time"].values, predictions)
ax.fill_between(
predictions.coords["time"].values,
predictions - std,
predictions + std,
alpha=0.7,
color="orange",
)
ax.set(
xlabel="Time", ylabel=ylabel,
)
ax.legend(["Mean_predictions"])
plt.tight_layout()
if save_name:
fig.savefig(
FIG_PATH.joinpath(
f"{save_name.split('_')[0]}/{measure}_ts_err_{save_name}.png"
)
)
plt.close()
def plot_monthly_map(xr_data, measure: str, save_name: Optional[str] = None):
plt.figure()
if measure == "probs":
da = xr_data.probs
elif measure == "info":
da = xr_data.shannon_info
else:
raise ValueError(f"Unrecognized measure: {measure}")
da.groupby("time.month").mean().plot.pcolormesh(
x="lon", y="lat", col="month", col_wrap=3, vmin=0, robust=True, cmap="Reds"
)
plt.savefig(
FIG_PATH.joinpath(
f"{save_name.split('_')[0]}/{measure}_monthly_{save_name}.png"
)
)
plt.close()
def main(args):
dimensions = ["111", "116", "331", "333"]
for idimension in dimensions:
filename = f"{args.region}_{args.variable}_{args.period}_v0_s{args.samples}_d{idimension}"
# read csv file
probs_df = pd.read_csv(str(RES_PATH.joinpath(f"probs/{filename}" + ".csv")))
# convert to datetime
probs_df["time"] = pd.to_datetime(probs_df["time"])
# create dataframe in the format for xarray
probs_df = probs_df.set_index(["time", "lat", "lon"]).rename(
columns={"0": "probs"}
)
# remove probabilities greater than 1
probs_df["probs"][probs_df["probs"] >= 1.0] = np.nan
# shannon info
probs_df["shannon_info"] = -np.log(probs_df["probs"])
# create xarray cubes
probs_cubes = xr.Dataset.from_dataframe(probs_df)
# Probability / Shannon Information Maps
plot_map(probs_cubes, "probs", f"{filename}")
plot_map(probs_cubes, "info", f"{filename}")
# Probability Maps (per month)
plot_monthly_map(probs_cubes, "probs", f"{filename}")
plot_monthly_map(probs_cubes, "info", f"{filename}")
plot_ts(probs_cubes, "probs", f"{filename}")
plot_ts(probs_cubes, "info", f"{filename}")
plot_ts_error(probs_cubes, "probs", f"{filename}")
plot_ts_error(probs_cubes, "info", f"{filename}")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Arguments for GP experiment.")
parser.add_argument("-s", "--samples", default=200000, type=int, help="Samples")
parser.add_argument("-v", "--variable", default=str, type=str, help="Variable")
parser.add_argument("-r", "--region", default="spain", type=str, help="Region")
parser.add_argument("-p", "--period", default="2010", type=str, help="Period")
main(parser.parse_args())
```
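For reference, a hedged sketch of how `main` appears to be driven; the argument values are illustrative assumptions, and the probability CSVs under `RES_PATH/probs/` must already exist for the call to succeed.
```python
# Illustrative only: programmatic equivalent of something like
#   python demo_spain.py -r spain -v lst -p 2010 -s 200000
# (the variable name "lst" is a guess, not taken from the original script).
import argparse

args = argparse.Namespace(samples=200_000, variable="lst", region="spain", period="2010")
main(args)  # writes probability / Shannon-information figures under FIG_PATH
```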
#### File: src/visualization/taylor.py
```python
from typing import Optional, Tuple
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.projections import PolarAxes
from mpl_toolkits.axisartist import floating_axes, grid_finder
# TODO - move init with new correlation labels
class TaylorDiagram:
"""Creates a Taylor diagram
Parameters
----------
ref_point: float
    The reference point (typically the standard deviation of the reference
    data) that sets the scale of the radial axis.
fig : plt.figure, optional
    An existing matplotlib figure to draw into; a new figure is created
    when None is given.
Information
-----------
Author: <NAME>
Date: 10-02-2020
References
----------
Original Implementation:
- <NAME>
- https://gist.github.com/ycopin/3342888
Modified Implementation:
- StackOverFlow Question
- https://codereview.stackexchange.com/questions/82919/modified-taylor-diagrams
"""
def __init__(
self,
ref_point: float,
fig: Optional[plt.figure] = None,
subplot: Optional[int] = 111,
extend_angle: bool = False,
corr_labels: Optional[np.ndarray] = None,
ref_range: Tuple[float, float] = (0, 10),
ref_label: str = "Reference Point",
angle_label: str = "Correlation",
var_label: str = "Standard Deviation",
) -> None:
self.angle_label = angle_label
self.ref_label = ref_label
self.var_label = var_label
self.extend_angle = extend_angle
# correlation labels
if corr_labels is None:
corr_labels = np.array([0, 0.2, 0.4, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 1.0])
# extend
if extend_angle:
# Extend to negative correlations
self.tmax = np.pi
corr_labels = np.concatenate((-corr_labels[:0:-1], corr_labels))
else:
# limit to only positive correlations
self.tmax = np.pi / 2.0
# init figure
if fig is None:
fig = plt.figure(figsize=(8, 8))
# extend or not
self.smin = ref_range[0] * ref_point
self.smax = (ref_range[1] / 100) * ref_point + ref_point
corr_ticks = np.arccos(corr_labels)
gl1 = grid_finder.FixedLocator(corr_ticks)
tf1 = grid_finder.DictFormatter(dict(zip(corr_ticks, map(str, corr_labels))))
# Grid Helper
ghelper = floating_axes.GridHelperCurveLinear(
aux_trans=PolarAxes.PolarTransform(),
extremes=(0, self.tmax, self.smin, self.smax),
grid_locator1=gl1,
tick_formatter1=tf1,
)
# create graphical axes
ax = floating_axes.FloatingSubplot(fig, subplot, grid_helper=ghelper)
fig.add_subplot(ax)
self.graph_axes = ax
self.polar_axes = ax.get_aux_axes(PolarAxes.PolarTransform())
self.sample_points = []
# Setup Axes
self.reset_axes()
def add_reference_point(self, ref_point: float, *args, **kwargs) -> None:
line = self.polar_axes.plot([0], ref_point, *args, **kwargs)
self.sample_points.append(line[0])
return None
def add_reference_line(self, ref_point: float, *args, **kwargs) -> None:
t = np.linspace(0, self.tmax)
r = np.zeros_like(t) + ref_point
self.polar_axes.plot(t, r, *args, **kwargs)
return None
def add_point(self, var_point: float, corr_point: float, *args, **kwargs) -> None:
# add sample to plot
line = self.polar_axes.plot(np.arccos(corr_point), var_point, *args, **kwargs)
# add line to sample points
self.sample_points.append(line[0])
return None
def add_scatter(
self, var_points: np.ndarray, corr_points: np.ndarray, *args, **kwargs
) -> None:
pts = self.polar_axes.scatter(
np.arccos(corr_points), var_points, *args, **kwargs
)
self.sample_points.append(pts)
return None
def add_grid(self, *args, **kwargs):
self.graph_axes.grid(*args, **kwargs)
def add_contours(self, ref_point: float, levels: int = 4, **kwargs) -> None:
# create meshgrid of values
rs, ts = np.meshgrid(
np.linspace(self.smin, self.smax), np.linspace(0, self.tmax)
)
# calculate the distance
dist = np.sqrt(ref_point ** 2 + rs ** 2 - 2 * ref_point * rs * np.cos(ts))
self.contours = self.polar_axes.contour(ts, rs, dist, levels=levels, **kwargs)
return None
def add_legend(self, fig, *args, **kwargs):
fig.legend(
self.sample_points,
[p.get_label() for p in self.sample_points],
*args,
**kwargs
)
return None
def reset_axes(self):
self._setup_angle_axes()
self._setup_xaxis()
self._setup_yaxis()
def reset_axes_labels(
self, angle_label: str = "Correlation", var_label: str = "Variance"
):
# Adjust axes
self.graph_axes.axis["left"].label.set_text(var_label)
self.graph_axes.axis["top"].label.set_text(angle_label)
def _setup_angle_axes(self):
self.graph_axes.axis["top"].set_axis_direction("bottom")
self.graph_axes.axis["top"].toggle(ticklabels=True, label=True)
self.graph_axes.axis["top"].major_ticklabels.set_axis_direction("top")
self.graph_axes.axis["top"].label.set_axis_direction("top")
self.graph_axes.axis["top"].label.set_text(self.angle_label)
def _setup_xaxis(self):
self.graph_axes.axis["left"].set_axis_direction("bottom")
self.graph_axes.axis["left"].label.set_text(self.var_label)
def _setup_yaxis(self):
self.graph_axes.axis["right"].set_axis_direction("top")
self.graph_axes.axis["right"].toggle(ticklabels=True)
self.graph_axes.axis["right"].major_ticklabels.set_axis_direction(
"bottom" if self.extend_angle else "left"
)
self.graph_axes.axis["bottom"].toggle(ticklabels=False, label=False)
```
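A brief usage sketch for `TaylorDiagram`; the reference standard deviation and the two (standard deviation, correlation) pairs below are invented for illustration.
```python
# Illustrative only: compares two hypothetical model runs against a reference.
import matplotlib.pyplot as plt

ref_std = 1.0  # assumed standard deviation of the reference data

fig = plt.figure(figsize=(8, 8))
td = TaylorDiagram(ref_std, fig=fig, ref_label="Observations")

td.add_reference_point(ref_std, marker="*", markersize=12, color="black", label="Observations")
td.add_reference_line(ref_std, color="black", linestyle="--")

# (standard deviation, correlation) pairs for two hypothetical models
td.add_point(0.80, 0.92, marker="o", color="tab:blue", label="Model A")
td.add_point(1.05, 0.75, marker="s", color="tab:orange", label="Model B")

td.add_contours(ref_std, levels=4, colors="0.5", linewidths=0.8)
td.add_grid(linestyle=":")
td.add_legend(fig, loc="upper right")
plt.show()
```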
|
{
"source": "jejjohnson/gaussian_processes",
"score": 3
}
|
#### File: pkg_sgpuppy/pkg/Covariance.py
```python
import numpy as np
from numpy.linalg import det, cholesky
from scipy.linalg import cho_solve,inv, solve_triangular
#from numpy import dot
import traceback
from Utilities import minimize
#Scipy inv seems faster than numpy inv and cho_solve for symmetric matrices
# However cholesky decomposition might be numerically more stable
#TODO!!!: revisit matrix multiplication complexity
class Dot(object):
"""
A class to inspect the matrix multiplication complexity
"""
_a = 0
_b = 0
_c = 0
_path = ""
_line = ""
_in_func = ""
# def __init__(self):
# self.a = 0
# self.b = 0
# self.c = 0
# self.path = ""
# self.line = ""
# self.in_func = ""
def __call__(self,A,B):
"""
Usage: Like the original np.dot function
It tracks the matrix multiplication complexity and gives a stacktrace of the most complex matrix multiplication within some code
:param A: numpy Matrix
:param B: numpy Matrix
:return: numpy.dot(A,B)
"""
la = len(A.shape)
lb = len(B.shape)
n = 1
o = 1
m1 = 1
m2 = 1
if la == 2:
n,m1 = A.shape
else:
m1 = A.shape[0]
if lb == 2:
m2,o = B.shape
else:
m2 = B.shape[0]
if n*m1*o > Dot._a*Dot._b*Dot._c:
stack = traceback.extract_stack()[-2:]
Dot._path, Dot._line, Dot._in_func, _instr = stack[0]
Dot._a = n
Dot._b = m1
Dot._c = o
assert(m1 == m2)
return np.dot(A,B)
def reset(self):
"""
Reset the gathered statistics
"""
Dot._a = 0
Dot._b = 0
Dot._c = 0
Dot._path = ""
Dot._line = ""
Dot._in_func = ""
def __repr__(self):
return str(Dot._a) + "x" + str(Dot._b) + "x" + str(Dot._c) + ' called from %s in func %s at line %s' % (Dot._path, Dot._in_func, Dot._line)
dot = Dot()
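# Illustrative usage of the Dot tracker above (comments only): swap np.dot for
# the module-level `dot` callable in the code of interest, run it, then
# print(dot) to see the largest n x m x o multiplication and the call site
# that produced it; dot.reset() clears the gathered statistics.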
def dldot(a,B):
"""
:param a: diagonal of a diagonal matrix
:param B: Matrix
"""
return (a*B.T).T
def drdot(A,b):
"""
:param A: Matrix
:param b: diagonal of a diagonal matrix
"""
return A*b
def tracedot(A,B):
"""
:param A: Matrix
:param B: Matrix
:return: trace(dot(A,B))
"""
#assert np.allclose(np.dot(np.ravel(A.T),np.ravel(B)),np.trace(np.dot(A,B)))
return np.dot(np.ravel(A.T),np.ravel(B))
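# Quick sanity check for tracedot (illustrative): for compatible shapes it
# equals np.trace(np.dot(A, B)), e.g. with A of shape (4, 3) and B of shape
# (3, 4), np.allclose(tracedot(A, B), np.trace(np.dot(A, B))) holds, while
# avoiding the full matrix product.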
class Covariance(object):
"""
Superclass for all covariance functions
"""
def __init__(self):
pass
def __call__(self,xi,xj,theta):
"""
:param xi: d-dimensional vector
:param xj: d-dimensional vector
:param theta: hyperparameters
:return: covariance between xi and xj
"""
pass
def get_theta(self,x,t):
"""
Guesses the initial theta vector for the hyperparameter optimization step
:return: initial theta vector
"""
pass
def cov_matrix_ij(self,xi,xj,theta):
"""
:param xi: list of d-dimensional vectors of size N1
:param xj: list of d-dimensional vectors of size N2
:param theta: hyperparameters
:return: N1xN2 covariance matrix between xi and xj
"""
ni = len(xi)
nj = len(xj)
K = np.zeros((ni, nj))
for i in range(ni):
for j in range(nj):
K[i, j] = self(xi[i], xj[j], theta)
return K
def cov_matrix(self,x,theta):
"""
:param x: list of d-dimensional vectors of size N
:param theta: hyperparameters
:return: NxN covariance matrix
"""
n,dim = np.shape(x)
return self.cov_matrix_ij(x,x,theta)
def inv_cov_matrix(self,x,theta,cov_matrix=None):
"""
:param x: list of d-dimensional vectors
:param theta: hyperparameters
:param cov_matrix: invert this precalculated cov matrix for x and theta
:return: inverse of the covariance matrix
"""
if cov_matrix is None:
K = np.array(self.cov_matrix(x,theta))
m=len(K)
try:
return inv(K)
except ValueError:
#Inversion done right
L = cholesky(K+np.eye(m)*1e-5)
L_inv = solve_triangular(L,np.eye(m),lower=True)
K_inv = dot(L_inv.T,L_inv)
return K_inv
else:
return inv(cov_matrix)
def _log_det_cov_matrix(self,x,theta):
"""
:param x: list of d-dimensional vectors
:param theta: hyperparameters
:return: logarithm of the determinant of the cov matrix
"""
return np.linalg.slogdet(self.cov_matrix(x,theta))[1]
def _negativeloglikelihood(self,x,t,theta):
"""
:param x: list of d-dimensional vectors
:param t: list of responses
:param theta: hyperparameters
:return: negative loglikelihood
"""
N = len(x)
logdetK = self._log_det_cov_matrix(x,theta)
invK = self.inv_cov_matrix(x,theta)
try:
#print "t'*inv(K)*t ", dot(t.T, dot(invK, t))
nll = N / 2.0 * np.log(2 * np.pi) + 0.5 * logdetK + 0.5 * dot(t.T, dot(invK, t))
except (np.linalg.linalg.LinAlgError, RuntimeWarning, ZeroDivisionError,ValueError):
nll = 1.0e+20
return nll
def _d_cov_d_theta(self,xi,xj,theta,j):
"""
:param xi: d-dimensional vector
:param xj: d-dimensional vector
:param theta: hyperparameters
:param j: the part of theta to derive by
:return: derivative of the covariance d theta_j
"""
eps = 1e-5
d = np.zeros(len(theta))
d[j] = eps
return (self(xi,xj,theta+d)-self(xi,xj,theta-d))/(2*eps)
def _d_cov_matrix_d_theta_ij(self,xi,xj,theta,j):
"""
:param xi: list of d-dimensional vectors
:param xj: list of d-dimensional vectors
:param theta: hyperparameters
:param j: the part of theta to derive by
:return: derivative of the covariance matrix d theta_j
"""
ni = len(xi)
nj = len(xj)
K = np.zeros((ni, nj))
for i1 in range(ni):
for i2 in range(nj):
K[i1, i2] = self._d_cov_d_theta(xi[i1], xj[i2], theta,j)
return K
def _d_cov_matrix_d_theta(self,x,theta,j):
"""
:param x: list of d-dimensional vectors
:param theta: hyperparameters
:return: derivative of the covariance matrix d theta_j
"""
return self._d_cov_matrix_d_theta_ij(x,x,theta,j)
def _d_nll_d_theta(self,x,t,theta):
"""
:param x: list of d-dimensional vectors
:param t: list of responses
:param theta: hyperparameters
:return: Gradient of the negative log likelihood function
"""
n_theta = len(theta)
gradient = []
Kinv = self.inv_cov_matrix(x,theta)
for j in range(0,n_theta):
dKdj = self._d_cov_matrix_d_theta(x,theta,j)
gradient.append(0.5*tracedot(Kinv,dKdj) - 0.5* dot(t.T,dot(Kinv,dot(dKdj,dot(Kinv,t)))))
return np.array(gradient)
def _nll_function(self, x, t):
"""
:param x: list of d-dimensional vectors
:param t: list of responses
:return: negative log likelihood as function of theta
"""
def nll(theta):
#for p in ltheta:
# if p <= 0:
# return 1.0e+20
return self._negativeloglikelihood(x, t, theta)
return nll
def _gradient_function(self,x, t):
"""
:param x: list of d-dimensional vectors
:param t: list of responses
:return: gradient of the negative log likelihood as function of theta
"""
def gradient(theta):
try:
gr = self._d_nll_d_theta(x,t,theta)
except np.linalg.linalg.LinAlgError:
gr = self._d_nll_d_theta(x,t,theta*0.999)
return gr
return gradient
def ml_estimate(self,x,t):
"""
:param x: list of d-dimensional vectors
:param t: list of responses
:return: maximum likelihood estimate for theta
"""
d = len(x[0])
theta_start = self.get_theta(x,t)
print(theta_start)
func = self._nll_function(x, t)
fprime = self._gradient_function(x,t)
#for tnc, l_bfgs_b and slsqp
#bounds = [(1.0e-15,1e20) for i in range(len(theta_start)) ]
#for cobyla
#constr = [(lambda theta : theta[i]) for i in range(len(theta_start)) ]
bounds = None
constr = None
theta_min = minimize(func,theta_start,bounds,constr,fprime = fprime, method=["l_bfgs_b"])#["slsqp","l_bfgs_b","simplex"]
return np.array(theta_min)
#TODO numerical implementation as fallback
def get_Hessian(self,u,xi, theta):
"""
Get the Hessian of the covariance function with respect to u
:param u: d-dimensional vector
:param xi: d-dimensional vector
:param theta: hyperparameters
:return: Hessian
"""
pass
def get_Jacobian(self,u,xi, theta):
"""
Get the Jacobian of the covariance function with respect to u
:param u: d-dimensional vector
:param xi: d-dimensional vector
:param theta: hyperparameters
:return: Jacobian
"""
pass
class PeriodicCovariance(Covariance):
"""
A class to represent a mixed Gaussian and periodic covariance.
.. warning::
No derivatives for uncertainty propagation and faster hyperparameter optimization implemented yet.
"""
def __call__(self,xi,xj,theta):
d, = np.shape(xi)
v = np.exp(theta[0])
vt = np.exp(theta[1])
w = np.exp(theta[2:2+d])
p = np.exp(theta[2+d:2+2*d])
w2 = np.exp(theta[2+2*d:])
#Winv = np.diag(w)
diff = xi - xj
#slightly dirty hack to determine whether i == j
return v * np.exp(-0.5 * ((np.sin(np.pi/p* diff)**2 *w2).sum() + dot(diff.T, w* diff))) + (vt if (xi == xj).all() else 0)
#v * np.exp(-0.5 * (dot(diff.T, w* diff))) + (vt if (xi == xj).all() else 0)
def get_theta(self,x,t):
n,d = np.shape(x)
theta = np.ones(2+3*d)
theta[0] = np.log(np.var(t)) if t is not None else 1 #size
theta[1] = np.log(np.var(t)/100) if t is not None else 1 #noise
theta[2:2+d] = -2*np.log((np.max(x,0)-np.min(x,0))/2.0)#w
theta[2+d:2+2*d] = np.ones(d)#p
theta[2+2*d:] = -2*np.log((np.max(x,0)-np.min(x,0))/2.0) +np.log(100)#w2
return theta
def _d_cov_d_theta(self,xi,xj,theta,j):
d, = np.shape(xi)
v = np.exp(theta[0])
vt = np.exp(theta[1])
w = np.exp(theta[2:2+d])
p = np.exp(theta[2+d:2+2*d])
w2 = np.exp(theta[2+2*d:])
#Winv = np.diag(w)
diff = xi - xj
#slightly dirty hack to determine whether i == j
#return v * np.exp(-0.5 * ((np.sin(np.pi/p* diff)**2 *w2).sum() + dot(diff.T, w* diff))) + (vt if (xi == xj).all() else 0)
if j == 0:
# derivative with respect to log(v)
return v * np.exp(-0.5 * ((np.sin(np.pi/p* diff)**2 *w2).sum() + dot(diff.T, w* diff)))
elif j == 1:
# derivative with respect to log(vt)
return vt if (xi == xj).all() else 0
elif j >= 2 and j < 2+d:
# derivative with respect to log(w)
return -0.5 * ( diff[j-2]**2 * w[j-2]) * v * np.exp(-0.5 * ((np.sin(np.pi/p* diff)**2 *w2).sum() + dot(diff.T, w* diff)))
elif j >= 2+d and j < 2+2*d:
# derivative with respect to log(p)
i = j-(2+d)
return np.pi * diff[i] * w2[i] / p[i]*np.sin(np.pi/p[i]*diff[i])*np.cos(np.pi/p[i]*diff[i]) * v * np.exp(-0.5 * ((np.sin(np.pi/p* diff)**2 *w2).sum() + dot(diff.T, w* diff)))
elif j >= 2+2*d and j < 2+3*d:
# derivative with respect to log(w2)
i = j-(2+2*d)
return -0.5 * (np.sin(np.pi/p[i]* diff[i])**2 *w2[i]) * v * np.exp(-0.5 * ((np.sin(np.pi/p* diff)**2 *w2).sum() + dot(diff.T, w* diff)))
class GaussianCovariance(Covariance):
"""
The classic Gaussian squared exponential covariance function. Suitable to approximate smooth functions.
"""
def __call__(self,xi,xj,theta):
v = np.exp(theta[0])
vt = np.exp(theta[1])
w = np.exp(theta[2:])
#Winv = np.diag(w)
diff = xi - xj
#slightly dirty hack to determine whether i == j
return v * np.exp(-0.5 * (dot(diff.T, w* diff))) + (vt if (xi == xj).all() else 0)
def get_theta(self,x,t):
n,d = np.shape(x)
theta = np.ones(2+d)
theta[0] = np.log(np.var(t)) if t is not None else 1 #size
theta[1] = np.log(np.var(t)/4) if t is not None else 1 #noise
theta[2:] = -2*np.log((np.max(x,0)-np.min(x,0))/2.0)#w
return theta
def cov_matrix(self,x,theta):
vt = np.exp(theta[1])
n = len(x)
return self.cov_matrix_ij(x,x,theta) + vt*np.eye(n)
def cov_matrix_ij(self,xi,xj,theta):
v = np.exp(theta[0])
vt = np.exp(theta[1])
w = np.exp(theta[2:])
x1 = np.copy(xi)
x2 = np.copy(xj)
n1,dim = np.shape(x1)
n2 = np.shape(x2)[0]
x1 = x1 * np.tile(np.sqrt(w),(n1,1))
x2 = x2 * np.tile(np.sqrt(w),(n2,1))
K = -2*dot(x1,x2.T)
K += np.tile(np.atleast_2d(np.sum(x2*x2,1)),(n1,1))
K += np.tile(np.atleast_2d(np.sum(x1*x1,1)).T,(1,n2))
K = v*np.exp(-0.5*K)
return K
def _d_cov_d_theta(self,xi,xj,theta,j):
diff = xi - xj
v = np.exp(theta[0])
vt = np.exp(theta[1])
w = np.exp(theta[2:])
#Winv = np.diag(w)
if j == 0:
return v*np.exp(-0.5 * (dot(diff.T, w* diff)))
elif j == 1:
if (xi == xj).all():
return vt
else:
return 0
else:
return -0.5 * diff[j-2]**2 * v * np.exp(-0.5 * (dot(diff.T, w* diff))) * w[j-2]
#0.5*x1**2*exp(-0.5*x3**2/w3 - 0.5*x2**2/w2 - 0.5*x1**2/w1)/w1**2
def _d_cov_matrix_d_theta(self,x,theta,j):
vt = np.exp(theta[1])
n,dim = np.shape(x)
if j == 1:
return np.eye(n) *vt
else:
return self._d_cov_matrix_d_theta_ij(x,x,theta,j)
def _d_cov_matrix_d_x(self,x,theta,i,dim,Cov= None):
"""
Derive by one dimension of one x
:param x:
:param theta:
:param dim:
:param Cov: regular covariance Matrix
:return:
"""
#vt = np.exp(theta[1])
w =np.exp( theta[2:])
#Winv = np.diag(w)
n1 = np.shape(x)[0]
n2 = n1
x1d = np.atleast_2d(x[:,dim])
x2d = np.atleast_2d(x[:,dim])
#diff
d = np.tile(x1d.T,(1,n2)) - np.tile(x2d,(n1,1))
if Cov is not None:
K = -1*d*Cov*w[dim]
else:
v = np.exp(theta[0])
x1 = np.copy(x)
x2 = np.copy(x)
x1 = x1 * np.tile(np.sqrt(w),(n1,1))
x2 = x2 * np.tile(np.sqrt(w),(n2,1))
K = -2*dot(x1,x2.T)
K += np.tile(np.atleast_2d(np.sum(x2*x2,1)),(n1,1))
K += np.tile(np.atleast_2d(np.sum(x1*x1,1)).T,(1,n2))
K = -1*v*d*np.exp(-0.5*K) * w[dim]
Res = np.zeros((n1,n2))
#The ith row contains interactions between x_i and x
Res[i,:] = K[i,:]
#The ith column contains interactions between x and x_i
Res[:,i] = -K[:,i] # This is different cause x_i is now on the right side of the difference
Res[i,i] = 0 # the difference between x_i and x_i is always zero
return Res
def _d_cov_matrix_d_xi_ij(self,xi,xj,theta,i,dim, Cov=None):
"""
Derive by one dimension of one xi
:param xi:
:param xj:
:param theta:
:param i:
:param dim:
:return:
"""
#vt = np.exp(theta[1])
w =np.exp( theta[2:])
#Winv = np.diag(w)
n1 = np.shape(xi)[0]
n2 = np.shape(xj)[0]
x1d = np.atleast_2d(xi[:,dim])
x2d = np.atleast_2d(xj[:,dim])
#diff
d = np.tile(x1d.T,(1,n2)) - np.tile(x2d,(n1,1))
if Cov is not None:
K = -1*d*Cov*w[dim]
else:
v = np.exp(theta[0])
x1 = np.copy(xi)
x2 = np.copy(xj)
x1 = x1 * np.tile(np.sqrt(w),(n1,1))
x2 = x2 * np.tile(np.sqrt(w),(n2,1))
K = -2*dot(x1,x2.T)
K += np.tile(np.atleast_2d(np.sum(x2*x2,1)),(n1,1))
K += np.tile(np.atleast_2d(np.sum(x1*x1,1)).T,(1,n2))
K = -1*v*d*np.exp(-0.5*K) * w[dim]
Res = np.zeros((n1,n2))
#Only the ith row contains interactions between the xi_i and the xj
Res[i,:] = K[i,:]
return Res
def _d_cov_matrix_d_theta_ij(self,xi,xj,theta,j,Cov=None):
"""
:param x: list of d-dimensional vectors
:param theta: hyperparameters
:return: derivative of the covariance matrix d theta_j
"""
n1,dim = np.shape(xi)
n2 = np.shape(xj)[0]
w =np.exp( theta[2:])
if Cov is not None:
K = Cov
else:
v = np.exp(theta[0])
vt = np.exp(theta[1])
#Winv = np.diag(w)
x1 = np.copy(xi)
x2 = np.copy(xj)
x1 = x1 * np.tile(np.sqrt(w),(n1,1))
x2 = x2 * np.tile(np.sqrt(w),(n2,1))
K = -2*dot(x1,x2.T)
K += np.tile(np.atleast_2d(np.sum(x2*x2,1)),(n1,1))
K += np.tile(np.atleast_2d(np.sum(x1*x1,1)).T,(1,n2))
K = v*np.exp(-0.5*K)
if j == 0:
#return np.exp(-0.5 * (dot(diff.T, w* diff)))
#K = -2*dot(x1,x2.T)
#K += np.tile(np.atleast_2d(np.sum(x2*x2,1)),(n1,1))
#K += np.tile(np.atleast_2d(np.sum(x1*x1,1)).T,(1,n2))
#K = v*np.exp(-0.5*K)
return K
elif j == 1:
return np.zeros((n1,n2))
else:
x1j = np.atleast_2d(xi[:,j-2])
x2j = np.atleast_2d(xj[:,j-2])
#diff squared
d = -2 * dot(x1j.T,x2j)
d += np.tile(x2j*x2j,(n1,1))
d += np.tile((x1j*x1j).T,(1,n2))
#K = -2*dot(x1,x2.T)
#K += np.tile(np.atleast_2d(np.sum(x2*x2,1)),(n1,1))
#K += np.tile(np.atleast_2d(np.sum(x1*x1,1)).T,(1,n2))
#K = -0.5*v*d*np.exp(-0.5*K) * w[j-2]
return -0.5*K*d*w[j-2]
def get_Hessian(self,u,xi, theta):
v = np.exp(theta[0])
vt = np.exp(theta[1])
w = np.exp(theta[2:])
Winv = np.diag(w)
diff = xi - u
#exp(...) = exp(-1/2*(d1**2/e11 + d2**2/e22 + d3**2/e33)) ;
expstuff = v * np.exp(-0.5 * (np.dot(diff.T, np.dot(Winv, diff))))
tile = np.tile(diff*w,(len(u),1))
hessian = (tile*tile.T - Winv)*expstuff # We assume Winv to be diagonal
return hessian
def get_Jacobian(self,u,xi, theta):
v = np.exp(theta[0])
vt = np.exp(theta[1])
w = np.exp(theta[2:])
Winv = np.diag(w)
diff = xi - u
#exp(...) = exp(-1/2*(d1**2/e11 + d2**2/e22 + d3**2/e33)) ;
expstuff = v * np.exp(-0.5 * (np.dot(diff.T, np.dot(Winv, diff))))
jacobian = np.atleast_2d(-diff*w*expstuff).T # actually diff instead of -diff because the derivative is taken with respect to u
return jacobian
class SPGPCovariance(Covariance):
"""
A covariance function for fast matrix inversion on large datasets, based on Snelson's thesis.
<NAME>. Flexible and efficient Gaussian process models for machine learning, Gatsby Computational Neuroscience Unit, University College London, 2007
.. warning::
No derivatives for uncertainty propagation implemented yet.
.. warning::
Not as efficient as it should be.
"""
def __init__(self,m):
self.m = m
self.cov = GaussianCovariance()
def __call__(self,xi,xj,theta):
vt = np.exp(theta[1])
d = np.shape(xi)[0]
#TODO: encapsulate the theta handling of the covariance function in use
theta_gc = theta[0:2+d]
x_m = np.reshape(theta[2+d:],(self.m,d))
K_M = self.cov.cov_matrix_ij(x_m,x_m,theta_gc)
k_xi_u = self.cov.cov_matrix_ij(np.atleast_2d(xi),x_m,theta_gc)
k_u_xj = self.cov.cov_matrix_ij(x_m,np.atleast_2d(xj),theta_gc)
L_M = cholesky(K_M+1e-5*np.eye(self.m))
#KMinvR = solve(L_M.T,solve(L_M,k_u_xj))
KMinvR = cho_solve((L_M,True),k_u_xj)
k_SOR = dot(k_xi_u,KMinvR)
#k_SOR = dot(k_xi_u,dot( inv(K_M),k_u_xj))
return self.cov(xi,xj,theta_gc) if (xi == xj).all() else k_SOR
def get_theta(self,x,t):
n,d = np.shape(x)
theta = np.ones(2+d+self.m*d)
theta_gc = self.cov.get_theta(x,t)
theta[0:2+d] = theta_gc
theta[2+d:] = np.reshape(x[np.random.randint(n,size=self.m),:],self.m*d)
return theta
def cov_matrix_ij(self,xi,xj,theta):
vt = np.exp(theta[1])
n,d = np.shape(xi)
m = self.m
theta_gc = theta[0:2+d]
x_m = np.reshape(theta[2+d:],(self.m,d))
K_NM = self.cov.cov_matrix_ij(xi,x_m,theta_gc)
K_MN = self.cov.cov_matrix_ij(x_m,xj,theta_gc)
K_M = self.cov.cov_matrix_ij(x_m,x_m,theta_gc)
L_M = cholesky(K_M+1e-5*np.eye(m))
K_Minv_K_MN = cho_solve((L_M,True),K_MN)
Q_N = dot(K_NM, K_Minv_K_MN) #Q_N = dot(K_NM,dot(inv(K_M),K_NM.T))
#K_N = self.cov.cov_matrix_ij(x,x,theta_gc)
#LI = np.diag(np.diag(K_N - Q_N)+vt*np.ones(n))
return Q_N #+ LI
#
# def estimate(self,x,t,theta,x_star):
# vt = np.exp(theta[1])
# n,d = np.shape(x)
# theta_gc = theta[0:2+d]
# m = self.m
# x_m = np.reshape(theta[2+d:],(self.m,d))
#
# K_NM = self.cov.cov_matrix_ij(x,x_m,theta_gc)
# K_M = self.cov.cov_matrix_ij(x_m,x_m,theta_gc)
# L_M = cholesky(K_M+1e-5*np.eye(m))
# L_Minv_K_NM = solve_triangular(L_M,K_NM.T,lower=True)
# Q_N = dot(L_Minv_K_NM.T, L_Minv_K_NM) #dot(K_NM,dot(inv(K_M),K_NM.T))
#
# K_N = self.cov.cov_matrix_ij(x,x,theta_gc)
#
# #LIinv = np.diag(np.diag(1/(np.diag(K_N - Q_N)+vt*np.eye(n))))
# LIinvD = 1/(np.diag(K_N - Q_N)+vt*np.ones(n))
# LIinv = np.diag(LIinvD)
#
# K_starM = self.cov.cov_matrix_ij(x_star,x_m,theta_gc)
# B = K_M + dot(K_NM.T,dldot(LIinvD,K_NM))
#
# R = dot(K_NM.T,LIinvD*t)
# L_B = cholesky(B+1e-5*np.eye(m))
# BinvRt = cho_solve((L_B,True),R)
# mean = dot(K_starM,BinvRt)
#
# #K_star = self.cov.cov_matrix_ij(x_star,x_star,theta_gc)
#
# #variances = np.diag(K_star )
#
# return mean
def cov_matrix(self,x,theta):
vt = np.exp(theta[1])
n,d = np.shape(x)
m = self.m
theta_gc = theta[0:2+d]
x_m = np.reshape(theta[2+d:],(self.m,d))
K_NM = self.cov.cov_matrix_ij(x,x_m,theta_gc)
K_M = self.cov.cov_matrix_ij(x_m,x_m,theta_gc)
L_M = cholesky(K_M+1e-5*np.eye(m))
L_Minv_K_NM = solve_triangular(L_M,K_NM.T,lower=True)
Q_N = dot(L_Minv_K_NM.T, L_Minv_K_NM) #Q_N = dot(K_NM,dot(inv(K_M),K_NM.T))
K_N = self.cov.cov_matrix_ij(x,x,theta_gc)
LI = np.diag(np.diag(K_N - Q_N)+vt*np.ones(n))
return Q_N + LI
def inv_cov_matrix(self,x,theta,cov_matrix=None):
vt = np.exp(theta[1])
n,d = np.shape(x)
theta_gc = theta[0:2+d]
m = self.m
x_m = np.reshape(theta[2+d:],(self.m,d))
K_NM = self.cov.cov_matrix_ij(x,x_m,theta_gc)
K_M = self.cov.cov_matrix_ij(x_m,x_m,theta_gc)
L_M = cholesky(K_M+1e-5*np.eye(m))
L_Minv_K_NM = solve_triangular(L_M,K_NM.T,lower=True)
Q_N = dot(L_Minv_K_NM.T, L_Minv_K_NM) #Q_N = dot(K_NM,dot(inv(K_M),K_NM.T))
K_N = self.cov.cov_matrix_ij(x,x,theta_gc)
#LIinv = np.diag(1/(np.diag(K_N - Q_N)+vt*np.ones(n)))
LIinvD = 1/(np.diag(K_N - Q_N)+vt*np.ones(n))
LIinv = np.diag(LIinvD)
B = K_M + dot(K_NM.T,dldot(LIinvD,K_NM))
L_B = cholesky(B+1e-5*np.eye(m))
L_Binv_K_NM = solve_triangular(L_B,K_NM.T,lower=True) #O(m**2 n)?
Middle = dot(L_Binv_K_NM.T, L_Binv_K_NM) #nm dot mn => O(n**2 m) dominates here
result = LIinv - dldot(LIinvD,drdot(Middle,LIinvD))
return result
def _log_det_cov_matrix(self,x,theta):
return np.linalg.slogdet(self.cov_matrix(x,theta))[1]
# def d_cov_d_theta(self,xi,xj,theta,j):
# pass
#
# def d_cov_matrix_d_theta_ij(self,xi,xj,theta,j):
# pass
def _d_nll_d_theta(self,x,t,theta):
vt = np.exp(theta[1])
n,d = np.shape(x)
m = self.m
theta_gc = theta[0:2+d]
x_m = np.reshape(theta[2+d:],(self.m,d))
K_NM = self.cov.cov_matrix_ij(x,x_m,theta_gc)
K_M = self.cov.cov_matrix_ij(x_m,x_m,theta_gc)
#L_M = cholesky(K_M+1e-5*np.eye(m))
#L_Minv_K_NM = solve(L_M,K_NM.T)
#Q_N = dot(L_Minv_K_NM.T, L_Minv_K_NM) #Q_N = dot(K_NM,dot(inv(K_M),K_NM.T))
#K_N = self.cov.cov_matrix_ij(x,x,theta_gc)
L_M = cholesky(K_M+np.eye(m)*1e-5)
#Inversion done right
#TODO: cho_solve?
L_M_inv = solve_triangular(L_M,np.eye(m),lower=True)
K_M_inv = dot(L_M_inv.T,L_M_inv)
#LI = np.diag(np.diag(K_N - Q_N)+vt*np.ones(n))
n_theta = len(theta)
gradient = []
Kinv = self.inv_cov_matrix(x,theta) #TODO: N^2 M
dot_K_NM_K_M_inv = dot(K_NM,K_M_inv)
dot_K_M_inv_K_NM_T = dot_K_NM_K_M_inv.T
dot_Kinv_t = dot(Kinv,t)
Cov_xm_xm = self.cov.cov_matrix_ij(x_m,x_m,theta_gc)
Cov_x_xm = self.cov.cov_matrix_ij(x,x_m,theta_gc)
Cov_x_x = self.cov.cov_matrix_ij(x,x,theta_gc)
for j in range(0,n_theta):
if j < 2+d:
if j ==1 :
dKdj = vt*np.eye(n)
else:
K_NM_d = self.cov._d_cov_matrix_d_theta_ij(x,x_m,theta_gc,j,Cov=Cov_x_xm)
K_M_d = self.cov._d_cov_matrix_d_theta_ij(x_m,x_m,theta_gc,j,Cov=Cov_xm_xm)
K_N_d = self.cov._d_cov_matrix_d_theta_ij(x,x,theta_gc,j,Cov=Cov_x_x)
#Derivation by the hyperparameters:
#print K_M_inv -inv(K_M)#
#print "difference: ", np.sum(np.abs(K_M_inv -inv(K_M)))
#dKdj = Q_N_dt + LI_dt
else:
i = (j-(2+d))//d
dim = (j-(2+d))%d
K_NM_d = self.cov._d_cov_matrix_d_xi_ij(x_m,x,theta_gc,i,dim,Cov=Cov_x_xm.T).T#)
K_M_d = self.cov._d_cov_matrix_d_x(x_m,theta_gc,i,dim,Cov=Cov_xm_xm).T#,Cov=Cov_xm_xm).T
K_N_d = np.zeros((n,n))
#Q_N_dt = 2*dot(K_NM_d[i],dot_K_M_inv_K_NM_T) - dot(dot_K_NM_K_M_inv,dot( K_M_d,dot_K_M_inv_K_NM_T))
#basically the same as above:
#LI_dt = -np.diag(np.diag(Q_N_dt)) #K_N_d == Zeros
if j != 1:
Q_N_dt = 2*dot(K_NM_d,dot_K_M_inv_K_NM_T) - dot(dot_K_NM_K_M_inv,dot( K_M_d,dot_K_M_inv_K_NM_T)) #TODO: N^2 M
LI_dt = np.diag(np.diag(K_N_d - Q_N_dt))
dKdj = Q_N_dt + LI_dt
#dKdj = self.d_cov_matrix_d_theta(x,theta,j)
gradient.append(0.5*tracedot(Kinv,dKdj) - 0.5* dot(dot_Kinv_t.T,dot(dKdj,dot_Kinv_t))) #TODO: N^2 M
return np.array(gradient)
def _d_cov_matrix_d_theta(self,x,theta,j):
vt = np.exp(theta[1])
n,d = np.shape(x)
m = self.m
theta_gc = theta[0:2+d]
x_m = np.reshape(theta[2+d:],(self.m,d))
K_NM = self.cov.cov_matrix_ij(x,x_m,theta_gc)
K_M = self.cov.cov_matrix_ij(x_m,x_m,theta_gc)
#L_M = cholesky(K_M+1e-5*np.eye(m))
#L_Minv_K_NM = solve(L_M,K_NM.T)
#Q_N = dot(L_Minv_K_NM.T, L_Minv_K_NM) #Q_N = dot(K_NM,dot(inv(K_M),K_NM.T))
#K_N = self.cov.cov_matrix_ij(x,x,theta_gc)
L_M = cholesky(K_M+np.eye(m)*1e-5)
#TODO: cho_solve?
L_M_inv = solve_triangular(L_M,np.eye(m),lower=True)
K_M_inv = dot(L_M_inv.T,L_M_inv)
#LI = np.diag(np.diag(K_N - Q_N)+vt*np.ones(n))
if j < 2+d:
if j ==1 :
return vt*np.eye(n)
else:
K_NM_d = self.cov._d_cov_matrix_d_theta_ij(x,x_m,theta_gc,j)
K_M_d = self.cov._d_cov_matrix_d_theta_ij(x_m,x_m,theta_gc,j)
K_N_d = self.cov._d_cov_matrix_d_theta_ij(x,x,theta_gc,j)
#Derivation by the hyperparameters:
#print K_M_inv -inv(K_M)#
#print "difference: ", np.sum(np.abs(K_M_inv -inv(K_M)))
Q_N_dt = dot(K_NM_d,dot(K_M_inv, K_NM.T)) + dot(K_NM,dot(K_M_inv, K_NM_d.T)) - dot(K_NM ,dot(K_M_inv,dot( K_M_d,dot(K_M_inv, K_NM.T))))
LI_dt = np.diag(np.diag(K_N_d - Q_N_dt))
return Q_N_dt + LI_dt
else:
i = (j-(2+d))//d
dim = (j-(2+d))%d
K_NM_d = self.cov._d_cov_matrix_d_xi_ij(x_m,x,theta_gc,i,dim).T #self.cov.d_cov_matrix_d_theta_ij(x,x_m,theta_gc,j)
K_M_d = self.cov._d_cov_matrix_d_x(x_m,theta_gc,i,dim).T#self.cov.d_cov_matrix_d_theta_ij(x_m,x_m,theta_gc,j)
#basically the same as above:
Q_N_dt = dot(K_NM_d,dot(K_M_inv, K_NM.T)) + dot(K_NM,dot(K_M_inv, K_NM_d.T)) - dot(K_NM ,dot(K_M_inv,dot( K_M_d,dot(K_M_inv, K_NM.T))))
LI_dt = -np.diag(np.diag(Q_N_dt)) #K_N_d == Zeros
return Q_N_dt + LI_dt
def _negativeloglikelihood(self,x,t,theta):
# Code rewritten from Snelson 2006
delta = 1e-6
n = self.m
y = np.atleast_2d(t).T
N,dim = np.shape(x)
xb = np.reshape(theta[2+dim:],(n,dim))
b = np.exp(theta[2:2+dim]) #w
c = np.exp(theta[0]) #v
sig = np.exp(theta[1]) #vt
x = x*1.0
xb = xb * np.tile(np.sqrt(b),(n,1))
x = x * np.tile(np.sqrt(b),(N,1))
Q = dot(xb,xb.T)
Q = np.tile(np.atleast_2d(np.diag(Q)).T,(1,n)) + np.tile(np.diag(Q),(n,1)) - 2*Q
Q = c*np.exp(-0.5*Q) + delta*np.eye(n)
K = -2*dot(xb,x.T)
K += np.tile(np.atleast_2d(np.sum(x*x,1)),(n,1))
K += np.tile(np.atleast_2d(np.sum(xb*xb,1)).T,(1,N))
K = c*np.exp(-0.5*K)
L = np.linalg.cholesky(Q)
V = solve_triangular(L,K,lower=True)
ep = np.atleast_2d(1 + (c-np.sum(V**2,0))/sig).T
K = K/np.tile(np.sqrt(ep).T,(n,1))
V = V/np.tile(np.sqrt(ep).T,(n,1))
y = y/np.sqrt(ep)
Lm = np.linalg.cholesky(sig*np.eye(n) + dot(V,V.T))
invLmV = solve_triangular(Lm,V,lower=True)
bet = dot(invLmV,y)
fw = np.sum(np.log(np.diag(Lm))) + (N-n)/2*np.log(sig) + (dot(y.T,y) - dot(bet.T,bet))/2/sig + np.sum(np.log(ep))/2 + 0.5*N*np.log(2*np.pi)
return fw[0,0]
#TODO!!!: Hessian+ Jacobian
#TODO!!!: SPGP_DR
# class SPGP_DR(Covariance):
# def __call__(self,xi,xj,theta):
# pass
#
# def get_theta(self,d,n):
# pass
#
# def cov_matrix_ij(self,xi,xj,theta):
# pass
#
#
# def cov_matrix(self,x,theta):
# vt = theta[1]
# n = len(x)
# return self.cov_matrix_ij(x,x,theta) + vt*np.eye(n) #
#
# def inv_cov_matrix(self,x,theta,cov_matrix=None):
# pass
#
#
# def d_cov_d_theta(self,xi,xj,theta,j):
# pass
#
# def d_cov_matrix_d_theta_ij(self,xi,xj,theta,j):
# pass
```
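A small sketch of exercising `GaussianCovariance` on synthetic 1-D data, assuming the module above is importable; the full `ml_estimate` fit additionally needs the `Utilities.minimize` helper imported at the top of the file.
```python
# Illustrative only: theta holds (log signal variance, log noise variance,
# log inverse squared length scales), as laid out in get_theta above.
import numpy as np

cov = GaussianCovariance()

x = np.random.uniform(-3, 3, size=(50, 1))
t = np.sin(x[:, 0]) + 0.1 * np.random.randn(50)

theta0 = cov.get_theta(x, t)          # initial hyperparameter guess
K = cov.cov_matrix(x, theta0)         # 50 x 50 matrix, noise on the diagonal
K_inv = cov.inv_cov_matrix(x, theta0)
nll = cov._negativeloglikelihood(x, t, theta0)

# theta_ml = cov.ml_estimate(x, t)    # full ML fit; requires Utilities.minimize
print(K.shape, nll)
```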
#### File: gaussian_processes/pkg_sgpuppy/uncertainty.py
```python
import numpy as np
from scipy.linalg import solve_triangular
class GPUncertainty(object):
"""Gaussian Process Uncertainty Propagation
This module propagates uncertainty in the inputs through a fitted
Gaussian process model.
"""
def __init__(self, gp_model, x_error):
self.gp_model = gp_model
self.x_error = x_error
def fit(self, X):
# extract kernel parameters from previous GP Model
# kernel parameters
kernel_model = self.gp_model.kernel_
self.signal_variance = kernel_model.get_params()['k1__k1__constant_value']
self.length_scale = kernel_model.get_params()['k1__k2__length_scale']
self.likelihood_variance = kernel_model.get_params()['k2__noise_level']
# weights and data
self.weights = self.gp_model.alpha_
self.x_train = self.gp_model.X_train_
# kernel matrices
self.L = self.gp_model.L_
L_inv = solve_triangular(self.L.T, np.eye(self.L.shape[0]))
self.K_inv = np.dot(L_inv, L_inv.T)
# initialize parameters
return self
def predict(self, X, return_std=False):
return None
def propagate_error(self):
return
def get_covariance(self, u, x):
# Squared-exponential covariance between a test input u and a training
# input x, using the signal variance and length scale extracted in fit()
# (this mirrors the RBF part of the fitted scikit-learn kernel).
diff = u - x
D = np.sum((diff / self.length_scale) ** 2)
C_ux = self.signal_variance * np.exp(-0.5 * D)
return C_ux
def get_jacobian(self):
# Not implemented yet; intended to mirror GaussianCovariance.get_Jacobian.
raise NotImplementedError
def get_hessian(self):
# Not implemented yet; intended to mirror GaussianCovariance.get_Hessian.
raise NotImplementedError
```
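`GPUncertainty.fit` reads kernel parameters by the scikit-learn names `k1__k1__constant_value`, `k1__k2__length_scale`, and `k2__noise_level`, which correspond to a `ConstantKernel * RBF + WhiteKernel` kernel; a hedged sketch of wiring one up follows (the data and the `x_error` value are illustrative assumptions).
```python
# Illustrative only: fits a scikit-learn GP with the kernel layout that
# GPUncertainty.fit expects, then extracts its parameters.
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel, WhiteKernel

rng = np.random.RandomState(0)
X = rng.uniform(-3, 3, size=(40, 1))
y = np.sin(X[:, 0]) + 0.1 * rng.randn(40)

kernel = ConstantKernel(1.0) * RBF(length_scale=1.0) + WhiteKernel(noise_level=0.1)
gp = GaussianProcessRegressor(kernel=kernel, alpha=1e-10).fit(X, y)

gp_unc = GPUncertainty(gp, x_error=0.05).fit(X)
print(gp_unc.signal_variance, gp_unc.length_scale, gp_unc.likelihood_variance)
```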
|