repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---|
toddheitmann/PetroPy | setup.py | 1 | 1590 | """Setup script for PetroPy"""
from setuptools import setup
from os import path
from petropy import __version__
with open(path.join(path.dirname(__file__), "requirements.txt"), "r") as f:
requirements = f.read().splitlines()
with open(path.join(path.dirname(__file__), "README.rst"), "r") as f:
long_description = f.read()
setup(
name = 'petropy',
packages=["petropy", ],
version = __version__,
description = 'A package to calculate petrophysical properties for formation evaluation.',
long_description = long_description,
author = 'Todd Heitmann',
author_email = '[email protected]',
url = 'https://github.com/toddheitmann/petropy',
keywords = ['petrophysics', 'formation evaluation', 'reservoir characterization', 'Oil and Gas'],
classifiers=[
"Intended Audience :: Customer Service",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Other Audience",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering",
"Topic :: System :: Filesystems",
"Topic :: Scientific/Engineering :: Information Analysis",
],
install_requires = requirements,
package_data = {'petropy': ['data/*.csv', 'data/*.xml', 'data/*.las']}
)
| mit | 8,575,491,469,597,915,000 | 37.780488 | 101 | 0.637107 | false |
saisankargochhayat/algo_quest | leetcode/115. Distinct Subsequences/soln.py | 1 | 1479 | from functools import lru_cache
class Solution:
def numDistinct(self, s: str, t: str) -> int:
@lru_cache(maxsize=None)
def helper(i, j):
M, N = len(s), len(t)
if i == M or j == N or M-i < N-j:
return int(j == N)
# whether s[i] matches t[j] or not, we can always try skipping s[i]
ans = helper(i+1, j)
# if it matches we skip both
if s[i] == t[j]:
ans += helper(i+1, j+1)
return ans
res = helper(0,0)
return res
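# Illustrative check (the classic example for this problem, not part of the original file):
#   Solution().numDistinct("rabbbit", "rabbit")  # -> 3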
class Solution:
def numDistinct(self, s: str, t: str) -> int:
# Dictionary for memoization
mem = {}
def helper(i, j):
M, N = len(s), len(t)
# Base case
if i == M or j == N or M - i < N - j:
return int(j == len(t))
# Check if the result is already cached
if (i, j) in mem:
return mem[i,j]
# Always make this recursive call
ans = helper(i + 1, j)
# If the characters match, make the other
# one and add the result to "ans"
if s[i] == t[j]:
ans += helper(i + 1, j + 1)
# Cache the answer and return
mem[i, j] = ans
return ans
return helper(0, 0) | apache-2.0 | 3,884,121,225,222,017,000 | 27.461538 | 74 | 0.408384 | false |
skosukhin/spack | var/spack/repos/builtin/packages/trimgalore/package.py | 1 | 1901 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Trimgalore(Package):
"""Trim Galore! is a wrapper around Cutadapt and FastQC to consistently
apply adapter and quality trimming to FastQ files, with extra
functionality for RRBS data."""
homepage = "https://github.com/FelixKrueger/TrimGalore"
url = "https://github.com/FelixKrueger/TrimGalore/archive/0.4.4.tar.gz"
version('0.4.4', 'aae1b807b48e38bae7074470203997bb')
depends_on('perl', type=('build', 'run'))
depends_on('py-cutadapt', type=('build', 'run'))
depends_on('fastqc')
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('trim_galore', prefix.bin)
| lgpl-2.1 | 4,734,068,059,006,878,000 | 42.204545 | 80 | 0.6707 | false |
benob/chainer | chainer/functions/evaluation/accuracy.py | 1 | 2436 | import numpy
import six
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class Accuracy(function.Function):
def __init__(self, ignore_label=None):
self.ignore_label = ignore_label
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
x_type, t_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
t_type.dtype == numpy.int32
)
t_ndim = t_type.ndim.eval()
type_check.expect(
x_type.ndim >= t_type.ndim,
x_type.shape[0] == t_type.shape[0],
x_type.shape[2: t_ndim + 1] == t_type.shape[1:]
)
for i in six.moves.range(t_ndim + 1, x_type.ndim.eval()):
type_check.expect(x_type.shape[i] == 1)
def forward(self, inputs):
xp = cuda.get_array_module(*inputs)
y, t = inputs
if self.ignore_label is not None:
mask = (t == self.ignore_label)
ignore_cnt = mask.sum()
# will always be true when the true label is ignore_label
# TODO(henry0312)
# If cupy.where returns indexes, we could make the code better.
# Also, we would need Advanced Indexing.
pred = xp.where(mask, self.ignore_label,
y.argmax(axis=1).reshape(t.shape))
count = (pred == t).sum() - ignore_cnt
total = t.size - ignore_cnt
if total == 0:
return xp.asarray(0.0, dtype=y.dtype),
else:
return xp.asarray(float(count) / total, dtype=y.dtype),
else:
pred = y.argmax(axis=1).reshape(t.shape)
return xp.asarray((pred == t).mean(dtype=y.dtype)),
def accuracy(y, t, ignore_label=None):
"""Computes muticlass classification accuracy of the minibatch.
Args:
y (Variable): Variable holding a matrix whose (i, j)-th element
indicates the score of the class j at the i-th example.
t (Variable): Variable holding an int32 vector of ground truth labels.
ignore_label (int or None): Skip calculating accuracy
if the true label is ``ignore_label``.
Returns:
Variable: A variable holding a scalar array of the accuracy.
.. note:: This function is non-differentiable.
"""
return Accuracy(ignore_label=ignore_label)(y, t)
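# Illustrative usage (a sketch, assuming plain NumPy arrays are accepted by the
# Function call convention, as in this version of chainer):
#   y = numpy.asarray([[0.1, 0.9], [0.8, 0.2]], dtype=numpy.float32)
#   t = numpy.asarray([1, 0], dtype=numpy.int32)
#   accuracy(y, t).data  # -> 1.0, since both argmax predictions match the labels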
| mit | -7,014,396,633,179,930,000 | 31.918919 | 78 | 0.573481 | false |
zakirovandrey/cfmaxwell | src/genConeFold.py | 1 | 21709 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from types import *
from operator import *
def makeCombinations(lst, res=['']):
if len(lst)<=0: return res
else: return makeCombinations(lst[:-1], [c+n for n in res for c in lst[-1]])
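# Illustrative note (not in the original source): makeCombinations builds every
# string whose i-th character is taken from lst[i], with the first position
# varying fastest, e.g. makeCombinations(['ab', 'cd']) -> ['ac', 'bc', 'ad', 'bd'].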
class CFact:
def __init__(self, Gen, actN):
self.Gen=Gen; self.actN=actN
self.podDatas,self.podDatasShift = ['',],[0,]
for s in xrange(Gen.dim):
#datas = self.pars4actN[actN[s]]
datas = Gen.Rules2act['pars'][actN[s]]
for k in xrange(1<<s):
self.podDatas.append(self.podDatas[k] + datas[1])
self.podDatas[k] += datas[0]
self.podDatasShift.append(self.podDatasShift[k] + (0,1<<s)[datas[1] is 'p'])
pass
pass
pass
def PodActList(self, tC='A'):
tier0=makeCombinations([self.Gen.Rules2act['subacts'][a][:2] for a in self.actN])
tier1=makeCombinations([self.Gen.Rules2act['subacts'][a][2:] for a in self.actN])
#tier0=makeCombinations([self.decomp4actN[a][:2] for a in self.actN])
#tier1=makeCombinations([self.decomp4actN[a][2:] for a in self.actN])
tier2=reduce(add,zip(tier0,tier1))
tier0=filter(lambda (a,n,t):'-' not in a, zip(tier0,xrange(len(tier0)),['B']*len(tier0))); tier0.reverse()
tier1=filter(lambda (a,n,t):'-' not in a, zip(tier1,xrange(len(tier1)),['T']*len(tier1))); tier1.reverse()
tier2=filter(lambda (a,n,t):'-' not in a, zip(tier2,xrange(len(tier2)),['B','T']*len(tier0))); tier2.reverse()
if tC is 'B': return tier0
if tC is 'T': return tier1
if tC is 'BT': return tier2
return tier0+tier1
def PodActList_mp(self):
tier=makeCombinations([self.Gen.Rules2act['subacts_mp'][a] for a in self.actN])
tier=filter(lambda (a,n):'-' not in a, zip(tier,xrange(len(tier)))); tier.reverse()
return tier
def getIsh(self, datI, shI):
return filter(len, [('',self.Gen.rulesShift[self.podDatas[datI]][s])[(shI&(1<<s))>0] for s in xrange(self.Gen.dim)])
def getPodActPar(self, par, parI, cntI, tC):
'''Returns the parameter parI (0..2^d) for a sub-act whose base datas is cntI (0..2^d), based on the template par'''
if tC is 'B': datI,poddatI = cntI&parI,cntI^parI
if tC is 'T': datI,poddatI = cntI|parI,((1<<self.Gen.dim)-1)&(~(cntI^parI))
if tC is 'X':
parIl,cntIl=map(lambda v:(0,1,-1)[v],self.Gen.num2list(parI,3)),self.Gen.num2list(cntI)
datI = self.Gen.list2num([(p+n)>=1 for (p,n) in zip(parIl,cntIl)])
poddatI = self.Gen.list2num([1&~((p&1)^n) for (p,n) in zip(parIl,cntIl)])
#datI,poddatI --- indices of the datas and of its sub-datas for the parameter parI
poddatIx = 0
for s in xrange(self.Gen.dim-1,-1,-1):
i2s = 1<<s
if self.Gen.Rules2dim[self.podDatas[datI][s]]: poddatIx = 2*poddatIx+((poddatI&i2s)!=0)
#poddatIx --- offset of the parameter within the sub-datas array of the datas datI
#print par, parI, cntI, tC,self.podDatas[datI],"->",datI,poddatI,poddatIx
if par.find('dat')>=0:
dat_shift = self.podDatasShift[datI]
return (self.Gen.datTmpl%datI+'->',self.Gen.datTmpl%(datI-dat_shift)+'[%s].'%('+'.join(self.getIsh(datI-dat_shift,dat_shift))))[dat_shift>0]+'datas'+('','+%d'%poddatIx)[poddatIx>0]
if par.find('const int _I')>=0:
sh = par[par.find('const int _I')+len('const int _I'):]
sI = sh.find('p')
if sI >= 0:
if poddatI&(1<<sI): return '%d'%(1<<self.Gen.get_dim(sh[:sI]))
return 'I'+sh
sI = sh.find('m')
if sI >= 0:
if poddatI&(1<<sI): return '-I'+sh.replace('m','p')
return '-%d'%(1<<self.Gen.get_dim(sh[:sI]))
return '?'+sh
return '======== not implemented yet,',par, n, t
class CFact_mp(CFact):
def __init__(self, Gen, actN):
CFact.__init__(self, Gen, actN)
self.podDatas_mp,self.podDatasShift_mp = ['',],[0,]
for s in xrange(Gen.dim):
datas = Gen.Rules2act_mp['pars'][actN[s]]
for k in xrange(3**s): self.podDatas_mp.append(self.podDatas_mp[k] + datas[2])
for k in xrange(3**s): self.podDatas_mp.append(self.podDatas_mp[k] + datas[0])
for k in xrange(3**s): self.podDatas_mp[k] += datas[1]
for k in xrange(3**s): self.podDatasShift_mp.append(self.podDatasShift_mp[k] + (0,3**s)[datas[2] is 'p'])
for k in xrange(3**s): self.podDatasShift_mp.append(self.podDatasShift_mp[k] + 2*(0,3**s)[datas[0] is 'm'])
pass
pass
def PodActList_mp(self):
#tier=makeCombinations([self.decomp4actNmp[a] for a in self.actN])
tier=makeCombinations([self.Gen.Rules2act_mp['subacts'][a] for a in self.actN])
tier=filter(lambda (a,n):'-' not in a, zip(tier,xrange(len(tier)))); tier.reverse()
return tier
def PodActList_mpPIC(self):
#tier=makeCombinations([self.decomp4actNmpPIC[a] for a in self.actN])
tier=makeCombinations([self.Gen.Rules2act_mp['subactsPIC'][a] for a in self.actN])
tier=filter(lambda (a,n):'-' not in a, zip(tier,xrange(len(tier)))); tier.reverse()
return tier
def getIsh_mp(self, datI, shI):
'''Computes the shift of a datas'''
if '_' in self.podDatas_mp[datI]: return ('<no-data>',)
#print datI, self.podDatas_mp[datI], self.Gen.rulesShift[self.podDatas_mp[datI]]
shIl = self.Gen.num2list(shI,3)
Ish=[]
for s,sh in zip(xrange(self.Gen.dim), self.Gen.num2list(shI,3)):
rul = self.Gen.rulesShift[self.podDatas_mp[datI]][s]
Ish.append(('',rul, rul.replace('p','m'))[shIl[s]])
return filter(len, Ish)
def getPodActPar(self, par, parI, cntIl, tC):
'''Returns the parameter parI (0..3^d) for a sub-act whose base datas is cntI (0..2^d), based on the template par'''
if tC is 'X':
parIl=map(lambda v:(0,1,-1)[v],self.Gen.num2list(parI,3))
datIl = [(-1,-1,0,0,1,1)[p+n+2] for (p,n) in zip(parIl,cntIl)] # equivalently (p+n)/2 # index (offset) of the datas relative to the base cntI
poddatIl = [(0,1,0,1,0,1)[p+n+2] for (p,n) in zip(parIl,cntIl)] # equivalently (p+n)%2 # index of the poddatas within the datas
datI = self.Gen.list2num([((p+n)/2)%3 for (p,n) in zip(parIl,cntIl)], 3)
poddatI = self.Gen.list2num([((p+n)%2)&3 for (p,n) in zip(parIl,cntIl)])
poddatIx = 0
for s in xrange(self.Gen.dim-1,-1,-1):
i2s = 1<<s
if self.Gen.Rules2dim[self.podDatas_mp[datI][s]]: poddatIx = 2*poddatIx+((poddatI&i2s)!=0)
if par.find('dat')>=0:
dat_shift = self.podDatasShift_mp[datI]
return (self.Gen.datTmpl%datI+'->',self.Gen.datTmpl%(datI-dat_shift)+'[%s].'%('+'.join(self.getIsh_mp(datI-dat_shift,dat_shift))))[dat_shift!=0]+'datas'+('','+%d'%poddatIx)[poddatIx>0]
if par.find('const int _I')>=0:
sh = par[par.find('const int _I')+len('const int _I'):]
sI = sh.find('p')
if sI >= 0:
if poddatIl[sI]==1: return '%d'%(1<<self.Gen.get_dim(sh[:sI]))
if datIl[sI]<=0: return '-I'+sh.replace('p','m')
return 'I'+sh
sI = sh.find('m')
if sI >= 0:
if poddatIl[sI]==0: return '-%d'%(1<<self.Gen.get_dim(sh[:sI]))
if datIl[sI]>=0: return '-I'+sh.replace('m','p')
return 'I'+sh
return '?'+sh
return '======== not implemented yet,',par, n, t
class CFpodact(CFact):
def __init__(self, act, actN):
CFact.__init__(self, act.Gen, actN)
self.nadact = act
pass
def getParsList(self, n, tC):
pars_list, full_pars_list = self.Gen.get_pars(self.podDatas)
fakt_pars = [self.nadact.getPodActPar(par, full_pars_list.index(par), n,tC) for par in pars_list]
return fakt_pars
class CFpodact_mp(CFact_mp):
def __init__(self, act, actN):
CFact_mp.__init__(self, act.Gen, actN)
self.nadact = act
def getParsList(self, n, tC):
pars_list, full_pars_list = self.Gen.get_pars_mp(self.podDatas_mp)
fakt_pars = [self.nadact.getPodActPar(par, full_pars_list.index(par), n, tC) for par in pars_list]
return fakt_pars
class Generator:
def __init__(self, dim, types):
self.Rules2act = {
'pars': {'D':'dp', 'S':'dp', 'I':'_d', 'J':'dp', 'X':'dx', 'Y':'d_', 'P':'dd', 'Q':'dd', 'L':'ld', 'M':'dp', 'R':'dr'},
'subacts':{'D':'DDDD', 'S':'SSSS', 'I':'-IID', 'X':'DXX-', 'Y':'DYY-', 'P':'DPPD', 'L':'-LLD', 'R':'DRR-'},
'subacts_mp': {'D':'DD', 'I':'-J', 'J':'JD', 'X':'X-', 'Y':'Y-', 'P':'PQ', 'L':'-M', 'R':'R-'},
'LR':{'L':'IL-', 'R':'-RY'},
}
self.Rules2act_mp = {
#'pars': {'D':'mdp', 'S':'msp', 'J':'_dp', 'X':'mdp', 'P':'mdd', 'Q':'ddp', 'M':'ldp', 'R':'mdr'},
'pars': {'D':'mdp', 'S':'msp', 'J':'_dp', 'X':'mdx', 'P':'mdd', 'Q':'ddp', 'M':'ldp', 'R':'mdr'},
'subacts': {'D':'DD', 'I':'JD', 'J':'JD', 'X':'DX', 'P':'DP', 'Q': 'QD', 'L':'MD', 'M':'MD', 'R':'DR'},
'subactsPIC': {'D':'DDDD', 'J':'-JDD', 'X':'DDX-', 'P':'DDPQ', 'Q': 'PQDD', 'M':'-MDD', 'R':'DDR-'},
}
self.Rules2rank = {}
self.Rules2dim = {}
for s in 'dmps': self.Rules2dim[s] = 1
for s in 'xlr': self.Rules2dim[s] = 0
self.dim=dim
self.par_name_start=-(dim+2)-(dim==1)
self.datTmpl='datas_____'[:2+(dim+1)/2:]+'%0'+'%dd'%((dim+1)/2)
self.types=types
print '// acts: %d'%len(makeCombinations([types]*dim))
pass
def num2list(self, num, bas=2):
numLst = []
for s in xrange(self.dim):
numLst.append(num%bas)
num /= bas
return numLst
def list2num(self, numLst, bas=2):
numLstR = numLst[:]
numLstR.reverse()
return reduce(lambda r,v: r*bas+v, numLstR, 0)
def get_dim(self, pd):
return reduce(lambda r,s: r+self.Rules2dim[s], pd, 0)
def add2rules(self, pd):
self.rules[pd] = 'cubeLR<%d,T%%(Npd)d,%s>'%(self.get_dim(pd),self.Rules2rank.get(pd,self.rank)) + '* const '+self.datTmpl.replace('%','%(Npd)')
self.add2rulesShift(pd)
pass
def add2rulesShift(self, pd):
self.rulesShift[pd] = []
#self.rulesShiftM[pd] = []
for s in xrange(len(pd)):
sh=pd[:s]+'p'+pd[s+1:]
self.rules[sh] = 'const int _I'+sh
self.rulesShift[pd].append('_I'+sh)
shM=pd[:s]+'m'+pd[s+1:]
self.rules[shM] = 'const int _I'+shM
#self.rulesShiftM[pd].append('_I'+shM)
pass
pass
def get_pars(self, datas, shift=0):
full_pars_list = [self.rules.get(pd,'')%{'Npd':i+shift} for (i,pd) in zip(xrange(1<<self.dim),datas)]
pars_list = filter(len, full_pars_list)
pars_list = map(lambda i: pars_list[i], filter(lambda i: pars_list.index(pars_list[i])==i, xrange(len(pars_list))))
return pars_list, full_pars_list
def get_pars_mp(self, datas):
full_pars_list = [self.rules.get(pd,'')%{'Npd':i} for (i,pd) in zip(xrange(3**self.dim),datas)]
pars_list = filter(len, full_pars_list)
pars_list = map(lambda i: pars_list[i], filter(lambda i: pars_list.index(pars_list[i])==i, xrange(len(pars_list))))
return pars_list, full_pars_list
def getTmplPars(self, formal_pars):
template_pars = ','.join(map(lambda s: 'class '+s, filter(lambda s: s[0] is 'T', ''.join(formal_pars).split(','))))
rank_par = ('','int %s'%self.rank)[self.rank is 'rank']
return 'template <%s>'%(', '.join(filter(len,(rank_par,template_pars))))
def makeAct(self, actN):
act = CFact(self, actN)
formal_pars = self.get_pars(act.podDatas)[0]
shift_pars = map(lambda fp: fp[10:], filter(lambda fp: fp[:12] == 'const int _I', formal_pars))
shift_line = ', '.join(map(lambda p: '%s=(%s<<%d)-%d'%(p[1:],p,self.get_dim(p[2:]),1<<self.get_dim(p[2:][:p[2:].index('p')])), shift_pars))
# compute the sub-datas shifts from the name of the datas shift (e.g. from _Ixpd we get Ixpd=(_Ixpd<<2)-1, where 2=dim(xpd) and 1=1<<dim(x)).
print self.getTmplPars(formal_pars)+' inline void %s(%s) {'%(self.actTmpl%actN,', '.join(formal_pars))
if len(shift_line)>0:
if ''.join(self.subactTmpl.keys()) in 'SF': print '//',
print ' const int %s;'%shift_line
if 'B' in self.subactTmpl.keys():
for (a,n,tC) in act.PodActList('B'):
print ' %s(%s);'%(self.subactTmpl['B']%a, ', '.join(CFpodact(act,a).getParsList(n,tC)))
if 'F' in self.subactTmpl.keys():
print ' %s(%s);'%(self.subactTmpl['F']%actN,', '.join(map(lambda p: p[self.par_name_start:], formal_pars)))
if 'S' in self.subactTmpl.keys():
tier=filter(lambda a:'-' not in a, makeCombinations([self.Rules2act['LR'].get(a, '-%c-'%a) for a in actN])); tier.reverse()
for tactN in tier:
tact = CFact(self, tactN)
tformal_pars = self.get_pars(tact.podDatas, shift=self.list2num([{'I':-1,'Y':1}.get(c,0) for c in tactN]))[0]
print ' %s(%s);'%(self.subactTmpl['S']%tactN,', '.join(map(lambda p: p[self.par_name_start:], tformal_pars)))
if 'X' in self.subactTmpl.keys():
for (a,n) in act.PodActList_mp():
print ' %s(%s);'%(self.subactTmpl['X']%a, ', '.join(CFpodact_mp(act,a).getParsList(n,'X')))
if 'T' in self.subactTmpl.keys():
for (a,n,tC) in act.PodActList('T'): print ' %s(%s);'%(self.subactTmpl['T']%a, ', '.join(CFpodact(act,a).getParsList(n,tC)))
print '}'
pass
def makeAct_mp(self, actN):
actT = CFact_mp(self, 'D'*self.dim)
act = CFact_mp(self, actN)
formal_pars = self.get_pars_mp(act.podDatas_mp)[0]
shift_pars = map(lambda fp: fp[10:], filter(lambda fp: fp[:12] == 'const int _I', formal_pars))
shift_line = ', '.join(map(lambda p: '%s=(%s<<%d)%c%d'%(p[1:],p,self.get_dim(p[2:]),"-+-+"[p[2:].count('m')],1<<self.get_dim(p[2:][:p[2:].replace('m','p').index('p')])), shift_pars))
# compute the sub-datas shifts from the name of the datas shift (e.g. from _Ixmd we get Ixmd=(_Ixmd<<2)+1, where 2=dim(xmd) and 1=1<<dim(x)).
print self.getTmplPars(formal_pars)+' inline void %s(%s) {'%(self.actTmpl%actN,', '.join(formal_pars))
if len(shift_line)>0: print ' const int %s;'%shift_line
if 'B' in self.subactTmpl.keys():
for (a,n,tC) in act.PodActList('B'): print ' %s(%s);'%(self.subactTmpl['B']%a, ', '.join(CFpodact(act,a).getParsList(n,tC)))
#if 'F' in self.subactTmpl.keys():
# print ' %s(%s);'%(self.subactTmpl['F']%actN,', '.join(map(lambda p: p[self.par_name_start:], formal_pars)))
# #for (a,n,tC) in act.PodActList('F'): print ' %s(%s);'%(self.subactTmpl['F']%a, ', '.join(CFpodact(act,a).getParsList(n,tC)))
if 'J' in self.subactTmpl.keys():
caseNshift = self.list2num([1]*self.dim, 4)
print ' for(int ic=0; ic<4; ic++) {'
print 'if(dat0->datas[ic].Npts>NptsMax) {\n T0& datT=dat0->datas[ic];\n NptsMax = datT.Npts;\n printf("'+'===%s:'%actN+' inc NptsMax to %d in xyt: %.3g %.3g %d\\n", NptsMax, datT.x, datT.y, datT.it);\n}'
print ' int ip=dat0->datas[ic].Nexch;\n while(ip < dat0->datas[ic].Npts) {\n pts& pt=dat0->datas[ic].ptslist[ip];\n double dstep=1.0;\n do {\n switch(pt.ix+4*pt.iy) {'
for (a,n) in act.PodActList_mpPIC():
nL = map(lambda nt: nt-1, self.num2list(n,4))
parList = CFpodact_mp(act,a).getParsList(nL,'X')
print ' case %d: dstep=pt.%s(dstep, %s); break;'%(n-caseNshift,self.subactTmpl['J']%a, ', '.join(parList))
print ' }\n } while(dstep<1.0);\n if((pt.ix&2)|(pt.iy&2)) {\n int swk=pt.ix+4*pt.iy;\n if(pt.ix<0) pt.ix += 2; else if(pt.ix>1) pt.ix -= 2;\n if(pt.iy<0) pt.iy += 2; else if(pt.iy>1) pt.iy -= 2;\n switch(swk) {'
for (a,n) in act.PodActList_mpPIC():
nL = map(lambda nt: nt-1, self.num2list(n,4))
if len(filter(lambda _n: _n in (-1,2), nL))==0: continue;
datPtr = '(%s)'%CFpodact_mp(act,a).getParsList(nL,'X')[0]
oldPtr = 'dat0->datas[ic]'
Npts = datPtr+'->Npts'
Nxch = datPtr+'->Nexch'
case_dict = {'ptN': datPtr, 'ptO': oldPtr,'Np': Npts,'Nx':Nxch}
print ' case %(n)d:'%{'n':n-caseNshift},
#print 'if(%(ptO)s.it > %(ptN)s->it) printf("Illegal Exch!\\n"); else'%case_dict,
print 'if(%(ptO)s.it > %(ptN)s->it) printf("Illegal Exch!\\n"); else if(%(ptO)s.it < %(ptN)s->it) %(ptN)s->ptslist[%(Np)s].copyfrom(pt); else { %(ptN)s->ptslist[%(Np)s].copyfrom(%(ptN)s->ptslist[%(Nx)s]); %(ptN)s->ptslist[%(Nx)s].copyfrom(pt); %(Nx)s++; } %(Np)s++; break;'%case_dict
#print 'if(%(ptO)s.it < %(ptN)s->it) %(ptN)s->ptslist[%(Np)s].copyfrom(pt); else { %(ptN)s->ptslist[%(Np)s].copyfrom(%(ptN)s->ptslist[%(Nx)s]); %(ptN)s->ptslist[%(Nx)s].copyfrom(pt); %(Nx)s++; } %(Np)s++; break;'%case_dict
print ' }\n dat0->datas[ic].Npts--;\n if(ip<dat0->datas[ic].Npts) pt.copyfrom(dat0->datas[ic].ptslist[dat0->datas[ic].Npts]);\n } else ip++;\n }\n dat0->datas[ic].Nexch=0; dat0->datas[ic].it++;\n }'
if 'X' in self.subactTmpl.keys():
for (a,n) in act.PodActList_mp():
print ' for(int ip=0; ip<Nz; ip++) dat0->datas[%d].ptslist[ip].%s(1.0, %s);'%(n,self.subactTmpl['X']%a, ', '.join(CFpodact_mp(act,a).getParsList(n,'X')))
if 'T' in self.subactTmpl.keys():
for (a,n,tC) in act.PodActList('T'): print ' %s(%s);'%(self.subactTmpl['T']%a, ', '.join(CFpodact(act,a).getParsList(n,tC)))
print '}'
pass
def genConeFold(self, rank='rank', actTmpl=r'%sactCF', subactTmpl=None, knot='p', exclude=[], acts4gen=[]):
'''Prints a ConeFold of the given rank, name and type:
rank --- name of the rank (or of a range of ranks), a string;
actTmpl --- name template, a string onto which the act name is applied;
subactTmpl --- splitting rules and names of the sub-ConeFolds (of smaller rank) into which the ConeFold is split,
a dictionary whose keys are the time layers B/T/F/X --- bottom/top/flat/flat, the levels of the sub-ConeFolds'''
if len(exclude): print '// exclude up to: %d'%len(exclude)
self.rank=rank
self.actTmpl=actTmpl
if subactTmpl is None: subactTmpl = { 'BT' : actTmpl }
if type(subactTmpl) is str: subactTmpl = { subactTmpl : actTmpl }
self.subactTmpl={}; map(lambda k: self.subactTmpl.update(dict(zip(k,(subactTmpl[k],)*len(k)))), subactTmpl.keys())
self.rules,self.rulesShift={},{}
datasTypes = reduce(lambda r,t: r+filter(lambda c: c not in r+'_mp', self.Rules2act['pars'][t]), self.types, '')
for pd in makeCombinations([datasTypes]*self.dim): self.add2rules(pd)
if len(acts4gen) == 0: acts4gen = makeCombinations([self.types]*self.dim)
for a in acts4gen:
if a in exclude: continue
if knot == 'p': self.makeAct(a)
elif knot == 'mp': self.makeAct_mp(a)
pass
pass
#stdout, sys.stdout = sys.stdout,open('Test.inc.hpp', 'w')
#gPJ = Generator(dim=2, types='JDX')
#gPJ.genConeFold(rank="FFRank-1", actTmpl=r'PIC2update%s', subactTmpl={'J': r'PIC2update%s'}, knot='mp')
dim=3
incpath = sys.argv[0][:sys.argv[0].find(sys.argv[0].split('/')[-1])]
stdout, sys.stdout = sys.stdout,open(incpath+'CF2Dpic.inc.hpp', 'w')
g = Generator(dim=dim, types='IDX')
g.genConeFold(actTmpl=r'update%s')
g.genConeFold(actTmpl=r'FLDupdate%s')
#g.genConeFold(actTmpl=r'FLDupdate%s', subactTmpl={'B': r'FLDupdate%s'})
sys.stdout.close(); sys.stdout = stdout
stdout, sys.stdout = sys.stdout,open(incpath+'CF2Dpic.inc.hpp', 'w')
print 'int NptsMax=0;'
gP = Generator(dim=dim, types='IDX')
gPJ = Generator(dim=dim, types='JDX')
g.genConeFold(actTmpl=r'picNfld%s', subactTmpl={'B': r'picNfld%s', 'T':r'FLDupdate%s'})
gP.genConeFold(rank="PicRank+1", actTmpl=r'update%s', subactTmpl={'B': r'picNfld%s', 'T':r'FLDupdate%s'})
gP.genConeFold(rank="FFRank-1", actTmpl=r'PIC1update%s', subactTmpl={'X': r'PIC1update%s'})
gPJ.genConeFold(rank="FFRank-1", actTmpl=r'PIC2update%s', subactTmpl={'J': r'PIC2update%s'}, knot='mp')
gP.genConeFold(rank="FFRank-1", actTmpl=r'PIC3update%s', subactTmpl={'X': r'PIC3update%s'})
gP.genConeFold(rank="FFRank", actTmpl=r'pic%s', subactTmpl={'B': r'PIC1update%s', 'X':r'PIC2update%s', 'T':r'PIC3update%s'})
gP.genConeFold(rank="FFRank+1", actTmpl=r'picNfld%s', subactTmpl={'B': r'pic%s', 'T':r'FLDupdate%s'})
sys.stdout.close(); sys.stdout = stdout
#---------------PML-------------------------------------------
stdout, sys.stdout = sys.stdout,open(incpath+'CF2Dpic.inc.hpp', 'w')
#============= Non-PML ConeFold
g = Generator(dim=dim, types='DX')
#acts = ['DD','DX']
acts = ['D'*dim]
#============= BC ConeFold for rank>PMLrank
gBC = Generator(dim=dim, types='LDRX')
gBC.Rules2rank['d'*dim] = 'rank+PMLrank'
#gBC.Rules2rank['dx'] = 'rank+PMLrank'
actsBC = makeCombinations(['LDR','LDR','LDX'])
#============= PML ConeFold for rank<=PMLrank
gPML = Generator(dim=dim, types='ILDSRYX')
gPML.Rules2act['subacts'].update({'I':'-IIS', 'Y':'SYY-', 'S':'SSSS', 'L':'SLLD', 'R':'DRRS'})
gPML.Rules2act['pars'].update({'S':'sp', 'L':'sd', 'R':'ds', 'Y':'s_', 'I':'_s', 'J':'sp'})
for s in 'lr': gPML.Rules2dim[s] = 1
actsPML = makeCombinations(['ILDSRY','ILDSRY','ILDSX'])
print '//===========any rank==============DX'
g.genConeFold(actTmpl=r'%sact', acts4gen=acts)
print '//=========rank>PMLrank============LDR/X'
gBC.genConeFold(actTmpl=r'%sact', acts4gen=actsBC, exclude=acts)
print '//=========rank<=PMLrank============DX'
g.genConeFold(actTmpl=r'%sactPML', subactTmpl={'F':r'%sact'}, acts4gen=acts)
print '//=========rank<PMLrank============ILDSRY'
gPML.genConeFold(actTmpl=r'%sactPML', acts4gen=actsPML, exclude=acts)
print '//=========rank=PMLrank============LDR/X'
gPML.genConeFold(rank='PMLrank', actTmpl=r'%sact', subactTmpl={'S':r'%sactPML'}, acts4gen=actsBC, exclude=acts)
sys.stdout.close(); sys.stdout = stdout
| gpl-2.0 | -776,956,531,473,899,800 | 54.430446 | 291 | 0.592831 | false |
atom-bomb/drill_from_image | drill_from_image.py | 1 | 11733 | #!/usr/bin/python
#
# Hey, here's a thing:
#
# You can use this bit of python script to generate GCode to drill a PCB based on an image file that you used
# to etch the board.
#
# This script makes GCode to drill the center of sections of an image that are a given color or brightness.
#
# All you need to do is load the image file that you used to etch and color the things you want drilled.
# This should be easy since all of your drills are probably surrounded by traces and all of your traces are
# probably colored black. Just use your favorite graphic editor (such as gimp) to flood fill parts of the board
# that aren't traces or drills, leaving the drills as the only thing that are white.
#
# Run this script on your edited image and you'll get some GCode.
#
# Before you run the GCode, jog the spindle over where you want the topmost, leftmost hole to be drilled and
# zero your machine.
# The GCode will begin my moving over where the bottommost, rightmost hole would be drilled.
# Move your workpiece, return to zero rewind and restart the GCode until your machine lines up with both drills,
# then you can allow the machine to continue to drill your board.
#
from __future__ import print_function
import sys
import math
from PIL import Image
import subprocess
import re
import argparse
class BoundingBox:
def __init__(self):
self.coord = [[0, 0], [0, 0]]
self.empty = 1
def intersects(self, box):
return (((1 ^ self.empty) and (1 ^ box.empty)) and
((self.coord[0][0] < box.coord[1][0]) and
(self.coord[0][1] < box.coord[1][1]) and
(self.coord[1][0] > box.coord[0][0]) and
(self.coord[1][1] > box.coord[0][1])))
def center(self):
return [self.coord[0][0] + ((self.coord[1][0] - self.coord[0][0]) / 2),
self.coord[0][1] + ((self.coord[1][1] - self.coord[0][1]) / 2)]
def boundCoord(self, coord):
if (self.empty):
self.coord[0][0] = coord[0]
self.coord[0][1] = coord[1]
self.coord[1][0] = coord[0]
self.coord[1][1] = coord[1]
self.empty = 0
else:
if (coord[0] < self.coord[0][0]):
self.coord[0][0] = coord[0]
if (coord[1] < self.coord[0][1]):
self.coord[0][1] = coord[1]
if (coord[0] > self.coord[1][0]):
self.coord[1][0] = coord[0]
if (coord[1] > self.coord[1][1]):
self.coord[1][1] = coord[1]
class BoundingBoxList:
def __init__(self):
self.boxes = []
def addBox(self, box):
for oldBox in self.boxes:
if (oldBox.intersects(box)):
return
self.boxes.append(box)
# use ImageMagick to figure out how many pixels per inch or cm in the image file
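# (The `identify -format "%x,%y" -units PixelsPerInch <file>` call below typically
# prints something like "300 PixelsPerInch,300 PixelsPerInch", although the exact
# format varies between ImageMagick versions; only the leading numbers are kept.)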
def getDensity(filename, units = "PixelsPerInch"):
pipe = subprocess.Popen(["identify", "-format", "%x,%y", "-units", units, filename],
stdout=subprocess.PIPE)
res = re.sub('[\t\r\n"]', '', pipe.communicate()[0]).split(',')
xres = float(res[0].split(' ')[0])
yres = float(res[1].split(' ')[0])
return [xres, yres]
# make a list of drill points from an image map
class DrillMap:
def __init__(self, filename, units = 'Inches', density = [], rgbThresh = 127 * 3):
self.image = Image.open(filename)
self.pixmap = self.image.load()
if (len(density) == 0):
if (units == 'Inches'):
self.density = getDensity(filename)
else:
cmDensity = getDensity(filename, units = 'PixelsPerCentimeter')
self.density = [float(cmDensity[0]) / 10, float(cmDensity[1]) / 10]
else:
self.density = density ;
self.rgbThresh = rgbThresh ;
self.boxlist = BoundingBoxList()
self.drillList = []
self.findBoxes()
self.makeDrillList()
def coordOffset(self, coord):
return [float(coord[0]) / float(self.density[0]), float(coord[1]) / float(self.density[1])]
def isCoordOn(self, coord):
pixel = self.pixmap[coord[0], coord[1]]
if (self.image.mode == "RGB"):
sum = pixel[0] + pixel[1] + pixel[2]
return (sum > self.rgbThresh)
if (self.image.mode == "1"):
return pixel
def scanLeftToBox(self, coord, box):
y = coord[1]
x = coord[0]
while ((x >= 0) and self.isCoordOn([x, y])):
box.boundCoord([x, y])
x = x - 1
return (x != coord[0])
def scanRightToBox(self, coord, box):
y = coord[1]
x = coord[0]
while ((x <= self.image.size[0] - 1) and self.isCoordOn([x, y])):
box.boundCoord([x, y])
x = x + 1
return (x != coord[0])
def scanLineToBox(self, coord, box):
return (self.scanLeftToBox(coord, box) or self.scanRightToBox(coord, box))
def scanUpperLineToBox(self, coord, box):
if (coord[1] > 0):
upperCoord = [int(box.center()[0]), coord[1] - 1]
if (self.scanLineToBox(upperCoord, box)):
self.scanUpperLineToBox(upperCoord, box)
def scanLowerLineToBox(self, coord, box):
if (coord[1] < self.image.size[1] - 1):
lowerCoord = [box.center()[0], coord[1] + 1]
if (self.scanLineToBox(lowerCoord, box)):
self.scanLowerLineToBox(lowerCoord, box)
def scanToBox(self, coord):
box = BoundingBox()
if (self.scanRightToBox(coord, box)):
self.scanUpperLineToBox(coord, box)
self.scanLowerLineToBox(coord, box)
return box
def findBoxes(self):
y = 0
while (y < self.image.size[1] - 1):
x = 0
while (x < self.image.size[0] - 1):
if (self.isCoordOn([x, y])):
newBox = self.scanToBox([x, y])
if (not newBox.empty):
self.boxlist.addBox(newBox)
x = newBox.coord[1][0] + 1
else:
x += 1
else:
x += 1
y += 1
def makeDrillList(self):
for eachBox in self.boxlist.boxes:
self.drillList.append(self.coordOffset(eachBox.center()))
class GCode:
GCodeCommands = {'Mach3': {
'Message': '(',
'Stop': 'M0',
'Sleep': 'M01',
'SpindleCW': 'M03',
'SpindleCCW': 'M04',
'SpindleStop': 'M05',
'ToolChange': 'M06',
'Pause': 'M60',
'FastMove': 'G0',
'SlowMove': 'G1',
'Dwell': 'G4',
'InchesMode': 'G20',
'MillimetersMode': 'G21',
'MoveToOrigin': 'G28',
'ClearToolOffet': 'G49',
'Drill': 'G81',
'DrillWithDwell': 'G82',
'AbsoluteMode': 'G90',
'RelativeMode': 'G91',
'SetPosition': 'G92',
},
'EMC': {
'Message': '(MSG,',
'Stop': 'M0',
'Sleep': 'M01',
'SpindleCW': 'M03',
'SpindleCCW': 'M04',
'SpindleStop': 'M05',
'ToolChange': 'M06',
'Pause': 'M60',
'FastMove': 'G0',
'SlowMove': 'G1',
'Dwell': 'G4',
'InchesMode': 'G20',
'MillimetersMode': 'G21',
'MoveToOrigin': 'G28',
'ClearToolOffet': 'G49',
'Drill': 'G81',
'DrillWithDwell': 'G82',
'AbsoluteMode': 'G90',
'RelativeMode': 'G91',
'SetPosition': 'G92',
}}
def __init__(self, theGCodeType):
self.variant = theGCodeType
def Comment(self, string):
return " ; " + string
def Message(self, string):
return self.GCodeCommands[self.variant]['Message'] + string + " )"
def Pause(self):
return self.GCodeCommands[self.variant]['Pause']
def Spindle(self, Mode):
SpindleModes = {'Stop': 'SpindleStop', 'CW': 'SpindleCW', 'CCW': 'SpindleCCW'}
return self.GCodeCommands[self.variant][SpindleModes[Mode]]
def Units(self, theUnits):
if (theUnits == 'Inches'):
return self.GCodeCommands[self.variant]['InchesMode']
else:
return self.GCodeCommands[self.variant]['MillimetersMode']
def Absolute(self, isAbsolute = True):
if (isAbsolute):
return self.GCodeCommands[self.variant]['AbsoluteMode']
else:
return self.GCodeCommands[self.variant]['RelativeMode']
def _CommonArgs(self, X = None, Y = None, Z = None, rate = None):
OutStr = ''
if (X != None):
OutStr += ' X' + format(X, ".4f")
if (Y != None):
OutStr += ' Y' + format(Y, ".4f")
if (Z != None):
OutStr += ' Z' + format(Z, ".4f")
if (rate != None):
OutStr += ' F' + format(rate, ".4f")
return OutStr
def Move(self, X = None, Y = None, Z = None, rate = None, speed='Fast'):
OutStr = self.GCodeCommands[self.variant][speed + 'Move']
OutStr += self._CommonArgs(X = X, Y = Y, Z = Z, rate = rate)
return OutStr
def Dwell(self, seconds = 1):
OutStr = self.GCodeCommands[self.variant]['Dwell'] + ' P' + `seconds`
return OutStr
def Drill(self, X = None, Y = None, Z = None, retract = None, seconds = None, rate = None):
if (seconds != None):
OutStr = self.GCodeCommands[self.variant]['DrillWithDwell']
OutStr += ' P' + `seconds`
else:
OutStr = self.GCodeCommands[self.variant]['Drill']
OutStr += self._CommonArgs(X = X, Y = Y, Z = Z, rate = rate)
if (retract != None):
OutStr += ' R' + `retract`
return OutStr
# -------- execution starts here
# parse parameters
# TODO: add density parameter & drill color parameter & check for ImageMagick
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='store_true', help='spew possibly useless output')
parser.add_argument('-m', '--millimeters',
action='store_const', dest='units', const='Millimeters', help='set units to millimeters')
parser.add_argument('-i', '--inches',
action='store_const', dest='units', const='Inches', help='set units to inches')
parser.add_argument('-a', '--mach3',
action='store_const', dest='gcode', const='Mach3', help='set gcode type to mach3')
parser.add_argument('-e', '--emc',
action='store_const', dest='gcode', const='EMC', help='set gcode type to emc')
parser.add_argument('-s', '--safe',
default=0.25, type=float, help='safe height')
parser.add_argument('-d', '--drill',
default=-0.2, type=float, help='drill depth')
parser.add_argument('-p', '--dwell',
default=0.5, type=float, help='drill dwell')
parser.add_argument('-f', '--feed',
default=100.0, type=float, help='feed rate')
parser.add_argument('input')
args = parser.parse_args()
if (args.gcode == None):
args.gcode = 'Mach3'
if (args.units == None):
args.units = 'Inches'
theMap = DrillMap(args.input, args.units)
# make drill coordinates relative to first drill
if (theMap.drillList):
firstCoord = theMap.drillList[0]
relativeDrillList = []
for drill in theMap.drillList:
newCoord = [drill[0] - firstCoord[0], drill[1] - firstCoord[1]]
relativeDrillList.append(newCoord)
# output gcode for the list of drills
# init machine, set units, zero axes
gc = GCode(args.gcode)
print(gc.Spindle('Stop'))
print(gc.Units(args.units))
print(gc.Absolute())
print(gc.Pause(), gc.Comment('Check that tool is aligned with first drill'))
print(gc.Move(Z = args.safe))
# move to last drill position and pause
lastDrill = len(relativeDrillList) - 1
print(gc.Move(X = relativeDrillList[lastDrill][0], Y = relativeDrillList[lastDrill][1]))
print(gc.Pause())
print(gc.Pause(), gc.Comment('Check that tool is aligned with last drill'))
print(gc.Spindle('CW'))
print(gc.Dwell(3))
print(gc.Message('Drilling'))
# move to each drill position and drill
for eachDrill in relativeDrillList:
print(gc.Drill(X = eachDrill[0], Y = eachDrill[1], Z = args.drill, retract = args.safe, seconds = args.dwell))
# end of GCode program
print(gc.Spindle('Stop'))
print(gc.Pause())
| unlicense | 695,087,722,139,142,000 | 32.618911 | 112 | 0.594903 | false |
735tesla/SkypeDump | skypedump.py | 1 | 6469 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import unicodedata
import webbrowser
import hashlib
import sqlite3 as sqlite
import xml.etree.ElementTree as ET
import platform
import sys
class ChatMessage(object):
def __init__(self):
super(ChatMessage, self).__init__()
self.from_username = '[data not available]'
self.to_username = '[data not available]'
self.message_body = '[data not available]'
@property
def from_username(self):
return self._from_username
@from_username.setter
def from_username(self, value):
self._from_username = value
@property
def to_username(self):
return self._to_username
@to_username.setter
def to_username(self, value):
self._to_username = value
@property
def message_body(self):
return self._message_body
@message_body.setter
def message_body(self, value):
self._message_body = value
def to_html(self):
html = """
<tr>
<td>__from_username__</td>
<td>__to_username__</td>
<td>__msg_body__</td>
</tr>
"""
html = html.replace('__from_username__', self.from_username)
html = html.replace('__to_username__', self.to_username)
html = html.replace('__msg_body__', self.message_body)
return html
class SkypeUser(object):
def __init__(self):
super(SkypeUser, self).__init__()
self.actual_name = '[data not available]'
self.username = '[data not available]'
self.birthday = '[data not available]'
self.phone_home = '[data not available]'
self.phone_mobile = '[data not available]'
self.email = '[data not available]'
@property
def actual_name(self):
return self._actual_name
@actual_name.setter
def actual_name(self, value):
self._actual_name = value
@property
def username(self):
return self._username
@username.setter
def username(self, value):
self._username = value
@property
def birthday(self):
return self._birthday
@birthday.setter
def birthday(self, value):
self._birthday = str(value)
@property
def phone_home(self):
return self._phone_home
@phone_home.setter
def phone_home(self, value):
self._phone_home = value
@property
def phone_mobile(self):
return self._phone_mobile
@phone_mobile.setter
def phone_mobile(self, value):
self._phone_mobile = value
@property
def email(self):
return self._email
@email.setter
def email(self, value):
self._email = value
def to_html(self):
html = """
<tr>
<td>__username__</td>
<td>__fullname__</td>
<td>__birthday__</td>
<td>__homphone__</td>
<td>__mobphone__</td>
<td>__theemail__</td>
</tr>
"""
html = html.replace('__username__', self.username)
html = html.replace('__fullname__', self.actual_name)
html = html.replace('__birthday__', self.birthday)
html = html.replace('__homphone__', self.phone_home)
html = html.replace('__mobphone__', self.phone_mobile)
html = html.replace('__theemail__', self.email)
return html
def process_skype_database(db_file):
messages = []
user = None
database_connection = sqlite.connect(db_file)
database_cursor = database_connection.cursor()
database_cursor.execute('SELECT author,dialog_partner,body_xml FROM Messages')
for from_username,to_username,body_xml in database_cursor.fetchall():
chatmessage = ChatMessage()
if from_username:
chatmessage.from_username = from_username
if to_username:
chatmessage.to_username = to_username
if body_xml:
chatmessage.message_body = body_xml
messages.append(chatmessage)
database_cursor.execute('SELECT skypename,fullname,birthday,phone_home,phone_mobile,emails from Accounts')
xml_root = ET.parse('/'.join(db_file.split('/')[:-1])+'/config.xml').getroot()
auth_data = xml_root[0][0][0].text # TODO: find out how to decrypt this
user = SkypeUser()
user_data = database_cursor.fetchone()
if user_data[0]:
user.username = user_data[0]
if user_data[1]:
user.actual_name = user_data[1]
if user_data[2]:
user.birthday = user_data[2]
if user_data[3]:
user.phone_home = user_data[3]
if user_data[4]:
user.phone_mobile = user_data[4]
if user_data[5]:
user.email = user_data[5]
return (user, messages)
def verify_os_type():
if platform.system() != 'Darwin':
sys.stderr.write('[!] Incompatible operating system\n')
exit(-1)
def get_db_list():
db_files = []
home_dir = os.path.expanduser("~")
db_dir = home_dir+'/Library/Application Support/Skype'
for the_dir in os.listdir(db_dir):
if os.path.isdir(db_dir+'/'+the_dir) and the_dir not in ('DataRv', 'EmoticonCache.bundle', 'shared_dynco', 'shared_httpfe'):
db_files.append(db_dir+'/'+the_dir+'/main.db')
return db_files
def main(args):
html = """
<!DOCTYPE html>
<html>
<head>
<meta charset='utf-8'>
<title>SkypeDump Output Table</title>
<link rel="stylesheet" href="http://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css">
<link rel="stylesheet" href="http://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap-theme.min.css">
<script src="http://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>
<script src="http://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/js/bootstrap.min.js"></script>
<style type="text/css">
.sd-table{
margin: 20px;
}
</style>
</head>
<body>
<div class="sd-table">
<table class="table">
<thead>
<tr>
<th>Skype Username:</th>
<th>Real Name:</th>
<th>Birthday:</th>
<th>Home Phone #:</th>
<th>Cell Phone #:</th>
<th>Email:</th>
</tr>
</thead>
<tbody>
__USER_DATA__
</tbody>
</table>
<table class="table">
<thead>
<tr>
<th>From:</th>
<th>To:</th>
<th>Message:</th>
</tr>
</thead>
<tbody>
__MESSAGE_DATA__
</tbody>
</table>
</div>
</body>
</html>
"""
user_html = ''
message_html = ''
for db_file in get_db_list():
print "[*] Processing database: %s\n" % (db_file)
user_info, messages_info = process_skype_database(db_file)
user_html += user_info.to_html()
for message in messages_info:
message_html += message.to_html()
html = html.replace('__USER_DATA__', user_html)
html = html.replace('__MESSAGE_DATA__', message_html)
html = unicodedata.normalize('NFKD', html).encode('ascii', 'ignore')
html = re.sub(r'[^\x00-\x7F]+', '', html)
with open('/tmp/skype_db.html', 'w') as f:
f.write(html)
webbrowser.open_new_tab('/tmp/skype_db.html')
if __name__ == '__main__':
main(sys.argv)
| gpl-2.0 | 962,441,054,399,818,200 | 27.004329 | 126 | 0.64554 | false |
jtauber/czerny | prototypes/process_hanon_21.py | 1 | 2417 | #!/usr/bin/env python
from align import nw_align
def load_score(filename):
score = []
for line in open(filename):
note, duration_64 = line.strip().split()
note = int(note)
duration_64 = int(duration_64)
score.append((note, duration_64))
return score
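# (Each line of the score file is expected to hold "<note-number> <duration_64>",
# e.g. "60 16", as implied by the parsing above.)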
def load_performance(filename):
performance = []
# dictionary mapping pitch to offset and velocity of event when that pitch
# was started
note_started = {}
for line in open(filename):
offset, note, velocity = line.strip().split()
offset = int(float(offset) * 1000000)
note = int(note)
velocity = int(velocity)
if velocity > 0:
if note in note_started:
# new note at that pitch started before previous finished
# not sure it should happen but let's handle it anyway
(start_offset, start_velocity) = note_started.pop(note)
duration = offset - start_offset
performance.append(
(start_offset, note, start_velocity, duration))
note_started[note] = (offset, velocity)
else: # note end
if note not in note_started:
# note was never started so ignore
pass
else:
(start_offset, start_velocity) = note_started.pop(note)
duration = offset - start_offset
performance.append(
(start_offset, note, start_velocity, duration))
return performance
# similarity measure used by Needleman-Wunsch algorithm
def note_similarity(score_note, performance_note):
# at the moment we just give a 1 if the pitch matches, 0.5 if it's
# within a tone and 0 if more
# over time this can be tweaked to include velocity, duration, etc
if score_note[0] == performance_note[1]:
return 1
elif abs(score_note[0] - performance_note[1]) < 3:
return 0.5
else:
return 0
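# For example (illustrative values, not from the original file), with a score note
# of the form (pitch, duration_64) and a performance note of the form
# (offset, pitch, velocity, duration):
#   note_similarity((60, 16), (0, 60, 80, 15))  # -> 1   (same pitch)
#   note_similarity((60, 16), (0, 62, 80, 15))  # -> 0.5 (within a tone)
#   note_similarity((60, 16), (0, 65, 80, 15))  # -> 0   (more than a tone apart)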
if __name__ == "__main__":
score = load_score("../examples/scores/hanon_21_rh.txt")
performance = load_performance("../examples/recordings/hanon_21_rh.txt")
# align score and performance using above similarity function and a penalty
# of -1 for insertions and deletions @@@ might need a lot of tweaking
for i in nw_align(score, performance, note_similarity, -1, -1):
print i
| mit | 829,367,193,271,788,400 | 29.2125 | 79 | 0.599917 | false |
Farthen/OTFBot | otfbot/plugins/ircClient/seen.py | 1 | 2384 | # This file is part of OtfBot.
#
# OtfBot is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# OtfBot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OtfBot; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# (c) 2009 by Thomas Wiegart
#
import pickle,time,os
from otfbot.lib import chatMod
class Plugin(chatMod.chatMod):
def __init__(self, bot):
self.bot = bot
try:
os.mkdir(datadir)
except OSError:
pass
try:
f = file(datadir + "/users", "rb")
self.userdata = pickle.load(f)
f.close()
except IOError:
self.userdata = [{}]
self.bot.root.getServiceNamed('scheduler').callLater(60, self.save_data) #TODO: call this only on exit
def joined(self,channel):
try:
self.userdata[0][channel]
except KeyError:
self.userdata[0][channel] = {}
def msg(self, user, channel, msg):
if channel[0] == "#":
self.userdata[0][channel][user.split("!")[0].lower()] = {'msg':msg, 'time':time.time()}
def command(self, user, channel, command, options):
if command == "seen":
try:
zeit = self.userdata[0][channel][options.lower()]['time']
msg = self.userdata[0][channel][options.lower()]['msg']
self.bot.sendmsg(channel,"user " + options + " was last seen on " + str(time.strftime("%a, %d %b %Y %H:%M:%S",time.localtime(zeit))) + " saying '" + msg + "'.")
except:
self.bot.sendmsg(channel,"user " + options + " is unknown")
def stop(self):
self.save_data()
def save_data(self):
f = file(datadir + "/users", "wb")
pickle.dump(self.userdata, f)
f.close()
self.bot.root.getServiceNamed('scheduler').callLater(60, self.save_data)
| gpl-2.0 | -1,188,863,002,668,787,700 | 36.25 | 176 | 0.599832 | false |
QuantiModo/QuantiModo-SDK-Python | SwaggerPetstore/models/connector.py | 1 | 3170 | #!/usr/bin/env python
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class Connector(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Swagger model
:param dict swaggerTypes: The key is attribute name and the value is attribute type.
:param dict attributeMap: The key is attribute name and the value is json key in definition.
"""
self.swagger_types = {
'id': 'int',
'name': 'str',
'display_name': 'str',
'image': 'str',
'get_it_url': 'str',
'connected': 'str',
'connect_instructions': 'str',
'last_update': 'int',
'latest_data': 'int',
'no_data_yet': 'bool'
}
self.attribute_map = {
'id': 'id',
'name': 'name',
'display_name': 'displayName',
'image': 'image',
'get_it_url': 'getItUrl',
'connected': 'connected',
'connect_instructions': 'connectInstructions',
'last_update': 'lastUpdate',
'latest_data': 'latestData',
'no_data_yet': 'noDataYet'
}
# Connector ID number
self.id = None # int
# Connector lowercase system name
self.name = None # str
# Connector pretty display name
self.display_name = None # str
# URL to the image of the connector logo
self.image = None # str
# URL to a site where one can get this device or application
self.get_it_url = None # str
# True if the authenticated user has this connector enabled
self.connected = None # str
# URL and parameters used when connecting to a service
self.connect_instructions = None # str
# Epoch timestamp of last sync
self.last_update = None # int
# Number of measurements obtained during latest update
self.latest_data = None # int
# True if user has no measurements for this connector
self.no_data_yet = None # bool
def __repr__(self):
properties = []
for p in self.__dict__:
if p != 'swaggerTypes' and p != 'attributeMap':
properties.append('{prop}={val!r}'.format(prop=p, val=self.__dict__[p]))
return '<{name} {props}>'.format(name=__name__, props=' '.join(properties))
| gpl-2.0 | -8,626,761,504,861,406,000 | 31.346939 | 100 | 0.561199 | false |
mvsaha/blahb | blahb/label.py | 1 | 15233 | import numba
from .utils import exponential_search
from .strgen import *
def _split_init_into_coords_init_str(dim):
return "coords_{} = loc[:, {}]".format(dim, dim)
def split_init_into_coords_init_str(ndim):
return '\n'.join([_split_init_into_coords_init_str(dim)
for dim in range(ndim)])
update_cursor_dim_0_base_string = """
if shift_0:
left_edge_0 = c0 - r0
if shift_0 >= {P_shape_0}:
# Re-initialize first cursor when all spans are invalidated
cursors_0[0] = exponential_search(
coords_0, left_edge_0, start=ends_0[-1])
ends_0[0] = exponential_search(
coords_0, left_edge_0 + 1, start=cursors_0[0])
else:
# Shift the spans that are still valid, but cursors must be reset
for sh in range({P_shape_0} - shift_0): #
cursors_0[sh] = ends_0[sh + shift_0 - 1]
ends_0[sh] = ends_0[sh + shift_0]
# Initialize cursors/ends for positions that are not shifted
shift_0 = min(shift_0, {P_shape_0})
for sh in range({P_shape_0} - shift_0, {P_shape_0}):
cursors_0[sh] = exponential_search(
coords_0, left_edge_0 + sh, ends_0[sh - 1])
ends_0[sh] = exponential_search(
coords_0, left_edge_0 + sh + 1, start=cursors_0[sh])
if shift_0:
shift_1 = np.int64({P_shape_1})
shift_0 = np.int64(coords_0[i_coord + 1] - c0)
c0 = coords_0[i_coord + 1]
"""
def update_cursor_section_dim_0(neigh_shape):
"""Propagate shift should be true if there is more than one dimension"""
if not len(neigh_shape) > 1:
raise ValueError(
"Use specialized 1d labeling function for 1d pixelsets.")
return update_cursor_dim_0_base_string.format(
P_shape_0=neigh_shape[0],
P_shape_1=neigh_shape[1],
)
init_loop_base_string = """
start = cursors_{dim_minus_1}[{lower_dim_index}]
stop = ends_{dim_minus_1}[{lower_dim_index}]
cursors_{dim}[{lower_dim_index}, 0] = exponential_search(
coords_{dim}, left_edge_{dim}, start=start, stop=stop)
ends_{dim}[{lower_dim_index}, 0] = exponential_search(
coords_{dim}, left_edge_{dim} + 1, start=cursors_{dim}[{lower_dim_index}, 0], stop=stop)
"""
def param_init_loop(shp, dim):
assert dim <= len(shp)
lower_dim_index = ', '.join(
[i_(low_dim) for low_dim in range(dim)]) # 'i0, i1, ...'
body = init_loop_base_string.format(
dim=dim,
dim_minus_1=dim - 1,
lower_dim_index=lower_dim_index,
)
return loop_over_shape(shp[:dim], body)
shift_loop_base_string = """
for sh in range({dim_shape} - shift_{dim}): #
cursors_{dim}[{lower_dim_index}, sh] = ends_{dim}[{lower_dim_index}, sh + shift_{dim} - 1]
ends_{dim}[{lower_dim_index}, sh] = ends_{dim}[{lower_dim_index}, sh + shift_{dim}]
"""
def param_shift_loop(shp, dim):
assert len(shp) > dim
lower_dim_index = ', '.join(
[i_(low_dim) for low_dim in range(dim)]) # 'i0, i1, ...'
body = shift_loop_base_string.format(
dim=dim,
dim_shape=shp[dim],
lower_dim_index=lower_dim_index,
)
return loop_over_shape(shp[:dim], body)
set_higher_shift_string = """shift_{dim_plus_1} = {dim_plus_1_shape}"""
def param_set_higher_shift(shp, dim):
if len(shp) - dim < 2:
return ''
else:
return set_higher_shift_string.format(
dim_plus_1=dim + 1, dim_plus_1_shape=shp[dim + 1])
set_new_cursor_loop_base_exponential_search_string = """
start = cursors_{dim_minus_1}[{lower_dim_index}]
stop = ends_{dim_minus_1}[{lower_dim_index}]
for sh in range({dim_shape} - shift_{dim}, {dim_shape}):
start = max(start, ends_{dim}[{lower_dim_index}, sh - 1])
cursors_{dim}[{lower_dim_index}, sh] = exponential_search(
coords_{dim}, left_edge_{dim} + sh, start=start, stop=stop)
ends_{dim}[{lower_dim_index}, sh] = exponential_search(
coords_{dim}, left_edge_{dim} + sh + 1,
start=cursors_{dim}[{lower_dim_index}, sh], stop=stop)
"""
set_new_cursor_loop_base_linear_search_string = """
start = cursors_{dim_minus_1}[{lower_dim_index}]
stop = ends_{dim_minus_1}[{lower_dim_index}]
for sh in range({dim_shape} - shift_{dim}, {dim_shape}):
start = max(start, ends_{dim}[{lower_dim_index}, sh - 1])
for i in range(start, stop + 1):
if coords_{dim}[i] >= left_edge_{dim} + sh or i == stop:
cursors_{dim}[{lower_dim_index}, sh] = i
break
start = cursors_{dim}[{lower_dim_index}, sh]
for i in range(start, stop + 1):
if coords_{dim}[i] > left_edge_{dim} + sh or i == stop:
ends_{dim}[{lower_dim_index}, sh] = i
break
"""
def param_set_new_cursor_loop(shp, dim):
assert len(shp) > dim
lower_dim_index = ', '.join(
[i_(low_dim) for low_dim in range(dim)]) # 'i0, i1, ...'
if dim < 2:
base_str = set_new_cursor_loop_base_exponential_search_string
else:
base_str = set_new_cursor_loop_base_linear_search_string
body = base_str.format(
dim=dim,
dim_shape=shp[dim],
dim_minus_1=dim - 1,
lower_dim_index=lower_dim_index
)
return loop_over_shape(shp[:dim], body)
minimize_shift_string = """shift_{dim} = min(shift_{dim}, {dim_shape})"""
def minimize_shift(dim, dim_shape):
return minimize_shift_string.format(dim=dim, dim_shape=dim_shape)
cursor_loops_string = """
if shift_{dim}:
left_edge_{dim} = c{dim} - r{dim}
right_edge_{dim} = c{dim} + r{dim}
if shift_{dim} >= {dim_shape}:
{init_loop}
else:
{shift_loop}
{minimize_shift}
{set_new_cursor_loop}
{set_higher_shift}
shift_{dim} = np.int64(coords_{dim}[i_coord + 1] - c{dim})
c{dim} = coords_{dim}[i_coord + 1]
"""
def param_cursor_loops(shp, dim):
return cursor_loops_string.format(
dim=dim,
dim_shape=shp[dim],
init_loop=indent_block(param_init_loop(shp, dim), 2, first_line=0),
shift_loop=indent_block(param_shift_loop(shp, dim), 2, first_line=0),
minimize_shift=minimize_shift(dim, shp[dim]),
set_new_cursor_loop=indent_block(param_set_new_cursor_loop(shp, dim),
1, first_line=0),
set_higher_shift=param_set_higher_shift(shp, dim)
)
last_dim_loop_string = """
c{dim} = coords_{dim}[i_coord]
left_edge_{dim} = c{dim} - r{dim}
right_edge_{dim} = c{dim} + r{dim}
{do_something_with_central_pixel}
{low_dim_loop}"""
last_dim_loop_body_string_hyperrect = """
cursor = cursors_{dim_minus_1}[{lower_dim_index}]
while cursor < ends_{dim_minus_1}[{lower_dim_index}] and coords_{dim}[cursor] < left_edge_{dim}:
cursor += 1
cursors_{dim_minus_1}[{lower_dim_index}] = cursor # Save the position we reached along the shard
while cursor < ends_{dim_minus_1}[{lower_dim_index}] and coords_{dim}[cursor] <= right_edge_{dim}:
{do_something_with_neighbors}
cursor += 1"""
last_dim_loop_body_string_struct_el = """
cursor = cursors_{dim_minus_1}[{lower_dim_index}]
while cursor < ends_{dim_minus_1}[{lower_dim_index}] and coords_{dim}[cursor] < left_edge_{dim}:
cursor += 1
cursors_{dim_minus_1}[{lower_dim_index}] = cursor # Save the position we reached along the shard
_end = ends_{dim_minus_1}[{lower_dim_index}]
for i_final in range({last_dim_shape}):
while cursor < _end and coords_{dim}[cursor] < left_edge_{dim} + i_final:
cursor += 1
if cursor == _end:
break
elif coords_{dim}[cursor] == left_edge_{dim} + i_final and struct_el[{lower_dim_index}, i_final]:
{do_something_with_neighbors}"""
def param_last_dim_loop(shp, struct_el):
"""
shp : Shape of the hyperrect around the central pixel to search for neighbors
struct_el: True/False on whether a structuring element of shape shp will be used."""
assert len(shp)
last_dim = len(shp) - 1
lower_dim_index = ', '.join(
[i_(low_dim) for low_dim in range(last_dim)]) # 'i0, i1, ...'
if struct_el:
loop_body = last_dim_loop_body_string_struct_el.format(
dim=last_dim,
dim_minus_1=last_dim - 1,
last_dim_shape=shp[-1],
lower_dim_index=lower_dim_index,
do_something_with_neighbors="{do_something_with_neighbors}"
)
else:
loop_body = last_dim_loop_body_string_hyperrect.format(
dim=last_dim,
dim_minus_1=last_dim - 1,
lower_dim_index=lower_dim_index,
do_something_with_neighbors="{do_something_with_neighbors}"
)
loop = loop_over_shape(shp[:-1], loop_body)
return last_dim_loop_string.format(
dim=last_dim,
low_dim_loop=loop,
do_something_with_central_pixel="{do_something_with_central_pixel}",
)
# Find the ancestors of neighbor index
find_central_ancestor_string = """
central_ancestor = labels[i_coord]
while labels[central_ancestor] != central_ancestor:
prev_central_ancestor = central_ancestor
central_ancestor = labels[central_ancestor]
labels[prev_central_ancestor] = central_ancestor"""
find_neighbor_ancestor_string = """
#central_ancestor = labels[i_coord]
neighbor_ancestor = labels[cursor]
if neighbor_ancestor == central_ancestor:
break
#while labels[central_ancestor] != central_ancestor:
# prev_central_ancestor = central_ancestor
# central_ancestor = labels[central_ancestor]
# labels[prev_central_ancestor] = central_ancestor
while labels[neighbor_ancestor] != neighbor_ancestor:
prev_neighbor_ancestor = neighbor_ancestor
neighbor_ancestor = labels[neighbor_ancestor]
labels[prev_neighbor_ancestor] = neighbor_ancestor
if neighbor_ancestor == central_ancestor:
labels[cursor] = central_ancestor
labels[i_coord] = central_ancestor
if neighbor_ancestor < central_ancestor:
labels[cursor] = neighbor_ancestor
labels[i_coord] = neighbor_ancestor
labels[central_ancestor] = neighbor_ancestor
central_ancestor = neighbor_ancestor
else: # neighbor_ancestor > central_ancestor:
labels[cursor] = central_ancestor
labels[i_coord] = central_ancestor
labels[neighbor_ancestor] = central_ancestor"""
finalize_labels_str = """
for i in range(labels.size-1, -1, -1):
i = numba.int_(i)
anc = i
while anc != labels[anc]:
anc = numba.int_(labels[anc])
while labels[i] != anc:
i_prev = i
labels[i_prev] = anc
i = numba.int_(labels[i])
"""
label_func_string = """
def label(loc, labels, {struct_el}):
{split_loc_to_coords}
# Number of coordinates
n = coords_0.size
{shift_init_strings}
{cursors_init_strings}
{ends_init_strings}
{coord_init_strings}
{range_init_strings}
for i_coord in range(n):
{coord_loop_body}
{finish_up}
return labels"""
def find_neighbors_func(neigh_shape, use_struct_el):
""" Build a nopython function to label locations.
Arguments
---------
neigh_shape : ndim-tuple of ints
Should all be odd numbers so that the central pixel remains well
defined
use_struct_el : bool
Flag indicating that the structuring element is not a perfect
        hyperrect neighborhood (i.e. np.all(struct_el) == False)
Returns
-------
Numba nopython function that labels IndexSet locations that are neighbors.
"""
ndim = len(neigh_shape)
fn = label_func_string.format(
struct_el='struct_el' if use_struct_el else '',
split_loc_to_coords = indent_block(
split_init_into_coords_init_str(ndim), 1, first_line=0),
coord_dim_names=coord_dim_names(ndim),
coord_init_strings=indent_block(coord_init_strings(ndim),
first_line=0),
shift_init_strings=indent_block(shift_init_strings(neigh_shape), 1,
first_line=0),
cursors_init_strings=indent_block(
cursors_init_strings(neigh_shape, np.int64), first_line=0),
ends_init_strings=indent_block(
ends_init_strings(neigh_shape, np.int64), first_line=0),
range_init_strings=indent_block(range_init_strings(neigh_shape),
first_line=0),
coord_loop_body=''.join(
[indent_block(update_cursor_section_dim_0(neigh_shape), 2)] +
[indent_block(param_cursor_loops(neigh_shape, i), 2) for i in
range(1, ndim - 1)] +
[indent_block(param_last_dim_loop(neigh_shape, use_struct_el), 2)]
),
finish_up=indent_block(finalize_labels_str, 1, first_line=0),
)
indent_amount = ndim + 3 if use_struct_el else ndim + 2
fn = fn.format(
do_something_with_central_pixel=indent_block(
find_central_ancestor_string, 2, first_line=0),
do_something_with_neighbors=indent_block(find_neighbor_ancestor_string,
indent_amount, first_line=0),
)
return fn
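# Cache of generated and jitted label functions, keyed by (neighborhood shape, use_struct_el).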
__saved_neighbor_funcs = dict()
def build_label_func(shape, use_struct_el):
if (shape, use_struct_el) in __saved_neighbor_funcs:
return __saved_neighbor_funcs[(shape, use_struct_el)]
fn_string = find_neighbors_func(shape, use_struct_el)
_loc = dict()
exec(fn_string, globals(), _loc)
fn = numba.jit(_loc['label'], nopython=True, nogil=True)
__saved_neighbor_funcs[(shape, use_struct_el)] = fn
return fn
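# Example (hypothetical shapes): build_label_func((3, 3, 3), False) returns a
# nopython/nogil ``label(loc, labels)`` function for a full 3x3x3 neighborhood.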
@numba.njit
def merge_chunked_labels(master_labels, chunk_labels, overlap_start,
overlap_stop):
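    # Union the chunk's labels over the overlapping region into the master
    # union-find forest, then append the remaining chunk labels shifted by
    # overlap_start.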
n_overlapping = overlap_stop - overlap_start
for i_chunk, i_master in enumerate(range(overlap_start, overlap_stop)):
# print(i_chunk, i_master)
anc_master = master_labels[i_master]
while master_labels[anc_master] != anc_master:
anc_master_prev = anc_master
anc_master = master_labels[anc_master]
master_labels[anc_master_prev] = anc_master
anc_chunk = chunk_labels[i_chunk] + overlap_start
while master_labels[anc_chunk] != anc_chunk:
anc_chunk_prev = anc_chunk
anc_chunk = master_labels[anc_chunk]
master_labels[anc_chunk_prev] = anc_chunk
if anc_chunk < anc_master:
master_labels[anc_master] = anc_chunk
elif anc_master < anc_chunk:
master_labels[anc_chunk] = anc_master
fin = overlap_stop + chunk_labels.size - n_overlapping
master_labels[overlap_stop:fin] = (
chunk_labels[n_overlapping:] + overlap_start)
@numba.njit([numba.void(numba.uint8[:]), numba.void(numba.uint16[:]),
numba.void(numba.uint32[:]), numba.void(numba.uint64[:])],
nogil=True)
def finalize_labels(labels):
"""Ensure that labels are root or point to a root."""
for i in range(labels.size - 1, -1, -1):
i = numba.int_(i)
anc = i
while anc != labels[anc]:
anc = numba.int_(labels[anc])
while labels[i] != anc:
i_prev = i
labels[i_prev] = anc
i = numba.int_(labels[i]) | mit | 230,391,288,093,049,820 | 31.551282 | 101 | 0.601589 | false |
istio/tools | perf/docker/rabbitmq/client.py | 1 | 2647 | # Copyright Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import logging
import prom_client
import pika
import sys
password = os.environ["PASSWORD"]
username = os.environ["USERNAME"]
address = os.environ["ADDRESS"]
queue = 'queue'
def with_metrics(f, valid=None):
return prom_client.attempt_request(
f,
source='rabbitmq-client',
destination='rabbitmq',
valid=valid
)
def with_metrics_or_fail(f, valid=None):
r, success = with_metrics(f, valid)
if not success:
raise Exception("Function failed")
return r, success
def setup_client():
credentials = pika.PlainCredentials(username, password)
connection = pika.BlockingConnection(
pika.ConnectionParameters(address, credentials=credentials))
channel = connection.channel()
channel.queue_declare(queue=queue)
return channel
def send(channel, message):
with_metrics_or_fail(
lambda: channel.basic_publish(
exchange='',
routing_key=queue,
body=message
),
valid=None
)
def attempt_decode(s):
if s is None:
return ""
return s.decode('utf-8')
def receive(channel, expected):
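    # Consume a single message (1s inactivity timeout) and count the request as
    # valid only if the decoded body matches what was sent.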
with_metrics_or_fail(
lambda: attempt_decode(
next(channel.consume(queue, inactivity_timeout=1))[2]),
valid=lambda resp: resp == expected
)
def run_test():
pub, succeeded = with_metrics(setup_client)
if not succeeded:
logging.error("Failed to setup client")
sys.exit(1)
sub, succeeded = with_metrics(setup_client)
if not succeeded:
logging.error("Failed to setup client")
sys.exit(1)
while True:
message = "a message"
send(pub, message)
receive(sub, message)
time.sleep(.5)
if __name__ == "__main__":
prom_client.report_metrics()
prom_client.report_running('rabbitmq')
time.sleep(10) # Wait for server
while True:
try:
run_test()
except Exception:
logging.warning("Rerunning test due to exception")
time.sleep(.5)
| apache-2.0 | 3,253,971,791,620,770,300 | 23.738318 | 74 | 0.649037 | false |
HybridF5/jacket | jacket/api/compute/openstack/compute/legacy_v2/contrib/floating_ip_pools.py | 1 | 2154 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from jacket.api.compute.openstack import extensions
from jacket.compute import network
authorize = extensions.extension_authorizer('compute', 'floating_ip_pools')
def _translate_floating_ip_view(pool_name):
return {
'name': pool_name,
}
def _translate_floating_ip_pools_view(pools):
return {
'floating_ip_pools': [_translate_floating_ip_view(pool_name)
for pool_name in pools]
}
class FloatingIPPoolsController(object):
"""The Floating IP Pool API controller for the OpenStack API."""
def __init__(self):
self.network_api = network.API()
super(FloatingIPPoolsController, self).__init__()
def index(self, req):
"""Return a list of pools."""
context = req.environ['compute.context']
authorize(context)
pools = self.network_api.get_floating_ip_pools(context)
return _translate_floating_ip_pools_view(pools)
class Floating_ip_pools(extensions.ExtensionDescriptor):
"""Floating IPs support."""
name = "FloatingIpPools"
alias = "os-floating-ip-pools"
namespace = ("http://docs.openstack.org/compute/ext/"
"floating_ip_pools/api/v1.1")
updated = "2012-01-04T00:00:00Z"
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-floating-ip-pools',
FloatingIPPoolsController(),
member_actions={})
resources.append(res)
return resources
| apache-2.0 | 1,853,800,950,264,657,400 | 31.149254 | 78 | 0.65506 | false |
pekkosk/hotbit | hotbit/containers/chiralwedge.py | 1 | 5906 | from __future__ import division
import numpy as np
from box.mix import phival
from math import sin,cos
from weakref import proxy
import warnings
class ChiralWedge:
def __init__(self,atoms,type):
'''
        Class for chiral+wedge boundary conditions.
        @param: atoms    hotbit.Atoms instance
        @param: type     Should equal "ChiralWedge"
        More documentation for the methods can be found in the hotbit.Atoms class.
'''
self.type='ChiralWedge'
assert type==self.type
self.atoms = proxy(atoms)
self.par = {'height':(1,0),'twist':(0,1),'angle':(2,0),'physical':(1,1)}
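        # self.par maps each container parameter to the (row, column) slot of the
        # 3x3 cell matrix in which its value is stored.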
self.atoms.set_pbc((True,False,True))
#self._set_table()
def get_type(self):
return self.type
def __repr__(self):
twist, angle, height, physical = self.get('twist'), self.get('angle'), self.get('height'), self.get('physical')
x='ChiralWedge: angle=%.4f (2*pi/%.2f, ' %(angle,2*np.pi/angle)
if physical:
x+='physical), '
else:
x+='not physical), '
x+='height=%.4f Ang ' %height
x+='twist angle %.4f' %twist
return x
def get_table(self):
M = int( round(2*np.pi/self.get('angle')) )
return [{'M':M},{'M':1},{'M':np.Inf}]
def get(self,key):
"""
Get container parameters
key: 'angle','height','twist','physical'
"""
x = self.atoms.get_cell()[self.par[key]]
if key in ['angle','height','twist']:
return x
else:
return bool(np.round(x))
def _set(self,**kwargs):
assert len(kwargs)==1
for key in kwargs:
cell = self.atoms.get_cell()
cell[self.par[key]] = kwargs[key]
self.atoms.set_cell(cell)
def set(self, angle=None, height=None, M=None, physical=True, twist=None, scale_atoms=False, container=None):
"""
parameters:
===========
angle angle (in radians) of the wedge (and M=None)
height Height of the primitive cell in z-direction
M set angle to 2*pi/M (and angle=None)
        physical (only if M=None) if angle is small, it does not need to be
                 exactly 2*pi/integer, i.e. the situation has no physical meaning
                 (use for calculating stuff continuously)
twist The twist angle for z-translation
scale_atoms Scale atoms according to changes in parameters
"""
if container!=None:
assert angle==None and height==None and M==None and twist==None
self.set(angle=container.get('angle'),height=container.get('height'),\
physical=container.get('physical'), twist=container.get('twist'))
if angle!=None or M!=None:
#assert not scale_atoms
assert not (angle!=None and M!=None)
old_angle = self.get('angle')
if M != None:
assert isinstance(M,int)
self._set(angle=2*np.pi/M)
elif angle != None:
M = np.abs(int( round(2*np.pi/angle) ))
self._set(angle=angle)
# check parameters
self._set( physical=float(physical) )
if np.abs(self.get('angle'))<1E-6:
raise Warning('Too small angle (%f) may bring numerical problems.' %self.get('angle'))
if self.get('angle')>np.pi:
raise AssertionError('angle>pi')
if np.abs(M-2*np.pi/np.abs(self.get('angle')))>1E-12 and self.get('physical'):
raise AssertionError('angle not physical: angle != 2*pi/M')
if not self.get('physical') and M<20:
warnings.warn('Quite large, non-physical angle 2*pi/%.4f.' %(2*np.pi/self.get('angle')) )
if scale_atoms:
if abs(old_angle)<1E-10:
raise ValueError('Atoms cannot be scaled; old wedge angle too small.')
newr = []
for r in self.atoms.get_positions():
x,y = r[0],r[1]
rad = np.sqrt( x**2+y**2 )
newphi = phival(x,y)*(self.get('angle')/old_angle)
newr.append( [rad*np.cos(newphi),rad*np.sin(newphi),r[2]] )
self.atoms.set_positions(newr)
if height!=None:
if scale_atoms:
r = self.atoms.get_positions()
r[:,2] = r[:,2] * height/self.get('height')
self.atoms.set_positions(r)
self._set(height=height)
if twist!=None:
if scale_atoms:
raise NotImplementedError('Atom rescale with twist not implemented.')
self._set(twist=twist)
#self._set_table()
def __eq__(self,other):
return self.atoms == other.atoms
def get_symmetry_operation_ranges(self):
""" Return ranges for symmetry operations. """
M = int( round(2*np.pi/np.abs(self.get('angle'))) )
i = M//2
zi = 0
if np.mod(M,2)==1:
ranges = np.array([[-i,i],[0,0],[-np.Inf,np.Inf]])
else:
ranges = np.array([[-i+1,i],[0,0],[-np.Inf,np.Inf]])
return ranges
    def transform(self,r,n):
        """ Rotate r around z by (n0*angle + n2*twist) and translate by n2*height. """
R = self.rotation(n)
trans = np.zeros((3))
trans = n[2]*np.array([0,0,self.get('height')])
return np.dot(R,r) + np.array(trans)
def rotation(self,n,angles=False):
""" Active rotation matrix of given angle wrt. z-axis."""
angle = n[0]*self.get('angle') + n[2]*self.get('twist')
R = np.array([[cos(angle),-sin(angle),0],[sin(angle),cos(angle),0],[0,0,1]])
if angles:
raise NotImplementedError('angles not implemented for ChiralWedge')
else:
return R
| gpl-2.0 | -158,940,071,351,519,900 | 35.45679 | 119 | 0.532509 | false |
asidev/aybu-core | aybu/core/models/user.py | 1 | 7972 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2010-2012 Asidev s.r.l.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import ast
from aybu.core.models.base import Base
import collections
import crypt
import re
import requests
import urllib
import json
from logging import getLogger
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import Unicode
from sqlalchemy import Table
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import (relationship,
object_session,
joinedload)
from sqlalchemy.orm.exc import NoResultFound
__all__ = []
log = getLogger(__name__)
RemoteGroup = collections.namedtuple('Group', ['name'])
class RemoteUser(object):
""" This class is used in place of the User class when
remote API login management is used in place of local
database """
def __init__(self, url, username, crypted_password, cleartext_password,
remote, groups, verify_ssl):
self.url = url
self.username = username
self.crypted_password = crypted_password
self.cleartext_password = cleartext_password
self._groups = groups
self.remote = remote
self.verify_ssl = verify_ssl
@property
def groups(self):
return [RemoteGroup(name=g) for g in self._groups]
@property
def password(self):
return self.crypted_password
@password.setter
def password(self, password):
url = "{}/{}".format(self.remote, self.username)
try:
response = requests.put(
url,
auth=(self.username, self.cleartext_password),
data=dict(password=password),
verify=self.verify_ssl
)
response.raise_for_status()
content = json.loads(response.content)
except requests.exceptions.RequestException as e:
log.critical("Error connection to API: {} - {}"\
.format(type(e).__name__, e))
raise ValueError('Cannot connect to API')
except Exception:
log.exception('Invalid login: %s', response.status_code)
raise ValueError('Invalid login, upstream returned {}'\
.format(response.status_code))
else:
log.info("Updated password for %s", self.username)
self.crypted_password = content['crypted_password']
self.cleartext_password = password
@classmethod
def check(cls, request, username, password):
remote = request.registry.settings.get('remote_login_url')
log.info("Using API server at %s", remote)
try:
verify_ssl = ast.literal_eval(
request.registry.settings.get('remote_login_verify_ssl'))
except:
log.exception('Error in ast.literal_eval')
verify_ssl = False
url = "{}/{}".format(remote, username)
params = dict(
domain=request.host,
action="login"
)
try:
query = "?{}".format(urllib.urlencode(params))
query = "{}{}".format(url, query)
log.debug("GET %s", query)
response = requests.get(query, auth=(username, password),
verify=verify_ssl)
response.raise_for_status()
log.debug("Response: %s", response)
content = json.loads(response.content)
except requests.exceptions.RequestException as e:
log.critical("Error connection to API: {} - {}"\
.format(type(e).__name__, e))
raise ValueError('Cannot connect to API')
except ValueError:
log.exception("Cannot decode JSON")
raise
except Exception:
log.error('Invalid login: %s', response.status_code)
            raise ValueError('Invalid login, upstream returned {}'\
                             .format(response.status_code))
else:
return RemoteUser(url=url, username=username,
crypted_password=content['crypted_password'],
cleartext_password=password,
groups=content['groups'],
remote=remote, verify_ssl=verify_ssl)
def has_permission(self, perm):
return bool(set((perm, 'admin')) & set(self._groups))
def check_password(self, password):
if not self.cleartext_password == password:
raise ValueError('Invalid username or password')
def __repr__(self):
return "<RemoteUser {}>".format(self.username)
users_groups = Table('users_groups',
Base.metadata,
Column('users_username',
Unicode(255),
ForeignKey('users.username',
onupdate="cascade",
ondelete="cascade")),
Column('groups_name',
Unicode(32),
ForeignKey('groups.name',
onupdate="cascade",
ondelete="cascade")),
mysql_engine='InnoDB')
class User(Base):
__tablename__ = 'users'
__table_args__ = ({'mysql_engine': 'InnoDB'})
hash_re = re.compile(r'(\$[1,5-6]\$|\$2a\$)')
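    # Matches the crypt(3) scheme prefix ($1$, $5$, $6$ or $2a$) so the salt
    # length can be determined when verifying passwords.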
salt = "$6$"
username = Column(Unicode(255), primary_key=True)
crypted_password = Column("password", Unicode(128), nullable=False)
groups = relationship('Group', secondary=users_groups, backref='users')
@classmethod
def get(cls, session, pkey):
# FIXME this should raise NoResultFound if query returns None!
user = session.query(cls).options(joinedload('groups')).get(pkey)
if user is None:
raise NoResultFound("No obj with key {} in class {}"\
.format(pkey, cls.__name__))
return user
@classmethod
def check(cls, session, username, password):
try:
user = cls.get(session, username)
salt = cls.hash_re.match(user.password)
length = len(salt.group()) if salt else 2
enc_password = crypt.crypt(password, user.password[0:length])
assert user.password == enc_password
except (AssertionError, NoResultFound):
log.warn('Invalid login for %s', username)
raise ValueError('invalid username or password')
else:
return user
@hybrid_property
def password(self):
return self.crypted_password
@password.setter
def password(self, value):
self.crypted_password = crypt.crypt(value, self.salt)
def check_password(self, password):
return self.__class__.check(object_session(self), self.username,
password)
def has_permission(self, perm):
return bool(set((perm, 'admin')) & set(g.name for g in self.groups))
def __repr__(self):
return "<User {}>".format(self.username)
class Group(Base):
__tablename__ = 'groups'
__table_args__ = ({'mysql_engine': 'InnoDB'})
name = Column(Unicode(32), primary_key=True)
def __repr__(self):
return "<Group {}>".format(self.name)
| apache-2.0 | 5,875,499,489,000,414,000 | 33.214592 | 81 | 0.568239 | false |
webhamster/pidart | gamemanager/events.py | 1 | 1818 | from circuits import Event
class ReceiveInput(Event):
""" Some input arrived (fieldcode or other input) """
class StartGame(Event):
""" Start a new game. """
class GameInitialized(Event):
""" A new game was started. """
class DartStuck(Event):
""" A dart is stuck. May be fired many times in a row. """
class SkipPlayer(Event):
""" The button for skipping a player was pressed. """
class CodeNotImplemented(Event):
""" This part of the code is not implemented. """
class Hit(Event):
""" A dart hit the board. """
class HitBust(Event):
""" A dart hit the board, but busted. """
class HitWinner(Event):
""" A dart hit the board, and the player won. """
class EnterHold(Event):
""" Wait for the player to hit start. """
class LeaveHold(Event):
""" Player has pressed start to continue. """
class FrameStarted(Event):
""" A new frame was started. """
class FrameFinished(Event):
""" A player has thrown three darts (or the round was skipped, etc.) """
class GameOver(Event):
""" The game is over. """
class ManualNextPlayer(Event):
""" Manual request to advance to next player """
class ChangeLastRound(Event):
""" Change the history (last round) of the player. """
class GameStateChanged(Event):
""" Something (general) in the player's history changed. """
class UpdateSettings(Event):
""" Set come config """
class SettingsChanged(Event):
""" Something in the config has changed, new config is attached. """
class ErrorMessage(Event):
""" Some general error occured. """
class PerformSelfUpdate(Event):
""" Command to update the running python file. """
class UndoLastFrame(Event):
""" Remove a player's last frame from the history. """
class UpdatePlayers(Event):
""" Change list of current players. """
| gpl-3.0 | -3,084,225,024,514,160,600 | 24.971429 | 76 | 0.663916 | false |
uw-it-aca/sqlshare-rest | sqlshare_rest/views/download.py | 1 | 1410 | from oauth2_provider.decorators import protected_resource
from django.views.decorators.csrf import csrf_exempt
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from sqlshare_rest.views import get_oauth_user, get403, get404, get400, get405
from sqlshare_rest.util.db import get_backend
from sqlshare_rest.views.sql import response_for_query
from sqlshare_rest.models import DownloadToken
from sqlshare_rest.dao.user import get_user
import json
@csrf_exempt
def run(request, token):
if request.META['REQUEST_METHOD'] != "GET":
return get405()
get_oauth_user(request)
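    # Redeem the download token and return the stored query's results as a CSV
    # attachment.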
try:
dt = DownloadToken().validate_token(token)
except DownloadToken.DoesNotExist:
return get404()
sql = dt.sql
backend = get_backend()
user = dt.original_user
return response_for_query(sql, user, download_name="query_results.csv")
@csrf_exempt
@protected_resource()
def init(request):
if request.META['REQUEST_METHOD'] != "POST":
return get405()
get_oauth_user(request)
values = json.loads(request.body.decode("utf-8"))
sql = values["sql"]
user = get_user(request)
dt = DownloadToken()
dt.store_token_for_sql(sql, user)
url = reverse("sqlshare_view_run_download", kwargs={"token": dt.token})
response = HttpResponse(json.dumps({'token': dt.token}))
response["Location"] = url
return response
| apache-2.0 | 3,505,049,341,933,977,000 | 27.77551 | 78 | 0.70922 | false |
yakky/djangocms-text-ckeditor | tests/test_field.py | 1 | 2818 | # -*- coding: utf-8 -*-
from django.template import Context, Template
from django.utils.safestring import SafeData
from djangocms_helper.base_test import BaseTestCase
from tests.test_app.forms import SimpleTextForm
from tests.test_app.models import SimpleText
from djangocms_text_ckeditor.fields import HTMLFormField
class HtmlFieldTestCase(BaseTestCase):
def test_html_form_field(self):
html_field = HTMLFormField()
self.assertTrue(isinstance(html_field.clean('some text'), SafeData))
class FieldTestCase(BaseTestCase):
text_normal = '<p>some non malicious text</p>'
text_with_iframe = ('<p>some non malicious text</p>'
'<iframe src="http://www.w3schools.com"></iframe>')
text_with_iframe_escaped = ('<p>some non malicious text</p><iframe '
'src="http://www.w3schools.com"></iframe>')
text_with_script = ('<p>some non malicious text</p> '
'<script>alert("Hello! I am an alert box!");</script>')
text_with_script_escaped = (u'<p>some non malicious text</p> <script>'
u'alert("Hello! I am an alert box!");</script>')
def test_model_field_text_is_safe(self):
original = 'Hello <h2>There</h2>'
template = Template('{{ obj.text }}')
text = SimpleText.objects.create(text='Hello <h2>There</h2>')
# Fetching a new instance should now have the string marked
# as safe.
text = SimpleText.objects.get(pk=text.pk)
rendered = template.render(Context({'obj': text}))
self.assertEqual(original, rendered)
def test_model_field_sanitized(self):
obj = SimpleText(text=self.text_normal)
obj.full_clean()
obj.save()
obj = SimpleText.objects.get(pk=obj.pk)
self.assertEqual(obj.text, self.text_normal)
obj = SimpleText(text=self.text_with_iframe)
obj.full_clean()
obj.save()
self.assertEqual(obj.text, self.text_with_iframe_escaped)
obj = SimpleText(text=self.text_with_script)
obj.full_clean()
obj.save()
self.assertEqual(obj.text, self.text_with_script_escaped)
def test_form_field_sanitized(self):
form = SimpleTextForm(data={'text': self.text_normal})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['text'], self.text_normal)
form = SimpleTextForm(data={'text': self.text_with_iframe})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['text'], self.text_with_iframe_escaped)
form = SimpleTextForm(data={'text': self.text_with_script})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['text'], self.text_with_script_escaped)
| bsd-3-clause | -3,695,870,091,653,139,500 | 36.078947 | 86 | 0.635912 | false |
facilecoin/facilecoin-core | qa/rpc-tests/keypool.py | 1 | 4292 | #!/usr/bin/env python2
# Copyright (c) 2014 The FacileCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the wallet keypool, and interaction with wallet encryption/locking
# Add python-facilecoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-facilecoinrpc"))
import json
import shutil
import subprocess
import tempfile
import traceback
from facilecoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def check_array_result(object_array, to_match, expected):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
"""
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0:
raise AssertionError("No objects matched %s"%(str(to_match)))
def run_test(nodes, tmpdir):
# Encrypt wallet and wait to terminate
nodes[0].encryptwallet('test')
facilecoind_processes[0].wait()
# Restart node 0
nodes[0] = start_node(0, tmpdir)
# Keep creating keys
addr = nodes[0].getnewaddress()
try:
addr = nodes[0].getnewaddress()
raise AssertionError('Keypool should be exhausted after one address')
except JSONRPCException,e:
assert(e.error['code']==-12)
# put three new keys in the keypool
nodes[0].walletpassphrase('test', 12000)
nodes[0].keypoolrefill(3)
nodes[0].walletlock()
# drain the keys
addr = set()
addr.add(nodes[0].getrawchangeaddress())
addr.add(nodes[0].getrawchangeaddress())
addr.add(nodes[0].getrawchangeaddress())
addr.add(nodes[0].getrawchangeaddress())
# assert that four unique addresses were returned
assert(len(addr) == 4)
# the next one should fail
try:
addr = nodes[0].getrawchangeaddress()
raise AssertionError('Keypool should be exhausted after three addresses')
except JSONRPCException,e:
assert(e.error['code']==-12)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave facilecoinds and test.* datadir on exit or error")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing facilecoind/facilecoin-cli (default: %default%)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
(options, args) = parser.parse_args()
os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
nodes = []
try:
print("Initializing test directory "+options.tmpdir)
if not os.path.isdir(options.tmpdir):
os.makedirs(options.tmpdir)
initialize_chain(options.tmpdir)
nodes = start_nodes(1, options.tmpdir)
run_test(nodes, options.tmpdir)
success = True
except AssertionError as e:
print("Assertion failed: "+e.message)
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: "+str(sys.exc_info()[0]))
traceback.print_tb(sys.exc_info()[2])
if not options.nocleanup:
print("Cleaning up")
stop_nodes(nodes)
wait_facilecoinds()
shutil.rmtree(options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
if __name__ == '__main__':
main()
| mit | -2,767,100,042,064,680,000 | 31.515152 | 105 | 0.643523 | false |
Sannoso/baxter_examples | scripts/sander_ik_test_posemessagebranche.py | 1 | 4913 | #!/usr/bin/env python
"""
Sander's ik_comparison test python script
This script is written to be run with the Gazebo-simulated Baxter.
It will probably run on the live robot as well, but is not tested on it.
"""
#import various libraries
import argparse
import struct
import sys
import rospy
#import library to use time.sleep function
import time
#import necessary rosmessage parts
from geometry_msgs.msg import (
PoseStamped,
Pose,
Point,
Quaternion,
)
from std_msgs.msg import Header
#import necessary service messagestuff
#to build the request message for the IK service
from baxter_core_msgs.srv import (
SolvePositionIK,
SolvePositionIKRequest,
)
#def ik_test(beginpose, endpose):
def ik_test():
rospy.init_node("Sander_ik_test_node")
##preparing to call the IK service
#store the name of the service in a variable for easier use
servicename = "ExternalTools/right/PositionKinematicsNode/IKService"
#wait for the service to be available. startup_time or in use by something else
# rospy.wait_for_service(servicename)#what is the name of IKService? does this work?
#create a rospy.serviceproxy to be able to call this service
ikservice = rospy.ServiceProxy(servicename, SolvePositionIK)
ikrequestmessage = SolvePositionIKRequest()
    print('ikrequestmessage is: ', ikrequestmessage)
# print(ikrequestmessage)
#every request should have the correct timestamp:
#I'm making the header in a different function to ensure a correct timestamp
    #the while loop is necessary because in simulator time rospy.Time.now has
    #to be called shortly after the time is published on /clock
now = rospy.Time.now()
# count = 0
# while(now.secs == 0):
# now = rospy.Time.now()
# count += 1
# print('amount of rospy.Time.now() requests until non-zero output: ', count)
hdr = Header(stamp=now, frame_id='base')
print(hdr)
#oke the header is created
#declaring all poses
poses = {
'ik_example_pose': PoseStamped(
header=hdr,
pose=Pose(
position=Point(
x=0.656982770038,
y=-0.852598021641,
z=0.0388609422173,
),
orientation=Quaternion(
x=0.367048116303,
y=0.885911751787,
z=-0.108908281936,
w=0.261868353356,
),
),
),
'neutralpose': PoseStamped(
header=hdr,
pose=Pose(
position=Point(
x=0.573,
y=-0.181,
z=0.246,
),
orientation=Quaternion(
x=-0.141,
y=0.990,
z=-0.012,
w=0.026,
),
),
),
'poseA': PoseStamped(
header=hdr,
pose=Pose(
position=Point(
x=0.1,
y=0.51,
z=0.723,
),
orientation=Quaternion(
x=0,
y=1,
z=0,
w=0,
),
),
),
#'triangledepositpose'
#'squaredepositpose'
#'circledepositpose'
}
#put PoseStamped[] in the requestmessage
ikrequestmessage.pose_stamp.append(poses['ik_example_pose'])
# ikrequestmessage.pose_stamp.append(poses['neutralpose'])
print(ikrequestmessage)
try:
rospy.wait_for_service(servicename, 5.0)
resp = ikservice(ikrequestmessage)
except (rospy.ServiceException, rospy.ROSException), e:
rospy.logerr("Service call failed: %s" % (e,))
return 1
print "------------------"
print "Response Message:\n", resp
return 1
def main():
"""
    Sander's IK test entry point.
    Calls ik_test() once and returns a status message.
"""
#create and initialise a rosnode
# rospy.init_node("Sander_ik_test_node")
#call the testroutine
ik_test()
    return "\n IK test executed successfully"
if __name__ == '__main__':
sys.exit(main())
"""
THIS IS THE MESSAGE WE ARE FILLING IN TO DO AN IK REQUEST
baxter_core_msgs/SolvePositionIK
uint8 SEED_AUTO=0
uint8 SEED_USER=1
uint8 SEED_CURRENT=2
uint8 SEED_NS_MAP=3
geometry_msgs/PoseStamped[] pose_stamp
std_msgs/Header header
uint32 seq
time stamp
string frame_id
geometry_msgs/Pose pose
geometry_msgs/Point position
float64 x
float64 y
float64 z
geometry_msgs/Quaternion orientation
float64 x
float64 y
float64 z
float64 w
sensor_msgs/JointState[] seed_angles
std_msgs/Header header
uint32 seq
time stamp
string frame_id
string[] name
float64[] position
float64[] velocity
float64[] effort
uint8 seed_mode
"""
| bsd-3-clause | -4,293,015,152,964,666,000 | 25.132979 | 89 | 0.58335 | false |
evansde77/cirrus | src/cirrus/delegate.py | 1 | 2385 | #!/usr/bin/env python
"""
_delegate_
Main cirrus command that delegates the call to
the sub command verb enabling
git cirrus do_a_thing to be routed to the appropriate
command call for do_a_thing
"""
import os
import os.path
import pkg_resources
import sys
import signal
import subprocess
import cirrus.environment as env
def install_signal_handlers():
"""
Need to catch SIGINT to allow the command to be CTRL-C'ed
"""
def signal_handler(signal, frame):
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
def run_command(cmd):
"""
run the delegated command with the CTRL-C signal handler
in place
"""
install_signal_handlers()
return subprocess.call(cmd, shell=False)
HELP = \
"""
Cirrus commands available are:
{0}
Do git cirrus <command> -h for more information on a
particular command
"""
def format_help(command_list):
subs = '\n'.join(
[c for c in command_list if c != 'cirrus']
)
return HELP.format(subs)
def main():
"""
_main_
response to the cirrus <verb> command
Extracts the available verbs that are installed as
entry points by setup.py as cirrus_commands
"""
home = env.virtualenv_home()
commands = []
for script in pkg_resources.iter_entry_points(group="cirrus_commands"):
comm = str(script).split(" = ", 1)[0]
commands.append(comm)
commands.sort()
# switch to the current GIT_PREFIX working dir
old_dir = os.getcwd()
os.chdir(os.path.abspath(os.environ.get('GIT_PREFIX', '.')))
try:
args = sys.argv[1:]
if len(args) == 0 or args[0] == '-h':
# missing command or help
print(format_help(commands))
exit_code = 0
else:
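            # Dispatch to the console script named after the verb in the
            # virtualenv's bin directory.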
command_path = "{0}/bin/{1}".format(home, args[0])
if not os.path.exists(command_path):
msg = "Unknown command: {}".format(args[0])
print(msg)
print(format_help(commands))
exit_code = 127
else:
exit_code = run_command([command_path, ] + args[1:])
except Exception as ex:
msg = "Exception Details:\n{}".format(ex)
print(msg)
raise
finally:
# always return to previous dir
os.chdir(old_dir)
return exit_code
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 | -2,823,482,492,811,789,000 | 21.932692 | 75 | 0.600419 | false |
moreati/revelation | epiphany/test/test_execute_bitwise.py | 1 | 7492 | from pydgin.utils import trim_32
from epiphany.instruction import Instruction
from epiphany.isa import decode
from epiphany.machine import RESET_ADDR
from epiphany.test.machine import StateChecker, new_state
import opcode_factory
import pytest
@pytest.mark.parametrize('rn,rm,is16bit', [(-1, 28, True),
(-1, 28, False),
( 1, 28, True),
( 1, 28, False)])
def test_execute_logical_shift_right(rn, rm, is16bit):
rd = 2
state = new_state(rf0=trim_32(rn), rf1=trim_32(rm))
instr = (opcode_factory.lsr16(rd=rd, rn=0, rm=1) if is16bit
else opcode_factory.lsr32(rd=rd, rn=0, rm=1))
name, executefn = decode(instr)
executefn(state, Instruction(instr, None))
    expected_state = StateChecker(AZ=(False if rn < 0 else True), # 1 >> 28 == 0
AV=0, AC=0,
pc=((2 if is16bit else 4) + RESET_ADDR),
rf2=(0b1111 if rn < 0 else 0))
expected_state.check(state)
@pytest.mark.parametrize('rn,imm,is16bit', [(-1, 28, True),
(-1, 28, False),
( 1, 28, True),
( 1, 28, False)])
def test_execute_logical_shift_right_imm(rn, imm, is16bit):
rd = 2
state = new_state(rf0=trim_32(rn))
instr = (opcode_factory.lsr16_immediate(rd=rd, rn=0, imm=imm) if is16bit
else opcode_factory.lsr32_immediate(rd=rd, rn=0, imm=imm))
name, executefn = decode(instr)
executefn(state, Instruction(instr, None))
    expected_state = StateChecker(AZ=(False if rn < 0 else True), # 1 >> 28 == 0
AV=0, AC=0,
pc=((2 if is16bit else 4) + RESET_ADDR),
rf2=(0b1111 if rn < 0 else 0))
expected_state.check(state)
@pytest.mark.parametrize('rn,rm,is16bit', [(-1, 5, True),
(-1, 5, False),
( 1, 5, True),
( 1, 5, False)])
def test_execute_arith_shift_right(rn, rm, is16bit):
rd = 2
state = new_state(rf0=trim_32(rn), rf1=trim_32(rm))
instr = (opcode_factory.asr16(rd=rd, rn=0, rm=1) if is16bit
else opcode_factory.asr32(rd=rd, rn=0, rm=1))
name, executefn = decode(instr)
executefn(state, Instruction(instr, None))
expected_state = StateChecker(AZ=(False if rn < 0 else True), # 1 >> 5 == 0
AV=0, AC=0,
pc=((2 if is16bit else 4) + RESET_ADDR),
rf2=(trim_32(-1) if rn < 0 else 0))
expected_state.check(state)
@pytest.mark.parametrize('rn,imm,is16bit', [(-1, 5, True),
(-1, 5, False),
( 1, 5, True),
( 1, 5, False)])
def test_execute_arith_shift_right_imm(rn, imm, is16bit):
rd = 2
state = new_state(rf0=trim_32(rn))
instr = (opcode_factory.asr16_immediate(rd=rd, rn=0, imm=imm) if is16bit
else opcode_factory.asr32_immediate(rd=rd, rn=0, imm=imm))
name, executefn = decode(instr)
executefn(state, Instruction(instr, None))
expected_state = StateChecker(AZ=(False if rn < 0 else True), # 1 >> 5 == 0
AV=0, AC=0,
pc=((2 if is16bit else 4) + RESET_ADDR),
rf2=(trim_32(-1) if rn < 0 else 0))
expected_state.check(state)
@pytest.mark.parametrize('factory,is16bit',
[(opcode_factory.lsl16, True),
(opcode_factory.lsl32, False)
])
def test_execute_shift_left(factory, is16bit):
state = new_state(rf0=5, rf1=7)
instr = factory(rd=2, rn=1, rm=0)
name, executefn = decode(instr)
executefn(state, Instruction(instr, None))
expected_state = StateChecker(AZ=0, AN=0, AC=0, AV=0,
pc=((2 if is16bit else 4) + RESET_ADDR),
rf2=7 << 5)
expected_state.check(state)
@pytest.mark.parametrize('factory,is16bit',
[(opcode_factory.lsl16_immediate, True),
(opcode_factory.lsl32_immediate, False)
])
def test_execute_shift_left_immediate(factory, is16bit):
state = new_state(rf1=7)
instr = factory(rd=2, rn=1, imm=5)
name, executefn = decode(instr)
executefn(state, Instruction(instr, None))
expected_state = StateChecker(AZ=0, AN=0, AC=0, AV=0,
pc=((2 if is16bit else 4) + RESET_ADDR),
rf2=7 << 5)
expected_state.check(state)
@pytest.mark.parametrize('bits,expected,is16bit',
[(0b10101010101010101010101010101010,
0b01010101010101010101010101010101,
True),
(0b01010101010101010101010101010101,
0b10101010101010101010101010101010,
True),
(0b10101010101010101010101010101010,
0b01010101010101010101010101010101,
False),
(0b01010101010101010101010101010101,
0b10101010101010101010101010101010,
False),
])
def test_execute_bitr(bits, expected, is16bit):
state = new_state(rf0=0, rf1=bits)
instr = (opcode_factory.bitr16_immediate(rd=2, rn=1, imm=0) if is16bit
else opcode_factory.bitr32_immediate(rd=2, rn=1, imm=0))
name, executefn = decode(instr)
executefn(state, Instruction(instr, None))
expected_state = StateChecker(AZ=0, AC=0, AV=0,
pc=((2 if is16bit else 4) + RESET_ADDR),
rf2=expected)
expected_state.check(state)
@pytest.mark.parametrize('factory,expected', [(opcode_factory.and32, 5 & 7),
(opcode_factory.orr32, 5 | 7),
(opcode_factory.eor32, 5 ^ 7),
])
def test_execute_bitwise32(factory, expected):
state = new_state(rf0=5, rf1=7)
instr = factory(rd=2, rn=1, rm=0)
name, executefn = decode(instr)
executefn(state, Instruction(instr, None))
expected_state = StateChecker(AZ=0, AV=0, AC=0, pc=(4 + RESET_ADDR),
rf2=expected)
expected_state.check(state)
@pytest.mark.parametrize('factory,expected', [(opcode_factory.and16, 5 & 7),
(opcode_factory.orr16, 5 | 7),
(opcode_factory.eor16, 5 ^ 7),
])
def test_execute_bitwise16(factory, expected):
state = new_state(rf0=5, rf1=7)
instr = factory(rd=2, rn=1, rm=0)
name, executefn = decode(instr)
executefn(state, Instruction(instr, None))
expected_state = StateChecker(AZ=0, AV=0, AC=0, pc=(2 + RESET_ADDR),
rf2=expected)
expected_state.check(state)
| bsd-3-clause | -798,977,002,079,429,200 | 44.406061 | 79 | 0.49693 | false |
nbari/my-sandbox | python/email/server.py | 1 | 1444 | import smtplib
import smtpd
import asyncore
import email.utils
from email.mime.text import MIMEText
import threading
class SMTPReceiver(smtpd.SMTPServer):
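    # Accept any incoming message and send a canned reply back to the sender
    # from a background thread.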
def process_message(self, peer, mailfrom, rcpttos, data):
print 'Receiving message from:', peer
print 'Message addressed from:', mailfrom
print 'Message addressed to :', rcpttos
print 'Message length :', len(data)
print data
def send_response():
msg = MIMEText('Hello world!')
msg['To'] = email.utils.formataddr(('Recipient', mailfrom))
msg['From'] = email.utils.formataddr(
('Author', '[email protected]'))
msg['Subject'] = ''
print 'Connecting to mail server'
server = smtplib.SMTP()
server.set_debuglevel(1)
server.connect()
print 'Attempting to send message'
try:
server.sendmail(
'[email protected]',
[mailfrom],
msg.as_string())
except Exception as ex:
print 'Could not send mail', ex
finally:
server.quit()
print 'Finished sending message'
threading.Thread(target=send_response).start()
return
def main():
server = SMTPReceiver(('', 2025), None)
asyncore.loop()
if __name__ == '__main__':
main()
| bsd-3-clause | -5,075,651,557,787,795,000 | 28.469388 | 71 | 0.549169 | false |
laurentb/weboob | modules/onlinenet/module.py | 1 | 2721 | # -*- coding: utf-8 -*-
# Copyright(C) 2016 Edouard Lambert
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from weboob.capabilities.bill import DocumentTypes, CapDocument, Subscription, Document, SubscriptionNotFound, DocumentNotFound
from weboob.capabilities.base import find_object, NotAvailable
from weboob.tools.backend import Module, BackendConfig
from weboob.tools.value import ValueBackendPassword, Value
from .browser import OnlinenetBrowser
__all__ = ['OnlinenetModule']
class OnlinenetModule(Module, CapDocument):
NAME = 'onlinenet'
DESCRIPTION = 'Online.net'
MAINTAINER = 'Edouard Lambert'
EMAIL = '[email protected]'
LICENSE = 'LGPLv3+'
VERSION = '2.1'
CONFIG = BackendConfig(
Value('login', label='Identifiant'),
ValueBackendPassword('password', label='Mot de passe'),
)
BROWSER = OnlinenetBrowser
accepted_document_types = (DocumentTypes.BILL, DocumentTypes.OTHER,)
def create_default_browser(self):
return self.create_browser(self.config['login'].get(), self.config['password'].get())
def iter_subscription(self):
return self.browser.get_subscription_list()
def get_subscription(self, _id):
return find_object(self.iter_subscription(), id=_id, error=SubscriptionNotFound)
def get_document(self, _id):
subid = _id.rsplit('_', 1)[0]
subscription = self.get_subscription(subid)
return find_object(self.iter_documents(subscription), id=_id, error=DocumentNotFound)
def iter_documents(self, subscription):
if not isinstance(subscription, Subscription):
subscription = self.get_subscription(subscription)
return self.browser.iter_documents(subscription)
def download_document(self, document):
if not isinstance(document, Document):
document = self.get_document(document)
if document._url is NotAvailable:
return
return self.browser.open(document._url).content
| lgpl-3.0 | -5,601,386,455,273,830,000 | 35.77027 | 127 | 0.714076 | false |
kanairen/RegularIcosahedronDict | src/map/factory/base_shape_map_factory.py | 1 | 4162 | #!/usr/bin/env python
# coding: utf-8
import numpy as np
from src.obj.obj3d import Obj3d
from src.obj.grid.base_grid import BaseGrid
class BaseShapeMapFactory(object):
DIST_UNDEFINED = -1
def __init__(self, model_id, obj3d, grid, n_div, cls, grid_scale):
"""
        :type model_id: int or long
        :param model_id: ID of the target 3D model
        :type obj3d: Obj3d
        :param obj3d: 3D object for which the shape map is generated
        :type grid: TriangleGrid
        :param grid: grid of equilateral triangles used to generate the shape map
        :type n_div: int or long
        :param n_div: number of divisions of the grid
        :type cls: int or long
        :param cls: class label
        :type grid_scale: float
        :param grid_scale: scale factor applied to the grid
"""
assert isinstance(model_id, (int, long))
assert isinstance(obj3d, Obj3d)
assert isinstance(grid, BaseGrid)
assert isinstance(cls, (int, long))
assert isinstance(grid_scale, float)
self.model_id = model_id
        # 3D model: move it to the origin of the coordinate system and normalize it
self.obj3d = obj3d.center().normal()
        # Regular-icosahedron grid: scale it so the 3D model is completely contained inside
self.grid = grid.center().scale(grid_scale).divide_face(n_div)
        # Unsupported if the distance from the center to the farthest point of the
        # 3D model exceeds the distance from the center to the nearest grid vertex
        # (as a rule, grid_scale should be set to 1 or more)
if np.linalg.norm(self.grid.vertices, axis=1).min() < np.linalg.norm(
self.obj3d.vertices, axis=1).max():
raise NotImplementedError()
        # class label
self.cls = cls
@staticmethod
def tomas_moller(origin, end, v0, v1, v2):
"""
        Tomas-Moller algorithm.
        Returns the intersection point of a line segment and a triangle,
        or None if they do not intersect.
        The determinants are replaced with cross/dot products.
        :type origin: np.ndarray
        :param origin: start point of the line segment
        :type end: np.ndarray
        :param end: end point of the line segment
        :type v0: np.ndarray
        :param v0: first vertex of the triangle
        :type v1: np.ndarray
        :param v1: second vertex of the triangle
        :type v2: np.ndarray
        :param v2: third vertex of the triangle
        :rtype: np.ndarray
        :return: intersection point vector
"""
edge1 = v1 - v0
edge2 = v2 - v0
ray = end - origin
P = np.cross(ray, edge2)
        # denominator
denominator = np.dot(P, edge1)
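        # A denominator <= eps means the segment is (near-)parallel to the
        # triangle plane or approaches from the back face; treat it as a miss.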
if denominator > np.finfo(float).eps:
T = origin - v0
u = np.dot(P, T)
if 0 <= u <= denominator:
Q = np.cross(T, edge1)
v = np.dot(Q, ray)
if 0 <= v <= denominator and (u + v) <= denominator:
t = np.dot(Q, edge2) / denominator
return origin + ray * t
return None
def create(self):
raise NotImplementedError
def _distances(self):
"""
        Return the map of distance values corresponding to the grid vertices.
"""
grid_center = np.zeros(shape=(3,))
        # Distance map; indices correspond to the grid vertices.
        # Where the distance is undefined (e.g. cavities), DIST_UNDEFINED is stored.
distance_map = np.full(shape=(len(self.grid.vertices)),
fill_value=BaseShapeMapFactory.DIST_UNDEFINED,
dtype=np.float64)
for i, g_vertex in enumerate(self.grid.vertices):
for f0, f1, f2 in self.obj3d.vertices[self.obj3d.face_vertices]:
p_cross = self.tomas_moller(grid_center, g_vertex, f0, f1, f2)
if p_cross is not None:
distance_map[i] = np.linalg.norm(p_cross - grid_center)
break
return distance_map
| mit | -6,878,317,469,387,684,000 | 24.382353 | 78 | 0.548378 | false |
ikben/troposphere | examples/WAF_Common_Attacks_Sample.py | 1 | 5554 | # Converted from AWS WAF Sample located at:
# https://s3.amazonaws.com/cloudformation-examples/community/common-attacks.json
from troposphere import (
Template,
Parameter,
Join,
Ref
)
from troposphere.waf import (
Rule,
SqlInjectionMatchSet,
WebACL,
SizeConstraintSet,
IPSet,
XssMatchSet,
Predicates,
SqlInjectionMatchTuples,
FieldToMatch,
Action,
Rules,
SizeConstraint,
XssMatchTuple
)
t = Template()
t.add_version("2010-09-09")
t.set_description(
"Creates an AWS WAF configuration that protects against common attacks"
)
WebACLName = t.add_parameter(Parameter(
"WebACLName",
Default="CommonAttackProtection",
Type="String",
Description="Enter the name you want to use for the WebACL. "
"This value is also added as a prefix for the names of the rules, "
"conditions, and CloudWatch metrics created by this template.",
))
SqliMatchSet = t.add_resource(SqlInjectionMatchSet(
"SqliMatchSet",
Name=Join("", [Ref(WebACLName), "SqliMatch"]),
SqlInjectionMatchTuples=[
SqlInjectionMatchTuples(
FieldToMatch=FieldToMatch(
Type="QUERY_STRING"
),
TextTransformation="URL_DECODE"
),
SqlInjectionMatchTuples(
FieldToMatch=FieldToMatch(
Type="QUERY_STRING"
),
TextTransformation="HTML_ENTITY_DECODE"
),
SqlInjectionMatchTuples(
FieldToMatch=FieldToMatch(
Type="BODY"
),
TextTransformation="URL_DECODE"
),
SqlInjectionMatchTuples(
FieldToMatch=FieldToMatch(
Type="BODY"
),
TextTransformation="HTML_ENTITY_DECODE"
),
SqlInjectionMatchTuples(
FieldToMatch=FieldToMatch(
Type="URI"
),
TextTransformation="URL_DECODE"
)
]
))
SqliRule = t.add_resource(Rule(
"SqliRule",
Predicates=[
Predicates(
DataId=Ref(SqliMatchSet),
Type="SqlInjectionMatch",
Negated=False
)
],
Name=Join("", [Ref(WebACLName), "SqliRule"]),
MetricName=Join("", [Ref(WebACLName), "SqliRule"]),
))
XssMatchSet = t.add_resource(XssMatchSet(
"XssMatchSet",
Name=Join("", [Ref(WebACLName), "XssMatch"]),
XssMatchTuples=[
XssMatchTuple(
FieldToMatch=FieldToMatch(
Type="QUERY_STRING",
),
TextTransformation="URL_DECODE"
),
XssMatchTuple(
FieldToMatch=FieldToMatch(
Type="QUERY_STRING",
),
TextTransformation="HTML_ENTITY_DECODE"
),
XssMatchTuple(
FieldToMatch=FieldToMatch(
Type="BODY",
),
TextTransformation="URL_DECODE"
),
XssMatchTuple(
FieldToMatch=FieldToMatch(
Type="BODY",
),
TextTransformation="HTML_ENTITY_DECODE"
),
XssMatchTuple(
FieldToMatch=FieldToMatch(
Type="URI",
),
TextTransformation="URL_DECODE"
)
]
))
XssRule = t.add_resource(Rule(
"XssRule",
Name=Join("", [Ref(WebACLName), "XssRule"]),
Predicates=[
Predicates(
DataId=Ref(XssMatchSet),
Type="XssMatch",
Negated=False
)
],
MetricName=Join("", [Ref(WebACLName), "XssRule"]),
))
WAFManualIPBlockSet = t.add_resource(IPSet(
"WAFManualIPBlockSet",
Name="Manual IP Block Set",
))
ManualIPBlockRule = t.add_resource(Rule(
"ManualIPBlockRule",
Name=Join("", [Ref(WebACLName), "ManualIPBlockRule"]),
MetricName=Join("", [Ref(WebACLName), "ManualIPBlockRule"]),
Predicates=[
Predicates(
DataId=Ref(WAFManualIPBlockSet),
Type="IPMatch",
Negated=False
)
]
))
SizeMatchSet = t.add_resource(SizeConstraintSet(
"SizeMatchSet",
Name=Join("", [Ref(WebACLName), "LargeBodyMatch"]),
SizeConstraints=[
SizeConstraint(
ComparisonOperator="GT",
TextTransformation="NONE",
FieldToMatch=FieldToMatch(
Type="BODY"
),
Size="8192"
)
]
))
SizeMatchRule = t.add_resource(Rule(
"SizeMatchRule",
Name=Join("", [Ref(WebACLName), "LargeBodyMatchRule"]),
MetricName=Join("", [Ref(WebACLName), "DetectLargeBody"]),
Predicates=[
Predicates(
DataId=Ref(SizeMatchSet),
Type="SizeConstraint",
Negated=False
)
]
))
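# The web ACL evaluates rules in priority order: manual IP block (1), large-body
# count (2), SQL injection block (3), XSS block (4); all other requests are allowed.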
MyWebACL = t.add_resource(WebACL(
"MyWebACL",
Name=Ref(WebACLName),
DefaultAction=Action(
Type="ALLOW"
),
Rules=[
Rules(
Action=Action(
Type="BLOCK"
),
Priority=1,
RuleId=Ref(ManualIPBlockRule)
),
Rules(
Action=Action(
Type="COUNT"
),
Priority=2,
RuleId=Ref(SizeMatchRule)
),
Rules(
Action=Action(
Type="BLOCK"
),
Priority=3,
RuleId=Ref(SqliRule)
),
Rules(
Action=Action(
Type="BLOCK"
),
Priority=4,
RuleId=Ref(XssRule)
)
],
MetricName=Ref(WebACLName),
))
print(t.to_json())
| bsd-2-clause | -8,663,083,156,962,772,000 | 23.359649 | 80 | 0.541232 | false |
jonlatorre/VideoCargador | video/models.py | 1 | 1791 | # encoding: utf-8
from django.db import models
import os
from mencoder import *
class Video(models.Model):
"""This is a small demo using just two fields. The slug field is really not
necessary, but makes the code simpler. ImageField depends on PIL or
    pillow (Pillow is easily installable in a virtualenv). If you have
    problems installing pillow, use a more generic FileField instead.
"""
file = models.FileField(upload_to="uploaded_videos")
slug = models.SlugField(max_length=50, blank=True)
mp4_encoded = models.BooleanField(default=False)
mp4_file = models.FileField(upload_to="converted_videos", blank=True)
mp4_url = models.BooleanField(default=False)
flv_encoded = models.BooleanField(default=False)
flv_file = models.FileField(upload_to="converted_videos", blank=True)
flv_url = models.BooleanField(default=False)
def __unicode__(self):
return self.file.name
@models.permalink
def get_absolute_url(self):
return ('video-new', )
def save(self, *args, **kwargs):
self.slug = self.file.name
super(Video, self).save(*args, **kwargs)
    def delete(self, *args, **kwargs):
        """Delete the model instance and remove its uploaded file."""
self.file.delete(False)
super(Video, self).delete(*args, **kwargs)
def encode_mp4(self):
        print "Converting to mp4"
destino = self.mp4_file.storage.base_location
destino = os.path.join(destino,"converted_videos")
ret,salida = call_mencoder_mp4(self.file.path,destino)
if ret == 0:
            print "Encoding OK"
self.mp4_file.name = "converted_videos/"+salida
self.mp4_encoded = True
self.save()
def upload_mp4(self):
        print "Uploading the MP4"
| mit | 5,213,071,513,827,455,000 | 35.55102 | 79 | 0.654941 | false |
mrshu/scikit-learn | examples/plot_permutation_test_for_classification.py | 1 | 2236 | """
=================================================================
Test with permutations the significance of a classification score
=================================================================
In order to test if a classification score is significant, a technique is to
repeat the classification procedure after randomizing (permuting) the labels.
The p-value is then given by the percentage of runs for which the score
obtained is greater than the classification score obtained in the first place.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD
print __doc__
import numpy as np
import pylab as pl
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, permutation_test_score
from sklearn import datasets
from sklearn.metrics import zero_one_score
##############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size
# Some noisy data not correlated
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))
# Add noisy data to the informative features to make the task harder
X = np.c_[X, E]
svm = SVC(kernel='linear')
cv = StratifiedKFold(y, 2)
score, permutation_scores, pvalue = permutation_test_score(
svm, X, y, zero_one_score, cv=cv, n_permutations=100, n_jobs=1)
print "Classification score %s (pvalue : %s)" % (score, pvalue)
###############################################################################
# View histogram of permutation scores
pl.hist(permutation_scores, 20, label='Permutation scores')
ylim = pl.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib
#pl.vlines(score, ylim[0], ylim[1], linestyle='--',
# color='g', linewidth=3, label='Classification Score'
# ' (pvalue %s)' % pvalue)
#pl.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
# color='k', linewidth=3, label='Luck')
pl.plot(2 * [score], ylim, '--g', linewidth=3,
label='Classification Score'
' (pvalue %s)' % pvalue)
pl.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
pl.ylim(ylim)
pl.legend()
pl.xlabel('Score')
pl.show()
| bsd-3-clause | 4,989,806,373,800,379,000 | 31.882353 | 79 | 0.61449 | false |
codebikeclimb/NASARobotComp | Robot2017_Master/Robot2016/motorTest.py | 1 | 2963 | #!/usr/bin/python
from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor
import serial
import time
import atexit
#initialize i2c communication with motor shield
roboMotor = Adafruit_MotorHAT(addr=0x60)
#initialize serial communications with XBee RF reciever
xBee = serial.Serial('/dev/ttyACM1',57600)
compass = serial.Serial('/dev/ttyACM0', 9600)
def turnOffMotors():
roboMotor.getMotor(3).run(Adafruit_MotorHAT.RELEASE)
roboMotor.getMotor(4).run(Adafruit_MotorHAT.RELEASE)
atexit.register(turnOffMotors)
#create motor objects
leftFrontRear = roboMotor.getMotor(3)
rightFrontRear = roboMotor.getMotor(4)
#set speed to start ---- 0(off) - 255(Max)
#beacon navigation
def beaconNavigation():
bHeadings = []
botHeadings = []
for x in range(0,2):
botHeading = compass.readline()
botHeading = float(botHeading)
botHeadings.append(botHeading)
print(botHeading)
beaconHeading = xBee.readline()
beaconHeading = float(beaconHeading)
bHeadings.append(beaconHeading)
print(beaconHeading)
botTotal = sum(botHeadings)
botLength = len(botHeadings)
avgBotHeading = botTotal / botLength
print "avg bot heading: ", avgBotHeading
total = sum(bHeadings)
l = len(bHeadings)
avgHeading = total / l
print "avg b heading: ", avgHeading
#calculate opposite heading
x = avgHeading + 180
oppositeHeading = x % 360
oppositeHeading = float(oppositeHeading)
print "opposite beacon heading: ", oppositeHeading
# while(botHeading <= oppositeHeading or botHeading >= oppositeHeading):
while(botHeading < oppositeHeading or botHeading > oppositeHeading + 1.0):
botHeading = compass.readline()
botHeading = float(botHeading)
print botHeading
# rightRotate()
forward()
# toTheBeacon()
#for x in range(0,20):
# heading = xBee.readline()
# botBearing = compass.readline()
# print(heading)
# print(botBearing)
#drive forwards
def forward():
# beaconNavigation()
while(True):
leftFrontRear.setSpeed(80)
rightFrontRear.setSpeed(80)
leftFrontRear.run(Adafruit_MotorHAT.FORWARD)
rightFrontRear.run(Adafruit_MotorHAT.FORWARD)
#drive backwards
def reverse():
rightFrontRear.setSpeed(150)
leftFrontRear.setSpeed(150)
rightFrontRear.run(Adafruit_MotorHAT.BACKWARD)
leftFrontRear.run(Adafruit_MotorHAT.BACKWARD)
#rotate left, rotate right
def leftRotate():
rightFrontRear.setSpeed(70)
rightFrontRear.run(Adafruit_MotorHAT.FORWARD)
def rightRotate():
leftFrontRear.setSpeed(90)
rightFrontRear.setSpeed(90)
leftFrontRear.run(Adafruit_MotorHAT.FORWARD)
rightFrontRear.run(Adafruit_MotorHAT.BACKWARD)
#turn left, turn right
def leftTurn():
rightFrontRear.setSpeed(200)
leftFrontRear.setSpeed(125)
rightFrontRear.run(Adafruit_MotorHAT.FORWARD)
leftFrontRear.run(Adafruit_MotorHAT.FORWARD)
def rightTurn():
rightFrontRear.setSpeed(150)
leftFrontRear.setSpeed(200)
leftFrontRear.run(Adafruit_MotorHAT.FORWARD)
rightFrontRear.run(Adafruit_MotorHAT.FORWARD)
beaconNavigation()
forward()
| gpl-3.0 | -6,703,005,234,935,376,000 | 21.792308 | 75 | 0.76949 | false |
rzinkstok/skymap | skymap/labeling/runner.py | 1 | 2495 | import time
import random
from PIL import Image, ImageDraw
from skymap.labeling.common import Point, BoundingBox, evaluate, POSITION_WEIGHT
from skymap.labeling.greedy import GreedyLabeler, AdvancedGreedyLabeler
from skymap.labeling.grasp import GraspLabeler
from skymap.labeling.genetic import GeneticLabeler, CachedGeneticLabeler
from deap import creator, base
def draw(points, width, height):
SCALE = 4
im = Image.new("RGB", (SCALE * width, SCALE * height), (255, 255, 255))
d = ImageDraw.Draw(im)
for p in points:
x = p.x * SCALE
y = (height - p.y) * SCALE
r = p.radius * SCALE
if p.label is None:
color = (200, 200, 200)
else:
color = "black"
d.ellipse([x - r, y - r, x + r, y + r], fill=color)
if p.label:
x1 = p.label.minx * SCALE
x2 = p.label.maxx * SCALE
y1 = (height - p.label.miny) * SCALE
y2 = (height - p.label.maxy) * SCALE
if p.label.penalty > POSITION_WEIGHT * p.label.position:
color = (256, 0, 0)
else:
color = (200, 200, 200)
d.rectangle((x1, y1, x2, y2), outline=color)
im.show()
if __name__ == "__main__":
print("Starting")
random.seed(1)
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)
npoints = 1000
nlabels = 200
mapwidth = 500
mapheight = 500
bounding_box = BoundingBox(0, 0, mapwidth, mapheight)
points = []
for i in range(npoints):
x = mapwidth * random.random()
y = mapheight * random.random()
if random.random() < float(nlabels) / npoints:
text = f"Label for point {i}"
p = Point(x, y, 1, text, 0)
else:
p = Point(x, y, 1)
points.append(p)
method = 5
if method == 1:
g = GreedyLabeler(points, bounding_box)
elif method == 2:
g = AdvancedGreedyLabeler(points, bounding_box)
elif method == 3:
g = GraspLabeler(points, bounding_box)
elif method == 4:
g = GeneticLabeler(points, bounding_box)
elif method == 5:
g = CachedGeneticLabeler(creator, points, bounding_box)
t1 = time.clock()
g.run()
t2 = time.clock()
print(f"Run time: {t2 - t1}")
penalty = evaluate(g.points, g.bounding_box)
print(f"Penalty: {penalty}")
# draw(points, mapwidth, mapheight)
| gpl-3.0 | -6,816,814,384,139,079,000 | 27.678161 | 80 | 0.578357 | false |
kazarinov/python-daemon-manager | tests/test_daemon.py | 1 | 2769 | # -*- coding: utf-8 -*-
import unittest
import subprocess
import shlex
class TestConsole(unittest.TestCase):
def call(self, command):
args = shlex.split(command)
command = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return command.stdout.read()
def command(self, daemon, command, args=None):
if args is None:
args = {}
options = ['--%s=%s' % (key, value) for key, value in args.iteritems()]
result = self.call('./run_daemon.sh {daemon} {options} {command}'.format(daemon=daemon, options=' '.join(options), command=command))
return result
class TestDaemon(TestConsole):
def test_start_stop(self):
result_start = self.command('simple.py', 'start')
result_status = self.command('simple.py', 'status')
self.assertRegexpMatches(result_status, r'^running \([0-9]+\)$')
result_stop = self.command('simple.py', 'stop')
result_status = self.command('simple.py', 'status')
self.assertEqual(result_status, 'stopped\n')
def test_restart(self):
result_start = self.command('simple.py', 'start')
result_status = self.command('simple.py', 'status')
self.assertRegexpMatches(result_status, r'^running \([0-9]+\)$')
result_restart = self.command('simple.py', 'restart')
result_status = self.command('simple.py', 'status')
self.assertRegexpMatches(result_status, r'^running \([0-9]+\)$')
result_stop = self.command('simple.py', 'stop')
result_status = self.command('simple.py', 'status')
self.assertEqual(result_status, 'stopped\n')
def test_start_stop_with_options(self):
args = {
'param': 'not default value'
}
result_start = self.command('simple.py', 'start', args=args)
result_status = self.command('simple.py', 'status')
self.assertRegexpMatches(result_status, r'^running \([0-9]+\)$')
result_stop = self.command('simple.py', 'stop')
result_status = self.command('simple.py', 'status')
self.assertEqual(result_status, 'stopped\n')
def test_reload(self):
result_start = self.command('simple.py', 'start')
result_status = self.command('simple.py', 'status')
self.assertRegexpMatches(result_status, r'^running \([0-9]+\)$')
result_restart = self.command('simple.py', 'reload')
result_status = self.command('simple.py', 'status')
self.assertRegexpMatches(result_status, r'^running \([0-9]+\)$')
result_stop = self.command('simple.py', 'stop')
result_status = self.command('simple.py', 'status')
self.assertEqual(result_status, 'stopped\n')
if __name__ == '__main__':
unittest.main() | lgpl-3.0 | -7,683,339,806,421,436,000 | 39.735294 | 140 | 0.616468 | false |
google/mannequinchallenge | loaders/aligned_data_loader.py | 1 | 1933 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.utils.data
from loaders import image_folder
class DAVISDataLoader():
def __init__(self, list_path, _batch_size):
dataset = image_folder.DAVISImageFolder(list_path=list_path)
self.data_loader = torch.utils.data.DataLoader(dataset,
batch_size=_batch_size,
shuffle=False,
num_workers=int(1))
self.dataset = dataset
def load_data(self):
return self.data_loader
def name(self):
return 'TestDataLoader'
def __len__(self):
return len(self.dataset)
class TUMDataLoader():
def __init__(self, opt, list_path, is_train, _batch_size, num_threads):
dataset = image_folder.TUMImageFolder(opt=opt, list_path=list_path)
self.data_loader = torch.utils.data.DataLoader(dataset,
batch_size=_batch_size,
shuffle=False,
num_workers=int(num_threads))
self.dataset = dataset
def load_data(self):
return self.data_loader
def name(self):
return 'TUMDataLoader'
def __len__(self):
return len(self.dataset)
| apache-2.0 | -2,236,632,270,222,474,800 | 34.796296 | 84 | 0.578893 | false |
xemul/p.haul | phaul/connection.py | 1 | 1171 | #
# p.haul connection module contain logic needed to establish connection
# between p.haul and p.haul-service.
#
import logging
import socket
import util
class connection(object):
"""p.haul connection
Class encapsulate connections reqired for p.haul work, including rpc socket
(socket for RPC calls), memory socket (socket for c/r images migration) and
module specific definition of fs channel needed for disk migration.
"""
def __init__(self, rpc_sk, mem_sk, fdfs):
self.rpc_sk = rpc_sk
self.mem_sk = mem_sk
self.fdfs = fdfs
def close(self):
self.rpc_sk.close()
self.mem_sk.close()
def establish(fdrpc, fdmem, fdfs):
"""Construct required socket objects from file descriptors
Expect that each file descriptor represent socket opened in blocking mode
with domain AF_INET and type SOCK_STREAM.
"""
logging.info(
"Use existing connections, fdrpc=%d fdmem=%d fdfs=%s", fdrpc,
fdmem, fdfs)
# Create rpc socket
rpc_sk = socket.fromfd(fdrpc, socket.AF_INET, socket.SOCK_STREAM)
util.set_cloexec(rpc_sk)
# Create memory socket
mem_sk = socket.fromfd(fdmem, socket.AF_INET, socket.SOCK_STREAM)
return connection(rpc_sk, mem_sk, fdfs)
| lgpl-2.1 | -7,130,818,278,763,077,000 | 23.914894 | 76 | 0.733561 | false |
AQORN/thunder-engine | thunder_web/api/views.py | 1 | 1791 | from django.shortcuts import render
from rest_framework import status
from rest_framework.decorators import api_view
#
from rest_framework.response import Response
from task.models import Task
from api.serializers import TaskSerializer
#
@api_view(['GET', 'POST'])
def task_list(request):
"""
List all tasks, or create a new task.
"""
if request.method == 'GET':
tasks = Task.objects.all()
print tasks.query
serializer = TaskSerializer(tasks, many=True)
print tasks
return Response(serializer.data)
elif request.method == 'POST':
serializer = TaskSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(
serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT', 'DELETE'])
#@permission_classes((IsAuthenticated, ))
def task_detail(request, pk):
"""
Get, udpate, or delete a specific task
"""
try:
task = Task.objects.get(pk=pk)
except Task.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = TaskSerializer(task)
return Response(serializer.data)
elif request.method == 'PUT':
serializer = TaskSerializer(task, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
else:
return Response(
serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
task.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
# Create your views here.
| gpl-3.0 | -5,743,528,586,565,458,000 | 27.428571 | 76 | 0.638749 | false |
PyBossa/pybossa | test/test_sched_depth_first_all.py | 1 | 48364 | # -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2015 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
import json
import random
from mock import patch
from helper import sched
from default import Test, db, with_context
from pybossa.model.task import Task
from pybossa.model.project import Project
from pybossa.model.user import User
from pybossa.model.task_run import TaskRun
from pybossa.model.category import Category
from pybossa.sched import get_depth_first_all_task
from pybossa.core import task_repo, project_repo
from factories import TaskFactory, ProjectFactory, TaskRunFactory, UserFactory
from factories import AnonymousTaskRunFactory, ExternalUidTaskRunFactory
from factories import reset_all_pk_sequences
import pybossa
class TestSched(sched.Helper):
endpoints = ['project', 'task', 'taskrun']
def get_headers_jwt(self, project):
"""Return headesr JWT token."""
# Get JWT token
url = 'api/auth/project/%s/token' % project.short_name
res = self.app.get(url, headers={'Authorization': project.secret_key})
authorization_token = 'Bearer %s' % res.data
return {'Authorization': authorization_token}
# Tests
@with_context
def test_anonymous_01_newtask(self):
""" Test SCHED newtask returns a Task for the Anonymous User"""
project = ProjectFactory.create(info=dict(sched='depth_first_all'))
TaskFactory.create_batch(2, project=project, info='hola')
res = self.app.get('api/project/%s/newtask' %project.id)
data = json.loads(res.data)
task_id = data['id']
assert data['info'] == 'hola', data
taskrun = dict(project_id=data['project_id'], task_id=data['id'], info="hola")
res = self.app.post('api/taskrun', data=json.dumps(taskrun))
res = self.app.get('api/project/%s/newtask' %project.id)
data = json.loads(res.data)
assert data['info'] == 'hola', data
assert data['id'] != task_id, data
@with_context
def test_anonymous_01_newtask_limits(self):
""" Test SCHED newtask returns a list of Tasks for the Anonymous User"""
project = ProjectFactory.create(info=dict(sched='depth_first_all'))
TaskFactory.create_batch(100, project=project, info='hola')
url = 'api/project/%s/newtask?limit=100' % project.id
res = self.app.get(url)
data = json.loads(res.data)
assert len(data) == 100
for t in data:
assert t['info'] == 'hola', t
task_ids = [task['id'] for task in data]
task_ids = set(task_ids)
assert len(task_ids) == 100, task_ids
url = 'api/project/%s/newtask?limit=200' % project.id
res = self.app.get(url)
data = json.loads(res.data)
assert len(data) == 100
for t in data:
assert t['info'] == 'hola', t
task_ids = [task['id'] for task in data]
task_ids = set(task_ids)
assert len(task_ids) == 100, task_ids
@with_context
def test_anonymous_02_gets_different_tasks(self):
""" Test SCHED newtask returns N different Tasks for the Anonymous User"""
assigned_tasks = []
# Get a Task until scheduler returns None
project = ProjectFactory.create(info=dict(sched='depth_first_all'))
tasks = TaskFactory.create_batch(3, project=project, info={})
res = self.app.get('api/project/%s/newtask' % project.id)
data = json.loads(res.data)
while data.get('info') is not None:
# Save the assigned task
assigned_tasks.append(data)
task = db.session.query(Task).get(data['id'])
# Submit an Answer for the assigned task
tr = AnonymousTaskRunFactory.create(project=project, task=task)
res = self.app.get('api/project/%s/newtask' %project.id)
data = json.loads(res.data)
# Check if we received the same number of tasks that the available ones
assert len(assigned_tasks) == len(tasks), len(assigned_tasks)
# Check if all the assigned Task.id are equal to the available ones
err_msg = "Assigned Task not found in DB Tasks"
for at in assigned_tasks:
assert self.is_task(at['id'], tasks), err_msg
# Check that there are no duplicated tasks
err_msg = "One Assigned Task is duplicated"
for at in assigned_tasks:
assert self.is_unique(at['id'], assigned_tasks), err_msg
@with_context
def test_anonymous_02_gets_different_tasks_limits(self):
""" Test SCHED newtask returns N different list of Tasks for the Anonymous User"""
assigned_tasks = []
# Get a Task until scheduler returns None
project = ProjectFactory.create(info=dict(sched='depth_first_all'))
tasks = TaskFactory.create_batch(10, project=project, info={})
res = self.app.get('api/project/%s/newtask?limit=5' % project.id)
data = json.loads(res.data)
while len(data) > 0:
# Save the assigned task
for t in data:
assigned_tasks.append(t)
task = db.session.query(Task).get(t['id'])
# Submit an Answer for the assigned task
tr = AnonymousTaskRunFactory.create(project=project, task=task)
res = self.app.get('api/project/%s/newtask?limit=5' % project.id)
data = json.loads(res.data)
# Check if we received the same number of tasks that the available ones
assert len(assigned_tasks) == len(tasks), len(assigned_tasks)
# Check if all the assigned Task.id are equal to the available ones
err_msg = "Assigned Task not found in DB Tasks"
for at in assigned_tasks:
assert self.is_task(at['id'], tasks), err_msg
# Check that there are no duplicated tasks
err_msg = "One Assigned Task is duplicated"
for at in assigned_tasks:
assert self.is_unique(at['id'], assigned_tasks), err_msg
@with_context
def test_external_uid_02_gets_different_tasks(self):
""" Test SCHED newtask returns N different Tasks
for a external User ID."""
assigned_tasks = []
# Get a Task until scheduler returns None
project = ProjectFactory.create(info=dict(sched='depth_first_all'))
tasks = TaskFactory.create_batch(3, project=project, info={})
headers = self.get_headers_jwt(project)
url = 'api/project/%s/newtask?external_uid=%s' % (project.id, '1xa')
res = self.app.get(url, headers=headers)
data = json.loads(res.data)
while data.get('info') is not None:
# Save the assigned task
assigned_tasks.append(data)
task = db.session.query(Task).get(data['id'])
# Submit an Answer for the assigned task
tr = ExternalUidTaskRunFactory.create(project=project, task=task)
res = self.app.get(url, headers=headers)
data = json.loads(res.data)
# Check if we received the same number of tasks that the available ones
assert len(assigned_tasks) == len(tasks), len(assigned_tasks)
# Check if all the assigned Task.id are equal to the available ones
err_msg = "Assigned Task not found in DB Tasks"
for at in assigned_tasks:
assert self.is_task(at['id'], tasks), err_msg
# Check that there are no duplicated tasks
err_msg = "One Assigned Task is duplicated"
for at in assigned_tasks:
assert self.is_unique(at['id'], assigned_tasks), err_msg
# Check that there are task runs saved with the external UID
answers = task_repo.filter_task_runs_by(external_uid='1xa')
print answers
err_msg = "There should be the same amount of task_runs than tasks"
assert len(answers) == len(assigned_tasks), err_msg
assigned_tasks_ids = sorted([at['id'] for at in assigned_tasks])
task_run_ids = sorted([a.task_id for a in answers])
err_msg = "There should be an answer for each assigned task"
assert assigned_tasks_ids == task_run_ids, err_msg
@with_context
def test_external_uid_02_gets_different_tasks_limits(self):
""" Test SCHED newtask returns N different list of Tasks
for a external User ID."""
assigned_tasks = []
# Get a Task until scheduler returns None
project = ProjectFactory.create(info=dict(sched='depth_first_all'))
tasks = TaskFactory.create_batch(10, project=project, info={})
headers = self.get_headers_jwt(project)
url = 'api/project/%s/newtask?limit=5&external_uid=%s' % (project.id, '1xa')
res = self.app.get(url, headers=headers)
data = json.loads(res.data)
while len(data) > 0 :
# Save the assigned task
for t in data:
assigned_tasks.append(t)
task = db.session.query(Task).get(t['id'])
# Submit an Answer for the assigned task
tr = ExternalUidTaskRunFactory.create(project=project, task=task)
res = self.app.get(url, headers=headers)
data = json.loads(res.data)
# Check if we received the same number of tasks that the available ones
assert len(assigned_tasks) == len(tasks), len(assigned_tasks)
# Check if all the assigned Task.id are equal to the available ones
err_msg = "Assigned Task not found in DB Tasks"
for at in assigned_tasks:
assert self.is_task(at['id'], tasks), err_msg
# Check that there are no duplicated tasks
err_msg = "One Assigned Task is duplicated"
for at in assigned_tasks:
assert self.is_unique(at['id'], assigned_tasks), err_msg
# Check that there are task runs saved with the external UID
answers = task_repo.filter_task_runs_by(external_uid='1xa')
print answers
err_msg = "There should be the same amount of task_runs than tasks"
assert len(answers) == len(assigned_tasks), err_msg
assigned_tasks_ids = sorted([at['id'] for at in assigned_tasks])
task_run_ids = sorted([a.task_id for a in answers])
err_msg = "There should be an answer for each assigned task"
assert assigned_tasks_ids == task_run_ids, err_msg
@with_context
def test_anonymous_03_respects_limit_tasks(self):
""" Test SCHED newtask respects the limit of 10 TaskRuns per Task"""
assigned_tasks = []
project = ProjectFactory.create(owner=UserFactory.create(id=500),
info=dict(sched='depth_first_all'))
user = UserFactory.create()
task = TaskFactory.create(project=project, n_answers=10)
tasks = get_depth_first_all_task(project.id, user.id)
assert len(tasks) == 1, len(tasks)
assert tasks[0].id == task.id, tasks
assert tasks[0].state == 'ongoing', tasks
for i in range(10):
tr = TaskRun(project_id=project.id,
task_id=task.id,
user_ip='127.0.0.%s' % i)
db.session.add(tr)
db.session.commit()
tasks = get_depth_first_all_task(project.id, user.id)
assert len(tasks) == 1, len(tasks)
assert tasks[0].id == task.id, tasks
assert tasks[0].state == 'completed', tasks
for i in range(10):
tasks = get_depth_first_all_task(project.id,
user_id=None,
user_ip='127.0.0.%s' % i)
assert len(tasks) == 0, tasks
tr = TaskRun(project_id=project.id,
task_id=task.id,
user_id=user.id)
db.session.add(tr)
db.session.commit()
tasks = get_depth_first_all_task(project.id, user.id)
assert len(tasks) == 0, tasks
@with_context
def test_anonymous_03_respects_limit_tasks_limits(self):
""" Test SCHED newtask limit respects the limit of 30 TaskRuns per Task using limits"""
assigned_tasks = []
user = UserFactory.create()
project = ProjectFactory.create(info=dict(sched='depth_first_all'))
orig_tasks = TaskFactory.create_batch(2, project=project, n_answers=5)
tasks = get_depth_first_all_task(project.id, user.id, limit=2)
assert len(tasks) == 2, len(tasks)
assert tasks[0].id == orig_tasks[0].id
assert tasks[1].id == orig_tasks[1].id
for i in range(5):
tr = TaskRun(project_id=project.id,
task_id=tasks[0].id,
user_ip='127.0.0.%s' % i)
db.session.add(tr)
db.session.commit()
# Task should be marked as completed, but as user has no
# participated it should get the completed one as well.
tasks = get_depth_first_all_task(project.id, user.id, limit=2,
orderby='id', desc=False)
assert len(tasks) == 2, len(tasks)
assert tasks[0].id == orig_tasks[0].id, tasks[0]
assert tasks[0].state == 'completed', tasks[0].state
assert len(tasks[0].task_runs) == 5
assert tasks[1].id == orig_tasks[1].id
assert tasks[1].state == 'ongoing', tasks[1].state
assert len(tasks[1].task_runs) == 0
# User contributes, so only one task should be returned
tr = TaskRun(project_id=project.id,
task_id=tasks[0].id,
user_id=user.id)
db.session.add(tr)
db.session.commit()
tasks = get_depth_first_all_task(project.id, user.id, limit=2,
orderby='id', desc=False)
assert len(tasks) == 1, len(tasks)
assert tasks[0].id == orig_tasks[1].id, tasks[0]
assert tasks[0].state == 'ongoing', tasks[0].state
assert len(tasks[0].task_runs) == 0
@with_context
def test_external_uid_03_respects_limit_tasks(self):
""" Test SCHED newtask external uid respects the limit of 30 TaskRuns per Task for
external user id"""
assigned_tasks = []
project = ProjectFactory.create(info=dict(sched='depth_first_all'),
owner=UserFactory.create(id=500))
user = UserFactory.create()
task = TaskFactory.create(project=project, n_answers=10)
uid = '1xa'
tasks = get_depth_first_all_task(project.id, external_uid=uid)
assert len(tasks) == 1, len(tasks)
assert tasks[0].id == task.id, tasks
assert tasks[0].state == 'ongoing', tasks
# Add taskruns
for i in range(10):
tr = TaskRun(project_id=project.id,
task_id=task.id,
user_ip='127.0.0.%s' % i)
db.session.add(tr)
db.session.commit()
tasks = get_depth_first_all_task(project.id, external_uid=uid)
assert len(tasks) == 1, len(tasks)
assert tasks[0].id == task.id, tasks
assert tasks[0].state == 'completed', tasks
assert len(tasks[0].task_runs) == 10, tasks
url = 'api/project/%s/newtask?external_uid=%s' % (project.id,
uid)
headers = self.get_headers_jwt(project)
res = self.app.get(url, headers=headers)
data = json.loads(res.data)
assert data['id'] == task.id
assert data['state'] == 'completed'
tr = TaskRun(project_id=project.id,
task_id=task.id,
external_uid=uid)
db.session.add(tr)
db.session.commit()
tasks = get_depth_first_all_task(project.id, external_uid=uid)
assert len(tasks) == 0, len(tasks)
res = self.app.get(url, headers=headers)
data = json.loads(res.data)
assert len(data) == 0, data
@with_context
def test_external_uid_03_respects_limit_tasks_limits(self):
""" Test SCHED newtask external uid limits respects the limit of 30 TaskRuns per list of Tasks for
external user id"""
# Get Task until scheduler returns None
project = ProjectFactory.create(info=dict(sched='depth_first_all'))
orig_tasks = TaskFactory.create_batch(2, project=project, n_answers=5)
headers = self.get_headers_jwt(project)
uid = '1xa'
url = 'api/project/%s/newtask?external_uid=%s&limit=2' % (project.id,
uid)
tasks = get_depth_first_all_task(project.id, external_uid=uid, limit=2)
assert len(tasks) == 2, len(tasks)
assert tasks[0].id == orig_tasks[0].id, tasks
assert tasks[0].state == 'ongoing', tasks
assert tasks[1].id == orig_tasks[1].id, tasks
assert tasks[1].state == 'ongoing', tasks
# Add taskruns
for i in range(5):
tr = TaskRun(project_id=project.id,
task_id=tasks[0].id,
user_ip='127.0.0.%s' % i)
db.session.add(tr)
db.session.commit()
tasks = get_depth_first_all_task(project.id, external_uid=uid, limit=2,
orderby='id', desc=False)
assert len(tasks) == 2, len(tasks)
assert tasks[0].id == orig_tasks[0].id, tasks
assert tasks[0].state == 'completed', tasks
assert len(tasks[0].task_runs) == 5, tasks
assert tasks[1].id == orig_tasks[1].id, tasks
assert tasks[1].state == 'ongoing', tasks
assert len(tasks[1].task_runs) == 0, tasks
url = 'api/project/%s/newtask?external_uid=%s&limit=2&orderby=id&desc=False' % (project.id,uid)
headers = self.get_headers_jwt(project)
res = self.app.get(url, headers=headers)
data = json.loads(res.data)
assert data[0]['id'] == orig_tasks[0].id
assert data[0]['state'] == 'completed'
assert data[1]['id'] == orig_tasks[1].id
assert data[1]['state'] == 'ongoing'
tr = TaskRun(project_id=project.id,
task_id=tasks[0].id,
external_uid=uid)
db.session.add(tr)
db.session.commit()
tasks = get_depth_first_all_task(project.id, external_uid=uid,
limit=2, orderby='id', desc=False)
assert len(tasks) == 1, len(tasks)
assert tasks[0].id == orig_tasks[1].id
assert tasks[0].state == 'ongoing'
res = self.app.get(url, headers=headers)
data = json.loads(res.data)
assert data['id'] == orig_tasks[1].id
assert data['state'] == 'ongoing'
@with_context
def test_newtask_default_orderby(self):
"""Test SCHED depth first works with orderby."""
project = ProjectFactory.create(info=dict(sched="depth_first_all"))
task1 = TaskFactory.create(project=project, fav_user_ids=None)
task2 = TaskFactory.create(project=project, fav_user_ids=[1,2,3])
url = "/api/project/%s/newtask?orderby=%s&desc=%s" % (project.id, 'id', False)
res = self.app.get(url)
data = json.loads(res.data)
assert data['id'] == task1.id, data
url = "/api/project/%s/newtask?orderby=%s&desc=%s" % (project.id, 'id', True)
res = self.app.get(url)
data = json.loads(res.data)
assert data['id'] == task2.id, data
url = "/api/project/%s/newtask?orderby=%s&desc=%s" % (project.id, 'created', False)
res = self.app.get(url)
data = json.loads(res.data)
assert data['id'] == task1.id, data
url = "/api/project/%s/newtask?orderby=%s&desc=%s" % (project.id, 'created', True)
res = self.app.get(url)
data = json.loads(res.data)
assert data['id'] == task2.id, data
url = "/api/project/%s/newtask?orderby=%s&desc=%s" % (project.id, 'fav_user_ids', False)
res = self.app.get(url)
data = json.loads(res.data)
assert data['id'] == task1.id, data
url = "/api/project/%s/newtask?orderby=%s&desc=%s" % (project.id, 'fav_user_ids', True)
res = self.app.get(url)
data = json.loads(res.data)
assert data['id'] == task2.id, data
assert data['fav_user_ids'] == task2.fav_user_ids, data
@with_context
def test_user_01_newtask(self):
""" Test SCHED newtask returns a Task for John Doe User"""
project = ProjectFactory.create(info=dict(sched='depth_first_all'),
owner=UserFactory.create(id=500))
TaskFactory.create_batch(2, project=project, n_answers=2)
# Register
self.register()
self.signin()
url = 'api/project/%s/newtask' % project.id
res = self.app.get(url)
data = json.loads(res.data)
task_id = data['id']
assert data['id'], data
taskrun = dict(project_id=data['project_id'], task_id=data['id'], info="hola")
res = self.app.post('api/taskrun', data=json.dumps(taskrun))
res = self.app.get(url)
data = json.loads(res.data)
assert data['id'], data
assert data['id'] != task_id, data
self.signout()
@with_context
def test_user_01_newtask_limits(self):
""" Test SCHED newtask returns a Task for John Doe User with limits"""
self.register()
self.signin()
project = ProjectFactory.create(info=dict(sched='depth_first_all'),
owner=UserFactory.create(id=500))
tasks = TaskFactory.create_batch(10, project=project, info=dict(foo=1))
# Register
url = 'api/project/%s/newtask?limit=2' % project.id
res = self.app.get(url)
data = json.loads(res.data)
assert len(data) == 2, data
for t in data:
assert t['info']['foo'] == 1, t
self.signout()
@with_context
def test_user_02_gets_different_tasks(self):
""" Test SCHED newtask returns N different Tasks for John Doe User"""
project = ProjectFactory.create(info=dict(sched='depth_first_all'),
owner=UserFactory.create(id=500))
TaskFactory.create_batch(10, project=project)
# Register
self.register()
self.signin()
assigned_tasks = []
# Get Task until scheduler returns None
url = 'api/project/%s/newtask' % project.id
res = self.app.get(url)
data = json.loads(res.data)
while data.get('id') is not None:
# Check that we received a Task
assert data.get('id'), data
# Save the assigned task
assigned_tasks.append(data)
# Submit an Answer for the assigned task
tr = dict(project_id=data['project_id'], task_id=data['id'],
info={'answer': 'No'})
tr = json.dumps(tr)
self.app.post('/api/taskrun', data=tr)
res = self.app.get(url)
data = json.loads(res.data)
# Check if we received the same number of tasks that the available ones
tasks = db.session.query(Task).filter_by(project_id=1).all()
assert len(assigned_tasks) == len(tasks), assigned_tasks
# Check if all the assigned Task.id are equal to the available ones
tasks = db.session.query(Task).filter_by(project_id=1).all()
err_msg = "Assigned Task not found in DB Tasks"
for at in assigned_tasks:
assert self.is_task(at['id'], tasks), err_msg
# Check that there are no duplicated tasks
err_msg = "One Assigned Task is duplicated"
for at in assigned_tasks:
assert self.is_unique(at['id'], assigned_tasks), err_msg
@with_context
def test_user_02_gets_different_tasks_limit(self):
""" Test SCHED newtask returns N different list of Tasks for John Doe User"""
# Register
self.register()
self.signin()
project = ProjectFactory.create(info=dict(sched='depth_first_all'),
owner=UserFactory.create(id=500))
TaskFactory.create_batch(10, project=project)
assigned_tasks = []
# Get Task until scheduler returns None
url = 'api/project/%s/newtask?limit=5' % project.id
res = self.app.get(url)
data = json.loads(res.data)
while len(data) > 0:
# Check that we received a Task
for t in data:
assert t.get('id'), t
# Save the assigned task
assigned_tasks.append(t)
# Submit an Answer for the assigned task
tr = dict(project_id=t['project_id'], task_id=t['id'],
info={'answer': 'No'})
tr = json.dumps(tr)
self.app.post('/api/taskrun', data=tr)
res = self.app.get(url)
data = json.loads(res.data)
# Check if we received the same number of tasks that the available ones
tasks = db.session.query(Task).filter_by(project_id=1).all()
assert len(assigned_tasks) == len(tasks), assigned_tasks
# Check if all the assigned Task.id are equal to the available ones
tasks = db.session.query(Task).filter_by(project_id=1).all()
err_msg = "Assigned Task not found in DB Tasks"
for at in assigned_tasks:
assert self.is_task(at['id'], tasks), err_msg
# Check that there are no duplicated tasks
err_msg = "One Assigned Task is duplicated"
for at in assigned_tasks:
assert self.is_unique(at['id'], assigned_tasks), err_msg
@with_context
def test_user_03_respects_limit_tasks(self):
""" Test SCHED newtask respects the limit of 30 TaskRuns per Task"""
project = ProjectFactory.create(info=dict(sched='depth_first_all'),
owner=UserFactory.create(id=500))
orig_tasks = TaskFactory.create_batch(1, project=project, n_answers=10)
user = UserFactory.create()
tasks = get_depth_first_all_task(project.id, user.id)
assert len(tasks) == 1, len(tasks)
assert tasks[0].id == orig_tasks[0].id, tasks
assert tasks[0].state == 'ongoing', tasks
for i in range(10):
tr = TaskRun(project_id=project.id,
task_id=orig_tasks[0].id,
user_ip='127.0.0.%s' % i)
db.session.add(tr)
db.session.commit()
tasks = get_depth_first_all_task(project.id, user.id)
assert len(tasks) == 1, len(tasks)
assert tasks[0].id == orig_tasks[0].id, tasks
assert tasks[0].state == 'completed', tasks
assert len(tasks[0].task_runs) == 10, tasks
tr = TaskRun(project_id=project.id,
task_id=orig_tasks[0].id,
user_id=user.id)
db.session.add(tr)
db.session.commit()
tasks = get_depth_first_all_task(project.id, user.id)
assert len(tasks) == 0, tasks
@with_context
def test_user_03_respects_limit_tasks_limit(self):
""" Test SCHED limit arg newtask respects the limit of 30 TaskRuns per list of Tasks"""
# Del previous TaskRuns
assigned_tasks = []
project = ProjectFactory.create(info=dict(sched='depth_first_all'),
owner=UserFactory.create(id=500))
user = UserFactory.create()
orig_tasks = TaskFactory.create_batch(2, project=project, n_answers=10)
tasks = get_depth_first_all_task(project.id, user.id,
limit=2, orderby='id',
desc=False)
assert len(tasks) == 2, len(tasks)
assert tasks[0].id == orig_tasks[0].id, tasks
assert tasks[0].state == 'ongoing', tasks
assert tasks[1].id == orig_tasks[1].id, tasks
assert tasks[1].state == 'ongoing', tasks
for i in range(10):
tr = TaskRun(project_id=project.id,
task_id=tasks[0].id,
user_ip='127.0.0.%s' % i)
db.session.add(tr)
db.session.commit()
tasks = get_depth_first_all_task(project.id, user.id,
limit=2, orderby='id',
desc=False)
assert len(tasks) == 2, len(tasks)
assert tasks[0].id == orig_tasks[0].id, tasks
assert tasks[0].state == 'completed', tasks
assert len(tasks[0].task_runs) == 10, tasks
assert tasks[1].id == orig_tasks[1].id, tasks
assert tasks[1].state == 'ongoing', tasks
assert len(tasks[1].task_runs) == 0, tasks
tr = TaskRun(project_id=project.id,
task_id=tasks[0].id,
user_id=user.id)
db.session.add(tr)
db.session.commit()
tasks = get_depth_first_all_task(project.id, user.id,
limit=2, orderby='id',
desc=False)
assert len(tasks) == 1, tasks
assert tasks[0].id == orig_tasks[1].id
assert tasks[0].state == 'ongoing'
@with_context
def test_task_preloading(self):
"""Test TASK Pre-loading works"""
# Del previous TaskRuns
project = ProjectFactory.create(info=dict(sched='depth_first_all'),
owner=UserFactory.create(id=500))
TaskFactory.create_batch(10, project=project)
# Register
self.register()
self.signin()
assigned_tasks = []
# Get Task until scheduler returns None
url = 'api/project/%s/newtask' % project.id
res = self.app.get(url)
task1 = json.loads(res.data)
# Check that we received a Task
assert task1.get('id'), task1
# Pre-load the next task for the user
res = self.app.get(url + '?offset=1')
task2 = json.loads(res.data)
# Check that we received a Task
assert task2.get('id'), task2
# Check that both tasks are different
assert task1.get('id') != task2.get('id'), "Tasks should be different"
## Save the assigned task
assigned_tasks.append(task1)
assigned_tasks.append(task2)
# Submit an Answer for the assigned and pre-loaded task
for t in assigned_tasks:
tr = dict(project_id=t['project_id'], task_id=t['id'], info={'answer': 'No'})
tr = json.dumps(tr)
self.app.post('/api/taskrun', data=tr)
# Get two tasks again
res = self.app.get(url)
task3 = json.loads(res.data)
# Check that we received a Task
assert task3.get('id'), task1
# Pre-load the next task for the user
res = self.app.get(url + '?offset=1')
task4 = json.loads(res.data)
# Check that we received a Task
assert task4.get('id'), task2
# Check that both tasks are different
assert task3.get('id') != task4.get('id'), "Tasks should be different"
assert task1.get('id') != task3.get('id'), "Tasks should be different"
assert task2.get('id') != task4.get('id'), "Tasks should be different"
# Check that a big offset returns None
res = self.app.get(url + '?offset=11')
assert json.loads(res.data) == {}, res.data
@with_context
def test_task_preloading_limit(self):
"""Test TASK Pre-loading with limit works"""
# Register
project = ProjectFactory.create(info=dict(sched='depth_first_all'),
owner=UserFactory.create(id=500))
TaskFactory.create_batch(10, project=project)
self.register()
self.signin()
assigned_tasks = []
url = 'api/project/%s/newtask?limit=2' % project.id
res = self.app.get(url)
tasks1 = json.loads(res.data)
# Check that we received a Task
for t in tasks1:
assert t.get('id'), t
# Pre-load the next tasks for the user
res = self.app.get(url + '&offset=2')
tasks2 = json.loads(res.data)
# Check that we received a Task
for t in tasks2:
assert t.get('id'), t
# Check that both tasks are different
tasks1_ids = set([t['id'] for t in tasks1])
tasks2_ids = set([t['id'] for t in tasks2])
assert len(tasks1_ids.union(tasks2_ids)) == 4, "Tasks should be different"
## Save the assigned task
for t in tasks1:
assigned_tasks.append(t)
for t in tasks2:
assigned_tasks.append(t)
# Submit an Answer for the assigned and pre-loaded task
for t in assigned_tasks:
tr = dict(project_id=t['project_id'], task_id=t['id'], info={'answer': 'No'})
tr = json.dumps(tr)
self.app.post('/api/taskrun', data=tr)
# Get two tasks again
res = self.app.get(url)
tasks3 = json.loads(res.data)
# Check that we received a Task
for t in tasks3:
assert t.get('id'), t
# Pre-load the next task for the user
res = self.app.get(url + '&offset=2')
tasks4 = json.loads(res.data)
# Check that we received a Task
for t in tasks4:
assert t.get('id'), t
# Check that both tasks are different
tasks3_ids = set([t['id'] for t in tasks3])
tasks4_ids = set([t['id'] for t in tasks4])
assert len(tasks3_ids.union(tasks4_ids)) == 4, "Tasks should be different"
# Check that a big offset returns None
res = self.app.get(url + '&offset=11')
assert json.loads(res.data) == {}, res.data
@with_context
def test_task_preloading_external_uid(self):
"""Test TASK Pre-loading for external user IDs works"""
project = ProjectFactory.create(info=dict(sched='depth_first_all'),
owner=UserFactory.create(id=500))
TaskFactory.create_batch(10, project=project)
assigned_tasks = []
# Get Task until scheduler returns None
project = project_repo.get(1)
headers = self.get_headers_jwt(project)
url = 'api/project/%s/newtask?external_uid=2xb' % project.id
res = self.app.get(url, headers=headers)
task1 = json.loads(res.data)
# Check that we received a Task
assert task1.get('id'), task1
# Pre-load the next task for the user
res = self.app.get(url + '&offset=1', headers=headers)
task2 = json.loads(res.data)
# Check that we received a Task
assert task2.get('id'), task2
# Check that both tasks are different
assert task1.get('id') != task2.get('id'), "Tasks should be different"
## Save the assigned task
assigned_tasks.append(task1)
assigned_tasks.append(task2)
# Submit an Answer for the assigned and pre-loaded task
for t in assigned_tasks:
tr = dict(project_id=t['project_id'],
task_id=t['id'], info={'answer': 'No'},
external_uid='2xb')
tr = json.dumps(tr)
res = self.app.post('/api/taskrun?external_uid=2xb',
data=tr, headers=headers)
# Get two tasks again
res = self.app.get(url, headers=headers)
task3 = json.loads(res.data)
# Check that we received a Task
assert task3.get('id'), task1
# Pre-load the next task for the user
res = self.app.get(url + '&offset=1', headers=headers)
task4 = json.loads(res.data)
# Check that we received a Task
assert task4.get('id'), task2
# Check that both tasks are different
assert task3.get('id') != task4.get('id'), "Tasks should be different"
assert task1.get('id') != task3.get('id'), "Tasks should be different"
assert task2.get('id') != task4.get('id'), "Tasks should be different"
# Check that a big offset returns None
res = self.app.get(url + '&offset=11', headers=headers)
assert json.loads(res.data) == {}, res.data
@with_context
def test_task_preloading_external_uid_limit(self):
"""Test TASK Pre-loading for external user IDs works with limit"""
# Del previous TaskRuns
project = ProjectFactory.create(info=dict(sched='depth_first_all'),
owner=UserFactory.create(id=500))
TaskFactory.create_batch(10, project=project)
assigned_tasks = []
# Get Task until scheduler returns None
headers = self.get_headers_jwt(project)
url = 'api/project/%s/newtask?external_uid=2xb&limit=2' % project.id
res = self.app.get(url, headers=headers)
tasks1 = json.loads(res.data)
# Check that we received a Task
for t in tasks1:
assert t.get('id'), task1
# Pre-load the next task for the user
res = self.app.get(url + '&offset=2', headers=headers)
tasks2 = json.loads(res.data)
# Check that we received a Task
for t in tasks2:
assert t.get('id'), t
# Check that both tasks are different
tasks1_ids = set([task['id'] for task in tasks1])
tasks2_ids = set([task['id'] for task in tasks2])
assert len(tasks1_ids.union(tasks2_ids)) == 4, "Tasks should be different"
## Save the assigned task
for t in tasks1:
assigned_tasks.append(t)
for t in tasks2:
assigned_tasks.append(t)
# Submit an Answer for the assigned and pre-loaded task
for t in assigned_tasks:
tr = dict(project_id=t['project_id'],
task_id=t['id'], info={'answer': 'No'},
external_uid='2xb')
tr = json.dumps(tr)
res = self.app.post('/api/taskrun?external_uid=2xb',
data=tr, headers=headers)
# Get two tasks again
res = self.app.get(url, headers=headers)
tasks3 = json.loads(res.data)
# Check that we received a Task
for t in tasks3:
assert t.get('id'), t
# Pre-load the next task for the user
res = self.app.get(url + '&offset=2', headers=headers)
tasks4 = json.loads(res.data)
# Check that we received a Task
for t in tasks4:
assert t.get('id'), t
# Check that both tasks are different
tasks3_ids = set([task['id'] for task in tasks3])
tasks4_ids = set([task['id'] for task in tasks4])
assert len(tasks3_ids.union(tasks4_ids)) == 4, "Tasks should be different"
# Check that a big offset returns None
res = self.app.get(url + '&offset=11', headers=headers)
assert json.loads(res.data) == {}, res.data
@with_context
def test_task_priority(self):
"""Test SCHED respects priority_0 field"""
project = ProjectFactory.create(info=dict(sched='depth_first_all'),
owner=UserFactory.create(id=500))
TaskFactory.create_batch(10, project=project)
# Register
self.register()
self.signin()
# By default, tasks without priority should be ordered by task.id (FIFO)
tasks = db.session.query(Task).filter_by(project_id=1).order_by('id').all()
url = 'api/project/%s/newtask' % project.id
res = self.app.get(url)
task1 = json.loads(res.data)
# Check that we received a Task
err_msg = "Task.id should be the same"
assert task1.get('id') == tasks[0].id, err_msg
# Now let's change the priority to a random task
import random
t = random.choice(tasks)
# Increase priority to maximum
t.priority_0 = 1
db.session.add(t)
db.session.commit()
# Request again a new task
res = self.app.get(url + '?orderby=priority_0&desc=true')
task1 = json.loads(res.data)
# Check that we received a Task
err_msg = "Task.id should be the same"
assert task1.get('id') == t.id, err_msg
err_msg = "Task.priority_0 should be the 1"
assert task1.get('priority_0') == 1, err_msg
@with_context
def test_task_priority_limit(self):
"""Test SCHED respects priority_0 field with limit"""
project = ProjectFactory.create(info=dict(sched='depth_first_all'),
owner=UserFactory.create(id=500))
TaskFactory.create_batch(10, project=project)
# Register
self.register()
self.signin()
# By default, tasks without priority should be ordered by task.id (FIFO)
tasks = db.session.query(Task).filter_by(project_id=project.id).order_by('id').all()
url = 'api/project/%s/newtask?limit=2' % project.id
res = self.app.get(url)
tasks1 = json.loads(res.data)
# Check that we received a Task
err_msg = "Task.id should be the same"
assert tasks1[0].get('id') == tasks[0].id, err_msg
# Now let's change the priority to a random task
import random
t = random.choice(tasks)
# Increase priority to maximum
t.priority_0 = 1
db.session.add(t)
db.session.commit()
# Request again a new task
res = self.app.get(url + '&orderby=priority_0&desc=true')
tasks1 = json.loads(res.data)
# Check that we received a Task
err_msg = "Task.id should be the same"
assert tasks1[0].get('id') == t.id, (err_msg, tasks1[0])
err_msg = "Task.priority_0 should be the 1"
assert tasks1[0].get('priority_0') == 1, err_msg
@with_context
def test_task_priority_external_uid(self):
"""Test SCHED respects priority_0 field for externa uid"""
project = ProjectFactory.create(info=dict(sched='depth_first_all'),
owner=UserFactory.create(id=500))
TaskFactory.create_batch(10, project=project)
# By default, tasks without priority should be ordered by task.id (FIFO)
tasks = db.session.query(Task).filter_by(project_id=1).order_by('id').all()
project = project_repo.get(1)
headers = self.get_headers_jwt(project)
url = 'api/project/%s/newtask?external_uid=342' % project.id
res = self.app.get(url, headers=headers)
task1 = json.loads(res.data)
# Check that we received a Task
err_msg = "Task.id should be the same"
assert task1.get('id') == tasks[0].id, err_msg
# Now let's change the priority to a random task
import random
t = random.choice(tasks)
# Increase priority to maximum
t.priority_0 = 1
db.session.add(t)
db.session.commit()
# Request again a new task
res = self.app.get(url + '&orderby=priority_0&desc=true', headers=headers)
task1 = json.loads(res.data)
# Check that we received a Task
err_msg = "Task.id should be the same"
assert task1.get('id') == t.id, (err_msg, task1, t)
err_msg = "Task.priority_0 should be the 1"
assert task1.get('priority_0') == 1, err_msg
@with_context
def test_task_priority_external_uid_limit(self):
"""Test SCHED respects priority_0 field for externa uid with limit"""
project = ProjectFactory.create(info=dict(sched='depth_first_all'),
owner=UserFactory.create(id=500))
TaskFactory.create_batch(10, project=project)
# By default, tasks without priority should be ordered by task.id (FIFO)
tasks = db.session.query(Task).filter_by(project_id=project.id).order_by('id').all()
headers = self.get_headers_jwt(project)
url = 'api/project/%s/newtask?external_uid=342&limit=2' % project.id
res = self.app.get(url, headers=headers)
tasks1 = json.loads(res.data)
# Check that we received a Task
err_msg = "Task.id should be the same"
assert tasks1[0].get('id') == tasks[0].id, err_msg
# Now let's change the priority to a random task
import random
t = random.choice(tasks)
# Increase priority to maximum
t.priority_0 = 1
db.session.add(t)
db.session.commit()
# Request again a new task
res = self.app.get(url + '&orderby=priority_0&desc=true', headers=headers)
tasks1 = json.loads(res.data)
# Check that we received a Task
err_msg = "Task.id should be the same"
assert tasks1[0].get('id') == t.id, err_msg
err_msg = "Task.priority_0 should be the 1"
assert tasks1[0].get('priority_0') == 1, err_msg
def _add_task_run(self, app, task, user=None):
tr = AnonymousTaskRunFactory.create(project=app, task=task)
@with_context
def test_no_more_tasks(self):
"""Test that a users gets always tasks"""
owner = UserFactory.create()
project = ProjectFactory.create(info=dict(sched='depth_first_all'),
owner=owner,
short_name='egil',
name='egil',
description='egil')
project_id = project.id
all_tasks = TaskFactory.create_batch(20, project=project, n_answers=10)
for t in all_tasks[0:10]:
TaskRunFactory.create_batch(10, task=t, project=project)
tasks = db.session.query(Task).filter_by(project_id=project.id, state='ongoing').all()
assert tasks[0].n_answers == 10
url = 'api/project/%s/newtask' % project.id
res = self.app.get(url)
data = json.loads(res.data)
err_msg = "User should get a task"
assert 'project_id' in data.keys(), err_msg
assert data['project_id'] == project_id, err_msg
assert data['id'] == all_tasks[0].id, err_msg
assert data['state'] == 'completed', err_msg
@with_context
def test_no_more_tasks_limit(self):
"""Test that a users gets always tasks with limit"""
owner = UserFactory.create()
project = ProjectFactory.create(info=dict(sched='depth_first_all'),
owner=owner,
short_name='egil',
name='egil',
description='egil')
project_id = project.id
all_tasks = TaskFactory.create_batch(20, project=project, n_answers=10)
for t in all_tasks[0:10]:
TaskRunFactory.create_batch(10, task=t, project=project)
tasks = db.session.query(Task).filter_by(project_id=project.id, state='ongoing').all()
assert tasks[0].n_answers == 10
url = 'api/project/%s/newtask?limit=2&orderby=id' % project_id
res = self.app.get(url)
data = json.loads(res.data)
err_msg = "User should get a task"
i = 0
for t in data:
print t['id']
assert 'project_id' in t.keys(), err_msg
assert t['project_id'] == project_id, err_msg
assert t['id'] == all_tasks[i].id, (err_msg, t, all_tasks[i].id)
assert t['state'] == 'completed', err_msg
i += 1
| agpl-3.0 | 3,603,119,875,285,052,000 | 39.744735 | 106 | 0.57549 | false |
alxnov/ansible-modules-core | cloud/amazon/ec2_vol.py | 1 | 22132 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vol
short_description: create and attach a volume, return volume id and device map
description:
    - creates an EBS volume and optionally attaches it to an instance. If both an instance ID and a device name are given and the instance already has a device at that name, then no volume is created and no attachment is made. This module has a dependency on python-boto.
version_added: "1.1"
options:
instance:
description:
      - instance ID if you wish to attach the volume. Since 1.9 you can set it to None to detach the volume.
required: false
default: null
name:
description:
- volume Name tag if you wish to attach an existing volume (requires instance)
required: false
default: null
version_added: "1.6"
id:
description:
- volume id if you wish to attach an existing volume (requires instance) or remove an existing volume
required: false
default: null
version_added: "1.6"
volume_size:
description:
- size of volume (in GB) to create.
required: false
default: null
volume_type:
description:
      - Type of EBS volume; standard (magnetic), gp2 (SSD), io1 (Provisioned IOPS). "Standard" is the old EBS default
        and remains the Ansible default for backwards compatibility.
required: false
default: standard
version_added: "1.9"
iops:
description:
- the provisioned IOPs you want to associate with this volume (integer).
required: false
default: 100
version_added: "1.3"
encrypted:
description:
- Enable encryption at rest for this volume.
default: false
version_added: "1.8"
device_name:
description:
      - device name to override the default device mapping. Assumes /dev/sdf for Linux/UNIX and /dev/xvdf for Windows. If the value contains the {}, {X} or {N} templates, the module picks a free device name automatically: {} and {X} expand to a character in the [f-p] range and {N} to a character in the [1-6] range, following the EBS device naming docs at http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html .
required: false
default: null
delete_on_termination:
description:
- When set to "yes", the volume will be deleted upon instance termination.
required: false
default: "no"
choices: ["yes", "no"]
version_added: "2.1"
zone:
description:
      - zone in which to create the volume; if unset, the zone of the instance is used (when an instance is given)
required: false
default: null
aliases: ['aws_zone', 'ec2_zone']
snapshot:
description:
- snapshot ID on which to base the volume
required: false
default: null
version_added: "1.5"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
version_added: "1.5"
state:
description:
- whether to ensure the volume is present or absent, or to list existing volumes (The C(list) option was added in version 1.8).
required: false
default: present
choices: ['absent', 'present', 'list']
version_added: "1.6"
author: "Lester Wade (@lwade)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Simple attachment action
- ec2_vol:
instance: XXXXXX
volume_size: 5
device_name: sdd
# Example using custom iops params
- ec2_vol:
instance: XXXXXX
volume_size: 5
iops: 100
device_name: sdd
# Example using snapshot id
- ec2_vol:
instance: XXXXXX
snapshot: "{{ snapshot }}"
# Playbook example combined with instance launch
- ec2:
keypair: "{{ keypair }}"
image: "{{ image }}"
wait: yes
count: 3
register: ec2
- ec2_vol:
instance: "{{ item.id }} "
volume_size: 5
with_items: ec2.instances
register: ec2_vol
# Example: Launch an instance and then add a volume if not already attached
# * Volume will be created with the given name if not already created.
# * Nothing will happen if the volume is already attached.
# * Requires Ansible 2.0
- ec2:
keypair: "{{ keypair }}"
image: "{{ image }}"
zone: YYYYYY
id: my_instance
wait: yes
count: 1
register: ec2
- ec2_vol:
instance: "{{ item.id }}"
name: my_existing_volume_Name_tag
device_name: /dev/xvdf
with_items: ec2.instances
register: ec2_vol
# Example: Launch an instance and then add a volume if not already attached
# * Volume will be created with the given name if not already created.
# * Volume will pick the first free /dev/xvd* slot according to template.
# * Nothing will happen if the volume is already attached.
# * Requires Ansible 2.0
- ec2_vol:
instance: "{{ item.id }}"
name: my_existing_volume_Name_tag
device_name: /dev/xvd{}
with_items: ec2.instances
register: ec2_vol
# Remove a volume
- ec2_vol:
id: vol-XXXXXXXX
    state: absent
# Detach a volume (since 1.9)
- ec2_vol:
id: vol-XXXXXXXX
instance: None
# List volumes for an instance
- ec2_vol:
instance: i-XXXXXX
state: list
# Create new volume using SSD storage
- ec2_vol:
instance: XXXXXX
volume_size: 50
volume_type: gp2
device_name: /dev/xvdf
# Attach an existing volume to instance. The volume will be deleted upon instance termination.
- ec2_vol:
instance: XXXXXX
id: XXXXXX
device_name: /dev/sdf
delete_on_termination: yes
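# Create a new encrypted volume and attach it (illustrative placeholder values; encryption needs boto >= 2.29.0)
- ec2_vol:
    instance: XXXXXX
    volume_size: 10
    encrypted: yes
    device_name: /dev/sdg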
'''
RETURN = '''
device:
description: device name of attached volume
returned: when success
type: string
    sample: "/dev/sdf"
volume_id:
description: the id of volume
returned: when success
type: string
sample: "vol-35b333d9"
volume_type:
description: the volume type
returned: when success
type: string
sample: "standard"
volume:
description: a dictionary containing detailed attributes of the volume
returned: when success
type: string
sample: {
"attachment_set": {
"attach_time": "2015-10-23T00:22:29.000Z",
"deleteOnTermination": "false",
"device": "/dev/sdf",
"instance_id": "i-8356263c",
"status": "attached"
},
"create_time": "2015-10-21T14:36:08.870Z",
"encrypted": false,
"id": "vol-35b333d9",
"iops": null,
"size": 1,
"snapshot_id": "",
"status": "in-use",
"tags": {
"env": "dev"
},
"type": "standard",
"zone": "us-east-1b"
}
'''
import time
from distutils.version import LooseVersion
try:
import boto.ec2
from boto.exception import BotoServerError
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def get_volume(module, ec2):
name = module.params.get('name')
id = module.params.get('id')
zone = module.params.get('zone')
filters = {}
volume_ids = None
# If no name or id supplied, just try volume creation based on module parameters
if id is None and name is None:
return None
if zone:
filters['availability_zone'] = zone
if name:
filters = {'tag:Name': name}
if id:
volume_ids = [id]
try:
vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters)
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
if not vols:
if id:
msg = "Could not find the volume with id: %s" % id
if name:
msg += (" and name: %s" % name)
module.fail_json(msg=msg)
else:
return None
if len(vols) > 1:
module.fail_json(msg="Found more than one volume in zone (if specified) with name: %s" % name)
return vols[0]
def get_volumes(module, ec2):
instance = module.params.get('instance')
try:
if not instance:
vols = ec2.get_all_volumes()
else:
vols = ec2.get_all_volumes(filters={'attachment.instance-id': instance})
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
return vols
def delete_volume(module, ec2):
volume_id = module.params['id']
try:
ec2.delete_volume(volume_id)
module.exit_json(changed=True)
except boto.exception.EC2ResponseError as ec2_error:
if ec2_error.code == 'InvalidVolume.NotFound':
module.exit_json(changed=False)
module.fail_json(msg=ec2_error.message)
def boto_supports_volume_encryption():
"""
Check if Boto library supports encryption of EBS volumes (added in 2.29.0)
Returns:
True if boto library has the named param as an argument on the request_spot_instances method, else False
"""
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
def create_volume(module, ec2, zone):
changed = False
name = module.params.get('name')
iops = module.params.get('iops')
encrypted = module.params.get('encrypted')
volume_size = module.params.get('volume_size')
volume_type = module.params.get('volume_type')
snapshot = module.params.get('snapshot')
# If custom iops is defined we use volume_type "io1" rather than the default of "standard"
if iops:
volume_type = 'io1'
volume = get_volume(module, ec2)
if volume is None:
try:
if boto_supports_volume_encryption():
volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted)
changed = True
else:
volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops)
changed = True
while volume.status != 'available':
time.sleep(3)
volume.update()
if name:
ec2.create_tags([volume.id], {"Name": name})
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
return volume, changed
# See: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html
# http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/device_naming.html
#
DEVICE_LETTER_RANGE = tuple('fghijklmnop')
DEVICE_NUMBER_RANGE = tuple('123456')
ALL_DEVICE_NAME_TEMPLATES = ('{}', '{X}', '{N}')
def get_default_device_name_template(instance, ec2):
"""
Try to infer whether to use Windows or linux device name pattern.
Use instance.platform, password_data as indicators that instance
is a Windows machine.
"""
instance.update()
if (instance.platform or '').lower() == 'windows':
device_name = '/dev/xvd{}'
elif ec2.get_password_data(instance.id):
device_name = '/dev/xvd{}'
else:
device_name = '/dev/sd{}'
return device_name
def is_device_name_templated(device_name):
return any(t in device_name for t in ALL_DEVICE_NAME_TEMPLATES)
def get_next_device_name_from_template(device_name, module, ec2):
"""
Look at already attached volumes and device_name template,
and return the next free device name in alphabetical order
"""
volumes = get_volumes(module, ec2)
# python 2.6 str.format does not like unnamed items in templates
device_name = device_name.replace('{}', '{X}')
dev_choice_set = set(
device_name.format(X=c, N=n)
for c in DEVICE_LETTER_RANGE
for n in DEVICE_NUMBER_RANGE
)
dev_busy_set = set(v.attach_data.device for v in volumes)
dev_choices_left = sorted(dev_choice_set.difference(dev_busy_set))
    if 0 == len(dev_choices_left):
        module.fail_json(msg="Cannot find a free device name matching %s: all /dev/ EBS device names are busy" % device_name,
                         changed=True)
device_name = dev_choices_left[0]
return device_name
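# Illustrative sketch (not part of the original module, never called) of how
# the template resolution above behaves. The busy device names are
# hypothetical and no EC2 call is made.
def _example_device_name_template():
    busy = set(['/dev/xvdf', '/dev/xvdg'])
    choices = set('/dev/xvd{X}'.format(X=c) for c in DEVICE_LETTER_RANGE)
    # With f and g taken, the first free name in alphabetical order is /dev/xvdh
    return sorted(choices.difference(busy))[0]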
def attach_volume(module, ec2, volume, instance):
device_name = module.params.get('device_name')
delete_on_termination = module.params.get('delete_on_termination')
changed = False
# If device_name isn't set, make a choice based on best practices here:
# http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
# In future this needs to be more dynamic but combining block device mapping best practices
# (bounds for devices, as above) with instance.block_device_mapping data would be tricky. For me ;)
# Use password data attribute to tell whether the instance is Windows or Linux
if device_name is None:
try:
device_name = get_default_device_name_template(instance, ec2)
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
if volume.attachment_state() is not None:
adata = volume.attach_data
if adata.instance_id != instance.id:
module.fail_json(msg = "Volume %s is already attached to another instance: %s"
% (volume.id, adata.instance_id))
else:
# If device_name is a template to grab an available spot,
# bring it into consistency with actual attachment data
device_name = adata.device
# Volume is already attached to right instance
changed = modify_dot_attribute(module, ec2, instance, device_name)
else:
if is_device_name_templated(device_name):
t = device_name
device_name = get_next_device_name_from_template(t, module, ec2)
try:
volume.attach(instance.id, device_name)
while volume.attachment_state() != 'attached':
time.sleep(3)
volume.update()
changed = True
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
modify_dot_attribute(module, ec2, instance, device_name)
return volume, changed
def modify_dot_attribute(module, ec2, instance, device_name):
""" Modify delete_on_termination attribute """
delete_on_termination = module.params.get('delete_on_termination')
changed = False
try:
instance.update()
dot = instance.block_device_mapping[device_name].delete_on_termination
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
if delete_on_termination != dot:
try:
bdt = BlockDeviceType(delete_on_termination=delete_on_termination)
bdm = BlockDeviceMapping()
bdm[device_name] = bdt
ec2.modify_instance_attribute(instance_id=instance.id, attribute='blockDeviceMapping', value=bdm)
while instance.block_device_mapping[device_name].delete_on_termination != delete_on_termination:
time.sleep(3)
instance.update()
changed = True
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
return changed
def detach_volume(module, ec2, volume):
changed = False
if volume.attachment_state() is not None:
adata = volume.attach_data
volume.detach()
while volume.attachment_state() is not None:
time.sleep(3)
volume.update()
changed = True
return volume, changed
def get_volume_info(volume, state):
# If we're just listing volumes then do nothing, else get the latest update for the volume
if state != 'list':
volume.update()
volume_info = {}
attachment = volume.attach_data
volume_info = {
'create_time': volume.create_time,
'encrypted': volume.encrypted,
'id': volume.id,
'iops': volume.iops,
'size': volume.size,
'snapshot_id': volume.snapshot_id,
'status': volume.status,
'type': volume.type,
'zone': volume.zone,
'attachment_set': {
'attach_time': attachment.attach_time,
'device': attachment.device,
'instance_id': attachment.instance_id,
'status': attachment.status
},
'tags': volume.tags
}
if hasattr(attachment, 'deleteOnTermination'):
volume_info['attachment_set']['deleteOnTermination'] = attachment.deleteOnTermination
return volume_info
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
instance = dict(),
id = dict(),
name = dict(),
volume_size = dict(),
volume_type = dict(choices=['standard', 'gp2', 'io1'], default='standard'),
iops = dict(),
encrypted = dict(type='bool', default=False),
device_name = dict(),
delete_on_termination = dict(type='bool', default=False),
zone = dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
snapshot = dict(),
state = dict(choices=['absent', 'present', 'list'], default='present')
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
id = module.params.get('id')
name = module.params.get('name')
instance = module.params.get('instance')
volume_size = module.params.get('volume_size')
encrypted = module.params.get('encrypted')
device_name = module.params.get('device_name')
zone = module.params.get('zone')
snapshot = module.params.get('snapshot')
state = module.params.get('state')
# Ensure we have the zone or can get the zone
if instance is None and zone is None and state == 'present':
module.fail_json(msg="You must specify either instance or zone")
# Set volume detach flag
if instance == 'None' or instance == '':
instance = None
detach_vol_flag = True
else:
detach_vol_flag = False
# Set changed flag
changed = False
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
if state == 'list':
returned_volumes = []
vols = get_volumes(module, ec2)
for v in vols:
attachment = v.attach_data
returned_volumes.append(get_volume_info(v, state))
module.exit_json(changed=False, volumes=returned_volumes)
if encrypted and not boto_supports_volume_encryption():
module.fail_json(msg="You must use boto >= v2.29.0 to use encrypted volumes")
# Here we need to get the zone info for the instance. This covers situation where
# instance is specified but zone isn't.
# Useful for playbooks chaining instance launch with volume create + attach and where the
# zone doesn't matter to the user.
inst = None
if instance:
try:
reservation = ec2.get_all_instances(instance_ids=instance)
except BotoServerError as e:
module.fail_json(msg=e.message)
inst = reservation[0].instances[0]
zone = inst.placement
# Check if there is a volume already mounted there.
if device_name:
if device_name in inst.block_device_mapping:
module.exit_json(msg="Volume mapping for %s already exists on instance %s" % (device_name, instance),
volume_id=inst.block_device_mapping[device_name].volume_id,
device=device_name,
changed=False)
# Delaying the checks until after the instance check allows us to get volume ids for existing volumes
# without needing to pass an unused volume_size
if not volume_size and not (id or name or snapshot):
module.fail_json(msg="You must specify volume_size or identify an existing volume by id, name, or snapshot")
# Cannot resize existing volumes, but can make a new volume of larger size
# from snapshot
if volume_size and id:
module.fail_json(msg="Cannot specify volume_size together with id")
if state == 'present':
volume, changed = create_volume(module, ec2, zone)
if detach_vol_flag:
volume, changed = detach_volume(module, ec2, volume)
elif inst is not None:
volume, changed = attach_volume(module, ec2, volume, inst)
        # Add device, volume_id and volume_type parameters separately to maintain backward compatibility
volume_info = get_volume_info(volume, state)
module.exit_json(changed=changed, volume=volume_info, device=volume_info['attachment_set']['device'], volume_id=volume_info['id'], volume_type=volume_info['type'])
elif state == 'absent':
delete_volume(module, ec2)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 | -8,963,196,909,828,346,000 | 32.482602 | 414 | 0.628682 | false |
yyamano/RESTx | src/python/restx/render/__init__.py | 1 | 1140 | """
RESTx: Sane, simple and effective data publishing and integration.
Copyright (C) 2010 MuleSoft Inc. http://www.mulesoft.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""
This module provides renderers that convert data into
different output formats.
You can import these classes straight from module level:
 * HtmlRenderer
 * JsonRenderer
"""
# Export classes on module level, so that users don't need
# to specify the individual file names in their imports.
from restx.render.htmlrenderer import HtmlRenderer
from restx.render.jsonrenderer import JsonRenderer
| gpl-3.0 | 6,498,477,152,287,166,000 | 33.545455 | 70 | 0.774561 | false |
octesian/IoTEM | IoTEM/urls.py | 1 | 1376 | """IoTEM URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib.staticfiles import views
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.auth import views as auth_views
urlpatterns = [
url(r'^login/$', auth_views.login, {'template_name': 'part_browser/login.html'}),
url(r'^logout/$', auth_views.logout_then_login),
url(r'^', include('part_browser.urls')),
url(r'^admin/', admin.site.urls),
url(r'^static/(?P<path>.*)$', views.serve),
url(r'^schedule/', include('schedule.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| gpl-3.0 | 5,857,188,549,150,959,000 | 44.866667 | 97 | 0.659884 | false |
tjcsl/cslbot | cslbot/commands/metar.py | 1 | 2400 | # -*- coding: utf-8 -*-
# Copyright (C) 2013-2018 Samuel Damashek, Peter Foley, James Forcier, Srijay Kasturi, Reed Koser, Christopher Reffett, and Tris Wilson
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from xml.etree import ElementTree
from requests import get
from ..helpers import arguments
from ..helpers.command import Command
@Command(['metar'], ['nick', 'config', 'db', 'name', 'source', 'handler'])
def cmd(send, msg, args):
"""Gets the weather.
Syntax: {command} <station> [station2...]
"""
parser = arguments.ArgParser(args['config'])
parser.add_argument('stations', nargs='*')
try:
cmdargs = parser.parse_args(msg)
except arguments.ArgumentException as e:
send(str(e))
return
if not cmdargs.stations:
send("What station?")
return
if isinstance(cmdargs.stations, list):
cmdargs.stations = ','.join(cmdargs.stations)
req = get('http://aviationweather.gov/adds/dataserver_current/httpparam',
params={
'datasource': 'metars',
'requestType': 'retrieve',
'format': 'xml',
'mostRecentForEachStation': 'constraint',
'hoursBeforeNow': '1.25',
'stationString': cmdargs.stations
})
xml = ElementTree.fromstring(req.text)
errors = xml.find('./errors')
if len(errors):
errstring = ','.join([error.text for error in errors])
send('Error: %s' % errstring)
return
data = xml.find('./data')
if data is None or data.attrib['num_results'] == '0':
send('No results found.')
else:
for station in data:
send(station.find('raw_text').text)
| gpl-2.0 | 6,708,503,875,459,143,000 | 35.923077 | 135 | 0.64125 | false |
googleapis/google-api-java-client-services | generator/src/googleapis/codegen/schema.py | 1 | 18349 | #!/usr/bin/python2.7
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API data models - schemas and their properties.
This module handles the objects created for the "schema" section of an API.
"""
__author__ = '[email protected] (Tony Aiuto)'
import collections
import logging
from googleapis.codegen import data_types
from googleapis.codegen import template_objects
from googleapis.codegen.api_exception import ApiException
_ADDITIONAL_PROPERTIES = 'additionalProperties'
_LOGGER = logging.getLogger('codegen')
class Schema(data_types.ComplexDataType):
"""The definition of a schema."""
def __init__(self, api, default_name, def_dict, parent=None):
"""Construct a Schema object from a discovery dictionary.
Schemas represent data models in the API.
Args:
api: (Api) the Api instance owning the Schema
default_name: (str) the default name of the Schema. If there is an 'id'
member in the definition, that is used for the name instead.
def_dict: (dict) a discovery dictionary
parent: (Schema) The containing schema. To be used to establish unique
names for anonymous sub-schemas.
"""
super(Schema, self).__init__(default_name, def_dict, api, parent=parent)
name = def_dict.get('id', default_name)
_LOGGER.debug('Schema(%s)', name)
# Protect against malicious discovery
template_objects.CodeObject.ValidateName(name)
self.SetTemplateValue('wireName', name)
class_name = api.ToClassName(name, self, element_type='schema')
self.SetTemplateValue('className', class_name)
self.SetTemplateValue('isSchema', True)
self.SetTemplateValue('properties', [])
self._module = (template_objects.Module.ModuleFromDictionary(self.values)
or api.model_module)
@classmethod
def Create(cls, api, default_name, def_dict, wire_name, parent=None):
"""Construct a Schema or DataType from a discovery dictionary.
Schemas contain either object declarations, simple type declarations, or
references to other Schemas. Object declarations conceptually map to real
classes. Simple types will map to a target language built-in type.
References should effectively be replaced by the referenced Schema.
Args:
api: (Api) the Api instance owning the Schema
default_name: (str) the default name of the Schema. If there is an 'id'
member in the definition, that is used for the name instead.
def_dict: (dict) a discovery dictionary
wire_name: The name which will identify objects of this type in data on
the wire. The path of wire_names can trace an item back through
discovery.
parent: (Schema) The containing schema. To be used to establish nesting
for anonymous sub-schemas.
Returns:
A Schema or DataType.
Raises:
ApiException: If the definition dict is not correct.
"""
schema_id = def_dict.get('id')
if schema_id:
name = schema_id
else:
name = default_name
class_name = api.ToClassName(name, None, element_type='schema')
_LOGGER.debug('Create: %s, parent=%s', name,
parent.values.get('wireName', '<anon>') if parent else 'None')
# Schema objects come in several patterns.
#
# 1. Simple objects
# { type: object, properties: { "foo": {schema} ... }}
#
# 2. Maps of objects
# { type: object, additionalProperties: { "foo": {inner_schema} ... }}
#
# What we want is a data type which is Map<string, {inner_schema}>
# The schema we create here is essentially a built in type which we
# don't want to generate a class for.
#
# 3. Arrays of objects
# { type: array, items: { inner_schema }}
#
# Same kind of issue as the map, but with List<{inner_schema}>
#
# 4. Primitive data types, described by type and format.
# { type: string, format: int32 }
# { type: string, enum: ["value", ...], enumDescriptions: ["desc", ...]}
#
# 5. Refs to another schema.
# { $ref: name }
#
# 6. Variant schemas
# { type: object, variant: { discriminant: "prop", map:
# [ { 'type_value': value, '$ref': wireName }, ... ] } }
#
# What we do is map the variant schema to a schema with a single
# property for the discriminant. To that property, we attach
# the variant map which specifies which discriminator values map
# to which schema references. We also collect variant information
# in the api so we can later associate discriminator value and
# base type with the generated variant subtypes.
if 'type' in def_dict:
# The 'type' field of the schema can either be 'array', 'object', or a
# base json type.
json_type = def_dict['type']
if json_type == 'object':
# Look for variants
variant = def_dict.get('variant')
if variant:
return cls._CreateVariantType(variant, api, name,
def_dict, wire_name, parent)
# Look for full object definition. You can have properties or
# additionalProperties, but it does not do anything useful to have
# both.
# Replace properties dict with Property's
props = def_dict.get('properties')
if props:
# This case 1 from above
return cls._CreateObjectWithProperties(props, api, name,
def_dict, wire_name, parent)
# Look for case 2
additional_props = def_dict.get(_ADDITIONAL_PROPERTIES)
if additional_props:
return cls._CreateMapType(additional_props, api, name, wire_name,
class_name, parent)
# no properties
return cls._CreateSchemaWithoutProperties(api, name, def_dict,
wire_name, parent)
elif json_type == 'array':
# Case 3: Look for array definition
return cls._CreateArrayType(api, def_dict, wire_name, class_name,
schema_id, parent)
else:
# Case 4: This must be a basic type. Create a DataType for it.
return data_types.CreatePrimitiveDataType(def_dict, api, wire_name,
parent=parent)
referenced_schema = def_dict.get('$ref')
if referenced_schema:
# Case 5: Reference to another Schema.
#
      # There are 3 ways you can see '$ref' in discovery.
# 1. In a property of a schema or a method request/response, pointing
# back to a previously defined schema
# 2. As above, pointing to something not defined yet.
# 3. In a method request or response or property of a schema pointing to
# something undefined.
#
# For case 1, the schema will be in the API name to schema map.
#
# For case 2, just creating this placeholder here is fine. When the
# actual schema is hit in the loop in _BuildSchemaDefinitions, we will
# replace the entry and DataTypeFromJson will resolve the to the new def.
#
# For case 3, we will end up with a dangling reference and fail later.
schema = api.SchemaByName(referenced_schema)
# The stored "schema" may not be an instance of Schema, but rather a
# data_types.PrimitiveDataType, which has no 'wireName' value.
if schema:
_LOGGER.debug('Schema.Create: %s => %s',
default_name, schema.values.get('wireName', '<unknown>'))
return schema
return data_types.SchemaReference(referenced_schema, api)
raise ApiException('Cannot decode JSON Schema for: %s' % def_dict)
@classmethod
def _CreateObjectWithProperties(cls, props, api, name, def_dict,
wire_name, parent):
properties = []
schema = cls(api, name, def_dict, parent=parent)
if wire_name:
schema.SetTemplateValue('wireName', wire_name)
for prop_name in sorted(props):
prop_dict = props[prop_name]
_LOGGER.debug(' adding prop: %s to %s', prop_name, name)
properties.append(Property(api, schema, prop_name, prop_dict))
# Some APIs express etag directly in the response, others don't.
# Knowing that we have it explicitly makes special case code generation
# easier
if prop_name == 'etag':
schema.SetTemplateValue('hasEtagProperty', True)
schema.SetTemplateValue('properties', properties)
# check for @ clashing. E.g. No 'foo' and '@foo' in the same object.
names = set()
for p in properties:
wire_name = p.GetTemplateValue('wireName')
no_at_sign = wire_name.replace('@', '')
if no_at_sign in names:
raise ApiException(
'Property name clash in schema %s:'
' %s conflicts with another property' % (name, wire_name))
names.add(no_at_sign)
return schema
@classmethod
def _CreateVariantType(cls, variant, api, name, def_dict,
wire_name, parent):
"""Creates a variant type."""
variants = collections.OrderedDict()
schema = cls(api, name, def_dict, parent=parent)
if wire_name:
schema.SetTemplateValue('wireName', wire_name)
discriminant = variant['discriminant']
# Walk over variants building the variant map and register
# variant info on the api.
for variant_entry in variant['map']:
discriminant_value = variant_entry['type_value']
variant_schema = api.DataTypeFromJson(variant_entry, name, parent=parent)
variants[discriminant_value] = variant_schema
# Set variant info. We get the original wire name from the JSON properties
# via '$ref' it is not currently accessible via variant_schema.
api.SetVariantInfo(variant_entry.get('$ref'), discriminant,
discriminant_value, schema)
prop = Property(api, schema, discriminant, {'type': 'string'},
key_for_variants=variants)
schema.SetTemplateValue('is_variant_base', True)
schema.SetTemplateValue('discriminant', prop)
schema.SetTemplateValue('properties', [prop])
return schema
@classmethod
def _CreateMapType(cls, additional_props, api, name, wire_name,
class_name, parent):
_LOGGER.debug('Have only additionalProps for %s, dict=%s',
name, additional_props)
# TODO(user): Remove this hack at the next large breaking change
# The "Items" added to the end is unneeded and ugly. This is for
# temporary backwards compatibility. Same for _CreateArrayType().
if additional_props.get('type') == 'array':
name = '%sItem' % name
subtype_name = additional_props.get('id', name + 'Element')
# Note, since this is an interim, non class just to hold the map
# make the parent schema the parent passed in, not myself.
_LOGGER.debug('name:%s, wire_name:%s, subtype name %s', name, wire_name,
subtype_name)
# When there is a parent, we synthesize a wirename when none exists.
# Purpose is to avoid generating an extremely long class name, since we
# don't do so for other nested classes.
if parent and wire_name:
base_wire_name = wire_name + 'Element'
else:
base_wire_name = None
base_type = api.DataTypeFromJson(
additional_props, subtype_name, parent=parent,
wire_name=base_wire_name)
map_type = data_types.MapDataType(name, base_type, parent=parent,
wire_name=wire_name)
map_type.SetTemplateValue('className', class_name)
_LOGGER.debug(' %s is MapOf<string, %s>',
class_name, base_type.class_name)
return map_type
@classmethod
def _CreateSchemaWithoutProperties(cls, api, name, def_dict, wire_name,
parent):
if parent:
# code objects have __getitem__(), but not .get()
try:
pname = parent['id']
except KeyError:
pname = '<unknown>'
name_to_log = '%s.%s' % (pname, name)
else:
name_to_log = name
logging.warning('object without properties %s: %s',
name_to_log, def_dict)
schema = cls(api, name, def_dict, parent=parent)
if wire_name:
schema.SetTemplateValue('wireName', wire_name)
return schema
@classmethod
def _CreateArrayType(cls, api, def_dict, wire_name,
class_name, schema_id, parent):
items = def_dict.get('items')
if not items:
raise ApiException('array without items in: %s' % def_dict)
tentative_class_name = class_name
# TODO(user): We should not rename things items.
# if we have an anonymous type within a map or array, it should be
# called 'Item', and let the namespacing sort it out.
if schema_id:
_LOGGER.debug('Top level schema %s is an array', class_name)
tentative_class_name += 'Items'
base_type = api.DataTypeFromJson(items, tentative_class_name,
parent=parent, wire_name=wire_name)
_LOGGER.debug(' %s is ArrayOf<%s>', class_name, base_type.class_name)
array_type = data_types.ArrayDataType(tentative_class_name, base_type,
wire_name=wire_name,
parent=parent)
if schema_id:
array_type.SetTemplateValue('className', schema_id)
return array_type
@property
def class_name(self):
return self.values['className']
@property
def anonymous(self):
return 'id' not in self.raw
@property
def properties(self):
return self.values['properties']
@property
def isContainerWrapper(self):
"""Is this schema just a simple wrapper around another container.
A schema is just a wrapper for another datatype if it is an object that
contains just a single container datatype and (optionally) a kind and
etag field. This may be used by language generators to create iterators
directly on the schema. E.g. You could have
SeriesList ret = api.GetSomeSeriesMethod(args).Execute();
for (series in ret) { ... }
rather than
for (series in ret->items) { ... }
Returns:
None or ContainerDataType
"""
return self._GetPropertyWhichWeWrap() is not None
@property
def containerProperty(self):
"""If isContainerWrapper, returns the propery which holds the container."""
return self._GetPropertyWhichWeWrap()
def _GetPropertyWhichWeWrap(self):
"""Returns the property which is the type we are wrapping."""
container_property = None
for p in self.values['properties']:
if p.values['wireName'] == 'kind' or p.values['wireName'] == 'etag':
continue
if p.data_type.GetTemplateValue('isContainer'):
if container_property:
return None
container_property = p
else:
return None
return container_property
def __str__(self):
return '<%s Schema {%s}>' % (self.values['wireName'], self.values)
class Property(template_objects.CodeObject):
"""The definition of a schema property.
Example property in the discovery schema:
"id": {"type": "string"}
"""
def __init__(self, api, schema, name, def_dict, key_for_variants=None):
"""Construct a Property.
A Property requires several elements in its template value dictionary which
are set here:
wireName: the string which labels this Property in the JSON serialization.
dataType: the DataType of this property.
Args:
api: (Api) The Api which owns this Property
schema: (Schema) the schema this Property is part of
name: (string) the name for this Property
def_dict: (dict) the JSON schema dictionary
key_for_variants: (dict) if given, maps discriminator values to
variant schemas.
Raises:
ApiException: If we have an array type without object definitions.
"""
super(Property, self).__init__(def_dict, api, wire_name=name)
self.ValidateName(name)
self.schema = schema
self._key_for_variants = key_for_variants
# TODO(user): find a better way to mark a schema as an array type
# so we can display schemas like BlogList in method responses
try:
if self.values['wireName'] == 'items' and self.values['type'] == 'array':
self.schema.values['isList'] = True
except KeyError:
pass
# If the schema value for this property defines a new object directly,
# rather than refering to another schema, we will have to create a class
# name for it. We create a unique name by prepending the schema we are
# in to the object name.
tentative_class_name = api.NestedClassNameForProperty(name, schema)
self._data_type = api.DataTypeFromJson(def_dict, tentative_class_name,
parent=schema, wire_name=name)
@property
def code_type(self):
if self._language_model:
self._data_type.SetLanguageModel(self._language_model)
return self._data_type.code_type
@property
def safe_code_type(self):
if self._language_model:
self._data_type.SetLanguageModel(self._language_model)
return self._data_type.safe_code_type
@property
def primitive_data_type(self):
if self._language_model:
self._data_type.SetLanguageModel(self._language_model)
return self._data_type.primitive_data_type
@property
def data_type(self):
return self._data_type
@property
def member_name_is_json_name(self):
return self.memberName == self.values['wireName']
@property
def is_variant_key(self):
return self._key_for_variants
@property
def variant_map(self):
return self._key_for_variants
| apache-2.0 | -3,918,453,363,792,271,400 | 37.548319 | 80 | 0.642378 | false |
dthgeek/QuickOSM | core/actions.py | 1 | 4118 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
QuickOSM
A QGIS plugin
OSM Overpass API frontend
-------------------
begin : 2014-06-11
copyright : (C) 2014 by 3Liz
email : info at 3liz dot com
contributor : Etienne Trimaille
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtWebKit import QWebView
from PyQt4.QtGui import QDesktopServices
from PyQt4.QtCore import QUrl
from qgis.utils import iface
from qgis.gui import QgsMessageBar
from QuickOSM.core.utilities.tools import tr
class Actions(object):
"""
Manage actions available on layers
"""
@staticmethod
def run(field, value):
"""
Run an action with only one value as parameter
@param field:Type of the action
@type field:str
@param value:Value of the field for one entity
@type value:str
"""
if value == '':
iface.messageBar().pushMessage(
tr("QuickOSM",
u"Sorry man, this field is empty for this entity."),
level=QgsMessageBar.WARNING, duration=7)
else:
field = unicode(field, "UTF-8")
value = unicode(value, "UTF-8")
if field in ["url", "website", "wikipedia"]:
var = QDesktopServices()
url = None
if field == "url" or field == "website":
url = value
if field == "ref_UAI":
url = "http://www.education.gouv.fr/pid24302/annuaire-" \
"resultat-recherche.html?lycee_name=" + value
if field == "wikipedia":
url = "http://en.wikipedia.org/wiki/" + value
var.openUrl(QUrl(url))
elif field == "josm":
import urllib2
try:
url = "http://localhost:8111/load_object?objects=" + value
urllib2.urlopen(url).read()
except urllib2.URLError:
iface.messageBar().pushMessage(
tr("QuickOSM",
u"The JOSM remote seems to be disabled."),
level=QgsMessageBar.CRITICAL,
duration=7)
# NOT USED
elif field == "rawedit":
url = QUrl("http://rawedit.openstreetmap.fr/edit/" + value)
web_browser = QWebView(None)
web_browser.load(url)
web_browser.show()
@staticmethod
def run_sketch_line(network, ref):
"""
Run an action with two values for sketchline
@param network:network of the bus
@type network:str
@param ref:ref of the bus
@type ref:str
"""
network = unicode(network, "UTF-8")
ref = unicode(ref, "UTF-8")
if network == '' or ref == '':
iface.messageBar().pushMessage(
tr("QuickOSM",
u"Sorry man, this field is empty for this entity."),
level=QgsMessageBar.WARNING,
duration=7)
else:
var = QDesktopServices()
url = "http://www.overpass-api.de/api/sketch-line?" \
"network=" + network + "&ref=" + ref
var.openUrl(QUrl(url))
| gpl-2.0 | 1,764,082,413,191,741,200 | 34.196581 | 78 | 0.448033 | false |
Amber-MD/ambertools-conda-build | conda_tools/test/test_fix_conda_gfortran_linking_osx.py | 1 | 1041 | # pytest -vs .
import os
import sys
from mock import patch
import shutil
sys.path.insert(0, '..')
from fix_conda_gfortran_linking_osx import repack_conda_package, main
this_dir = os.path.dirname(__file__)
PACK_SCRIPT = os.path.join(this_dir, '..',
'pack_binary_without_conda_install.py')
FAKE_TAR = os.path.join(this_dir, 'fake_data', 'fake_osx.tar.bz2')
has_gfortran_local = os.path.exists('/usr/local/gfortran/')
def test_repack_conda_package():
class Opt():
pass
opt = Opt()
opt.tarfile = FAKE_TAR
opt.output_dir = '.'
opt.date = False
opt.dry_run = False
with patch('update_gfortran_libs_osx.main') as mock_g_main:
repack_conda_package(opt)
mock_g_main.assert_called_with(['.'])
os.remove(os.path.basename(FAKE_TAR))
def test_main():
junk = './tmp_fdasfda'
output_dir = '{}/heyhey'.format(junk)
main([FAKE_TAR, '-o', output_dir])
assert os.path.exists(os.path.join(output_dir, os.path.basename(FAKE_TAR)))
shutil.rmtree(junk)
| mit | 669,227,652,938,590,500 | 26.394737 | 79 | 0.635927 | false |
brightiup/brightiup | brightiup/compiler/bt_lexer.py | 1 | 1391 | import ply.lex as lex
class BTLexerException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class BTLexer(object):
"""BT lexer"""
keywords = [
# "import",
"state",
]
tokens = [keyword.upper() for keyword in keywords] + [
'ID',
'VARIABLE',
]
t_ignore = " \t"
t_VARIABLE = r'''\$[A-Za-z][A-Za-z0-9_]*'''
literals = ".{};="
_keyword_map = {}
for keyword in keywords:
_keyword_map[keyword] = keyword.upper()
@staticmethod
def t_NEWLINE(t):
r'''\n+'''
t.lexer.lineno += t.value.count('\n')
@staticmethod
def t_error(t):
raise BTLexerException('Illegal character %s at line %s'%(t.value[0], t.lineno))
@staticmethod
def t_ID(t):
r'''[A-Za-z][A-Za-z0-9_]*'''
t.type = BTLexer._keyword_map.get(t.value, 'ID')
return t
def __init__(self, **kwargs):
self.lexer = lex.lex(module=self, **kwargs)
def test(self, data):
self.lexer.input(data)
while True:
tok = self.lexer.token()
if not tok:
break
print tok
if __name__ == '__main__':
lexer = BTLexer()
lexer.test(open('../script/http.bt').read())
| gpl-2.0 | -243,517,580,238,202,750 | 21.803279 | 88 | 0.4867 | false |
holzman/glideinwms-old | lib/condorMonitor.py | 1 | 24800 | #
# Project:
# glideinWMS
#
# File Version:
#
# Description:
# This module implements classes to query the condor daemons
# and manipulate the results
# Please notice that it also converts \" into "
#
# Author:
# Igor Sfiligoi (Aug 30th 2006)
#
import condorExe
import condorSecurity
import os
import string
import copy
import socket
import xml.parsers.expat
#
# Configuration
#
# Set path to condor binaries
def set_path(new_condor_bin_path):
global condor_bin_path
condor_bin_path = new_condor_bin_path
#
# Caching classes
#
# dummy caching class, when you don't want caching
# used as base class below, too
class NoneScheddCache:
    #returns (cmd arg schedd string,LOCAL_DIR)
def getScheddId(self,schedd_name,pool_name):
return (self.iGetCmdScheddStr(schedd_name),{})
# INTERNAL and for inheritance
def iGetCmdScheddStr(self,schedd_name):
if schedd_name is None:
schedd_str=""
else:
schedd_str = "-name %s " % schedd_name
return schedd_str
# The schedd can be found either through -name attr
# or through the local disk lookup
# Remember which one to use
class LocalScheddCache(NoneScheddCache):
def __init__(self):
self.enabled=True
# dictionary of
# (schedd_name,pool_name)=>(cms arg schedd string,env)
self.cache={}
self.my_ips=socket.gethostbyname_ex(socket.gethostname())[2]
try:
self.my_ips+=socket.gethostbyname_ex('localhost')[2]
except socket.gaierror,e:
pass # localhost not defined, ignore
def enable(self):
self.enabled=True
def disable(self):
self.enabled=False
    #returns (cmd arg schedd string,env)
def getScheddId(self,schedd_name,pool_name):
if schedd_name is None: # special case, do not cache
return ("",{})
if self.enabled:
k=(schedd_name,pool_name)
if not self.cache.has_key(k): # not in cache, discover it
env=self.iGetEnv(schedd_name, pool_name)
if env is None: #
self.cache[k]=(self.iGetCmdScheddStr(schedd_name),{})
else:
self.cache[k]=("",env)
return self.cache[k]
else: # not enabled, just return the str
return (self.iGetCmdScheddStr(schedd_name),{})
#
# PRIVATE
#
# return None if not found
# Can raise exceptions
def iGetEnv(self,schedd_name, pool_name):
cs=CondorStatus('schedd',pool_name)
data=cs.fetch(constraint='Name=?="%s"'%schedd_name,format_list=[('ScheddIpAddr','s'),('SPOOL_DIR_STRING','s'),('LOCAL_DIR_STRING','s')])
if not data.has_key(schedd_name):
raise RuntimeError, "Schedd '%s' not found"%schedd_name
el=data[schedd_name]
if 'SPOOL_DIR_STRING' not in el and 'LOCAL_DIR_STRING' not in el: # not advertising, cannot use disk optimization
return None
if not el.has_key('ScheddIpAddr'): # This should never happen
raise RuntimeError, "Schedd '%s' is not advertising ScheddIpAddr"%schedd_name
schedd_ip=el['ScheddIpAddr'][1:].split(':')[0]
if schedd_ip in self.my_ips: #seems local, go for the dir
l=el.get('SPOOL_DIR_STRING', el.get('LOCAL_DIR_STRING'))
if os.path.isdir(l): # making sure the directory exists
if 'SPOOL_DIR_STRING' in el:
return {'_CONDOR_SPOOL': '%s' %l }
else: # LOCAL_DIR_STRING
return {'_CONDOR_SPOOL': '%s/spool' %l }
else: #dir does not exist, likely not relevant, revert to standard behaviour
return None
else: # not local
return None
# default global object
local_schedd_cache=LocalScheddCache()
def condorq_attrs(q_constraint, attribute_list):
"""
    Retrieves a list of the requested attributes from all the factory queues.
"""
attr_str = ""
for attr in attribute_list:
attr_str += " -attr %s" % attr
xml_data = condorExe.exe_cmd("condor_q","-g -l %s -xml -constraint '%s'" % (attr_str, q_constraint))
classads_xml = []
tmp_list = []
for line in xml_data:
# look for the xml header
if line[:5] == "<?xml":
if len(tmp_list) > 0:
classads_xml.append(tmp_list)
tmp_list = []
tmp_list.append(line)
q_proxy_list = []
for ad_xml in classads_xml:
cred_list = xml2list(ad_xml)
q_proxy_list.extend(cred_list)
return q_proxy_list
#
# Condor monitoring classes
#
# Generic, you most probably don't want to use these
class AbstractQuery: # pure virtual, just to have a minimum set of methods defined
# returns the data, will not modify self
def fetch(self,constraint=None,format_list=None):
raise NotImplementedError,"Fetch not implemented"
# will fetch in self.stored_data
def load(self,constraint=None,format_list=None):
raise NotImplementedError,"Load not implemented"
# constraint_func is a boolean function, with only one argument (data el)
# same output as fetch, but limited to constraint_func(el)==True
#
# if constraint_func==None, return all the data
def fetchStored(self,constraint_func=None):
raise NotImplementedError,"fetchStored not implemented"
class StoredQuery(AbstractQuery): # still virtual, only fetchStored defined
stored_data = {}
def fetchStored(self,constraint_func=None):
return applyConstraint(self.stored_data,constraint_func)
#
# format_list is a list of
# (attr_name, attr_type)
# where attr_type is one of
# "s" - string
# "i" - integer
# "r" - real (float)
# "b" - bool
#
#
# security_obj, if defined, should be a child of condorSecurity.ProtoRequest
class QueryExe(StoredQuery): # first fully implemented one, execute commands
def __init__(self,exe_name,resource_str,group_attribute,pool_name=None,security_obj=None,env={}):
self.exe_name=exe_name
self.env=env
self.resource_str=resource_str
self.group_attribute=group_attribute
self.pool_name=pool_name
if pool_name is None:
self.pool_str=""
else:
self.pool_str = "-pool %s" % pool_name
if security_obj is not None:
if security_obj.has_saved_state():
raise RuntimeError, "Cannot use a security object which has saved state."
self.security_obj=copy.deepcopy(security_obj)
else:
self.security_obj=condorSecurity.ProtoRequest()
    def require_integrity(self,requested_integrity): # if None, don't change, else force that one
if requested_integrity is None:
condor_val=None
elif requested_integrity:
condor_val="REQUIRED"
else:
# if not required, still should not fail if the other side requires it
condor_val='OPTIONAL'
self.security_obj.set('CLIENT','INTEGRITY',condor_val)
def get_requested_integrity(self):
condor_val = self.security_obj.get('CLIENT','INTEGRITY')
if condor_val is None:
return None
return (condor_val=='REQUIRED')
    def require_encryption(self,requested_encryption): # if None, don't change, else force that one
if requested_encryption is None:
condor_val=None
elif requested_encryption:
condor_val="REQUIRED"
else:
# if not required, still should not fail if the other side requires it
condor_val='OPTIONAL'
self.security_obj.set('CLIENT','ENCRYPTION',condor_val)
def get_requested_encryption(self):
condor_val = self.security_obj.get('CLIENT','ENCRYPTION')
if condor_val is None:
return None
return (condor_val=='REQUIRED')
def fetch(self,constraint=None,format_list=None):
if constraint is None:
constraint_str=""
else:
constraint_str="-constraint '%s'"%constraint
full_xml=(format_list is None)
if format_list is not None:
format_arr=[]
for format_el in format_list:
attr_name,attr_type=format_el
attr_format={'s':'%s','i':'%i','r':'%f','b':'%i'}[attr_type]
format_arr.append('-format "%s" "%s"'%(attr_format,attr_name))
format_str=string.join(format_arr," ")
# set environment for security settings
self.security_obj.save_state()
self.security_obj.enforce_requests()
if full_xml:
xml_data = condorExe.exe_cmd(self.exe_name,"%s -xml %s %s"%(self.resource_str,self.pool_str,constraint_str),env=self.env);
else:
xml_data = condorExe.exe_cmd(self.exe_name,"%s %s -xml %s %s"%(self.resource_str,format_str,self.pool_str,constraint_str),env=self.env);
# restore old values
self.security_obj.restore_state()
list_data = xml2list(xml_data)
del xml_data
dict_data = list2dict(list_data, self.group_attribute)
return dict_data
def load(self, constraint=None, format_list=None):
self.stored_data = self.fetch(constraint, format_list)
#
# Fully usable query functions
#
# condor_q
class CondorQ(QueryExe):
def __init__(self,schedd_name=None,pool_name=None,security_obj=None,schedd_lookup_cache=local_schedd_cache):
self.schedd_name=schedd_name
if schedd_lookup_cache is None:
schedd_lookup_cache=NoneScheddCache()
schedd_str,env=schedd_lookup_cache.getScheddId(schedd_name, pool_name)
QueryExe.__init__(self,"condor_q",schedd_str,["ClusterId","ProcId"],pool_name,security_obj,env)
def fetch(self, constraint=None, format_list=None):
if format_list is not None:
# check that ClusterId and ProcId are present, and if not add them
format_list = complete_format_list(format_list, [("ClusterId", 'i'), ("ProcId", 'i')])
return QueryExe.fetch(self, constraint=constraint, format_list=format_list)
# condor_q, where we have only one ProcId x ClusterId
class CondorQLite(QueryExe):
def __init__(self,schedd_name=None,pool_name=None,security_obj=None,schedd_lookup_cache=local_schedd_cache):
self.schedd_name=schedd_name
if schedd_lookup_cache is None:
schedd_lookup_cache=NoneScheddCache()
schedd_str,env=schedd_lookup_cache.getScheddId(schedd_name, pool_name)
QueryExe.__init__(self,"condor_q",schedd_str,"ClusterId",pool_name,security_obj,env)
def fetch(self, constraint=None, format_list=None):
if format_list is not None:
# check that ClusterId is present, and if not add it
format_list = complete_format_list(format_list, [("ClusterId", 'i')])
return QueryExe.fetch(self, constraint=constraint, format_list=format_list)
# condor_status
class CondorStatus(QueryExe):
def __init__(self,subsystem_name=None,pool_name=None,security_obj=None):
if subsystem_name is None:
subsystem_str=""
else:
subsystem_str = "-%s" % subsystem_name
QueryExe.__init__(self,"condor_status",subsystem_str,"Name",pool_name,security_obj,{})
def fetch(self, constraint=None, format_list=None):
if format_list is not None:
# check that Name present and if not, add it
format_list = complete_format_list(format_list, [("Name",'s')])
return QueryExe.fetch(self, constraint=constraint, format_list=format_list)
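# Illustrative usage sketch of the query classes above (not part of the
# original module and never called). The schedd and pool names are
# hypothetical placeholders; running this needs a reachable Condor pool.
def _example_query_usage():
    cq = CondorQ(schedd_name='[email protected]', pool_name='pool.example.com')
    running_jobs = cq.fetch(constraint='JobStatus==2',
                            format_list=[('Owner', 's'), ('JobStatus', 'i')])
    # running_jobs is a dictionary keyed by (ClusterId, ProcId)
    cs = CondorStatus('schedd')
    schedd_ads = cs.fetch(format_list=[('Name', 's'), ('ScheddIpAddr', 's')])
    return running_jobs, schedd_ads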
#
# Subquery classes
#
# Generic, you most probably don't want to use this
class BaseSubQuery(StoredQuery):
def __init__(self, query, subquery_func):
self.query = query
self.subquery_func = subquery_func
def fetch(self, constraint=None):
indata = self.query.fetch(constraint)
        return self.subquery_func(indata)
#
# NOTE: You need to call load on the SubQuery object to use fetchStored
    #       and to have issued query.load beforehand
#
def load(self, constraint=None):
indata = self.query.fetchStored(constraint)
self.stored_data = self.subquery_func(indata)
#
# Fully usable subquery functions
#
class SubQuery(BaseSubQuery):
def __init__(self, query, constraint_func=None):
BaseSubQuery.__init__(self, query, lambda d:applyConstraint(d, constraint_func))
class Group(BaseSubQuery):
# group_key_func - Key extraction function
# One argument: classad dictionary
# Returns: value of the group key
# group_data_func - Key extraction function
# One argument: list of classad dictionaries
# Returns: a summary classad dictionary
def __init__(self, query, group_key_func, group_data_func):
BaseSubQuery.__init__(self, query, lambda d:doGroup(d, group_key_func, group_data_func))
#
# Summarizing classes
#
class Summarize:
# hash_func - Hashing function
# One argument: classad dictionary
# Returns: hash value
# if None, will not be counted
# if a list, all elements will be used
def __init__(self, query, hash_func=lambda x:1):
self.query = query
self.hash_func = hash_func
# Parameters:
# constraint - string to be passed to query.fetch()
# hash_func - if !=None, use this instead of the main one
# Returns a dictionary of hash values
# Elements are counts (or more dictionaries if hash returns lists)
def count(self, constraint=None, hash_func=None):
data = self.query.fetch(constraint)
return fetch2count(data, self.getHash(hash_func))
# Use data pre-stored in query
# Same output as count
def countStored(self, constraint_func=None, hash_func=None):
data = self.query.fetchStored(constraint_func)
return fetch2count(data, self.getHash(hash_func))
# Parameters, same as count
# Returns a dictionary of hash values
# Elements are lists of keys (or more dictionaries if hash returns lists)
def list(self, constraint=None, hash_func=None):
data = self.query.fetch(constraint)
return fetch2list(data, self.getHash(hash_func))
# Use data pre-stored in query
# Same output as list
def listStored(self,constraint_func=None,hash_func=None):
data=self.query.fetchStored(constraint_func)
return fetch2list(data,self.getHash(hash_func))
### Internal
def getHash(self, hash_func):
if hash_func is None:
return self.hash_func
else:
return hash_func
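# Illustrative sketch (not part of the original module, never called): count
# jobs per owner and job status with the Summarize class above. It assumes
# the queried classads carry Owner and JobStatus attributes.
def _example_summarize_usage():
    cq = CondorQ()  # hypothetical local schedd
    s = Summarize(cq, hash_func=lambda job: [job['Owner'], job['JobStatus']])
    # e.g. {'alice': {1: 3, 2: 10}, 'bob': {2: 1}}
    return s.count()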
class SummarizeMulti:
def __init__(self, queries, hash_func=lambda x:1):
self.counts = []
for query in queries:
            self.counts.append(Summarize(query, hash_func))
self.hash_func=hash_func
# see Count for description
def count(self, constraint=None, hash_func=None):
out = {}
for c in self.counts:
data = c.count(constraint, hash_func)
addDict(out, data)
return out
# see Count for description
def countStored(self, constraint_func=None, hash_func=None):
out = {}
for c in self.counts:
data = c.countStored(constraint_func, hash_func)
addDict(out, data)
return out
############################################################
#
# P R I V A T E, do not use
#
############################################################
# check that req_format_els are present in in_format_list, and if not add them
# return a new format_list
def complete_format_list(in_format_list, req_format_els):
out_format_list = in_format_list[0:]
for req_format_el in req_format_els:
found = False
for format_el in in_format_list:
if format_el[0] == req_format_el[0]:
found = True
break
if not found:
out_format_list.append(req_format_el)
return out_format_list
#
# Convert Condor XML to list
#
# For Example:
#
#<?xml version="1.0"?>
#<!DOCTYPE classads SYSTEM "classads.dtd">
#<classads>
#<c>
# <a n="MyType"><s>Job</s></a>
# <a n="TargetType"><s>Machine</s></a>
# <a n="AutoClusterId"><i>0</i></a>
# <a n="ExitBySignal"><b v="f"/></a>
# <a n="TransferOutputRemaps"><un/></a>
# <a n="WhenToTransferOutput"><s>ON_EXIT</s></a>
#</c>
#<c>
# <a n="MyType"><s>Job</s></a>
# <a n="TargetType"><s>Machine</s></a>
# <a n="AutoClusterId"><i>0</i></a>
# <a n="OnExitRemove"><b v="t"/></a>
# <a n="x509userproxysubject"><s>/DC=gov/DC=fnal/O=Fermilab/OU=People/CN=Igor Sfiligoi/UID=sfiligoi</s></a>
#</c>
#</classads>
#
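# For the classads above, xml2list() returns a list of dictionaries roughly
# like the following (illustrative, abbreviated):
#
#  [{'MyType': 'Job', 'TargetType': 'Machine', 'AutoClusterId': 0,
#    'ExitBySignal': False, 'TransferOutputRemaps': None,
#    'WhenToTransferOutput': 'ON_EXIT'},
#   {'MyType': 'Job', 'TargetType': 'Machine', 'AutoClusterId': 0,
#    'OnExitRemove': True, 'x509userproxysubject': '/DC=gov/DC=fnal/...'}]
#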
# 3 xml2list XML handler functions
def xml2list_start_element(name, attrs):
global xml2list_data, xml2list_inclassad, xml2list_inattr, xml2list_intype
if name == "c":
xml2list_inclassad = {}
elif name == "a":
xml2list_inattr = {"name": attrs["n"], "val": ""}
xml2list_intype = "s"
elif name == "i":
xml2list_intype = "i"
elif name == "r":
xml2list_intype = "r"
elif name == "b":
xml2list_intype = "b"
if attrs.has_key('v'):
xml2list_inattr["val"] = (attrs["v"] in ('T', 't', '1'))
else:
# extended syntax... value in text area
xml2list_inattr["val"] = None
elif name == "un":
xml2list_intype = "un"
xml2list_inattr["val"] = None
elif name in ("s", "e"):
pass # nothing to do
elif name == "classads":
pass # top element, nothing to do
else:
raise TypeError, "Unsupported type: %s" % name
def xml2list_end_element(name):
global xml2list_data, xml2list_inclassad, xml2list_inattr, xml2list_intype
if name == "c":
xml2list_data.append(xml2list_inclassad)
xml2list_inclassad = None
elif name == "a":
xml2list_inclassad[xml2list_inattr["name"]] = xml2list_inattr["val"]
xml2list_inattr = None
elif name in ("i", "b", "un", "r"):
xml2list_intype = "s"
elif name in ("s", "e"):
pass # nothing to do
elif name == "classads":
pass # top element, nothing to do
else:
raise TypeError, "Unexpected type: %s" % name
def xml2list_char_data(data):
global xml2list_data, xml2list_inclassad, xml2list_inattr, xml2list_intype
if xml2list_inattr is None:
# only process when in attribute
return
if xml2list_intype == "i":
xml2list_inattr["val"] = int(data)
elif xml2list_intype == "r":
xml2list_inattr["val"] = float(data)
elif xml2list_intype == "b":
if xml2list_inattr["val"] is not None:
#nothing to do, value was in attribute
pass
else:
xml2list_inattr["val"] = (data[0] in ('T', 't', '1'))
elif xml2list_intype == "un":
#nothing to do, value was in attribute
pass
else:
unescaped_data = string.replace(data, '\\"', '"')
xml2list_inattr["val"] += unescaped_data
def xml2list(xml_data):
global xml2list_data, xml2list_inclassad, xml2list_inattr, xml2list_intype
xml2list_data = []
xml2list_inclassad = None
xml2list_inattr = None
xml2list_intype = None
p = xml.parsers.expat.ParserCreate()
p.StartElementHandler = xml2list_start_element
p.EndElementHandler = xml2list_end_element
p.CharacterDataHandler = xml2list_char_data
found_xml = -1
for line in range(len(xml_data)):
# look for the xml header
if xml_data[line][:5] == "<?xml":
found_xml = line
break
if found_xml >= 0:
try:
p.Parse(string.join(xml_data[found_xml:]), 1)
except TypeError, e:
raise RuntimeError, "Failed to parse XML data, TypeError: %s" % e
except:
raise RuntimeError, "Failed to parse XML data, generic error"
# else no xml, so return an empty list
return xml2list_data
#
# Convert a list to a dictionary
#
def list2dict(list_data, attr_name):
if type(attr_name) in (type([]), type((1, 2))):
attr_list = attr_name
else:
attr_list = [attr_name]
dict_data = {}
for list_el in list_data:
if type(attr_name) in (type([]), type((1, 2))):
dict_name = []
list_keys=list_el.keys()
for an in attr_name:
if an in list_keys:
dict_name.append(list_el[an])
else:
# Try lower cases
for k in list_keys:
if an.lower()==k.lower():
dict_name.append(list_el[k])
break
dict_name=tuple(dict_name)
else:
dict_name = list_el[attr_name]
# dict_el will have all the elements but those in attr_list
dict_el = {}
for a in list_el:
if not (a in attr_list):
dict_el[a] = list_el[a]
dict_data[dict_name] = dict_el
return dict_data
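# Illustrative sketch of list2dict (not part of the original module, never
# called): grouping one classad by a compound key.
def _example_list2dict():
    ads = [{'ClusterId': 1, 'ProcId': 0, 'Owner': 'alice'}]
    # Returns {(1, 0): {'Owner': 'alice'}}
    return list2dict(ads, ['ClusterId', 'ProcId'])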
def applyConstraint(data, constraint_func):
if constraint_func is None:
return data
else:
outdata = {}
for key in data.keys():
if constraint_func(data[key]):
outdata[key] = data[key]
return outdata
def doGroup(indata, group_key_func, group_data_func):
gdata = {}
for k in indata.keys():
inel = indata[k]
gkey = group_key_func(inel)
if gdata.has_key(gkey):
gdata[gkey].append(inel)
else:
gdata[gkey] = [inel]
outdata = {}
for k in gdata.keys():
outdata[k] = group_data_func(gdata[k])
return outdata
#
# Inputs
# data - data from a fetch()
# hash_func - Hashing function
# One argument: classad dictionary
# Returns: hash value
# if None, will not be counted
# if a list, all elements will be used
#
# Returns a dictionary of hash values
# Elements are counts (or more dictionaries if hash returns lists)
#
def fetch2count(data, hash_func):
count = {}
for k in data.keys():
el = data[k]
hid = hash_func(el)
if hid is None:
# hash tells us it does not want to count this
continue
# cel will point to the real counter
cel = count
# check if it is a list
if (type(hid) == type([])):
# have to create structure inside count
for h in hid[:-1]:
if not cel.has_key(h):
cel[h] = {}
cel = cel[h]
hid = hid[-1]
if cel.has_key(hid):
count_el = cel[hid] + 1
else:
count_el = 1
cel[hid] = count_el
return count
#
# Inputs
# data - data from a fetch()
# hash_func - Hashing function
# One argument: classad dictionary
# Returns: hash value
# if None, will not be counted
# if a list, all elements will be used
#
# Returns a dictionary of hash values
# Elements are lists of keys (or more dictionaries if hash returns lists)
#
def fetch2list(data, hash_func):
return_list = {}
for k in data.keys():
el = data[k]
hid = hash_func(el)
if hid is None:
# hash tells us it does not want to list this
continue
# lel will point to the real list
lel = return_list
# check if it is a list
if (type(hid) == type([])):
# have to create structure inside list
for h in hid[:-1]:
if not lel.has_key(h):
lel[h] = {}
lel = lel[h]
hid = hid[-1]
if lel.has_key(hid):
list_el = lel[hid].append[k]
else:
list_el = [k]
lel[hid] = list_el
return return_list
#
# Recursivelly add two dictionaries
# Do it in place, using the first one
#
def addDict(base_dict, new_dict):
for k in new_dict.keys():
new_el = new_dict[k]
if not base_dict.has_key(k):
# nothing there?, just copy
base_dict[k] = new_el
else:
if type(new_el) == type({}):
#another dictionary, recourse
addDict(base_dict[k], new_el)
else:
base_dict[k] += new_el
| bsd-3-clause | 6,990,342,089,642,197,000 | 31.124352 | 148 | 0.586048 | false |
vinu76jsr/django_profiler | docs/conf.py | 1 | 8147 | # -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)
import profiler
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'profile-middleware'
copyright = u'2014, Vaibhav Mishra'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = profiler.__version__
# The full version, including alpha/beta/rc tags.
release = profiler.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'profilerdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'profiler.tex', u'profile-middleware Documentation',
u'Vaibhav Mishra', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'profiler', u'profile-middleware Documentation',
[u'Vaibhav Mishra'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'profiler', u'profile-middleware Documentation',
u'Vaibhav Mishra', 'profiler', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False | bsd-3-clause | -2,368,062,073,458,400,000 | 31.07874 | 80 | 0.707377 | false |
nvoron23/netmiko | tests/test_cisco_ios.py | 1 | 2115 | #!/usr/bin/env python
import pytest
import netmiko
import time
from DEVICE_CREDS import *
def setup_module(module):
module.EXPECTED_RESPONSES = {
'base_prompt' : 'pynet-rtr1',
'interface_ip' : '10.220.88.20'
}
show_ver_command = 'show version'
module.basic_command = 'show ip int brief'
SSHClass = netmiko.ssh_dispatcher(cisco_881['device_type'])
net_connect = SSHClass(**cisco_881)
module.show_version = net_connect.send_command(show_ver_command)
module.show_ip = net_connect.send_command(module.basic_command)
module.base_prompt = net_connect.base_prompt
# Test buffer clearing
net_connect.remote_conn.send(show_ver_command)
time.sleep(2)
net_connect.clear_buffer()
# Should not be anything there on the second pass
module.clear_buffer_check = net_connect.clear_buffer()
def test_disable_paging():
'''
Verify paging is disabled by looking for string after when paging would
normally occur
'''
assert 'Configuration register is' in show_version
def test_verify_ssh_connect():
'''
Verify the connection was established successfully
'''
assert 'Cisco IOS Software' in show_version
def test_verify_send_command():
'''
Verify a command can be sent down the channel successfully
'''
assert EXPECTED_RESPONSES['interface_ip'] in show_ip
def test_base_prompt():
'''
Verify the router prompt is detected correctly
'''
assert base_prompt == EXPECTED_RESPONSES['base_prompt']
def test_strip_prompt():
'''
Ensure the router prompt is not in the command output
'''
assert EXPECTED_RESPONSES['base_prompt'] not in show_ip
def test_strip_command():
'''
Ensure that the command that was executed does not show up in the
command output
'''
assert basic_command not in show_ip
def test_normalize_linefeeds():
'''
Ensure no '\r\n' sequences
'''
assert not '\r\n' in show_version
def test_clear_buffer():
'''
Test that clearing the buffer works
'''
assert clear_buffer_check is None
| mit | -1,181,263,027,566,322,700 | 22.764045 | 75 | 0.665248 | false |
apuigsech/emv-framework | iso7816.py | 1 | 5968 | #!/usr/bin/python
#
# Python ISO7816 (as part of EMV Framework)
# Copyrigh 2012 Albert Puigsech Galicia <[email protected]>
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
from smartcard.CardType import AnyCardType
from smartcard.CardRequest import CardRequest
from smartcard.CardConnection import CardConnection
from smartcard.CardConnectionObserver import ConsoleCardConnectionObserver
from smartcard.Exceptions import CardRequestTimeoutException
from tlv import *
INS_DB = (
{
'name':'READ_BINARY',
'code':0xb0
},
{
'name':'WRITE_BINARY',
'code':0xd0
},
{
'name':'UPDATE_BINARY',
'code':0xd6
},
{
'name':'ERASE_BINARY',
'code':0x0e
},
{
'name':'READ_RECORD',
'code':0xb2
},
{
'name':'WRITE_RECORD',
'code':0xd2
},
{
'name':'APPEND_RECORD',
'code':0xe2
},
{
'name':'UPDATE RECORD',
'code':0xdc
},
{
'name':'GET_DATA',
'code':0xca
},
{
'name':'PUT_DATA',
'code':0xda
},
{
'name':'SELECT_FILE',
'code':0xa4
},
{
'name':'VERIFY',
'code':0x20
},
{
'name':'INTERNAL_AUTHENTICATE',
'code':0x88
},
{
'name':'EXTERNAL AUTHENTICATE',
'code':0xb2
},
{
'name':'GET_CHALLENGE',
'code':0xb4
},
{
'name':'MANAGE_CHANNEL',
'code':0x70
},
{
'name':'GET_RESPONSE',
'code':0xc0
}
)
class APDU_Command:
def __init__(self, cla=0x00, ins=0x00, p1=0x00, p2=0x00, lc=None, data=None, le=None):
self.cla = cla
self.ins = ins
self.p1 = p1
self.p2 = p2
if data != None and lc == None:
lc = len(data)
self.lc = lc
self.data = data
self.le = le
def raw(self):
apdu_cmd_raw = [self.cla, self.ins, self.p1, self.p2]
if self.data != None:
apdu_cmd_raw += [self.lc] + self.data
if self.le != None:
apdu_cmd_raw += [self.le]
return apdu_cmd_raw
def str(self):
apdu_cmd_str = '{0:02x} {1:02x} {2:02x} {3:02x}'.format(self.cla, self.ins, self.p1, self.p2)
if self.data != None:
apdu_cmd_str += ' {0:02x}'.format(self.lc)
for d in self.data:
apdu_cmd_str += ' {0:02x}'.format(d)
if self.le != None:
apdu_cmd_str += ' {0:02x}'.format(self.le)
return apdu_cmd_str
class APDU_Response:
def __init__(self, sw1=0x00, sw2=0x00, data=None):
self.sw1 = sw1
self.sw2 = sw2
self.data = data
def raw(self):
apdu_res_raw = []
if self.data != None:
apdu_res_raw += self.data
apdu_res_raw += [self.sw1, self.sw2]
return apdu_res_raw
def str(self):
apdu_res_str = ''
if self.data != None:
for d in self.data:
apdu_res_str += '{0:02x} '.format(d)
apdu_res_str += '{0:02x} {1:02x}'.format(self.sw1, self.sw2)
return apdu_res_str
class ISO7816:
def __init__(self):
cardtype = AnyCardType()
cardrequest = CardRequest(timeout=10, cardType=cardtype)
self.card = cardrequest.waitforcard()
self.card.connection.connect()
self.ins_db = []
self.ins_db_update(INS_DB)
self.log = []
self.auto_get_response = True
def ins_db_update(self, new):
self.ins_db += new
def ins_db_resolv(self, name=None, code=None):
for e in self.ins_db:
if name != None and e['name'] == name:
return e['code']
if code != None and e['code'] == code:
return e['name']
return None
def send_command(self, cmd, p1=0, p2=0, tlvparse=False, cla=0x00, data=None, le=None):
ins = self.ins_db_resolv(name=cmd)
return self.send_apdu(APDU_Command(ins=ins, p1=p1, p2=p2, cla=cla, data=data, le=le))
def send_apdu(self, apdu_cmd):
#print '>>> ' + apdu_cmd.str()
data,sw1,sw2 = self.send_apdu_raw(apdu_cmd.raw())
apdu_res = APDU_Response(sw1=sw1, sw2=sw2, data=data)
#print '<<< ' + apdu_res.str()
if self.auto_get_response == True:
if sw1 == 0x6c:
apdu_cmd.le = sw2
apdu_res = self.send_apdu(apdu_cmd)
if sw1 == 0x61:
apdu_res = self.GET_RESPONSE(sw2)
return apdu_res
def send_apdu_raw(self, apdu):
return self.card.connection.transmit(apdu)
def log_add(self, log_item):
self.log.append(log_item)
def log_print(self):
return
def READ_BINARY(self, p1=0x00, p2=0x00, len=0x00):
return self.send_command('READ_BINARY', p1=p1, p2=p2, le=len)
def WRITE_BINARY(self, p1=0x00, p2=0x00, data=[]):
return self.send_command('WRITE_BINARY', p1=p1, p2=p2, data=data)
def UPDATE_BINRY(self, p1=0x00, p2=0x00, data=[]):
return self.send_command('UPDATE_BINRY', p1=p1, p2=p2, data=data)
def ERASE_BINARY(self, p1=0x00, p2=0x00, data=None):
return self.send_command('ERASE_BINARY', p1=p1, p2=p2, data=data)
def READ_RECORD(self, sfi, record=0x00, variation=0b100):
return self.send_command('READ_RECORD', p1=record, p2=(sfi<<3)+variation, le=0)
def WRITE_RECORD(self, sfi, data, record=0x00, variation=0b100):
return self.send_command('WRITE_RECORD', p1=record, p2=(sfi<<3)+variation, data=data)
def APPEND_RECORD(self, sfi, variation=0b100):
return self.send_command('APPEND_RECORD', p1=0x00, p2=(sfi<<3)+variation, data=data)
def UPDATE_RECORD(self, sfi, data, record=0x00, variation=0b100):
return self.send_command('UPDATE_RECORD', p1=record, p2=(sfi<<3)+variation, data=data)
def GET_DATA(self, data_id):
return self.send_command('GET_DATA', p1=data_id[0], p2=data_id[1])
def PUT_DATA(self, data_id, data):
return self.send_command('PUT_DATA', p1=data_id[0], p2=data_id[1], data=data)
def SELECT_FILE(self, data, p1=0x00, p2=0x00):
return self.send_command('SELECT_FILE', p1=p1, p2=p2, data=data)
def VERIFY(self):
return
def INTERNAL_AUTHENTICATE(self):
return
def EXTERNAL_AUTHENTICATE(self):
return
def GET_CHALLENGE(self):
return
def MANAGE_CHANNEL(self):
return
def GET_RESPONSE(self, le):
return self.send_command('GET_RESPONSE', le=le)
def ENVELOPPE(self):
return
def SEARCH_RECORD(self):
return
def DISABLE_CHV(self):
return
def UNBLOCK_CHV(self):
return
| gpl-3.0 | 7,305,658,734,531,248,000 | 22.131783 | 95 | 0.649464 | false |
guegue/forocacao | forocacao/users/views.py | 1 | 3882 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import base64
from PIL import Image, ImageDraw, ImageFont
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.views.generic import DetailView, ListView, RedirectView, UpdateView
from braces.views import LoginRequiredMixin
from easy_thumbnails.files import get_thumbnailer
from .models import User
class UserBadgeJPEG(LoginRequiredMixin, DetailView):
model = User
slug_field = "username"
slug_url_kwarg = "username"
def get(self, request, username):
participant = self.get_object()
event = participant.event
img = Image.new('RGBA', (event.badge_size_x, event.badge_size_y), event.badge_color)
draw = ImageDraw.Draw(img)
match = {
'event': event.name,
'name': "%s %s" % (participant.first_name.partition(' ')[0], participant.last_name.partition(' ')[0] ),
'first_name': participant.first_name,
'last_name': participant.last_name,
'profession': participant.profession,
'country': participant.country.name,
'type': participant.type,
'email': participant.email,
}
for field in event.eventbadge_set.all():
x = field.x
y = field.y
size = field.size
if field.field == 'logo':
if participant.event.logo:
logo = Image.open(participant.event.logo.file.file)
logo.thumbnail((size,size))
img.paste(logo, (x,y))
elif field.field == 'photo':
if participant.photo:
photo = Image.open(participant.photo)
photo.thumbnail((size,size))
img.paste(photo, (x,y))
else:
if field.field == 'text':
content = field.format
else:
content = match[field.field]
fnt = ImageFont.truetype(field.font.filename, size)
color = field.color
draw.text((x,y), ("%s") % (content), font=fnt, fill=color)
response = HttpResponse(content_type="image/png")
img.save(response, "PNG")
return HttpResponse(response, content_type="image/png")
class UserBadgeView(LoginRequiredMixin, DetailView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = "username"
slug_url_kwarg = "username"
template_name = 'users/user_badge.html'
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = "username"
slug_url_kwarg = "username"
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("users:detail",
kwargs={"username": self.request.user.username})
class UserUpdateView(LoginRequiredMixin, UpdateView):
fields = ['first_name', 'last_name', 'phone', 'activities' ] #FIXME : add all needed fields
# we already imported User in the view code above, remember?
model = User
# send the user back to their own page after a successful update
def get_success_url(self):
return reverse("users:detail",
kwargs={"username": self.request.user.username})
def get_object(self):
# Only get the User record for the user making the request
return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = "username"
slug_url_kwarg = "username"
| bsd-3-clause | -8,969,089,653,582,079,000 | 34.290909 | 119 | 0.61154 | false |
icgood/pymap | pymap/parsing/specials/options.py | 1 | 5476 |
from __future__ import annotations
import re
from collections.abc import Iterable, Mapping
from typing import Optional
from . import AString, SequenceSet
from .. import Params, Parseable
from ..exceptions import NotParseable
from ..primitives import Number, List
from ...bytes import BytesFormat, rev
__all__ = ['ExtensionOption', 'ExtensionOptions']
class ExtensionOption(Parseable[bytes]):
"""Represents a single command option, which may or may not have an
associated value.
See Also:
`RFC 4466 2.1. <https://tools.ietf.org/html/rfc4466#section-2.1>`_
Args:
option: The name of the option.
arg: The option argument, if any.
"""
_opt_pattern = rev.compile(br'[a-zA-Z_.-][a-zA-Z0-9_.:-]*')
def __init__(self, option: bytes, arg: List) -> None:
super().__init__()
self.option = option
self.arg = arg
self._raw_arg: Optional[bytes] = None
@property
def value(self) -> bytes:
return self.option
def __bytes__(self) -> bytes:
if self.arg.value:
return BytesFormat(b'%b %b') % (self.option, self.raw_arg)
else:
return self.option
@property
def raw_arg(self) -> bytes:
if self._raw_arg is None:
if not self.arg:
self._raw_arg = b''
elif len(self.arg) == 1:
arg_0 = self.arg.value[0]
if isinstance(arg_0, (Number, SequenceSet)):
self._raw_arg = bytes(arg_0)
else:
self._raw_arg = bytes(self.arg)
else:
self._raw_arg = bytes(self.arg)
return self._raw_arg
@classmethod
def _parse_arg(cls, buf: memoryview, params: Params) \
-> tuple[List, memoryview]:
try:
num, buf = Number.parse(buf, params)
except NotParseable:
pass
else:
arg = List([num])
return arg, buf
try:
seq_set, buf = SequenceSet.parse(buf, params)
except NotParseable:
pass
else:
arg = List([seq_set])
return arg, buf
try:
params_copy = params.copy(list_expected=[AString, List])
return List.parse(buf, params_copy)
except NotParseable:
pass
return List([]), buf
@classmethod
def parse(cls, buf: memoryview, params: Params) \
-> tuple[ExtensionOption, memoryview]:
start = cls._whitespace_length(buf)
match = cls._opt_pattern.match(buf, start)
if not match:
raise NotParseable(buf[start:])
option = match.group(0).upper()
buf = buf[match.end(0):]
arg, buf = cls._parse_arg(buf, params)
return cls(option, arg), buf
class ExtensionOptions(Parseable[Mapping[bytes, List]]):
"""Represents a set of command options, which may or may not have an
associated argument. Command options are always optional, so the parsing
will not fail, it will just return an empty object.
See Also:
`RFC 4466 2.1. <https://tools.ietf.org/html/rfc4466#section-2.1>`_
Args:
options: The mapping of options to argument.
"""
_opt_pattern = re.compile(br'[a-zA-Z_.-][a-zA-Z0-9_.:-]*')
_empty: Optional[ExtensionOptions] = None
def __init__(self, options: Iterable[ExtensionOption]) -> None:
super().__init__()
self.options: Mapping[bytes, List] = \
{opt.option: opt.arg for opt in options}
self._raw: Optional[bytes] = None
@classmethod
def empty(cls) -> ExtensionOptions:
"""Return an empty set of command options."""
if cls._empty is None:
cls._empty = ExtensionOptions({})
return cls._empty
@property
def value(self) -> Mapping[bytes, List]:
return self.options
def has(self, option: bytes) -> bool:
return option in self.options
def get(self, option: bytes) -> Optional[List]:
return self.options.get(option, None)
def __bool__(self) -> bool:
return bool(self.options)
def __len__(self) -> int:
return len(self.options)
def __bytes__(self) -> bytes:
if self._raw is None:
parts = [ExtensionOption(option, arg)
for option, arg in sorted(self.options.items())]
self._raw = b'(' + BytesFormat(b' ').join(parts) + b')'
return self._raw
@classmethod
def _parse_paren(cls, buf: memoryview, paren: bytes) -> memoryview:
start = cls._whitespace_length(buf)
if buf[start:start + 1] != paren:
raise NotParseable(buf)
return buf[start + 1:]
@classmethod
def _parse(cls, buf: memoryview, params: Params) \
-> tuple[ExtensionOptions, memoryview]:
buf = cls._parse_paren(buf, b'(')
result: list[ExtensionOption] = []
while True:
try:
option, buf = ExtensionOption.parse(buf, params)
except NotParseable:
break
else:
result.append(option)
buf = cls._parse_paren(buf, b')')
return cls(result), buf
@classmethod
def parse(cls, buf: memoryview, params: Params) \
-> tuple[ExtensionOptions, memoryview]:
try:
return cls._parse(buf, params)
except NotParseable:
return cls.empty(), buf
| mit | 3,286,872,938,212,346,000 | 29.422222 | 76 | 0.56355 | false |
squirrelo/qiita | qiita_ware/dispatchable.py | 1 | 8731 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from .analysis_pipeline import RunAnalysis
from qiita_ware.commands import submit_EBI, submit_VAMPS
from qiita_db.analysis import Analysis
def submit_to_ebi(preprocessed_data_id, submission_type):
"""Submit a study to EBI"""
submit_EBI(preprocessed_data_id, submission_type, True)
def submit_to_VAMPS(preprocessed_data_id):
"""Submit a study to VAMPS"""
return submit_VAMPS(preprocessed_data_id)
def run_analysis(analysis_id, commands, comm_opts=None,
rarefaction_depth=None, merge_duplicated_sample_ids=False,
**kwargs):
"""Run an analysis"""
analysis = Analysis(analysis_id)
ar = RunAnalysis(**kwargs)
return ar(analysis, commands, comm_opts, rarefaction_depth,
merge_duplicated_sample_ids)
def create_raw_data(artifact_type, prep_template, filepaths, name=None):
"""Creates a new raw data
Needs to be dispachable because it moves large files
Parameters
----------
artifact_type: str
The artifact type
prep_template : qiita_db.metadata_template.prep_template.PrepTemplate
The template to attach the artifact
filepaths : list of (str, str)
The list with filepaths and their filepath types
name : str, optional
The name of the new artifact
Returns
-------
dict of {str: str}
A dict of the form {'status': str, 'message': str}
"""
from qiita_db.artifact import Artifact
status = 'success'
msg = ''
try:
Artifact.create(filepaths, artifact_type, name=name,
prep_template=prep_template)
except Exception as e:
# We should hit this exception rarely (that's why it is an
# exception) since at this point we have done multiple checks.
# However, it can occur in weird cases, so better let the GUI know
# that this failed
return {'status': 'danger',
'message': "Error creating artifact: %s" % str(e)}
return {'status': status, 'message': msg}
def copy_raw_data(prep_template, artifact_id):
"""Creates a new raw data by copying from artifact_id
Parameters
----------
prep_template : qiita_db.metadata_template.prep_template.PrepTemplate
The template to attach the artifact
artifact_id : int
The id of the artifact to duplicate
Returns
-------
dict of {str: str}
A dict of the form {'status': str, 'message': str}
"""
from qiita_db.artifact import Artifact
status = 'success'
msg = ''
try:
Artifact.copy(Artifact(artifact_id), prep_template)
except Exception as e:
# We should hit this exception rarely (that's why it is an
# exception) since at this point we have done multiple checks.
# However, it can occur in weird cases, so better let the GUI know
# that this failed
return {'status': 'danger',
'message': "Error creating artifact: %s" % str(e)}
return {'status': status, 'message': msg}
def delete_artifact(artifact_id):
"""Deletes an artifact from the system
Parameters
----------
artifact_id : int
The artifact to delete
Returns
-------
dict of {str: str}
A dict of the form {'status': str, 'message': str}
"""
from qiita_db.artifact import Artifact
status = 'success'
msg = ''
try:
Artifact.delete(artifact_id)
except Exception as e:
status = 'danger'
msg = str(e)
return {'status': status, 'message': msg}
def create_sample_template(fp, study, is_mapping_file, data_type=None):
"""Creates a sample template
Parameters
----------
fp : str
The file path to the template file
study : qiita_db.study.Study
The study to add the sample template to
is_mapping_file : bool
Whether `fp` contains a mapping file or a sample template
data_type : str, optional
If `is_mapping_file` is True, the data type of the prep template to be
created
Returns
-------
dict of {str: str}
A dict of the form {'status': str, 'message': str}
"""
# The imports need to be in here because this code is executed in
# the ipython workers
import warnings
from os import remove
from qiita_db.metadata_template.sample_template import SampleTemplate
from qiita_db.metadata_template.util import load_template_to_dataframe
from qiita_ware.metadata_pipeline import (
create_templates_from_qiime_mapping_file)
status = 'success'
msg = ''
try:
with warnings.catch_warnings(record=True) as warns:
if is_mapping_file:
create_templates_from_qiime_mapping_file(fp, study,
data_type)
else:
SampleTemplate.create(load_template_to_dataframe(fp),
study)
remove(fp)
# join all the warning messages into one. Note that this
# info will be ignored if an exception is raised
if warns:
msg = '\n'.join(set(str(w.message) for w in warns))
status = 'warning'
except Exception as e:
# Some error occurred while processing the sample template
# Show the error to the user so they can fix the template
status = 'danger'
msg = str(e)
return {'status': status, 'message': msg}
def update_sample_template(study_id, fp):
"""Updates a sample template
Parameters
----------
study_id : int
Study id whose template is going to be updated
fp : str
The file path to the template file
Returns
-------
dict of {str: str}
A dict of the form {'status': str, 'message': str}
"""
import warnings
from os import remove
from qiita_db.metadata_template.util import load_template_to_dataframe
from qiita_db.metadata_template.sample_template import SampleTemplate
msg = ''
status = 'success'
try:
with warnings.catch_warnings(record=True) as warns:
# deleting previous uploads and inserting new one
st = SampleTemplate(study_id)
df = load_template_to_dataframe(fp)
st.extend(df)
st.update(df)
remove(fp)
# join all the warning messages into one. Note that this info
# will be ignored if an exception is raised
if warns:
msg = '\n'.join(set(str(w.message) for w in warns))
status = 'warning'
except Exception as e:
status = 'danger'
msg = str(e)
return {'status': status, 'message': msg}
def delete_sample_template(study_id):
"""Delete a sample template
Parameters
----------
study_id : int
Study id whose template is going to be deleted
Returns
-------
dict of {str: str}
A dict of the form {'status': str, 'message': str}
"""
from qiita_db.metadata_template.sample_template import SampleTemplate
msg = ''
status = 'success'
try:
SampleTemplate.delete(study_id)
except Exception as e:
status = 'danger'
msg = str(e)
return {'status': status, 'message': msg}
def update_prep_template(prep_id, fp):
"""Updates a prep template
Parameters
----------
prep_id : int
Prep template id to be updated
fp : str
The file path to the template file
Returns
-------
dict of {str: str}
A dict of the form {'status': str, 'message': str}
"""
import warnings
from os import remove
from qiita_db.metadata_template.util import load_template_to_dataframe
from qiita_db.metadata_template.prep_template import PrepTemplate
msg = ''
status = 'success'
prep = PrepTemplate(prep_id)
try:
with warnings.catch_warnings(record=True) as warns:
df = load_template_to_dataframe(fp)
prep.extend(df)
prep.update(df)
remove(fp)
if warns:
msg = '\n'.join(set(str(w.message) for w in warns))
status = 'warning'
except Exception as e:
status = 'danger'
msg = str(e)
return {'status': status, 'message': msg}
| bsd-3-clause | 4,269,830,663,802,809,300 | 28.59661 | 79 | 0.590654 | false |
bepatient-fr/itools | itools/pkg/build_gulp.py | 1 | 4272 | # -*- coding: UTF-8 -*-
# Copyright (C) 2016 Sylvain Taverne <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import from standard library
import sys
from subprocess import Popen
# Import from itools
from itools.fs.lfs import LocalFolder
from itools.uri import get_uri_name, Path
class GulpBuilder(object):
"""
Run "gulp build" in project's repository & add generated files
$ ui/{SKINS}/*
into the project MANIFEST file.
That allow to avoid commit compiled JS/CSS files into GIT.
"""
def __init__(self, package_root, worktree, manifest):
self.package_root = package_root
if self.package_root != '.':
self.ui_path = '{0}/ui/'.format(self.package_root)
else:
self.ui_path = 'ui/'
self.worktree = worktree
self.manifest = manifest
self.fs = LocalFolder('.')
if self.fs.is_folder(self.ui_path):
self.dist_folders = tuple(['{0}{1}'.format(self.ui_path, x)
for x in LocalFolder(self.ui_path).get_names()])
def run(self):
npm_done = self.launch_npm_install()
gulp_done = self.launch_gulp_build()
webpack_done = self.launch_webpack()
# Add DIST files into manifest
if (npm_done or gulp_done or webpack_done) and self.fs.exists(self.ui_path):
for path in self.fs.traverse(self.ui_path):
relative_path = self.fs.get_relative_path(path)
if (relative_path and
relative_path.startswith(self.dist_folders) and self.fs.is_file(path)):
self.manifest.add(relative_path)
def launch_npm_install(self):
done = False
for path in self.manifest:
filename = get_uri_name(path)
if filename == 'package.json':
print '***'*25
print '*** Run $ npm install on ', path
print '***'*25
path = str(Path(path)[:-1]) + '/'
p = Popen(['npm', 'install'], cwd=path)
p.wait()
if p.returncode == 1:
print '***'*25
print '*** Error running npm install ', path
print '***'*25
sys.exit(1)
done = True
return done
def launch_gulp_build(self):
done = False
for path in self.manifest:
filename = get_uri_name(path)
if filename == 'gulpfile.js':
print '***'*25
print '*** Run $ gulp build on ', path
print '***'*25
path = str(Path(path)[:-1]) + '/'
p = Popen(['gulp', 'build'], cwd=path)
p.wait()
if p.returncode == 1:
print '***'*25
print '*** Error running gulp ', path
print '***'*25
sys.exit(1)
done = True
return done
def launch_webpack(self):
done = False
for path in self.manifest:
filename = get_uri_name(path)
if filename == 'webpack.config.js':
print '***'*25
print '*** Run $ webpack ', path
print '***'*25
path = str(Path(path)[:-1]) + '/'
p = Popen(['webpack', '--mode=production'], cwd=path)
p.wait()
if p.returncode == 1:
print '***'*25
print '*** Error running webpack ', path
print '***'*25
sys.exit(1)
done = True
return done
| gpl-3.0 | 1,900,275,981,483,111,000 | 34.6 | 91 | 0.526919 | false |
lnls-fac/sirius | pymodels/TS_V03_03/lattice.py | 1 | 10721 | """Lattice module.
In this module the lattice of the corresponding accelerator is defined.
"""
import math as _math
import numpy as _np
from pyaccel import lattice as _pyacc_lat, elements as _pyacc_ele, \
accelerator as _pyacc_acc, optics as _pyacc_opt
energy = 0.15e9 # [eV]
default_optics_mode = 'M1'
class LatticeError(Exception):
"""LatticeError class."""
def create_lattice(optics_mode=default_optics_mode):
"""Create lattice function."""
strengths, twiss_at_start = get_optics_mode(optics_mode)
# -- shortcut symbols --
marker = _pyacc_ele.marker
drift = _pyacc_ele.drift
quadrupole = _pyacc_ele.quadrupole
rbend_sirius = _pyacc_ele.rbend
hcorrector = _pyacc_ele.hcorrector
vcorrector = _pyacc_ele.vcorrector
# --- drift spaces ---
ldif = 0.1442
l015 = drift('l015', 0.1500)
l020 = drift('l020', 0.2000)
l025 = drift('l025', 0.2500)
l040 = drift('l040', 0.4000)
l060 = drift('l060', 0.6000)
l080 = drift('l080', 0.8000)
l090 = drift('l090', 0.9000)
l130 = drift('l130', 1.3000)
l220 = drift('l220', 2.2000)
l280 = drift('l280', 2.8000)
la2p = drift('la2p', 0.08323)
lb2p = drift('lb2p', 0.1330)
ld2p = drift('ld2p', 0.1920)
ld3p = drift('ld3p', 0.1430)
la3p = drift('la3p', 0.2320 - ldif)
lb1p = drift('lb1p', 0.2200 - ldif)
lb3p = drift('lb3p', 0.19897 - ldif)
lc1p = drift('lc1p', 0.18704 - ldif)
lc2p = drift('lc2p', 0.2260 - ldif)
ld1p = drift('ld1p', 0.21409 - ldif)
# --- markers ---
inicio = marker('start')
fim = marker('end')
# --- beam screens ---
scrn = marker('Scrn')
# --- beam current monitors ---
ict = marker('ICT')
fct = marker('FCT')
# --- beam position monitors ---
bpm = marker('BPM')
# --- correctors ---
ch = hcorrector('CH', 0.0)
cv = vcorrector('CV', 0.0)
# --- quadrupoles ---
qf1a = quadrupole('QF1A', 0.14, strengths['qf1a'])
qf1b = quadrupole('QF1B', 0.14, strengths['qf1b'])
qd2 = quadrupole('QD2', 0.14, strengths['qd2'])
qf2 = quadrupole('QF2', 0.20, strengths['qf2'])
qf3 = quadrupole('QF3', 0.20, strengths['qf3'])
qd4a = quadrupole('QD4A', 0.14, strengths['qd4a'])
qf4 = quadrupole('QF4', 0.20, strengths['qf4'])
qd4b = quadrupole('QD4B', 0.14, strengths['qd4b'])
# --- bending magnets ---
d2r = (_math.pi/180)
# -- b --
f = 5.011542/5.333333
h1 = rbend_sirius(
'B', 0.196, d2r*0.8597*f, 0, 0, 0, 0, 0, [0, 0, 0],
_np.array([0, -0.163, -1.443, 0])*f)
h2 = rbend_sirius(
'B', 0.192, d2r*0.8467*f, 0, 0, 0, 0, 0, [0, 0, 0],
_np.array([0, -0.154, -1.418, 0])*f)
h3 = rbend_sirius(
'B', 0.182, d2r*0.8099*f, 0, 0, 0, 0, 0, [0, 0, 0],
_np.array([0, -0.140, -1.403, 0])*f)
h4 = rbend_sirius(
'B', 0.010, d2r*0.0379*f, 0, 0, 0, 0, 0, [0, 0, 0],
_np.array([0, -0.175, -1.245, 0])*f)
h5 = rbend_sirius(
'B', 0.010, d2r*0.0274*f, 0, 0, 0, 0, 0, [0, 0, 0],
_np.array([0, -0.115, -0.902, 0])*f)
h6 = rbend_sirius(
'B', 0.013, d2r*0.0244*f, 0, 0, 0, 0, 0, [0, 0, 0],
_np.array([0, -0.042, -1.194, 0])*f)
h7 = rbend_sirius(
'B', 0.017, d2r*0.0216*f, 0, 0, 0, 0, 0, [0, 0, 0],
_np.array([0, -0.008, -1.408, 0])*f)
h8 = rbend_sirius(
'B', 0.020, d2r*0.0166*f, 0, 0, 0, 0, 0, [0, 0, 0],
_np.array([0, 0.004, -1.276, 0])*f)
h9 = rbend_sirius(
'B', 0.030, d2r*0.0136*f, 0, 0, 0, 0, 0, [0, 0, 0],
_np.array([0, 0.006, -0.858, 0])*f)
h10 = rbend_sirius(
'B', 0.05, d2r*0.0089*f, 0, 0, 0, 0, 0, [0, 0, 0],
_np.array([0, 0.000, -0.050, 0])*f)
mbend = marker('mB')
bend = [h10, h9, h8, h7, h6, h5, h4, h3, h2, h1, mbend,
h1, h2, h3, h4, h5, h6, h7, h8, h9, h10]
# -- Thin Septum --
dip_nam = 'EjeSeptF'
dip_len = 0.5773
dip_ang = -3.6 * d2r
dip_K = 0.0
dip_S = 0.00
h1 = rbend_sirius(
dip_nam, dip_len/2, dip_ang/2, 1*dip_ang/2, 0*dip_ang, 0, 0, 0,
[0, 0, 0], [0, dip_K, dip_S])
h2 = rbend_sirius(
dip_nam, dip_len/2, dip_ang/2, 0*dip_ang/2, 1*dip_ang/2, 0, 0, 0,
[0, 0, 0], [0, dip_K, dip_S])
bejesf = marker('bEjeSeptF') # marker at the beginning of thin septum
mejesf = marker('mEjeSeptF') # marker at the center of thin septum
eejesf = marker('eEjeSeptF') # marker at the end of thin septum
ejesf = [bejesf, h1, mejesf, h2, eejesf]
# -- bo thick ejection septum --
dip_nam = 'EjeSeptG'
dip_len = 0.5773
dip_ang = -3.6 * d2r
dip_K = 0.0
dip_S = 0.00
h1 = rbend_sirius(
dip_nam, dip_len/2, dip_ang/2, 1*dip_ang/2, 0*dip_ang, 0, 0, 0,
[0, 0, 0], [0, dip_K, dip_S])
h2 = rbend_sirius(
dip_nam, dip_len/2, dip_ang/2, 0*dip_ang, 1*dip_ang/2, 0, 0, 0,
[0, 0, 0], [0, dip_K, dip_S])
bejesg = marker('bEjeSeptG') # marker at the beginning of thick septum
mejesg = marker('mEjeSeptG') # marker at the center of thick septum
eejesg = marker('eEjeSeptG') # marker at the end of thick septum
ejesg = [bejesg, h1, mejesg, h2, eejesg]
# -- si thick injection septum (2 of these are used) --
dip_nam = 'InjSeptG'
dip_len = 0.5773
dip_ang = +3.6 * d2r
dip_K = 0.0
dip_S = 0.00
h1 = rbend_sirius(
dip_nam, dip_len/2, dip_ang/2, 1*dip_ang/2, 0*dip_ang, 0, 0, 0,
[0, 0, 0], [0, dip_K, dip_S])
h2 = rbend_sirius(
dip_nam, dip_len/2, dip_ang/2, 0*dip_ang, 1*dip_ang/2, 0, 0, 0,
[0, 0, 0], [0, dip_K, dip_S])
binjsg = marker('bInjSeptG') # marker at the beginning of thick septum
minjsg = marker('mInjSeptG') # marker at the center of thick septum
einjsg = marker('eInjSeptG') # marker at the end of thick septum
injsg = [binjsg, h1, minjsg, h2, einjsg]
# -- si thin injection septum --
dip_nam = 'InjSeptF'
dip_len = 0.5773
dip_ang = +3.118 * d2r
dip_K = 0.0
dip_S = 0.00
h1 = rbend_sirius(
dip_nam, dip_len/2, dip_ang/2, 1*dip_ang/2, 0*dip_ang, 0, 0, 0,
[0, 0, 0], [0, dip_K, dip_S])
h2 = rbend_sirius(
dip_nam, dip_len/2, dip_ang/2, 0*dip_ang, 1*dip_ang/2, 0, 0, 0,
[0, 0, 0], [0, dip_K, dip_S])
binjsf = marker('bInjSeptF') # marker at the beginning of thin septum
minjsf = marker('mInjSeptF') # marker at the center of thin septum
einjsf = marker('eInjSeptF') # marker at the end of thin septum
injsf = [binjsf, h1, minjsf, h2, einjsf]
# --- lines ---
sec01 = [
ejesf, l025, ejesg, l060, cv, l090, qf1a, la2p, ict, l280, scrn, bpm,
l020, ch, l020, qf1b, l020, cv, l020, la3p, bend]
sec02 = [
l080, lb1p, qd2, lb2p, l080, scrn, bpm, l020, qf2, l020, ch, l025, cv,
l015, lb3p, bend]
sec03 = [lc1p, l220, qf3, l025, scrn, bpm, l020, ch, l025, cv, lc2p, bend]
sec04 = [
ld1p, l130, qd4a, ld2p, l060, scrn, bpm, l020, cv, l025, ch, l020,
qf4, ld3p, l020, qd4b, l060, fct, l040, ict, l040, scrn, bpm, cv,
l020, injsg, l025, injsg, l025, injsf, scrn]
elist = [inicio, sec01, sec02, sec03, sec04, fim]
the_line = _pyacc_lat.build(elist)
# shifts model to marker 'start'
idx = _pyacc_lat.find_indices(the_line, 'fam_name', 'start')
the_line = _pyacc_lat.shift(the_line, idx[0])
lengths = _pyacc_lat.get_attribute(the_line, 'length')
for length in lengths:
if length < 0:
raise LatticeError('Model with negative drift!')
# sets number of integration steps
set_num_integ_steps(the_line)
# -- define vacuum chamber for all elements
the_line = set_vacuum_chamber(the_line)
return the_line, twiss_at_start
def get_optics_mode(optics_mode):
"""Return magnet strengths of a given opics mode."""
twiss_at_start = _pyacc_opt.Twiss.make_new(
beta=[9.321, 12.881], alpha=[-2.647, 2.000], etax=[0.231, 0.069])
# -- selection of optics mode --
if optics_mode == 'M1':
strengths = {
'qf1a': 1.70521151606,
'qf1b': 1.734817173998,
'qd2': -2.8243902951,
'qf2': 2.76086143922,
'qf3': 2.632182549934,
'qd4a': -3.048732667316,
'qf4': 3.613066375692,
'qd4b': -1.46213606815,
}
elif optics_mode == 'M2':
strengths = {
'qf1a': 1.670801801437,
'qf1b': 2.098494339697,
'qd2': -2.906779151209,
'qf2': 2.807031512313,
'qf3': 2.533815202102,
'qd4a': -2.962460334623,
'qf4': 3.537403658428,
'qd4b': -1.421177262593,
}
else:
_pyacc_acc.AcceleratorException(
'Invalid TS optics mode: ' + optics_mode)
return strengths, twiss_at_start
def set_num_integ_steps(the_line):
"""Set number of integration steps in each lattice element."""
for i, _ in enumerate(the_line):
if the_line[i].angle:
length = the_line[i].length
the_line[i].nr_steps = max(10, int(_math.ceil(length/0.035)))
elif the_line[i].polynom_b[1]:
the_line[i].nr_steps = 10
elif the_line[i].polynom_b[2]:
the_line[i].nr_steps = 5
else:
the_line[i].nr_steps = 1
def set_vacuum_chamber(the_line):
"""Set vacuum chamber for all elements."""
# -- default physical apertures --
for i, _ in enumerate(the_line):
the_line[i].hmin = -0.012
the_line[i].hmax = +0.012
the_line[i].vmin = -0.012
the_line[i].vmax = +0.012
# -- bo ejection septa --
beg = _pyacc_lat.find_indices(the_line, 'fam_name', 'bEjeSeptF')[0]
end = _pyacc_lat.find_indices(the_line, 'fam_name', 'eEjeSeptG')[0]
for i in range(beg, end+1):
the_line[i].hmin = -0.0150
the_line[i].hmax = +0.0150
the_line[i].vmin = -0.0040
the_line[i].vmax = +0.0040
# -- si thick injection septum --
beg = _pyacc_lat.find_indices(the_line, 'fam_name', 'bInjSeptG')[0]
end = _pyacc_lat.find_indices(the_line, 'fam_name', 'eInjSeptG')[0]
for i in range(beg, end+1):
the_line[i].hmin = -0.0045
the_line[i].hmax = +0.0045
the_line[i].vmin = -0.0035
the_line[i].vmax = +0.0035
# -- si thin injection septum --
beg = _pyacc_lat.find_indices(the_line, 'fam_name', 'bInjSeptF')[0]
end = _pyacc_lat.find_indices(the_line, 'fam_name', 'eInjSeptF')[0]
for i in range(beg, end+1):
the_line[i].hmin = -0.0150
the_line[i].hmax = +0.0150
the_line[i].vmin = -0.0035
the_line[i].vmax = +0.0035
return the_line
| mit | -4,336,585,749,294,970,400 | 33.583871 | 78 | 0.544819 | false |
toshka/torrt | torrt/notifiers/telegram.py | 1 | 1820 | import logging
import requests
from requests import RequestException
from torrt.base_notifier import BaseNotifier
from torrt.utils import NotifierClassesRegistry
LOGGER = logging.getLogger(__name__)
class TelegramNotifier(BaseNotifier):
"""Telegram bot notifier. See instructions how to create bot at https://core.telegram.org/bots/api"""
alias = 'telegram'
url = 'https://api.telegram.org/bot'
def __init__(self, token, chat_id):
"""
:param token: str - Telegram's bot token
:param chat_id: str - Telegram's chat ID
"""
self.token = token
self.chat_id = chat_id
def make_message(self, torrent_data):
return '''The following torrents were updated:\n%s''' \
% '\n'.join(map(lambda t: t['name'], torrent_data.values()))
def test_configuration(self):
url = '%s%s/getMe' % (self.url, self.token)
r = requests.get(url)
return r.json().get('ok', False)
def send_message(self, msg):
url = '%s%s/sendMessage' % (self.url, self.token)
try:
response = requests.post(url, data={'chat_id': self.chat_id, 'text': msg})
except RequestException as e:
LOGGER.error('Failed to send Telegram message: %s', e)
else:
if response.ok:
json_data = response.json()
if json_data['ok']:
LOGGER.debug('Telegram message was sent to user %s', self.chat_id)
else:
LOGGER.error('Telegram notification not send: %s', json_data['description'])
else:
LOGGER.error('Telegram notification not send. Response code: %s (%s)',
response.status_code, response.reason)
NotifierClassesRegistry.add(TelegramNotifier)
| bsd-3-clause | -1,036,498,508,307,981,600 | 34 | 105 | 0.593407 | false |
alpine9000/amiga_examples | tools/external/amitools/amitools/fs/validate/DirScan.py | 1 | 6864 | from BlockScan import BlockScan
from amitools.fs.FSString import FSString
from amitools.fs.FileName import FileName
from amitools.fs.validate.Log import Log
import amitools.fs.DosType as DosType
class DirChainEntry:
"""entry of the hash chain"""
def __init__(self, blk_info):
self.blk_info = blk_info
self.parent_ok = False
self.fn_hash_ok = False
self.valid = False
self.end = False
self.orphaned = False
self.sub = None
def __str__(self):
l = []
if self.parent_ok:
l.append("parent_ok")
if self.fn_hash_ok:
l.append("fn_hash_ok")
if self.valid:
l.append("valid")
if self.end:
l.append("end")
if self.orphaned:
l.append("orphaned")
return "[DCE @%d '%s': %s]" % \
(self.blk_info.blk_num, self.blk_info.name, " ".join(l))
class DirChain:
"""representing a chain of the hashtable in a directory"""
def __init__(self, hash_val):
self.hash_val = hash_val
self.chain = []
def add(self, dce):
self.chain.append(dce)
def get_entries(self):
return self.chain
def __str__(self):
return "{DirChain +%d: #%d}" % (self.hash_val, len(self.chain))
class DirInfo:
"""information structure on a directory"""
def __init__(self, blk_info):
self.blk_info = blk_info
self.chains = {}
self.children = []
def add(self, dc):
self.chains[dc.hash_val] = dc
def add_child(self, c):
self.children.append(c)
def get(self, hash_val):
if hash_val in self.chains:
return self.chains[hash_val]
else:
return None
def get_chains(self):
return self.chains
def __str__(self):
bi = self.blk_info
blk_num = bi.blk_num
name = bi.name
parent_blk = bi.parent_blk
return "<DirInfo @%d '%s' #%d parent:%d child:#%d>" % (blk_num, name, len(self.chains), parent_blk, len(self.children))
class DirScan:
"""directory tree scanner"""
def __init__(self, block_scan, log):
self.log = log
self.block_scan = block_scan
self.root_di = None
self.intl = DosType.is_intl(block_scan.dos_type)
self.files = []
self.dirs = []
def scan_tree(self, root_blk_num, progress=None):
"""scan the root tree"""
# get root block info
root_bi = self.block_scan.get_block(root_blk_num)
if root_bi == None:
self.log.msg(Log.ERROR,"Root block not found?!",root_blk_num)
return None
# do tree scan
if progress != None:
progress.begin("dir")
self.root_di = self.scan_dir(root_bi, progress)
if progress != None:
progress.end()
return self.root_di
def scan_dir(self, dir_bi, progress):
"""check a directory by scanning through the hash table entries and follow the chains
Returns (all_chains_ok, dir_obj)
"""
# create new dir info
di = DirInfo(dir_bi)
self.dirs.append(di)
# run through hash_table of directory and build chains
chains = {}
hash_val = 0
for blk_num in dir_bi.hash_table:
if blk_num != 0:
# build chain
chain = DirChain(hash_val)
self.build_chain(chain, dir_bi, blk_num, progress)
di.add(chain)
hash_val += 1
return di
def build_chain(self, chain, dir_blk_info, blk_num, progress):
"""build a block chain"""
dir_blk_num = dir_blk_info.blk_num
dir_name = dir_blk_info.name
hash_val = chain.hash_val
# make sure entry block is first used
block_used = self.block_scan.is_block_available(blk_num)
# get entry block
blk_info = self.block_scan.read_block(blk_num)
# create dir chain entry
dce = DirChainEntry(blk_info)
chain.add(dce)
# account
if progress != None:
progress.add()
# block already used?
if block_used:
self.log.msg(Log.ERROR, "dir block already used in chain #%d of dir '%s (%d)" % (hash_val, dir_name, dir_blk_num), blk_num)
dce.end = True
return
# self reference?
if blk_num == dir_blk_num:
self.log.msg(Log.ERROR, "dir block in its own chain #%d of dir '%s' (%d)" % (hash_val, dir_name, dir_blk_num), blk_num)
dce.end = True
return
# not a block in range
if blk_info == None:
self.log.msg(Log.ERROR, "out-of-range block terminates chain #%d of dir '%s' (%d)" % (hash_val, dir_name, dir_blk_num), blk_num)
dce.end = True
return
# check type of entry block
b_type = blk_info.blk_type
if b_type not in (BlockScan.BT_DIR, BlockScan.BT_FILE_HDR):
self.log.msg(Log.ERROR, "invalid block terminates chain #%d of dir '%s' (%d)" % (hash_val, dir_name, dir_blk_num), blk_num)
dce.end = True
return
# check referenceed block type in chain
blk_type = blk_info.blk_type
if blk_type in (BlockScan.BT_ROOT, BlockScan.BT_FILE_LIST, BlockScan.BT_FILE_DATA):
self.log.msg(Log.ERROR, "invalid block type %d terminates chain #%d of dir '%s' (%d)" % (blk_type, hash_val, dir_name, dir_blk_num), blk_num)
dce.end = True
return
# all following are ok
dce.valid = True
# check parent of block
name = blk_info.name
dce.parent_ok = (blk_info.parent_blk == dir_blk_num)
if not dce.parent_ok:
self.log.msg(Log.ERROR, "invalid parent in '%s' chain #%d of dir '%s' (%d)" % (name, hash_val, dir_name, dir_blk_num), blk_num)
# check name hash
fn = FileName(name, self.intl)
fn_hash = fn.hash()
dce.fn_hash_ok = (fn_hash == hash_val)
if not dce.fn_hash_ok:
self.log.msg(Log.ERROR, "invalid name hash in '%s' chain #%d of dir '%s' (%d)" % (name, hash_val, dir_name, dir_blk_num), blk_num)
# recurse into dir?
if blk_type == BlockScan.BT_DIR:
dce.sub = self.scan_dir(blk_info, progress)
elif blk_type == BlockScan.BT_FILE_HDR:
self.files.append(dce)
# check next block in chain
next_blk = blk_info.next_blk
if next_blk != 0:
self.build_chain(chain, dir_blk_info, next_blk, progress)
else:
dce.end = True
def get_all_file_hdr_blk_infos(self):
"""return all file chain entries"""
result = []
for f in self.files:
result.append(f.blk_info)
return result
def get_all_dir_infos(self):
"""return all dir infos"""
return self.dirs
def dump(self):
"""dump whole dir info structure"""
self.dump_dir_info(self.root_di, 0)
def dump_dir_info(self, di, indent):
"""dump a single dir info structure and its sub dirs"""
istr = " " * indent
print istr, di
for hash_value in sorted(di.get_chains().keys()):
dc = di.get(hash_value)
print istr," ",dc
for dce in dc.get_entries():
print istr," ",dce
sub = dce.sub
if sub != None and dce.blk_info.blk_type == BlockScan.BT_DIR:
self.dump_dir_info(sub, indent+1)
| bsd-2-clause | -732,434,269,223,529,100 | 28.333333 | 147 | 0.60169 | false |
oaubert/advene | setup.py | 1 | 6968 | #! /usr/bin/env python3
import logging
logger = logging.getLogger(__name__)
import os
from setuptools import setup, find_packages
import sys
# We define the main script name here (file in bin), since we have to change it for MacOS X
SCRIPTNAME='advene'
def check_changelog(maindir, version):
"""Check that the changelog for maindir matches the given version."""
with open(os.path.join( maindir, "CHANGES.txt" ), 'r') as f:
l=f.readline()
if not l.startswith('advene (' + version + ')'):
logger.error("The CHANGES.txt does not seem to match version %s\n%s\nUpdate either the CHANGES.txt or the lib/advene/core/version.py file", version, l)
sys.exit(1)
return True
def get_plugin_list(*package):
"""Return a plugin list from the given package.
package is in fact a list of path/module path elements.
No recursion is done.
"""
package= [ 'advene' ] + list(package)
path=os.path.sep.join(package)
prefix='.'.join(package)
plugins=[]
d=os.path.join('lib', path)
if not os.path.exists(d):
raise Exception("%s does not match a directory (%s does not exist)" % (prefix, d))
for n in os.listdir(d):
name, ext = os.path.splitext(n)
if ext != '.py':
continue
# Poor man's grep.
if [ l for l in open(os.path.join(d, n)).readlines() if 'def register' in l ]:
# It may be a plugin. Include it.
plugins.append('.'.join((prefix, name)))
return plugins
def get_version():
"""Get the version number of the package."""
maindir = os.path.dirname(os.path.abspath(sys.argv[0]))
if os.path.exists(os.path.join(maindir, "setup.py")):
# Chances are that we were in a development tree...
libpath=os.path.join(maindir, "lib")
sys.path.insert (0, libpath)
import advene.core.version
version=advene.core.version.version
else:
raise Exception("Unable to determine advene version number.")
check_changelog(maindir, version)
return version
_version=get_version()
platform_options={}
def get_packages_list():
"""Recursively find packages in lib.
Return a list of packages (dot notation) suitable as packages parameter
for distutils.
"""
if 'linux' in sys.platform:
return find_packages('lib', exclude=["cherrypy.*"])
else:
return find_packages('lib')
def generate_data_dir(dir_, prefix="", postfix=""):
"""Return a structure suitable for datafiles from a directory.
It will return a sequence of (directory, files) corresponding to the
data in the given directory.
prefix and postfix are dumbly added to dirname, so do not forget
the trailing / for prefix, and leading / for postfix if necessary.
"""
l = []
installdir=prefix+dir_+postfix
for dirname, dnames, fnames in os.walk(dir_):
if fnames:
if dirname.startswith(dir_):
installdirname=dirname.replace(dir_, installdir, 1)
l.append((installdirname, [ absf
for absf in [ os.path.sep.join((dirname,f))
for f in fnames ]
if not os.path.isdir(absf) ]))
return l
def generate_data_files():
# On Win32, we will install data files in
# \Program Files\Advene\share\...
# On MacOS X, it will be in Advene.app/Contents/Resources
# On Unix, it will be
# /usr/share/advene/...
if sys.platform == 'win32' or sys.platform == 'darwin':
prefix=''
postfix=''
else:
prefix="share"+os.path.sep
postfix=os.path.sep+"advene"
r=generate_data_dir("share", postfix=postfix)
r.extend(generate_data_dir("doc", prefix=prefix, postfix=postfix))
if not os.path.isdir("locale"):
logger.warning("""**WARNING** Generating the locales with "cd po; make mo".""")
os.system("pwd; cd po; make mo")
if os.path.isdir("locale"):
r.extend(generate_data_dir("locale", prefix=prefix))
else:
logger.warning("""**WARNING** Cannot find locale directory.""")
if sys.platform.startswith('linux'):
# Install specific data files
r.append( ( 'share/applications', [ 'share/advene.desktop' ] ) )
return r
myname = "Olivier Aubert"
myemail = "[email protected]"
setup (name = "advene",
version = _version,
description = "Annotate DVds, Exchange on the NEt",
keywords = "dvd,video,annotation",
author = "Advene project team",
author_email = myemail,
maintainer = myname,
maintainer_email = myemail,
url = "https://www.advene.org/",
license = "GPL",
long_description = """Annotate DVds, Exchange on the NEt
The Advene (Annotate DVd, Exchange on the NEt) project is aimed
towards communities exchanging discourses (analysis, studies) about
audiovisual documents (e.g. movies) in DVD format. This requires that
audiovisual content and hypertext facilities be integrated, thanks to
annotations providing explicit structures on audiovisual streams, upon
which hypervideo documents can be engineered.
.
The cross-platform Advene application allows users to easily
create comments and analyses of video documents, through the
definition of time-aligned annotations and their mobilisation
into automatically-generated or user-written comment views (HTML
documents). Annotations can also be used to modify the rendition
of the audiovisual document, thus providing virtual montage,
captioning, navigation... capabilities. Users can exchange their
comments/analyses in the form of Advene packages, independently from
the video itself.
.
The Advene framework provides models and tools allowing to design and reuse
annotations schemas; annotate video streams according to these schemas;
generate and create Stream-Time Based (mainly video-centred) or User-Time
Based (mainly text-centred) visualisations of the annotations. Schemas
(annotation- and relation-types), annotations and relations, queries and
views can be clustered and shared in units called packages. Hypervideo
documents are generated when needed, both from packages (for annotation and
view description) and DVDs (audiovisual streams).
""",
package_dir = {'': 'lib'},
packages = get_packages_list(),
scripts = [ 'bin/%s' % SCRIPTNAME, 'bin/advene_import', 'bin/advene_export' ],
data_files = generate_data_files(),
classifiers = [
'Environment :: X11 Applications :: GTK',
'Environment :: Win32 (MS Windows)',
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Programming Language :: Python',
'Intended Audience :: End Users/Desktop',
'Operating System :: OS Independent',
'Topic :: Multimedia :: Video :: Non-Linear Editor'
],
**platform_options
)
| gpl-2.0 | 829,313,294,184,711,600 | 37.076503 | 159 | 0.65729 | false |
ytsapras/robonet_site | scripts/OBSOLETE_CODE/run_tap.py | 1 | 5800 | #################################################################################
# Collection of routines to update the RoboNet database tables
# Keywords match the class model fields in ../robonet_site/events/models.py
#
# Written by Yiannis Tsapras Oct 2016
# Last update:
#################################################################################
# Import dependencies
from update_db_2 import *
import warnings
warnings.filterwarnings('ignore',module='astropy.coordinates')
def visibility(event,mlsites=['CPT','COJ','LSC']):
vis=0.
for site in mlsites:
lonrobo=float(Telescope.objects.filter(site=site).values('longitude')[0]['longitude'])*u.deg
latrobo=float(Telescope.objects.filter(site=site).values('latitude')[0]['latitude'])*u.deg
###WARNING IF ASTROPY FIXES THAT BUG, LON AND LAT NEED TO BE CHANGED AT SOME POINT!!!!
RoboSite=EarthLocation(lat=lonrobo,lon=latrobo,height=float(Telescope.objects.filter(site=site).values('altitude')[0]['altitude'])*u.m)
mlcrd=SkyCoord(str(event.ev_ra)+' '+str(event.ev_dec),unit=(u.hourangle, u.deg))
time=Time(datetime.utcnow(),scale='utc')
mlalt=mlcrd.transform_to(AltAz(obstime=time,location=RoboSite))
delta_midnight=linspace(-12, 12, 24)*u.hour
mlaltazs=mlalt.transform_to(AltAz(obstime=time+delta_midnight,location=RoboSite))
times=time+delta_midnight
altazframe=AltAz(obstime=times,location=RoboSite)
sunaltazs=get_sun(times).transform_to(altazframe)
altsun = interp1d(times.jd2,sunaltazs.alt*u.deg,kind='linear',fill_value=0.,bounds_error=False)
altml = interp1d(times.jd2,mlaltazs.alt*u.deg,kind='linear',fill_value=0.,bounds_error=False)
deltat=1.0/200.
for timetest in arange(-0.5,+0.5,deltat):
if altsun(timetest)<-18. and altml(timetest)>30.:
vis+=deltat
print vis
return vis
#RUN TAP
def omegas(t,u0,te,t0,fs,fb,k2):
k1=0.4
g=fb/fs
usqr=u0**2+((t-t0)/te)**2
pspl_deno=(usqr*(usqr+4.))**0.5
psip=4.0/(pspl_deno)-2.0/(usqr+2.0+pspl_deno)
a=(usqr+2.)/pspl_deno
return psip/(a**0.5*((a+g)*(1.0+g)*k2/(a*a)+k1))
def pspl(t,u0,te,t0):
usqr=u0**2+((t-t0)/te)**2
return (usqr+2.0)/(usqr*(usqr+4.0))**0.5
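# Illustrative evaluation of the two functions above (all numbers are made up,
# not taken from the database):
#   mu_now    = pspl(t=7500.0, u0=0.1, te=20.0, t0=7505.0)
#   omega_now = omegas(t=7500.0, u0=0.1, te=20.0, t0=7505.0,
#                      fs=0.001, fb=0.0005, k2=1.0)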
def run_tap():
from jdcal import gcal2jd
from math import log10
ut_current=time.gmtime()
t_current=gcal2jd(ut_current[0],ut_current[1],ut_current[2])[1]-49999.5+ut_current[3]/24.0+ut_current[4]/(1440.)
active_events_list=Event.objects.select_related().filter(status='AC')
for event in active_events_list:
event_id=event.pk
event_name=EventName.objects.select_related().filter(event=event)[0].name
t0=float(SingleModel.objects.select_related().filter(event=event).values().latest('last_updated')['Tmax'])
#print event_name,e,e.pk,t0#,DataFile.objects.filter(event=e.pk).values()
timestamp = timezone.now()
telclass='1m'
inst= 'default'
filt= 'default'
if DataFile.objects.filter(event=event).exists():
u0=float(SingleModel.objects.select_related().filter(event=event).values().latest('last_updated')['umin'])
te=float(SingleModel.objects.select_related().filter(event=event).values().latest('last_updated')['tau'])
t0=float(SingleModel.objects.select_related().filter(event=event).values().latest('last_updated')['Tmax'])
ibase=float(DataFile.objects.select_related().filter(event=event).values().latest('last_upd')['baseline'])
g=float(DataFile.objects.select_related().filter(event=event).values().latest('last_upd')['g'])
# print u0,te,t0,ibase,g,e.pk,event_name,e
k2=10.0**(0.4*(ibase-18.))
ftot=10.0**(-0.4*ibase)
fs_ref=ftot/(1.0+g)
fb_ref=g*fs_ref
omega=omegas(t_current,u0,te,t0,fs_ref,fb_ref,k2)
err_omega=0.
peak_omega=omegas(t0,u0,te,t0,fs_ref,fb_ref,k2)
munow=pspl(t_current,u0,te,t0)
dailyvisibility=0.
cost1m=0.
if omega<6.0:
priority='L'
else:
##CALCULATE VISIBILITY FOR MEDIUM AND HIGH PRIORITY EVENTS
dailyvisibility=24.0*visibility(event)
print dailyvisibility,event
if omega<10. and omega>6.:
priority='M'
if omega>10.:
priority='H'
sig_te=float(SingleModel.objects.filter(event=event).values().latest('last_updated')['e_tau'])
winit=1./900
if sig_te==0.:
sig_te=0.001
wte=1./sig_te**2
if munow==0.:
tsamp=-1.
else:
#USE MODIFIED SAMPLING INTERVAL FROM DOMINIK (FULLY DETERMINISTIC...)
tsamp=(te*wte/26.+winit)/(winit+wte)*1.7374382779324/(munow)**0.5
else:
wte=1./sig_te**2
if munow==0.:
tsamp=-1.
else:
tsamp=(te*wte/26.+winit)/(winit+wte)*1.7374382779324/(munow)**0.5
if munow==0.:
imag=99.
else:
#EMPIRICAL EXPTIME CALCULATOR BASED ON IMAG -> NEEDS TO BE REVISITED FOR 2016
imag=-2.5*log10(fs_ref*munow+fb_ref)
texp=10.**(0.43214*imag-4.79556)
if texp>300.:
texp=300.
else:
if texp<60.:
texp=6.
#ALWAYS TAKE 2 EXPOSURES
nexp = 2.
overhead=2.
cost1m=dailyvisibility/tsamp*(overhead+nexp*texp/60.)
err_omega = 0.
#DEFINE A BLENDED EVENT AS g>0.5
blended = g>0.5
add_tap(event_name=event_name, timestamp=timestamp, priority=priority, tsamp=tsamp,
texp=texp, nexp=nexp, telclass=telclass, imag=imag, omega=omega,
err_omega=err_omega, peak_omega=peak_omega, blended=blended, visibility=dailyvisibility, cost1m=cost1m)
| gpl-2.0 | -7,316,641,612,667,854,000 | 40.134752 | 141 | 0.602414 | false |
tensorflow/datasets | tensorflow_datasets/structured/genomics_ood_test.py | 1 | 1468 | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for genomics_ood dataset."""
import os
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.structured import genomics_ood
_FAKE_DATA_FILE = os.path.join(
os.path.normpath(os.path.dirname(__file__) + '/../../'),
'tensorflow_datasets', 'testing', 'test_data', 'fake_examples',
'genomics_ood', 'genomics_ood.zip')
class GenomicsOodTest(tfds.testing.DatasetBuilderTestCase):
genomics_ood._DATA_URL = _FAKE_DATA_FILE
DATASET_CLASS = genomics_ood.GenomicsOod
SPLITS = {
'train': 5, # Number of fake train example
'validation': 5, # Number of fake validation example
'test': 5, # Number of fake test example
'validation_ood': 5, # Number of fake validation ood example
'test_ood': 5, # Number of fake test ood example
}
if __name__ == '__main__':
tfds.testing.test_main()
| apache-2.0 | -8,410,193,488,320,906,000 | 33.952381 | 74 | 0.709128 | false |
ArcherSys/ArcherSys | Lib/encodings/iso2022_jp.py | 1 | 3299 | #
# iso2022_jp.py: Python Unicode Codec for ISO2022_JP
#
# Written by Hye-Shik Chang <[email protected]>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp')
class Codec(codecs.Codec):
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec
def getregentry():
    return codecs.CodecInfo(
        name='iso2022_jp',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| mit | 5,202,806,530,791,460,000 | 25.821138 | 74 | 0.700515 | false |
willcassella/SinGE | Tools/SinGED/types.py | 1 | 12310 | # types.py
import bpy
from bpy.types import PropertyGroup
from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, PointerProperty, EnumProperty, FloatVectorProperty
from functools import partial
def get_unused_component_types(scene=None, context=None):
# Unused arguments
del scene, context
node_id = bpy.context.active_object.sge_node_id
sge_scene = SinGEDProps.sge_scene
node = sge_scene.get_node(node_id)
used_component = sge_scene.get_node_components(node)
result = []
for component_type in SinGEDProps.sge_typedb.component_types:
if component_type not in (c.type.type_name for c in used_component):
result.append((component_type, component_type, ''))
return result
def construct_property_display_name(prop_name):
return prop_name.replace("_", " ")
def construct_property_path(property_path_str, prop_name):
if len(property_path_str) == 0:
return [prop_name]
return property_path_str.split('.') + [prop_name]
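# Illustrative behaviour of the two helpers above (property names assumed):
#   construct_property_display_name('local_rotation')  -> 'local rotation'
#   construct_property_path('', 'value')                -> ['value']
#   construct_property_path('shape.extent', 'x')        -> ['shape', 'extent', 'x']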
def property_getter(component_type_name, property_path, default):
try:
# Get the active node and component instance
sge_scene = SinGEDProps.sge_scene
node_id = bpy.context.active_object.sge_node_id
node = sge_scene.get_node(node_id)
component_type = sge_scene.get_component_type(component_type_name)
component_instance = component_type.get_instance(node)
# Get the property value
return component_instance.get_sub_property_immediate(property_path, default)
except Exception:
path = [component_type_name]
path.extend(property_path)
print("ERROR RETREIVING PROPERTY: {}".format(path))
return default
def property_setter(component_type_name, property_path, value):
# Get the active node and component instance
sge_scene = SinGEDProps.sge_scene
node_id = bpy.context.active_object.sge_node_id
node = sge_scene.get_node(node_id)
component_type = sge_scene.get_component_type(component_type_name)
component_instance = component_type.get_instance(node)
# Set the property value
component_instance.set_sub_property_immediate(property_path, value)
class SGETypes(PropertyGroup):
sge_component_types = EnumProperty(items=get_unused_component_types)
class SinGEDProps(PropertyGroup):
sge_host = StringProperty(name='Host', default='localhost')
sge_port = IntProperty(name='Port', default=1995)
sge_types = PointerProperty(type=SGETypes)
sge_realtime_update_delay = FloatProperty(default=0.033, precision=3, unit='TIME')
sge_scene_path = StringProperty(name='Path', default='')
sge_lightmap_light_dir = FloatVectorProperty(name="Light direction", subtype='XYZ', size=3, default=[0.0, -0.5, -0.5])
sge_lightmap_light_color = FloatVectorProperty(name="Light color", subtype='COLOR', size=3, default=[0.5, 0.5, 0.5])
sge_lightmap_light_intensity = FloatProperty(name="Light intensity", default=8.0)
sge_lightmap_ambient_color = FloatVectorProperty(name="Ambient light color", subtype='COLOR', size=3, default=[0.5, 0.5, 0.5])
sge_lightmap_ambient_intensity = FloatProperty(name="Ambient light intensity", default=0.0)
sge_lightmap_num_indirect_sample_sets = IntProperty(name="Indirect sample sets", subtype='UNSIGNED', default=16)
sge_lightmap_num_accumulation_steps = IntProperty(name="Accumulation steps", subtype='UNSIGNED', default=1)
sge_lightmap_num_post_steps = IntProperty(name="Post processing steps", subtype='UNSIGNED', default=2)
sge_lightmap_path = StringProperty(name="Lightmap path")
sge_session = None
sge_typedb = None
sge_scene = None
sge_resource_manager = None
class SGETypeBase(PropertyGroup):
@classmethod
def sge_unregister(cls):
bpy.utils.unregister_class(cls)
@classmethod
def sge_create_property(cls, name):
return PointerProperty(name=name, type=cls)
@classmethod
def sge_draw(cls, layout, parent_obj, parent_attr_name):
# Draw each property recursively
self = getattr(parent_obj, parent_attr_name)
for attr_name, prop_name, prop_type in cls.sge_property_list:
# If the property is a primitive type, don't give it a label
if not issubclass(prop_type, SGEPrimitiveBase):
layout.label(construct_property_display_name(prop_name))
prop_type.sge_draw(layout.column(), self, attr_name)
class SGEPrimitiveBase(object):
@staticmethod
def sge_unregister():
return
@staticmethod
def sge_draw(layout, parent_obj, parent_attr_name):
# Draw the property
layout.prop(parent_obj, parent_attr_name)
class SGEBool(SGEPrimitiveBase):
@staticmethod
def sge_create_property(name):
return BoolProperty(
name=construct_property_display_name(name),
get=lambda outer: property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), False),
set=lambda outer, value: property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), value))
class SGEInt(SGEPrimitiveBase):
@staticmethod
def sge_create_property(name):
return IntProperty(
name=construct_property_display_name(name),
get=lambda outer: property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), 0),
set=lambda outer, value: property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), value))
class SGEUInt(SGEPrimitiveBase):
@staticmethod
def sge_create_property(name):
return IntProperty(
name=construct_property_display_name(name),
subtype='UNSIGNED',
get=lambda outer: property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), 0),
set=lambda outer, value: property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), value))
class SGEFloat(SGEPrimitiveBase):
@staticmethod
def sge_create_property(name):
return FloatProperty(
name=construct_property_display_name(name),
get=lambda outer: property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), 0.0),
set=lambda outer, value: property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), value))
class SGEString(SGEPrimitiveBase):
@staticmethod
def sge_create_property(name):
return StringProperty(
name=construct_property_display_name(name),
get=lambda outer: property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), ""),
set=lambda outer, value: property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), value))
class SGEAngle(SGEPrimitiveBase):
@staticmethod
def sge_create_property(name):
return FloatProperty(
name=construct_property_display_name(name),
subtype='ANGLE',
get=lambda outer: property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), 0),
set=lambda outer, value: property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), value))
class SGEColorRGBA8(SGEPrimitiveBase):
@staticmethod
def sge_get(outer, prop_name):
value = property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, prop_name), "ffffffff")
red = int(value[: 2], 16)
green = int(value[2: 4], 16)
blue = int(value[4: 6], 16)
alpha = int(value[6: 8], 16)
return [float(red)/255, float(green)/255, float(blue)/255, float(alpha)/255]
@staticmethod
def sge_set(outer, prop_name, value):
red = int(value[0] * 255)
green = int(value[1] * 255)
blue = int(value[2] * 255)
alpha = int(value[3] * 255)
property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, prop_name), "%0.2x%0.2x%0.2x%0.2x" % (red, green, blue, alpha))
@staticmethod
def sge_create_property(name):
return FloatVectorProperty(
name=name,
subtype='COLOR',
size=4,
min=0.0,
max=1.0,
get=lambda outer: SGEColorRGBA8.sge_get(outer, name),
set=lambda outer, value: SGEColorRGBA8.sge_set(outer, name, value))
class SGEColorRGBF32(SGEPrimitiveBase):
@staticmethod
def sge_create_property(name):
return FloatVectorProperty(
name=construct_property_display_name(name),
subtype='COLOR',
size=3,
get=lambda outer: property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), [0.0, 0.0, 0.0]),
set=lambda outer, value: property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), value))
class SGEVec2(SGEPrimitiveBase):
@staticmethod
def sge_get(outer, prop_name):
value = property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, prop_name), None)
if value is None:
return [0.0, 0.0]
else:
return [value['x'], value['y']]
@staticmethod
def sge_set(outer, prop_name, value):
property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, prop_name), {'x': value[0], 'y': value[1]})
@staticmethod
def sge_create_property(name):
return FloatVectorProperty(
name=construct_property_display_name(name),
subtype='XYZ',
size=2,
get=lambda outer: SGEVec2.sge_get(outer, name),
set=lambda outer, value: SGEVec2.sge_set(outer, name, value))
class SGEVec3(SGEPrimitiveBase):
@staticmethod
def sge_get(outer, prop_name):
value = property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, prop_name), None)
if value is None:
return [0.0, 0.0, 0.0]
else:
return [value['x'], value['y'], value['z']]
@staticmethod
def sge_set(outer, prop_name, value):
property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, prop_name), {'x': value[0], 'y': value[1], 'z': value[2]})
@staticmethod
def sge_create_property(name):
return FloatVectorProperty(
name=construct_property_display_name(name),
subtype='XYZ',
size=3,
get=lambda outer: SGEVec3.sge_get(outer, name),
set=lambda outer, value: SGEVec3.sge_set(outer, name, value))
def create_blender_type(typedb, type_name, type_info):
# Create dictionaries for the class and the properties
property_list = list()
class_dict = {
'sge_type_name': type_name,
'sge_property_list': property_list,
'sge_component_type_name': StringProperty(),
'sge_property_path': StringProperty(),
}
# Define each property
if 'properties' in type_info:
properties = list(type_info['properties'].items())
properties.sort(key=lambda prop: prop[1]['index'])
for prop_name, prop_info in properties:
# Get the property's type
prop_type = typedb.get_type(prop_info['type'])
# Create an attribute name for the property
attr_name = "sge_prop_{}".format(prop_name)
# Create the class dictionary entry
class_dict[attr_name] = prop_type.sge_create_property(prop_name)
# Create the property list entry
property_list.append((attr_name, prop_name, prop_type))
# Generate a sanitary name for the type
class_name = type_name.replace("::", "_")
# Create the type
blender_type = type(class_name, (SGETypeBase,), class_dict)
# Register it with Blender
bpy.utils.register_class(blender_type)
return blender_type
| mit | -3,868,031,757,075,581,000 | 39.89701 | 167 | 0.670106 | false |
Seattle-Meal-Maps/seattle-meal-maps-api | meal_api/meal_api/urls.py | 1 | 1205 | """meal_api URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework import routers
from api.views import DataViewSet, HoursViewSet
router = routers.DefaultRouter()
router.register(r'services', DataViewSet)
router.register(r'hours', HoursViewSet)
hours_list = HoursViewSet.as_view({
'get': 'list'
})
data_list = DataViewSet.as_view({
'get': 'list'
})
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
| mit | -6,016,341,527,609,289,000 | 32.472222 | 82 | 0.703734 | false |
hpcugent/vsc-mympirun | test/pmi_utils.py | 1 | 6270 | #
# Copyright 2019-2021 Ghent University
#
# This file is part of vsc-mympirun,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# the Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/hpcugent/vsc-mympirun
#
# vsc-mympirun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# vsc-mympirun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with vsc-mympirun. If not, see <http://www.gnu.org/licenses/>.
#
"""
PMI common test code
"""
import copy
import glob
import os
import sys
import re
from vsc.install.testing import TestCase
from vsc.utils.py2vs3 import is_string
from vsc.utils.run import run
from sched import reset_env
from mock import patch
from end2end import install_fake_cmd
from vsc.mympirun.main import get_mpi_and_sched_and_options
from vsc.mympirun.factory import getinstance
import vsc.mympirun.pmi.mpi as mpim
import vsc.mympirun.pmi.sched as schedm
from vsc.mympirun.pmi.option import MypmirunOption as mpiopt
SLURM_2NODES = """
SLURM_CPUS_ON_NODE=32
SLURM_JOB_CPUS_PER_NODE=32(x2)
SLURM_JOB_ID=123456
SLURM_JOB_NODELIST=node[3302-3303]
SLURM_JOB_NUM_NODES=2
SLURM_MEM_PER_CPU=7600
SLURM_NNODES=2
SLURM_NPROCS=64
SLURM_NTASKS=64
"""
SLURM_2NODES_4GPUS = SLURM_2NODES + """
SLURM_JOB_GPUS=0,1,2,3
"""
class PMITest(TestCase):
def setUp(self):
"""Prepare to run test."""
super(PMITest, self).setUp()
self.orig_environ = copy.deepcopy(os.environ)
# add /bin to $PATH, /lib to $PYTHONPATH
self.topdir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
self.script = os.path.join(os.path.join(self.topdir, 'bin'), 'mypmirun.py')
lib = os.path.join(self.topdir, 'lib')
# make sure subshell finds .egg files by adding them to the pythonpath
eggs = ':'.join(glob.glob(os.path.join(self.topdir, '.eggs', '*.egg')))
os.environ['PYTHONPATH'] = '%s:%s:%s' % (eggs, lib, os.getenv('PYTHONPATH', ''))
# make sure we're using the right mympirun installation...
ec, out = run([sys.executable, '-c', "import vsc.mympirun; print(vsc.mympirun.__file__)"])
out = out.strip()
expected_path = os.path.join(self.topdir, 'lib', 'vsc', 'mympirun')
self.assertTrue(os.path.samefile(os.path.dirname(out), expected_path), "%s not in %s" % (out, expected_path))
self.which_patcher = patch('vsc.mympirun.common.which')
self.mock_which = self.which_patcher.start()
def tearDown(self):
"""Clean up after running test."""
self.which_patcher.stop()
reset_env(self.orig_environ)
super(PMITest, self).tearDown()
def eb(self, name, version):
"""setup EB for name/version"""
root = os.path.join(self.tmpdir, name, version)
os.environ['EBROOT'+name.upper()] = root
os.environ['EBVERSION'+name.upper()] = version
return root
def set_mpi(self, name, version):
"""set mpi enviroment"""
root = self.eb(name, version)
mpirun = os.path.join(root, "bin", "mpirun")
self.mock_which.return_value = mpirun
if 'End2End' in self.__class__.__name__:
# can't mock which in end2end
path = os.path.dirname(mpirun)
os.environ['PATH'] = "%s:%s" % (path, os.environ['PATH'])
if not os.path.exists(path):
os.makedirs(path)
os.symlink(os.path.join(self.topdir, 'test', 'mpirun'), mpirun)
return mpirun
def set_env(self, env):
if is_string(env):
for line in env.split("\n"):
if '=' in line:
os.environ.update(dict([line.strip().split("=", 1)]))
else:
os.environ.update(env)
def install_fake_ompi_info(self, ompi_ver):
# add dummy ompi_info command (required by OpenMPI31xOr4x.has_ucx method)
ompi_info_lines = [
"#!/bin/bash",
"echo ' MCA btl: openib (MCA v2.1.0, API v3.0.0, Component v%s)'" % ompi_ver,
"echo ' MCA pml: ucx (MCA v2.1.0, API v2.0.0, Component v%s)'" % ompi_ver,
]
install_fake_cmd('ompi_info', self.tmpdir, '\n'.join(ompi_info_lines))
def set_slurm_ompi3(self, env):
self.set_env(env)
# OpenMPI used in foss/2019b (no UCX dep)
ompi_ver = '3.1.4'
self.set_mpi('OpenMPI', ompi_ver)
self.install_fake_ompi_info(ompi_ver)
def set_slurm_ompi4_ucx(self, env):
self.set_env(env)
# OpenMPI with UCX dep used in foss/2020a
ompi_ver = '4.0.3'
self.set_mpi('OpenMPI', ompi_ver)
self.eb('ucx', '1.8.0')
self.install_fake_ompi_info(ompi_ver)
def get_instance(self):
opts = get_mpi_and_sched_and_options(mpim, mpiopt, schedm)
instance = getinstance(*opts)
return instance
def pmirun(self, args, ok=True, pattern=None):
"""run script"""
ec, out = run([sys.executable, self.script] + args)
if ok:
test = self.assertEqual
else:
test = self.assertNotEqual
test(ec, 0, "Command exited normally: exit code %s; output: %s" % (ec, out))
if pattern is not None:
regex = re.compile(pattern)
self.assertTrue(regex.search(out.strip()), "Pattern '%s' found in: %s" % (regex.pattern, out))
def _check_vars(self, envs, missing=False):
"""Check the environment variables are present (or missing)"""
for env in envs:
self.assertEqual(env in os.environ, not missing, "variable %s missing %s %s" % (env, missing, os.environ))
| gpl-2.0 | -978,821,178,101,611,400 | 34.224719 | 118 | 0.626156 | false |
ZeitOnline/z3c.celery | src/z3c/celery/session.py | 1 | 1860 | import threading
import transaction
import zope.interface
import transaction.interfaces
class CelerySession(threading.local):
"""Thread local session of data to be sent to Celery."""
def __init__(self):
self.tasks = []
self._needs_to_join = True
def add_call(self, method, *args, **kw):
self._join_transaction()
self.tasks.append((method, args, kw))
def reset(self):
self.tasks = []
self._needs_to_join = True
def _join_transaction(self):
if not self._needs_to_join:
return
dm = CeleryDataManager(self)
transaction.get().join(dm)
self._needs_to_join = False
def _flush(self):
for method, args, kw in self.tasks:
method(*args, **kw)
self.reset()
def __len__(self):
"""Number of tasks in the session."""
return len(self.tasks)
celery_session = CelerySession()
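# Illustrative usage (the task object is assumed): calls queued on the session
# are only sent to Celery when the surrounding transaction commits, and are
# discarded if it aborts.
#   celery_session.add_call(some_task.apply_async, (1, 2), countdown=5)
#   transaction.commit()   # flushed via CeleryDataManager.tpc_vote below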
@zope.interface.implementer(transaction.interfaces.IDataManager)
class CeleryDataManager(object):
"""DataManager embedding the access to celery into the transaction."""
transaction_manager = None
def __init__(self, session):
self.session = session
def abort(self, transaction):
self.session.reset()
def tpc_begin(self, transaction):
pass
def commit(self, transaction):
pass
tpc_abort = abort
def tpc_vote(self, transaction):
self.session._flush()
def tpc_finish(self, transaction):
pass
def sortKey(self):
# Sort last, so that sending to celery is done after all other
# DataManagers signalled an okay.
return "~z3c.celery"
def __repr__(self):
"""Custom repr."""
return '<{0.__module__}.{0.__name__} for {1}, {2}>'.format(
self.__class__, transaction.get(), self.session)
| bsd-3-clause | -8,065,323,295,136,975,000 | 23.473684 | 74 | 0.601075 | false |
bnoi/scikit-tracker | sktracker/tracker/cost_function/tests/test_abstract_cost_functions.py | 1 | 1500 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from nose.tools import assert_raises
import sys
import pandas as pd
import numpy as np
from sktracker.tracker.cost_function import AbstractCostFunction
def test_abstract_cost_function():
cost_func = AbstractCostFunction(context={}, parameters={})
assert cost_func.get_block() == None
def test_abstract_cost_function_check_context():
cost_func = AbstractCostFunction(context={'cost': 1}, parameters={})
assert_raises(ValueError, cost_func.check_context, 'test_string', str)
cost_func.context['test_string'] = 5
assert_raises(TypeError, cost_func.check_context, 'test_string', str)
cost_func.context['test_string'] = "i am a string"
### This fails in py2.7
if sys.version_info[0] > 2:
cost_func.check_context('test_string', str)
assert True
def test_abstract_cost_function_check_columns():
cost_func = AbstractCostFunction(context={}, parameters={})
df = pd.DataFrame([np.arange(0, 5), np.arange(20, 25)],
columns=['x', 'y', 'z', 'w', 't'])
cost_func.check_columns(df, ['t', 'z', 'y'])
cost_func.check_columns([df], ['t', 'z', 'y'])
df = pd.DataFrame([np.arange(0, 4), np.arange(20, 24)],
columns=['x', 'y', 'w', 't'])
assert_raises(ValueError, cost_func.check_columns, df, ['t', 'z', 'y'])
| bsd-3-clause | 8,376,291,811,204,249,000 | 26.777778 | 75 | 0.64 | false |
arindampradhan/yaaHN | tests/test_item_model.py | 1 | 1836 | import httpretty
import unittest
from os import path
import types
import sys
import requests
from yaaHN.models import item
from yaaHN import hn_client
from yaaHN.helpers import item_parser, API_BASE
from test_utils import get_content, PRESET_DIR
class TestItem(unittest.TestCase):
def setUp(self):
httpretty.HTTPretty.enable()
httpretty.register_uri(
httpretty.GET, '{0}{1}'.format(API_BASE,
'item/8863.json'),
body=get_content('item_8863.json'), status=200, content_type='text/json')
response = requests.get(
'https://hacker-news.firebaseio.com/v0/item/8863.json')
self.item_type = ['pollopt', 'poll', 'comment', 'story', 'job']
self.item = hn_client.get_item('8863')
def tearDown(self):
httpretty.HTTPretty.disable()
def test_item_data_type(self):
"""
        Test types of fields of an Item object
"""
assert type(self.item.id) == int
assert type(self.item.deleted) == types.NoneType
assert self.item.type in self.item_type
assert type(self.item.by) == types.UnicodeType
assert type(self.item.time) == int
assert type(self.item.text) == types.NoneType
assert type(self.item.dead) == types.NoneType
assert type(self.item.parent) == types.NoneType
assert type(self.item.kids) == types.ListType
assert type(self.item.url) == types.UnicodeType
assert type(self.item.score) == types.IntType
assert type(self.item.title) == types.UnicodeType
assert type(self.item.parts) == types.NoneType
def test_item_by(self):
"""
Tests the submitter name
"""
self.assertEqual(self.item.by, 'dhouston')
if __name__ == '__main__':
unittest.main()
| mit | 6,554,353,882,973,497,000 | 31.785714 | 85 | 0.619281 | false |
chubbymaggie/idalink | idalink/memory.py | 1 | 10682 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2013- Yan Shoshitaishvili aka. zardus
# Ruoyu Wang aka. fish
# Andrew Dutcher aka. rhelmot
# Kevin Borgolte aka. cao
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
__all__ = ["get_memory", "IDAMemory", "CachedIDAMemory",
"IDAPermissions", "CachedIDAPermissions"]
import collections
import itertools
import logging
import operator
LOG = logging.getLogger("idalink.ida_mem")
# Helper functions.
def _dict_values_sorted_by_key(dictionary):
# This should be a yield from instead.
"""Internal helper to return the values of a dictionary, sorted by key.
"""
for _, value in sorted(dictionary.iteritems(), key=operator.itemgetter(0)):
yield value
def _ondemand(f):
"""Decorator to only request information if not in cache already.
"""
name = f.__name__
def func(self, *args, **kwargs):
if not args and not kwargs:
if hasattr(self, "_" + name):
return getattr(self, "_" + name)
a = f(self, *args, **kwargs)
setattr(self, "_" + name, a)
return a
else:
return f(self, *args, **kwargs)
func.__name__ = name
return func
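# Illustrative effect of the decorator above (method name assumed): the first
# no-argument call to an @_ondemand method stores its result as self._<name>;
# later no-argument calls return that cached attribute instead of recomputing.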
# Functions others are allowed to call.
def get_memory(idaapi, start, size, default_byte=None):
# TODO: Documentation
if idaapi is None:
idaapi = __import__("idaapi")
if size == 0:
return {}
# We are optimistic and assume it's a continous memory area
at_address = idaapi.get_many_bytes(start, size)
d = {}
if at_address is None: # It was not, resort to binary research
if size == 1:
if default_byte is not None:
LOG.debug("Using default byte for %d", start)
d[start] = default_byte
return d
mid = start + size / 2
first_size = mid - start
second_size = size - first_size
left = get_memory(idaapi, start, first_size, default_byte=default_byte)
right = get_memory(idaapi, mid, second_size, default_byte=default_byte)
if default_byte is None:
# will be nonsequential
d.update(left)
d.update(right)
else:
# it will be sequential, so let's combine it
chained = itertools.chain(_dict_values_sorted_by_key(left),
_dict_values_sorted_by_key(right))
d[start] = "".join(chained)
else:
d[start] = at_address
return d
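# Illustrative call (assumes a live idaapi module and a mapped address range):
#   chunks = get_memory(idaapi, 0x401000, 0x80, default_byte=chr(0xff))
#   -> {0x401000: '<0x80 bytes>'} when the range is readable in one piece,
#      otherwise a sparse dict keyed by the start of each readable chunk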
class IDAKeys(collections.MutableMapping): # pylint: disable=W0223
# TODO: delitem, setitem, getitem are abstract, should be fixed,
# disabled warning should be removed
def __init__(self, ida):
self.ida = ida
# Gets the "heads" (instructions and data items) and head sizes from IDA
@_ondemand
def heads(self, exclude=()):
# TODO: Documentation
LOG.debug("Getting heads from IDA for file %s", self.ida.filename)
keys = [-1] + list(exclude) + [self.ida.idc.MAXADDR + 1]
ranges = []
for i in range(len(keys) - 1):
a, b = keys[i], keys[i+1]
if a - b > 1:
ranges.append((a+1, b-1))
heads = {}
for start, end in ranges:
for head in self.ida.idautils.Heads(start, end, 1):
heads[head] = self.ida.idc.ItemSize(head)
return heads
@_ondemand
def segments(self):
# TODO: Documentation
LOG.debug("Getting segments from IDA for file %s", self.ida.filename)
segments_size = {}
for s in self.ida.idautils.Segments():
segments_size[s] = self.ida.idc.SegEnd(s) - self.ida.idc.SegStart(s)
return segments_size
@_ondemand
def idakeys(self):
# TODO: Documentation
keys = set()
for h, s in self.segments().iteritems():
for i in range(s):
keys.add(h + i)
for h, s in self.heads(exclude=keys).iteritems():
for i in range(s):
keys.add(h + i)
LOG.debug("Done getting keys.")
return keys
def __iter__(self):
# TODO: Refactor to be more pythonic
for key in self.idakeys():
yield key
def __len__(self):
# This is significantly faster than list(self.__iter__) because
# we do not need to keep the whole list in memory, just the accumulator.
return sum(1 for _ in self)
def __contains__(self, key):
return key in self.keys()
def reset(self):
# TODO: Documentation
if hasattr(self, "_heads"):
delattr(self, "_heads")
if hasattr(self, "_segments"):
delattr(self, "_segments")
if hasattr(self, "_idakeys"):
delattr(self, "_idakeys")
class IDAPermissions(IDAKeys):
def __init__(self, ida, default_perm=7):
super(IDAPermissions, self).__init__(ida)
self.default_perm = default_perm
def __getitem__(self, address):
# Only do things that we actually have in IDA
if address not in self:
raise KeyError(address)
seg_start = self.ida.idc.SegStart(address)
if seg_start == self.ida.idc.BADADDR:
# We can really only return the default here
return self.default_perm
return self.ida.idc.GetSegmentAttr(seg_start, self.ida.idc.SEGATTR_PERM)
def __setitem__(self, address, value):
# Nothing we can do here
pass
def __delitem__(self, address, value):
# Nothing we can do here
pass
class CachedIDAPermissions(IDAPermissions):
def __init__(self, ida, default_perm=7):
super(CachedIDAPermissions, self).__init__(ida)
self.permissions = {}
self.default_perm = default_perm
def __getitem__(self, address):
if address in self.permissions:
return self.permissions[address]
p = super(CachedIDAPermissions, self).__getitem__(address)
# cache the segment
seg_start = self.ida.idc.SegStart(address)
seg_end = self.ida.idc.SegEnd(address)
if seg_start == self.ida.idc.BADADDR:
self.permissions[address] = p
else:
for i in range(seg_start, seg_end):
self.permissions[i] = p
return p
def __setitem__(self, address, value):
self.permissions[address] = value
def __delitem__(self, address):
self.permissions.pop(address, None)
def reset(self):
# TODO: Documentation
self.permissions.clear()
super(CachedIDAPermissions, self).reset()
class IDAMemory(IDAKeys):
def __init__(self, ida, default_byte=chr(0xff)):
super(IDAMemory, self).__init__(ida)
self.default_byte = default_byte
def __getitem__(self, address):
# only do things that we actually have in IDA
if address not in self:
raise KeyError(address)
value = self.ida.idaapi.get_many_bytes(address, 1)
if value is None:
value = self.default_byte
return value
def __setitem__(self, address, value):
self.ida.idaapi.patch_byte(address, value)
def __delitem__(self, address):
# nothing we can really do here
pass
class CachedIDAMemory(IDAMemory):
def __init__(self, ida, default_byte=chr(0xff)):
super(CachedIDAMemory, self).__init__(ida, default_byte)
self.local = {}
self._pulled = False
@property
def pulled(self):
"""Check if memory has been pulled from the remote link.
"""
return self._pulled
def __getitem__(self, address):
if address in self.local:
return self.local[address]
LOG.debug("Uncached byte: 0x%x", address)
one = super(CachedIDAMemory, self).__getitem__(address)
# cache the byte if it's not in a segment
seg_start = self.ida.idc.SegStart(address)
if seg_start == self.ida.idc.BADADDR:
self.local[address] = one
else:
# otherwise, cache the segment
seg_end = self.ida.idc.SegEnd(address)
seg_size = seg_end - seg_start
self._load_memory(seg_start, seg_size)
return one
def __iter__(self):
if self.pulled:
return self.local.__iter__()
else:
return super(CachedIDAMemory, self).__iter__()
def __setitem__(self, address, value):
self.local[address] = value
def __delitem__(self, address):
self.local.pop(address, None)
def get_memory(self, start, size):
"""Retrieve an area of memory from IDA.
Returns a sparse dictionary of address -> value.
"""
LOG.debug("get_memory: %d bytes from %x", size, start)
return get_memory(self.ida.idaapi, start, size,
default_byte=self.default_byte)
def pull_defined(self):
if self.pulled:
return
start = self.ida.idc.MinEA()
size = self.ida.idc.MaxEA() - start
LOG.debug("Loading memory of %s (%d bytes)...", self.ida.filename, size)
chunks = self.ida.remote_idalink_module.get_memory(None, start, size)
LOG.debug("Storing loaded memory of %s...", self.ida.filename)
self._store_loaded_chunks(chunks)
self._pulled = True
def reset(self):
self.local.clear()
self._pulled = False
super(CachedIDAMemory, self).reset()
# Helpers
def _load_memory(self, start, size):
chunks = self.get_memory(start, size)
        self._store_loaded_chunks(chunks)
def _store_loaded_chunks(self, chunks):
LOG.debug("Updating cache with %d chunks", len(chunks))
for start, buff in chunks.iteritems():
for n, i in enumerate(buff):
if start + n not in self.local:
self.local[start + n] = i
| gpl-3.0 | -4,595,868,428,126,769,700 | 30.791667 | 80 | 0.586407 | false |
sdpp/python-keystoneclient | keystoneclient/tests/unit/v2_0/test_service_catalog.py | 1 | 9165 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import access
from keystoneclient import exceptions
from keystoneclient import fixture
from keystoneclient.tests.unit.v2_0 import client_fixtures
from keystoneclient.tests.unit.v2_0 import utils
class ServiceCatalogTest(utils.TestCase):
def setUp(self):
super(ServiceCatalogTest, self).setUp()
self.AUTH_RESPONSE_BODY = client_fixtures.auth_response_body()
def test_building_a_service_catalog(self):
auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY)
sc = auth_ref.service_catalog
self.assertEqual(sc.url_for(service_type='compute'),
"https://compute.north.host/v1/1234")
self.assertEqual(sc.url_for('tenantId', '1', service_type='compute'),
"https://compute.north.host/v1/1234")
self.assertEqual(sc.url_for('tenantId', '2', service_type='compute'),
"https://compute.north.host/v1.1/3456")
self.assertRaises(exceptions.EndpointNotFound, sc.url_for, "region",
"South", service_type='compute')
def test_service_catalog_endpoints(self):
auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY)
sc = auth_ref.service_catalog
public_ep = sc.get_endpoints(service_type='compute',
endpoint_type='publicURL')
self.assertEqual(public_ep['compute'][1]['tenantId'], '2')
self.assertEqual(public_ep['compute'][1]['versionId'], '1.1')
self.assertEqual(public_ep['compute'][1]['internalURL'],
"https://compute.north.host/v1.1/3456")
def test_service_catalog_regions(self):
self.AUTH_RESPONSE_BODY['access']['region_name'] = "North"
# Setting region_name on the catalog is deprecated.
with self.deprecations.expect_deprecations_here():
auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY)
sc = auth_ref.service_catalog
url = sc.url_for(service_type='image', endpoint_type='publicURL')
self.assertEqual(url, "https://image.north.host/v1/")
self.AUTH_RESPONSE_BODY['access']['region_name'] = "South"
auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY)
sc = auth_ref.service_catalog
url = sc.url_for(service_type='image', endpoint_type='internalURL')
self.assertEqual(url, "https://image-internal.south.host/v1/")
def test_service_catalog_empty(self):
self.AUTH_RESPONSE_BODY['access']['serviceCatalog'] = []
auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY)
self.assertRaises(exceptions.EmptyCatalog,
auth_ref.service_catalog.url_for,
service_type='image',
endpoint_type='internalURL')
def test_service_catalog_get_endpoints_region_names(self):
auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY)
sc = auth_ref.service_catalog
endpoints = sc.get_endpoints(service_type='image', region_name='North')
self.assertEqual(len(endpoints), 1)
self.assertEqual(endpoints['image'][0]['publicURL'],
'https://image.north.host/v1/')
endpoints = sc.get_endpoints(service_type='image', region_name='South')
self.assertEqual(len(endpoints), 1)
self.assertEqual(endpoints['image'][0]['publicURL'],
'https://image.south.host/v1/')
endpoints = sc.get_endpoints(service_type='compute')
self.assertEqual(len(endpoints['compute']), 2)
endpoints = sc.get_endpoints(service_type='compute',
region_name='North')
self.assertEqual(len(endpoints['compute']), 2)
endpoints = sc.get_endpoints(service_type='compute',
region_name='West')
self.assertEqual(len(endpoints['compute']), 0)
def test_service_catalog_url_for_region_names(self):
auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY)
sc = auth_ref.service_catalog
url = sc.url_for(service_type='image', region_name='North')
self.assertEqual(url, 'https://image.north.host/v1/')
url = sc.url_for(service_type='image', region_name='South')
self.assertEqual(url, 'https://image.south.host/v1/')
url = sc.url_for(service_type='compute',
region_name='North',
attr='versionId',
filter_value='1.1')
self.assertEqual(url, 'https://compute.north.host/v1.1/3456')
self.assertRaises(exceptions.EndpointNotFound, sc.url_for,
service_type='image', region_name='West')
    def test_service_catalog_get_url_region_names(self):
auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY)
sc = auth_ref.service_catalog
urls = sc.get_urls(service_type='image')
self.assertEqual(len(urls), 2)
urls = sc.get_urls(service_type='image', region_name='North')
self.assertEqual(len(urls), 1)
self.assertEqual(urls[0], 'https://image.north.host/v1/')
urls = sc.get_urls(service_type='image', region_name='South')
self.assertEqual(len(urls), 1)
self.assertEqual(urls[0], 'https://image.south.host/v1/')
urls = sc.get_urls(service_type='image', region_name='West')
self.assertIsNone(urls)
def test_service_catalog_param_overrides_body_region(self):
self.AUTH_RESPONSE_BODY['access']['region_name'] = "North"
# Setting region_name on the catalog is deprecated.
with self.deprecations.expect_deprecations_here():
auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY)
sc = auth_ref.service_catalog
url = sc.url_for(service_type='image')
self.assertEqual(url, 'https://image.north.host/v1/')
url = sc.url_for(service_type='image', region_name='South')
self.assertEqual(url, 'https://image.south.host/v1/')
endpoints = sc.get_endpoints(service_type='image')
self.assertEqual(len(endpoints['image']), 1)
self.assertEqual(endpoints['image'][0]['publicURL'],
'https://image.north.host/v1/')
endpoints = sc.get_endpoints(service_type='image', region_name='South')
self.assertEqual(len(endpoints['image']), 1)
self.assertEqual(endpoints['image'][0]['publicURL'],
'https://image.south.host/v1/')
def test_service_catalog_service_name(self):
auth_ref = access.AccessInfo.factory(resp=None,
body=self.AUTH_RESPONSE_BODY)
sc = auth_ref.service_catalog
url = sc.url_for(service_name='Image Servers', endpoint_type='public',
service_type='image', region_name='North')
self.assertEqual('https://image.north.host/v1/', url)
self.assertRaises(exceptions.EndpointNotFound, sc.url_for,
service_name='Image Servers', service_type='compute')
urls = sc.get_urls(service_type='image', service_name='Image Servers',
endpoint_type='public')
self.assertIn('https://image.north.host/v1/', urls)
self.assertIn('https://image.south.host/v1/', urls)
urls = sc.get_urls(service_type='image', service_name='Servers',
endpoint_type='public')
self.assertIsNone(urls)
def test_service_catalog_multiple_service_types(self):
token = fixture.V2Token()
token.set_scope()
for i in range(3):
s = token.add_service('compute')
s.add_endpoint(public='public-%d' % i,
admin='admin-%d' % i,
internal='internal-%d' % i,
region='region-%d' % i)
auth_ref = access.AccessInfo.factory(resp=None, body=token)
urls = auth_ref.service_catalog.get_urls(service_type='compute',
endpoint_type='publicURL')
self.assertEqual(set(['public-0', 'public-1', 'public-2']), set(urls))
urls = auth_ref.service_catalog.get_urls(service_type='compute',
endpoint_type='publicURL',
region_name='region-1')
self.assertEqual(('public-1', ), urls)
| apache-2.0 | -4,503,097,674,112,767,500 | 43.926471 | 79 | 0.602728 | false |
jantman/biweeklybudget | biweeklybudget/interest.py | 1 | 37651 | """
The latest version of this package is available at:
<http://github.com/jantman/biweeklybudget>
################################################################################
Copyright 2016 Jason Antman <[email protected]> <http://www.jasonantman.com>
This file is part of biweeklybudget, also known as biweeklybudget.
biweeklybudget is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
biweeklybudget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with biweeklybudget. If not, see <http://www.gnu.org/licenses/>.
The Copyright and Authors attributions contained herein may not be removed or
otherwise altered, except to add the Author attribution of a contributor to
this work. (Additional Terms pursuant to Section 7b of the AGPL v3)
################################################################################
While not legally required, I sincerely request that anyone who finds
bugs please submit them at <https://github.com/jantman/biweeklybudget> or
to me via email, and that you send any contributions or improvements
either as a pull request on GitHub, or to me via email.
################################################################################
AUTHORS:
Jason Antman <[email protected]> <http://www.jasonantman.com>
################################################################################
"""
import logging
from datetime import timedelta
from decimal import Decimal
from dateutil.relativedelta import relativedelta
from calendar import monthrange
from biweeklybudget.models.account import Account, AcctType
logger = logging.getLogger(__name__)
class InterestHelper(object):
def __init__(self, db_sess, increases={}, onetimes={}):
"""
Initialize interest calculation helper.
:param db_sess: Database Session
:type db_sess: sqlalchemy.orm.session.Session
:param increases: dict of :py:class:`datetime.date` to
:py:class:`decimal.Decimal` for new max payment amount to take effect
on the specified date.
:type increases: dict
:param onetimes: dict of :py:class:`datetime.date` to
:py:class:`decimal.Decimal` for additional amounts to add to the first
maximum payment on or after the given date
:type onetimes: dict
"""
self._sess = db_sess
self._accounts = self._get_credit_accounts()
self._statements = self._make_statements(self._accounts)
self._increases = increases
self._onetimes = onetimes
@property
def accounts(self):
"""
Return a dict of `account_id` to :py:class:`~.Account` for all Credit
type accounts with OFX data present.
:return: dict of account_id to Account instance
:rtype: dict
"""
return self._accounts
def _get_credit_accounts(self):
"""
Return a dict of `account_id` to :py:class:`~.Account` for all Credit
type accounts with OFX data present.
:return: dict of account_id to Account instance
:rtype: dict
"""
accts = self._sess.query(Account).filter(
Account.acct_type.__eq__(AcctType.Credit),
Account.is_active.__eq__(True)
).all()
res = {a.id: a for a in accts}
return res
def _make_statements(self, accounts):
"""
Make :py:class:`~.CCStatement` instances for each account; return a
dict of `account_id` to CCStatement instance.
:param accounts: dict of (int) account_id to Account instance
:type accounts: dict
:return: dict of (int) account_id to CCStatement instance
:rtype: dict
"""
res = {}
for a_id, acct in accounts.items():
icls = INTEREST_CALCULATION_NAMES[acct.interest_class_name]['cls'](
acct.effective_apr
)
bill_period = _BillingPeriod(acct.balance.ledger_date.date())
min_pay_cls = MIN_PAYMENT_FORMULA_NAMES[
acct.min_payment_class_name]['cls']()
res[a_id] = CCStatement(
icls,
abs(acct.balance.ledger),
min_pay_cls,
bill_period,
end_balance=abs(acct.balance.ledger),
interest_amt=acct.last_interest_charge
)
logger.debug('Statements: %s', res)
return res
@property
def min_payments(self):
"""
Return a dict of `account_id` to minimum payment for the latest
statement, for each account.
:return: dict of `account_id` to minimum payment (Decimal)
:rtype: dict
"""
res = {}
for a_id, stmt in self._statements.items():
res[a_id] = stmt.minimum_payment
logger.debug('Minimum payments by account_id: %s', res)
return res
def calculate_payoffs(self):
"""
Calculate payoffs for each account/statement.
:return: dict of payoff information. Keys are payoff method names.
Values are dicts, with keys "description" (str description of the
payoff method), "doc" (the docstring of the class), and "results".
The "results" dict has integer `account_id` as the key, and values are
dicts with keys "payoff_months" (int), "total_payments" (Decimal),
"total_interest" (Decimal) and ``next_payment`` (Decimal).
:rtype: dict
"""
res = {}
max_total = sum(list(self.min_payments.values()))
for name in sorted(PAYOFF_METHOD_NAMES.keys()):
cls = PAYOFF_METHOD_NAMES[name]['cls']
klass = cls(
max_total, increases=self._increases, onetimes=self._onetimes
)
if not cls.show_in_ui:
continue
res[name] = {
'description': PAYOFF_METHOD_NAMES[name]['description'],
'doc': PAYOFF_METHOD_NAMES[name]['doc']
}
try:
res[name]['results'] = self._calc_payoff_method(klass)
except Exception as ex:
res[name]['error'] = str(ex)
logger.error('Minimum payment method %s failed: %s',
name, ex)
return res
def _calc_payoff_method(self, cls):
"""
Calculate payoffs using one method.
:param cls: payoff method class
:type cls: biweeklybudget.interest._PayoffMethod
:return: Dict with integer `account_id` as the key, and values are
dicts with keys "payoff_months" (int), "total_payments" (Decimal),
"total_interest" (Decimal), "next_payment" (Decimal).
:rtype: dict
"""
balances = {
x: self._statements[x].principal for x in self._statements.keys()
}
res = {}
calc = calculate_payoffs(cls, list(self._statements.values()))
for idx, result in enumerate(calc):
a_id = list(self._statements.keys())[idx]
res[a_id] = {
'payoff_months': result[0],
'total_payments': result[1],
'total_interest': result[1] - balances[a_id],
'next_payment': result[2]
}
return res
class _InterestCalculation(object):
#: Human-readable string name of the interest calculation type.
description = None
def __init__(self, apr):
"""
:param apr: Annual Percentage Rate as a decimal
:type apr: decimal.Decimal
"""
self._apr = apr
def __repr__(self):
return '<%s(decimal.Decimal(\'%s\'))>' % (
self.__class__.__name__, self.apr
)
@property
def apr(self):
return self._apr
def calculate(self, principal, first_d, last_d, transactions={}):
"""
Calculate compound interest for the specified principal.
:param principal: balance at beginning of statement period
:type principal: decimal.Decimal
:param first_d: date of beginning of statement period
:type first_d: datetime.date
:param last_d: last date of statement period
:type last_d: datetime.date
:param transactions: dict of datetime.date to float amount adjust
the balance by on the specified dates.
:type transactions: dict
:return: dict describing the result: end_balance (float),
interest_paid (float)
:rtype: dict
"""
raise NotImplementedError("Must implement in subclass")
class AdbCompoundedDaily(_InterestCalculation):
"""
Average Daily Balance method, compounded daily (like American Express).
"""
#: Human-readable string name of the interest calculation type.
description = 'Average Daily Balance Compounded Daily (AmEx)'
def calculate(self, principal, first_d, last_d, transactions={}):
"""
Calculate compound interest for the specified principal.
:param principal: balance at beginning of statement period
:type principal: decimal.Decimal
:param first_d: date of beginning of statement period
:type first_d: datetime.date
:param last_d: last date of statement period
:type last_d: datetime.date
:param transactions: dict of datetime.date to float amount adjust
the balance by on the specified dates.
:type transactions: dict
:return: dict describing the result: end_balance (float),
interest_paid (float)
:rtype: dict
"""
dpr = self._apr / Decimal(365.0)
interest = Decimal(0.0)
num_days = 0
bal_total = Decimal(0.0)
bal = principal
d = first_d
while d <= last_d:
num_days += 1
if d in transactions:
bal += transactions[d]
int_amt = bal * dpr
interest += int_amt
bal += int_amt
bal_total += bal
d += timedelta(days=1)
adb = bal_total / Decimal(num_days)
final = adb * self._apr * num_days / Decimal(365.0)
bal += final * dpr
return {
'interest_paid': final,
'end_balance': bal
}
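# Illustrative sketch (not part of the original module; figures approximate):
# for a $1,000 balance held over a 30-day period at 19.99% APR with no
# transactions, the daily rate is 0.1999 / 365 ~= 0.00054767, the balance
# compounds daily to roughly $1,016.6, the average daily balance is roughly
# $1,008.5, and the reported interest is about $16.6:
#     calc = AdbCompoundedDaily(Decimal('0.1999'))
#     calc.calculate(Decimal('1000.00'), date(2017, 1, 1), date(2017, 1, 30))
#     # -> {'interest_paid': ~16.6, 'end_balance': ~1016.6}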
class SimpleInterest(_InterestCalculation):
"""
Simple interest, charged on balance at the end of the billing period.
"""
#: Human-readable string name of the interest calculation type.
description = 'Interest charged once on the balance at end of period.'
def calculate(self, principal, first_d, last_d, transactions={}):
"""
        Calculate simple interest for the specified principal.
:param principal: balance at beginning of statement period
:type principal: decimal.Decimal
:param first_d: date of beginning of statement period
:type first_d: datetime.date
:param last_d: last date of statement period
:type last_d: datetime.date
:param transactions: dict of datetime.date to float amount adjust
the balance by on the specified dates.
:type transactions: dict
:return: dict describing the result: end_balance (float),
interest_paid (float)
:rtype: dict
"""
num_days = 0
bal = principal
d = first_d
while d <= last_d:
num_days += 1
if d in transactions:
bal += transactions[d]
d += timedelta(days=1)
final = bal * self._apr * num_days / Decimal(365.0)
return {
'interest_paid': final,
'end_balance': bal + final
}
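# Illustrative sketch (not part of the original module): with this formula a
# $1,000 balance over a 30-day period at 10% APR accrues
# 1000 * 0.10 * 30 / 365 ~= $8.22, charged once at the end of the period:
#     SimpleInterest(Decimal('0.10')).calculate(
#         Decimal('1000.00'), date(2017, 1, 1), date(2017, 1, 30))
#     # -> {'interest_paid': ~8.22, 'end_balance': ~1008.22}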
class _BillingPeriod(object):
#: human-readable string description of the billing period type
description = None
def __init__(self, end_date, start_date=None):
"""
Construct a billing period that is defined by a number of days.
:param end_date: end date of the billing period
:type end_date: datetime.date
:param start_date: start date for billing period; if specified, will
override calculation of start date
:type start_date: datetime.date
"""
self._period_for_date = end_date
if start_date is None:
if end_date.day < 15:
# if end date is < 15, period is month before end_date
self._end_date = (end_date.replace(day=1) - timedelta(days=1))
self._start_date = self._end_date.replace(day=1)
else:
# if end date >= 15, period is month containing end_date
self._start_date = end_date.replace(day=1)
self._end_date = end_date.replace(
day=(monthrange(
end_date.year, end_date.month
)[1])
)
else:
self._start_date = start_date
self._end_date = self._start_date.replace(
day=(monthrange(
self._start_date.year, self._start_date.month
)[1])
)
def __repr__(self):
return '<BillingPeriod(%s, start_date=%s)>' % (
self._end_date, self._start_date
)
@property
def start_date(self):
return self._start_date
@property
def end_date(self):
return self._end_date
@property
def payment_date(self):
period_length = (self._end_date - self._start_date).days
return self._start_date + timedelta(days=int(period_length / 2))
@property
def next_period(self):
"""
Return the next billing period after this one.
:return: next billing period
:rtype: _BillingPeriod
"""
return _BillingPeriod(
self._end_date + relativedelta(months=1),
start_date=(self._end_date + timedelta(days=1))
)
@property
def prev_period(self):
"""
Return the previous billing period before this one.
:return: previous billing period
:rtype: _BillingPeriod
"""
e = self._start_date - timedelta(days=1)
return _BillingPeriod(e, start_date=e.replace(day=1))
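# Illustrative sketch (not part of the original module) of how the end date
# selects the period: a day-of-month before the 15th selects the previous
# calendar month, a later day selects the month containing the date.
#     _BillingPeriod(date(2017, 7, 5))   # -> period 2017-06-01 .. 2017-06-30
#     _BillingPeriod(date(2017, 7, 20))  # -> period 2017-07-01 .. 2017-07-31
# payment_date falls roughly mid-period, e.g. 2017-07-16 for the July period.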
class _MinPaymentFormula(object):
#: human-readable string description of the formula
description = None
def __init__(self):
pass
def calculate(self, balance, interest):
"""
Calculate the minimum payment for a statement with the given balance
and interest amount.
:param balance: balance amount for the statement
:type balance: decimal.Decimal
:param interest: interest charged for the statement period
:type interest: decimal.Decimal
:return: minimum payment for the statement
:rtype: decimal.Decimal
"""
raise NotImplementedError()
class MinPaymentAmEx(_MinPaymentFormula):
"""
    Interest on last statement plus 1% of balance,
    or $35 if that amount is less than $35.
"""
#: human-readable string description of the formula
description = 'AmEx - Greatest of Interest Plus 1% of Principal, or $35'
def __init__(self):
super(MinPaymentAmEx, self).__init__()
def calculate(self, balance, interest):
"""
Calculate the minimum payment for a statement with the given balance
and interest amount.
:param balance: balance amount for the statement
:type balance: decimal.Decimal
:param interest: interest charged for the statement period
:type interest: decimal.Decimal
:return: minimum payment for the statement
:rtype: decimal.Decimal
"""
amt = interest + (balance * Decimal('.01'))
if amt < 35:
amt = 35
return amt
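# Illustrative sketch (not part of the original module): on a $2,000 balance
# with $30 of interest this yields 30 + 2000 * 0.01 = $50; on a $100 balance
# with $1 of interest it yields 1 + 1 = $2, which is floored to $35.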
class MinPaymentDiscover(_MinPaymentFormula):
"""
Greater of:
- $35; or
- 2% of the New Balance shown on your billing statement; or
- $20, plus any of the following charges as shown on your billing statement:
fees for any debt protection product that you enrolled in on or after
2/1/2015; Interest Charges; and Late Fees.
"""
#: human-readable string description of the formula
description = 'Discover - Greatest of 2% of Principal, or $20 plus ' \
'Interest, or $35'
def __init__(self):
super(MinPaymentDiscover, self).__init__()
def calculate(self, balance, interest):
"""
Calculate the minimum payment for a statement with the given balance
and interest amount.
:param balance: balance amount for the statement
:type balance: decimal.Decimal
:param interest: interest charged for the statement period
:type interest: decimal.Decimal
:return: minimum payment for the statement
:rtype: decimal.Decimal
"""
options = [
Decimal(35),
balance * Decimal('0.02'),
Decimal(20) + interest
]
return max(options)
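# Illustrative sketch (not part of the original module): on a $2,000 balance
# with $30 of interest the candidates are $35, 2000 * 0.02 = $40 and
# 20 + 30 = $50, so the minimum payment is $50 (the greatest of the three).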
class MinPaymentCiti(_MinPaymentFormula):
"""
Greater of:
- $25;
- The new balance, if it's less than $25;
- 1 percent of the new balance, plus the current statement's interest
charges or minimum interest charges, plus late fees;
- 1.5% of the new balance, rounded to the nearest dollar amount.
In all cases, add past fees and finance charges due, plus any amount in
excess of credit line.
"""
#: human-readable string description of the formula
description = 'Citi - Greatest of 1.5% of Principal, or 1% of Principal ' \
'plus interest and fees, or $25, or Principal'
def __init__(self):
super(MinPaymentCiti, self).__init__()
def calculate(self, balance, interest):
"""
Calculate the minimum payment for a statement with the given balance
and interest amount.
:param balance: balance amount for the statement
:type balance: decimal.Decimal
:param interest: interest charged for the statement period
:type interest: decimal.Decimal
:return: minimum payment for the statement
:rtype: decimal.Decimal
"""
options = [
25,
(balance * Decimal('0.01')) + interest,
round(balance * Decimal('0.015'))
]
if balance < Decimal('25'):
options.append(balance)
return max(options)
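# Illustrative sketch (not part of the original module): on a $2,000 balance
# with $30 of interest the candidates are $25, 2000 * 0.01 + 30 = $50 and
# round(2000 * 0.015) = $30, so the minimum payment is $50; the balance itself
# only becomes a candidate when it is under $25.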
class _PayoffMethod(object):
"""
A payoff method for multiple cards; a method of figuring out how much to
pay on each card, each month.
"""
#: human-readable string name of the payoff method
description = None
def __init__(self, max_total_payment=None, increases={}, onetimes={}):
"""
Initialize a payment method.
:param max_total_payment: maximum total payment for all statements
:type max_total_payment: decimal.Decimal
:param increases: dict of :py:class:`datetime.date` to
:py:class:`decimal.Decimal` for new max payment amount to take effect
on the specified date.
:type increases: dict
:param onetimes: dict of :py:class:`datetime.date` to
:py:class:`decimal.Decimal` for additional amounts to add to the first
maximum payment on or after the given date
:type onetimes: dict
"""
self._max_total = max_total_payment
self._increases = increases
self._onetimes = onetimes
def __repr__(self):
return '<%s(%s, increases=%s, onetimes=%s)>' % (
self.__class__.__name__, self._max_total, self._increases,
self._onetimes
)
def max_total_for_period(self, period):
"""
Given a :py:class:`~._BillingPeriod`, calculate the maximum total
payment for that period, including both `self._max_total` and the
increases and onetimes specified on the class constructor.
:param period: billing period to get maximum total payment for
:type period: _BillingPeriod
:return: maximum total payment for the period
:rtype: decimal.Decimal
"""
res = self._max_total
for inc_d in sorted(self._increases.keys(), reverse=True):
if inc_d > period.payment_date:
continue
inc_amt = self._increases[inc_d]
logger.debug('Found increase of %s starting on %s, applied to '
'period %s', inc_amt, inc_d, period)
res = inc_amt
break
for ot_d, ot_amt in self._onetimes.items():
if period.prev_period.payment_date < ot_d <= period.payment_date:
logger.debug('Found onetime of %s on %s in period %s',
ot_amt, ot_d, period)
res += ot_amt
logger.debug('Period %s _max_total=%s max_total_for_period=%s',
period, self._max_total, res)
return res
def find_payments(self, statements):
"""
Given a list of statements, return a list of payment amounts to make
on each of the statements.
:param statements: statements to pay, list of :py:class:`~.CCStatement`
:type statements: list
:return: list of payment amounts to make, same order as ``statements``
:rtype: list
"""
raise NotImplementedError()
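# Illustrative sketch (not part of the original module) of how increases and
# onetimes shape the per-period budget: with max_total_payment=Decimal('400'),
# increases={date(2017, 9, 1): Decimal('500')} and
# onetimes={date(2017, 9, 10): Decimal('1000')}, a period whose payment_date
# is 2017-09-15 gets 500 + 1000 = 1500 to distribute, earlier periods get 400,
# and later periods get 500 (the onetime amount is only applied once).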
class MinPaymentMethod(_PayoffMethod):
"""
Pay only the minimum on each statement.
"""
description = 'Minimum Payment Only'
show_in_ui = True
def find_payments(self, statements):
"""
Given a list of statements, return a list of payment amounts to make
on each of the statements.
:param statements: statements to pay, list of :py:class:`~.CCStatement`
:type statements: list
:return: list of payment amounts to make, same order as ``statements``
:rtype: list
"""
return [s.minimum_payment for s in statements]
class FixedPaymentMethod(_PayoffMethod):
"""
TESTING ONLY - pay the same amount on every statement.
"""
description = 'TESTING ONLY - Fixed Payment for All Statements'
show_in_ui = False
def find_payments(self, statements):
"""
Given a list of statements, return a list of payment amounts to make
on each of the statements.
:param statements: statements to pay, list of :py:class:`~.CCStatement`
:type statements: list
:return: list of payment amounts to make, same order as ``statements``
:rtype: list
"""
return [self._max_total for _ in statements]
class HighestBalanceFirstMethod(_PayoffMethod):
"""
Pay statements off from highest to lowest balance.
"""
description = 'Highest to Lowest Balance'
show_in_ui = True
def find_payments(self, statements):
"""
Given a list of statements, return a list of payment amounts to make
on each of the statements.
:param statements: statements to pay, list of :py:class:`~.CCStatement`
:type statements: list
:return: list of payment amounts to make, same order as ``statements``
:rtype: list
"""
max_total = self.max_total_for_period(statements[0].billing_period)
min_sum = sum([s.minimum_payment for s in statements])
if min_sum > max_total:
raise TypeError(
'ERROR: Max total payment of %s is less than sum of minimum '
'payments (%s)' % (max_total, min_sum)
)
max_bal = Decimal('0.00')
max_idx = None
for idx, stmt in enumerate(statements):
if stmt.principal > max_bal:
max_bal = stmt.principal
max_idx = idx
res = [None for _ in statements]
max_pay = max_total - (
min_sum - statements[max_idx].minimum_payment
)
for idx, stmt in enumerate(statements):
if idx == max_idx:
res[idx] = max_pay
else:
res[idx] = statements[idx].minimum_payment
return res
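# Illustrative sketch (not part of the original module): given statements with
# minimum payments of $35, $50 and $100 and a $400 budget, the two non-target
# accounts receive their minimums and the targeted account (here the highest
# balance, whose minimum is $100) receives the remainder,
# 400 - (35 + 50) = $315. The rate- and balance-ordered methods below allocate
# payments with the same pattern, differing only in how the target is chosen.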
class HighestInterestRateFirstMethod(_PayoffMethod):
"""
Pay statements off from highest to lowest interest rate.
"""
description = 'Highest to Lowest Interest Rate'
show_in_ui = True
def find_payments(self, statements):
"""
Given a list of statements, return a list of payment amounts to make
on each of the statements.
:param statements: statements to pay, list of :py:class:`~.CCStatement`
:type statements: list
:return: list of payment amounts to make, same order as ``statements``
:rtype: list
"""
max_total = self.max_total_for_period(statements[0].billing_period)
min_sum = sum([s.minimum_payment for s in statements])
if min_sum > max_total:
raise TypeError(
'ERROR: Max total payment of %s is less than sum of minimum '
'payments (%s)' % (max_total, min_sum)
)
max_apr = Decimal('0.00')
max_idx = None
for idx, stmt in enumerate(statements):
if stmt.apr > max_apr:
max_apr = stmt.apr
max_idx = idx
res = [None for _ in statements]
max_pay = max_total - (
min_sum - statements[max_idx].minimum_payment
)
for idx, stmt in enumerate(statements):
if idx == max_idx:
res[idx] = max_pay
else:
res[idx] = statements[idx].minimum_payment
return res
class LowestBalanceFirstMethod(_PayoffMethod):
"""
Pay statements off from lowest to highest balance, a.k.a. the "snowball"
method.
"""
description = 'Lowest to Highest Balance (a.k.a. Snowball Method)'
show_in_ui = True
def find_payments(self, statements):
"""
Given a list of statements, return a list of payment amounts to make
on each of the statements.
:param statements: statements to pay, list of :py:class:`~.CCStatement`
:type statements: list
:return: list of payment amounts to make, same order as ``statements``
:rtype: list
"""
max_total = self.max_total_for_period(statements[0].billing_period)
min_sum = sum([s.minimum_payment for s in statements])
if min_sum > max_total:
raise TypeError(
'ERROR: Max total payment of %s is less than sum of minimum '
'payments (%s)' % (max_total, min_sum)
)
min_bal = Decimal('+Infinity')
min_idx = None
for idx, stmt in enumerate(statements):
if stmt.principal < min_bal:
min_bal = stmt.principal
min_idx = idx
res = [None for _ in statements]
min_pay = max_total - (
min_sum - statements[min_idx].minimum_payment
)
for idx, stmt in enumerate(statements):
if idx == min_idx:
res[idx] = min_pay
else:
res[idx] = statements[idx].minimum_payment
return res
class LowestInterestRateFirstMethod(_PayoffMethod):
"""
Pay statements off from lowest to highest interest rate.
"""
description = 'Lowest to Highest Interest Rate'
show_in_ui = True
def find_payments(self, statements):
"""
Given a list of statements, return a list of payment amounts to make
on each of the statements.
:param statements: statements to pay, list of :py:class:`~.CCStatement`
:type statements: list
:return: list of payment amounts to make, same order as ``statements``
:rtype: list
"""
max_total = self.max_total_for_period(statements[0].billing_period)
min_sum = sum([s.minimum_payment for s in statements])
if min_sum > max_total:
raise TypeError(
'ERROR: Max total payment of %s is less than sum of minimum '
'payments (%s)' % (max_total, min_sum)
)
min_apr = Decimal('+Infinity')
min_idx = None
for idx, stmt in enumerate(statements):
if stmt.apr < min_apr:
min_apr = stmt.apr
min_idx = idx
res = [None for _ in statements]
min_pay = max_total - (
min_sum - statements[min_idx].minimum_payment
)
for idx, stmt in enumerate(statements):
if idx == min_idx:
res[idx] = min_pay
else:
res[idx] = statements[idx].minimum_payment
return res
def calculate_payoffs(payment_method, statements):
"""
    Calculate the number of billing periods and total amount of money required
    to pay off the cards associated with the given list of statements. Return a
    list of (`int` number of billing periods, `decimal.Decimal` amount paid,
`decimal.Decimal` first payment amount) tuples for each item in
`statements`.
:param payment_method: method used for calculating payment amount to make
on each statement; subclass of _PayoffMethod
:type payment_method: _PayoffMethod
:param statements: list of :py:class:`~.CCStatement` objects to pay off.
:type statements: list
    :return: list of (`int` number of billing periods, `decimal.Decimal`
amount paid, `decimal.Decimal` first payment amount) tuples for each item
in `statements`
:rtype: list
"""
def unpaid(s): return [x for x in s.keys() if s[x]['done'] is False]
payoffs = {}
logger.debug(
'calculating payoff via %s for: %s', payment_method, statements
)
for idx, stmt in enumerate(statements):
payoffs[stmt] = {
'months': 0, 'amt': Decimal('0.0'), 'idx': idx, 'done': False,
'next_pymt_amt': None
}
while len(unpaid(payoffs)) > 0:
u = unpaid(payoffs)
to_pay = payment_method.find_payments(u)
for stmt, p_amt in dict(zip(u, to_pay)).items():
if stmt.principal <= Decimal('0'):
payoffs[stmt]['done'] = True
continue
if stmt.principal <= p_amt:
payoffs[stmt]['done'] = True
payoffs[stmt]['months'] += 1 # increment months
payoffs[stmt]['amt'] += stmt.principal
if payoffs[stmt]['next_pymt_amt'] is None:
payoffs[stmt]['next_pymt_amt'] = stmt.principal
continue
payoffs[stmt]['months'] += 1 # increment months
payoffs[stmt]['amt'] += p_amt
if payoffs[stmt]['next_pymt_amt'] is None:
payoffs[stmt]['next_pymt_amt'] = p_amt
new_s = stmt.pay(Decimal('-1') * p_amt)
payoffs[new_s] = payoffs[stmt]
del payoffs[stmt]
res = []
for s in sorted(payoffs, key=lambda x: payoffs[x]['idx']):
tmp = (
payoffs[s]['months'],
payoffs[s]['amt'],
payoffs[s]['next_pymt_amt']
)
if payoffs[s]['next_pymt_amt'] is None:
tmp = (
payoffs[s]['months'],
payoffs[s]['amt'],
Decimal('0.0')
)
res.append(tmp)
return res
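# Illustrative sketch (not part of the original module): given two CCStatement
# instances s1 and s2 (the class is defined below), the call
#     calculate_payoffs(MinPaymentMethod(), [s1, s2])
# returns one tuple per statement in the same order, e.g.
#     [(28, Decimal('1345.67'), Decimal('35.00')),
#      (22, Decimal('890.12'), Decimal('50.00'))]
# i.e. number of billing periods, total paid, and the first payment amount;
# the figures here are invented purely to show the shape of the result.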
class CCStatement(object):
"""
Represent a credit card statement (one billing period).
"""
def __init__(self, interest_cls, principal, min_payment_cls, billing_period,
transactions={}, end_balance=None, interest_amt=None):
"""
        Initialize a CCStatement describing a single billing period.
:param interest_cls: Interest calculation method
:type interest_cls: _InterestCalculation
:param principal: starting principal for this billing period
:type principal: decimal.Decimal
:param min_payment_cls: Minimum payment calculation method
:type min_payment_cls: _MinPaymentFormula
:param billing_period: Billing period
:type billing_period: _BillingPeriod
:param transactions: transactions applied during this statement. Dict
of :py:class:`datetime.date` to :py:class:`decimal.Decimal`.
:type transactions: dict
:param end_balance: the ending balance of the statement, if known. If
not specified, this value will be calculated.
:type end_balance: decimal.Decimal
:param interest_amt: The amount of interest charged this statement. If
not specified, this value will be calculated.
:type interest_amt: decimal.Decimal
"""
if not isinstance(billing_period, _BillingPeriod):
raise TypeError(
'billing_period must be an instance of _BillingPeriod'
)
self._billing_period = billing_period
if not isinstance(interest_cls, _InterestCalculation):
raise TypeError(
'interest_cls must be an instance of _InterestCalculation'
)
self._interest_cls = interest_cls
if not isinstance(min_payment_cls, _MinPaymentFormula):
raise TypeError(
'min_payment_cls must be an instance of _MinPaymentFormula'
)
self._min_pay_cls = min_payment_cls
self._orig_principal = principal
self._min_pay = None
self._transactions = transactions
self._principal = end_balance
self._interest_amt = interest_amt
if end_balance is None or interest_amt is None:
res = self._interest_cls.calculate(
principal, self._billing_period.start_date,
self._billing_period.end_date, self._transactions
)
if end_balance is None:
self._principal = res['end_balance']
if interest_amt is None:
self._interest_amt = res['interest_paid']
def __repr__(self):
return '<CCStatement(interest_cls=%s principal=%s min_payment_cls=%s ' \
'transactions=%s end_balance=%s ' \
'interest_amt=%s start_date=%s end_date=%s)>' % (
self._interest_cls, self._principal, self._min_pay_cls,
self._transactions, self._principal,
self._interest_amt, self.start_date, self.end_date
)
@property
def principal(self):
return self._principal
@property
def billing_period(self):
"""
Return the Billing Period for this statement.
:return: billing period for this statement
:rtype: _BillingPeriod
"""
return self._billing_period
@property
def interest(self):
return self._interest_amt
@property
def start_date(self):
return self._billing_period.start_date
@property
def end_date(self):
return self._billing_period.end_date
@property
def apr(self):
return self._interest_cls.apr
@property
def minimum_payment(self):
"""
Return the minimum payment for the next billing cycle.
:return: minimum payment for the next billing cycle
:rtype: decimal.Decimal
"""
return self._min_pay_cls.calculate(
self._principal, self._interest_amt
)
def next_with_transactions(self, transactions={}):
"""
        Return a new CCStatement reflecting the next billing period, with the
        given transactions applied to it.
:param transactions: dict of transactions, `datetime.date` to `Decimal`
:type transactions: dict
:return: next period statement, with transactions applied
:rtype: CCStatement
"""
return CCStatement(
self._interest_cls,
self._principal,
self._min_pay_cls,
self._billing_period.next_period,
transactions=transactions
)
def pay(self, amount):
"""
Return a new CCStatement reflecting the next billing period, with a
payment of `amount` applied to it at the middle of the period.
:param amount: amount to pay during the next statement period
:type amount: decimal.Decimal
:return: next period statement, with payment applied
:rtype: CCStatement
"""
return self.next_with_transactions({
self._billing_period.next_period.payment_date: amount
})
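# Illustrative sketch (not part of the original module): a statement is built
# from an interest formula, a starting principal, a minimum-payment formula
# and a billing period, and pay() rolls it forward one period:
#     stmt = CCStatement(
#         AdbCompoundedDaily(Decimal('0.1999')), Decimal('1000.00'),
#         MinPaymentAmEx(), _BillingPeriod(date(2017, 7, 20)))
#     nxt = stmt.pay(Decimal('-100.00'))  # payments are negative transactions
#     nxt.principal  # balance after interest and the mid-period payment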
def subclass_dict(klass):
d = {}
for cls in klass.__subclasses__():
d[cls.__name__] = {
'description': cls.description,
'doc': cls.__doc__.strip(),
'cls': cls
}
return d
#: Dict mapping interest calculation class names to their description and
#: docstring.
INTEREST_CALCULATION_NAMES = subclass_dict(_InterestCalculation)
#: Dict mapping Minimum Payment Formula class names to their description and
#: docstring.
MIN_PAYMENT_FORMULA_NAMES = subclass_dict(_MinPaymentFormula)
#: Dict mapping Payoff Method class names to their description and docstring.
PAYOFF_METHOD_NAMES = subclass_dict(_PayoffMethod)
| agpl-3.0 | -4,902,928,390,450,669,000 | 34.319887 | 80 | 0.587235 | false |
rouge8/20questions | admin.py | 1 | 2796 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
'''
admin.py
Andy Freeland and Dan Levy
5 June 2010
Provides administrative functions, such as retraining characters and deleting
objects and characters. Accessed at the /admin url. Laughably insecure.
'''
import web
import config, model
import twentyquestions as game
urls = (
'', 'admin',
'/', 'admin',
'/dq', 'delete_question',
'/do', 'delete_object',
'/data', 'data',
'/retrain/(\d+)', 'retrain'
)
render = web.template.render('templates', base='base')
app = web.application(urls, locals())
class admin:
def GET(self):
'''Renders the admin page, presenting a menu of administrative functions.'''
return render.admin()
class delete_question:
def GET(self):
'''Lists all of the questions so that selected questions can be deleted.'''
questions = model.get_questions()
return render.delete_question(questions)
def POST(self):
'''Deletes selected questions and returns to the admin page.'''
question_ids = web.input()
for id in question_ids:
model.delete_question(id)
raise web.seeother('/')
class delete_object:
def GET(self):
'''Lists all of the objects so that selected objects can be deleted.'''
objects = model.get_objects()
return render.delete_object(objects)
def POST(self):
        '''Deletes selected objects and returns to the admin page.'''
object_ids = web.input()
for id in object_ids:
model.delete_object(id)
raise web.seeother('/')
class data:
def GET(self):
'''Renders a page listing all of the objects so that they can be retrained.'''
objects = model.get_objects()
return render.data(list(objects))
class retrain:
def GET(self, object_id):
'''Renders a page with all of the questions and values for a specified
object_id so that it can be retrained manually.'''
object = model.get_object_by_id(object_id)
questions = model.get_questions()
data = model.get_data_dictionary()
if object:
return render.retrain(object, list(questions), data)
else:
raise web.seeother('/') # returns to admin page
def POST(self, object_id):
'''Updates object_id with the newly selected answers to questions.'''
inputs = web.input()
for question_id in inputs:
answer = inputs[question_id]
if answer in ['yes','no']:
value = eval('game.' + answer) * game.RETRAIN_SCALE # STRONGLY weights values learned this way
model.update_data(object_id, question_id, value)
raise web.seeother('/data')
| mit | 3,521,186,683,873,155,000 | 31.511628 | 110 | 0.609084 | false |
gunny26/webstorage | bin/filename_to_checksum_dict.py | 1 | 6212 | #!/usr/bin/python3
# pylint: disable=line-too-long
# disable=locally-disabled, multiple-statements, fixme, line-too-long
"""
command line program to create/restore/test WebStorageArchives
"""
import os
import hashlib
import datetime
import dateutil.parser
import time
import sys
import socket
import argparse
import stat
import re
import sqlite3
import logging
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format='%(message)s')
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
import json
import dbm
# own modules
from webstorage import WebStorageArchive as WebStorageArchive
from webstorage import FileStorageClient as FileStorageClient
class NtoM(object):
"""
build n : m dependent key value stores
"""
def __init__(self, keyname1, keyname2):
self.__keyname1 = keyname1
self.__keyname2 = keyname2
self.__data = {
self.__keyname1 : {},
self.__keyname2 : {}
}
self.__dirty = False # indicate if data is modified in memory
def add(self, **kwds):
key1 = kwds[self.__keyname1]
key2 = kwds[self.__keyname2]
if key1 in self.__data[self.__keyname1]:
if key2 not in self.__data[self.__keyname1][key1]:
self.__data[self.__keyname1][key1].add(key2)
# ignore if value is already in list
else:
self.__data[self.__keyname1][key1] = set([key2, ])
if key2 in self.__data[self.__keyname2]:
if key1 not in self.__data[self.__keyname2][key2]:
self.__data[self.__keyname2][key2].add(key1)
# ignore if value is already in list
else:
self.__data[self.__keyname2][key2] = set([key1, ])
self.__dirty = True
def save(self, filename):
"""
dump internal data to sqlite database
"""
starttime = time.time()
conn = sqlite3.connect(filename)
cur = conn.cursor()
# key 1
tablename1 = "%s_to_%s" % (self.__keyname1, self.__keyname2)
logging.debug("saving to %s", tablename1)
cur.execute("drop table if exists %s" % tablename1)
conn.commit()
cur.execute("create table if not exists %s ('%s', '%s')" % (tablename1, self.__keyname1, self.__keyname2))
for key, value in self.__data[self.__keyname1].items():
cur.execute("insert into %s values (?, ?)" % tablename1, (key, json.dumps(list(value))))
conn.commit()
# key 2
tablename2 = "%s_to_%s" % (self.__keyname2, self.__keyname1)
logging.debug("saving to %s", tablename2)
cur.execute("drop table if exists %s" % tablename2)
conn.commit()
cur.execute("create table if not exists %s ('%s', '%s')" % (tablename2, self.__keyname2, self.__keyname1))
for key, value in self.__data[self.__keyname2].items():
cur.execute("insert into %s values (?, ?)" % tablename2, (key, json.dumps(list(value))))
conn.commit()
logging.debug("save done in %0.2f s", time.time()-starttime)
logging.debug("saved %d in %s", len(self.__data[self.__keyname1]), self.__keyname1)
logging.debug("saved %d in %s", len(self.__data[self.__keyname2]), self.__keyname2)
self.__dirty = False
def load(self, filename):
"""
        load internal data from sqlite database
"""
starttime = time.time()
conn = sqlite3.connect(filename)
cur = conn.cursor()
try:
# key 1
tablename1 = "%s_to_%s" % (self.__keyname1, self.__keyname2)
for row in cur.execute("select * from %s" % tablename1).fetchall():
self.__data[self.__keyname1][row[0]] = set(json.loads(row[1]))
# key 2
tablename2 = "%s_to_%s" % (self.__keyname2, self.__keyname1)
for row in cur.execute("select * from %s" % tablename2).fetchall():
self.__data[self.__keyname2][row[0]] = set(json.loads(row[1]))
logging.debug("load done in %0.2f s", time.time()-starttime)
logging.debug("loaded %d in %s", len(self.__data[self.__keyname1]), self.__keyname1)
logging.debug("loaded %d in %s", len(self.__data[self.__keyname2]), self.__keyname2)
except sqlite3.OperationalError as exc:
logging.info("ignoring if table does not exist")
def update(filename):
conn = sqlite3.connect(filename)
cur = conn.cursor()
cur.execute("create table if not exists backupsets_done (backupset)")
myhostname = socket.gethostname()
wsa = WebStorageArchive()
backupsets = wsa.get_backupsets(myhostname)
# like wse0000107_mesznera_2016-12-06T13:48:13.400565.wstar.gz
filename_to_checksum = NtoM("absfile", "checksum")
filename_to_checksum.load(filename)
filename_to_backupset = NtoM("absfile", "backupset")
filename_to_backupset.load(filename)
backupsets_done = [row[0] for row in cur.execute("select backupset from backupsets_done").fetchall()]
for backupset in backupsets:
starttime = time.time()
#if backupset in backupsets_done:
# print(" backupset %s already done" % backupset)
# continue
hostname, tag, isoformat_ext = backupset.split("_")
isoformat = isoformat_ext[:-9]
datestring = dateutil.parser.parse(isoformat)
print(hostname, tag, dateutil.parser.parse(isoformat))
data = wsa.get(backupset)
for absfile in data["filedata"].keys():
checksum = data["filedata"][absfile]["checksum"]
filename_to_checksum.add(absfile=absfile, checksum=checksum)
filename_to_backupset.add(absfile=absfile, backupset=backupset)
# print(data["filedata"][absfile])
#cur.execute("insert into backupsets_done values (?)", (backupset,))
#conn.commit()
logging.info(" done in %0.2f s", time.time()-starttime)
filename_to_checksum.save(filename)
filename_to_backupset.save(filename)
if __name__ == "__main__":
filename = "filename_to_checksum_dict.db"
#main(filename)
update(filename)
| gpl-2.0 | 6,207,287,639,097,450,000 | 40.139073 | 114 | 0.61027 | false |
ionux/bitforge | bitforge/utils/encoding.py | 1 | 10832 | # -*- coding: utf-8 -*-
"""
Various utilities useful for converting one Bitcoin format to another, including some
the human-transcribable format hashed_base58.
The MIT License (MIT)
Copyright (c) 2013 by Richard Kiss
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import hashlib
from .intbytes import byte_to_int, bytes_from_int
BASE58_ALPHABET = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
BASE58_BASE = len(BASE58_ALPHABET)
BASE58_LOOKUP = dict((c, i) for i, c in enumerate(BASE58_ALPHABET))
class EncodingError(Exception):
pass
def ripemd160(data):
return hashlib.new("ripemd160", data)
try:
ripemd160(b'').digest()
except Exception:
# stupid Google App Engine hashlib doesn't support ripemd160 for some stupid reason
# import it from pycrypto. You need to add
# - name: pycrypto
# version: "latest"
# to the "libraries" section of your app.yaml
from Crypto.Hash.RIPEMD import RIPEMD160Hash as ripemd160
def to_long(base, lookup_f, s):
"""
Convert an array to a (possibly bignum) integer, along with a prefix value
of how many prefixed zeros there are.
base:
the source base
lookup_f:
a function to convert an element of s to a value between 0 and base-1.
s:
the value to convert
"""
prefix = 0
v = 0
for c in s:
v *= base
try:
v += lookup_f(c)
except Exception:
raise EncodingError("bad character %s in string %s" % (c, s))
if v == 0:
prefix += 1
return v, prefix
def from_long(v, prefix, base, charset):
"""The inverse of to_long. Convert an integer to an arbitrary base.
v: the integer value to convert
prefix: the number of prefixed 0s to include
base: the new base
charset: an array indicating what printable character to use for each value.
"""
l = bytearray()
while v > 0:
try:
v, mod = divmod(v, base)
l.append(charset(mod))
except Exception:
raise EncodingError("can't convert to character corresponding to %d" % mod)
l.extend([charset(0)] * prefix)
l.reverse()
return bytes(l)
def to_bytes_32(v):
v = from_long(v, 0, 256, lambda x: x)
if len(v) > 32:
raise ValueError("input to to_bytes_32 is too large")
return ((b'\0' * 32) + v)[-32:]
if hasattr(int, "to_bytes"):
to_bytes_32 = lambda v: v.to_bytes(32, byteorder="big")
def from_bytes_32(v):
if len(v) != 32:
raise ValueError("input to from_bytes_32 is wrong length")
return to_long(256, byte_to_int, v)[0]
if hasattr(int, "from_bytes"):
from_bytes_32 = lambda v: int.from_bytes(v, byteorder="big")
def double_sha256(data):
"""A standard compound hash."""
return hashlib.sha256(hashlib.sha256(data).digest()).digest()
def hash160(data):
"""A standard compound hash."""
return ripemd160(hashlib.sha256(data).digest()).digest()
def b2a_base58(s):
"""Convert binary to base58 using BASE58_ALPHABET. Like Bitcoin addresses."""
v, prefix = to_long(256, byte_to_int, s)
s = from_long(v, prefix, BASE58_BASE, lambda v: BASE58_ALPHABET[v])
return s.decode("utf8")
def a2b_base58(s):
"""Convert base58 to binary using BASE58_ALPHABET."""
v, prefix = to_long(BASE58_BASE, lambda c: BASE58_LOOKUP[c], s.encode("utf8"))
return from_long(v, prefix, 256, lambda x: x)
def b2a_hashed_base58(data):
"""
A "hashed_base58" structure is a base58 integer (which looks like a string)
with four bytes of hash data at the end. Bitcoin does this in several places,
including Bitcoin addresses.
This function turns data (of type "bytes") into its hashed_base58 equivalent.
"""
return b2a_base58(data + double_sha256(data)[:4])
def a2b_hashed_base58(s):
"""
If the passed string is hashed_base58, return the binary data.
Otherwise raises an EncodingError.
"""
data = a2b_base58(s)
data, the_hash = data[:-4], data[-4:]
if double_sha256(data)[:4] == the_hash:
return data
raise EncodingError("hashed base58 has bad checksum %s" % s)
def is_hashed_base58_valid(base58):
"""Return True if and only if base58 is valid hashed_base58."""
try:
a2b_hashed_base58(base58)
except EncodingError:
return False
return True
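# Illustrative sketch (not part of the original module): hashed_base58 is the
# Base58Check scheme, i.e. payload + first 4 bytes of double_sha256(payload):
#     s = b2a_hashed_base58(b'\0' + b'\x00' * 20)   # version byte + hash160
#     a2b_hashed_base58(s)                          # -> the original 21 bytes
#     is_hashed_base58_valid(s)                     # -> True
# (the all-zero payload above is commonly quoted as 1111111111111111111114oLvT2).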
def wif_to_tuple_of_prefix_secret_exponent_compressed(wif):
"""
Return a tuple of (prefix, secret_exponent, is_compressed).
"""
decoded = a2b_hashed_base58(wif)
actual_prefix, private_key = decoded[:1], decoded[1:]
compressed = len(private_key) > 32
return actual_prefix, from_bytes_32(private_key[:32]), compressed
def wif_to_tuple_of_secret_exponent_compressed(wif, allowable_wif_prefixes=[b'\x80']):
"""Convert a WIF string to the corresponding secret exponent. Private key manipulation.
Returns a tuple: the secret exponent, as a bignum integer, and a boolean indicating if the
WIF corresponded to a compressed key or not.
Not that it matters, since we can use the secret exponent to generate both the compressed
and uncompressed Bitcoin address."""
actual_prefix, secret_exponent, is_compressed = wif_to_tuple_of_prefix_secret_exponent_compressed(wif)
if actual_prefix not in allowable_wif_prefixes:
raise EncodingError("unexpected first byte of WIF %s" % wif)
return secret_exponent, is_compressed
def wif_to_secret_exponent(wif, allowable_wif_prefixes=[b'\x80']):
"""Convert a WIF string to the corresponding secret exponent."""
return wif_to_tuple_of_secret_exponent_compressed(wif, allowable_wif_prefixes=allowable_wif_prefixes)[0]
def is_valid_wif(wif, allowable_wif_prefixes=[b'\x80']):
"""Return a boolean indicating if the WIF is valid."""
try:
wif_to_secret_exponent(wif, allowable_wif_prefixes=allowable_wif_prefixes)
except EncodingError:
return False
return True
def secret_exponent_to_wif(secret_exp, compressed=True, wif_prefix=b'\x80'):
"""Convert a secret exponent (correspdong to a private key) to WIF format."""
d = wif_prefix + to_bytes_32(secret_exp)
if compressed:
d += b'\01'
return b2a_hashed_base58(d)
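# Illustrative sketch (not part of the original module): a WIF string is just
# hashed_base58 over 0x80 + 32-byte secret (+ 0x01 when compressed), so
#     wif = secret_exponent_to_wif(1, compressed=False)
#     wif_to_secret_exponent(wif)   # -> 1
# round-trips; the WIF for secret exponent 1 is commonly quoted as
# 5HpHagT65TZzG1PH3CSu63k8DbpvD8s5ip4nEB3kEsreAnchuDf.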
def public_pair_to_sec(public_pair, compressed=True):
"""Convert a public pair (a pair of bignums corresponding to a public key) to the
gross internal sec binary format used by OpenSSL."""
x_str = to_bytes_32(public_pair[0])
if compressed:
return bytes_from_int((2 + (public_pair[1] & 1))) + x_str
y_str = to_bytes_32(public_pair[1])
return b'\4' + x_str + y_str
def sec_to_public_pair(sec):
"""Convert a public key in sec binary format to a public pair."""
x = from_bytes_32(sec[1:33])
sec0 = sec[:1]
if sec0 == b'\4':
y = from_bytes_32(sec[33:65])
from ecdsa import is_public_pair_valid
from secp256k1 import generator_secp256k1
public_pair = (x, y)
# verify this is on the curve
if not is_public_pair_valid(generator_secp256k1, public_pair):
raise EncodingError("invalid (x, y) pair")
return public_pair
if sec0 in (b'\2', b'\3'):
from ecdsa import public_pair_for_x
from secp256k1 import generator_secp256k1
return public_pair_for_x(generator_secp256k1, x, is_even=(sec0 == b'\2'))
raise EncodingError("bad sec encoding for public key")
def is_sec_compressed(sec):
"""Return a boolean indicating if the sec represents a compressed public key."""
return sec[:1] in (b'\2', b'\3')
def public_pair_to_hash160_sec(public_pair, compressed=True):
"""Convert a public_pair (corresponding to a public key) to hash160_sec format.
This is a hash of the sec representation of a public key, and is used to generate
the corresponding Bitcoin address."""
return hash160(public_pair_to_sec(public_pair, compressed=compressed))
def hash160_sec_to_bitcoin_address(hash160_sec, address_prefix=b'\0'):
"""Convert the hash160 of a sec version of a public_pair to a Bitcoin address."""
return b2a_hashed_base58(address_prefix + hash160_sec)
def bitcoin_address_to_hash160_sec_with_prefix(bitcoin_address):
"""
Convert a Bitcoin address back to the hash160_sec format and
also return the prefix.
"""
blob = a2b_hashed_base58(bitcoin_address)
if len(blob) != 21:
raise EncodingError("incorrect binary length (%d) for Bitcoin address %s" %
(len(blob), bitcoin_address))
if blob[:1] not in [b'\x6f', b'\0']:
raise EncodingError("incorrect first byte (%s) for Bitcoin address %s" % (blob[0], bitcoin_address))
return blob[1:], blob[:1]
def bitcoin_address_to_hash160_sec(bitcoin_address, address_prefix=b'\0'):
"""Convert a Bitcoin address back to the hash160_sec format of the public key.
Since we only know the hash of the public key, we can't get the full public key back."""
hash160, actual_prefix = bitcoin_address_to_hash160_sec_with_prefix(bitcoin_address)
if (address_prefix == actual_prefix):
return hash160
raise EncodingError("Bitcoin address %s for wrong network" % bitcoin_address)
def public_pair_to_bitcoin_address(public_pair, compressed=True, address_prefix=b'\0'):
"""Convert a public_pair (corresponding to a public key) to a Bitcoin address."""
return hash160_sec_to_bitcoin_address(public_pair_to_hash160_sec(
public_pair, compressed=compressed), address_prefix=address_prefix)
def is_valid_bitcoin_address(bitcoin_address, allowable_prefixes=b'\0'):
"""Return True if and only if bitcoin_address is valid."""
try:
hash160, prefix = bitcoin_address_to_hash160_sec_with_prefix(bitcoin_address)
except EncodingError:
return False
return prefix in allowable_prefixes
| mit | -1,994,404,400,331,621,600 | 34.631579 | 108 | 0.681961 | false |
DedMemez/ODS-August-2017 | dna/DNAVisGroup.py | 1 | 2344 | # Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.dna.DNAVisGroup
from panda3d.core import LVector3, LVector3f
import DNAGroup
import DNABattleCell
import DNAUtil
class DNAVisGroup(DNAGroup.DNAGroup):
COMPONENT_CODE = 2
def __init__(self, name):
DNAGroup.DNAGroup.__init__(self, name)
self.visibles = []
self.suitEdges = []
self.battleCells = []
def getVisGroup(self):
return self
def addBattleCell(self, battleCell):
self.battleCells.append(battleCell)
def addSuitEdge(self, suitEdge):
self.suitEdges.append(suitEdge)
def addVisible(self, visible):
self.visibles.append(visible)
def getBattleCell(self, i):
return self.battleCells[i]
def getNumBattleCells(self):
return len(self.battleCells)
def getNumSuitEdges(self):
return len(self.suitEdges)
def getNumVisibles(self):
return len(self.visibles)
def getSuitEdge(self, i):
return self.suitEdges[i]
def getVisibleName(self, i):
return self.visibles[i]
def getVisibles(self):
return self.visibles
def removeBattleCell(self, cell):
self.battleCells.remove(cell)
def removeSuitEdge(self, edge):
self.suitEdges.remove(edge)
def removeVisible(self, visible):
self.visibles.remove(visible)
def makeFromDGI(self, dgi, dnaStorage):
DNAGroup.DNAGroup.makeFromDGI(self, dgi)
numEdges = dgi.getUint16()
for _ in xrange(numEdges):
index = dgi.getUint16()
endPoint = dgi.getUint16()
self.addSuitEdge(dnaStorage.getSuitEdge(index, endPoint))
numVisibles = dgi.getUint16()
for _ in xrange(numVisibles):
self.addVisible(DNAUtil.dgiExtractString8(dgi))
numCells = dgi.getUint16()
for _ in xrange(numCells):
w = dgi.getUint8()
h = dgi.getUint8()
x, y, z = [ dgi.getInt32() / 100.0 for i in xrange(3) ]
self.addBattleCell(DNABattleCell.DNABattleCell(w, h, LVector3f(x, y, z)))
def destroy(self):
del self.visibles[:]
del self.suitEdges[:]
del self.battleCells[:]
DNAGroup.DNAGroup.destroy(self) | apache-2.0 | 3,608,324,131,246,925,300 | 26.962963 | 85 | 0.611348 | false |
erykoff/redmapper | redmapper/__init__.py | 1 | 1821 | import os
os.environ['MKL_NUM_THREADS'] = '1'
os.environ['NUMEXPR_NUM_THREADS'] = '1'
os.environ['OMP_NUM_THREADS'] = '1'
from ._version import __version__, __version_info__
version = __version__
from . import calibration
from . import pipeline
from . import redmagic
from .configuration import Configuration
from .runcat import RunCatalog
from .run_zscan import RunZScan
from .solver_nfw import Solver
from .catalog import DataObject, Entry, Catalog
from .redsequence import RedSequenceColorPar
from .chisq_dist import compute_chisq
from .background import Background, ZredBackground, BackgroundGenerator, ZredBackgroundGenerator
from .cluster import Cluster, ClusterCatalog
from .galaxy import Galaxy, GalaxyCatalog, GalaxyCatalogMaker
from .mask import Mask, HPMask, get_mask
from .zlambda import Zlambda, ZlambdaCorrectionPar
from .cluster_runner import ClusterRunner
from .run_firstpass import RunFirstPass
from .run_likelihoods import RunLikelihoods
from .run_percolation import RunPercolation
from .run_colormem import RunColormem
from .zred_color import ZredColor
from .centering import Centering, CenteringWcenZred, CenteringBCG, CenteringRandom, CenteringRandomSatellite
from .depthmap import DepthMap
from .color_background import ColorBackground, ColorBackgroundGenerator
from .fitters import MedZFitter, RedSequenceFitter, RedSequenceOffDiagonalFitter, CorrectionFitter, EcgmmFitter
from .zred_runner import ZredRunCatalog, ZredRunPixels
from .redmapper_run import RedmapperRun
from .depth_fitting import DepthLim, applyErrorModel
from .plotting import SpecPlot, NzPlot
from .volumelimit import VolumeLimitMask, VolumeLimitMaskFixed
from .utilities import read_members
from .randoms import GenerateRandoms, RandomCatalog, RandomCatalogMaker, RandomWeigher
from .run_randoms_zmask import RunRandomsZmask
| apache-2.0 | 5,558,822,357,060,048,000 | 40.386364 | 111 | 0.830862 | false |
CopyChat/Plotting | Python/PythonNetCDF.py | 1 | 10821 | '''
NAME
NetCDF with Python
PURPOSE
To demonstrate how to read and write data with NetCDF files using
a NetCDF file from the NCEP/NCAR Reanalysis.
Plotting using Matplotlib and Basemap is also shown.
PROGRAMMER(S)
Chris Slocum
REVISION HISTORY
20140320 -- Initial version created and posted online
20140722 -- Added basic error handling to ncdump
Thanks to K.-Michael Aye for highlighting the issue
REFERENCES
netcdf4-python -- http://code.google.com/p/netcdf4-python/
NCEP/NCAR Reanalysis -- Kalnay et al. 1996
http://dx.doi.org/10.1175/1520-0477(1996)077<0437:TNYRP>2.0.CO;2
'''
import datetime as dt # Python standard library datetime module
import numpy as np
from netCDF4 import Dataset # http://code.google.com/p/netcdf4-python/
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
def ncdump(nc_fid, verb=True):
'''
ncdump outputs dimensions, variables and their attribute information.
The information is similar to that of NCAR's ncdump utility.
ncdump requires a valid instance of Dataset.
Parameters
----------
nc_fid : netCDF4.Dataset
A netCDF4 dateset object
verb : Boolean
whether or not nc_attrs, nc_dims, and nc_vars are printed
Returns
-------
nc_attrs : list
A Python list of the NetCDF file global attributes
nc_dims : list
A Python list of the NetCDF file dimensions
nc_vars : list
A Python list of the NetCDF file variables
'''
def print_ncattr(key):
"""
Prints the NetCDF file attributes for a given key
Parameters
----------
key : unicode
a valid netCDF4.Dataset.variables key
"""
try:
print "\t\ttype:", repr(nc_fid.variables[key].dtype)
for ncattr in nc_fid.variables[key].ncattrs():
print '\t\t%s:' % ncattr,\
repr(nc_fid.variables[key].getncattr(ncattr))
except KeyError:
print "\t\tWARNING: %s does not contain variable attributes" % key
# NetCDF global attributes
nc_attrs = nc_fid.ncattrs()
if verb:
print "NetCDF Global Attributes:"
for nc_attr in nc_attrs:
print '\t%s:' % nc_attr, repr(nc_fid.getncattr(nc_attr))
nc_dims = [dim for dim in nc_fid.dimensions] # list of nc dimensions
# Dimension shape information.
if verb:
print "NetCDF dimension information:"
for dim in nc_dims:
print "\tName:", dim
print "\t\tsize:", len(nc_fid.dimensions[dim])
print_ncattr(dim)
# Variable information.
nc_vars = [var for var in nc_fid.variables] # list of nc variables
if verb:
print "NetCDF variable information:"
for var in nc_vars:
if var not in nc_dims:
print '\tName:', var
print "\t\tdimensions:", nc_fid.variables[var].dimensions
print "\t\tsize:", nc_fid.variables[var].size
print_ncattr(var)
return nc_attrs, nc_dims, nc_vars
nc_f = './CLM45_Micro_UW_SRF.2005120100.for.test.nc' # Your filename
nc_fid = Dataset(nc_f, 'r') # Dataset is the class behavior to open the file
# and create an instance of the ncCDF4 class
nc_attrs, nc_dims, nc_vars = ncdump(nc_fid)
# Extract data from NetCDF file
lats = nc_fid.variables['xlat'][:] # extract/copy the data
lons = nc_fid.variables['xlon'][:]
time = nc_fid.variables['time'][:]
rsds = nc_fid.variables['rsds'][:] # shape is time, lat, lon as shown above
time_idx = 237 # some random day in 2012
# Python and the reanalysis are slightly off in time so this fixes that problem
offset = dt.timedelta(hours=48)
# List of all times in the file as datetime objects
dt_time = [dt.date(1, 1, 1) + dt.timedelta(hours=t/20) - offset\
for t in time]
cur_time = dt_time[time_idx]
# Plot of global temperature on our random day
fig = plt.figure()
fig.subplots_adjust(left=0., right=1., bottom=0., top=0.9)
# Setup the map. See http://matplotlib.org/basemap/users/mapsetup.html
# for other projections.
m = Basemap(projection='moll', llcrnrlat=-90, urcrnrlat=90,\
llcrnrlon=0, urcrnrlon=360, resolution='c', lon_0=0)
m.drawcoastlines()
m.drawmapboundary()
# Make the plot continuous
test=rsds[0,:,:]
print test.shape
print rsds.shape
print lons.shape
rsds_cyclic, lons_cyclic = addcyclic(rsds[time_idx,:,:], lons)
# Shift the grid so lons go from -180 to 180 instead of 0 to 360.
rsds_cyclic, lons_cyclic = shiftgrid(180., rsds_cyclic, lons_cyclic, start=False)
# Create 2D lat/lon arrays for Basemap
lon2d, lat2d = np.meshgrid(lons_cyclic, lats)
# Transforms lat/lon into plotting coordinates for projection
x, y = m(lon2d, lat2d)
# Plot of rsds temperature with 11 contour intervals
cs = m.contourf(x, y, rsds_cyclic, 11, cmap=plt.cm.Spectral_r)
cbar = plt.colorbar(cs, orientation='horizontal', shrink=0.5)
cbar.set_label("%s (%s)" % (nc_fid.variables['rsds'].var_desc,\
nc_fid.variables['rsds'].units))
plt.title("%s on %s" % (nc_fid.variables['rsds'].var_desc, cur_time))
# Writing NetCDF files
# For this example, we will create two NetCDF4 files. One with the global rsds
# temperature departure from its value at Darwin, Australia. The other with
# the temperature profile for the entire year at Darwin.
darwin = {'name': 'Darwin, Australia', 'lat': -12.45, 'lon': 130.83}
# Find the nearest latitude and longitude for Darwin
lat_idx = np.abs(lats - darwin['lat']).argmin()
lon_idx = np.abs(lons - darwin['lon']).argmin()
# Simple example: temperature profile for the entire year at Darwin.
# Open a new NetCDF file to write the data to. For format, you can choose from
# 'NETCDF3_CLASSIC', 'NETCDF3_64BIT', 'NETCDF4_CLASSIC', and 'NETCDF4'
w_nc_fid = Dataset('darwin_2012.nc', 'w', format='NETCDF4')
w_nc_fid.description = "NCEP/NCAR Reanalysis %s from its value at %s. %s" %\
(nc_fid.variables['rsds'].var_desc.lower(),\
darwin['name'], nc_fid.description)
# Using our previous dimension info, we can create the new time dimension
# Even though we know the size, we are going to set the size to unknown
w_nc_fid.createDimension('time', None)
w_nc_dim = w_nc_fid.createVariable('time', nc_fid.variables['time'].dtype,\
('time',))
# You can do this step yourself but someone else did the work for us.
for ncattr in nc_fid.variables['time'].ncattrs():
w_nc_dim.setncattr(ncattr, nc_fid.variables['time'].getncattr(ncattr))
# Assign the dimension data to the new NetCDF file.
w_nc_fid.variables['time'][:] = time
w_nc_var = w_nc_fid.createVariable('rsds', 'f8', ('time'))
w_nc_var.setncatts({'long_name': u"mean Daily Air temperature",\
'units': u"degK", 'level_desc': u'Surface',\
'var_desc': u"Air temperature",\
'statistic': u'Mean\nM'})
w_nc_fid.variables['rsds'][:] = rsds[time_idx, lat_idx, lon_idx]
w_nc_fid.close() # close the new file
# A plot of the temperature profile for Darwin in 2012
fig = plt.figure()
plt.plot(dt_time, rsds[:, lat_idx, lon_idx], c='r')
plt.plot(dt_time[time_idx], rsds[time_idx, lat_idx, lon_idx], c='b', marker='o')
plt.text(dt_time[time_idx], rsds[time_idx, lat_idx, lon_idx], cur_time,\
ha='right')
fig.autofmt_xdate()
plt.ylabel("%s (%s)" % (nc_fid.variables['rsds'].var_desc,\
nc_fid.variables['rsds'].units))
plt.xlabel("Time")
plt.title("%s from\n%s for %s" % (nc_fid.variables['rsds'].var_desc,\
darwin['name'], cur_time.year))
# Complex example: global temperature departure from its value at Darwin
departure = rsds[:, :, :] - rsds[:, lat_idx, lon_idx].reshape((time.shape[0],\
1, 1))
# Open a new NetCDF file to write the data to. For format, you can choose from
# 'NETCDF3_CLASSIC', 'NETCDF3_64BIT', 'NETCDF4_CLASSIC', and 'NETCDF4'
w_nc_fid = Dataset('rsds.departure.sig995.2012.nc', 'w', format='NETCDF4')
w_nc_fid.description = "The departure of the NCEP/NCAR Reanalysis " +\
"%s from its value at %s. %s" %\
(nc_fid.variables['rsds'].var_desc.lower(),\
darwin['name'], nc_fid.description)
# Using our previous dimension information, we can create the new dimensions
data = {}
for dim in nc_dims:
w_nc_fid.createDimension(dim, nc_fid.variables[dim].size)
data[dim] = w_nc_fid.createVariable(dim, nc_fid.variables[dim].dtype,\
(dim,))
# You can do this step yourself but someone else did the work for us.
for ncattr in nc_fid.variables[dim].ncattrs():
data[dim].setncattr(ncattr, nc_fid.variables[dim].getncattr(ncattr))
# Assign the dimension data to the new NetCDF file.
w_nc_fid.variables['time'][:] = time
w_nc_fid.variables['lat'][:] = lats
w_nc_fid.variables['lon'][:] = lons
# Ok, time to create our departure variable
w_nc_var = w_nc_fid.createVariable('rsds_dep', 'f8', ('time', 'lat', 'lon'))
w_nc_var.setncatts({'long_name': u"mean Daily Air temperature departure",\
'units': u"degK", 'level_desc': u'Surface',\
'var_desc': u"Air temperature departure",\
'statistic': u'Mean\nM'})
w_nc_fid.variables['rsds_dep'][:] = departure
w_nc_fid.close() # close the new file
# Rounded maximum absolute value of the departure used for contouring
max_dep = np.round(np.abs(departure[time_idx, :, :]).max()+5., decimals=-1)
# Generate a figure of the departure for a single day
fig = plt.figure()
fig.subplots_adjust(left=0., right=1., bottom=0., top=0.9)
m = Basemap(projection='moll', llcrnrlat=-90, urcrnrlat=90,\
llcrnrlon=0, urcrnrlon=360, resolution='c', lon_0=0)
m.drawcoastlines()
m.drawmapboundary()
dep_cyclic, lons_cyclic = addcyclic(departure[time_idx, :, :], lons)
dep_cyclic, lons_cyclic = shiftgrid(180., dep_cyclic, lons_cyclic, start=False)
lon2d, lat2d = np.meshgrid(lons_cyclic, lats)
x, y = m(lon2d, lat2d)
levels = np.linspace(-max_dep, max_dep, 11)
cs = m.contourf(x, y, dep_cyclic, levels=levels, cmap=plt.cm.bwr)
x, y = m(darwin['lon'], darwin['lat'])
plt.plot(x, y, c='c', marker='o')
plt.text(x, y, 'Darwin,\nAustralia', color='r', weight='semibold')
cbar = plt.colorbar(cs, orientation='horizontal', shrink=0.5)
cbar.set_label("%s departure (%s)" % (nc_fid.variables['rsds'].var_desc,\
nc_fid.variables['rsds'].units))
plt.title("Departure of Global %s from\n%s for %s" %\
(nc_fid.variables['rsds'].var_desc, darwin['name'], cur_time))
plt.show()
# Close original NetCDF file.
nc_fid.close()
| gpl-3.0 | 2,362,746,802,840,747,500 | 42.633065 | 81 | 0.641253 | false |
tek/amino | amino/tree.py | 1 | 14049 | import abc
from typing import Callable, TypeVar, Generic, Union, cast, Any
from amino.logging import Logging
from amino import LazyList, Boolean, __, _, Either, Right, Maybe, Left, L, Map, curried
from amino.boolean import false, true
from amino.tc.base import Implicits
from amino.tc.flat_map import FlatMap
from amino.func import call_by_name
from amino.lazy_list import LazyLists
def indent(strings: LazyList[str]) -> LazyList[str]:
return strings.map(' ' + _)
Data = TypeVar('Data')
Data1 = TypeVar('Data1')
Sub = TypeVar('Sub')
Sub1 = TypeVar('Sub1')
A = TypeVar('A')
B = TypeVar('B')
Z = TypeVar('Z')
Key = Union[str, int]
class Node(Generic[Data, Sub], Logging, abc.ABC, Implicits, implicits=True, auto=True):
@abc.abstractproperty
def sub(self) -> Sub:
...
@abc.abstractproperty
def sub_l(self) -> LazyList['Node[Data, Any]']:
...
@abc.abstractmethod
def _strings(self) -> LazyList[str]:
...
@property
def strings(self) -> LazyList[str]:
return self._strings()
def _show(self) -> str:
return self._strings().mk_string('\n')
@property
def show(self) -> str:
return self._show()
@abc.abstractmethod
def foreach(self, f: Callable[['Node'], None]) -> None:
...
@abc.abstractmethod
def filter(self, pred: Callable[['Node'], bool]) -> 'Node':
...
def filter_not(self, pred: Callable[['Node'], bool]) -> 'Node':
return self.filter(lambda a: not pred(a))
@abc.abstractproperty
def flatten(self) -> 'LazyList[Any]':
...
@abc.abstractmethod
def contains(self, target: 'Node') -> Boolean:
...
@abc.abstractmethod
def lift(self, key: Key) -> 'SubTree':
...
def __getitem__(self, key: Key) -> 'SubTree':
return self.lift(key)
@abc.abstractproperty
def s(self) -> 'SubTree':
...
@abc.abstractproperty
def empty(self) -> Boolean:
...
@curried
def fold_left(self, z: Z, f: Callable[[Z, 'Node'], Z]) -> Z:
z1 = f(z, self)
return self.sub_l.fold_left(z1)(lambda z2, a: a.fold_left(z2)(f))
@abc.abstractmethod
def replace(self, data: LazyList['Node[Data1, Sub1]']) -> 'Node[Data1, Sub1]':
...
@abc.abstractmethod
def map_nodes(self, f: Callable[['Node[Data, Sub]'], 'Node[Data, Sub]']) -> 'Node[Data, Sub]':
...
class Inode(Generic[Data, Sub], Node[Data, Sub]):
@abc.abstractproperty
def sub(self) -> LazyList[Any]:
...
def foreach(self, f: Callable[[Node], None]) -> None:
f(self)
self.sub_l.foreach(__.foreach(f))
@property
def flatten(self) -> LazyList[Any]:
return self.sub_l.flat_map(_.flatten).cons(self)
def contains(self, target: Node) -> Boolean:
return self.sub_l.contains(target)
@property
def empty(self) -> Boolean:
return self.data.empty
class ListNode(Generic[Data], Inode[Data, LazyList[Node[Data, Any]]]):
def __init__(self, sub: LazyList[Node[Data, Any]]) -> None:
self.data = sub
@property
def sub(self) -> LazyList[Node[Data, Any]]:
return self.data
@property
def sub_l(self) -> LazyList[Node[Data, Any]]:
return self.sub
@property
def _desc(self) -> str:
return '[]'
def _strings(self) -> LazyList[str]:
return indent(self.sub // (lambda a: a._strings())).cons(self._desc)
@property
def head(self) -> 'SubTree':
return self.lift(0)
@property
def last(self) -> 'SubTree':
return self.lift(-1)
def __str__(self) -> str:
return '{}({})'.format(self.__class__.__name__, self.sub.map(str).mk_string(','))
def __repr__(self) -> str:
return str(self)
def lift(self, key: Key) -> 'SubTree':
return (
SubTreeInvalid(key, 'ListNode index must be int')
if isinstance(key, str) else
self.sub.lift(key) / L(SubTree.cons)(_, key) | (lambda: SubTreeInvalid(key, 'ListNode index oob'))
)
def replace(self, sub: LazyList[Any]) -> Node:
return ListNode(sub)
def filter(self, pred: Callable[[Node], bool]) -> Node:
def filt(n: Node) -> bool:
return (
pred(n)
if isinstance(n, LeafNode) else
not n.empty
)
return self.replace(self.sub.map(__.filter(pred)).filter(filt))
@property
def s(self) -> 'SubTree':
return SubTreeList(self, 'root')
def map_nodes(self, f: Callable[['Node[Data, Sub]'], 'Node[Data, Sub]']) -> 'Node[Data, Sub]':
return f(ListNode(self.sub.map(lambda a: a.map_nodes(f))))
class MapNode(Generic[Data], Inode[Data, Map[str, Node[Data, Any]]]):
def __init__(self, data: Map[str, Node[Data, Any]]) -> None:
self.data = data
@property
def sub(self) -> Map[str, Node[Data, Any]]:
return self.data
@property
def sub_l(self) -> LazyList[Node[Data, Any]]:
return LazyList(self.data.v)
@property
def _desc(self) -> str:
return '{}'
def _strings(self) -> LazyList[str]:
return indent(self.sub_l // (lambda a: a._strings())).cons(self._desc)
def __str__(self) -> str:
return '{}({})'.format(self.__class__.__name__, self.sub_l)
def __repr__(self) -> str:
return str(self)
# TODO allow int indexes into sub_l
def lift(self, key: Key) -> 'SubTree':
def err() -> 'SubTree':
keys = ', '.join(self.data.keys())
return SubTreeInvalid(key, f'MapNode({self.rule}) invalid key ({keys})')
return (
self.data.lift(key) /
L(SubTree.cons)(_, key) |
err
)
def replace(self, sub: Map[str, Node]) -> Node:
return MapNode(sub)
def filter(self, pred: Callable[[Node], bool]) -> Node:
def filt(n: Node) -> bool:
return (
pred(n)
if isinstance(n, LeafNode) else
not n.empty
)
return self.replace(self.data.valmap(__.filter(pred)).valfilter(filt))
@property
def s(self) -> 'SubTree':
return SubTreeMap(self, 'root')
def map_nodes(self, f: Callable[['Node[Data, Sub]'], 'Node[Data, Sub]']) -> 'Node[Data, Sub]':
return f(MapNode(self.sub.valmap(lambda a: a.map_nodes(f))))
class LeafNode(Generic[Data], Node[Data, None]):
def __init__(self, data: Data) -> None:
self.data = data
def _strings(self) -> LazyList[Data]:
return LazyLists.cons(self.data)
@property
def sub(self) -> None:
pass
@property
def sub_l(self) -> LazyList[Node[Data, Any]]:
return LazyList([])
def foreach(self, f: Callable[[Node], None]) -> None:
f(self)
def filter(self, pred: Callable[[Node], bool]) -> Node:
return self
@property
def flatten(self) -> LazyList[Any]:
return LazyLists.cons(self)
def contains(self, target: Node) -> Boolean:
return false
def lift(self, key: Key) -> 'SubTree':
return SubTreeInvalid(key, 'LeafNode cannot be indexed')
def __str__(self) -> str:
return '{}({})'.format(self.__class__.__name__, self.data)
def __repr__(self) -> str:
return str(self)
@property
def empty(self) -> Boolean:
return false
@property
def s(self) -> 'SubTree':
return SubTreeLeaf(self, 'root')
def replace(self, sub: Data) -> Node:
return LeafNode(sub)
def map_nodes(self, f: Callable[['Node[Data, Sub]'], 'Node[Data, Sub]']) -> 'Node[Data, Sub]':
return f(self)
class TreeFlatMap(FlatMap, tpe=Node):
def flat_map(self, fa: Node[A, Any], f: Callable[[A], Node[B, Any]]) -> Node[B, Any]:
return (
self.flat_map_inode(fa, f)
if isinstance(fa, Inode) else
self.flat_map_leaf(fa, f)
)
def flat_map_inode(self, fa: Inode[A, Any], f: Callable[[A], Node[B, Any]]) -> Node[B, Any]:
def err() -> Inode[A, Any]:
raise Exception(f'invalid sub for `TreeFlatMap.flat_map_inode`: {fa}')
return (
self.flat_map_map(fa, f)
if isinstance(fa, MapNode) else
self.flat_map_list(fa, f)
if isinstance(fa, ListNode) else
err()
)
def flat_map_map(self, fa: MapNode[A], f: Callable[[A], Node[B, Any]]) -> Node[B, Any]:
return MapNode(fa.sub.valmap(lambda a: self.flat_map(a, f)))
def flat_map_list(self, fa: ListNode[A], f: Callable[[A], Node[B, Any]]) -> Node[B, Any]:
return ListNode(fa.sub.map(lambda a: self.flat_map(a, f)))
def flat_map_leaf(self, fa: LeafNode[A], f: Callable[[A], Node[B, Any]]) -> Node[B, Any]:
return f(fa.data)
def map(self, fa: Node[A, Any], f: Callable[[A], B]) -> Node[B, Any]:
return (
self.map_inode(fa, f)
if isinstance(fa, Inode) else
self.map_leaf(fa, f)
)
def map_inode(self, fa: Inode[A, Any], f: Callable[[A], B]) -> Node[B, Any]:
def err() -> Inode[A, Any]:
raise Exception(f'invalid sub for `TreeFlatMap.map_inode`: {fa}')
return (
self.map_map(fa, f)
if isinstance(fa, MapNode) else
self.map_list(fa, f)
if isinstance(fa, ListNode) else
err()
)
def map_map(self, fa: MapNode[A], f: Callable[[A], B]) -> Node[B, Any]:
return MapNode(fa.data.valmap(lambda a: self.map(a, f)))
def map_list(self, fa: ListNode[A], f: Callable[[A], B]) -> Node[B, Any]:
return ListNode(fa.sub.map(lambda a: self.map(a, f)))
def map_leaf(self, fa: LeafNode[A], f: Callable[[A], B]) -> Node[B, Any]:
return LeafNode(f(fa.data))
class SubTree(Implicits, implicits=True, auto=True):
@staticmethod
def cons(fa: Node, key: Key) -> 'SubTree':
return ( # type: ignore
cast(SubTree, SubTreeList(fa, key))
if isinstance(fa, ListNode) else
SubTreeLeaf(fa, key)
if isinstance(fa, LeafNode) else
SubTreeMap(fa, key)
)
@staticmethod
def from_maybe(data: Maybe[Node], key: Key, err: str) -> 'SubTree':
return data.cata(SubTree.cons, SubTreeInvalid(key, err))
def __getattr__(self, key: Key) -> 'SubTree':
try:
return super().__getattr__(key)
except AttributeError:
return self._getattr(key)
@abc.abstractmethod
def _getattr(self, key: Key) -> 'SubTree':
...
def __getitem__(self, key: Key) -> 'SubTree':
return self._getitem(key)
@abc.abstractmethod
def _getitem(self, key: Key) -> 'SubTree':
...
def cata(self, f: Callable[[Node], A], b: Union[A, Callable[[], A]]) -> A:
return (
f(self.data)
if isinstance(self, SubTreeValid)
else call_by_name(b)
)
@abc.abstractproperty
def e(self) -> Either[str, Node]:
...
@abc.abstractproperty
def valid(self) -> Boolean:
...
@abc.abstractproperty
def strings(self) -> LazyList[str]:
...
@abc.abstractproperty
def show(self) -> LazyList[str]:
...
@property
def rule(self) -> Either[str, str]:
return self.e.map(_.rule)
class SubTreeValid(SubTree):
def __init__(self, data: Node, key: Key) -> None:
self.data = data
self._key = key
def __str__(self) -> str:
return '{}({})'.format(self.__class__.__name__, self.data)
@property
def e(self) -> Either[str, Node]:
return Right(self.data)
@property
def valid(self) -> Boolean:
return true
@property
def strings(self) -> LazyList[str]:
return self.data.strings
@property
def show(self) -> str:
return self.data.show
class SubTreeList(SubTreeValid):
@property
def head(self) -> SubTree:
return self[0]
@property
def last(self) -> SubTree:
return self[-1]
def _getattr(self, key: Key) -> SubTree:
return SubTreeInvalid(key, 'cannot access attrs in SubTreeList')
def _getitem(self, key: Key) -> SubTree:
return self.data.lift(key)
def __str__(self) -> str:
return '{}({})'.format(self.__class__.__name__, self.data.sub_l.drain.join_comma)
@property
def _keys(self) -> LazyList[str]:
return self.data.k
class SubTreeLeaf(SubTreeValid):
def err(self, key: Key) -> SubTree:
return SubTreeInvalid(key, 'cannot access attrs in SubTreeLeaf')
def _getattr(self, key: Key) -> SubTree:
return self.err(key)
def _getitem(self, key: Key) -> SubTree:
return self.err(key)
class SubTreeMap(SubTreeValid):
def _getattr(self, key: Key) -> SubTree:
return self.data.lift(key)
def _getitem(self, key: Key) -> SubTree:
return self.data.lift(key)
@property
def _keys(self) -> LazyList[str]:
return self.data.k
class SubTreeInvalid(SubTree):
def __init__(self, key: Key, reason: str) -> None:
self.key = key
self.reason = reason
def __str__(self) -> str:
s = 'SubTreeInvalid({}, {})'
return s.format(self.key, self.reason)
def __repr__(self) -> str:
return str(self)
@property
def valid(self) -> Boolean:
return false
@property
def _error(self) -> str:
return 'no subtree `{}`: {}'.format(self.key, self.reason)
def _getattr(self, key: Key) -> SubTree:
return self
def _getitem(self, key: Key) -> SubTree:
return self
@property
def e(self) -> Either[str, Node]:
return Left(self._error)
@property
def strings(self) -> LazyList[str]:
return LazyList([])
@property
def show(self) -> LazyList[str]:
return str(self)
__all__ = ('Node', 'Inode', 'LeafNode', 'MapNode', 'LeafNode', 'ListNode')
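
# Minimal usage sketch (illustrative only; it assumes the amino Map and LazyList
# constructors imported above behave as they are used elsewhere in this module):
#
#   tree = MapNode(Map(word=LeafNode('hello'),
#                      items=ListNode(LazyList([LeafNode(1), LeafNode(2)]))))
#   tree.s.word.e      # Right(LeafNode('hello')) -- attribute access via SubTreeMap
#   tree['items'][0]   # SubTreeLeaf wrapping LeafNode(1)
#   tree.show          # indented, human-readable rendering of the whole tree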
| mit | -5,395,947,376,289,710,000 | 25.76 | 110 | 0.55947 | false |
marshallward/payu | test/test_manifest.py | 1 | 16595 | import copy
import os
from pathlib import Path
import pdb
import pytest
import shutil
import yaml
import payu
import payu.models.test
from .common import cd, make_random_file, get_manifests
from .common import tmpdir, ctrldir, labdir, workdir
from .common import sweep_work, payu_init, payu_setup
from .common import config as config_orig
from .common import write_config
from .common import make_exe, make_inputs, make_restarts, make_all_files
verbose = True
config = copy.deepcopy(config_orig)
def make_config_files():
"""
    Create the configuration files required by the test model
"""
config_files = payu.models.test.config_files
for file in config_files:
make_random_file(ctrldir/file, 29)
def setup_module(module):
"""
Put any test-wide setup code in here, e.g. creating test files
"""
if verbose:
print("setup_module module:%s" % module.__name__)
# Should be taken care of by teardown, in case remnants lying around
try:
shutil.rmtree(tmpdir)
except FileNotFoundError:
pass
try:
tmpdir.mkdir()
labdir.mkdir()
ctrldir.mkdir()
make_all_files()
except Exception as e:
print(e)
write_config(config)
def teardown_module(module):
"""
Put any test-wide teardown code in here, e.g. removing test outputs
"""
if verbose:
print("teardown_module module:%s" % module.__name__)
try:
# shutil.rmtree(tmpdir)
print('removing tmp')
except Exception as e:
print(e)
# These are integration tests. They have an undesirable dependence on each
# other. It would be possible to make them independent, but then they'd
# be reproducing previous "tests", like init. So this design is deliberate
# but compromised.  It means that, when running, an error in one test can cascade
# and cause other tests to fail.
#
# Unfortunate but there you go.
def test_init():
# Initialise a payu laboratory
with cd(ctrldir):
payu_init(None, None, str(labdir))
# Check all the correct directories have been created
for subdir in ['bin', 'input', 'archive', 'codebase']:
assert((labdir / subdir).is_dir())
def test_setup():
# Create some input and executable files
make_inputs()
make_exe()
bindir = labdir / 'bin'
exe = config['exe']
make_config_files()
# Run setup
payu_setup(lab_path=str(labdir))
assert(workdir.is_symlink())
assert(workdir.is_dir())
assert((workdir/exe).resolve() == (bindir/exe).resolve())
workdirfull = workdir.resolve()
config_files = payu.models.test.config_files
for f in config_files + ['config.yaml']:
assert((workdir/f).is_file())
for i in range(1, 4):
assert((workdir/'input_00{i}.bin'.format(i=i)).stat().st_size
== 1000**2 + i)
manifests = get_manifests(ctrldir/'manifests')
for mfpath in manifests:
assert((ctrldir/'manifests'/mfpath).is_file())
# Check manifest in work directory is the same as control directory
assert(manifests == get_manifests(workdir/'manifests'))
# Sweep workdir and recreate
sweep_work()
assert(not workdir.is_dir())
assert(not workdirfull.is_dir())
payu_setup(lab_path=str(labdir))
assert(manifests == get_manifests(workdir/'manifests'))
def test_setup_restartdir():
restartdir = labdir / 'archive' / 'restarts'
# Set a restart directory in config
config['restart'] = str(restartdir)
write_config(config)
make_restarts()
manifests = get_manifests(ctrldir/'manifests')
payu_setup(lab_path=str(labdir))
# Manifests should not match, as have added restarts
assert(not manifests == get_manifests(ctrldir/'manifests'))
def test_exe_reproduce():
# Set reproduce exe to True
config['manifest']['reproduce']['exe'] = True
write_config(config)
manifests = get_manifests(ctrldir/'manifests')
# Run setup with unchanged exe but reproduce exe set to True.
# Should run without error
payu_setup(lab_path=str(labdir))
assert(manifests == get_manifests(ctrldir/'manifests'))
bindir = labdir / 'bin'
exe = config['exe']
# Update the modification time of the executable, should run ok
(bindir/exe).touch()
    # Run setup with the touched exe; reproduce exe is still True, but only the
    # fasthash differs so the full hash still matches
payu_setup(lab_path=str(labdir))
# Manifests will have changed as fasthash is altered
assert(not manifests == get_manifests(ctrldir/'manifests'))
# Reset manifests "truth"
manifests = get_manifests(ctrldir/'manifests')
# Delete exe path from config, should get it from manifest
del(config['exe'])
write_config(config)
    # Run setup with the exe path taken from the manifest; reproduce exe is still True
payu_setup(lab_path=str(labdir))
# Manifests will not have changed
assert(manifests == get_manifests(ctrldir/'manifests'))
assert((workdir/exe).resolve() == (bindir/exe).resolve())
# Reinstate exe path
config['exe'] = exe
# Recreate fake executable file
make_exe()
# Run setup again, which should raise an error due to changed executable
with pytest.raises(SystemExit) as pytest_wrapped_e:
        # Setup should fail because the executable has changed while reproduce exe is True
payu_setup(lab_path=str(labdir))
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 1
# Change reproduce exe back to False
config['manifest']['reproduce']['exe'] = False
write_config(config)
# Run setup with changed exe but reproduce exe set to False
payu_setup(lab_path=str(labdir))
# Check manifests have changed as expected
assert(not manifests == get_manifests(ctrldir/'manifests'))
# Reset manifests "truth"
manifests = get_manifests(ctrldir/'manifests')
# Make exe in config.yaml unfindable by giving it a non-existent
# path but crucially the same name as the proper executable
config['exe'] = '/bogus/test.exe'
# Change reproduce exe back to True
config['manifest']['reproduce']['exe'] = True
write_config(config)
# Run setup with changed exe but reproduce exe set to True. Should
# work fine as the exe path is in the manifest
payu_setup(lab_path=str(labdir))
assert(manifests == get_manifests(ctrldir/'manifests'))
def test_input_reproduce():
inputdir = labdir / 'input' / config['input']
inputdir.mkdir(parents=True, exist_ok=True)
# Set reproduce input to True
config['manifest']['reproduce']['exe'] = False
config['manifest']['reproduce']['input'] = True
config['exe'] = config_orig['exe']
write_config(config)
manifests = get_manifests(ctrldir/'manifests')
# Run setup with unchanged input reproduce input set to True
# to make sure works with no changes
payu_setup(lab_path=str(labdir))
assert(manifests == get_manifests(ctrldir/'manifests'))
# Delete input directory from config, should still work from
# manifest with input reproduce True
input = config['input']
write_config(config)
del(config['input'])
# Run setup, should work
payu_setup(lab_path=str(labdir))
assert(manifests == get_manifests(ctrldir/'manifests'))
# Update modification times for input files
for i in range(1, 4):
(inputdir/'input_00{i}.bin'.format(i=i)).touch()
# Run setup, should work as only fasthash will differ, code then
# checks full hash and updates fasthash if fullhash matches
payu_setup(lab_path=str(labdir))
# Manifests should no longer match as fasthashes have been updated
assert(not manifests == get_manifests(ctrldir/'manifests'))
# Reset manifest "truth"
manifests = get_manifests(ctrldir/'manifests')
# Re-create input files. Have to set input path for this purpose
# but not written to config.yaml, so doesn't affect payu commands
config['input'] = input
make_inputs()
del(config['input'])
# Run setup again, which should raise an error due to changed inputs
with pytest.raises(SystemExit) as pytest_wrapped_e:
payu_setup(lab_path=str(labdir))
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 1
# Change reproduce input back to False
config['manifest']['reproduce']['input'] = False
write_config(config)
# Run setup with changed inputs but reproduce input set to False
payu_setup(lab_path=str(labdir))
# Check manifests have changed as expected and input files
# linked in work
assert(not manifests == get_manifests(ctrldir/'manifests'))
for i in range(1, 4):
assert((workdir/'input_00{i}.bin'.format(i=i)).is_file())
# Reset manifest "truth"
manifests = get_manifests(ctrldir/'manifests')
# Delete input manifest
(ctrldir/'manifests'/'input.yaml').unlink()
# Setup with no input dir and no manifest. Should work ok
payu_setup(lab_path=str(labdir))
# Check there are no linked inputs
for i in range(1, 4):
assert(not (workdir/'input_00{i}.bin'.format(i=i)).is_file())
# Set input path back and recreate input manifest
config['input'] = input
write_config(config)
payu_setup(lab_path=str(labdir))
def test_input_scaninputs():
# Re-create input files
make_config_files()
make_inputs()
inputdir = labdir / 'input' / config['input']
inputdir.mkdir(parents=True, exist_ok=True)
# Set scaninputs input to True
config['manifest']['scaninputs'] = True
write_config(config)
# Run setup with unchanged input
payu_setup(lab_path=str(labdir))
manifests = get_manifests(ctrldir/'manifests')
# Set scaninputs input to False
config['manifest']['scaninputs'] = False
write_config(config)
# Run setup, should work and manifests unchanged
payu_setup(lab_path=str(labdir))
assert(manifests == get_manifests(ctrldir/'manifests'))
# Update modification times for input files
for i in range(1, 4):
(inputdir/'input_00{i}.bin'.format(i=i)).touch()
# Run setup, should work as only fasthash will differ, code then
# checks full hash and updates fasthash if fullhash matches
payu_setup(lab_path=str(labdir))
# Manifests should no longer match as fasthashes have been updated
assert(not manifests == get_manifests(ctrldir/'manifests'))
# Reset manifest "truth"
manifests = get_manifests(ctrldir/'manifests')
# Re-create input files
make_inputs()
# Run setup again. Should be fine, but manifests changed
payu_setup(lab_path=str(labdir))
assert(not manifests == get_manifests(ctrldir/'manifests'))
# Reset manifest "truth"
manifests = get_manifests(ctrldir/'manifests')
# Make a new input file
(inputdir/'lala').touch()
# Run setup again. Should be fine, manifests unchanged as
# scaninputs=False
payu_setup(lab_path=str(labdir))
assert(manifests == get_manifests(ctrldir/'manifests'))
# Set scaninputs input to True
config['manifest']['scaninputs'] = True
write_config(config)
# Run setup again. Should be fine, but manifests changed now
# as scaninputs=False
payu_setup(lab_path=str(labdir))
assert(not manifests == get_manifests(ctrldir/'manifests'))
assert((workdir/'lala').is_file())
# Delete silly input file
(inputdir/'lala').unlink()
# Re-run after removing silly input file
payu_setup(lab_path=str(labdir))
# Reset manifest "truth"
manifests = get_manifests(ctrldir/'manifests')
def test_restart_reproduce():
# Set reproduce restart to True
config['manifest']['reproduce']['input'] = False
config['manifest']['reproduce']['restart'] = True
del(config['restart'])
write_config(config)
manifests = get_manifests(ctrldir/'manifests')
# Run setup with unchanged restarts
payu_setup(lab_path=str(labdir))
assert(manifests == get_manifests(ctrldir/'manifests'))
restartdir = labdir / 'archive' / 'restarts'
# Change modification times on restarts
for i in range(1, 4):
(restartdir/'restart_00{i}.bin'.format(i=i)).touch()
# Run setup with touched restarts, should work with modified
# manifest
payu_setup(lab_path=str(labdir))
# Manifests should have changed
assert(not manifests == get_manifests(ctrldir/'manifests'))
# Reset manifest "truth"
manifests = get_manifests(ctrldir/'manifests')
# Modify restart files
make_restarts()
# Run setup again, which should raise an error due to changed restarts
with pytest.raises(SystemExit) as pytest_wrapped_e:
        # Setup should fail because the restarts have changed while reproduce restart is True
payu_setup(lab_path=str(labdir))
# Set reproduce restart to False
config['manifest']['reproduce']['restart'] = False
write_config(config)
# Run setup with modified restarts reproduce set to False
payu_setup(lab_path=str(labdir))
# Manifests should have changed
assert(not manifests == get_manifests(ctrldir/'manifests'))
def test_all_reproduce():
# Remove reproduce options from config
del(config['manifest']['reproduce'])
write_config(config)
# Run setup
payu_setup(lab_path=str(labdir))
manifests = get_manifests(ctrldir/'manifests')
make_all_files()
# Run setup with reproduce=True, which should raise an error as
# all files changed
with pytest.raises(SystemExit) as pytest_wrapped_e:
        # Setup should fail because all files have changed while reproduce is True
payu_setup(lab_path=str(labdir), reproduce=True)
# Run setup
payu_setup(lab_path=str(labdir))
# Manifests should have changed
assert(not manifests == get_manifests(ctrldir/'manifests'))
def test_get_all_fullpaths():
make_all_files()
make_config_files()
# Run setup
payu_setup(lab_path=str(labdir))
manifests = get_manifests(ctrldir/'manifests')
sweep_work()
with cd(ctrldir):
lab = payu.laboratory.Laboratory(lab_path=str(labdir))
expt = payu.experiment.Experiment(lab, reproduce=False)
expt.setup()
files = expt.manifest.get_all_fullpaths()
allfiles = []
for mf in manifests:
for f in manifests[mf]:
allfiles.append(manifests[mf][f]['fullpath'])
assert(set(files) == set(allfiles))
def test_get_hashes():
make_all_files()
make_config_files()
# Run setup
payu_setup(lab_path=str(labdir))
manifests = get_manifests(ctrldir/'manifests')
sweep_work()
with cd(ctrldir):
lab = payu.laboratory.Laboratory(lab_path=str(labdir))
expt = payu.experiment.Experiment(lab, reproduce=False)
expt.setup()
hashes = expt.manifest.manifests['input'].get_hashes('md5')
allhashes = []
for f in manifests['input.yaml']:
allhashes.append(manifests['input.yaml'][f]['hashes']['md5'])
assert(set(hashes) == set(allhashes))
def test_set_hash():
# Revert to original config
config = copy.deepcopy(config_orig)
write_config(config)
make_all_files()
make_config_files()
# Run setup
payu_setup(lab_path=str(labdir))
manifests = get_manifests(ctrldir/'manifests')
sweep_work()
# Remove existing manifests. Don't support changing
# hashes and retaining manifests
shutil.rmtree(ctrldir/'manifests')
# Change full hash from md5 to sha256
config['manifest']['fullhash'] = 'sha256'
write_config(config)
# Run setup
payu_setup(lab_path=str(labdir))
assert(not manifests == get_manifests(ctrldir/'manifests'))
manifests = get_manifests(ctrldir/'manifests')
for mf in manifests:
for f in manifests[mf]:
assert(manifests[mf][f]['hashes']['sha256'])
assert(len(manifests[mf][f]['hashes']['sha256']) == 64)
sweep_work()
# Remove existing manifests. Don't support changing
# hashes and retaining manifests
shutil.rmtree(ctrldir / 'manifests')
# Change full hash from md5 to binhash
config['manifest']['fullhash'] = 'binhash'
write_config(config)
# Run setup
payu_setup(lab_path=str(labdir))
manifests = get_manifests(ctrldir/'manifests')
for mf in manifests:
for f in manifests[mf]:
assert(list(manifests[mf][f]['hashes'].keys()) == ['binhash'])
def test_hard_sweep():
# Sweep workdir
sweep_work(hard_sweep=True)
# Check all the correct directories have been removed
assert(not (labdir / 'archive' / 'ctrl').is_dir())
assert(not (labdir / 'work' / 'ctrl').is_dir())
| apache-2.0 | -2,940,327,849,327,437,000 | 27.367521 | 76 | 0.666225 | false |
KyleJamesWalker/ansible-modules-core | cloud/amazon/ec2_asg.py | 1 | 34388 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: ec2_asg
short_description: Create or delete AWS Autoscaling Groups
description:
- Can create or delete AWS Autoscaling Groups
- Works with the ec2_lc module to manage Launch Configurations
version_added: "1.6"
author: "Gareth Rushgrove (@garethr)"
options:
state:
description:
- register or deregister the instance
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for group to be created or deleted
required: true
load_balancers:
description:
- List of ELB names to use for the group
required: false
availability_zones:
description:
- List of availability zone names in which to create the group. Defaults to all the availability zones in the region if vpc_zone_identifier is not set.
required: false
launch_config_name:
description:
- Name of the Launch configuration to use for the group. See the ec2_lc module for managing these.
required: true
min_size:
description:
- Minimum number of instances in group, if unspecified then the current group value will be used.
required: false
max_size:
description:
- Maximum number of instances in group, if unspecified then the current group value will be used.
required: false
desired_capacity:
description:
- Desired number of instances in group, if unspecified then the current group value will be used.
required: false
replace_all_instances:
description:
      - In a rolling fashion, replace all instances that still use an old launch configuration with instances based on the current launch configuration.
required: false
version_added: "1.8"
default: False
replace_batch_size:
description:
- Number of instances you'd like to replace at a time. Used with replace_all_instances.
required: false
version_added: "1.8"
default: 1
replace_instances:
description:
      - List of instance_ids belonging to the named ASG that you would like to terminate and have replaced with instances matching the current launch configuration.
required: false
version_added: "1.8"
default: None
lc_check:
description:
      - Check to make sure instances that are being replaced with replace_instances do not already have the current launch_config.
required: false
version_added: "1.8"
default: True
vpc_zone_identifier:
description:
- List of VPC subnets to use
required: false
default: None
tags:
description:
- A list of tags to add to the Auto Scale Group. Optional key is 'propagate_at_launch', which defaults to true.
required: false
default: None
version_added: "1.7"
health_check_period:
description:
- Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
required: false
    default: 300 seconds
version_added: "1.7"
health_check_type:
description:
- The service you want the health status from, Amazon EC2 or Elastic Load Balancer.
required: false
default: EC2
version_added: "1.7"
choices: ['EC2', 'ELB']
default_cooldown:
description:
- The number of seconds after a scaling activity completes before another can begin.
required: false
default: 300 seconds
version_added: "2.0"
wait_timeout:
description:
      - How long to wait for instances to become viable when replaced. Used in conjunction with the replace_instances option.
default: 300
version_added: "1.8"
wait_for_instances:
description:
- Wait for the ASG instances to be in a ready state before exiting. If instances are behind an ELB, it will wait until the ELB determines all instances have a lifecycle_state of "InService" and a health_status of "Healthy".
version_added: "1.9"
default: yes
required: False
termination_policies:
description:
- An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity.
      - For 'Default', when used to create a new autoscaling group, the "Default" value is used. When used to change an existing autoscaling group, the current termination policies are maintained.
required: false
default: Default
choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default']
version_added: "2.0"
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
# Basic configuration
- ec2_asg:
name: special
load_balancers: [ 'lb1', 'lb2' ]
availability_zones: [ 'eu-west-1a', 'eu-west-1b' ]
launch_config_name: 'lc-1'
min_size: 1
max_size: 10
desired_capacity: 5
vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ]
tags:
- environment: production
propagate_at_launch: no
# Rolling ASG Updates
Below is an example of how to assign a new launch config to an ASG and terminate old instances.
All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in
a rolling fashion and replaced with instances using the current launch configuration, "my_new_lc".
This could also be considered a rolling deploy of a pre-baked AMI.
If this is a newly created group, the instances will not be replaced since all instances
will have the current launch configuration.
- name: create launch config
ec2_lc:
name: my_new_lc
image_id: ami-lkajsf
key_name: mykey
region: us-east-1
security_groups: sg-23423
instance_type: m1.small
assign_public_ip: yes
- ec2_asg:
name: myasg
launch_config_name: my_new_lc
health_check_period: 60
health_check_type: ELB
replace_all_instances: yes
min_size: 5
max_size: 5
desired_capacity: 5
region: us-east-1
To only replace a couple of instances instead of all of them, supply a list
to "replace_instances":
- ec2_asg:
name: myasg
launch_config_name: my_new_lc
health_check_period: 60
health_check_type: ELB
replace_instances:
- i-b345231
- i-24c2931
min_size: 5
max_size: 5
desired_capacity: 5
region: us-east-1
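
The examples below are minimal sketches rather than excerpts from a real playbook:
the first removes a group (deletion only needs the group name), the second sets an
explicit termination policy for an existing group.

- ec2_asg:
    name: myasg
    state: absent
    region: us-east-1

- ec2_asg:
    name: myasg
    launch_config_name: my_new_lc
    termination_policies:
      - OldestLaunchConfiguration
      - Default
    min_size: 5
    max_size: 5
    desired_capacity: 5
    region: us-east-1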
'''
import time
import logging as log
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
log.getLogger('boto').setLevel(log.CRITICAL)
#log.basicConfig(filename='/tmp/ansible_ec2_asg.log',level=log.DEBUG, format='%(asctime)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
try:
import boto.ec2.autoscale
from boto.ec2.autoscale import AutoScaleConnection, AutoScalingGroup, Tag
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
ASG_ATTRIBUTES = ('availability_zones', 'default_cooldown', 'desired_capacity',
'health_check_period', 'health_check_type', 'launch_config_name',
'load_balancers', 'max_size', 'min_size', 'name', 'placement_group',
'termination_policies', 'vpc_zone_identifier')
INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name')
def enforce_required_arguments(module):
''' As many arguments are not required for autoscale group deletion
they cannot be mandatory arguments for the module, so we enforce
them here '''
missing_args = []
for arg in ('min_size', 'max_size', 'launch_config_name'):
if module.params[arg] is None:
missing_args.append(arg)
if missing_args:
module.fail_json(msg="Missing required arguments for autoscaling group create/update: %s" % ",".join(missing_args))
def get_properties(autoscaling_group):
properties = dict((attr, getattr(autoscaling_group, attr)) for attr in ASG_ATTRIBUTES)
# Ugly hack to make this JSON-serializable. We take a list of boto Tag
# objects and replace them with a dict-representation. Needed because the
# tags are included in ansible's return value (which is jsonified)
if 'tags' in properties and isinstance(properties['tags'], list):
serializable_tags = {}
for tag in properties['tags']:
serializable_tags[tag.key] = [tag.value, tag.propagate_at_launch]
properties['tags'] = serializable_tags
properties['healthy_instances'] = 0
properties['in_service_instances'] = 0
properties['unhealthy_instances'] = 0
properties['pending_instances'] = 0
properties['viable_instances'] = 0
properties['terminating_instances'] = 0
instance_facts = {}
if autoscaling_group.instances:
properties['instances'] = [i.instance_id for i in autoscaling_group.instances]
for i in autoscaling_group.instances:
instance_facts[i.instance_id] = {'health_status': i.health_status,
'lifecycle_state': i.lifecycle_state,
'launch_config_name': i.launch_config_name }
if i.health_status == 'Healthy' and i.lifecycle_state == 'InService':
properties['viable_instances'] += 1
if i.health_status == 'Healthy':
properties['healthy_instances'] += 1
else:
properties['unhealthy_instances'] += 1
if i.lifecycle_state == 'InService':
properties['in_service_instances'] += 1
if i.lifecycle_state == 'Terminating':
properties['terminating_instances'] += 1
if i.lifecycle_state == 'Pending':
properties['pending_instances'] += 1
properties['instance_facts'] = instance_facts
properties['load_balancers'] = autoscaling_group.load_balancers
if getattr(autoscaling_group, "tags", None):
properties['tags'] = dict((t.key, t.value) for t in autoscaling_group.tags)
return properties
def elb_dreg(asg_connection, module, group_name, instance_id):
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
as_group = asg_connection.get_all_groups(names=[group_name])[0]
wait_timeout = module.params.get('wait_timeout')
props = get_properties(as_group)
count = 1
if as_group.load_balancers and as_group.health_check_type == 'ELB':
try:
elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
else:
return
for lb in as_group.load_balancers:
elb_connection.deregister_instances(lb, instance_id)
log.debug("De-registering {0} from ELB {1}".format(instance_id, lb))
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and count > 0:
count = 0
for lb in as_group.load_balancers:
lb_instances = elb_connection.describe_instance_health(lb)
for i in lb_instances:
if i.instance_id == instance_id and i.state == "InService":
count += 1
log.debug("{0}: {1}, {2}".format(i.instance_id, i.state, i.description))
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for instance to deregister. {0}".format(time.asctime()))
def elb_healthy(asg_connection, elb_connection, module, group_name):
healthy_instances = []
as_group = asg_connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
# get healthy, inservice instances from ASG
instances = []
for instance, settings in props['instance_facts'].items():
if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
instances.append(instance)
log.debug("ASG considers the following instances InService and Healthy: {0}".format(instances))
log.debug("ELB instance status:")
for lb in as_group.load_balancers:
# we catch a race condition that sometimes happens if the instance exists in the ASG
        # but has not yet shown up in the ELB
try:
lb_instances = elb_connection.describe_instance_health(lb, instances=instances)
except boto.exception.InvalidInstance:
pass
for i in lb_instances:
if i.state == "InService":
healthy_instances.append(i.instance_id)
log.debug("{0}: {1}".format(i.instance_id, i.state))
return len(healthy_instances)
def wait_for_elb(asg_connection, module, group_name):
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
wait_timeout = module.params.get('wait_timeout')
# if the health_check_type is ELB, we want to query the ELBs directly for instance
    # status, so as to avoid the health check grace period that is granted to ASG instances
as_group = asg_connection.get_all_groups(names=[group_name])[0]
if as_group.load_balancers and as_group.health_check_type == 'ELB':
log.debug("Waiting for ELB to consider intances healthy.")
try:
elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
wait_timeout = time.time() + wait_timeout
healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name)
while healthy_instances < as_group.min_size and wait_timeout > time.time():
healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name)
log.debug("ELB thinks {0} instances are healthy.".format(healthy_instances))
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for ELB instances to be healthy. %s" % time.asctime())
log.debug("Waiting complete. ELB thinks {0} instances are healthy.".format(healthy_instances))
def create_autoscaling_group(connection, module):
group_name = module.params.get('name')
load_balancers = module.params['load_balancers']
availability_zones = module.params['availability_zones']
launch_config_name = module.params.get('launch_config_name')
min_size = module.params['min_size']
max_size = module.params['max_size']
desired_capacity = module.params.get('desired_capacity')
vpc_zone_identifier = module.params.get('vpc_zone_identifier')
set_tags = module.params.get('tags')
health_check_period = module.params.get('health_check_period')
health_check_type = module.params.get('health_check_type')
default_cooldown = module.params.get('default_cooldown')
wait_for_instances = module.params.get('wait_for_instances')
as_groups = connection.get_all_groups(names=[group_name])
wait_timeout = module.params.get('wait_timeout')
termination_policies = module.params.get('termination_policies')
if not vpc_zone_identifier and not availability_zones:
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
elif vpc_zone_identifier:
vpc_zone_identifier = ','.join(vpc_zone_identifier)
asg_tags = []
for tag in set_tags:
for k,v in tag.iteritems():
if k !='propagate_at_launch':
asg_tags.append(Tag(key=k,
value=v,
propagate_at_launch=bool(tag.get('propagate_at_launch', True)),
resource_id=group_name))
if not as_groups:
if not vpc_zone_identifier and not availability_zones:
availability_zones = module.params['availability_zones'] = [zone.name for zone in ec2_connection.get_all_zones()]
enforce_required_arguments(module)
launch_configs = connection.get_all_launch_configurations(names=[launch_config_name])
ag = AutoScalingGroup(
group_name=group_name,
load_balancers=load_balancers,
availability_zones=availability_zones,
launch_config=launch_configs[0],
min_size=min_size,
max_size=max_size,
desired_capacity=desired_capacity,
vpc_zone_identifier=vpc_zone_identifier,
connection=connection,
tags=asg_tags,
health_check_period=health_check_period,
health_check_type=health_check_type,
default_cooldown=default_cooldown,
termination_policies=termination_policies)
try:
connection.create_auto_scaling_group(ag)
if wait_for_instances:
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
wait_for_elb(connection, module, group_name)
as_group = connection.get_all_groups(names=[group_name])[0]
asg_properties = get_properties(as_group)
changed = True
return(changed, asg_properties)
except BotoServerError, e:
module.fail_json(msg=str(e))
else:
as_group = as_groups[0]
changed = False
for attr in ASG_ATTRIBUTES:
if module.params.get(attr, None) is not None:
module_attr = module.params.get(attr)
if attr == 'vpc_zone_identifier':
module_attr = ','.join(module_attr)
group_attr = getattr(as_group, attr)
# we do this because AWS and the module may return the same list
# sorted differently
try:
module_attr.sort()
except:
pass
try:
group_attr.sort()
except:
pass
if group_attr != module_attr:
changed = True
setattr(as_group, attr, module_attr)
if len(set_tags) > 0:
have_tags = {}
want_tags = {}
for tag in asg_tags:
want_tags[tag.key] = [tag.value, tag.propagate_at_launch]
dead_tags = []
for tag in as_group.tags:
have_tags[tag.key] = [tag.value, tag.propagate_at_launch]
if tag.key not in want_tags:
changed = True
dead_tags.append(tag)
if dead_tags != []:
connection.delete_tags(dead_tags)
if have_tags != want_tags:
changed = True
connection.create_or_update_tags(asg_tags)
# handle loadbalancers separately because None != []
load_balancers = module.params.get('load_balancers') or []
if load_balancers and as_group.load_balancers != load_balancers:
changed = True
as_group.load_balancers = module.params.get('load_balancers')
if changed:
try:
as_group.update()
except BotoServerError, e:
module.fail_json(msg=str(e))
if wait_for_instances:
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
wait_for_elb(connection, module, group_name)
try:
as_group = connection.get_all_groups(names=[group_name])[0]
asg_properties = get_properties(as_group)
except BotoServerError, e:
module.fail_json(msg=str(e))
return(changed, asg_properties)
def delete_autoscaling_group(connection, module):
group_name = module.params.get('name')
groups = connection.get_all_groups(names=[group_name])
if groups:
group = groups[0]
group.max_size = 0
group.min_size = 0
group.desired_capacity = 0
group.update()
instances = True
while instances:
tmp_groups = connection.get_all_groups(names=[group_name])
if tmp_groups:
tmp_group = tmp_groups[0]
if not tmp_group.instances:
instances = False
time.sleep(10)
group.delete()
while len(connection.get_all_groups(names=[group_name])):
time.sleep(5)
changed=True
return changed
else:
changed=False
return changed
def get_chunks(l, n):
for i in xrange(0, len(l), n):
yield l[i:i+n]
def update_size(group, max_size, min_size, dc):
log.debug("setting ASG sizes")
log.debug("minimum size: {0}, desired_capacity: {1}, max size: {2}".format(min_size, dc, max_size ))
group.max_size = max_size
group.min_size = min_size
group.desired_capacity = dc
group.update()
def replace(connection, module):
batch_size = module.params.get('replace_batch_size')
wait_timeout = module.params.get('wait_timeout')
group_name = module.params.get('name')
max_size = module.params.get('max_size')
min_size = module.params.get('min_size')
desired_capacity = module.params.get('desired_capacity')
lc_check = module.params.get('lc_check')
replace_instances = module.params.get('replace_instances')
as_group = connection.get_all_groups(names=[group_name])[0]
wait_for_new_inst(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances')
props = get_properties(as_group)
instances = props.get('instances', [])
if replace_instances:
instances = replace_instances
# check to see if instances are replaceable if checking launch configs
new_instances, old_instances = get_instances_by_lc(props, lc_check, instances)
num_new_inst_needed = desired_capacity - len(new_instances)
if lc_check:
if num_new_inst_needed == 0 and old_instances:
log.debug("No new instances needed, but old instances are present. Removing old instances")
terminate_batch(connection, module, old_instances, instances, True)
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
changed = True
return(changed, props)
# we don't want to spin up extra instances if not necessary
if num_new_inst_needed < batch_size:
log.debug("Overriding batch size to {0}".format(num_new_inst_needed))
batch_size = num_new_inst_needed
if not old_instances:
changed = False
return(changed, props)
#check if min_size/max_size/desired capacity have been specified and if not use ASG values
if min_size is None:
min_size = as_group.min_size
if max_size is None:
max_size = as_group.max_size
if desired_capacity is None:
desired_capacity = as_group.desired_capacity
# set temporary settings and wait for them to be reached
    # This should get overridden if the number of instances left is less than the batch size.
as_group = connection.get_all_groups(names=[group_name])[0]
update_size(as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size)
wait_for_new_inst(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances')
wait_for_elb(connection, module, group_name)
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
instances = props.get('instances', [])
if replace_instances:
instances = replace_instances
log.debug("beginning main loop")
for i in get_chunks(instances, batch_size):
# break out of this loop if we have enough new instances
break_early, desired_size, term_instances = terminate_batch(connection, module, i, instances, False)
wait_for_term_inst(connection, module, term_instances)
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, 'viable_instances')
wait_for_elb(connection, module, group_name)
as_group = connection.get_all_groups(names=[group_name])[0]
if break_early:
log.debug("breaking loop")
break
update_size(as_group, max_size, min_size, desired_capacity)
as_group = connection.get_all_groups(names=[group_name])[0]
asg_properties = get_properties(as_group)
log.debug("Rolling update complete.")
changed=True
return(changed, asg_properties)
def get_instances_by_lc(props, lc_check, initial_instances):
new_instances = []
old_instances = []
# old instances are those that have the old launch config
if lc_check:
for i in props.get('instances', []):
if props['instance_facts'][i]['launch_config_name'] == props['launch_config_name']:
new_instances.append(i)
else:
old_instances.append(i)
else:
log.debug("Comparing initial instances with current: {0}".format(initial_instances))
for i in props.get('instances', []):
if i not in initial_instances:
new_instances.append(i)
else:
old_instances.append(i)
log.debug("New instances: {0}, {1}".format(len(new_instances), new_instances))
log.debug("Old instances: {0}, {1}".format(len(old_instances), old_instances))
return new_instances, old_instances
def list_purgeable_instances(props, lc_check, replace_instances, initial_instances):
instances_to_terminate = []
instances = ( inst_id for inst_id in replace_instances if inst_id in props.get('instances', []))
# check to make sure instances given are actually in the given ASG
# and they have a non-current launch config
if lc_check:
for i in instances:
if props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']:
instances_to_terminate.append(i)
else:
for i in instances:
if i in initial_instances:
instances_to_terminate.append(i)
return instances_to_terminate
def terminate_batch(connection, module, replace_instances, initial_instances, leftovers=False):
batch_size = module.params.get('replace_batch_size')
min_size = module.params.get('min_size')
desired_capacity = module.params.get('desired_capacity')
group_name = module.params.get('name')
wait_timeout = int(module.params.get('wait_timeout'))
lc_check = module.params.get('lc_check')
decrement_capacity = False
break_loop = False
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
desired_size = as_group.min_size
new_instances, old_instances = get_instances_by_lc(props, lc_check, initial_instances)
num_new_inst_needed = desired_capacity - len(new_instances)
# check to make sure instances given are actually in the given ASG
# and they have a non-current launch config
instances_to_terminate = list_purgeable_instances(props, lc_check, replace_instances, initial_instances)
log.debug("new instances needed: {0}".format(num_new_inst_needed))
log.debug("new instances: {0}".format(new_instances))
log.debug("old instances: {0}".format(old_instances))
log.debug("batch instances: {0}".format(",".join(instances_to_terminate)))
if num_new_inst_needed == 0:
decrement_capacity = True
if as_group.min_size != min_size:
as_group.min_size = min_size
as_group.update()
log.debug("Updating minimum size back to original of {0}".format(min_size))
        # if there are some leftover old instances, but we are already at capacity with new ones
# we don't want to decrement capacity
if leftovers:
decrement_capacity = False
break_loop = True
instances_to_terminate = old_instances
desired_size = min_size
log.debug("No new instances needed")
if num_new_inst_needed < batch_size and num_new_inst_needed !=0 :
instances_to_terminate = instances_to_terminate[:num_new_inst_needed]
decrement_capacity = False
break_loop = False
log.debug("{0} new instances needed".format(num_new_inst_needed))
log.debug("decrementing capacity: {0}".format(decrement_capacity))
for instance_id in instances_to_terminate:
elb_dreg(connection, module, group_name, instance_id)
log.debug("terminating instance: {0}".format(instance_id))
connection.terminate_instance(instance_id, decrement_capacity=decrement_capacity)
# we wait to make sure the machines we marked as Unhealthy are
# no longer in the list
return break_loop, desired_size, instances_to_terminate
def wait_for_term_inst(connection, module, term_instances):
batch_size = module.params.get('replace_batch_size')
wait_timeout = module.params.get('wait_timeout')
group_name = module.params.get('name')
lc_check = module.params.get('lc_check')
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
count = 1
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and count > 0:
log.debug("waiting for instances to terminate")
count = 0
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
instance_facts = props['instance_facts']
instances = ( i for i in instance_facts if i in term_instances)
for i in instances:
lifecycle = instance_facts[i]['lifecycle_state']
health = instance_facts[i]['health_status']
log.debug("Instance {0} has state of {1},{2}".format(i,lifecycle,health ))
            if lifecycle == 'Terminating' or health == 'Unhealthy':
count += 1
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for old instances to terminate. %s" % time.asctime())
def wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, prop):
# make sure we have the latest stats after that last loop.
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
# now we make sure that we have enough instances in a viable state
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and desired_size > props[prop]:
log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
time.sleep(10)
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for new instances to become viable. %s" % time.asctime())
log.debug("Reached {0}: {1}".format(prop, desired_size))
return props
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
load_balancers=dict(type='list'),
availability_zones=dict(type='list'),
launch_config_name=dict(type='str'),
min_size=dict(type='int'),
max_size=dict(type='int'),
desired_capacity=dict(type='int'),
vpc_zone_identifier=dict(type='list'),
replace_batch_size=dict(type='int', default=1),
replace_all_instances=dict(type='bool', default=False),
replace_instances=dict(type='list', default=[]),
lc_check=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=300),
state=dict(default='present', choices=['present', 'absent']),
tags=dict(type='list', default=[]),
health_check_period=dict(type='int', default=300),
health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
default_cooldown=dict(type='int', default=300),
wait_for_instances=dict(type='bool', default=True),
termination_policies=dict(type='list', default='Default')
),
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive = [['replace_all_instances', 'replace_instances']]
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
replace_instances = module.params.get('replace_instances')
replace_all_instances = module.params.get('replace_all_instances')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
if not connection:
module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region))
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
changed = create_changed = replace_changed = False
if state == 'present':
create_changed, asg_properties=create_autoscaling_group(connection, module)
elif state == 'absent':
changed = delete_autoscaling_group(connection, module)
module.exit_json( changed = changed )
if replace_all_instances or replace_instances:
replace_changed, asg_properties=replace(connection, module)
if create_changed or replace_changed:
changed = True
module.exit_json( changed = changed, **asg_properties )
if __name__ == '__main__':
main()
| gpl-3.0 | -6,694,148,389,157,273,000 | 40.331731 | 232 | 0.645022 | false |
sireliah/poniat | menu.py | 1 | 2882 | #-*- coding: utf-8 -*-
import sys
import pygame
from pygame.locals import *
from utils import *
from initial import LoadMenuTextures
class MainMenu(LoadMenuTextures):
def __init__(self, modes, win_w, win_h):
self.showmain = True
self.submenu = False
self.click = False
self.modes = modes
LoadMenuTextures.__init__(self, win_w, win_h)
self.menuloop()
def mousepos(self):
self.pos = pygame.mouse.get_pos()
def is_inside(self, coords):
x, y = self.pos
if (x > coords[0] and x < coords[4]) and (y > coords[1] and y < coords[5]):
return True
else:
return False
def startbutton(self):
if self.is_inside(self.start_coords):
self.start.show_button(hover=True)
if self.click:
self.showmain = False
else:
self.start.show_button()
def aboutbutton(self):
if self.is_inside(self.about_coords):
self.about.show_button(hover=True)
if self.click:
self.submenu = True
else:
self.about.show_button()
def gobackbutton(self):
if self.is_inside(self.goback_coords):
self.goback.show_button(hover=True)
if self.click:
self.submenu = False
else:
self.goback.show_button()
def exitbutton(self):
if self.is_inside(self.exit_coords):
self.exit.show_button(hover=True)
if self.click:
sys.exit()
else:
self.exit.show_button()
def events(self):
self.mousepos()
self.click = False
for event in pygame.event.get():
if event.type == QUIT:
print("koniec")
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
exit()
if event.key == K_SPACE:
pass
if event.key == K_RETURN:
self.showmain = False
if event.key == K_LCTRL:
pass
elif event.type == MOUSEBUTTONDOWN:
self.click = True
def menuloop(self):
while self.showmain:
clear()
self.events()
self.mainback.show(0, 0)
if self.submenu:
self.aboutback.show(0, 0)
self.gobackbutton()
else:
self.startbutton()
self.aboutbutton()
self.exitbutton()
self.font.show(u"X: %s, Y: %s" % (self.pos), DARKRED, 10, 30, 1, 1)
pygame.display.flip()
clear()
self.mainback.show(0, 0)
self.frame.show(13, 14, 1.0, 1.0)
self.font.show(u"Ładuję...", DARKRED, 10, 30, 2, 2)
pygame.display.flip()
| gpl-3.0 | 1,126,106,222,080,723,800 | 27.514851 | 83 | 0.498958 | false |
FeodorM/some_code | some_nice_python_things/weather.py | 1 | 2096 | #! /usr/bin/env python3
import pyowm
import datetime
owm = pyowm.OWM('2642ecf7132b8918b8f073910006483c', language='ru')
now = pyowm.timeutils.now().date()
tomorrow = pyowm.timeutils.tomorrow().date()
def to_human_time(unix):
return datetime.datetime.fromtimestamp(unix)
def weather_date(weather):
return to_human_time(weather.get_reference_time()).date()
def temperature_to_str(weather):
rain = weather.get_rain()
if not rain:
rain = 'no rain'
return "{}: {}, {}C, {}, humidity: {}%\n".format(
to_human_time(weather.get_reference_time()).time(),
weather.get_detailed_status(),
weather.get_temperature('celsius')['temp'],
rain,
weather.get_humidity()
)
def forecast():
f = owm.three_hours_forecast('Voronezh,RU')
weathers = f.get_forecast().get_weathers()
if weather_date(weathers[0]) == now:
print('Сегодня:\n')
for w in (weather for weather in weathers if weather_date(weather) == now):
print(temperature_to_str(w))
print('Завтра:\n')
for w in (weather for weather in weathers if weather_date(weather) == tomorrow):
print(temperature_to_str(w))
def current_weather():
w = owm.weather_at_place('Voronezh,RU').get_weather()
print("""
{}
Temperature: {}C -- {}C ({}C)
Clouds: {}%
Rain: {}
Humidity: {}%
Wind speed: {}m/s
Time: {}
""".format(
w.get_detailed_status(),
w.get_temperature('celsius')['temp_min'],
w.get_temperature('celsius')['temp_max'],
w.get_temperature('celsius')['temp'],
w.get_clouds(),
w.get_rain(),
w.get_humidity(),
w.get_wind()['speed'],
w.get_reference_time('iso')
))
if __name__ == '__main__':
import sys
arg = '' if len(sys.argv) == 1 else sys.argv[1]
if arg == '':
current_weather()
forecast()
elif arg == '-n' or arg == '--now':
current_weather()
elif arg == '-f' or arg == '--forecast':
forecast()
else:
print('Wrong argument')
| mit | 2,245,925,329,467,947,500 | 23.797619 | 84 | 0.572732 | false |
jeremiedecock/snippets | python/tkinter/python3/cairo_with_pil.py | 1 | 3643 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# SEE: http://stackoverflow.com/questions/25480853/cairo-with-tkinter
# http://effbot.org/tkinterbook/photoimage.htm#patterns
# Required Debian package (Debian 8.1 Jessie): python3-pil.imagetk
import tkinter as tk
import PIL.Image as pil # PIL.Image is a module not a class...
import PIL.ImageTk as piltk # PIL.ImageTk is a module not a class...
import cairo
if tk.TkVersion < 8.6:
print("*" * 80)
print("WARNING: Tk version {} is installed on your system.".format(tk.TkVersion))
print("Tk < 8.6 only supports three file formats: GIF, PGM and PPM.")
print("You need to install Tk >= 8.6 if you want to read JPEG and PNG images!")
print("*" * 80)
# CAIRO
w, h = 800, 600
cairo_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, w, h)
cairo_context = cairo.Context(cairo_surface)
# Draw something
cairo_context.scale(w, h)
cairo_context.rectangle(0, 0, 1, 1)
cairo_context.set_source_rgba(1, 0, 0, 0.8)
cairo_context.fill()
# TKINTER
# WARNING:
# A Tk window MUST be created before you can call PhotoImage!
# See: http://stackoverflow.com/questions/3177231/python-pil-imagetk-photoimage-is-giving-me-a-bus-error
# http://stackoverflow.com/questions/1236540/how-do-i-use-pil-with-tkinter
root = tk.Tk()
# PIL
# WARNING:
# You must keep a reference to the image object in your Python program,
# either by storing it in a global variable, or by attaching it to another
# object!
#
# When a PhotoImage object is garbage-collected by Python (e.g. when you
# return from a function which stored an image in a local variable), the
# image is cleared even if it’s being displayed by a Tkinter widget.
#
# To avoid this, the program must keep an extra reference to the image
# object. A simple way to do this is to assign the image to a widget
# attribute, like this:
#
# label = Label(image=tk_photo)
# label.image = tk_photo # keep a reference!
# label.pack()
#
# (src: http://effbot.org/tkinterbook/photoimage.htm#patterns)
# See also http://infohost.nmt.edu/tcc/help/pubs/pil/image-tk.html
# WARNING:
# "cairo_surface.get_data()" is not yet implemented for Python3 (but it works with Python2).
# See http://www.cairographics.org/documentation/pycairo/3/reference/surfaces.html#cairo.ImageSurface.get_data
pil_image = pil.frombuffer("RGBA", (w,h), cairo_surface.get_data(), "raw", "BGRA", 0, 1)
tk_photo = piltk.PhotoImage(pil_image)
# TKINTER
label = tk.Label(root, image=tk_photo)
label.pack()
root.mainloop()
| mit | 2,852,471,698,596,769,000 | 34.676471 | 110 | 0.732344 | false |
ostinelli/pyopenspime | lib/dns/rdataset.py | 1 | 11607 | # Copyright (C) 2001-2007 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS rdatasets (an rdataset is a set of rdatas of a given type and class)"""
import random
import StringIO
import struct
import dns.exception
import dns.rdatatype
import dns.rdataclass
import dns.rdata
import dns.set
# define SimpleSet here for backwards compatibility
SimpleSet = dns.set.Set
class DifferingCovers(dns.exception.DNSException):
"""Raised if an attempt is made to add a SIG/RRSIG whose covered type
is not the same as that of the other rdatas in the rdataset."""
pass
class IncompatibleTypes(dns.exception.DNSException):
"""Raised if an attempt is made to add rdata of an incompatible type."""
pass
class Rdataset(dns.set.Set):
"""A DNS rdataset.
@ivar rdclass: The class of the rdataset
@type rdclass: int
@ivar rdtype: The type of the rdataset
@type rdtype: int
@ivar covers: The covered type. Usually this value is
dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or
dns.rdatatype.RRSIG, then the covers value will be the rdata
type the SIG/RRSIG covers. The library treats the SIG and RRSIG
types as if they were a family of
types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). This makes RRSIGs much
easier to work with than if RRSIGs covering different rdata
types were aggregated into a single RRSIG rdataset.
@type covers: int
@ivar ttl: The DNS TTL (Time To Live) value
@type ttl: int
"""
__slots__ = ['rdclass', 'rdtype', 'covers', 'ttl']
def __init__(self, rdclass, rdtype, covers=dns.rdatatype.NONE):
"""Create a new rdataset of the specified class and type.
@see: the description of the class instance variables for the
meaning of I{rdclass} and I{rdtype}"""
super(Rdataset, self).__init__()
self.rdclass = rdclass
self.rdtype = rdtype
self.covers = covers
self.ttl = 0
def _clone(self):
obj = super(Rdataset, self)._clone()
obj.rdclass = self.rdclass
obj.rdtype = self.rdtype
obj.covers = self.covers
obj.ttl = self.ttl
return obj
def update_ttl(self, ttl):
"""Set the TTL of the rdataset to be the lesser of the set's current
TTL or the specified TTL. If the set contains no rdatas, set the TTL
to the specified TTL.
@param ttl: The TTL
@type ttl: int"""
if len(self) == 0:
self.ttl = ttl
elif ttl < self.ttl:
self.ttl = ttl
def add(self, rd, ttl=None):
"""Add the specified rdata to the rdataset.
If the optional I{ttl} parameter is supplied, then
self.update_ttl(ttl) will be called prior to adding the rdata.
@param rd: The rdata
@type rd: dns.rdata.Rdata object
@param ttl: The TTL
@type ttl: int"""
#
# If we're adding a signature, do some special handling to
# check that the signature covers the same type as the
# other rdatas in this rdataset. If this is the first rdata
# in the set, initialize the covers field.
#
if self.rdclass != rd.rdclass or self.rdtype != rd.rdtype:
raise IncompatibleTypes
if not ttl is None:
self.update_ttl(ttl)
if self.rdtype == dns.rdatatype.RRSIG or \
self.rdtype == dns.rdatatype.SIG:
covers = rd.covers()
if len(self) == 0 and self.covers == dns.rdatatype.NONE:
self.covers = covers
elif self.covers != covers:
raise DifferingCovers
if dns.rdatatype.is_singleton(rd.rdtype) and len(self) > 0:
self.clear()
super(Rdataset, self).add(rd)
def union_update(self, other):
self.update_ttl(other.ttl)
super(Rdataset, self).union_update(other)
def intersection_update(self, other):
self.update_ttl(other.ttl)
super(Rdataset, self).intersection_update(other)
def update(self, other):
"""Add all rdatas in other to self.
@param other: The rdataset from which to update
@type other: dns.rdataset.Rdataset object"""
self.update_ttl(other.ttl)
super(Rdataset, self).update(other)
def __repr__(self):
if self.covers == 0:
ctext = ''
else:
ctext = '(' + dns.rdatatype.to_text(self.covers) + ')'
return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
dns.rdatatype.to_text(self.rdtype) + ctext + ' rdataset>'
def __str__(self):
return self.to_text()
def __eq__(self, other):
"""Two rdatasets are equal if they have the same class, type, and
covers, and contain the same rdata.
@rtype: bool"""
if not isinstance(other, Rdataset):
return False
if self.rdclass != other.rdclass or \
self.rdtype != other.rdtype or \
self.covers != other.covers:
return False
return super(Rdataset, self).__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
def to_text(self, name=None, origin=None, relativize=True,
override_rdclass=None, **kw):
"""Convert the rdataset into DNS master file format.
@see: L{dns.name.Name.choose_relativity} for more information
on how I{origin} and I{relativize} determine the way names
are emitted.
Any additional keyword arguments are passed on to the rdata
to_text() method.
@param name: If name is not None, emit a RRs with I{name} as
the owner name.
@type name: dns.name.Name object
@param origin: The origin for relative names, or None.
@type origin: dns.name.Name object
@param relativize: True if names should names be relativized
@type relativize: bool"""
if not name is None:
name = name.choose_relativity(origin, relativize)
ntext = str(name)
pad = ' '
else:
ntext = ''
pad = ''
s = StringIO.StringIO()
if not override_rdclass is None:
rdclass = override_rdclass
else:
rdclass = self.rdclass
if len(self) == 0:
#
# Empty rdatasets are used for the question section, and in
# some dynamic updates, so we don't need to print out the TTL
# (which is meaningless anyway).
#
print >> s, '%s%s%s %s' % (ntext, pad,
dns.rdataclass.to_text(rdclass),
dns.rdatatype.to_text(self.rdtype))
else:
for rd in self:
print >> s, '%s%s%d %s %s %s' % \
(ntext, pad, self.ttl, dns.rdataclass.to_text(rdclass),
dns.rdatatype.to_text(self.rdtype),
rd.to_text(origin=origin, relativize=relativize, **kw))
#
# We strip off the final \n for the caller's convenience in printing
#
return s.getvalue()[:-1]
def to_wire(self, name, file, compress=None, origin=None,
override_rdclass=None, want_shuffle=True):
"""Convert the rdataset to wire format.
@param name: The owner name of the RRset that will be emitted
@type name: dns.name.Name object
@param file: The file to which the wire format data will be appended
@type file: file
@param compress: The compression table to use; the default is None.
@type compress: dict
@param origin: The origin to be appended to any relative names when
they are emitted. The default is None.
@returns: the number of records emitted
@rtype: int
"""
if not override_rdclass is None:
rdclass = override_rdclass
want_shuffle = False
else:
rdclass = self.rdclass
file.seek(0, 2)
if len(self) == 0:
name.to_wire(file, compress, origin)
stuff = struct.pack("!HHIH", self.rdtype, rdclass, 0, 0)
file.write(stuff)
return 1
else:
if want_shuffle:
l = list(self)
random.shuffle(l)
else:
l = self
for rd in l:
name.to_wire(file, compress, origin)
stuff = struct.pack("!HHIH", self.rdtype, rdclass,
self.ttl, 0)
file.write(stuff)
start = file.tell()
rd.to_wire(file, compress, origin)
end = file.tell()
assert end - start < 65536
file.seek(start - 2)
stuff = struct.pack("!H", end - start)
file.write(stuff)
file.seek(0, 2)
return len(self)
def match(self, rdclass, rdtype, covers):
"""Returns True if this rdataset matches the specified class, type,
and covers"""
if self.rdclass == rdclass and \
self.rdtype == rdtype and \
self.covers == covers:
return True
return False
def from_text_list(rdclass, rdtype, ttl, text_rdatas):
"""Create an rdataset with the specified class, type, and TTL, and with
the specified list of rdatas in text format.
@rtype: dns.rdataset.Rdataset object
"""
if isinstance(rdclass, str):
rdclass = dns.rdataclass.from_text(rdclass)
if isinstance(rdtype, str):
rdtype = dns.rdatatype.from_text(rdtype)
r = Rdataset(rdclass, rdtype)
r.update_ttl(ttl)
for t in text_rdatas:
rd = dns.rdata.from_text(r.rdclass, r.rdtype, t)
r.add(rd)
return r
def from_text(rdclass, rdtype, ttl, *text_rdatas):
"""Create an rdataset with the specified class, type, and TTL, and with
the specified rdatas in text format.
@rtype: dns.rdataset.Rdataset object
"""
return from_text_list(rdclass, rdtype, ttl, text_rdatas)
def from_rdata_list(ttl, rdatas):
"""Create an rdataset with the specified TTL, and with
the specified list of rdata objects.
@rtype: dns.rdataset.Rdataset object
"""
if len(rdatas) == 0:
raise ValueError, "rdata list must not be empty"
r = None
for rd in rdatas:
if r is None:
r = Rdataset(rd.rdclass, rd.rdtype)
r.update_ttl(ttl)
first_time = False
r.add(rd)
return r
def from_rdata(ttl, *rdatas):
"""Create an rdataset with the specified TTL, and with
the specified rdata objects.
@rtype: dns.rdataset.Rdataset object
"""
return from_rdata_list(ttl, rdatas)
| gpl-3.0 | 7,146,983,658,648,308,000 | 34.279635 | 78 | 0.591023 | false |
killbill/killbill-client-python | killbill/api/account_api.py | 1 | 210059 | # coding: utf-8
#
# Copyright 2010-2014 Ning, Inc.
# Copyright 2014-2020 Groupon, Inc
# Copyright 2020-2021 Equinix, Inc
# Copyright 2014-2021 The Billing Project, LLC
#
# The Billing Project, LLC licenses this file to you under the Apache License, version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Kill Bill
Kill Bill is an open-source billing and payments platform # noqa: E501
OpenAPI spec version: 0.22.22-SNAPSHOT
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from killbill.api_client import ApiClient
class AccountApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def add_account_blocking_state(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Block an account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.add_account_blocking_state(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param BlockingState body: (required)
:param Str created_by: (required)
:param Date requested_date:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: List[BlockingState]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.add_account_blocking_state_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
else:
(data) = self.add_account_blocking_state_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
return data
def add_account_blocking_state_with_http_info(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Block an account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.add_account_blocking_state_with_http_info(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param BlockingState body: (required)
:param Str created_by: (required)
:param Date requested_date:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: List[BlockingState]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'body', 'created_by', 'requested_date', 'plugin_property', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_account_blocking_state" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `add_account_blocking_state`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `add_account_blocking_state`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `add_account_blocking_state`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `add_account_blocking_state`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'requested_date' in params:
query_params.append(('requestedDate', params['requested_date'])) # noqa: E501
if 'plugin_property' in params:
query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501
collection_formats['pluginProperty'] = 'multi' # noqa: E501
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/block', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[BlockingState]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
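    # Usage sketch (not generated code): wiring up the client and blocking an
    # account. The package-level Configuration/ApiClient exports and the
    # BlockingState field names below are assumptions based on this generator's
    # usual output; the ids and credentials are illustrative.
    #
    #   import killbill
    #   conf = killbill.Configuration()
    #   conf.api_key['X-Killbill-ApiKey'] = 'bob'
    #   conf.api_key['X-Killbill-ApiSecret'] = 'lazar'
    #   conf.username = 'admin'
    #   conf.password = 'password'
    #   api = killbill.api.AccountApi(killbill.ApiClient(conf))
    #   state = killbill.BlockingState(state_name='BLOCKED', service='my_svc',
    #                                  is_block_change=False,
    #                                  is_block_entitlement=True,
    #                                  is_block_billing=True)
    #   api.add_account_blocking_state(account_id, state, created_by='demo')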
def add_email(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Add account email # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.add_email(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param AccountEmail body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: List[AccountEmail]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.add_email_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
else:
(data) = self.add_email_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
return data
def add_email_with_http_info(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Add account email # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.add_email_with_http_info(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param AccountEmail body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: List[AccountEmail]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'body', 'created_by', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_email" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `add_email`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `add_email`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `add_email`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `add_email`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/emails', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[AccountEmail]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
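    # Usage sketch: attaching an extra email to an account, reusing the `api`
    # instance from the sketch above; the AccountEmail field names are
    # assumptions based on the generated model.
    #
    #   email = killbill.AccountEmail(account_id=account_id,
    #                                 email='[email protected]')
    #   api.add_email(account_id, email, created_by='demo')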
def close_account(self, account_id=None, created_by=None, **kwargs): # noqa: E501
"""Close account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.close_account(account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str created_by: (required)
:param Bool cancel_all_subscriptions:
:param Bool write_off_unpaid_invoices:
:param Bool item_adjust_unpaid_invoices:
:param Bool remove_future_notifications:
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.close_account_with_http_info(account_id, created_by, **kwargs) # noqa: E501
else:
(data) = self.close_account_with_http_info(account_id, created_by, **kwargs) # noqa: E501
return data
def close_account_with_http_info(self, account_id=None, created_by=None, **kwargs): # noqa: E501
"""Close account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.close_account_with_http_info(account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str created_by: (required)
:param Bool cancel_all_subscriptions:
:param Bool write_off_unpaid_invoices:
:param Bool item_adjust_unpaid_invoices:
:param Bool remove_future_notifications:
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'created_by', 'cancel_all_subscriptions', 'write_off_unpaid_invoices', 'item_adjust_unpaid_invoices', 'remove_future_notifications', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method close_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `close_account`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `close_account`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `close_account`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'cancel_all_subscriptions' in params:
query_params.append(('cancelAllSubscriptions', params['cancel_all_subscriptions'])) # noqa: E501
if 'write_off_unpaid_invoices' in params:
query_params.append(('writeOffUnpaidInvoices', params['write_off_unpaid_invoices'])) # noqa: E501
if 'item_adjust_unpaid_invoices' in params:
query_params.append(('itemAdjustUnpaidInvoices', params['item_adjust_unpaid_invoices'])) # noqa: E501
if 'remove_future_notifications' in params:
query_params.append(('removeFutureNotifications', params['remove_future_notifications'])) # noqa: E501
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
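    # Usage sketch: closing an account and cleaning up its open items; the
    # keyword arguments map directly onto the query parameters documented above.
    #
    #   api.close_account(account_id, created_by='demo',
    #                     cancel_all_subscriptions=True,
    #                     write_off_unpaid_invoices=True,
    #                     remove_future_notifications=True)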
def create_account(self, body=None, created_by=None, **kwargs): # noqa: E501
"""Create account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_account(body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Account body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: Account
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.create_account_with_http_info(body, created_by, **kwargs) # noqa: E501
else:
(data) = self.create_account_with_http_info(body, created_by, **kwargs) # noqa: E501
return data
def create_account_with_http_info(self, body=None, created_by=None, **kwargs): # noqa: E501
"""Create account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_account_with_http_info(body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Account body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: Account
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'created_by', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_account`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `create_account`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Account', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
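    # Usage sketch: creating a minimal account with the `api` instance from the
    # first sketch; only a few Account fields are shown and their names are
    # assumptions based on the generated model.
    #
    #   account = killbill.Account(name='John Doe',
    #                              email='[email protected]',
    #                              currency='USD',
    #                              external_key='john-doe-001')
    #   created = api.create_account(account, created_by='demo')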
def create_account_custom_fields(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Add custom fields to account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_account_custom_fields(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param List[CustomField] body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: List[CustomField]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.create_account_custom_fields_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
else:
(data) = self.create_account_custom_fields_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
return data
def create_account_custom_fields_with_http_info(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Add custom fields to account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_account_custom_fields_with_http_info(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param List[CustomField] body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: List[CustomField]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'body', 'created_by', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_account_custom_fields" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `create_account_custom_fields`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_account_custom_fields`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `create_account_custom_fields`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `create_account_custom_fields`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/customFields', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[CustomField]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
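    # Usage sketch: attaching custom fields to an account; the CustomField
    # field names are assumptions based on the generated model.
    #
    #   fields = [killbill.CustomField(name='tier', value='gold')]
    #   api.create_account_custom_fields(account_id, fields, created_by='demo')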
def create_account_tags(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Add tags to account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_account_tags(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param List[Str] body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: List[Tag]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.create_account_tags_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
else:
(data) = self.create_account_tags_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
return data
def create_account_tags_with_http_info(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Add tags to account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_account_tags_with_http_info(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param List[Str] body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: List[Tag]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'body', 'created_by', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_account_tags" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `create_account_tags`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_account_tags`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `create_account_tags`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `create_account_tags`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/tags', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[Tag]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
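    # Usage sketch: tagging an account. The body is a list of tag definition
    # ids; the AUTO_PAY_OFF id below is the usual system value but should be
    # treated as illustrative.
    #
    #   auto_pay_off = '00000000-0000-0000-0000-000000000001'
    #   api.create_account_tags(account_id, [auto_pay_off], created_by='demo')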
def create_payment_method(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Add a payment method # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_payment_method(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param PaymentMethod body: (required)
:param Str created_by: (required)
:param Bool is_default:
:param Bool pay_all_unpaid_invoices:
:param List[Str] control_plugin_name:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: PaymentMethod
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.create_payment_method_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
else:
(data) = self.create_payment_method_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
return data
def create_payment_method_with_http_info(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Add a payment method # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_payment_method_with_http_info(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param PaymentMethod body: (required)
:param Str created_by: (required)
:param Bool is_default:
:param Bool pay_all_unpaid_invoices:
:param List[Str] control_plugin_name:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: PaymentMethod
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'body', 'created_by', 'is_default', 'pay_all_unpaid_invoices', 'control_plugin_name', 'plugin_property', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_payment_method" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `create_payment_method`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_payment_method`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `create_payment_method`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `create_payment_method`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'is_default' in params:
query_params.append(('isDefault', params['is_default'])) # noqa: E501
if 'pay_all_unpaid_invoices' in params:
query_params.append(('payAllUnpaidInvoices', params['pay_all_unpaid_invoices'])) # noqa: E501
if 'control_plugin_name' in params:
query_params.append(('controlPluginName', params['control_plugin_name'])) # noqa: E501
collection_formats['controlPluginName'] = 'multi' # noqa: E501
if 'plugin_property' in params:
query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501
collection_formats['pluginProperty'] = 'multi' # noqa: E501
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/paymentMethods', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PaymentMethod', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
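    # Usage sketch: registering an external payment method and making it the
    # account default; the PaymentMethod field names and the
    # '__EXTERNAL_PAYMENT__' plugin name are assumptions, not guaranteed here.
    #
    #   pm = killbill.PaymentMethod(plugin_name='__EXTERNAL_PAYMENT__')
    #   api.create_payment_method(account_id, pm, created_by='demo',
    #                             is_default=True)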
def delete_account_custom_fields(self, account_id=None, created_by=None, **kwargs): # noqa: E501
"""Remove custom fields from account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_account_custom_fields(account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str created_by: (required)
:param List[Str] custom_field:
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.delete_account_custom_fields_with_http_info(account_id, created_by, **kwargs) # noqa: E501
else:
(data) = self.delete_account_custom_fields_with_http_info(account_id, created_by, **kwargs) # noqa: E501
return data
def delete_account_custom_fields_with_http_info(self, account_id=None, created_by=None, **kwargs): # noqa: E501
"""Remove custom fields from account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_account_custom_fields_with_http_info(account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str created_by: (required)
:param List[Str] custom_field:
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'created_by', 'custom_field', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_account_custom_fields" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `delete_account_custom_fields`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `delete_account_custom_fields`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `delete_account_custom_fields`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'custom_field' in params:
query_params.append(('customField', params['custom_field'])) # noqa: E501
collection_formats['customField'] = 'multi' # noqa: E501
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/customFields', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
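    # Usage sketch: removing specific custom fields by id; 'custom_field' takes
    # a list of custom field ids (here `custom_field_id` is assumed to hold one).
    #
    #   api.delete_account_custom_fields(account_id, created_by='demo',
    #                                    custom_field=[custom_field_id])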
def delete_account_tags(self, account_id=None, created_by=None, **kwargs): # noqa: E501
"""Remove tags from account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_account_tags(account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str created_by: (required)
:param List[Str] tag_def:
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.delete_account_tags_with_http_info(account_id, created_by, **kwargs) # noqa: E501
else:
(data) = self.delete_account_tags_with_http_info(account_id, created_by, **kwargs) # noqa: E501
return data
def delete_account_tags_with_http_info(self, account_id=None, created_by=None, **kwargs): # noqa: E501
"""Remove tags from account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_account_tags_with_http_info(account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str created_by: (required)
:param List[Str] tag_def:
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'created_by', 'tag_def', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_account_tags" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `delete_account_tags`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `delete_account_tags`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `delete_account_tags`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'tag_def' in params:
query_params.append(('tagDef', params['tag_def'])) # noqa: E501
collection_formats['tagDef'] = 'multi' # noqa: E501
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/tags', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
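    # Usage sketch: removing tags by tag definition id via 'tag_def', mirroring
    # create_account_tags above.
    #
    #   api.delete_account_tags(account_id, created_by='demo',
    #                           tag_def=[auto_pay_off])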
def get_account(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve an account by id # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool account_with_balance:
:param Bool account_with_balance_and_cba:
:param Str audit:
:return: Account
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_account_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_account_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve an account by id # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool account_with_balance:
:param Bool account_with_balance_and_cba:
:param Str audit:
:return: Account
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'account_with_balance', 'account_with_balance_and_cba', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_account`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_account`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'account_with_balance' in params:
query_params.append(('accountWithBalance', params['account_with_balance'])) # noqa: E501
if 'account_with_balance_and_cba' in params:
query_params.append(('accountWithBalanceAndCBA', params['account_with_balance_and_cba'])) # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Account', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
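    # Usage sketch for get_account() above (illustrative only). `account_api`
    # is assumed to be an instance of this API class and the account id is a
    # placeholder. The `async` keyword argument documented above is only
    # usable on interpreters where `async` is not a reserved word
    # (Python < 3.7); there it returns a thread-like object whose get()
    # yields the result.
    #
    #   account = account_api.get_account('268983f2-5443-47e4-a967-b8962fc699c5',
    #                                     account_with_balance=True)
    #   print(account)
    #
    #   thread = account_api.get_account('268983f2-5443-47e4-a967-b8962fc699c5', async=True)
    #   account = thread.get()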
def get_account_audit_logs(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve audit logs by account id # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_audit_logs(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:return: List[AuditLog]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_account_audit_logs_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_audit_logs_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_account_audit_logs_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve audit logs by account id # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_audit_logs_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:return: List[AuditLog]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_audit_logs" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_account_audit_logs`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_account_audit_logs`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/auditLogs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[AuditLog]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_account_audit_logs_with_history(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account audit logs with history by account id # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_audit_logs_with_history(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:return: List[AuditLog]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_account_audit_logs_with_history_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_audit_logs_with_history_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_account_audit_logs_with_history_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account audit logs with history by account id # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_audit_logs_with_history_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:return: List[AuditLog]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_audit_logs_with_history" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_account_audit_logs_with_history`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_account_audit_logs_with_history`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/auditLogsWithHistory', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[AuditLog]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_account_bundles(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve bundles for account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_bundles(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str external_key:
:param Str bundles_filter:
:param Str audit:
:return: List[Bundle]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_account_bundles_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_bundles_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_account_bundles_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve bundles for account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_bundles_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str external_key:
:param Str bundles_filter:
:param Str audit:
:return: List[Bundle]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'external_key', 'bundles_filter', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_bundles" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_account_bundles`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_account_bundles`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'external_key' in params:
query_params.append(('externalKey', params['external_key'])) # noqa: E501
if 'bundles_filter' in params:
query_params.append(('bundlesFilter', params['bundles_filter'])) # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/bundles', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[Bundle]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
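    # Usage sketch for get_account_bundles() above (illustrative only),
    # assuming `account_api` is an instance of this API class. The account id
    # and external key are placeholders.
    #
    #   bundles = account_api.get_account_bundles(
    #       '268983f2-5443-47e4-a967-b8962fc699c5',
    #       external_key='demo-bundle-key',   # restrict the result to one bundle
    #       audit='NONE')                     # audit level (assumed values: NONE/MINIMAL/FULL)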
def get_account_by_key(self, external_key=None, **kwargs): # noqa: E501
"""Retrieve an account by external key # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_by_key(external_key, async=True)
>>> result = thread.get()
:param async bool
:param Str external_key: (required)
:param Bool account_with_balance:
:param Bool account_with_balance_and_cba:
:param Str audit:
:return: Account
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_account_by_key_with_http_info(external_key, **kwargs) # noqa: E501
else:
(data) = self.get_account_by_key_with_http_info(external_key, **kwargs) # noqa: E501
return data
def get_account_by_key_with_http_info(self, external_key=None, **kwargs): # noqa: E501
"""Retrieve an account by external key # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_by_key_with_http_info(external_key, async=True)
>>> result = thread.get()
:param async bool
:param Str external_key: (required)
:param Bool account_with_balance:
:param Bool account_with_balance_and_cba:
:param Str audit:
:return: Account
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['external_key', 'account_with_balance', 'account_with_balance_and_cba', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_by_key" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'external_key' is set
if ('external_key' not in params or
params['external_key'] is None):
raise ValueError("Missing the required parameter `external_key` when calling `get_account_by_key`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'external_key' in params:
query_params.append(('externalKey', params['external_key'])) # noqa: E501
if 'account_with_balance' in params:
query_params.append(('accountWithBalance', params['account_with_balance'])) # noqa: E501
if 'account_with_balance_and_cba' in params:
query_params.append(('accountWithBalanceAndCBA', params['account_with_balance_and_cba'])) # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Account', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
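    # Usage sketch for get_account_by_key() above (illustrative only). The
    # external key is whatever was supplied when the account was created; the
    # value below is a placeholder and `account_api` is assumed to be an
    # instance of this API class.
    #
    #   account = account_api.get_account_by_key('demo-customer-1',
    #                                            account_with_balance_and_cba=True)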
def get_account_custom_fields(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account custom fields # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_custom_fields(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str audit:
:return: List[CustomField]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_account_custom_fields_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_custom_fields_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_account_custom_fields_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account custom fields # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_custom_fields_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str audit:
:return: List[CustomField]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_custom_fields" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_account_custom_fields`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_account_custom_fields`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/customFields', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[CustomField]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_account_email_audit_logs_with_history(self, account_id=None, account_email_id=None, **kwargs): # noqa: E501
"""Retrieve account email audit logs with history by id # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_email_audit_logs_with_history(account_id, account_email_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str account_email_id: (required)
:return: List[AuditLog]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_account_email_audit_logs_with_history_with_http_info(account_id, account_email_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_email_audit_logs_with_history_with_http_info(account_id, account_email_id, **kwargs) # noqa: E501
return data
def get_account_email_audit_logs_with_history_with_http_info(self, account_id=None, account_email_id=None, **kwargs): # noqa: E501
"""Retrieve account email audit logs with history by id # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_email_audit_logs_with_history_with_http_info(account_id, account_email_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str account_email_id: (required)
:return: List[AuditLog]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'account_email_id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_email_audit_logs_with_history" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_account_email_audit_logs_with_history`") # noqa: E501
# verify the required parameter 'account_email_id' is set
if ('account_email_id' not in params or
params['account_email_id'] is None):
raise ValueError("Missing the required parameter `account_email_id` when calling `get_account_email_audit_logs_with_history`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_account_email_audit_logs_with_history`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
if 'account_email_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_email_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_email_id` when calling `get_account_email_audit_logs_with_history`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
if 'account_email_id' in params:
path_params['accountEmailId'] = params['account_email_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/emails/{accountEmailId}/auditLogsWithHistory', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[AuditLog]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_account_tags(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account tags # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_tags(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool included_deleted:
:param Str audit:
:return: List[Tag]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_account_tags_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_tags_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_account_tags_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account tags # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_tags_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool included_deleted:
:param Str audit:
:return: List[Tag]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'included_deleted', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_tags" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_account_tags`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_account_tags`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'included_deleted' in params:
query_params.append(('includedDeleted', params['included_deleted'])) # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/tags', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[Tag]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
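    # Usage sketch for get_account_tags() above (illustrative only), assuming
    # `account_api` is an instance of this API class and the account id is a
    # placeholder.
    #
    #   tags = account_api.get_account_tags('268983f2-5443-47e4-a967-b8962fc699c5',
    #                                       included_deleted=True)
    #   for tag in tags:
    #       print(tag)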
def get_account_timeline(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account timeline # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_timeline(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool parallel:
:param Str audit:
:return: AccountTimeline
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_account_timeline_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_timeline_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_account_timeline_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account timeline # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_timeline_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool parallel:
:param Str audit:
:return: AccountTimeline
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'parallel', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_timeline" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_account_timeline`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_account_timeline`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'parallel' in params:
query_params.append(('parallel', params['parallel'])) # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/timeline', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AccountTimeline', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
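    # Usage sketch for get_account_timeline() above (illustrative only). The
    # returned AccountTimeline typically aggregates the account with its
    # bundles, invoices and payments; `parallel=True` asks the server to
    # assemble those pieces concurrently. `account_api` and the account id
    # are placeholders.
    #
    #   timeline = account_api.get_account_timeline('268983f2-5443-47e4-a967-b8962fc699c5',
    #                                               parallel=True)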
def get_accounts(self, **kwargs): # noqa: E501
"""List accounts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_accounts(async=True)
>>> result = thread.get()
:param async bool
:param Int offset:
:param Int limit:
:param Bool account_with_balance:
:param Bool account_with_balance_and_cba:
:param Str audit:
:return: List[Account]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_accounts_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_accounts_with_http_info(**kwargs) # noqa: E501
return data
def get_accounts_with_http_info(self, **kwargs): # noqa: E501
"""List accounts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_accounts_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param Int offset:
:param Int limit:
:param Bool account_with_balance:
:param Bool account_with_balance_and_cba:
:param Str audit:
:return: List[Account]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['offset', 'limit', 'account_with_balance', 'account_with_balance_and_cba', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_accounts" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'account_with_balance' in params:
query_params.append(('accountWithBalance', params['account_with_balance'])) # noqa: E501
if 'account_with_balance_and_cba' in params:
query_params.append(('accountWithBalanceAndCBA', params['account_with_balance_and_cba'])) # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/pagination', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[Account]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
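    # Usage sketch for get_accounts() above (illustrative only): paginate
    # through all accounts. `account_api` is assumed to be an instance of this
    # API class; the page size of 100 is arbitrary.
    #
    #   offset = 0
    #   while True:
    #       page = account_api.get_accounts(offset=offset, limit=100)
    #       if not page:
    #           break
    #       for account in page:
    #           print(account)
    #       offset += len(page)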
def get_all_custom_fields(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account customFields # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_all_custom_fields(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str object_type:
:param Str audit:
:return: List[CustomField]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_all_custom_fields_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_all_custom_fields_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_all_custom_fields_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account customFields # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_all_custom_fields_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str object_type:
:param Str audit:
:return: List[CustomField]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'object_type', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_custom_fields" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_all_custom_fields`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_all_custom_fields`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'object_type' in params:
query_params.append(('objectType', params['object_type'])) # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/allCustomFields', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[CustomField]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_all_tags(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account tags # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_all_tags(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str object_type:
:param Bool included_deleted:
:param Str audit:
:return: List[Tag]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_all_tags_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_all_tags_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_all_tags_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account tags # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_all_tags_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str object_type:
:param Bool included_deleted:
:param Str audit:
:return: List[Tag]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'object_type', 'included_deleted', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_tags" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_all_tags`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_all_tags`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'object_type' in params:
query_params.append(('objectType', params['object_type'])) # noqa: E501
if 'included_deleted' in params:
query_params.append(('includedDeleted', params['included_deleted'])) # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/allTags', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[Tag]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_blocking_state_audit_logs_with_history(self, blocking_id=None, **kwargs): # noqa: E501
"""Retrieve blocking state audit logs with history by id # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_blocking_state_audit_logs_with_history(blocking_id, async=True)
>>> result = thread.get()
:param async bool
:param Str blocking_id: (required)
:return: List[AuditLog]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_blocking_state_audit_logs_with_history_with_http_info(blocking_id, **kwargs) # noqa: E501
else:
(data) = self.get_blocking_state_audit_logs_with_history_with_http_info(blocking_id, **kwargs) # noqa: E501
return data
def get_blocking_state_audit_logs_with_history_with_http_info(self, blocking_id=None, **kwargs): # noqa: E501
"""Retrieve blocking state audit logs with history by id # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_blocking_state_audit_logs_with_history_with_http_info(blocking_id, async=True)
>>> result = thread.get()
:param async bool
:param Str blocking_id: (required)
:return: List[AuditLog]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['blocking_id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_blocking_state_audit_logs_with_history" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'blocking_id' is set
if ('blocking_id' not in params or
params['blocking_id'] is None):
raise ValueError("Missing the required parameter `blocking_id` when calling `get_blocking_state_audit_logs_with_history`") # noqa: E501
if 'blocking_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['blocking_id']): # noqa: E501
raise ValueError("Invalid value for parameter `blocking_id` when calling `get_blocking_state_audit_logs_with_history`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'blocking_id' in params:
path_params['blockingId'] = params['blocking_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/block/{blockingId}/auditLogsWithHistory', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[AuditLog]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_blocking_states(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve blocking states for account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_blocking_states(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param List[Str] blocking_state_types:
:param List[Str] blocking_state_svcs:
:param Str audit:
:return: List[BlockingState]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_blocking_states_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_blocking_states_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_blocking_states_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve blocking states for account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_blocking_states_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param List[Str] blocking_state_types:
:param List[Str] blocking_state_svcs:
:param Str audit:
:return: List[BlockingState]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'blocking_state_types', 'blocking_state_svcs', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_blocking_states" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_blocking_states`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_blocking_states`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'blocking_state_types' in params:
query_params.append(('blockingStateTypes', params['blocking_state_types'])) # noqa: E501
collection_formats['blockingStateTypes'] = 'multi' # noqa: E501
if 'blocking_state_svcs' in params:
query_params.append(('blockingStateSvcs', params['blocking_state_svcs'])) # noqa: E501
collection_formats['blockingStateSvcs'] = 'multi' # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/block', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[BlockingState]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
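    # Usage sketch for get_blocking_states() above (illustrative only). Both
    # filters are multi-valued query parameters; the type and service names
    # shown are assumed examples and depend on the deployment. `account_api`
    # and the account id are placeholders.
    #
    #   states = account_api.get_blocking_states(
    #       '268983f2-5443-47e4-a967-b8962fc699c5',
    #       blocking_state_types=['SUBSCRIPTION_BUNDLE'],   # assumed enum value
    #       blocking_state_svcs=['entitlement-service'])    # assumed service name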
def get_children_accounts(self, account_id=None, **kwargs): # noqa: E501
"""List children accounts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_children_accounts(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool account_with_balance:
:param Bool account_with_balance_and_cba:
:param Str audit:
:return: List[Account]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_children_accounts_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_children_accounts_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_children_accounts_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""List children accounts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_children_accounts_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool account_with_balance:
:param Bool account_with_balance_and_cba:
:param Str audit:
:return: List[Account]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'account_with_balance', 'account_with_balance_and_cba', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_children_accounts" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_children_accounts`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_children_accounts`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'account_with_balance' in params:
query_params.append(('accountWithBalance', params['account_with_balance'])) # noqa: E501
if 'account_with_balance_and_cba' in params:
query_params.append(('accountWithBalanceAndCBA', params['account_with_balance_and_cba'])) # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/children', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[Account]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
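    # Usage sketch for get_children_accounts() above (illustrative only):
    # list the child accounts of a parent in an account hierarchy, including
    # their balances. `account_api` and the parent account id are placeholders.
    #
    #   children = account_api.get_children_accounts(
    #       '268983f2-5443-47e4-a967-b8962fc699c5',
    #       account_with_balance=True)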
def get_emails(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve an account emails # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_emails(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:return: List[AccountEmail]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_emails_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_emails_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_emails_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve an account emails # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_emails_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:return: List[AccountEmail]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_emails" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_emails`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_emails`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/emails', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[AccountEmail]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_invoice_payments(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account invoice payments # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_invoice_payments(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool with_plugin_info:
:param Bool with_attempts:
:param List[Str] plugin_property:
:param Str audit:
:return: List[InvoicePayment]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_invoice_payments_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_invoice_payments_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_invoice_payments_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account invoice payments # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_invoice_payments_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool with_plugin_info:
:param Bool with_attempts:
:param List[Str] plugin_property:
:param Str audit:
:return: List[InvoicePayment]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'with_plugin_info', 'with_attempts', 'plugin_property', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_invoice_payments" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_invoice_payments`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_invoice_payments`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'with_plugin_info' in params:
query_params.append(('withPluginInfo', params['with_plugin_info'])) # noqa: E501
if 'with_attempts' in params:
query_params.append(('withAttempts', params['with_attempts'])) # noqa: E501
if 'plugin_property' in params:
query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501
collection_formats['pluginProperty'] = 'multi' # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/invoicePayments', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[InvoicePayment]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_invoices_for_account(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account invoices # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_invoices_for_account(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Date start_date:
:param Date end_date:
:param Bool with_migration_invoices:
:param Bool unpaid_invoices_only:
:param Bool include_voided_invoices:
:param Str invoices_filter:
:param Str audit:
:return: List[Invoice]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_invoices_for_account_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_invoices_for_account_with_http_info(account_id, **kwargs) # noqa: E501
return data
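    # Illustrative usage sketch: fetch only the unpaid invoices of an account
    # (assumes `api` is a configured AccountApi instance):
    #
    #   unpaid = api.get_invoices_for_account(account_id,
    #                                         unpaid_invoices_only=True)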
def get_invoices_for_account_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account invoices # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_invoices_for_account_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Date start_date:
:param Date end_date:
:param Bool with_migration_invoices:
:param Bool unpaid_invoices_only:
:param Bool include_voided_invoices:
:param Str invoices_filter:
:param Str audit:
:return: List[Invoice]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'start_date', 'end_date', 'with_migration_invoices', 'unpaid_invoices_only', 'include_voided_invoices', 'invoices_filter', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_invoices_for_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_invoices_for_account`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_invoices_for_account`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'start_date' in params:
query_params.append(('startDate', params['start_date'])) # noqa: E501
if 'end_date' in params:
query_params.append(('endDate', params['end_date'])) # noqa: E501
if 'with_migration_invoices' in params:
query_params.append(('withMigrationInvoices', params['with_migration_invoices'])) # noqa: E501
if 'unpaid_invoices_only' in params:
query_params.append(('unpaidInvoicesOnly', params['unpaid_invoices_only'])) # noqa: E501
if 'include_voided_invoices' in params:
query_params.append(('includeVoidedInvoices', params['include_voided_invoices'])) # noqa: E501
if 'invoices_filter' in params:
query_params.append(('invoicesFilter', params['invoices_filter'])) # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/invoices', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[Invoice]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_overdue_account(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve overdue state for account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_overdue_account(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:return: OverdueState
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_overdue_account_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_overdue_account_with_http_info(account_id, **kwargs) # noqa: E501
return data
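    # Illustrative usage: the synchronous call returns an OverdueState model
    # (assumes `api` is a configured AccountApi):
    #
    #   overdue_state = api.get_overdue_account(account_id)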
def get_overdue_account_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve overdue state for account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_overdue_account_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:return: OverdueState
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_overdue_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_overdue_account`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_overdue_account`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/overdue', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='OverdueState', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_payment_methods_for_account(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account payment methods # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_payment_methods_for_account(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool with_plugin_info:
:param Bool included_deleted:
:param List[Str] plugin_property:
:param Str audit:
:return: List[PaymentMethod]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_payment_methods_for_account_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_payment_methods_for_account_with_http_info(account_id, **kwargs) # noqa: E501
return data
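    # Illustrative usage sketch (assumed AccountApi instance `api`): list the
    # account's payment methods together with plugin details:
    #
    #   methods = api.get_payment_methods_for_account(account_id,
    #                                                  with_plugin_info=True)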
def get_payment_methods_for_account_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account payment methods # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_payment_methods_for_account_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool with_plugin_info:
:param Bool included_deleted:
:param List[Str] plugin_property:
:param Str audit:
:return: List[PaymentMethod]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'with_plugin_info', 'included_deleted', 'plugin_property', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_payment_methods_for_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_payment_methods_for_account`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_payment_methods_for_account`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'with_plugin_info' in params:
query_params.append(('withPluginInfo', params['with_plugin_info'])) # noqa: E501
if 'included_deleted' in params:
query_params.append(('includedDeleted', params['included_deleted'])) # noqa: E501
if 'plugin_property' in params:
query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501
collection_formats['pluginProperty'] = 'multi' # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/paymentMethods', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[PaymentMethod]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_payments_for_account(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account payments # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_payments_for_account(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool with_attempts:
:param Bool with_plugin_info:
:param List[Str] plugin_property:
:param Str audit:
:return: List[Payment]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_payments_for_account_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_payments_for_account_with_http_info(account_id, **kwargs) # noqa: E501
return data
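    # Illustrative usage (assumes a configured AccountApi `api`): retrieve the
    # account's payments including attempt details:
    #
    #   payments = api.get_payments_for_account(account_id, with_attempts=True)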
def get_payments_for_account_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account payments # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_payments_for_account_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool with_attempts:
:param Bool with_plugin_info:
:param List[Str] plugin_property:
:param Str audit:
:return: List[Payment]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'with_attempts', 'with_plugin_info', 'plugin_property', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_payments_for_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_payments_for_account`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_payments_for_account`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'with_attempts' in params:
query_params.append(('withAttempts', params['with_attempts'])) # noqa: E501
if 'with_plugin_info' in params:
query_params.append(('withPluginInfo', params['with_plugin_info'])) # noqa: E501
if 'plugin_property' in params:
query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501
collection_formats['pluginProperty'] = 'multi' # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/payments', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[Payment]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def modify_account_custom_fields(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Modify custom fields to account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.modify_account_custom_fields(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param List[CustomField] body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.modify_account_custom_fields_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
else:
(data) = self.modify_account_custom_fields_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
return data
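    # Illustrative usage sketch: `custom_fields` stands for a previously built
    # List[CustomField] and 'demo' is an arbitrary operator name (both are
    # assumptions for the example, not generated code):
    #
    #   api.modify_account_custom_fields(account_id, custom_fields,
    #                                    created_by='demo')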
def modify_account_custom_fields_with_http_info(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Modify custom fields to account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.modify_account_custom_fields_with_http_info(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param List[CustomField] body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'body', 'created_by', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method modify_account_custom_fields" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `modify_account_custom_fields`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `modify_account_custom_fields`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `modify_account_custom_fields`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `modify_account_custom_fields`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/customFields', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def pay_all_invoices(self, account_id=None, created_by=None, **kwargs): # noqa: E501
"""Trigger a payment for all unpaid invoices # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.pay_all_invoices(account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str created_by: (required)
:param Str payment_method_id:
:param Bool external_payment:
:param Float payment_amount:
:param Date target_date:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: List[Invoice]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.pay_all_invoices_with_http_info(account_id, created_by, **kwargs) # noqa: E501
else:
(data) = self.pay_all_invoices_with_http_info(account_id, created_by, **kwargs) # noqa: E501
return data
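    # Illustrative usage: trigger an external payment against every unpaid
    # invoice (assumes AccountApi instance `api`; 'demo' is an arbitrary
    # operator name for the X-Killbill-CreatedBy header):
    #
    #   api.pay_all_invoices(account_id, created_by='demo',
    #                        external_payment=True)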
def pay_all_invoices_with_http_info(self, account_id=None, created_by=None, **kwargs): # noqa: E501
"""Trigger a payment for all unpaid invoices # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.pay_all_invoices_with_http_info(account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str created_by: (required)
:param Str payment_method_id:
:param Bool external_payment:
:param Float payment_amount:
:param Date target_date:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: List[Invoice]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'created_by', 'payment_method_id', 'external_payment', 'payment_amount', 'target_date', 'plugin_property', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method pay_all_invoices" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `pay_all_invoices`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `pay_all_invoices`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `pay_all_invoices`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'payment_method_id' in params:
query_params.append(('paymentMethodId', params['payment_method_id'])) # noqa: E501
if 'external_payment' in params:
query_params.append(('externalPayment', params['external_payment'])) # noqa: E501
if 'payment_amount' in params:
query_params.append(('paymentAmount', params['payment_amount'])) # noqa: E501
if 'target_date' in params:
query_params.append(('targetDate', params['target_date'])) # noqa: E501
if 'plugin_property' in params:
query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501
collection_formats['pluginProperty'] = 'multi' # noqa: E501
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/invoicePayments', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[Invoice]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def process_payment(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Trigger a payment (authorization, purchase or credit) # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.process_payment(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param PaymentTransaction body: (required)
:param Str created_by: (required)
:param Str payment_method_id:
:param List[Str] control_plugin_name:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: Payment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.process_payment_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
else:
(data) = self.process_payment_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
return data
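    # Illustrative usage sketch: `transaction` stands for a PaymentTransaction
    # model built by the caller (an assumption for this example); the
    # synchronous call returns a Payment:
    #
    #   payment = api.process_payment(account_id, transaction,
    #                                 created_by='demo')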
def process_payment_with_http_info(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Trigger a payment (authorization, purchase or credit) # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.process_payment_with_http_info(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param PaymentTransaction body: (required)
:param Str created_by: (required)
:param Str payment_method_id:
:param List[Str] control_plugin_name:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: Payment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'body', 'created_by', 'payment_method_id', 'control_plugin_name', 'plugin_property', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method process_payment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `process_payment`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `process_payment`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `process_payment`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `process_payment`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'payment_method_id' in params:
query_params.append(('paymentMethodId', params['payment_method_id'])) # noqa: E501
if 'control_plugin_name' in params:
query_params.append(('controlPluginName', params['control_plugin_name'])) # noqa: E501
collection_formats['controlPluginName'] = 'multi' # noqa: E501
if 'plugin_property' in params:
query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501
collection_formats['pluginProperty'] = 'multi' # noqa: E501
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/payments', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Payment', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def process_payment_by_external_key(self, body=None, external_key=None, created_by=None, **kwargs): # noqa: E501
"""Trigger a payment using the account external key (authorization, purchase or credit) # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.process_payment_by_external_key(body, external_key, created_by, async=True)
>>> result = thread.get()
:param async bool
:param PaymentTransaction body: (required)
:param Str external_key: (required)
:param Str created_by: (required)
:param Str payment_method_id:
:param List[Str] control_plugin_name:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: Payment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.process_payment_by_external_key_with_http_info(body, external_key, created_by, **kwargs) # noqa: E501
else:
(data) = self.process_payment_by_external_key_with_http_info(body, external_key, created_by, **kwargs) # noqa: E501
return data
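    # Illustrative usage: same as process_payment, but the account is resolved
    # by its external key ('account-ext-key' and the `transaction`
    # PaymentTransaction are placeholders for this sketch):
    #
    #   payment = api.process_payment_by_external_key(transaction,
    #                                                 'account-ext-key',
    #                                                 created_by='demo')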
def process_payment_by_external_key_with_http_info(self, body=None, external_key=None, created_by=None, **kwargs): # noqa: E501
"""Trigger a payment using the account external key (authorization, purchase or credit) # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.process_payment_by_external_key_with_http_info(body, external_key, created_by, async=True)
>>> result = thread.get()
:param async bool
:param PaymentTransaction body: (required)
:param Str external_key: (required)
:param Str created_by: (required)
:param Str payment_method_id:
:param List[Str] control_plugin_name:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: Payment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'external_key', 'created_by', 'payment_method_id', 'control_plugin_name', 'plugin_property', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method process_payment_by_external_key" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `process_payment_by_external_key`") # noqa: E501
# verify the required parameter 'external_key' is set
if ('external_key' not in params or
params['external_key'] is None):
raise ValueError("Missing the required parameter `external_key` when calling `process_payment_by_external_key`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `process_payment_by_external_key`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'external_key' in params:
query_params.append(('externalKey', params['external_key'])) # noqa: E501
if 'payment_method_id' in params:
query_params.append(('paymentMethodId', params['payment_method_id'])) # noqa: E501
if 'control_plugin_name' in params:
query_params.append(('controlPluginName', params['control_plugin_name'])) # noqa: E501
collection_formats['controlPluginName'] = 'multi' # noqa: E501
if 'plugin_property' in params:
query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501
collection_formats['pluginProperty'] = 'multi' # noqa: E501
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/payments', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Payment', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def rebalance_existing_cba_on_account(self, account_id=None, created_by=None, **kwargs): # noqa: E501
"""Rebalance account CBA # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.rebalance_existing_cba_on_account(account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.rebalance_existing_cba_on_account_with_http_info(account_id, created_by, **kwargs) # noqa: E501
else:
(data) = self.rebalance_existing_cba_on_account_with_http_info(account_id, created_by, **kwargs) # noqa: E501
return data
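    # Illustrative usage (assumes AccountApi instance `api`): no payload is
    # required, only the audit header identifying the operator:
    #
    #   api.rebalance_existing_cba_on_account(account_id, created_by='demo')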
def rebalance_existing_cba_on_account_with_http_info(self, account_id=None, created_by=None, **kwargs): # noqa: E501
"""Rebalance account CBA # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.rebalance_existing_cba_on_account_with_http_info(account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'created_by', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method rebalance_existing_cba_on_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `rebalance_existing_cba_on_account`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `rebalance_existing_cba_on_account`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `rebalance_existing_cba_on_account`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/cbaRebalancing', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def refresh_payment_methods(self, account_id=None, created_by=None, **kwargs): # noqa: E501
"""Refresh account payment methods # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.refresh_payment_methods(account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str created_by: (required)
:param Str plugin_name:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.refresh_payment_methods_with_http_info(account_id, created_by, **kwargs) # noqa: E501
else:
(data) = self.refresh_payment_methods_with_http_info(account_id, created_by, **kwargs) # noqa: E501
return data
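    # Illustrative usage sketch: refresh payment methods from a specific
    # plugin ('__EXTERNAL_PAYMENT__' is only an example plugin name):
    #
    #   api.refresh_payment_methods(account_id, created_by='demo',
    #                               plugin_name='__EXTERNAL_PAYMENT__')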
def refresh_payment_methods_with_http_info(self, account_id=None, created_by=None, **kwargs): # noqa: E501
"""Refresh account payment methods # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.refresh_payment_methods_with_http_info(account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str created_by: (required)
:param Str plugin_name:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'created_by', 'plugin_name', 'plugin_property', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method refresh_payment_methods" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `refresh_payment_methods`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `refresh_payment_methods`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `refresh_payment_methods`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'plugin_name' in params:
query_params.append(('pluginName', params['plugin_name'])) # noqa: E501
if 'plugin_property' in params:
query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501
collection_formats['pluginProperty'] = 'multi' # noqa: E501
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/paymentMethods/refresh', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def remove_email(self, account_id=None, email=None, created_by=None, **kwargs): # noqa: E501
"""Delete email from account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.remove_email(account_id, email, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str email: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.remove_email_with_http_info(account_id, email, created_by, **kwargs) # noqa: E501
else:
(data) = self.remove_email_with_http_info(account_id, email, created_by, **kwargs) # noqa: E501
return data
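    # Illustrative usage (assumed AccountApi instance `api`): remove one email
    # address previously attached to the account:
    #
    #   api.remove_email(account_id, '[email protected]', created_by='demo')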
def remove_email_with_http_info(self, account_id=None, email=None, created_by=None, **kwargs): # noqa: E501
"""Delete email from account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.remove_email_with_http_info(account_id, email, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str email: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'email', 'created_by', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method remove_email" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `remove_email`") # noqa: E501
# verify the required parameter 'email' is set
if ('email' not in params or
params['email'] is None):
raise ValueError("Missing the required parameter `email` when calling `remove_email`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `remove_email`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `remove_email`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
if 'email' in params:
path_params['email'] = params['email'] # noqa: E501
query_params = []
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/emails/{email}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_accounts(self, search_key=None, **kwargs): # noqa: E501
"""Search accounts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.search_accounts(search_key, async=True)
>>> result = thread.get()
:param async bool
:param Str search_key: (required)
:param Int offset:
:param Int limit:
:param Bool account_with_balance:
:param Bool account_with_balance_and_cba:
:param Str audit:
:return: List[Account]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.search_accounts_with_http_info(search_key, **kwargs) # noqa: E501
else:
(data) = self.search_accounts_with_http_info(search_key, **kwargs) # noqa: E501
return data
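    # Illustrative usage sketch: paginated account search; offset and limit
    # are the documented paging parameters (assumes AccountApi instance `api`):
    #
    #   matches = api.search_accounts('john doe', offset=0, limit=25)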
def search_accounts_with_http_info(self, search_key=None, **kwargs): # noqa: E501
"""Search accounts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.search_accounts_with_http_info(search_key, async=True)
>>> result = thread.get()
:param async bool
:param Str search_key: (required)
:param Int offset:
:param Int limit:
:param Bool account_with_balance:
:param Bool account_with_balance_and_cba:
:param Str audit:
:return: List[Account]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['search_key', 'offset', 'limit', 'account_with_balance', 'account_with_balance_and_cba', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_accounts" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'search_key' is set
if ('search_key' not in params or
params['search_key'] is None):
raise ValueError("Missing the required parameter `search_key` when calling `search_accounts`") # noqa: E501
if 'search_key' in params and not re.search('.*', params['search_key']): # noqa: E501
raise ValueError("Invalid value for parameter `search_key` when calling `search_accounts`, must conform to the pattern `/.*/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'search_key' in params:
path_params['searchKey'] = params['search_key'] # noqa: E501
query_params = []
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'account_with_balance' in params:
query_params.append(('accountWithBalance', params['account_with_balance'])) # noqa: E501
if 'account_with_balance_and_cba' in params:
query_params.append(('accountWithBalanceAndCBA', params['account_with_balance_and_cba'])) # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/search/{searchKey}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[Account]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def set_default_payment_method(self, account_id=None, payment_method_id=None, created_by=None, **kwargs): # noqa: E501
"""Set the default payment method # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.set_default_payment_method(account_id, payment_method_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str payment_method_id: (required)
:param Str created_by: (required)
:param Bool pay_all_unpaid_invoices:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.set_default_payment_method_with_http_info(account_id, payment_method_id, created_by, **kwargs) # noqa: E501
else:
(data) = self.set_default_payment_method_with_http_info(account_id, payment_method_id, created_by, **kwargs) # noqa: E501
return data
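    # Illustrative usage: make an existing payment method the account default
    # and settle any unpaid invoices with it (assumes AccountApi `api`):
    #
    #   api.set_default_payment_method(account_id, payment_method_id,
    #                                  created_by='demo',
    #                                  pay_all_unpaid_invoices=True)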
def set_default_payment_method_with_http_info(self, account_id=None, payment_method_id=None, created_by=None, **kwargs): # noqa: E501
"""Set the default payment method # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.set_default_payment_method_with_http_info(account_id, payment_method_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str payment_method_id: (required)
:param Str created_by: (required)
:param Bool pay_all_unpaid_invoices:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'payment_method_id', 'created_by', 'pay_all_unpaid_invoices', 'plugin_property', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method set_default_payment_method" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `set_default_payment_method`") # noqa: E501
# verify the required parameter 'payment_method_id' is set
if ('payment_method_id' not in params or
params['payment_method_id'] is None):
raise ValueError("Missing the required parameter `payment_method_id` when calling `set_default_payment_method`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `set_default_payment_method`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `set_default_payment_method`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
if 'payment_method_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['payment_method_id']): # noqa: E501
raise ValueError("Invalid value for parameter `payment_method_id` when calling `set_default_payment_method`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
if 'payment_method_id' in params:
path_params['paymentMethodId'] = params['payment_method_id'] # noqa: E501
query_params = []
if 'pay_all_unpaid_invoices' in params:
query_params.append(('payAllUnpaidInvoices', params['pay_all_unpaid_invoices'])) # noqa: E501
if 'plugin_property' in params:
query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501
collection_formats['pluginProperty'] = 'multi' # noqa: E501
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/paymentMethods/{paymentMethodId}/setDefault', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def transfer_child_credit_to_parent(self, child_account_id=None, created_by=None, **kwargs): # noqa: E501
"""Move a given child credit to the parent level # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.transfer_child_credit_to_parent(child_account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str child_account_id: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.transfer_child_credit_to_parent_with_http_info(child_account_id, created_by, **kwargs) # noqa: E501
else:
(data) = self.transfer_child_credit_to_parent_with_http_info(child_account_id, created_by, **kwargs) # noqa: E501
return data
def transfer_child_credit_to_parent_with_http_info(self, child_account_id=None, created_by=None, **kwargs): # noqa: E501
"""Move a given child credit to the parent level # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.transfer_child_credit_to_parent_with_http_info(child_account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str child_account_id: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['child_account_id', 'created_by', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method transfer_child_credit_to_parent" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'child_account_id' is set
if ('child_account_id' not in params or
params['child_account_id'] is None):
raise ValueError("Missing the required parameter `child_account_id` when calling `transfer_child_credit_to_parent`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `transfer_child_credit_to_parent`") # noqa: E501
if 'child_account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['child_account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `child_account_id` when calling `transfer_child_credit_to_parent`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'child_account_id' in params:
path_params['childAccountId'] = params['child_account_id'] # noqa: E501
query_params = []
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{childAccountId}/transferCredit', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_account(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Update account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_account(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Account body: (required)
:param Str created_by: (required)
:param Bool treat_null_as_reset:
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.update_account_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
else:
(data) = self.update_account_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
return data
def update_account_with_http_info(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Update account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_account_with_http_info(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Account body: (required)
:param Str created_by: (required)
:param Bool treat_null_as_reset:
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'body', 'created_by', 'treat_null_as_reset', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `update_account`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `update_account`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `update_account`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `update_account`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'treat_null_as_reset' in params:
query_params.append(('treatNullAsReset', params['treat_null_as_reset'])) # noqa: E501
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
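# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated client code). It
# assumes `api` is an already-configured instance of this account API class,
# that the id arguments are valid Kill Bill UUID strings, and that
# `account_body` is an Account model instance carrying the fields to update.
# ---------------------------------------------------------------------------
def _example_account_api_usage(api, account_id, payment_method_id,
                               child_account_id, account_body):
    # Mark an existing payment method as the account default and settle any
    # unpaid invoices with it.
    api.set_default_payment_method(account_id, payment_method_id,
                                   created_by='admin',
                                   pay_all_unpaid_invoices=True)
    # Move any remaining credit from a child account up to its parent.
    api.transfer_child_credit_to_parent(child_account_id,
                                        created_by='admin',
                                        comment='consolidate credit')
    # Update mutable account fields; with treat_null_as_reset enabled,
    # fields left unset in the body are reset on the server.
    api.update_account(account_id, account_body,
                       created_by='admin',
                       treat_null_as_reset=True)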
| apache-2.0 | 8,223,314,462,339,926,000 | 42.553597 | 207 | 0.58277 | false |
nmarley/dash | contrib/zmq/zmq_sub.py | 1 | 5988 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
ZMQ example using python3's asyncio
Dash should be started with the command line arguments:
dashd -testnet -daemon \
-zmqpubrawtx=tcp://127.0.0.1:28332 \
-zmqpubrawblock=tcp://127.0.0.1:28332 \
-zmqpubhashtx=tcp://127.0.0.1:28332 \
-zmqpubhashblock=tcp://127.0.0.1:28332
We use the asyncio library here. `self.handle()` installs itself as a
future at the end of the function. Since it never returns with the event
loop having an empty stack of futures, this creates an infinite loop. An
alternative is to wrap the contents of `handle` inside `while True`.
A blocking example using python 2.7 can be obtained from the git history:
https://github.com/bitcoin/bitcoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py
"""
import binascii
import asyncio
import zmq
import zmq.asyncio
import signal
import struct
import sys
if not (sys.version_info.major >= 3 and sys.version_info.minor >= 5):
print("This example only works with Python 3.5 and greater")
exit(1)
port = 28332
class ZMQHandler():
def __init__(self):
self.loop = asyncio.get_event_loop()
self.zmqContext = zmq.asyncio.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashchainlock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtx")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtxlock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashgovernancevote")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashgovernanceobject")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashinstantsenddoublespend")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawchainlock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawchainlocksig")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtx")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtxlock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtxlocksig")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawgovernancevote")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawgovernanceobject")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawinstantsenddoublespend")
self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)
async def handle(self) :
msg = await self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
sequence = "Unknown"
if len(msg[-1]) == 4:
msgSequence = struct.unpack('<I', msg[-1])[-1]
sequence = str(msgSequence)
if topic == b"hashblock":
print('- HASH BLOCK ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"hashchainlock":
print('- HASH CHAINLOCK ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"hashtx":
print ('- HASH TX ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"hashtxlock":
print('- HASH TX LOCK ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"hashgovernancevote":
print('- HASH GOVERNANCE VOTE ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"hashgovernanceobject":
print('- HASH GOVERNANCE OBJECT ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"hashinstantsenddoublespend":
print('- HASH IS DOUBLE SPEND ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"rawblock":
print('- RAW BLOCK HEADER ('+sequence+') -')
print(binascii.hexlify(body[:80]).decode("utf-8"))
elif topic == b"rawchainlock":
print('- RAW CHAINLOCK ('+sequence+') -')
print(binascii.hexlify(body[:80]).decode("utf-8"))
elif topic == b"rawchainlocksig":
print('- RAW CHAINLOCK SIG ('+sequence+') -')
print(binascii.hexlify(body[:80]).decode("utf-8"))
elif topic == b"rawtx":
print('- RAW TX ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"rawtxlock":
print('- RAW TX LOCK ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"rawtxlocksig":
print('- RAW TX LOCK SIG ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"rawgovernancevote":
print('- RAW GOVERNANCE VOTE ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"rawgovernanceobject":
print('- RAW GOVERNANCE OBJECT ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"rawinstantsenddoublespend":
print('- RAW IS DOUBLE SPEND ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
# schedule ourselves to receive the next message
asyncio.ensure_future(self.handle())
def start(self):
self.loop.add_signal_handler(signal.SIGINT, self.stop)
self.loop.create_task(self.handle())
self.loop.run_forever()
def stop(self):
self.loop.stop()
self.zmqContext.destroy()
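# Illustrative variant (not part of the original example). The module
# docstring mentions wrapping the body of handle() in `while True` instead of
# re-installing it as a new future after every message; this subclass sketches
# that alternative, with per-topic handling trimmed to a single print.
class ZMQHandlerLoop(ZMQHandler):
    async def handle(self):
        while True:
            msg = await self.zmqSubSocket.recv_multipart()
            topic, body = msg[0], msg[1]
            print(topic.decode("utf-8"), binascii.hexlify(body).decode("utf-8"))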
daemon = ZMQHandler()
daemon.start()
| mit | 356,747,117,795,739,300 | 43.686567 | 107 | 0.629092 | false |
mpoullet/audio-tools | KissFFT/kiss_fft130/test/testkiss.py | 1 | 3565 | #!/usr/bin/env python
import math
import sys
import os
import random
import struct
import popen2
import getopt
import numpy
pi=math.pi
e=math.e
j=complex(0,1)
doreal=0
datatype = os.environ.get('DATATYPE','float')
util = '../tools/fft_' + datatype
minsnr=90
if datatype == 'double':
fmt='d'
elif datatype=='int16_t':
fmt='h'
minsnr=10
elif datatype=='int32_t':
fmt='i'
elif datatype=='simd':
fmt='4f'
sys.stderr.write('testkiss.py does not yet test simd')
sys.exit(0)
elif datatype=='float':
fmt='f'
else:
sys.stderr.write('unrecognized datatype %s\n' % datatype)
sys.exit(1)
def dopack(x,cpx=1):
x = numpy.reshape( x, ( numpy.size(x),) )
if cpx:
s = ''.join( [ struct.pack(fmt*2,c.real,c.imag) for c in x ] )
else:
s = ''.join( [ struct.pack(fmt,c.real) for c in x ] )
return s
def dounpack(x,cpx):
uf = fmt * ( len(x) / struct.calcsize(fmt) )
s = struct.unpack(uf,x)
if cpx:
return numpy.array(s[::2]) + numpy.array( s[1::2] )*j
else:
return numpy.array(s )
def make_random(dims=[1]):
res = []
for i in range(dims[0]):
if len(dims)==1:
r=random.uniform(-1,1)
if doreal:
res.append( r )
else:
i=random.uniform(-1,1)
res.append( complex(r,i) )
else:
res.append( make_random( dims[1:] ) )
return numpy.array(res)
def flatten(x):
ntotal = numpy.size(x)
return numpy.reshape(x,(ntotal,))
def randmat( ndims ):
dims=[]
for i in range( ndims ):
curdim = int( random.uniform(2,5) )
if doreal and i==(ndims-1):
curdim = int(curdim/2)*2 # force even last dimension if real
dims.append( curdim )
return make_random(dims )
def test_fft(ndims):
x=randmat( ndims )
if doreal:
xver = numpy.fft.rfftn(x)
else:
xver = numpy.fft.fftn(x)
open('/tmp/fftexp.dat','w').write(dopack( flatten(xver) , True ) )
x2=dofft(x,doreal)
err = xver - x2
errf = flatten(err)
xverf = flatten(xver)
errpow = numpy.vdot(errf,errf)+1e-10
sigpow = numpy.vdot(xverf,xverf)+1e-10
snr = 10*math.log10(abs(sigpow/errpow) )
print 'SNR (compared to NumPy) : %.1fdB' % float(snr)
if snr<minsnr:
print 'xver=',xver
print 'x2=',x2
print 'err',err
sys.exit(1)
def dofft(x,isreal):
dims=list( numpy.shape(x) )
x = flatten(x)
scale=1
if datatype=='int16_t':
x = 32767 * x
scale = len(x) / 32767.0
elif datatype=='int32_t':
x = 2147483647.0 * x
scale = len(x) / 2147483647.0
cmd='%s -n ' % util
cmd += ','.join([str(d) for d in dims])
if doreal:
cmd += ' -R '
print cmd
p = popen2.Popen3(cmd )
open('/tmp/fftin.dat','w').write(dopack( x , isreal==False ) )
p.tochild.write( dopack( x , isreal==False ) )
p.tochild.close()
res = dounpack( p.fromchild.read() , 1 )
open('/tmp/fftout.dat','w').write(dopack( flatten(res) , True ) )
if doreal:
dims[-1] = int( dims[-1]/2 ) + 1
res = scale * res
p.wait()
return numpy.reshape(res,dims)
def main():
opts,args = getopt.getopt(sys.argv[1:],'r')
opts=dict(opts)
global doreal
doreal = opts.has_key('-r')
if doreal:
print 'Testing multi-dimensional real FFTs'
else:
print 'Testing multi-dimensional FFTs'
for dim in range(1,4):
test_fft( dim )
if __name__ == "__main__":
main()
| mit | -8,352,119,809,851,959,000 | 21.006173 | 72 | 0.555961 | false |
chrissorchard/malucrawl | malware_crawl/tasks.py | 1 | 2599 | from celery import task, chord
from .scan import scanners, heavy_scanners
from .search import search_engines
from .source import sources
from datetime import datetime
from dateutil.tz import tzutc
from models import TopicSet
# validator = jsonschema.Draft3Validator(json.loads(pkgutil.get_data("malware_crawl", "malware_discovery_schema.json")))
def complete_crawl():
for source in sources:
source.apply_async(
link=begin_search.subtask(args=(source,))
)
# todo: repeat old searches
@task
def begin_search(keywords, source):
discovered = datetime.now(tzutc())
ts = TopicSet.objects.create(
discovered=discovered,
source=source
)
for keyword in keywords:
topic = ts.topic_set.create(
keyword=keyword
)
for engine in search_engines:
engine.apply_async(
args=(keyword,), link=begin_scan.subtask(args=(engine, topic))
)
@task
def begin_scan(urls, engine, topic):
discovered = datetime.now(tzutc())
search = topic.search_set.create(
discovered=discovered,
source=engine
)
for url in urls:
result = search.result_set.create(
url=url
)
for scanner in scanners:
report = result.malwarereport_set.create(
reporter=scanner
)
scanner.apply_async(
args=(url,),
link=begin_store.subtask(
args=(report,)
)
)
"""
# Check to see if we should scan heavily
def check_opinions(all_opinions, reporters):
print all_opinions
return False
@task
def accept_scan(all_opinions, reporters, url, result):
if check_opinions(all_opinions, reporters):
for scanner in heavy_scanners:
report = result.malwarereport_set.create(
reporter=scanner
)
scanner.apply_async(
args=(url,),
link=begin_store.subtask(
args=(report,)
)
)
for opinions, reporter in zip(all_opinions, reporters):
begin_store.apply_async(
args=(opinions, report)
)
"""
@task
def begin_store(opinions, report):
for opinion in opinions:
report.opinion_set.create(
type=opinion["type"],
confidence=opinion["confidence"]
)
@task
def tprint(content):
print content
| mit | -8,517,734,512,202,998,000 | 22.518868 | 120 | 0.560215 | false |
atvKumar/TheWatcher | mkEmail.py | 1 | 5302 | from smtplib import SMTP, SMTP_SSL
from smtplib import SMTPException
from mimetypes import guess_type
from os.path import basename
from email.utils import COMMASPACE
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.encoders import encode_base64
class EmailConnectionError(Exception):
pass
class SendEmailError(Exception):
pass
def get_email(email):
if '<' in email:
data = email.split('<')
email = data[1].split('>')[0].strip()
return email.strip()
class Email(object):
def __init__(self, from_, to, subject, message, message_type='plain',
attachments=None, cc=None, bcc=None,
message_encoding='us-ascii', multi_to=False, multi_cc=False,
multi_bcc=False, multi_attach=False):
self.email = MIMEMultipart()
self.message = message
self.email['From'] = from_
if not multi_to:
self.email['To'] = to
else:
self.email['To'] = COMMASPACE.join(to)
self.email['Subject'] = subject
self.email['subject'] = subject # Case Sensitive Email-Readers
if cc is not None:
if not multi_cc:
self.email['Cc'] = cc
else:
self.email['Cc'] = COMMASPACE.join(cc)
if bcc is not None:
if not multi_bcc:
self.email['bcc'] = bcc
else:
self.email['bcc'] = COMMASPACE.join(bcc)
text = MIMEText(message, message_type, message_encoding)
self.email.attach(text)
if attachments is not None:
if multi_attach:
for filename in attachments:
self.attach(filename)
else:
self.attach(attachments)
def debug(self, mime=False):
print 'From : ', self.email['From']
print 'To : ', self.email['To']
print 'Cc : ', self.email['Cc']
print 'Bcc : ', self.email['bcc']
print 'Subject : ', self.email['Subject']
print 'Message :', self.message
if mime:
print self.email.as_string()
def attach(self, filename):
mimetype, encoding = guess_type(filename)
if mimetype is None:
mimetype = 'application/octet-stream'
mimetype = mimetype.split('/', 1)
fp = open(filename, 'rb')
attachment = MIMEBase(mimetype[0], mimetype[1])
attachment.set_payload(fp.read())
fp.close()
encode_base64(attachment)
attachment.add_header('Content-Disposition', 'attachment',
filename=basename(filename))
self.email.attach(attachment)
def __str__(self):
return self.email.as_string()
class EmailConnection(object):
def __init__(self, server, username, password, debug=False):
if ':' in server:
data = server.split(':')
self.server = data[0]
self.port = int(data[1])
else:
self.server = server
self.port = 25
self.username = username
self.password = password
self.connect(debug)
def __enter__(self):
return self
def __exit__(self, exception_type, exception_val, trace):
self.close()
def connect(self, debug):
self.connection = SMTP(host=self.server, port=self.port)
if debug: # Debug Information
# self.debuglevel = 1
self.connection.set_debuglevel(debug)
# identify ourselves, prompting server for supported features
self.connection.ehlo()
# If we can encrypt this session, do it
if self.connection.has_extn('STARTTLS'):
self.connection.starttls()
self.connection.ehlo()
self.connection.esmtp_features['auth'] = 'PLAIN LOGIN'
self.connection.login(self.username, self.password)
def send(self, message, from_=None, to=None, verify=False):
if type(message) == str:
if from_ is None or to is None:
raise EmailConnectionError('You need to specify `from_` '
'and `to`')
else:
from_ = get_email(from_)
to = get_email(to)
else:
from_ = message.email['From']
if 'Cc' not in message.email:
message.email['Cc'] = ''
if 'bcc' not in message.email:
message.email['bcc'] = ''
to_emails = list(message.email['To'].split(',')) + \
message.email['Cc'].split(',') + \
message.email['bcc'].split(',')
to = [get_email(complete_email) for complete_email in to_emails]
message = str(message)
if verify:
for each_email in to_emails:
self.connection.verify(each_email)
# TODO option - remove emails that failed verification
# return self.connection.sendmail(from_, to, message)
try:
self.connection.sendmail(from_, to, message)
except SMTPException:
raise SendEmailError('Message Could not be sent!')
def close(self):
self.connection.close()
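# ---------------------------------------------------------------------------
# Illustrative usage (not part of the original module). The server address,
# credentials and e-mail addresses below are placeholders.
# ---------------------------------------------------------------------------
def _example_send(server='smtp.example.com:587',
                  username='[email protected]', password='secret'):
    message = Email(from_='Me <[email protected]>',
                    to='You <[email protected]>',
                    subject='Test message',
                    message='Hello from mkEmail',
                    attachments=None)
    connection = EmailConnection(server, username, password)
    try:
        connection.send(message)
    finally:
        connection.close()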
| apache-2.0 | -3,787,192,903,398,085,000 | 35.068027 | 77 | 0.557714 | false |
chrislit/abydos | tests/distance/test_distance_faith.py | 1 | 4993 | # Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.tests.distance.test_distance_faith.
This module contains unit tests for abydos.distance.Faith
"""
import unittest
from abydos.distance import Faith
class FaithTestCases(unittest.TestCase):
"""Test Faith functions.
abydos.distance.Faith
"""
cmp = Faith()
cmp_no_d = Faith(alphabet=0)
def test_faith_sim(self):
"""Test abydos.distance.Faith.sim."""
# Base cases
self.assertEqual(self.cmp.sim('', ''), 0.5)
self.assertEqual(self.cmp.sim('a', ''), 0.4987244897959184)
self.assertEqual(self.cmp.sim('', 'a'), 0.4987244897959184)
self.assertEqual(self.cmp.sim('abc', ''), 0.49744897959183676)
self.assertEqual(self.cmp.sim('', 'abc'), 0.49744897959183676)
self.assertEqual(self.cmp.sim('abc', 'abc'), 0.5025510204081632)
self.assertEqual(self.cmp.sim('abcd', 'efgh'), 0.49362244897959184)
self.assertAlmostEqual(self.cmp.sim('Nigel', 'Niall'), 0.4980867347)
self.assertAlmostEqual(self.cmp.sim('Niall', 'Nigel'), 0.4980867347)
self.assertAlmostEqual(self.cmp.sim('Colin', 'Coiln'), 0.4980867347)
self.assertAlmostEqual(self.cmp.sim('Coiln', 'Colin'), 0.4980867347)
self.assertAlmostEqual(self.cmp.sim('ATCAACGAGT', 'AACGATTAG'), 0.5)
# Tests with alphabet=0 (no d factor)
self.assertEqual(self.cmp_no_d.sim('', ''), 0.0)
self.assertEqual(self.cmp_no_d.sim('a', ''), 0.0)
self.assertEqual(self.cmp_no_d.sim('', 'a'), 0.0)
self.assertEqual(self.cmp_no_d.sim('abc', ''), 0.0)
self.assertEqual(self.cmp_no_d.sim('', 'abc'), 0.0)
self.assertEqual(self.cmp_no_d.sim('abc', 'abc'), 1.0)
self.assertEqual(self.cmp_no_d.sim('abcd', 'efgh'), 0.0)
self.assertAlmostEqual(
self.cmp_no_d.sim('Nigel', 'Niall'), 0.3333333333
)
self.assertAlmostEqual(
self.cmp_no_d.sim('Niall', 'Nigel'), 0.3333333333
)
self.assertAlmostEqual(
self.cmp_no_d.sim('Colin', 'Coiln'), 0.3333333333
)
self.assertAlmostEqual(
self.cmp_no_d.sim('Coiln', 'Colin'), 0.3333333333
)
self.assertAlmostEqual(
self.cmp_no_d.sim('ATCAACGAGT', 'AACGATTAG'), 0.5
)
def test_faith_dist(self):
"""Test abydos.distance.Faith.dist."""
# Base cases
self.assertEqual(self.cmp.dist('', ''), 0.5)
self.assertEqual(self.cmp.dist('a', ''), 0.5012755102040816)
self.assertEqual(self.cmp.dist('', 'a'), 0.5012755102040816)
self.assertEqual(self.cmp.dist('abc', ''), 0.5025510204081632)
self.assertEqual(self.cmp.dist('', 'abc'), 0.5025510204081632)
self.assertEqual(self.cmp.dist('abc', 'abc'), 0.49744897959183676)
self.assertEqual(self.cmp.dist('abcd', 'efgh'), 0.5063775510204082)
self.assertAlmostEqual(self.cmp.dist('Nigel', 'Niall'), 0.5019132653)
self.assertAlmostEqual(self.cmp.dist('Niall', 'Nigel'), 0.5019132653)
self.assertAlmostEqual(self.cmp.dist('Colin', 'Coiln'), 0.5019132653)
self.assertAlmostEqual(self.cmp.dist('Coiln', 'Colin'), 0.5019132653)
self.assertAlmostEqual(self.cmp.dist('ATCAACGAGT', 'AACGATTAG'), 0.5)
# Tests with alphabet=0 (no d factor)
self.assertEqual(self.cmp_no_d.dist('', ''), 1.0)
self.assertEqual(self.cmp_no_d.dist('a', ''), 1.0)
self.assertEqual(self.cmp_no_d.dist('', 'a'), 1.0)
self.assertEqual(self.cmp_no_d.dist('abc', ''), 1.0)
self.assertEqual(self.cmp_no_d.dist('', 'abc'), 1.0)
self.assertEqual(self.cmp_no_d.dist('abc', 'abc'), 0.0)
self.assertEqual(self.cmp_no_d.dist('abcd', 'efgh'), 1.0)
self.assertAlmostEqual(
self.cmp_no_d.dist('Nigel', 'Niall'), 0.6666666667
)
self.assertAlmostEqual(
self.cmp_no_d.dist('Niall', 'Nigel'), 0.6666666667
)
self.assertAlmostEqual(
self.cmp_no_d.dist('Colin', 'Coiln'), 0.6666666667
)
self.assertAlmostEqual(
self.cmp_no_d.dist('Coiln', 'Colin'), 0.6666666667
)
self.assertAlmostEqual(
self.cmp_no_d.dist('ATCAACGAGT', 'AACGATTAG'), 0.5
)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -651,294,029,013,972,900 | 39.92623 | 77 | 0.622471 | false |
thinkle/gourmet | gourmet/plugins/duplicate_finder/recipeMerger.py | 1 | 25533 | """recipeMerger.py
This module contains code for handling the 'merging' of duplicate
recipes.
"""
import os.path
import time
from typing import Union
from gettext import gettext as _
from gi.repository import Gtk, Pango
from gourmet import convert, gglobals, recipeIdentifier, recipeManager
from gourmet.gtk_extras import ratingWidget, mnemonic_manager, dialog_extras
NEWER = 1
OLDER = 2
try:
current_path = os.path.split(os.path.join(os.getcwd(),__file__))[0]
except:
current_path = ''
def time_to_text (val):
curtime = time.time()
if val == 0:
return 'Unknown'
# within 18 hours, return in form 4 hours 23 minutes ago or some such
if curtime - val < 18 * 60 * 60:
return _("%s ago")%convert.seconds_to_timestring(curtime-val,round_at=1)
tupl=time.localtime(val)
if curtime - val < 7 * 24 * 60 * 60:
return time.strftime('%A %T',tupl)
else:
return time.strftime('%D %T',tupl)
class ConflictError (ValueError):
def __init__ (self, conflicts):
self.conflicts = conflicts
class RecipeMergerDialog:
"""A dialog to allow the user to merge recipes.
"""
    # These line up with the positions of the options in the search-type
    # combo box in glade...
RECIPE_DUP_MODE = 0
ING_DUP_MODE = 1
COMPLETE_DUP_MODE = 2
DUP_INDEX_PAGE = 0
MERGE_PAGE = 1
def __init__ (self, rd=None, in_recipes=None, on_close_callback=None):
if rd:
self.rd = rd
else:
self.rd = recipeManager.get_recipe_manager()
self.in_recipes = in_recipes
self.on_close_callback = on_close_callback
self.to_merge = [] # Queue of recipes to be merged...
self.ui = Gtk.Builder()
self.ui.add_from_file(os.path.join(current_path,'recipeMerger.ui'))
self.get_widgets()
self.searchTypeCombo.set_active(self.COMPLETE_DUP_MODE)
self.mm = mnemonic_manager.MnemonicManager()
self.mm.add_builder(self.ui)
self.mm.fix_conflicts_peacefully()
self.ui.connect_signals(
{
'on_searchTypeCombo_changed':lambda *args: self.populate_tree(),
'on_includeDeletedRecipesCheckButton_toggled':lambda *args: self.populate_tree(),
'on_mergeAllButton_clicked':self.merge_all,
'on_cancelMergeButton_clicked':self.cancel_merge,
'on_mergeSelectedButton_clicked':self.merge_selected,
'on_applyButton_clicked':self.apply_merge,
'auto_merge':self.offer_auto_merge,
'close':self.close,
}
)
def get_widgets (self):
for w in [
'recipeDiffScrolledWindow',
'duplicateRecipeTreeView',
'mergeAllButton','mergeSelectedButton', # buttons on list-dups page (minus close button)
'applyMergeButton','closeMergeButton','cancelMergeButton', # buttons on merge-recs page
'searchTypeCombo','includeDeletedRecipesCheckButton','notebook',
'mergeInfoLabel'
]:
setattr(self,w,self.ui.get_object(w))
self.setup_treeview()
def setup_treeview (self):
renderer = Gtk.CellRendererText()
col = Gtk.TreeViewColumn('Recipe',renderer,text=2)
self.duplicateRecipeTreeView.append_column(col)
self.duplicateRecipeTreeView.insert_column_with_data_func(
-1, # position
'Last Modified', # title
renderer, # renderer
self.time_cell_data_func, # function
3 # data column
)
col = Gtk.TreeViewColumn('Duplicates',renderer,text=4)
self.duplicateRecipeTreeView.append_column(col)
self.duplicateRecipeTreeView.get_selection().set_mode(Gtk.SelectionMode.MULTIPLE)
def time_cell_data_func (self, tree_column, cell, model, titer, data_col):
"""Display time in treeview cell.
"""
val = model.get_value(titer,data_col)
cell.set_property('text',time_to_text(val))
def populate_tree (self):
"""Populate treeview with duplicate recipes.
"""
#print 'CALL: populate_tree'
search_mode =self.searchTypeCombo.get_active()
include_deleted = self.includeDeletedRecipesCheckButton.get_active()
if search_mode == self.RECIPE_DUP_MODE:
dups = self.rd.find_duplicates(by='recipe',
recipes=self.in_recipes,
include_deleted=include_deleted)
elif search_mode == self.ING_DUP_MODE:
dups = self.rd.find_duplicates(by='ingredient',
recipes=self.in_recipes,
include_deleted=include_deleted)
else: # == self.COMPLETE_DUP_MODE
dups = self.rd.find_complete_duplicates(include_deleted=include_deleted,
recipes=self.in_recipes)
self.setup_treemodel(dups)
self.dups = dups
self.duplicateRecipeTreeView.set_model(self.treeModel)
def setup_treemodel (self, dups):
self.treeModel = Gtk.TreeStore(int,int,str,int,str) # dup_index, rec_id, rec_title, last_modified, number_of_duplicates
for dup_index,duplicate_recipes in enumerate(dups):
first = duplicate_recipes[0]
others = duplicate_recipes[1:]
nduplicates = len(duplicate_recipes)
r = self.rd.get_rec(first)
firstIter = self.treeModel.append(
None,
(dup_index or 0, first or 0, r.title or '', r.last_modified or 0, str(nduplicates))
)
for o in others:
r = self.rd.get_rec(o)
self.treeModel.append(firstIter,
(dup_index,o,r.title,r.last_modified or 0,'')
)
def merge_next_recipe (self, ):
if self.to_merge:
self.current_dup_index = self.to_merge.pop(0)
self.mergeInfoLabel.set_text(
'Merging recipe %(index)s of %(total)s'%{
'index':self.total_to_merge - len(self.to_merge),
'total':self.total_to_merge
})
duplicate_recipes = self.dups[self.current_dup_index]
#self.idt = IngDiffTable(self.rd,duplicate_recipes[0],duplicate_recipes[1])
self.current_recs = [self.rd.get_rec(i) for i in duplicate_recipes]
last_modified = {'last_modified':[r.last_modified for r in self.current_recs]}
self.current_diff_data = recipeIdentifier.diff_recipes(self.rd,self.current_recs)
last_modified.update(self.current_diff_data)
self.diff_table = DiffTable(last_modified,self.current_recs[0],parent=self.recipeDiffScrolledWindow)
self.diff_table.add_ingblocks(self.rd, self.current_recs)
if not self.diff_table.idiffs and not self.current_diff_data:
# If there are no differences, just merge the recipes...
self.apply_merge()
return
if self.recipeDiffScrolledWindow.get_child():
self.recipeDiffScrolledWindow.remove(self.recipeDiffScrolledWindow.get_child())
self.diff_table.show()
#self.idt.show()
vb = Gtk.VBox()
vb.add(self.diff_table)
#vb.add(self.idt)
vb.show()
#self.recipeDiffScrolledWindow.add_with_viewport(self.diff_table)
self.recipeDiffScrolledWindow.add_with_viewport(vb)
self.notebook.set_current_page(self.MERGE_PAGE)
else:
self.notebook.set_current_page(self.DUP_INDEX_PAGE)
def do_merge (self, merge_dic, recs, to_keep=None):
if not to_keep:
to_keep = recs[0]
if isinstance(to_keep, int):
to_keep = self.rd.get_rec(to_keep)
self.rd.modify_rec(to_keep,merge_dic)
for r in recs:
if r.id != to_keep.id:
self.rd.delete_rec(r)
def apply_merge (self, *args):
#print "CALL: apply_merge"
#print 'Apply ',self.diff_table.selected_dic,'on ',self.diff_table.rec
self.do_merge(self.diff_table.selected_dic,
self.current_recs,
to_keep=self.diff_table.rec)
self.merge_next_recipe()
if not self.to_merge:
self.populate_tree()
def merge_selected (self, *args):
"""Merge currently selected row from treeview.
"""
#print "CALL: merge_selected"
mod,rows = self.duplicateRecipeTreeView.get_selection().get_selected_rows()
dup_indices = [mod[r][0] for r in rows]
self.to_merge = []
for d in dup_indices:
if d not in self.to_merge:
self.to_merge.append(d)
self.total_to_merge = len(self.to_merge)
self.merge_next_recipe()
def merge_all (self, *args):
"""Merge all rows currently in treeview.
"""
self.total_to_merge = len(self.dups)
self.to_merge = list(range(self.total_to_merge))
self.merge_next_recipe()
def offer_auto_merge (self, *args):
try:
option =dialog_extras.getOption(
label=_('Auto-Merge recipes'),
options=[
(_('Always use newest recipe'),NEWER),
(_('Always use oldest recipe'),OLDER),
# The following would be nice to add eventually...
#_('Always use longer field'),
#_('Ignore differences in ingredient keys')
]
)
if not option:
return
self.do_auto_merge(NEWER)
except dialog_extras.UserCancelledError:
pass
def do_auto_merge (self, mode):
if self.recipeDiffScrolledWindow.get_child():
self.recipeDiffScrolledWindow.remove(self.recipeDiffScrolledWindow.get_child())
vb = Gtk.VBox()
l = Gtk.Label()
l.set_markup('<u>Automatically merged recipes</u>')
vb.pack_start(l,expand=False,fill=False); vb.show_all()
self.recipeDiffScrolledWindow.add_with_viewport(vb)
def do_auto_merge ():
kept = self.auto_merge_current_rec(mode)
label = Gtk.Label(label='%s'%kept.title)
vb.pack_start(label,expand=False,fill=False); label.show()
self.cancelMergeButton.hide()
self.applyMergeButton.hide()
self.closeMergeButton.set_sensitive(False)
do_auto_merge()
while self.to_merge:
self.mergeInfoLabel.set_text(
'Automatically merging recipe %(index)s of %(total)s'%{
'index':self.total_to_merge - len(self.to_merge),
'total':self.total_to_merge
})
self.current_dup_index = self.to_merge.pop(0)
duplicate_recipes = self.dups[self.current_dup_index]
self.current_recs = [self.rd.get_rec(i) for i in duplicate_recipes]
do_auto_merge()
while Gtk.events_pending(): Gtk.main_iteration()
self.mergeInfoLabel.set_text('Automatically merged %s recipes'%self.total_to_merge)
self.closeMergeButton.set_sensitive(True)
def auto_merge_current_rec (self, mode):
        assert mode in [NEWER, OLDER]  # TODO: make this an enum and type-annotate it
self.current_recs.sort(key=lambda x: x.last_modified, reverse=(mode==OLDER))
keeper = self.current_recs[0]
tossers = self.current_recs[1:]
for to_toss in tossers:
self.rd.delete_rec(to_toss)
return keeper
def cancel_merge (self, *args):
self.merge_next_recipe()
if not self.to_merge:
self.populate_tree()
def populate_tree_if_possible (self):
self.populate_tree()
if not self.dups:
self.searchTypeCombo.set_active(self.RECIPE_DUP_MODE)
self.populate_tree()
if not self.dups:
self.searchTypeCombo.set_active(self.ING_DUP_MODE)
self.populate_tree()
def show_if_there_are_dups (self, label=None):
self.populate_tree_if_possible()
if self.dups:
self.show(label=label)
else:
self.ui.get_object('window1').destroy()
def show (self, label=None):
if label:
messagebox = self.ui.get_object('messagebox')
l = Gtk.Label(label=label)
l.set_line_wrap(True)
infobar = Gtk.InfoBar()
infobar.set_message_type(Gtk.MessageType.INFO)
infobar.get_content_area().add(l)
infobar.show_all()
messagebox.pack_start(infobar, True, False)
self.ui.get_object('window1').show()
def close (self, *args):
#print "CALL: close"
w = self.ui.get_object('window1')
w.hide()
w.destroy()
if self.on_close_callback:
self.on_close_callback(self)
class RecipeMerger:
"""A class to handle recipe merging.
"""
def __init__ (self, rd):
self.rd = rd
def autoMergeRecipes (self, recs):
to_fill,conflicts = recipeIdentifier.merge_recipes(self.rd, recs)
if conflicts:
raise ConflictError(conflicts)
else:
to_keep = recs[0]
# Update a single recipe with our information...
self.rd.modify_rec(to_keep,to_fill)
# Delete the other recipes...
for r in recs[1:]:
self.rd.delete_rec(r.id)
def uiMergeRecipes (self, recs):
diffs = recipeIdentifier.diff_recipes(self.rd, recs)
        # compare the ingredient lists of the first two duplicate recipes
        idiffs = recipeIdentifier.diff_ings(self.rd, recs[0], recs[1])
if diffs:
return DiffTable(diffs,recs[0])
else:
return None
class DiffTable (Gtk.Table):
"""A Table displaying differences in a recipe.
diff_dic is a dictionary with the differences.
{'attribute':(VAL1,VAL2,...)}
recipe_object is a recipe object representing one of our duplicate
recs, from which we can grab attributes that are not different.
dont_choose is a list of attributes whose differences are
displayed, but where no choice is offered (such as modification
time for the recipe).
"""
def __init__ (self, diff_dic, recipe_object=None, parent=None,
dont_choose=[]):
self.idiffs = []
self.diff_dic = diff_dic
Gtk.Table.__init__(self)
self.selected_dic = {}
self.set_col_spacings(6)
self.set_row_spacings(6)
self.row = 0
self.max_cols = 1
for attr,name,typ in [('last_modified','Last Modified',None)] + gglobals.REC_ATTRS \
+ [('image','Image',None)] \
+ [(attr,gglobals.TEXT_ATTR_DIC[attr],None) for attr in gglobals.DEFAULT_TEXT_ATTR_ORDER]:
if attr in diff_dic:
buttons = self.build_options(attr,self.diff_dic[attr])
label = Gtk.Label(label='_'+name+':')
label.set_alignment(0.0,0.5)
label.set_use_underline(True)
label.show()
self.attach(label,0,1,self.row,self.row+1,xoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL,yoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL)
target = None
for col,b in enumerate(buttons):
self.setup_widget_size(b,in_col=True)
b.show()
if not target:
target = b
label.set_mnemonic_widget(target)
self.attach(b,col+1,col+2,self.row,self.row+1,xoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL,yoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL)
if col > self.max_cols: self.max_cols = col
self.row += 1
elif recipe_object and hasattr(recipe_object,attr) and getattr(recipe_object,attr):
att_label = Gtk.Label(label=name+':')
att_label.set_use_underline(True)
att_label.set_alignment(0,0.5)
att_label.show()
constructor = get_display_constructor(attr)
val = getattr(recipe_object,attr)
val_label = constructor(getattr(recipe_object,attr))
val_label.show()
self.setup_widget_size(val_label,False)
if hasattr(val_label,'set_alignment'): val_label.set_alignment(0,0.5)
self.attach(att_label,0,1,self.row,self.row+1,xoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL,yoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL)
self.attach(val_label,1,5,self.row,self.row+1,xoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL,yoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL)
self.row += 1
self.mm = mnemonic_manager.MnemonicManager()
self.mm.add_toplevel_widget(self)
self.mm.fix_conflicts_peacefully()
self.rec = recipe_object.id
def setup_widget_size (self, w, in_col=True):
if in_col:
w.set_size_request(230,-1)
else:
w.set_size_request(650,-1)
def build_options (self, attribute, values):
buttons = []
group_rb = None
make_widget = get_display_constructor(attribute)
for v in values:
rb = Gtk.RadioButton(group=group_rb)
if not group_rb: group_rb = rb
if v is not None:
rb.add(make_widget(v))
else:
rb.add(Gtk.Label(label=_("None")))
rb.show_all()
buttons.append(rb)
rb.connect('toggled',self.value_toggled,attribute,v)
self.selected_dic[attribute] = values[0]
for n,v in enumerate(values):
if v:
buttons[n].set_active(True)
break
return buttons
def value_toggled (self, rb, attribute, v):
self.selected_dic[attribute] = v
def add_ingblocks (self, rd, recs):
#print 'add_ingblocks for ',[r.id for r in recs]
self.rd = rd
self.iblock_dic = {}
if len(recs) == 1:
blocks = recipeIdentifier.format_ingdiff_line(recipeIdentifier.format_ings(recs[0],self.rd))
self.iblock_dic[blocks[0]] = recs[0]
else:
blocks = []
rec_0 = recs[0]
for r in recs[1:]:
chunks = self.get_ing_text_blobs(rec_0,r)
if not chunks and not blocks:
# If there is no diff, in other words, and we
# don't yet have any block...
chunks = [recipeIdentifier.format_ings(recs[0],self.rd)]
elif not chunks:
# Otherwise if there are no diffs we just continue
# our loop...
continue
if not blocks:
blocks = [chunks[0]]
self.iblock_dic[blocks[0]] = rec_0
if chunks and len(chunks) > 1:
new_block = chunks[1]
if new_block not in blocks:
blocks.append(new_block)
self.iblock_dic[new_block] = r
group_rb = None
name = _('Ingredients')
if len(blocks) > 1:
lab = Gtk.Label(label='_'+_("Ingredients")); lab.set_use_underline(True)
for col,block in enumerate(blocks):
rb = Gtk.RadioButton(
label=_("Recipe")+ ' ' +'%i'%(col+1),
group=group_rb
)
if not group_rb:
group_rb = rb
lab.set_mnemonic_widget(rb)
if not block:
rb.add(Gtk.Label(label=_("None")))
else:
for n,txt in enumerate(block):
l = Gtk.Label(label=txt)
l.set_alignment(0.0,0.0)
l.set_use_markup(True)
l.set_line_wrap(True); l.set_line_wrap_mode(Pango.WrapMode.WORD)
l.show()
self.setup_widget_size(l,in_col=True)
self.attach(l,col+1,col+2,self.row+1+n,self.row+2+n,
xoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL,
yoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL)
#rb.add(l)
rb.connect('toggled',self.ing_value_toggled,block)
self.setup_widget_size(rb,in_col=True)
rb.show()
self.attach(rb,col+1,col+2,self.row,self.row+1,xoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL,yoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL)
else:
lab = Gtk.Label(label=_("Ingredients")); lab.show()
l = Gtk.Label(label=blocks[0])
l.set_alignment(0.0,0.0)
l.set_use_markup(True)
l.set_line_wrap(True); l.set_line_wrap_mode(Pango.WrapMode.WORD)
l.show()
self.attach(l,1,5,self.row,self.row+1,xoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL,yoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL)
lab.set_alignment(0.0,0.0); lab.show()
self.attach(lab,0,1,self.row,self.row+1,xoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL,yoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL)
def ing_value_toggled (self, rb, block):
if rb.get_active():
#print 'RB clicked',rb,'for block',block
#print 'ING TOGGLED - REC = ',
self.rec = self.iblock_dic[block]
#print self.rec
def get_ing_text_blobs (self, r1, r2):
"""Return an ing-blurb for r1 and r2 suitable for display."""
idiff = recipeIdentifier.diff_ings(self.rd, r1, r2)
if idiff: self.idiffs.append(idiff)
def is_line (l):
return not (l == '<diff/>')
if idiff:
ret = []
for igroup in idiff:
ret.append((recipeIdentifier.format_ingdiff_line(i)
for i in filter(is_line,igroup)))
return ret
def put_text_in_scrolled_window(text: str) -> Gtk.ScrolledWindow:
sw = Gtk.ScrolledWindow()
tv = Gtk.TextView()
sw.add(tv)
tv.get_buffer().set_text(text)
tv.set_editable(False)
tv.set_wrap_mode(Gtk.WrapMode.WORD)
sw.set_policy(Gtk.PolicyType.NEVER,Gtk.PolicyType.AUTOMATIC)
tv.show()
return sw
def make_text_label(text: str, use_markup: bool = False) -> Union[Gtk.Label, Gtk.ScrolledWindow]:
if not text:
return Gtk.Label(label=_('None'))
elif len(text) < 30:
return Gtk.Label(label=text)
elif len(text) < 250:
label = Gtk.Label(label=text)
if use_markup:
label.set_use_markup(use_markup)
label.set_line_wrap_mode(Pango.WrapMode.WORD)
return label
else:
return put_text_in_scrolled_window(text)
def get_display_constructor (attribute):
if attribute == 'rating':
return lambda v: ratingWidget.StarImage(
ratingWidget.star_generator,
value=v,
upper=10)
elif attribute in ['preptime','cooktime']:
return lambda v: Gtk.Label(label=convert.seconds_to_timestring(v))
elif attribute=='image':
return lambda v: (v and Gtk.Label(label="An Image") or Gtk.Label(label="No Image"))
elif attribute in gglobals.DEFAULT_TEXT_ATTR_ORDER:
return make_text_label
elif attribute == 'last_modified':
return lambda v: Gtk.Label(label=time_to_text(v))
else:
return lambda v: v and Gtk.Label(label=v) or Gtk.Label(label=_('None'))
if __name__ == '__main__':
def test_in_window (widget):
"""Put widget in window and show it"""
w = Gtk.Window()
w.add(widget)
w.connect('delete-event',Gtk.main_quit)
w.show()
Gtk.main()
def test_difftable ():
class FakeRec:
pass
test_rec = FakeRec()
test_rec.title = 'Shloppidy Recipe'
test_data = {'rating':[4,7],
'category':['Dessert','Dessert, Cake'],
'cuisine':['American','All-American'],
'preptime':[6000,12000],
'cooktime':[6543,None]}
t = DiffTable(test_data,test_rec)
t.show()
test_in_window(t)
print(t.selected_dic)
def test_merger (rd, conflicts):
recs = [rd.get_rec(i) for i in conflicts]
rmerger = RecipeMerger(rd)
to_fill,conflict_dic = recipeIdentifier.merge_recipes(rd,recs)
if conflict_dic:
dt = rmerger.uiMergeRecipes(recs)
dt.show()
test_in_window(dt)
print(dt.selected_dic)
elif to_fill:
print('Differences in ',conflicts,'can be auto-filled with',to_fill)
else:
print('No differences in ',conflicts)
rd = recipeManager.default_rec_manager()
rmd = RecipeMergerDialog(rd)
rmd.populate_tree()
rmd.show()
rmd.ui.get_object('window1').connect('delete-event',Gtk.main_quit)
Gtk.main()
#dups = rd.find_complete_duplicates()
#for d in dups[5:]:
# test_merger(rd,d)
| gpl-2.0 | 6,182,747,767,306,343,000 | 39.464342 | 180 | 0.567932 | false |
JoseBlanca/seq_crumbs | test/seq/test_seqio.py | 1 | 10372 | # Copyright 2012 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia
# This file is part of seq_crumbs.
# seq_crumbs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# seq_crumbs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with seq_crumbs. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=R0201
# pylint: disable=R0904
# pylint: disable=C0111
import os
import unittest
from cStringIO import StringIO
from tempfile import NamedTemporaryFile
from subprocess import Popen, PIPE
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
from crumbs.utils.test_utils import TEST_DATA_DIR
from crumbs.utils.bin_utils import BIN_DIR
from crumbs.seq.seqio import (guess_seq_type, fastaqual_to_fasta, seqio,
_write_seqrecords, _read_seqrecords,
_itemize_fastx, read_seqs, write_seqs)
from crumbs.utils.tags import SEQITEM, SEQRECORD
from crumbs.exceptions import IncompatibleFormatError, MalformedFile
FASTA = ">seq1\natctagtc\n>seq2\natctagtc\n>seq3\natctagtc\n"
QUAL = ">seq1\n30 30 30 30 30 30 30 30\n>seq2\n30 30 30 30 30 30 30 30\n"
QUAL += ">seq3\n30 30 30 30 30 30 30 30\n"
FASTQ = '@seq1\natcgt\n+\n?????\n@seq2\natcgt\n+\n?????\n@seq3\natcgt\n+\n'
FASTQ += '?????\n'
class SeqIOTest(unittest.TestCase):
'It tests the seqio functions'
@staticmethod
def _make_fhand(content=None):
'It makes temporary fhands'
if content is None:
content = ''
fhand = NamedTemporaryFile()
fhand.write(content)
fhand.flush()
return fhand
def test_guess_seq_type(self):
'It guesses if the sequence is nucleotide or protein'
fpath = os.path.join(TEST_DATA_DIR, 'arabidopsis_genes')
assert guess_seq_type(open(fpath)) == 'nucl'
fpath = os.path.join(TEST_DATA_DIR, 'pairend2.sfastq')
assert guess_seq_type(open(fpath)) == 'nucl'
@staticmethod
def test_fastaqual_to_fasta():
seq_fhand = StringIO('>seq1\nattct\n>seq2\natc\n')
qual_fhand = StringIO('>seq1\n2 2 2 2 2\n>seq2\n2 2 2\n')
out_fhand = NamedTemporaryFile()
fastaqual_to_fasta(seq_fhand, qual_fhand, out_fhand)
fastq = open(out_fhand.name).read()
assert fastq == "@seq1\nattct\n+\n#####\n@seq2\natc\n+\n###\n"
def test_seqio(self):
'It tets the seqio function'
# fastq to fasta
out_fhand = NamedTemporaryFile()
seqio([self._make_fhand(FASTQ)], out_fhand, 'fasta')
assert ">seq1\natcgt" in open(out_fhand.name).read()
# fastq to fastq-illumina
out_fhand = NamedTemporaryFile()
seqio([self._make_fhand(FASTQ)], out_fhand, 'fastq-illumina')
assert "@seq1\natcgt\n+\n^^^^" in open(out_fhand.name).read()
out_fhand = NamedTemporaryFile()
seqio([self._make_fhand(FASTQ), self._make_fhand(FASTQ)],
out_fhand, 'fastq-illumina')
assert "@seq3\natcgt\n+\n^^^^^\n@seq1" in open(out_fhand.name).read()
# fasta to fastq
out_fhand = NamedTemporaryFile()
try:
seqio([self._make_fhand(FASTA)], out_fhand, 'fastq')
self.fail("error previously expected")
except IncompatibleFormatError as error:
assert 'No qualities available' in str(error)
# bad_format fastq
bad_fastq_fhand = self._make_fhand(FASTQ + 'aklsjhdas')
try:
seqio([bad_fastq_fhand], out_fhand, 'fasta')
self.fail("error previously expected")
except MalformedFile as error:
assert 'Lengths of sequence and quality' in str(error)
# genbank to fasta
out_fhand = NamedTemporaryFile()
genbank_fhand = open(os.path.join(TEST_DATA_DIR, 'sequence.gb'))
seqio([genbank_fhand], out_fhand, 'fasta')
result = open(out_fhand.name).read()
assert '>NM_019354.2' in result
class ReadWriteSeqRecordsTest(unittest.TestCase):
'It writes seqrecords in a file'
def test_write_empy_seq(self):
'It does not write an empty sequence'
seq1 = SeqRecord(Seq('ACTG'), id='seq1')
fhand = StringIO()
_write_seqrecords([seq1, None, SeqRecord(Seq(''), id='seq2')], fhand,
file_format='fasta')
fhand.flush()
assert fhand.getvalue() == '>seq1\nACTG\n'
def test_read_fasta(self):
'It tests the reading of a fasta file'
fhand = StringIO('>seq1\nACTG\n')
assert not list(_read_seqrecords([fhand]))[0].description
class SimpleIOTest(unittest.TestCase):
'It tests the simple input and output read'
def test_singleline_itemizer(self):
fhand = StringIO('@s1\nACTG\n+\n1234\n' * 1100)
seqs = list(_itemize_fastx(fhand))
names = [seq[0] for seq in seqs]
assert len(names) == 1100
assert len(set([seq[1][1] for seq in seqs])) == 1
def test_fasta_itemizer(self):
'It tests the fasta itemizer'
fhand = StringIO('>s1\nACTG\n>s2 desc\nACTG\n')
seqs = list(_itemize_fastx(fhand))
assert seqs == [('s1', ['>s1\n', 'ACTG\n'], {}),
('s2', ['>s2 desc\n', 'ACTG\n'], {})]
# with several lines
fhand = StringIO('>s1\nACTG\nGTAC\n>s2 desc\nACTG\n')
seqs = list(_itemize_fastx(fhand))
assert seqs == [('s1', ['>s1\n', 'ACTGGTAC\n'], {}),
('s2', ['>s2 desc\n', 'ACTG\n'], {})]
# With empty lines
fhand = StringIO('>s1\nACTG\n\n>s2 desc\nACTG\n')
seqs = list(_itemize_fastx(fhand))
assert seqs == [('s1', ['>s1\n', 'ACTG\n'], {}),
('s2', ['>s2 desc\n', 'ACTG\n'], {})]
def test_fastq_itemizer(self):
'It tests the fasta itemizer'
fhand = StringIO('@s1\nACTG\n+\n1234\n@s2 desc\nACTG\n+\n4321\n')
seqs = list(_itemize_fastx(fhand))
assert seqs == [('s1', ['@s1\n', 'ACTG\n', '+\n', '1234\n'], {}),
('s2', ['@s2 desc\n', 'ACTG\n', '+\n', '4321\n'], {})]
# Empty line
fhand = StringIO('@s1\nACTG\n+\n1234\n\n@s2 desc\nACTG\n+\n4321\n')
seqs = list(_itemize_fastx(fhand))
assert seqs == [('s1', ['@s1\n', 'ACTG\n', '+\n', '1234\n'], {}),
('s2', ['@s2 desc\n', 'ACTG\n', '+\n', '4321\n'], {})]
# Empty line
fhand = StringIO('@s1\nACTG\nATTA\n+\n1234\n1234\n')
seqs = list(_itemize_fastx(fhand))
assert seqs == [('s1', ['@s1\n', 'ACTGATTA\n', '+\n', '12341234\n'],
{})]
def test_seqitems_io(self):
'It checks the different seq class streams IO'
fhand = StringIO('>s1\nACTG\n>s2 desc\nACTG\n')
seqs = list(read_seqs([fhand], prefered_seq_classes=[SEQITEM]))
assert seqs[0].kind == SEQITEM
fhand = StringIO()
write_seqs(seqs, fhand)
assert fhand.getvalue() == '>s1\nACTG\n>s2 desc\nACTG\n'
assert seqs[0].object.name == 's1'
# SeqRecord
fhand = StringIO('>s1\nACTG\n>s2 desc\nACTG\n')
seqs = list(read_seqs([fhand], prefered_seq_classes=[SEQRECORD]))
assert seqs[0].kind == SEQRECORD
fhand = StringIO()
write_seqs(seqs, fhand, 'fasta')
assert fhand.getvalue() == '>s1\nACTG\n>s2 desc\nACTG\n'
# seqitem not possible with different input and output formats
fhand = StringIO('>s1\nACTG\n>s2 desc\nACTG\n')
try:
seqs = list(read_seqs([fhand], out_format='fastq',
prefered_seq_classes=[SEQITEM]))
self.fail('ValueError expected')
except ValueError:
pass
fhand = StringIO('>s1\nACTG\n>s2 desc\nACTG\n')
seqs = list(read_seqs([fhand], out_format='fasta',
prefered_seq_classes=[SEQITEM]))
fhand = StringIO()
write_seqs(seqs, fhand)
assert fhand.getvalue() == '>s1\nACTG\n>s2 desc\nACTG\n'
class PipingTest(unittest.TestCase):
'It tests that we get no error when trying to write in a closed pipe'
def test_write_closed_pipe(self):
seq_fhand = NamedTemporaryFile(suffix='.fasta')
n_seqs = 1000
for i in range(n_seqs):
seq_fhand.write('>s\nACTG\n')
seq_fhand.flush()
in_fpath = seq_fhand.name
seq_head = os.path.join(BIN_DIR, 'seq_head')
process_seq = Popen([seq_head, '-n', str(n_seqs), in_fpath],
stdout=PIPE)
stdout = NamedTemporaryFile(suffix='.stdout')
process_head = Popen(['head', '-n', '1'], stdin=process_seq.stdout,
stdout=stdout)
process_seq.stdout.close() # Allow seq_head to receive a SIGPIPE if
# head exits.
process_head.communicate()
assert open(stdout.name).read() == '>s\n'
seq_fhand.close()
stdout.close()
# With SeqRecords
gb_fpath = os.path.join(TEST_DATA_DIR, 'sequence.gb')
gb_content = open(gb_fpath).read()
seq_fhand = NamedTemporaryFile(suffix='.gb')
n_seqs = 100
for i in range(n_seqs):
seq_fhand.write(gb_content)
seq_fhand.flush()
in_fpath = seq_fhand.name
process_seq = Popen([seq_head, '-n', str(n_seqs), in_fpath],
stdout=PIPE)
stdout = NamedTemporaryFile(suffix='.stdout')
process_head = Popen(['head', '-n', '1'], stdin=process_seq.stdout,
stdout=stdout)
process_seq.stdout.close() # Allow seq_head to receive a SIGPIPE if
# head exits.
process_head.communicate()
seq_fhand.close()
assert 'LOCUS' in open(stdout.name).read()
stdout.close()
if __name__ == '__main__':
#import sys;sys.argv = ['', 'SeqIOTest.test_guess_seq_type']
unittest.main()
| gpl-3.0 | -4,225,053,976,801,928,000 | 38.139623 | 78 | 0.58253 | false |
ishanatmuz/HangmanMinimalist | hangman.py | 1 | 4431 | import random
import string
import os
import platform
# Defining the text file containing the list of words
WORDLIST_FILENAME = "words.txt"
MAX_GUESSES = 8
def loadWords():
# Returns a list of valid words. Words are taken from the file words.txt
print "Loading word list from file..."
# Open file for reading with no buffering
inFile = open(WORDLIST_FILENAME, 'r', 0)
# Read the file in single line
line = inFile.readline()
# Split all the words separated by whitespaces
wordlist = string.split(line)
print " ", len(wordlist), "words loaded."
return wordlist
def chooseWord(wordlist):
# Choose a word at random which the user have to guess
return random.choice(wordlist)
def isWordGuessed(secretWord, lettersGuessed):
# Checking for the non-existence of any character from the secretWord
# The result is stored as True of False
result = True;
for secretLetter in secretWord:
if not secretLetter in lettersGuessed:
result = False;
break;
return result;
def getGuessedWord(secretWord, lettersGuessed):
# Returns the guessed word in a specific format
# Example - the word 'apple' with the guessed characters ['a', 'b','l','s','e']
    # would look like this: 'a_ _ le'
result = "'";
for letter in secretWord:
if letter in lettersGuessed:
result += letter;
else:
result += '_ ';
result += "'";
return result;
def getAvailableLetters(lettersGuessed):
# Return the list of letters that are available to be used
# The letters returned are in lowercase
availableLetters = string.ascii_lowercase;
for letter in lettersGuessed:
availableLetters = availableLetters.replace(letter, '');
return availableLetters;
def clearTerminal():
# Clears the terminal on which the output is being displayed.
# Works at least on Windows and Linux, I haven't tested it on Mac OS
if platform.system() == 'Windows':
os.system('cls')
else:
os.system('clear')
def hangman(secretWord):
# Total number of wrong guesses allowed is 8
numberOfGuesses = MAX_GUESSES
# The letters guessed by the user
lettersGuessed = {}
# Welcome message
print 'Welcome to the game, Hangman!'
print 'I am thinking of a word that is %s letters long.' %(str(len(secretWord)))
# Infinite loop which breaks from inside the loop's conditions
while True:
print '-------------'
if not isWordGuessed(secretWord, lettersGuessed):
# Word not guessed
if numberOfGuesses == 0:
# All guesses exhausted, end the game
print 'Sorry, you ran out of guesses. The word was %s.' %(secretWord)
break
else:
# Guesses left, Display guesses left and available letters
print 'You have %s guesses left.' %(str(numberOfGuesses))
print 'Available letters: %s' %(getAvailableLetters(lettersGuessed))
# Take input from the user
guessedLetter = raw_input('Please guess a letter: ')
# Clearing the terminal
# Can use and cannot use depending on the preference
clearTerminal()
if guessedLetter in lettersGuessed:
# Already guessed letter, display guessed word
print 'Oops! You\'ve already guessed that letter:%s' %(getGuessedWord(secretWord, lettersGuessed))
else:
# New guess, add to lettersGuessed
lettersGuessed[guessedLetter] = True
if guessedLetter not in secretWord:
# Wrong Guess, decrement number of guesses
print 'Oops! That letter is not in my word:%s' %(getGuessedWord(secretWord, lettersGuessed))
numberOfGuesses -= 1
else:
# Correct guess
print 'Good guess:%s' %(getGuessedWord(secretWord, lettersGuessed))
else:
# Word guessed
print 'Congratulations, you won!'
break
# Execution sequence of the game
# Load the words from file
wordlist = loadWords()
# Choose a secret word for the user to guess
secretWord = chooseWord(wordlist).lower()
# Start the game for user
hangman(secretWord) | mit | 7,097,529,540,876,522,000 | 36.880342 | 118 | 0.61995 | false |
mirestrepo/voxels-at-lems | registration_eval/results/compute_trans_geo_accuracy.py | 1 | 13935 | #!/usr/bin/env python
# encoding: utf-8
"""
compute_transformation_error.py
Created by Maria Isabel Restrepo on 2012-09-24.
Copyright (c) 2012 . All rights reserved.
This script computes the distances between an estimated similarity transformation and its ground truth.
The transformation is used to transform a "source" coordinate system into a "target" coordinate system.
To compute the error between the translations, the L2 norm of the difference of the translation vectors in the
"source coordinate system" is computed. Since distances are preserved under R and T, only scale is applied.
The rotation error is computed as the half angle between the normalized quaternions, i.e. acos(|<q1,q2>|) in [0, pi/2].
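As an illustration only (the variable names below are hypothetical, not names used in this script),
the two errors can be computed with numpy roughly as follows:

    import numpy as np
    # translation error: L2 norm of the difference of the translation vectors, with scale applied
    trans_error = np.linalg.norm(scale * (t_estimated - t_ground_truth))
    # rotation error: half angle between the normalised quaternions, in [0, pi/2]
    rot_error = np.arccos(abs(np.dot(q_estimated, q_ground_truth)))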
"""
import os
import sys
import logging
import argparse
import vpcl_adaptor as vpcl
import numpy as np
from numpy import linalg as LA
import transformations as tf
import math
import matplotlib.pyplot as plt
sys.path.append(os.pardir)
import reg3d_transformations as reg3d_T
LOG = None
"""Compute the accuracy between the LIDAR fiducial points
and corresponding geo-register correspondances"""
def compute_ref_accuracy(fid_path, original_corrs_path,
geo_tform):
#Load fiducial .ply
fid = open(fid_path, 'r')
fid_points = np.genfromtxt(fid, dtype=float, delimiter=' ',
skip_header=9)
fid.close()
#Load original corrs .ply
fid = open(original_corrs_path, 'r')
original_corrs = np.genfromtxt(fid, dtype=float,
delimiter=' ', skip_header=9)
fid.close()
#Load transformation
#************GEO**************"
Tfis = open(geo_tform, 'r')
lines = []
lines = Tfis.readlines()
scale_geo = float(lines[0])
Ss_geo = tf.scale_matrix(scale_geo)
quat_line = lines[1].split(" ")
quat_geo = np.array([float(quat_line[3]), float(quat_line[0]),
float(quat_line[1]), float(quat_line[2])])
Rs_geo = tf.quaternion_matrix(quat_geo)
trans_line = lines[2].split(" ")
trans_geo = np.array([float(trans_line[0]), float(trans_line[1]),
float(trans_line[2])])
Tfis.close()
Hs_geo = Rs_geo.copy()
Hs_geo[:3, 3] = trans_geo[:3]
Hs_geo = Ss_geo.dot(Hs_geo)
LOG.debug("\n******Geo***** \n Scale: \n%s \nR:\n%s \nT:\n%s \nH:\n%s",
Ss_geo, Rs_geo, trans_geo, Hs_geo)
#Compute the "reference error"
#i.e. fiducial points - geo registered correspondances
npoints, c = fid_points.shape
if npoints != 30:
LOG.warn("Number of fiducial point is NOT 30")
if c != 3:
LOG.error("Fiducial points has the wrong number of dimensions")
# import code; code.interact(local=locals())
fid_points_hom = np.hstack((fid_points, np.ones([npoints, 1]))).T
original_corrs_hom = np.hstack((original_corrs, np.ones([npoints, 1]))).T
geo_corrs_hom = Hs_geo.dot(original_corrs_hom)
geo_ref_diff = geo_corrs_hom - fid_points_hom
# import pdb; pdb.set_trace()
delta_z = np.sqrt(geo_ref_diff[2, :] * geo_ref_diff[2, :])
delta_r = np.sqrt(geo_ref_diff[0, :] * geo_ref_diff[0, :] +
geo_ref_diff[1, :] * geo_ref_diff[1, :])
return delta_z, delta_r
def compute_geo_accuracy(fid_path, original_corrs_path,
geo_tform, trials_root, desc_name,
niter, ntrials, percentile=99):
#Load fiducial .ply
fid = open(fid_path, 'r')
fid_points = np.genfromtxt(fid, delimiter=' ',
skip_header=9)
fid.close()
#Load original corrs .ply
fid = open(original_corrs_path, 'r')
original_corrs = np.genfromtxt(fid, delimiter=' ', skip_header=9)
fid.close()
#load the geo tranformation
GEO = reg3d_T.geo_transformation(geo_tform);
#Compute the "reference error"
#i.e. fiducial points - geo registered correspondances
npoints, c = fid_points.shape
if npoints != 30:
LOG.warn("Number of fiducial point is NOT 30")
if c != 3:
LOG.error("Fiducial points has the wrong number of dimensions")
# import code; code.interact(local=locals())
fid_points_hom = np.hstack((fid_points, np.ones([npoints, 1]))).T
original_corrs_hom = np.hstack((original_corrs, np.ones([npoints, 1]))).T
geo_corrs_hom = GEO.transform_points(original_corrs_hom)
geo_ref_diff = geo_corrs_hom - fid_points_hom
# import pdb; pdb.set_trace()
delta_z = (geo_ref_diff[2, :] **2) ** (1./2.)
delta_r = (geo_ref_diff[0, :] **2 + geo_ref_diff[1, :] **2 )** (1./2.)
delta_z_ia = np.zeros([ntrials, npoints])
delta_r_ia = np.zeros([ntrials, npoints])
delta_z_icp = np.zeros([ntrials, npoints])
delta_r_icp = np.zeros([ntrials, npoints])
for trial in range(0, ntrials):
print "********Trial", trial, "**********"
#Load the transformations for this trial
#************Hs**************#
#read source to target "Ground Truth" Transformation
Tfile = trials_root + "/trial_" + str(trial) + "/Hs_inv.txt"
GT_Tform = reg3d_T.gt_transformation(Tfile)
src_features_dir = (trials_root + "/trial_" + str(trial) +
"/" + desc_name)
Tfile_ia = (src_features_dir + "/ia_transformation_" +
str(percentile) + "_" + str(niter) + ".txt")
Tfile_icp = (src_features_dir + "/icp_transformation_" +
str(percentile) + "_" + str(niter) + ".txt")
REG_Tform = reg3d_T.pcl_transformation(Tfile_ia, Tfile_icp)
Hs_ia_error = REG_Tform.Hs_ia.dot(GT_Tform.Hs)
Hs_icp_error = REG_Tform.Hs_icp.dot(GT_Tform.Hs)
# transform the points with the residual transformations
ia_corrs_hom = Hs_ia_error.dot(original_corrs_hom)
icp_corrs_hom = Hs_icp_error.dot(original_corrs_hom)
# geo-register
geo_ia_corrs_hom = GEO.transform_points(ia_corrs_hom)
geo_icp_corrs_hom = GEO.transform_points(icp_corrs_hom)
# distances
geo_ia_ref_diff = geo_ia_corrs_hom - fid_points_hom
geo_icp_ref_diff = geo_icp_corrs_hom - fid_points_hom
delta_z_ia[trial, :] = np.sqrt(geo_ia_ref_diff[2, :] ** 2)
delta_r_ia[trial, :] = np.sqrt(geo_ia_ref_diff[0, :] ** 2 +
geo_ia_ref_diff[1, :] ** 2 )
delta_z_icp[trial, :] = np.sqrt(geo_icp_ref_diff[2, :] ** 2)
delta_r_icp[trial, :] = np.sqrt(geo_icp_ref_diff[0, :] ** 2 +
geo_icp_ref_diff[1, :] ** 2)
# import pdb; pdb.set_trace()
return delta_z, delta_r,\
delta_z_ia, delta_r_ia, \
delta_z_icp, delta_r_icp
def main(logfile=None):
global LOG
LOG = setlogging(logfile)
descriptors = ["FPFH_30", "SHOT_30"]
niter = 500;
ntrials = 10;
plot_errors = True;
if (plot_errors):
colors = ['magenta','green'];
markers = ['o', 's', '*', '+', '^', 'v']
fid_path = "/data/lidar_providence/downtown_offset-1-financial-dan-pts1.ply"
original_corrs_path = "/data/lidar_providence/downtown_offset-1-financial-dan-pts0.ply"
trials_root = "/Users/isa/Experiments/reg3d_eval/downtown_dan";
geo_tform = "/data/lidar_providence/downtown_offset-1-financial-dan-Hs.txt"
for d_idx in range(0, len(descriptors)):
desc_name = descriptors[d_idx]
delta_z, delta_r, \
delta_z_ia, delta_r_ia, \
delta_z_icp, delta_r_icp = compute_geo_accuracy(fid_path,
original_corrs_path,
geo_tform, trials_root, desc_name,
niter, ntrials)
#sort errors for all trials to get the 70 80 90 % errors
delta_z_ia.sort(axis=0)
delta_r_ia.sort(axis=0)
delta_z_icp.sort(axis=0)
delta_r_icp.sort(axis=0)
CE_70_ia = delta_r_ia[int(0.7 * ntrials) - 1, :]
CE_80_ia = delta_r_ia[int(0.8 * ntrials) - 1, :]
CE_90_ia = delta_r_ia[int(0.9 * ntrials) - 1, :]
LE_70_ia = delta_z_ia[int(0.7 * ntrials) - 1, :]
LE_80_ia = delta_z_ia[int(0.8 * ntrials) - 1, :]
LE_90_ia = delta_z_ia[int(0.9 * ntrials) - 1, :]
CE_70_icp = delta_r_icp[int(0.7 * ntrials) - 1, :]
CE_80_icp = delta_r_icp[int(0.8 * ntrials) - 1, :]
CE_90_icp = delta_r_icp[int(0.9 * ntrials) - 1, :]
LE_70_icp = delta_z_icp[int(0.7 * ntrials) - 1, :]
LE_80_icp = delta_z_icp[int(0.8 * ntrials) - 1, :]
LE_90_icp = delta_z_icp[int(0.9 * ntrials) - 1, :]
if (plot_errors):
#Plot CE and LE
fig_ia_CE = plt.figure()
ax_ia_CE = fig_ia_CE.add_subplot(111);
plt.hold(True);
plt.axis(tight=True);
ax_ia_CE.plot(CE_70_ia, "--s", color="green", label= "CE_70");
ax_ia_CE.plot(CE_80_ia, "--^", color="magenta", label= "CE_80");
ax_ia_CE.plot(CE_90_ia, "--*", color="blue", label= "CE_90");
ax_ia_CE.plot( delta_r, "--o", color="cyan", label= "GT");
ax_ia_CE.set_xlabel('Fiducial Marker (index)',fontsize= 20);
ax_ia_CE.set_ylabel('Error (meters)',fontsize= 20);
ax_ia_CE.legend(loc='best', frameon=False);
# ax_ia_CE.set_title('IA CE')
fname = trials_root + "/GEO_results/IA_CE_" + desc_name + ".pdf"
fig_ia_CE.savefig(fname, transparent=True, pad_inches=5)
fig_ia_LE = plt.figure()
ax_ia_LE = fig_ia_LE.add_subplot(111);
plt.hold(True);
plt.axis(tight=True);
ax_ia_LE.plot(LE_70_ia, "--s", color="green", label= "LE_70");
ax_ia_LE.plot(LE_80_ia, "--^", color="magenta", label= "LE_80");
ax_ia_LE.plot(LE_90_ia, "--*", color="blue", label= "LE_90");
ax_ia_LE.plot( delta_z, "--o", color="cyan", label= "GT");
ax_ia_LE.set_xlabel('Fiducial Marker (index)',fontsize= 20);
ax_ia_LE.set_ylabel('Error (meters)',fontsize= 20);
ax_ia_LE.legend(loc='best', frameon=False);
# ax_ia_LE.set_title('IA LE')
fname = trials_root + "/GEO_results/IA_LE_" + desc_name + ".pdf"
fig_ia_LE.savefig(fname, transparent=True, pad_inches=5)
fig_icp_CE = plt.figure()
ax_icp_CE = fig_icp_CE.add_subplot(111);
plt.hold(True);
plt.axis(tight=True);
ax_icp_CE.plot(CE_70_icp, "--s", color="green", label= "CE_70");
ax_icp_CE.plot(CE_80_icp, "--^", color="magenta", label= "CE_80");
ax_icp_CE.plot(CE_90_icp, "--*", color="blue", label= "CE_90");
ax_icp_CE.plot( delta_r, "--o", color="cyan", label= "GT");
ax_icp_CE.set_xlabel('Fiducial Marker (index)',fontsize= 20);
ax_icp_CE.set_ylabel('Error (meters)',fontsize= 20);
ax_icp_CE.legend(loc='best', frameon=False);
# ax_icp_CE.set_title('ICP CE')
fname = trials_root + "/GEO_results/ICP_CE_" + desc_name + ".pdf"
fig_icp_CE.savefig(fname, transparent=True, pad_inches=5)
fig_icp_LE = plt.figure()
ax_icp_LE = fig_icp_LE.add_subplot(111);
plt.hold(True);
plt.axis(tight=True);
ax_icp_LE.plot(LE_70_icp, "--s", color="green", label= "LE_70");
ax_icp_LE.plot(LE_80_icp, "--^", color="magenta", label= "LE_80");
ax_icp_LE.plot(LE_90_icp, "--*", color="blue", label= "LE_90");
ax_icp_LE.plot( delta_z, "--o", color="cyan", label= "GT");
ax_icp_LE.set_xlabel('Fiducial Marker (index)',fontsize= 20);
ax_icp_LE.set_ylabel('Error (meters)',fontsize= 20);
ax_icp_LE.legend(loc='best', frameon=False);
# ax_icp_LE.set_title('ICP LE')
fname = trials_root + "/GEO_results/ICP_LE_" + desc_name + ".pdf"
fig_icp_LE.savefig(fname, transparent=True, pad_inches=5)
# axT.set_xlim((0,505) );
# axT.set_yticks(np.arange(0.0,250.0,20));
# # axT.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
# # ncol=4, mode="expand", borderaxespad=0.)
#
# figT.savefig("/Users/isa/Experiments/reg3d_eval/downtown_dan/T_error.pdf", transparent=True, pad_inches=5)
# plt.show();
# import pdb; pdb.set_trace()
def setlogging(logfile=None):
level = logging.DEBUG
logger = logging.getLogger(__name__)
logger.setLevel(level)
# create formatter and add it to the handlers
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(level)
ch.setFormatter(formatter)
# add the handlers to logger
logger.addHandler(ch)
# create file handler which logs error messages
if logfile:
print "Logging to file"
fh = logging.FileHandler(logfile)
fh.setLevel(level)
fh.setFormatter(formatter)
logger.addHandler(fh)
#test logging
logger.debug("debug message")
logger.info("info message")
logger.warn("warn message")
logger.error("error message")
logger.critical("critical message")
return logger
if __name__ == '__main__':
# initialize the parser object:
parser = argparse.ArgumentParser(description="Export PLY to PCD file")
# define options here:
parser.add_argument("-v", "--verbose", action='store', type = bool, dest="verbose", default=True, help="Write debug log to log_file")
parser.add_argument("-L", "--log", dest="logfile", help="write debug log to log_file")
    args = parser.parse_args()
# set up logging
if args.verbose:
status = main(args.logfile)
else:
status = main()
sys.exit(status)
| bsd-2-clause | 5,998,700,725,163,091,000 | 37.924581 | 150 | 0.568497 | false |
smpss91341/2016springcd_aG8 | static/publishconf.py | 1 | 1717 | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
# Because publishconf.py is read after pelicanconf.py, any setting defined in both files
# takes the value from publishconf.py, which is loaded later.
# Note: for the Tipue search result links to resolve correctly on the local site, ./ must be used.
SITEURL = 'http://ddss-40323123.rhcloud.com/static/blog/'
# This setting is used for checking the static pages locally, hence relative URLs
RELATIVE_URLS = False
# To let Tipue search work both locally and on gh-pages, a different theme may be needed
THEME = 'theme/pelican-bootstrap3'
#BOOTSTRAP_THEME = 'readable'
#BOOTSTRAP_THEME = 'readable-old'
BOOTSTRAP_THEME = 'united'
#PYGMENTS_STYLE = 'paraiso-drak'
#PYGMENTS_STYLE = 'fruity'
# To stay compatible with render_math as well, fruity had to be dropped
PYGMENTS_STYLE = 'monokai'
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'
DELETE_OUTPUT_DIRECTORY = True
# Following items are often useful when publishing
DISQUS_SITENAME = "cadlabmanual"
#GOOGLE_ANALYTICS = ""
# Use the file-system date of each md file as the blog post date; no need to set it manually
DEFAULT_DATE = 'fs'
# Local code highlighting
MD_EXTENSIONS = ['fenced_code', 'extra', 'codehilite(linenums=True)']
# To save articles under date-based paths, use:
#ARTICLE_URL = 'posts/{date:%Y}/{date:%m}/{date:%d}/{slug}/index.html'
#ARTICLE_SAVE_AS = 'posts/{date:%Y}/{date:%m}/{date:%d}/{slug}/index.html'
PAGE_URL = 'pages/{slug}/'
PAGE_SAVE_AS = 'pages/{slug}/index.html'
SHOW_ARTICLE_AUTHOR = True
| agpl-3.0 | 2,163,716,201,491,112,700 | 27.98 | 86 | 0.721877 | false |
denizs/torchUp | torchup/logging/logger.py | 1 | 1865 | import tensorflow as tf
import numpy as np
import scipy.misc
from tensorboardX.src.summary_pb2 import Summary
from tensorboardX import SummaryWriter
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO
class Logger(object):
def __init__(self, log_dir):
'''
Create a summary writer logging to log_dir
'''
self.writer = tf.summary.FileWriter(log_dir)
self.writerX = SummaryWriter(log_dir=log_dir)
def scalar_summary(self, tag, value, step):
'''
Log scalar value
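        Example (a sketch; the log directory, tag name and values are illustrative):
            logger = Logger('./logs')
            logger.scalar_summary('loss', 0.42, step=100)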
'''
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
self.writer.add_summary(summary, step)
def add_image(self, tag, img, step):
'''
Log img
'''
        summary = Summary(value=[Summary.Value(tag=tag, image=img)])
self.writerX.add_summary(summary, step)
def histo_summary(self, tag, values, step, bins=1000):
'''
Log a histogram of the tensor of values.
'''
# Create histogram:
counts, bin_edges = np.histogram(values, bins=bins)
# Fill the fields of the histogram proto
hist = tf.HistogramProto()
        hist.min = float(np.min(values))
        hist.max = float(np.max(values))
hist.num = int(np.prod(values.shape))
hist.sum = float(np.sum(values))
hist.sum_squares = float(np.sum(values**2))
# Drop the start of the first bin
bin_edges = bin_edges[1:]
# Add bin edges and counts:
for edge in bin_edges:
hist.bucket_limit.append(edge)
for c in counts:
hist.bucket.append(c)
# Create and write Summary
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
self.writer.add_summary(summary, step)
self.writer.flush()
| bsd-2-clause | -5,471,846,208,124,120,000 | 28.140625 | 83 | 0.604826 | false |
ziir/caliopen.api | caliopen/api/__init__.py | 1 | 1127 | # -*- coding: utf-8 -*-
__version__ = '0.0.1'
try:
import pkg_resources
pkg_resources.declare_namespace(__name__)
except ImportError:
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
import logging
from pyramid.config import Configurator
from caliopen.base.config import Configuration
log = logging.getLogger(__name__)
def main(global_config, **settings):
"""Caliopen entry point for WSGI application.
Load Caliopen configuration and setup a WSGI application
with loaded API services.
"""
# XXX ugly way to init caliopen configuration before pyramid
caliopen_config = settings['caliopen.config'].split(':')[1]
Configuration.load(caliopen_config, 'global')
config = Configurator(settings=settings)
services = config.registry.settings. \
get('caliopen.api.services', []). \
split('\n')
route_prefix = settings.get('caliopen.api.route_prefix')
for service in services:
log.info('Loading %s service' % service)
config.include(service, route_prefix=route_prefix)
config.end()
return config.make_wsgi_app()
| gpl-3.0 | -7,291,350,353,224,447,000 | 27.897436 | 64 | 0.678793 | false |
noironetworks/networking-cisco | networking_cisco/tests/unit/cisco/l3/l3_router_test_support.py | 1 | 6077 | # Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from networking_cisco import backwards_compatibility as bc
from networking_cisco.backwards_compatibility import attributes
from networking_cisco.backwards_compatibility import l3_const
import networking_cisco.plugins
from networking_cisco.plugins.cisco.db.l3 import l3_router_appliance_db
from networking_cisco.plugins.cisco.db.l3 import routertype_db
from networking_cisco.plugins.cisco.db.scheduler import (
l3_routertype_aware_schedulers_db as router_sch_db)
from networking_cisco.plugins.cisco.extensions import routerhostingdevice
from networking_cisco.plugins.cisco.extensions import routerrole
from networking_cisco.plugins.cisco.extensions import routertype
from neutron.common import test_lib
from neutron.db import common_db_mixin
from neutron.extensions import l3
from neutron.extensions import standardattrdescription
L3_PLUGIN_KLASS = (
'networking_cisco.tests.unit.cisco.l3.l3_router_test_support.'
'TestL3RouterServicePlugin')
extensions_path = networking_cisco.plugins.__path__[0] + '/cisco/extensions'
class L3RouterTestSupportMixin(object):
_is_ha_tests = False
def _mock_get_routertype_scheduler_always_none(self):
self.get_routertype_scheduler_fcn_p = mock.patch(
'networking_cisco.plugins.cisco.db.l3.l3_router_appliance_db.'
'L3RouterApplianceDBMixin._get_router_type_scheduler',
mock.Mock(return_value=None))
self.get_routertype_scheduler_fcn_p.start()
def _mock_backlog_processing(self, plugin):
mock.patch.object(plugin, '_is_master_process',
return_value=True).start()
mock.patch.object(plugin, '_setup_backlog_handling').start()
def _add_router_plugin_ini_file(self):
# includes config file for router service plugin
if hasattr(self, '_is_ha_tests') and self._is_ha_tests is True:
cfg_file = (
networking_cisco.__path__[0] +
'/tests/unit/cisco/etc/ha/cisco_router_plugin.ini')
else:
cfg_file = (
networking_cisco.__path__[0] +
'/tests/unit/cisco/etc/cisco_router_plugin.ini')
if 'config_files' in test_lib.test_config:
test_lib.test_config['config_files'].append(cfg_file)
else:
test_lib.test_config['config_files'] = [cfg_file]
class TestL3RouterBaseExtensionManager(object):
def get_resources(self):
# Add the resources to the global attribute map
# This is done here as the setup process won't
# initialize the main API router which extends
# the global attribute map
# first, add hosting device attribute to router resource
l3_const.RESOURCE_ATTRIBUTE_MAP['routers'].update(
routerhostingdevice.EXTENDED_ATTRIBUTES_2_0['routers'])
# also add role attribute to router resource
l3_const.RESOURCE_ATTRIBUTE_MAP['routers'].update(
routerrole.EXTENDED_ATTRIBUTES_2_0['routers'])
# also add routertype attribute to router resource
l3_const.RESOURCE_ATTRIBUTE_MAP['routers'].update(
routertype.EXTENDED_ATTRIBUTES_2_0['routers'])
# also add description attribute to router and fip resources
ext_res = (standardattrdescription.Standardattrdescription().
get_extended_resources("2.0"))
if 'routers' in ext_res:
l3_const.RESOURCE_ATTRIBUTE_MAP['routers'].update(
ext_res['routers'])
if 'floatingips' in ext_res:
(l3_const.RESOURCE_ATTRIBUTE_MAP['floatingips'].
update(ext_res['floatingips']))
# finally, extend the global attribute map
attributes.RESOURCES.update(
l3_const.RESOURCE_ATTRIBUTE_MAP)
res = l3.L3.get_resources()
# add routertype resource
for item in routertype.Routertype.get_resources():
res.append(item)
return res
def get_actions(self):
return []
def get_request_extensions(self):
return []
# A L3 routing service plugin class supporting the routertype and
# routerhost:hostingdevice extensions
class TestL3RouterServicePlugin(
common_db_mixin.CommonDbMixin,
routertype_db.RoutertypeDbMixin,
l3_router_appliance_db.L3RouterApplianceDBMixin,
# we need the router scheduling db but do not expose the scheduling
# REST operations
router_sch_db.L3RouterTypeAwareSchedulerDbMixin):
supported_extension_aliases = [
"router",
"standard-attr-description",
routerhostingdevice.ROUTERHOSTINGDEVICE_ALIAS,
routerrole.ROUTERROLE_ALIAS,
routertype.ROUTERTYPE_ALIAS]
def get_plugin_type(self):
return bc.constants.L3
def get_plugin_description(self):
return "L3 Routing Service Plugin for testing"
def cleanup_after_test(self):
"""This function should be called in the TearDown() function of
test classes that use the plugin.
Reset all class variables to their default values.
This is needed to avoid tests to pollute subsequent tests.
"""
TestL3RouterServicePlugin._router_schedulers = {}
TestL3RouterServicePlugin._router_drivers = {}
TestL3RouterServicePlugin._namespace_router_type_id = None
TestL3RouterServicePlugin._backlogged_routers = set()
TestL3RouterServicePlugin._refresh_router_backlog = True
| apache-2.0 | -5,213,895,254,985,300,000 | 40.060811 | 78 | 0.68751 | false |
HXLStandard/libhxl-python | hxl/model.py | 1 | 48776 | """Main data-model classes for the Humanitarian Exchange Language (HXL).
This module defines the basic classes for working with HXL data. Other
modules have classes derived from these (e.g. in
[hxl.filters](filters.html) or [hxl.io](io.html)). The core class is
[Dataset](#hxl.model.Dataset), which defines the operations available
on a HXL dataset, including convenience methods for chaining filters.
Typical usage:
source = hxl.data("https://example.org/data.csv")
# returns a hxl.model.Dataset object
    result = source.with_rows("#country+name=Kenya").sort()
# a filtered/sorted view of the data
This code is released into the Public Domain and comes with NO WARRANTY.
"""
import abc, copy, csv, dateutil, hashlib, json, logging, operator, re, six
import hxl
logger = logging.getLogger(__name__)
class TagPattern(object):
"""Pattern for matching a HXL hashtag and attributes
- the pattern "#*" matches any hashtag/attribute combination
- the pattern "#*+foo" matches any hashtag with the foo attribute
- the pattern "#tag" matches #tag with any attributes
- the pattern "#tag+foo" matches #tag with foo among its attributes
- the pattern "#tag-foo" matches #tag with foo *not* among its attributes
- the pattern "#tag+foo-bar" matches #tag with foo but not bar
- the pattern "#tag+foo+bar!" matches #tag with exactly the attributes foo and bar, but *no others*
The normal way to create a tag pattern is using the
[parse()](#hxl.model.TagPattern.parse) method rather than the
constructor:
pattern = hxl.model.TagPattern.parse("#affected+f-children")
Args:
tag: the basic hashtag (without attributes)
include_attributes: a list of attributes that must be present
exclude_attributes: a list of attributes that must not be present
is_absolute: if True, no attributes are allowed except those in _include_attributes_
"""
PATTERN = r'^\s*#?({token}|\*)((?:\s*[+-]{token})*)\s*(!)?\s*$'.format(token=hxl.datatypes.TOKEN_PATTERN)
"""Constant: regular expression to match a HXL tag pattern.
"""
def __init__(self, tag, include_attributes=[], exclude_attributes=[], is_absolute=False):
self.tag = tag
self.include_attributes = set(include_attributes)
"""Set of all attributes that must be present"""
self.exclude_attributes = set(exclude_attributes)
"""Set of all attributes that must not be present"""
self.is_absolute = is_absolute
"""True if this pattern is absolute (no extra attributes allowed)"""
def is_wildcard(self):
return self.tag == '#*'
def match(self, column):
"""Check whether a Column matches this pattern.
@param column: the column to check
@returns: True if the column is a match
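        Example (a minimal sketch; the hashtags are illustrative):
            pattern = TagPattern.parse("#affected+f")
            column = Column.parse("#affected+f+children")
            pattern.match(column)  # True: the required attribute is present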
"""
if column.tag and (self.is_wildcard() or self.tag == column.tag):
# all include_attributes must be present
if self.include_attributes:
for attribute in self.include_attributes:
if attribute not in column.attributes:
return False
# all exclude_attributes must be absent
if self.exclude_attributes:
for attribute in self.exclude_attributes:
if attribute in column.attributes:
return False
# if absolute, then only specified attributes may be present
if self.is_absolute:
for attribute in column.attributes:
if attribute not in self.include_attributes:
return False
return True
else:
return False
def get_matching_columns(self, columns):
"""Return a list of columns that match the pattern.
@param columns: a list of L{hxl.model.Column} objects
@returns: a list (possibly empty)
"""
result = []
for column in columns:
if self.match(column):
result.append(column)
return result
def find_column_index(self, columns):
"""Get the index of the first matching column.
@param columns: a list of columns to check
@returns: the 0-based index of the first matching column, or None for no match
"""
for i in range(len(columns)):
if self.match(columns[i]):
return i
return None
def find_column(self, columns):
"""Check whether there is a match in a list of columns."""
for column in columns:
if self.match(column):
return column
return None
def __repr__(self):
s = self.tag
if self.include_attributes:
for attribute in self.include_attributes:
s += '+' + attribute
if self.exclude_attributes:
for attribute in self.exclude_attributes:
s += '-' + attribute
return s
__str__ = __repr__
@staticmethod
def parse(s):
"""Parse a single tag-pattern string.
pattern = TagPattern.parse("#affected+f-children")
The [parse_list()](#hxl.model.TagPattern.parse_list) method
will call this method to parse multiple patterns at once.
Args:
s: the tag-pattern string to parse
Returns:
A TagPattern object
"""
if not s:
# edge case: null value
raise hxl.HXLException('Attempt to parse empty tag pattern')
elif isinstance(s, TagPattern):
# edge case: already parsed
return s
result = re.match(TagPattern.PATTERN, s)
if result:
tag = '#' + result.group(1).lower()
include_attributes = set()
exclude_attributes = set()
attribute_specs = re.split(r'\s*([+-])', result.group(2))
for i in range(1, len(attribute_specs), 2):
if attribute_specs[i] == '+':
include_attributes.add(attribute_specs[i + 1].lower())
else:
exclude_attributes.add(attribute_specs[i + 1].lower())
if result.group(3) == '!':
is_absolute = True
if exclude_attributes:
raise ValueError('Exclusions not allowed in absolute patterns')
else:
is_absolute = False
return TagPattern(
tag,
include_attributes=include_attributes,
exclude_attributes=exclude_attributes,
is_absolute=is_absolute
)
else:
raise hxl.HXLException('Malformed tag: ' + s)
@staticmethod
def parse_list(specs):
"""Parse a list of tag-pattern strings.
If _specs_ is a list of already-parsed TagPattern objects, do
nothing. If it's a list of strings, apply
[parse()](#hxl.model.TagPattern.parse) to each one. If it's a
single string with multiple patterns separated by commas,
split the string, then parse the patterns.
patterns = TagPattern.parse_list("#affected+f,#inneed+f")
# or
patterns = TagPattern.parse_list("#affected+f", "#inneed+f")
Args:
specs: the raw input (a list of strings, or a single string with commas separating the patterns)
Returns:
A list of TagPattern objects.
"""
if not specs:
return []
if isinstance(specs, six.string_types):
specs = specs.split(',')
return [TagPattern.parse(spec) for spec in specs]
@staticmethod
def match_list(column, patterns):
"""Test if a column matches any of the patterns in a list.
This is convenient to use together with [parse_list()](hxl.model.TagPattern.parse_list):
patterns = TagPattern.parse_list(["#affected+f", "#inneed+f"])
if TagPattern.match_list(column, patterns):
print("The column matched one of the patterns")
Args:
column: the column to test
patterns: a list of zero or more patterns.
Returns:
True if there is a match
"""
for pattern in patterns:
if pattern.match(column):
return True
return False
class Dataset(object):
"""Abstract base class for a HXL data source.
Any source of parsed HXL data inherits from this class: that
includes Dataset, HXLReader, and the various filters in the
    hxl.filters package. The contract of a Dataset is that it will
    provide a columns property and an __iter__() method to read through the
    rows.
The child class must implement the columns() method as a property
and the __iter__() method to make itself iterable.
"""
__metaclass__ = abc.ABCMeta
def __init__(self):
"""Constructor."""
super().__init__()
@abc.abstractmethod
def __iter__(self):
"""Get the iterator over the rows.
@returns: an iterator that returns L{hxl.model.Row} objects
"""
        raise RuntimeError("child class must implement __iter__() method")
@property
def is_cached(self):
"""Test whether the source data is cached (replayable).
By default, this is False, but some subclasses may override.
@returns: C{True} if the input is cached (replayable); C{False} otherwise.
"""
return False
@property
@abc.abstractmethod
def columns(self):
"""Get the column definitions for the dataset.
@returns: a list of Column objects.
"""
        raise RuntimeError("child class must implement columns property method")
@property
def columns_hash(self):
"""Generate a hash across all of the columns in the dataset.
This function helps detect whether two HXL documents are of
the same type, even if they contain different data (e.g. the
HXL API output for the same humanitarian dataset in two
different months or two different countries).
It takes into account text headers, hashtags, the order of
attributes, and the order of columns. Whitespace is
normalised, and null values are treated as empty strings. The
MD5 hash digest is generated from a UTF-8 encoded version of
each header.
@returns: a 32-character hex-formatted MD5 hash string
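        Example (the URLs are illustrative):
            hash1 = hxl.data("https://example.org/jan.csv").columns_hash
            hash2 = hxl.data("https://example.org/feb.csv").columns_hash
            if hash1 == hash2:
                print("Both datasets share the same column structure")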
"""
md5 = hashlib.md5()
for column in self.columns:
md5.update(hxl.datatypes.normalise_space(column.header).encode('utf-8'))
for column in self.columns:
md5.update(hxl.datatypes.normalise_space(column.display_tag).encode('utf-8'))
return md5.hexdigest()
@property
def data_hash(self):
"""Generate a hash for the entire dataset.
This function allows checking if two HXL datasets are
functionally identical. It takes into account text headers,
hashtags, the order of attributes, and the order of
columns. Whitespace is normalised, and null values are treated
as empty strings. The MD5 hash digest is generated from a
UTF-8 encoded version of each header and data cell.
@returns: a 32-character hex-formatted MD5 hash string
"""
md5 = hashlib.md5()
# text header row
for column in self.columns:
md5.update(hxl.datatypes.normalise_space(column.header).encode('utf-8'))
# hashtag row
for column in self.columns:
md5.update(hxl.datatypes.normalise_space(column.display_tag).encode('utf-8'))
# data rows
for row in self:
for value in row:
md5.update(hxl.datatypes.normalise_space(value).encode('utf-8'))
return md5.hexdigest()
@property
def headers(self):
"""Return a list of header strings (for a spreadsheet row).
"""
return [column.header if column else '' for column in self.columns]
@property
def tags(self):
"""Get all hashtags (without attributes) as a list
@returns: a list of base hashtags for the dataset columns
"""
return [column.tag if column else '' for column in self.columns]
@property
def display_tags(self):
"""Return a list of display tags.
@returns: a list of strings containing the hashtag and attributes for each column
"""
return [column.display_tag if column else '' for column in self.columns]
@property
def has_headers(self):
"""Report whether any non-empty header strings exist.
@returns: C{True} if there is at least one column with a non-empty header string
"""
for column in self.columns:
if column.header:
return True
return False
@property
def values(self):
"""Get all values for the dataset at once, in an array of arrays.
This method can be highly inefficient for large datasets.
@returns: an array of arrays of scalar values
"""
return [row.values for row in self]
def get_value_set(self, tag_pattern=None, normalise=False):
"""Return the set of all values in a dataset (optionally matching a tag pattern for a single column)
Warning: this method can be highly inefficient for large datasets.
@param tag_pattern: (optional) return values only for columns matching this tag pattern.
@param normalise: (optional) normalise the strings with hxl.datatypes.normalise (default: False)
@returns: a Python set of values
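        Example (the hashtag is illustrative):
            org_names = dataset.get_value_set("#org+name", normalise=True)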
"""
value_set = set([])
if tag_pattern:
tag_pattern = TagPattern.parse(tag_pattern)
for row in self:
if tag_pattern:
new_values = row.get_all(tag_pattern)
else:
new_values = row.values
if normalise:
new_values = [hxl.datatypes.normalise(s) for s in new_values]
else:
new_values = [hxl.datatypes.normalise_space(s) for s in new_values]
value_set.update(new_values)
return value_set
def get_column_indices(self, tag_patterns, columns):
"""Get a list of indices that match the tag patterns provided
@param tag_patterns: a list of tag patterns or a string version of the list
@param columns: a list of columns
@returns: a (possibly-empty) list of 0-based indices
"""
patterns = TagPattern.parse_list(tag_patterns)
indices = []
for i, column in enumerate(columns):
for pattern in patterns:
if pattern.match(column):
                    indices.append(i)
return indices
#
# Aggregates
#
def _get_minmax(self, pattern, op):
"""Calculate the extreme min/max value for a tag pattern
Will iterate through the dataset, and use values from multiple matching columns.
Uses numbers, dates, or strings for comparison, based on the first non-empty value found.
@param pattern: the L{hxl.model.TagPattern} to match
        @param op: operator.lt or operator.gt
@returns: the extreme value according to operator supplied, or None if no values found
"""
pattern = TagPattern.parse(pattern)
result_raw = None # what's actually in the dataset
result_normalised = None # normalised version for comparison
# Look at every row
for row in self:
# Look at every matching value in every row
for i, value in enumerate(row.get_all(pattern)):
# ignore empty values
if hxl.datatypes.is_empty(value):
continue
# make a normalised value for comparison
normalised = hxl.datatypes.normalise(value, row.columns[i])
# first non-empty value is always a match
if result_normalised is None:
result_raw = value
result_normalised = normalised
else:
# try comparing the normalised types first, then strings on failure
try:
if op(normalised, result_normalised):
result_raw = value
result_normalised = normalised
except TypeError:
if op(str(normalised), str(result_normalised)):
result_raw = value
result_normalised = normalised
return result_raw
def min(self, pattern):
"""Calculate the minimum value for a tag pattern
Will iterate through the dataset, and use values from multiple matching columns.
Uses numbers, dates, or strings for comparison, based on the first non-empty value found.
@param pattern: the L{hxl.model.TagPattern} to match
@returns: the minimum value according to the '<' operator, or None if no values found
"""
return self._get_minmax(pattern, operator.lt)
def max(self, pattern):
"""Calculate the maximum value for a tag pattern
Will iterate through the dataset, and use values from multiple matching columns.
@param pattern: the L{hxl.model.TagPattern} to match
        @returns: the maximum value according to the '>' operator, or None if no values found
"""
return self._get_minmax(pattern, operator.gt)
#
# Utility
#
def validate(self, schema=None, callback=None):
"""
Validate the current dataset.
@param schema (optional) the pre-compiled schema, schema filename, URL, file object, etc. Defaults to a built-in schema.
@param callback (optional) a function to call with each error or warning. Defaults to collecting errors in an array and returning them.
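        Example (a minimal sketch that collects issues in a list):
            issues = []
            is_valid = dataset.validate(callback=issues.append)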
"""
return hxl.schema(schema, callback).validate(self)
def recipe(self, recipe):
"""Parse a recipe (JSON or a list of dicts) and create the appropriate filters.
@param recipe: a list of dicts, a single dict, or a JSON literal string.
@return: the new end filter.
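        Example (a sketch; assumes the filter names and keys accepted by hxl.filters.from_recipe):
            result = dataset.recipe([{"filter": "with_rows", "queries": "org=unicef"}])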
"""
import hxl.filters
return hxl.filters.from_recipe(self, recipe)
#
# Filters
#
def append(self, append_sources, add_columns=True, queries=[]):
"""Append additional datasets.
@param append_sources: a list of sources to append
@param add_columns: if True (default), include any extra columns in the append sources
@param queries: a list of row queries to select rows for inclusion from the append sources.
@returns: a new HXL source for chaining
"""
import hxl.filters
return hxl.filters.AppendFilter(self, append_sources, add_columns=add_columns, queries=queries)
def append_external_list(self, source_list_url, add_columns=True, queries=[]):
"""Append additional datasets from an external list
@param source_list_url: URL of a HXL dataset containing a list of sources to append.
@param add_columns: if True (default), include any extra columns in the append sources.
@param queries: a list of row queries to select rows for inclusion from the append sources.
@returns: a new HXL source for chaining
"""
import hxl.filters
logger.debug("Loading append list from %s...", source_list_url)
append_sources = hxl.filters.AppendFilter.parse_external_source_list(source_list_url)
logger.debug("Done loading")
return hxl.filters.AppendFilter(self, append_sources, add_columns=add_columns, queries=queries)
def cache(self):
"""Add a caching filter to the dataset."""
import hxl.filters
return hxl.filters.CacheFilter(self)
def dedup(self, patterns=[], queries=[]):
"""Deduplicate a dataset."""
import hxl.filters
return hxl.filters.DeduplicationFilter(self, patterns=patterns, queries=queries)
def with_columns(self, includes):
"""Select matching columns."""
import hxl.filters
return hxl.filters.ColumnFilter(self, include_tags=includes)
def without_columns(self, excludes=None, skip_untagged=False):
"""Select non-matching columns."""
import hxl.filters
return hxl.filters.ColumnFilter(self, exclude_tags=excludes, skip_untagged=skip_untagged)
def with_rows(self, queries, mask=[]):
"""Select matching rows.
@param queries: a predicate or list of predicates for rows to include
@param mask: a predicate or list of predicates for rows to test (default: [] to test all)
@return: a filtered version of the source
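        Example (the row query is illustrative):
            filtered = dataset.with_rows("#sector=WASH")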
"""
import hxl.filters
return hxl.filters.RowFilter(self, queries=queries, reverse=False, mask=mask)
def without_rows(self, queries, mask=[]):
"""Select non-matching rows.
@param queries: a predicate or list of predicates for rows to ignore
@param mask: a predicate or list of predicates for rows to test (default: [] to test all)
@return: a filtered version of the source
"""
import hxl.filters
return hxl.filters.RowFilter(self, queries=queries, reverse=True, mask=mask)
def sort(self, keys=None, reverse=False):
"""Sort the dataset (caching)."""
import hxl.filters
return hxl.filters.SortFilter(self, tags=keys, reverse=reverse)
def count(self, patterns=[], aggregators=None, queries=[]):
"""Count values in the dataset (caching)."""
import hxl.filters
return hxl.filters.CountFilter(
self, patterns=patterns, aggregators=aggregators, queries=queries
)
def row_counter(self, queries=[]):
"""Count the number of rows while streaming."""
import hxl.filters
return hxl.filters.RowCountFilter(self, queries=queries)
def replace_data(self, original, replacement, pattern=None, use_regex=False, queries=[]):
"""Replace values in a HXL dataset."""
import hxl.filters
replacement = hxl.filters.ReplaceDataFilter.Replacement(original, replacement, pattern, use_regex)
return hxl.filters.ReplaceDataFilter(self, [replacement], queries=queries)
def replace_data_map(self, map_source, queries=[]):
"""Replace values in a HXL dataset."""
import hxl.filters
replacements = hxl.filters.ReplaceDataFilter.Replacement.parse_map(hxl.data(map_source))
return hxl.filters.ReplaceDataFilter(self, replacements, queries=queries)
def add_columns(self, specs, before=False):
"""Add fixed-value columns to a HXL dataset."""
import hxl.filters
return hxl.filters.AddColumnsFilter(self, specs=specs, before=before)
def rename_columns(self, specs):
"""Changes headers and tags on a column."""
import hxl.filters
return hxl.filters.RenameFilter(self, specs)
def clean_data(
self, whitespace=[], upper=[], lower=[], date=[], date_format=None,
number=[], number_format=None, latlon=[], purge=False, queries=[]
):
"""Clean data fields."""
import hxl.filters
return hxl.filters.CleanDataFilter(
self,
whitespace=whitespace,
upper=upper,
lower=lower,
date=date, date_format=date_format,
number=number, number_format=number_format,
latlon=latlon,
purge=purge,
queries=queries
)
def merge_data(self, merge_source, keys, tags, replace=False, overwrite=False, queries=[]):
"""Merges values from a second dataset.
@param merge_source: the second HXL data source
@param keys: a single tagspec or list of tagspecs for the shared keys
@param tags: the tags to copy over from the second dataset
@param replace: if True, replace existing columns when present
@param overwrite: if True, overwrite individual values in existing columns when available
@param queries: optional row queries to control the merge
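        Example (a sketch; the URL and hashtags are illustrative):
            merged = dataset.merge_data(
                hxl.data("https://example.org/population.csv"),
                keys="#adm1+code",
                tags="#population+total"
            )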
"""
import hxl.filters
return hxl.filters.MergeDataFilter(self, merge_source, keys, tags, replace, overwrite, queries=queries)
def expand_lists(self, patterns=None, separator="|", correlate=False, queries=[]):
"""Expand lists by repeating rows.
By default, applies to every column with a +list attribute, and uses "|" as the separator.
@param patterns: a single tag pattern or list of tag patterns for columns to expand
@param separator: the list-item separator
"""
import hxl.filters
return hxl.filters.ExpandListsFilter(self, patterns=patterns, separator=separator, correlate=correlate, queries=queries)
    def explode(self, header_attribute='header', value_attribute='value'):
        """Explodes a wide dataset into a long dataset.
@param header_attribute: the attribute to add to the hashtag of the column with the former header (default 'header')
@param value_attribute: the attribute to add to the hashtag of the column with the former value (default 'value')
@return: filtered dataset.
@see hxl.filters.ExplodeFilter
"""
import hxl.filters
return hxl.filters.ExplodeFilter(self, header_attribute, value_attribute)
def implode(self, label_pattern, value_pattern):
"""Implodes a long dataset into a wide dataset
@param label_pattern: the tag pattern to match the label column
        @param value_pattern: the tag pattern to match the value column
@return: filtered dataset.
@see hxl.filters.ImplodeFilter
"""
import hxl.filters
return hxl.filters.ImplodeFilter(self, label_pattern=label_pattern, value_pattern=value_pattern)
def jsonpath(self, path, patterns=[], queries=[], use_json=True):
"""Parse the value as a JSON expression and extract data from it.
See http://goessner.net/articles/JsonPath/
@param path: a JSONPath expression for extracting data
@param patterns: a tag pattern or list of patterns for the columns to use (default to all)
@param queries: a predicate or list of predicates for the rows to consider.
@param use_json: if True, serialise multiple results as JSON lists.
@returns: filtered dataset
@see: hxl.filters.JSONPathFilter
"""
import hxl.filters
return hxl.filters.JSONPathFilter(self, path, patterns=patterns, queries=queries, use_json=use_json)
def fill_data(self, patterns=[], queries=[]):
"""Fills empty cells in a column using the last non-empty value.
@param patterns: a tag pattern or list of patterns for the columns to fill (default to all)
@param queries: a predicate or list of predicates for rows to fill (leave any blank that don't match).
@return filtered dataset
@see hxl.filters.FillFilter
"""
import hxl.filters
return hxl.filters.FillDataFilter(self, patterns=patterns, queries=queries)
#
# Generators
#
def gen_raw(self, show_headers=True, show_tags=True):
"""Generate an array representation of a HXL dataset, one at a time."""
if show_headers:
yield self.headers
if show_tags:
yield self.display_tags
for row in self:
yield row.values
def gen_csv(self, show_headers=True, show_tags=True):
"""Generate a CSV representation of a HXL dataset, one row at a time."""
class TextOut:
"""Simple string output source to capture CSV"""
def __init__(self):
self.data = ''
def write(self, s):
self.data += s
def get(self):
data = self.data
self.data = ''
return data
output = TextOut()
writer = csv.writer(output)
for raw in self.gen_raw(show_headers, show_tags):
writer.writerow(raw)
yield output.get()
def gen_json(self, show_headers=True, show_tags=True, use_objects=False):
"""Generate a JSON representation of a HXL dataset, one row at a time."""
is_first = True
yield "[\n"
if use_objects:
for row in self:
if is_first:
is_first = False
yield json.dumps(row.dictionary, sort_keys=True, indent=2)
else:
yield ",\n" + json.dumps(row.dictionary, sort_keys=True, indent=2)
else:
for raw in self.gen_raw(show_headers, show_tags):
if is_first:
is_first = False
yield json.dumps(raw)
else:
yield ",\n" + json.dumps(raw)
yield "\n]\n"
class Column(object):
"""
The definition of a logical column in the HXL data.
"""
# Regular expression to match a HXL tag
PATTERN = r'^\s*(#{token})((?:\s*\+{token})*)\s*$'.format(token=hxl.datatypes.TOKEN_PATTERN)
# To tighten debugging (may reconsider later -- not really a question of memory efficiency here)
__slots__ = ['tag', 'attributes', 'attribute_list', 'header', 'column_number']
def __init__(self, tag=None, attributes=(), header=None, column_number=None):
"""
Initialise a column definition.
@param tag: the HXL hashtag for the column (default: None)
@param attributes: (optional) a sequence of attributes (default: ())
@param header: (optional) the original plaintext header for the column (default: None)
@param column_number: (optional) the zero-based column number
"""
if tag:
tag = tag.lower()
self.tag = tag
self.header = header
self.column_number = column_number
self.attributes = set([a.lower() for a in attributes])
self.attribute_list = [a.lower() for a in attributes] # to preserve order
@property
def display_tag(self):
"""Default display version of a HXL hashtag.
Attributes are not sorted.
"""
return self.get_display_tag(sort_attributes=False)
def get_display_tag(self, sort_attributes=False):
"""
Generate a display version of the column hashtag
@param sort_attributes: if True, sort attributes; otherwise, preserve the original order
@return the reassembled HXL hashtag string, including language code
"""
if self.tag:
s = self.tag
for attribute in sorted(self.attribute_list) if sort_attributes else self.attribute_list:
s += '+' + attribute
return s
else:
return ''
def has_attribute(self, attribute):
"""Check if an attribute is present."""
return (attribute in self.attribute_list)
def add_attribute(self, attribute):
"""Add an attribute to the column."""
if attribute not in self.attributes:
self.attributes.add(attribute)
self.attribute_list.append(attribute)
return self
def remove_attribute(self, attribute):
"""Remove an attribute from the column."""
if attribute in self.attributes:
self.attributes.remove(attribute)
self.attribute_list.remove(attribute)
return self
def __hash__(self):
"""Make columns usable in a dictionary.
Only the hashtag and attributes are used.
"""
hash_value = hash(self.tag)
for attribute in self.attributes:
hash_value += hash(attribute)
return hash_value
def __eq__(self, other):
"""Test for comparison with another object.
For equality, only the hashtag and attributes have to be the same."""
try:
return (self.tag == other.tag and self.attributes == other.attributes)
except:
return False
def __repr__(self):
return self.display_tag
__str__ = __repr__
@staticmethod
def parse(raw_string, header=None, use_exception=False, column_number=None):
""" Attempt to parse a full hashtag specification.
@param raw_string: the string representation of the tagspec
@param header: the text header to include
@param use_exception: if True, throw an exception for a malformed tagspec
@returns: None if the string is empty, False if it's malformed (and use_exception is False), or a Column object otherwise
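        Example (a minimal sketch):
            column = Column.parse("#affected+f+children", header="Affected girls")
            # column.tag == "#affected"
            # column.attributes == {"f", "children"}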
"""
# Already parsed?
if isinstance(raw_string, Column):
return raw_string
# Empty string?
if hxl.datatypes.is_empty(raw_string):
return None
# Pattern for a single tag
result = re.match(Column.PATTERN, raw_string)
if result:
tag = result.group(1)
attribute_string = result.group(2)
if attribute_string:
attributes = re.split(r'\s*\+', attribute_string.strip().strip('+'))
else:
attributes = []
return Column(tag=tag, attributes=attributes, header=header, column_number=column_number)
else:
if use_exception:
raise hxl.HXLException("Malformed tag expression: " + raw_string)
else:
logger.debug("Not a HXL hashtag spec: %s", raw_string)
return False
@staticmethod
def parse_spec(raw_string, default_header=None, use_exception=False, column_number=None):
"""Attempt to parse a single-string header/hashtag spec"""
# Already parsed?
if isinstance(raw_string, Column):
return raw_string
matches = re.match(r'^(.*)(#.*)$', raw_string)
if matches:
header = matches.group(1) if matches.group(1) else default_header
return Column.parse(matches.group(2), header=header, column_number=column_number)
else:
return Column.parse('#' + raw_string, header=default_header, column_number=column_number)
class Row(object):
"""
An iterable row of values in a HXL dataset.
"""
# Predefine the slots for efficiency (may reconsider later)
__slots__ = ['columns', 'values', 'row_number', 'source_row_number']
def __init__(self, columns, values=[], row_number=None, source_row_number=None):
"""
Set up a new row.
@param columns: The column definitions (array of Column objects).
@param values: (optional) The string values for the row (default: [])
@param row_number: (optional) The zero-based logical row number in the input dataset, if available (default: None)
@param source_row_number: (optional) The zero-based source row number in the input dataset, if available (default: None)
"""
self.columns = columns
self.values = copy.copy(values)
self.row_number = row_number
self.source_row_number = source_row_number
def append(self, value):
"""
Append a value to the row.
@param value The new value to append.
@return The new value
"""
self.values.append(value)
return value
def get(self, tag, index=None, default=None, parsed=False):
"""
Get a single value for a tag in a row.
If no index is provided ("None"), return the first non-empty value.
@param tag: A TagPattern or a string value for a tag.
@param index: The zero-based index if there are multiple values for the tag (default: None)
@param default: The default value if not found (default: None). Never parsed, even if parsed=True
@param parsed: If true, use attributes as hints to try to parse the value (e.g. number, list, date)
@return The value found, or the default value provided. If parsed=True, the return value will be a list (default: False)
"""
# FIXME - move externally, use for get_all as well, and support numbers and dates
def parse(column, value):
if parsed:
if column.has_attribute('list'):
return re.split(r'\s*,\s*', value)
else:
return [value]
return value
if type(tag) is TagPattern:
pattern = tag
else:
pattern = TagPattern.parse(tag)
for i, column in enumerate(self.columns):
if i >= len(self.values):
break
if pattern.match(column):
if index is None:
# None (the default) is a special case: it means look
# for the first truthy value
if self.values[i]:
return parse(column, self.values[i])
else:
# Otherwise, look for a specific index
if index == 0:
return parse(column, self.values[i])
else:
index = index - 1
return default
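    # Illustrative usage (assumed data): for a column tagged #org+list whose cell value is
    # "UNICEF, WFP", row.get('#org', parsed=True) returns ['UNICEF', 'WFP'], whereas
    # row.get('#org') returns the raw string value.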
def get_all(self, tag, default=None):
"""
Get all values for a specific tag in a row
@param tag A TagPattern or a string value for a tag.
@return An array of values for the HXL hashtag.
"""
if type(tag) is TagPattern:
pattern = tag
else:
pattern = TagPattern.parse(tag)
result = []
for i, column in enumerate(self.columns):
if i >= len(self.values):
break
if pattern.match(column):
value = self.values[i]
if default is not None and not value:
value = default
result.append(value)
return result
def key(self, patterns=None, indices=None):
"""Generate a unique key tuple for the row, based on a list of tag patterns
@param patterns: a list of L{TagPattern} objects, or a parseable string
@returns: the key as a tuple (might be empty)
"""
key = []
# if the user doesn't provide indices, get indices from the pattern
if not indices and patterns:
indices = get_column_indices(patterns, self.columns)
if indices:
# if we have indices, use them to build the key
for i in indices:
if i < len(self.values):
key.append(hxl.datatypes.normalise(self.values[i], self.columns[i]))
else:
# if there are still no indices, use the whole row for the key
for i, value in enumerate(self.values):
key.append(hxl.datatypes.normalise(value, self.columns[i]))
return tuple(key) # make it into a tuple so that it's hashable
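    # Illustrative usage (assumed data): row.key('#adm1+code') returns a tuple of the
    # normalised values of the matching columns, e.g. ('ke001',), which is hashable and so
    # usable for deduplication or as a dictionary key.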
@property
def dictionary(self):
"""Return the row as a Python dict.
The keys will be HXL hashtags and attributes, normalised per HXL 1.1.
If two or more columns have the same hashtags and attributes, only the first will be included.
@return: The row as a Python dictionary.
"""
data = {}
for i, col in enumerate(self.columns):
key = col.get_display_tag(sort_attributes=True)
if key and (not key in data) and (i < len(self.values)):
data[key] = self.values[i]
return data
def __getitem__(self, index):
"""
Array-access method to make this class iterable.
@param index The zero-based index of a value to look up.
@return The value if it exists.
@exception IndexError if the index is out of range.
"""
return self.values[index]
def __str__(self):
"""
Create a string representation of a row for debugging.
"""
        s = '<Row'
for column_number, value in enumerate(self.values):
s += "\n " + str(self.columns[column_number]) + "=" + str(value)
s += "\n>"
return s
class RowQuery(object):
"""Query to execute against a row of HXL data."""
def __init__(self, pattern, op, value, is_aggregate=False):
"""Constructor
@param pattern: the L{TagPattern} to match in the row
@param op: the operator function to use for comparison
@param value: the value to compare against
@param is_aggregate: if True, the value is a special placeholder like "min" or "max" that needs to be calculated
"""
self.pattern = TagPattern.parse(pattern)
self.op = op
self.value = value
# if the value is a formula, extract it
self.formula = None
result = re.match(r'^{{(.+)}}$', hxl.datatypes.normalise_space(value))
if result:
self.formula = result.group(1)
self.is_aggregate=is_aggregate
self.needs_aggregate = False
"""Need to calculate an aggregate value"""
if is_aggregate:
self.needs_aggregate = True
# calculate later
self.date_value = None
self.number_value = None
self._saved_indices = None
def calc_aggregate(self, dataset):
"""Calculate the aggregate value that we need for the row query
Substitute the special values "min" and "max" with aggregates.
@param dataset: the HXL dataset to use (must be cached)
"""
if not self.needs_aggregate:
logger.warning("no aggregate calculation needed")
return # no need to calculate
if not dataset.is_cached:
raise HXLException("need a cached dataset for calculating an aggregate value")
if self.value == 'min':
self.value = dataset.min(self.pattern)
self.op = operator.eq
elif self.value == 'max':
self.value = dataset.max(self.pattern)
self.op = operator.eq
elif self.value == 'not min':
self.value = dataset.min(self.pattern)
self.op = operator.ne
elif self.value == 'not max':
self.value = dataset.max(self.pattern)
self.op = operator.ne
else:
raise HXLException("Unrecognised aggregate: {}".format(value))
self.needs_aggregate = False
def match_row(self, row):
"""Check if a key-value pair appears in a HXL row"""
# fail if we need an aggregate and haven't calculated it
        if self.needs_aggregate:
raise HXLException("must call calc_aggregate before matching an 'is min' or 'is max' condition")
        # initialise if this is the first time matching for the row query
if self._saved_indices is None or self.formula:
# if it's a row formula, evaluate first
if self.formula:
value = hxl.formulas.eval.eval(row, self.formula)
else:
value = self.value
if self.pattern.tag == '#date':
try:
self.date_value = hxl.datatypes.normalise_date(value)
except ValueError:
self.date_value = None
try:
self.number_value = hxl.datatypes.normalise_number(value)
except ValueError:
self.number_value = None
self.string_value = hxl.datatypes.normalise_string(value)
# try all the matching column values
indices = self._get_saved_indices(row.columns)
for i in indices:
if i < len(row.values) and self.match_value(row.values[i], self.op):
return True
return False
def match_value(self, value, op):
"""Try matching as dates, then as numbers, then as simple strings"""
if self.date_value is not None:
try:
return op(hxl.datatypes.normalise_date(value), self.date_value)
except ValueError:
pass
if self.number_value is not None:
try:
return op(hxl.datatypes.normalise_number(value), self.number_value)
except:
pass
        return op(hxl.datatypes.normalise_string(value), self.string_value)
def _get_saved_indices(self, columns):
"""Cache the column tests, so that we run them only once."""
# FIXME - assuming that the columns never change
        if self._saved_indices is None:
            self._saved_indices = []
            for i in range(len(columns)):
                if self.pattern.match(columns[i]):
                    self._saved_indices.append(i)
        return self._saved_indices
@staticmethod
def parse(query):
"""Parse a filter expression"""
if isinstance(query, RowQuery):
# already parsed
return query
parts = re.split(r'([<>]=?|!?=|!?~|\bis\b)', hxl.datatypes.normalise_string(query), maxsplit=1)
pattern = TagPattern.parse(parts[0])
op_name = hxl.datatypes.normalise_string(parts[1])
op = RowQuery.OPERATOR_MAP.get(op_name)
value = hxl.datatypes.normalise_string(parts[2])
is_aggregate = False
# special handling for aggregates (FIXME)
if op_name == 'is' and value in ('min', 'max', 'not min', 'not max'):
is_aggregate = True
return RowQuery(pattern, op, value, is_aggregate)
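    # Illustrative usage (not part of the original source): RowQuery.parse('#affected+f > 100')
    # yields a query whose pattern is '#affected+f', whose operator is greater-than, and whose
    # value is '100'; values are compared as dates first, then numbers, then normalised strings
    # (see match_value above).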
@staticmethod
def parse_list(queries):
"""Parse a single query spec or a list of specs."""
if queries:
if not hasattr(queries, '__len__') or isinstance(queries, six.string_types):
# make a list if needed
queries = [queries]
return [hxl.model.RowQuery.parse(query) for query in queries]
else:
return []
@staticmethod
def match_list(row, queries=None, reverse=False):
"""See if any query in a list matches a row."""
if not queries:
# no queries = pass
return True
else:
# otherwise, must match at least one
for query in queries:
if query.match_row(row):
return not reverse
return reverse
@staticmethod
def operator_re(s, pattern):
"""Regular-expression comparison operator."""
return re.search(pattern, s)
@staticmethod
def operator_nre(s, pattern):
"""Regular-expression negative comparison operator."""
return not re.search(pattern, s)
@staticmethod
def operator_is(s, condition):
"""Advanced tests
Note: this won't be called for aggregate values like "is min" or "is not max";
for these, the aggregate will already be calculated, and a simple comparison
operator substituted by L{calc_aggregate}.
"""
if condition == 'empty':
return hxl.datatypes.is_empty(s)
elif condition == 'not empty':
return not hxl.datatypes.is_empty(s)
elif condition == 'number':
return hxl.datatypes.is_number(s)
elif condition == 'not number':
return not hxl.datatypes.is_number(s)
elif condition == 'date':
return (hxl.datatypes.is_date(s))
elif condition == 'not date':
return (hxl.datatypes.is_date(s) is False)
else:
raise hxl.HXLException('Unknown is condition: {}'.format(condition))
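    # Conditions accepted by operator_is (from the branches above): 'empty', 'not empty',
    # 'number', 'not number', 'date', 'not date' -- e.g. the query string '#sector is not empty'.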
# Constant map of comparison operators
OPERATOR_MAP = {
'=': operator.eq,
'!=': operator.ne,
'<': operator.lt,
'<=': operator.le,
'>': operator.gt,
'>=': operator.ge,
}
# Static functions
def get_column_indices(tag_patterns, columns):
"""Get a list of column indices that match the tag patterns provided
@param tag_patterns: a list of tag patterns or a string version of the list
@param columns: a list of columns
@returns: a (possibly-empty) list of 0-based indices
"""
tag_patterns = TagPattern.parse_list(tag_patterns)
columns = [Column.parse(column) for column in columns]
indices = []
for i, column in enumerate(columns):
for pattern in tag_patterns:
if pattern.match(column):
indices.append(i)
return indices
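# Illustrative usage (assumed columns): get_column_indices('#org,#sector', columns) returns the
# 0-based positions of every column matching either pattern, e.g. [0, 3].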
# Extra static initialisation
RowQuery.OPERATOR_MAP['~'] = RowQuery.operator_re
RowQuery.OPERATOR_MAP['!~'] = RowQuery.operator_nre
RowQuery.OPERATOR_MAP['is'] = RowQuery.operator_is
# end
| unlicense | -8,416,026,671,488,264,000 | 38.114675 | 143 | 0.603247 | false |
KDD-OpenSource/geox-young-academy | day-3/Kalman-filter_Mark.py | 1 | 1494 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 11 10:10:24 2017
@author: Mark
"""
import numpy as np
import matplotlib.pyplot as plt
#Define functions
def model(state_0,A,B):
state_1 = A*state_0 + np.random.normal(0,B)
return state_1
state_null=np.random.normal(0,0.4)
def observation_function(state,R):
obs=state+np.random.normal(0,R)
return obs
def forecast(state_0,cov_0,A,B):
state_1=A*state_0
cov_1=A*cov_0*A+B
return state_1,cov_1
def analysis_formulas(state_1_hat,cov_1_hat,K,H,obs_0):
state_1 = state_1_hat - K*(H*state_1_hat - obs_0)
cov_1 = cov_1_hat - K*H*cov_1_hat
return state_1, cov_1
def kalman_gain(cov_1_hat,H,R):
K = cov_1_hat*H*(R+H*cov_1_hat*H)**(-1)
return K
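# The functions above implement the standard scalar Kalman filter equations:
#   forecast: x_hat = A*x, P_hat = A*P*A + B (B = process-noise term)
#   gain: K = P_hat*H / (R + H*P_hat*H) (R = observation-noise term)
#   analysis: x = x_hat - K*(H*x_hat - y), P = P_hat - K*H*P_hat
# Note that np.random.normal takes a standard deviation as its second argument, so B and R are
# used directly as standard deviations when the synthetic noise is generated below.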
#Initialize model parameters
A = 0.5
H = 1
B = 0.5
R = 0.1
lev = 100
#Sythetic Model
STATE_real = np.zeros(lev)
OBS_real = np.zeros(lev)
STATE_real[0] = np.random.normal(5,0.1)
OBS_real[0] = observation_function(STATE_real[0],R)
for i in range (1,lev-1):
STATE_real[i] = model(STATE_real[i-1],0.4,0.01)
OBS_real[i] = observation_function(STATE_real[i],R)
#Kalman-filter
STATE = np.zeros(lev)
COV = np.zeros(lev)
STATE[0] = state_null
COV[0] = B
for i in range (1,lev-1):
(state_hat,cov_hat) = forecast(STATE[i-1],COV[i-1],A,B)
K = kalman_gain(cov_hat,H,R)
(STATE[i],COV[i]) = analysis_formulas(state_hat,cov_hat,K,H,OBS_real[i])
plt.plot(STATE)
plt.plot(STATE_real)
| mit | 167,307,088,535,886,000 | 21.34375 | 76 | 0.613788 | false |
Brazelton-Lab/lab_scripts | edit-esom-class-file.py | 1 | 1891 | #! /usr/bin/env python
"""
edit user-provided ESOM class file with new assignments in user-provided file
each line of user-provided file of new assignments should contain a data point number and a class number, separated by tabs
usage:
python edit-esom-class-file.py esom.cls new-assignments.tsv new-class-filename.cls
Copyright:
edit-esom-class-file.py Append user data to ESOM class file
Copyright (C) 2016 William Brazelton
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
cls_file = sys.argv[1]
user_file = sys.argv[2]
new_file = sys.argv[3]
# create dictionary of user-provided new assignments:
d = {}
with open(user_file) as user:
for line in user:
cols = line.split('\t')
data_point = cols[0].strip()
cls_number = cols[1].strip()
d[data_point] = cls_number.strip('\n')
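# Example of an expected line in the new-assignments file (tab-separated, values hypothetical):
# "1532<TAB>7" -- data point 1532 is reassigned to class 7.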
# iterate through class file, writing new class file with new assignments:
with open(new_file,'w') as new:
with open(cls_file) as cls:
for line in cls:
if line[0] == '%': new.write(line)
else:
cols = line.split('\t')
if cols[0] in d: new.write(str(cols[0]) + '\t' + str(d[cols[0]]) + '\n')
else: new.write(line)
print('WARNING: if you introduced new classes to this .cls file, you need to manually add them to the header of this new .cls file')
| gpl-2.0 | -7,825,894,047,534,338,000 | 31.603448 | 131 | 0.710206 | false |
dimtruck/magnum | magnum/tests/functional/k8s/test_k8s_python_client.py | 1 | 6881 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from magnum.common.pythonk8sclient.swagger_client import api_client
from magnum.common.pythonk8sclient.swagger_client.apis import apiv_api
from magnum.tests.functional.python_client_base import BayAPITLSTest
from magnum.tests.functional.python_client_base import BayTest
from magnumclient.openstack.common.apiclient import exceptions
class TestBayModelResource(BayTest):
coe = 'kubernetes'
def test_baymodel_create_and_delete(self):
self._test_baymodel_create_and_delete('test_k8s_baymodel')
class TestBayResource(BayTest):
coe = 'kubernetes'
def test_bay_create_and_delete(self):
baymodel_uuid = self._test_baymodel_create_and_delete(
'test_k8s_baymodel', delete=False, tls_disabled=True)
self._test_bay_create_and_delete('test_k8s_bay', baymodel_uuid)
class TestKubernetesAPIs(BayAPITLSTest):
@classmethod
def setUpClass(cls):
super(TestKubernetesAPIs, cls).setUpClass()
cls.baymodel = cls._create_baymodel('testk8sAPI',
coe='kubernetes',
tls_disabled=False,
network_driver='flannel',
fixed_network='192.168.0.0/24',
)
cls.bay = cls._create_bay('testk8sAPI', cls.baymodel.uuid)
config_contents = """[req]
distinguished_name = req_distinguished_name
req_extensions = req_ext
prompt = no
[req_distinguished_name]
CN = Your Name
[req_ext]
extendedKeyUsage = clientAuth
"""
cls._create_tls_ca_files(config_contents)
cls.kube_api_url = cls.cs.bays.get(cls.bay.uuid).api_address
k8s_client = api_client.ApiClient(cls.kube_api_url,
key_file=cls.key_file,
cert_file=cls.cert_file,
ca_certs=cls.ca_file)
cls.k8s_api = apiv_api.ApivApi(k8s_client)
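        # k8s_api is now an authenticated client for the bay's Kubernetes API endpoint, using
        # the client certificate/key and CA bundle generated above, so the pod/service/
        # replication-controller tests below can talk to the cluster over TLS.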
@classmethod
def tearDownClass(cls):
cls._delete_bay(cls.bay.uuid)
try:
cls._wait_on_status(cls.bay,
["CREATE_COMPLETE",
"DELETE_IN_PROGRESS", "CREATE_FAILED"],
["DELETE_FAILED", "DELETE_COMPLETE"])
except exceptions.NotFound:
pass
cls._delete_baymodel(cls.baymodel.uuid)
def test_pod_apis(self):
pod_manifest = {'apiVersion': 'v1',
'kind': 'Pod',
'metadata': {'color': 'blue', 'name': 'test'},
'spec': {'containers': [{'image': 'dockerfile/redis',
'name': 'redis'}]}}
resp = self.k8s_api.create_namespaced_pod(body=pod_manifest,
namespace='default')
self.assertEqual('test', resp.metadata.name)
self.assertTrue(resp.status.phase)
resp = self.k8s_api.read_namespaced_pod(name='test',
namespace='default')
self.assertEqual('test', resp.metadata.name)
self.assertTrue(resp.status.phase)
resp = self.k8s_api.delete_namespaced_pod(name='test', body={},
namespace='default')
def test_service_apis(self):
service_manifest = {'apiVersion': 'v1',
'kind': 'Service',
'metadata': {'labels': {'name': 'frontend'},
'name': 'frontend',
'resourceversion': 'v1'},
'spec': {'ports': [{'port': 80,
'protocol': 'TCP',
'targetPort': 80}],
'selector': {'name': 'frontend'}}}
resp = self.k8s_api.create_namespaced_service(body=service_manifest,
namespace='default')
self.assertEqual('frontend', resp.metadata.name)
self.assertTrue(resp.status)
resp = self.k8s_api.read_namespaced_service(name='frontend',
namespace='default')
self.assertEqual('frontend', resp.metadata.name)
self.assertTrue(resp.status)
resp = self.k8s_api.delete_namespaced_service(name='frontend',
namespace='default')
def test_replication_controller_apis(self):
rc_manifest = {
'apiVersion': 'v1',
'kind': 'ReplicationController',
'metadata': {'labels': {'name': 'frontend'},
'name': 'frontend'},
'spec': {'replicas': 2,
'selector': {'name': 'frontend'},
'template': {'metadata': {
'labels': {'name': 'frontend'}},
'spec': {'containers': [{
'image': 'nginx',
'name': 'nginx',
'ports': [{'containerPort': 80,
'protocol': 'TCP'}]}]}}}}
resp = self.k8s_api.create_namespaced_replication_controller(
body=rc_manifest, namespace='default')
self.assertEqual('frontend', resp.metadata.name)
self.assertEqual(2, resp.spec.replicas)
resp = self.k8s_api.read_namespaced_replication_controller(
name='frontend', namespace='default')
self.assertEqual('frontend', resp.metadata.name)
self.assertEqual(2, resp.spec.replicas)
resp = self.k8s_api.delete_namespaced_replication_controller(
name='frontend', body={}, namespace='default')
"""
    NB : Bug1504379. This is a placeholder and will be removed when all
    the objects-from-bay patches are checked in.
def test_pods_list(self):
self.assertIsNotNone(self.cs.pods.list(self.bay.uuid))
def test_rcs_list(self):
self.assertIsNotNone(self.cs.rcs.list(self.bay.uuid))
def test_services_list(self):
self.assertIsNotNone(self.cs.services.list(self.bay.uuid))
"""
| apache-2.0 | 6,369,746,258,494,733,000 | 41.214724 | 77 | 0.533789 | false |
lpramuk/robottelo | tests/foreman/cli/test_satellitesync.py | 1 | 80631 | # -*- encoding: utf-8 -*-
"""Test class for InterSatellite Sync
:Requirement: Satellitesync
:CaseAutomation: Automated
:CaseLevel: Component
:CaseComponent: InterSatelliteSync
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import json
from random import randint
from fauxfactory import gen_integer
from fauxfactory import gen_string
from nailgun import entities
from robottelo import manifests
from robottelo import ssh
from robottelo.cli.base import CLIReturnCodeError
from robottelo.cli.contentview import ContentView
from robottelo.cli.factory import make_content_view
from robottelo.cli.factory import make_lifecycle_environment
from robottelo.cli.factory import make_org
from robottelo.cli.factory import make_product
from robottelo.cli.factory import make_repository
from robottelo.cli.package import Package
from robottelo.cli.puppetmodule import PuppetModule
from robottelo.cli.repository import Repository
from robottelo.cli.repository_set import RepositorySet
from robottelo.cli.settings import Settings
from robottelo.cli.subscription import Subscription
from robottelo.constants import CUSTOM_PUPPET_REPO
from robottelo.constants import ENVIRONMENT
from robottelo.constants import PRDS
from robottelo.constants import REPOS
from robottelo.constants import REPOSET
from robottelo.decorators import run_in_one_thread
from robottelo.decorators import skip_if_not_set
from robottelo.decorators import stubbed
from robottelo.decorators import tier1
from robottelo.decorators import tier2
from robottelo.decorators import tier3
from robottelo.decorators import tier4
from robottelo.decorators import upgrade
from robottelo.test import CLITestCase
class ExportDirectoryNotSet(Exception):
"""Raise when export Directory is not set or found"""
@run_in_one_thread
class RepositoryExportTestCase(CLITestCase):
"""Tests for exporting a repository via CLI"""
export_dir = None
is_set_up = False
org = None
def setUp(self):
"""Create a directory for export, configure permissions and satellite
settings
"""
super(RepositoryExportTestCase, self).setUp()
if not RepositoryExportTestCase.is_set_up:
RepositoryExportTestCase.export_dir = gen_string('alphanumeric')
# Create a new 'export' directory on the Satellite system
result = ssh.command('mkdir /mnt/{0}'.format(self.export_dir))
self.assertEqual(result.return_code, 0)
result = ssh.command('chown foreman.foreman /mnt/{0}'.format(self.export_dir))
self.assertEqual(result.return_code, 0)
result = ssh.command('ls -Z /mnt/ | grep {0}'.format(self.export_dir))
self.assertEqual(result.return_code, 0)
self.assertGreaterEqual(len(result.stdout), 1)
self.assertIn('unconfined_u:object_r:mnt_t:s0', result.stdout[0])
# Fix SELinux policy for new directory
result = ssh.command(
'semanage fcontext -a -t foreman_var_run_t "/mnt/{0}(/.*)?"'.format(
self.export_dir
)
)
self.assertEqual(result.return_code, 0)
result = ssh.command('restorecon -Rv /mnt/{0}'.format(self.export_dir))
self.assertEqual(result.return_code, 0)
# Assert that we have the correct policy
result = ssh.command('ls -Z /mnt/ | grep {0}'.format(self.export_dir))
self.assertEqual(result.return_code, 0)
self.assertGreaterEqual(len(result.stdout), 1)
self.assertIn('unconfined_u:object_r:foreman_var_run_t:s0', result.stdout[0])
# Update the 'pulp_export_destination' settings to new directory
Settings.set(
{'name': 'pulp_export_destination', 'value': '/mnt/{0}'.format(self.export_dir)}
)
# Create an organization to reuse in tests
RepositoryExportTestCase.org = make_org()
RepositoryExportTestCase.is_set_up = True
@classmethod
def tearDownClass(cls):
"""Remove the export directory with all exported repository archives"""
ssh.command('rm -rf /mnt/{0}'.format(RepositoryExportTestCase.export_dir))
super(RepositoryExportTestCase, cls).tearDownClass()
@tier3
def test_positive_export_custom_product(self):
"""Export a repository from the custom product
:id: 9c855866-b9b1-4e32-b3eb-7342fdaa7116
        :expectedresults: Repository was successfully exported, RPM files are
            present on the satellite machine
:CaseLevel: System
"""
# Create custom product and repository
product = make_product({'organization-id': self.org['id']})
repo = make_repository(
{
'download-policy': 'immediate',
'organization-id': self.org['id'],
'product-id': product['id'],
}
)
backend_identifier = entities.Repository(id=repo['id']).read().backend_identifier
repo_export_dir = '/mnt/{0}/{1}/{2}/{3}/custom/{4}/{5}'.format(
self.export_dir,
backend_identifier,
self.org['label'],
ENVIRONMENT,
product['label'],
repo['label'],
)
# Export the repository
Repository.export({'id': repo['id']})
# Verify export directory is empty
result = ssh.command("find {} -name '*.rpm'".format(repo_export_dir))
self.assertEqual(len(result.stdout), 0)
# Synchronize the repository
Repository.synchronize({'id': repo['id']})
# Export the repository once again
Repository.export({'id': repo['id']})
# Verify RPMs were successfully exported
result = ssh.command("find {} -name '*.rpm'".format(repo_export_dir))
self.assertEqual(result.return_code, 0)
self.assertGreaterEqual(len(result.stdout), 1)
@skip_if_not_set('fake_manifest')
@tier3
@upgrade
def test_positive_export_rh_product(self):
"""Export a repository from the Red Hat product
:id: e17898db-ca92-4121-a723-0d4b3cf120eb
        :expectedresults: Repository was successfully exported, RPM files are
            present on the satellite machine
:CaseLevel: System
"""
# Enable RH repository
with manifests.clone() as manifest:
ssh.upload_file(manifest.content, manifest.filename)
Subscription.upload({'file': manifest.filename, 'organization-id': self.org['id']})
RepositorySet.enable(
{
'basearch': 'x86_64',
'name': REPOSET['rhva6'],
'organization-id': self.org['id'],
'product': PRDS['rhel'],
'releasever': '6Server',
}
)
repo = Repository.info(
{
'name': REPOS['rhva6']['name'],
'organization-id': self.org['id'],
'product': PRDS['rhel'],
}
)
backend_identifier = entities.Repository(id=repo['id']).read().backend_identifier
repo_export_dir = (
'/mnt/{0}/{1}/{2}/{3}/content/dist/rhel/server/6/6Server/'
'x86_64/rhev-agent/3/os'.format(
self.export_dir, backend_identifier, self.org['label'], ENVIRONMENT
)
)
# Update the download policy to 'immediate'
Repository.update({'download-policy': 'immediate', 'id': repo['id']})
# Export the repository
Repository.export({'id': repo['id']})
# Verify export directory is empty
result = ssh.command("find {} -name '*.rpm'".format(repo_export_dir))
self.assertEqual(len(result.stdout), 0)
# Synchronize the repository
Repository.synchronize({'id': repo['id']})
# Export the repository once again
Repository.export({'id': repo['id']})
# Verify RPMs were successfully exported
result = ssh.command("find {} -name '*.rpm'".format(repo_export_dir))
self.assertEqual(result.return_code, 0)
self.assertGreaterEqual(len(result.stdout), 1)
class ContentViewSync(CLITestCase):
"""Implements Content View Export Import tests in CLI
:CaseComponent: ContentViews
"""
export_base = '/var/lib/pulp/katello-export'
@staticmethod
def _create_cv(cv_name, repo, organization, publish=True):
"""Creates CV and/or publishes in organization with given name and repository
:param cv_name: The name of CV to create
        :param repo: The repository dictionary
        :param organization: The organization dictionary
        :param publish: Publishes the CV if True, else doesn't
        :return: A tuple of the content view dictionary and the content view version ID
"""
content_view = make_content_view({'name': cv_name, 'organization-id': organization['id']})
ContentView.add_repository(
{
'id': content_view['id'],
'organization-id': organization['id'],
'repository-id': repo['id'],
}
)
content_view = ContentView.info({'name': cv_name, 'organization-id': organization['id']})
cvv_id = None
if publish:
ContentView.publish({'id': content_view['id']})
content_view = ContentView.info({'id': content_view['id']})
cvv_id = content_view['versions'][0]['id']
return content_view, cvv_id
@staticmethod
def _enable_rhel_content(organization, repo_name, releasever=None, product=None, sync=True):
"""Enable and/or Synchronize rhel content
        :param organization: The organization dictionary into which the RHEL
            contents will be enabled
        :param bool sync: Syncs contents to the repository if True, else doesn't
        :return: Repository CLI object
"""
manifests.upload_manifest_locked(organization['id'], interface=manifests.INTERFACE_CLI)
RepositorySet.enable(
{
'basearch': 'x86_64',
'name': REPOSET[repo_name],
'organization-id': organization['id'],
'product': PRDS[product],
'releasever': releasever,
}
)
repo = Repository.info(
{
'name': REPOS[repo_name]['name'],
'organization-id': organization['id'],
'product': PRDS[product],
}
)
# Update the download policy to 'immediate'
Repository.update(
{'download-policy': 'immediate', 'mirror-on-sync': 'no', 'id': repo['id']}
)
if sync:
# Synchronize the repository
Repository.synchronize({'id': repo['id']}, timeout=7200)
repo = Repository.info(
{
'name': REPOS[repo_name]['name'],
'organization-id': organization['id'],
'product': PRDS[product],
}
)
return repo
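    # Illustrative call (repo/product keys as used elsewhere in this module): enabling and
    # syncing the RHEL 6 RHEV agent repo would look like
    #   ContentViewSync._enable_rhel_content(org, 'rhva6', releasever='6Server', product='rhel')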
def _update_json(self, json_path):
"""Updates the major and minor version in the exported json file
:param json_path: json file path on server
:return: Returns major and minor versions
"""
new_major = gen_integer(2, 1000)
new_minor = gen_integer(2, 1000)
result = ssh.command('[ -f {} ]'.format(json_path))
if result.return_code == 0:
ssh.command(
'sed -i \'s/\"major\": [0-9]\\+/\"major\": {0}/\' {1}'.format(new_major, json_path)
)
ssh.command(
'sed -i \'s/\"minor\": [0-9]\\+/\"minor\": {0}/\' {1}'.format(new_minor, json_path)
)
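            # The two sed commands above rewrite the "major" and "minor" fields of the export
            # metadata json in place, e.g. '"major": 1' becomes '"major": 523' (values are random).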
return new_major, new_minor
raise IOError(
            'JSON file {} not found to alter the major/minor versions'.format(json_path)
)
def set_importing_org(self, product, repo, cv, mos='no'):
"""Sets same CV, product and repository in importing organization as
exporting organization
:param str product: The product name same as exporting product
:param str repo: The repo name same as exporting repo
:param str cv: The cv name same as exporting cv
        :param str mos: Mirror on Sync for the repo; 'no' by default, can be overridden to 'yes'
"""
self.importing_org = make_org()
self.importing_prod = make_product(
{'organization-id': self.importing_org['id'], 'name': product}
)
self.importing_repo = make_repository(
{
'name': repo,
'mirror-on-sync': mos,
'download-policy': 'immediate',
'product-id': self.importing_prod['id'],
}
)
self.importing_cv = make_content_view(
{'name': cv, 'organization-id': self.importing_org['id']}
)
ContentView.add_repository(
{
'id': self.importing_cv['id'],
'organization-id': self.importing_org['id'],
'repository-id': self.importing_repo['id'],
}
)
@classmethod
def setUpClass(cls):
"""Create Directory for all CV Sync Tests in export_base directory"""
super(ContentViewSync, cls).setUpClass()
if ssh.command('[ -d {} ]'.format(cls.export_base)).return_code == 1:
raise ExportDirectoryNotSet(
'Export Directory "{}" is not set/found.'.format(cls.export_base)
)
cls.exporting_org = make_org()
cls.exporting_prod_name = gen_string('alpha')
product = make_product(
{'organization-id': cls.exporting_org['id'], 'name': cls.exporting_prod_name}
)
cls.exporting_repo_name = gen_string('alpha')
cls.exporting_repo = make_repository(
{
'name': cls.exporting_repo_name,
'mirror-on-sync': 'no',
'download-policy': 'immediate',
'product-id': product['id'],
}
)
Repository.synchronize({'id': cls.exporting_repo['id']})
cls.exporting_cv_name = gen_string('alpha')
cls.exporting_cv, cls.exporting_cvv_id = ContentViewSync._create_cv(
cls.exporting_cv_name, cls.exporting_repo, cls.exporting_org
)
def setUp(self):
"""Create Directory for CV export"""
super().setUp()
self.export_dir = "{}/{}".format(self.export_base, gen_string('alpha'))
ssh.command('mkdir {}'.format(self.export_dir))
def tearDown(self):
"""Deletes Directory created for CV export Test during setUp"""
super(ContentViewSync, self).tearDown()
ssh.command('rm -rf {}'.format(self.export_dir))
def assert_exported_cvv_exists(self, content_view_name, content_view_version):
"""Verify an exported tar exists
:return: The path to the tar (if it exists).
"""
exported_tar = '{0}/export-{1}-{2}.tar'.format(
self.export_dir, content_view_name, content_view_version
)
result = ssh.command("[ -f {0} ]".format(exported_tar))
self.assertEqual(result.return_code, 0)
return exported_tar
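    # The export naming convention asserted above is '<export dir>/export-<cv name>-<version>.tar',
    # e.g. (hypothetical names) /var/lib/pulp/katello-export/AbCdE/export-mycv-1.0.tar.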
@tier3
def test_positive_export_import_filtered_cvv(self):
"""CV Version with filtered contents only can be exported and imported.
:id: 2992e0ae-173d-4589-817d-1a11455dfc43
:steps:
1. Create product and repository with custom contents.
2. Sync the repository.
3. Create CV with above product.
4. Create a filter and filter-rule.
5. Publish the above filtered content-view.
6. Export Filtered CV version contents to a directory
7. Import those contents from some other org/satellite.
:expectedresults:
            1. Filtered CV version custom contents have been exported to the directory
            2. Filtered exported custom contents have been imported into the org/satellite
:CaseAutomation: Automated
:CaseImportance: High
:CaseLevel: System
"""
exporting_cv_name = gen_string('alpha')
exporting_cv, exporting_cvv = ContentViewSync._create_cv(
exporting_cv_name, self.exporting_repo, self.exporting_org, False
)
filter_name = gen_string('alphanumeric')
ContentView.filter.create(
{
'name': filter_name,
'content-view-id': exporting_cv['id'],
'inclusion': 'yes',
'type': 'rpm',
}
)
ContentView.filter.rule.create(
{
'name': 'cat',
'content-view-filter': filter_name,
'content-view-id': exporting_cv['id'],
}
)
ContentView.publish(
{'id': exporting_cv['id'], 'organization-id': self.exporting_org['id']}
)
exporting_cv = ContentView.info({'id': exporting_cv['id']})
exporting_cvv_id = exporting_cv['versions'][0]['id']
exporting_cvv_version = exporting_cv['versions'][0]['version']
ContentView.version_export(
{'export-dir': '{}'.format(self.export_dir), 'id': exporting_cvv_id}
)
exported_tar = '{0}/export-{1}-{2}.tar'.format(
self.export_dir, exporting_cv_name, exporting_cvv_version
)
result = ssh.command("[ -f {0} ]".format(exported_tar))
self.assertEqual(result.return_code, 0)
exported_packages = Package.list({'content-view-version-id': exporting_cvv_id})
self.assertTrue(len(exported_packages) == 1)
self.set_importing_org(
self.exporting_prod_name, self.exporting_repo_name, exporting_cv_name
)
ContentView.version_import(
{'export-tar': exported_tar, 'organization-id': self.importing_org['id']}
)
importing_cvv = ContentView.info({'id': self.importing_cv['id']})['versions']
self.assertTrue(len(importing_cvv) >= 1)
imported_packages = Package.list({'content-view-version-id': importing_cvv[0]['id']})
self.assertTrue(len(imported_packages) == 1)
@tier3
def test_positive_export_import_cv(self):
"""Export CV version contents in directory and Import them.
:id: b4fb9386-9b6a-4fc5-a8bf-96d7c80af93e
:steps:
1. Create product and repository with custom contents.
2. Sync the repository.
3. Create CV with above product and publish.
4. Export CV version contents to a directory
5. Import those contents from some other org/satellite.
:expectedresults:
            1. CV version custom contents have been exported to the directory
            2. All the exported custom contents have been imported into the org/satellite
:CaseAutomation: Automated
:CaseComponent: ContentViews
:CaseImportance: High
:CaseLevel: System
"""
ContentView.version_export(
{'export-dir': '{}'.format(self.export_dir), 'id': self.exporting_cvv_id}
)
exporting_cvv_version = self.exporting_cv['versions'][0]['version']
exported_tar = '{0}/export-{1}-{2}.tar'.format(
self.export_dir, self.exporting_cv_name, exporting_cvv_version
)
result = ssh.command("[ -f {0} ]".format(exported_tar))
self.assertEqual(result.return_code, 0)
exported_packages = Package.list({'content-view-version-id': self.exporting_cvv_id})
self.assertTrue(len(exported_packages) > 0)
self.set_importing_org(
self.exporting_prod_name, self.exporting_repo_name, self.exporting_cv_name
)
ContentView.version_import(
{'export-tar': exported_tar, 'organization-id': self.importing_org['id']}
)
importing_cvv = ContentView.info({'id': self.importing_cv['id']})['versions']
self.assertTrue(len(importing_cvv) >= 1)
imported_packages = Package.list({'content-view-version-id': importing_cvv[0]['id']})
self.assertTrue(len(imported_packages) > 0)
self.assertEqual(len(exported_packages), len(imported_packages))
@tier3
@upgrade
def test_positive_export_import_redhat_cv(self):
"""Export CV version redhat contents in directory and Import them
:id: f6bd7fa9-396e-44ac-92a3-ab87ce1a7ef5
:steps:
1. Enable product and repository with redhat contents.
2. Sync the repository.
3. Create CV with above product and publish.
4. Export CV version contents to a directory
5. Import those contents from some other org/satellite.
:expectedresults:
            1. CV version Red Hat contents have been exported to the directory
            2. All the exported Red Hat contents have been imported into the org/satellite
:BZ: 1655239
:CaseAutomation: Automated
:CaseComponent: ContentViews
:CaseImportance: High
:CaseLevel: System
"""
rhva_repo_name = 'rhva6'
releasever = '6Server'
product = 'rhel'
rhva_repo = ContentViewSync._enable_rhel_content(
self.exporting_org, rhva_repo_name, releasever, product
)
rhva_cv_name = gen_string('alpha')
rhva_cv, exporting_cvv_id = ContentViewSync._create_cv(
rhva_cv_name, rhva_repo, self.exporting_org
)
ContentView.version_export(
{'export-dir': '{}'.format(self.export_dir), 'id': exporting_cvv_id}
)
exporting_cvv_version = rhva_cv['versions'][0]['version']
exported_tar = '{0}/export-{1}-{2}.tar'.format(
self.export_dir, rhva_cv_name, exporting_cvv_version
)
result = ssh.command("[ -f {0} ]".format(exported_tar))
self.assertEqual(result.return_code, 0)
exported_packages = Package.list({'content-view-version-id': exporting_cvv_id})
self.assertTrue(len(exported_packages) > 0)
Subscription.delete_manifest({'organization-id': self.exporting_org['id']})
importing_org = make_org()
imp_rhva_repo = ContentViewSync._enable_rhel_content(
importing_org, rhva_repo_name, releasever, product, sync=False
)
importing_cv, _ = ContentViewSync._create_cv(
rhva_cv_name, imp_rhva_repo, importing_org, publish=False
)
ContentView.version_import(
{'export-tar': exported_tar, 'organization-id': importing_org['id']}
)
importing_cvv_id = ContentView.info({'id': importing_cv['id']})['versions'][0]['id']
imported_packages = Package.list({'content-view-version-id': importing_cvv_id})
self.assertTrue(len(imported_packages) > 0)
self.assertEqual(len(exported_packages), len(imported_packages))
@tier4
def test_positive_export_import_redhat_cv_with_huge_contents(self):
"""Export CV version redhat contents in directory and Import them
:id: 05eb185f-e526-466c-9c14-702dde1d49de
:steps:
1. Enable product and repository with redhat repository having huge contents.
2. Sync the repository.
3. Create CV with above product and publish.
4. Export CV version contents to a directory
5. Import those contents from some other org/satellite.
:expectedresults:
            1. CV version Red Hat contents have been exported to the directory
            2. All the exported Red Hat contents have been imported into the org/satellite
:BZ: 1655239
:CaseAutomation: Automated
:CaseComponent: ContentViews
:CaseImportance: Critical
:CaseLevel: Acceptance
"""
rhel_repo_name = 'rhscl7'
product = 'rhscl'
releasever = '7Server'
rhel_repo = ContentViewSync._enable_rhel_content(
self.exporting_org, rhel_repo_name, releasever, product
)
rhel_cv_name = gen_string('alpha')
rhel_cv, exporting_cvv_id = ContentViewSync._create_cv(
rhel_cv_name, rhel_repo, self.exporting_org
)
ContentView.version_export(
{'export-dir': '{}'.format(self.export_dir), 'id': exporting_cvv_id}, timeout=7200
)
exporting_cvv_version = rhel_cv['versions'][0]['version']
exported_tar = '{0}/export-{1}-{2}.tar'.format(
self.export_dir, rhel_cv_name, exporting_cvv_version
)
result = ssh.command("[ -f {0} ]".format(exported_tar))
self.assertEqual(result.return_code, 0)
exported_packages = Package.list({'content-view-version-id': exporting_cvv_id})
self.assertTrue(len(exported_packages) > 0)
Subscription.delete_manifest({'organization-id': self.exporting_org['id']})
importing_org = make_org()
imp_rhel_repo = ContentViewSync._enable_rhel_content(
importing_org, rhel_repo_name, releasever, product, sync=False
)
importing_cv, _ = ContentViewSync._create_cv(
rhel_cv_name, imp_rhel_repo, importing_org, publish=False
)
ContentView.version_import(
{'export-tar': exported_tar, 'organization-id': importing_org['id']}, timeout=7200
)
importing_cvv_id = ContentView.info({'id': importing_cv['id']})['versions'][0]['id']
imported_packages = Package.list({'content-view-version-id': importing_cvv_id})
self.assertTrue(len(imported_packages) > 0)
self.assertEqual(len(exported_packages), len(imported_packages))
@tier2
def test_positive_exported_cv_tar_contents(self):
"""Exported CV version contents in export directory are same as CVv contents
:id: 35cc3b20-0fbc-4177-a89c-b4c8d7389a77
:steps:
1. Enable product and repository with contents.
2. Sync the repository.
3. Create CV with above product and publish.
4. Export CV version contents to a directory
5. Validate contents in a directory.
:expectedresults:
1. The CVv should be exported to specified location with contents tar and json
:CaseLevel: Integration
"""
ContentView.version_export(
{'export-dir': '{}'.format(self.export_dir), 'id': self.exporting_cvv_id}
)
exporting_cvv_version = self.exporting_cv['versions'][0]['version']
exported_tar = '{0}/export-{1}-{2}.tar'.format(
self.export_dir, self.exporting_cv_name, exporting_cvv_version
)
result = ssh.command("[ -f {0} ]".format(exported_tar))
self.assertEqual(result.return_code, 0)
result = ssh.command("tar -t -f {}".format(exported_tar))
contents_tar = 'export-{0}-{1}/export-{0}-{1}-repos.tar'.format(
self.exporting_cv_name, exporting_cvv_version
)
self.assertIn(contents_tar, result.stdout)
cvv_packages = Package.list({'content-view-version-id': self.exporting_cvv_id})
self.assertTrue(len(cvv_packages) > 0)
ssh.command("tar -xf {0} -C {1}".format(exported_tar, self.export_dir))
exported_packages = ssh.command(
"tar -tf {0}/{1} | grep .rpm | wc -l".format(self.export_dir, contents_tar)
)
self.assertEqual(len(cvv_packages), int(exported_packages.stdout[0]))
@tier1
@upgrade
def test_positive_export_import_promoted_cv(self):
"""Export promoted CV version contents in directory and Import them.
:id: 315ef1f0-e2ad-43ec-adff-453fb71654a7
:steps:
1. Create product and repository with contents.
2. Sync the repository.
3. Create CV with above product and publish.
4. Promote the CV.
5. Export CV version contents to a directory
6. Import those contents from some other org/satellite.
:expectedresults:
            1. Promoted CV version contents have been exported to the directory
            2. Promoted CV version contents have been imported successfully
3. The imported CV should only be published and not promoted
:CaseLevel: System
"""
env = make_lifecycle_environment({'organization-id': self.exporting_org['id']})
ContentView.version_promote(
{'id': self.exporting_cvv_id, 'to-lifecycle-environment-id': env['id']}
)
promoted_cvv_id = ContentView.info({'id': self.exporting_cv['id']})['versions'][-1]['id']
ContentView.version_export(
{'export-dir': '{}'.format(self.export_dir), 'id': promoted_cvv_id}
)
exporting_cvv_version = self.exporting_cv['versions'][0]['version']
exported_tar = '{0}/export-{1}-{2}.tar'.format(
self.export_dir, self.exporting_cv_name, exporting_cvv_version
)
result = ssh.command("[ -f {0} ]".format(exported_tar))
self.assertEqual(result.return_code, 0)
exported_packages = Package.list({'content-view-version-id': promoted_cvv_id})
self.set_importing_org(
self.exporting_prod_name, self.exporting_repo_name, self.exporting_cv_name
)
ContentView.version_import(
{'export-tar': exported_tar, 'organization-id': self.importing_org['id']}
)
importing_cvv = ContentView.info({'id': self.importing_cv['id']})['versions']
self.assertEqual(len(importing_cvv), 1)
imported_packages = Package.list({'content-view-version-id': importing_cvv[0]['id']})
self.assertEqual(len(exported_packages), len(imported_packages))
@tier2
def test_positive_repo_contents_of_imported_cv(self):
"""Repo contents of imported CV are same as repo contents of exported CV
:id: 76305fb9-2afd-46f8-842a-03bb706fa3fa
:steps:
1. Enable product and repository with contents.
2. Sync the repository.
3. Create CV with above product and publish.
4. Export CV version contents to a directory
5. Import those contents from some other org/satellite
:expectedresults:
            1. The contents in the repo of the imported CV are the same as in the repo of the exported CV
:CaseLevel: Integration
"""
ContentView.version_export(
{'export-dir': '{}'.format(self.export_dir), 'id': self.exporting_cvv_id}
)
exporting_cvv_version = self.exporting_cv['versions'][0]['version']
exported_tar = '{0}/export-{1}-{2}.tar'.format(
self.export_dir, self.exporting_cv_name, exporting_cvv_version
)
self.set_importing_org(
self.exporting_prod_name, self.exporting_repo_name, self.exporting_cv_name
)
ContentView.version_import(
{'export-tar': exported_tar, 'organization-id': self.importing_org['id']}
)
exported_repo = Repository.info({'id': self.exporting_repo['id']})
imported_repo = Repository.info({'id': self.importing_repo['id']})
self.assertEqual(
exported_repo['content-counts']['packages'],
imported_repo['content-counts']['packages'],
)
self.assertEqual(
exported_repo['content-counts']['errata'], imported_repo['content-counts']['errata']
)
@tier2
def test_negative_reimport_cv_with_same_major_minor(self):
"""Reimport CV version with same major and minor fails
:id: 15a7ddd3-c1a5-4b22-8460-6cb2b8ea4ef9
:steps:
1. Create product and repository with custom contents.
2. Sync the repository.
3. Create CV with above product and publish.
4. Export CV version contents to a directory
5. Import those contents from some other org/satellite.
            6. Attempt to reimport those contents (without changing the version in the json)
:expectedresults:
1. Reimporting the contents with same major and minor fails
2. Satellite displays an error 'A CV version already exists with the same major and
minor version'
"""
ContentView.version_export(
{'export-dir': '{}'.format(self.export_dir), 'id': self.exporting_cvv_id}
)
exporting_cvv_version = self.exporting_cv['versions'][0]['version']
exported_tar = '{0}/export-{1}-{2}.tar'.format(
self.export_dir, self.exporting_cv_name, exporting_cvv_version
)
result = ssh.command("[ -f {0} ]".format(exported_tar))
self.assertEqual(result.return_code, 0)
self.set_importing_org(
self.exporting_prod_name, self.exporting_repo_name, self.exporting_cv_name
)
ContentView.version_import(
{'export-tar': exported_tar, 'organization-id': self.importing_org['id']}
)
with self.assertRaises(CLIReturnCodeError) as error:
ContentView.version_import(
{'export-tar': exported_tar, 'organization-id': self.importing_org['id']}
)
self.assert_error_msg(
error,
"the Content View '{0}' is greater or equal to the version you "
"are trying to import".format(self.exporting_cv_name),
)
@tier2
def test_negative_import_cv_without_replicating_import_part(self):
"""Import CV version without creating same CV and repo at importing side
:id: 4cc69666-407f-4d66-b3d2-8fe2ed135a5f
:steps:
1. Create product and repository with custom contents.
2. Sync the repository.
3. Create CV with above product and publish.
4. Export CV version contents to a directory
5. Don't create replica CV and repo at importing org/satellite
6. Attempt to import the exported contents
:expectedresults:
1. Error 'Unable to sync repositories, no library repository found' should be
displayed
"""
ContentView.version_export(
{'export-dir': '{}'.format(self.export_dir), 'id': self.exporting_cvv_id}
)
exporting_cvv_version = self.exporting_cv['versions'][0]['version']
exported_tar = '{0}/export-{1}-{2}.tar'.format(
self.export_dir, self.exporting_cv_name, exporting_cvv_version
)
importing_org = make_org()
with self.assertRaises(CLIReturnCodeError) as error:
ContentView.version_import(
{'export-tar': exported_tar, 'organization-id': importing_org['id']}
)
self.assert_error_msg(
error,
'Error: The Content View {} is not present on this server, '
'please create the Content View and try the import again'.format(
self.exporting_cv_name
),
)
@tier1
def test_negative_import_without_associating_repo_to_cv(self):
"""Importing CV version without associating repo to CV at importing side throws error
:id: 3d20612f-b769-462e-9829-f13fd81bd4c7
:steps:
1. Create product and repository with custom contents
2. Sync the repository
3. Create CV with above product and publish
4. Export CV version contents to a directory
5. Create replica CV but don't associate repo to CV at importing org/satellite
6. Import those contents from some other org/satellite.
:expectedresults:
1. Error 'Unable to sync repositories, no library repository found' should be
displayed
"""
ContentView.version_export(
{'export-dir': '{}'.format(self.export_dir), 'id': self.exporting_cvv_id}
)
exporting_cvv_version = self.exporting_cv['versions'][0]['version']
exported_tar = '{0}/export-{1}-{2}.tar'.format(
self.export_dir, self.exporting_cv_name, exporting_cvv_version
)
importing_org = make_org()
make_content_view({'name': self.exporting_cv_name, 'organization-id': importing_org['id']})
with self.assertRaises(CLIReturnCodeError) as error:
ContentView.version_import(
{'export-tar': exported_tar, 'organization-id': importing_org['id']}
)
self.assert_error_msg(error, 'Unable to sync repositories, no library repository found')
@tier2
def test_negative_export_cv_with_on_demand_repo(self):
"""Exporting CV version having on_demand repo throws error
:id: f8b86d0e-e1a7-4e19-bb82-6de7d16c6676
:steps:
1. Create product and on-demand repository with custom contents
2. Sync the repository
3. Create CV with above product and publish
4. Attempt to export CV version contents to a directory
:expectedresults:
            1. Export fails and an error is displayed; the test asserts the message
               "Ensure the content view version '<name> <version>' has at least one repository".
"""
exporting_org = make_org()
exporting_prod = gen_string('alpha')
product = make_product({'organization-id': exporting_org['id'], 'name': exporting_prod})
exporting_repo = gen_string('alpha')
repo = make_repository(
{'name': exporting_repo, 'download-policy': 'on_demand', 'product-id': product['id']}
)
Repository.synchronize({'id': repo['id']})
exporting_cv = gen_string('alpha')
cv_dict, exporting_cvv_id = ContentViewSync._create_cv(exporting_cv, repo, exporting_org)
cv_name = cv_dict['name']
cv_version = cv_dict['versions'][0]['version']
with self.assertRaises(CLIReturnCodeError) as error:
ContentView.version_export(
{'export-dir': '{}'.format(self.export_dir), 'id': exporting_cvv_id}
)
self.assert_error_msg(
error,
"Could not export the content view:\n Error: Ensure the content view version"
" '{} {}' has at least one repository.\n".format(cv_name, cv_version),
)
@tier2
def test_negative_export_cv_with_background_policy_repo(self):
"""Exporting CV version having background policy repo throws error
:id: c0f5a903-e9a8-4ce6-9377-1df1e7ba62c5
:steps:
1. Create product and background policy repository with custom contents
2. Sync the repository
3. Create CV with above product and publish
4. Attempt to export CV version contents to a directory
:expectedresults:
            1. Export fails and an error is displayed; the test asserts the message
               "Ensure the content view version '<name> <version>' has at least one repository".
"""
exporting_org = make_org()
exporting_prod = gen_string('alpha')
product = make_product({'organization-id': exporting_org['id'], 'name': exporting_prod})
repo = make_repository(
{
'name': gen_string('alpha'),
'download-policy': 'background',
'product-id': product['id'],
}
)
Repository.synchronize({'id': repo['id']})
cv_dict, exporting_cvv_id = ContentViewSync._create_cv(
gen_string('alpha'), repo, exporting_org
)
cv_name = cv_dict['name']
cv_version = cv_dict['versions'][0]['version']
with self.assertRaises(CLIReturnCodeError) as error:
ContentView.version_export(
{'export-dir': '{}'.format(self.export_dir), 'id': exporting_cvv_id}
)
self.assert_error_msg(
error,
"Could not export the content view:\n Error: Ensure the content view version"
" '{} {}' has at least one repository.\n".format(cv_name, cv_version),
)
@tier2
def test_negative_import_cv_with_mirroronsync_repo(self):
"""Importing CV version having mirror-on-sync repo throws error
:id: dfa0cd5f-1596-4097-b505-06bc14de51dd
:steps:
1. Create product and mirror-on-sync repository with custom contents
2. Sync the repository
3. Create CV with above product and publish
            4. Export CV version contents to a directory
            5. Attempt to import the exported contents into another organization
:expectedresults:
            1. Import fails and the error "The Repository '<repo_name>' is set with Mirror-on-Sync
               to YES. Please change Mirror-on-Sync to NO and try the import again" is displayed.
"""
exporting_prod_name = gen_string('alpha')
product = make_product(
{'organization-id': self.exporting_org['id'], 'name': exporting_prod_name}
)
exporting_repo_name = gen_string('alpha')
repo = make_repository(
{
'name': exporting_repo_name,
'download-policy': 'immediate',
'mirror-on-sync': 'yes',
'product-id': product['id'],
}
)
Repository.synchronize({'id': repo['id']})
exporting_cv_name = gen_string('alpha')
exporting_cv, exporting_cvv_id = ContentViewSync._create_cv(
exporting_cv_name, repo, self.exporting_org
)
ContentView.version_export(
{'export-dir': '{}'.format(self.export_dir), 'id': exporting_cvv_id}
)
exporting_cvv_version = exporting_cv['versions'][0]['version']
exported_tar = '{0}/export-{1}-{2}.tar'.format(
self.export_dir, exporting_cv_name, exporting_cvv_version
)
self.set_importing_org(
exporting_prod_name, exporting_repo_name, exporting_cv_name, mos='yes'
)
with self.assertRaises(CLIReturnCodeError) as error:
ContentView.version_import(
{'export-tar': exported_tar, 'organization-id': self.importing_org['id']}
)
self.assert_error_msg(
error,
"The Repository '{}' is set with Mirror-on-Sync to YES. "
"Please change Mirror-on-Sync to NO and try the import again".format(
exporting_repo_name
),
)
@tier2
def test_positive_create_custom_major_minor_cv_version(self):
"""CV can published with custom major and minor versions
:id: 6697cd22-253a-4bdc-a108-7e0af22caaf4
:steps:
1. Create product and repository with custom contents
2. Sync the repository
3. Create CV with above repository
4. Publish the CV with custom major and minor versions
:expectedresults:
1. CV version with custom major and minor versions is created
:CaseLevel: System
"""
org = make_org()
major = randint(1, 1000)
minor = randint(1, 1000)
content_view = make_content_view(
{'name': gen_string('alpha'), 'organization-id': org['id']}
)
ContentView.publish({'id': content_view['id'], 'major': major, 'minor': minor})
content_view = ContentView.info({'id': content_view['id']})
cvv = content_view['versions'][0]['version']
self.assertEqual(cvv.split('.')[0], str(major))
self.assertEqual(cvv.split('.')[1], str(minor))
@tier3
def test_negative_export_cv_with_puppet_repo(self):
"""Exporting CV version having non yum(puppet) repo throws error
:id: 5e27f994-34f2-4595-95a9-346c6b9415f6
:steps:
1. Create product and non yum(puppet) repository
2. Sync the repository
3. Create CV with above product and publish
4. Attempt to export CV version contents to a directory
:expectedresults:
1. Export fails with error 'Could not export the content view:
Error: Ensure the content view version '#name' has at least one
repository.'.
"""
module = {'name': 'versioned', 'version': '3.3.3'}
exporting_org = make_org()
product = make_product(
{'organization-id': exporting_org['id'], 'name': gen_string('alpha')}
)
repo = make_repository(
{'url': CUSTOM_PUPPET_REPO, 'content-type': 'puppet', 'product-id': product['id']}
)
Repository.synchronize({'id': repo['id']})
puppet_module = PuppetModule.list(
{'search': 'name={name} and version={version}'.format(**module)}
)[0]
content_view = make_content_view({'organization-id': exporting_org['id']})
ContentView.puppet_module_add(
{
'content-view-id': content_view['id'],
'name': puppet_module['name'],
'author': puppet_module['author'],
}
)
ContentView.publish({'id': content_view['id']})
cv_version = ContentView.info({'id': content_view['id']})['versions'][0]['version']
with self.assertRaises(CLIReturnCodeError) as error:
ContentView.version_export(
{
'export-dir': '{}'.format(self.export_dir),
'id': ContentView.info({'id': content_view['id']})['versions'][0]['id'],
}
)
self.assert_error_msg(
error,
"Could not export the content view:\n "
"Error: Ensure the content view version "
"'{} {}' has at least one repository.\n".format(content_view['name'], cv_version),
)
@tier3
    def test_positive_export_cv_with_mixed_content_repos(self):
        """Exporting a CV version having yum and non-yum (puppet) repos is successful
:id: ffcdbbc6-f787-4978-80a7-4b44c389bf49
:steps:
1. Create product with yum and non-yum(puppet) repos
2. Sync the repositories
3. Create CV with above product and publish
4. Export CV version contents to a directory
:expectedresults:
            1. Export will succeed; however, the export won't contain the non-yum repo.
               No warning is printed (see BZ 1775383)
:BZ: 1726457
"""
module = {'name': 'versioned', 'version': '3.3.3'}
product = make_product(
{'organization-id': self.exporting_org['id'], 'name': gen_string('alpha')}
)
nonyum_repo = make_repository(
{'url': CUSTOM_PUPPET_REPO, 'content-type': 'puppet', 'product-id': product['id']}
)
Repository.synchronize({'id': nonyum_repo['id']})
yum_repo = make_repository(
{
'name': gen_string('alpha'),
'download-policy': 'immediate',
'mirror-on-sync': 'no',
'product-id': product['id'],
}
)
Repository.synchronize({'id': yum_repo['id']})
puppet_module = PuppetModule.list(
{'search': 'name={name} and version={version}'.format(**module)}
)[0]
content_view = make_content_view({'organization-id': self.exporting_org['id']})
ContentView.puppet_module_add(
{
'content-view-id': content_view['id'],
'name': puppet_module['name'],
'author': puppet_module['author'],
}
)
ContentView.add_repository(
{
'id': content_view['id'],
'organization-id': self.exporting_org['id'],
'repository-id': yum_repo['id'],
}
)
ContentView.publish({'id': content_view['id']})
export_cvv_info = ContentView.info({'id': content_view['id']})['versions'][0]
ContentView.version_export(
{'export-dir': '{}'.format(self.export_dir), 'id': export_cvv_info['id']}
)
self.assert_exported_cvv_exists(content_view['name'], export_cvv_info['version'])
@tier2
def test_positive_import_cv_with_customized_major_minor(self):
"""Import the CV version with customized major and minor
:id: 38237795-0275-408c-8a0d-462120dafc59
:steps:
1. Create product and repository with custom contents.
2. Sync the repository.
3. Create CV with above product and publish
4. Export CV version contents to a directory
5. Untar the exported tar
6. Customize/Update the major and minor in metadata json
7. Retar with updated json and contents tar
8. Import the CV version from the updated json tar from some other org/satellite
:expectedresults:
            1. The CV version is imported with updated json
2. The Imported CV version has major and minor updated in exported tar json
"""
ContentView.version_export(
{'export-dir': '{}'.format(self.export_dir), 'id': self.exporting_cvv_id}
)
exporting_cvv_version = self.exporting_cv['versions'][0]['version']
exported_tar = '{0}/export-{1}-{2}.tar'.format(
self.export_dir, self.exporting_cv_name, exporting_cvv_version
)
result = ssh.command("[ -f {0} ]".format(exported_tar))
self.assertEqual(result.return_code, 0)
self.set_importing_org(
self.exporting_prod_name, self.exporting_repo_name, self.exporting_cv_name
)
# Updating the json in exported tar
ssh.command("tar -xf {0} -C {1}".format(exported_tar, self.export_dir))
extracted_directory_name = 'export-{0}-{1}'.format(
self.exporting_cv_name, exporting_cvv_version
)
json_path = '{0}/{1}/{1}.json'.format(self.export_dir, extracted_directory_name)
new_major, new_minor = self._update_json(json_path)
custom_cvv_tar = '{0}/{1}.tar'.format(self.export_dir, extracted_directory_name)
ssh.command(
"tar -cvf {0} {1}/{2}".format(
custom_cvv_tar, self.export_dir, extracted_directory_name
)
)
# Importing the updated tar
ContentView.version_import(
{'export-tar': custom_cvv_tar, 'organization-id': self.importing_org['id']}
)
imported_cvv = ContentView.info({'id': self.importing_cv['id']})['versions']
self.assertEqual(str(new_major), imported_cvv[0]['version'].split('.')[0])
self.assertEqual(str(new_minor), imported_cvv[0]['version'].split('.')[1])
@tier1
def test_positive_exported_cvv_json_contents(self):
"""Export the CV version and verify export metadata json fields
:id: 44934116-b051-4bb3-814a-08a80303e4ee
:steps:
1. Create product and repository with custom contents.
2. Sync the repository.
3. Create CV with above product and publish
4. Export CV version contents to a directory
5. Untar the exported tar
:expectedresults:
The exported json has following fields and in sequence:
Name of CV
Major of CVv
Minor of CVv
Metadata of Repositories in CVv, each with the following info:
The repository ID
The repository label
The repository content Type
Backend Identifier
Relative Path to Packages in tar file
On disk path to Packages in /var/lib/pulp
rpm file names in CVV repo:
Rpm names
Errata IDs in CVV Repo:
Errata IDs
"""
exporting_repo = Repository.info({'id': self.exporting_repo['id']})
exporting_cv = ContentView.info({'id': self.exporting_cv['id']})
ContentView.version_export(
{'export-dir': '{}'.format(self.export_dir), 'id': self.exporting_cvv_id}
)
exporting_cvv_version = self.exporting_cv['versions'][0]['version']
exported_tar = '{0}/export-{1}-{2}.tar'.format(
self.export_dir, self.exporting_cv_name, exporting_cvv_version
)
result = ssh.command("[ -f {0} ]".format(exported_tar))
self.assertEqual(result.return_code, 0)
        # Extract the exported tar and read the metadata json for verification
ssh.command("tar -xf {0} -C {1}".format(exported_tar, self.export_dir))
extracted_directory_name = 'export-{0}-{1}'.format(
self.exporting_cv_name, exporting_cvv_version
)
json_path_server = '{0}/{1}/{1}.json'.format(self.export_dir, extracted_directory_name)
json_path_local = '/tmp/{}.json'.format(extracted_directory_name)
ssh.download_file(json_path_server, json_path_local)
with open(json_path_local) as metafile:
metadata = json.load(metafile)
self.assertEqual(metadata.get('name'), self.exporting_cv_name)
self.assertEqual(
str(metadata.get('major')), exporting_cv['versions'][0]['version'].split('.')[0]
)
self.assertEqual(
str(metadata.get('minor')), exporting_cv['versions'][0]['version'].split('.')[1]
)
self.assertIsNotNone(metadata.get('repositories'))
cvv_repository = metadata['repositories'][0]
self.assertEqual(cvv_repository.get('content_type'), 'yum')
self.assertEqual(cvv_repository.get('label'), self.exporting_repo_name)
self.assertIsNotNone(cvv_repository.get('backend_identifier'))
self.assertIsNotNone(cvv_repository.get('on_disk_path'))
self.assertIsNotNone(cvv_repository.get('relative_path'))
self.assertEqual(
int(exporting_repo['content-counts']['packages']),
len(cvv_repository.get('rpm_filenames')),
)
self.assertEqual(
int(exporting_repo['content-counts']['errata']), len(cvv_repository.get('errata_ids'))
)
class InterSatelliteSyncTestCase(CLITestCase):
"""Implements InterSatellite Sync tests in CLI"""
@stubbed()
@tier3
def test_negative_import_cv(self):
"""Export whole CV version contents in directory and Import nothing.
:id: bcb4f64f-a480-4be0-a4ef-3ee1f024d8d7
:steps:
1. Export whole CV version contents to a directory specified in
settings.
2. Don't copy exported contents to /var/www/html/pub/export
directory.
3. Attempt to import these not copied contents from some other
org/satellite.
:expectedresults:
1. Whole CV version contents has been exported to directory
specified in settings.
2. The exported contents are not imported due to non availability.
3. Error is thrown for non availability of CV contents to import.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
def test_negative_export_cv(self):
"""Export whole CV version contents is aborted due to insufficient
memory.
:id: 4fa58c0c-95d2-45f5-a7fc-c5f3312a989c
:steps: Attempt to Export whole CV version contents to a directory
which has less memory available than contents size.
:expectedresults: The export CV version contents has been aborted due
to insufficient memory.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
@upgrade
def test_positive_export_import_cv_iso(self):
"""Export CV version contents in directory as iso and Import it.
:id: 5c39afd4-09d6-43c5-8d50-edc98105b7db
:steps:
1. Export whole CV version contents as ISO to a directory specified
in settings.
2. Copy exported ISO to /var/www/html/pub/export directory.
3. Import these copied ISO from some other org/satellite.
:expectedresults:
            1. CV version has been exported as ISO to the directory specified
               in settings.
2. The exported ISO has been imported in org/satellite.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
def test_negative_import_cv_iso(self):
"""Export whole CV version as ISO in directory and Import nothing.
:id: af9b3d6f-25c0-43a5-b8a7-d9a0df1986b4
:steps:
1. Export whole CV version as ISO to a directory specified in
settings.
2. Don't copy exported ISO to /var/www/html/pub/export directory.
3. Attempt to import this not copied ISO from some other
org/satellite.
:expectedresults:
1. The exported iso is not imported due to non availability.
2. Error is thrown for non availability of CV version ISO to
import.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
def test_negative_export_cv_iso(self):
"""Export whole CV version to iso is aborted due to insufficient
memory.
:id: ef84ffbd-c7cf-4d9a-9944-3c3b06a18872
:steps: Attempt to Export whole CV version as iso to a directory which
has less memory available than contents size.
:expectedresults: The export CV version to iso has been aborted due to
insufficient memory.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
def test_negative_export_cv_iso_max_size(self):
"""Export whole CV version to iso is aborted due to inadequate maximum
iso size.
:id: 93fe1cef-254b-484d-a628-bec56b356234
:steps: Attempt to Export whole CV version as iso with mb size less
than required.
        :expectedresults: The export CV version to iso has been aborted because
            the maximum size is not enough to contain the CV version contents.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
def test_positive_export_cv_iso_max_size(self):
"""CV version exported to iso in maximum iso size.
:id: 7ec91557-bafc-490d-b760-573a07389be5
:steps: Attempt to Export whole CV version as iso with mb size more
than required.
:expectedresults: CV version has been exported to iso successfully.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
@upgrade
def test_positive_export_import_cv_incremental(self):
"""Export and Import CV version contents incrementally.
:id: 3c4dfafb-fabf-406e-bca8-7af1ab551135
:steps:
1. In upstream, Export CV version contents to a directory specified
in settings.
2. In downstream, Import these copied contents from some other
org/satellite.
3. In upstream, Add new packages to the CV.
4. Export the CV incrementally from the last date time.
5. In downstream, Import the CV incrementally.
:expectedresults:
1. On incremental export, only the new packages are exported.
2. New directory of incremental export with new packages is
created.
3. On incremental import, only the new packages are imported.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
def test_negative_export_import_cv_incremental(self):
"""No new incremental packages exported or imported.
:id: 90692d59-788c-4e18-add1-33db04204a4b
:steps:
1. In upstream, Export CV version contents to a directory specified
in settings.
2. In downstream, Import these copied contents from some other
org/satellite.
3. In upstream, Don't add any new packages to the CV.
4. Export the CV incrementally from the last date time.
5. In downstream, Import the CV incrementally.
:expectedresults:
            1. An empty packages directory is created on incremental export.
2. On incremental import, no new packages are imported.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
def test_positive_exported_cv_iso_dir_structure(self):
"""Exported CV in iso format respects cdn directory structure.
:id: cb901dde-1839-4e7d-a57b-8e41c212dc77
:steps:
1. Export the full CV in iso format.
2. Mount the iso.
3. Verify iso directory structure.
:expectedresults: Exported CV in iso should follow the cdn directory
structure.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
@upgrade
def test_positive_export_import_repo(self):
"""Export repo in directory and Import them.
:id: 2c5f09ce-225b-4f9d-ad4b-a26fe094b0e7
:steps:
1. Export repo to a directory specified in settings.
2. Copy exported repo contents to /var/www/html/pub/export
directory.
3. Import these copied repo contents from some other org/satellite.
:expectedresults:
1. The repo has been exported to directory specified in settings.
2. The exported repo has been imported in org/satellite.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
def test_negative_import_repo(self):
"""Export repo contents in directory and Import nothing.
:id: 8e0bbed9-bc68-44d3-a79c-2861f323e2ff
:steps:
1. Export repo to a directory specified in settings.
            2. Don't copy exported repo to /var/www/html/pub/export directory.
3. Attempt to import this not copied repo from some other
org/satellite.
:expectedresults:
1. The repo has been exported to directory specified in settings.
2. The exported repo are not imported due to non availability.
3. Error is thrown for non availability of repo contents to import.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
def test_negative_export_repo(self):
"""Export repo is aborted due ti insufficient memory.
:id: 4bdd1183-a3a5-41a8-8a38-34c1035b64da
:steps: Attempt to Export repo to a directory which has less memory
available than contents size.
:expectedresults: The export repo has been aborted due to insufficient
memory.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
def test_negative_export_lazy_sync_repo(self):
"""Error is raised for lazy sync repo.
:id: 296a7bde-d8af-4e4d-b673-a7c393f6f846
:steps: Attempt to Export repo with 'on_demand' download policy.
:expectedresults: An Error is raised for updating the repo download
policy to 'immediate' to be exported.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
@upgrade
def test_positive_reimport_repo(self):
"""Packages missing from upstream are removed from downstream on reimport.
:id: b3a71405-d8f0-4085-b728-8fc3513611c8
:steps:
1. From upstream Export repo fully and import it in downstream.
2. In upstream delete some packages from repo.
3. Re-export the full repo.
4. In downstream, reimport the repo re-exported.
:expectedresults: Deleted packages from upstream are removed from
downstream.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
@upgrade
def test_positive_export_import_repo_iso(self):
"""Export repo in directory as iso and Import it.
:id: 95658d9e-9f0b-466f-a412-1bebadc709c9
:steps:
1. Export repo as ISO to a directory specified in settings.
2. Copy exported ISO to /var/www/html/pub/export directory.
3. Import this copied ISO from some other org/satellite.
:expectedresults:
            1. The repo has been exported as ISO to the directory specified
               in settings.
2. The exported ISO has been imported in org/satellite.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
def test_negative_import_repo_iso(self):
"""Export repo as ISO in directory and Import nothing.
:id: dab72a79-e508-4236-ad7e-f92bb9639b5e
:steps:
1. Export repo as ISO to a directory specified in settings.
            2. Don't copy exported ISO to /var/www/html/pub/export directory.
3. Attempt to import this not copied ISO from some other
org/satellite.
:expectedresults:
1. The exported iso is not imported due to non availability.
2. Error is thrown for non availability of repo ISO to import.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
def test_negative_export_repo_iso(self):
"""Export repo to iso is aborted due to insufficient memory.
:id: 028c4972-5746-463d-afd3-a1cea337ee11
:steps: Attempt to Export repo as iso to a directory which has less
memory available than contents size.
:expectedresults: The export repo to iso has been aborted due to
insufficient memory.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
def test_negative_export_repo_iso_max_size(self):
"""Export repo to iso is aborted due to inadequate maximum iso size.
:id: ef2ba2ec-0ec6-4c33-9c22-e4102734eecf
:steps: Attempt to Export repo as iso with mb size less than required.
        :expectedresults: The export repo to iso has been aborted because the
            maximum size is not enough to contain the repo contents.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
def test_positive_export_repo_iso_max_size(self):
"""Repo exported to iso with maximum iso size.
:id: 19626697-9c5e-49d1-8429-720881dfe73d
:steps: Attempt to Export repo as iso with mb size more than required.
:expectedresults: Repo has been exported to iso successfully.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
def test_negative_export_repo_from_future_datetime(self):
"""Incremental export fails with future datetime.
:id: 1e8bc352-198f-4d59-b437-1b184141fab4
:steps: Export the repo incrementally from the future date time.
:expectedresults: Error is raised for attempting to export from future
datetime.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
@upgrade
def test_positive_export_import_repo_incremental(self):
"""Export and Import repo incrementally.
:id: b2537c09-4dd8-440d-be11-0728ee4be804
:steps:
1. In upstream, Export repo to a directory specified in settings.
2. In downstream, Import this repo fully.
3. In upstream, Add new packages to the repo.
4. Export the repo incrementally from the last date time.
5. In downstream, Import the repo incrementally.
:expectedresults:
1. On incremental export, only the new packages are exported.
2. New directory of incremental export with new packages is
created.
3. On incremental import, only the new packages are imported.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
def test_negative_export_import_repo_incremental(self):
"""No new incremental packages exported or imported.
:id: b51a3718-87d0-4aa1-8bff-fa153bd72df0
:steps:
1. In upstream, Export repo to a directory specified in settings.
2. In downstream, fully Import this repo.
3. In upstream, Don't add any new packages to the repo.
4. Export the repo incrementally from the last date time.
5. In downstream, Import the repo incrementally.
:expectedresults:
            1. An empty packages directory is created on incremental export.
2. On incremental import, no new packages are imported.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
def test_positive_exported_repo_iso_dir_structure(self):
"""Exported repo in iso format respects cdn directory structure.
:id: 6bfc28a8-6615-4927-976a-30e7a9bb6860
:steps:
1. Export the full repo in iso format.
2. Mount the iso.
3. Verify iso directory structure.
:expectedresults: Exported repo in iso should follow the cdn directory
structure.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
@upgrade
def test_positive_export_import_kickstart_tree(self):
"""kickstart tree is exported to specified location.
:id: bb9e77ed-fbbb-4e43-b118-2ddcb7c6341f
:steps:
1. Export the full kickstart tree.
2. Copy exported kickstart tree contents to
/var/www/html/pub/export.
3. Import above exported kickstart tree from other org/satellite.
:expectedresults:
1. Whole kickstart tree contents has been exported to directory
specified in settings.
            2. All the exported contents have been imported in org/satellite.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
def test_negative_import_kickstart_tree(self):
"""Export whole kickstart tree in directory and Import nothing.
:id: 55ddf6a6-b99a-4986-bdd3-7a5384f06915
:steps:
1. Export whole kickstart tree contents to a directory specified in
settings.
            2. Don't copy exported contents to /var/www/html/pub/export
directory.
3. Attempt to import these not copied contents from some other
org/satellite.
:expectedresults:
1. Whole kickstart tree has been exported to directory specified in
settings.
2. The exported contents are not imported due to non availability.
3. Error is thrown for non availability of kickstart tree to
import.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
def test_negative_export_kickstart_tree(self):
"""Export whole kickstart tree contents is aborted due to insufficient
memory.
:id: 5f681f43-bac8-4196-9b3c-8b66b9c149f9
:steps: Attempt to Export whole kickstart tree contents to a directory
which has less memory available than contents size.
:expectedresults: The export kickstart tree has been aborted due to
insufficient memory.
:CaseAutomation: notautomated
:CaseLevel: System
"""
# Red Hat Repositories Export and Import
@stubbed()
@tier3
def test_positive_export_redhat_yum_repo(self):
"""Export Red Hat YUM repo in directory.
:id: 96bd5c72-6eb0-4b32-b75a-14c6ad556cc0
:steps: Export whole Red Hat YUM repo to some path.
:expectedresults: Whole YUM repo contents has been exported to
directory specified in settings.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
@upgrade
def test_positive_export_import_redhat_yum_repo(self):
"""Import the exported YUM repo contents.
:id: afc447b4-ed74-4ed3-839f-3d0048e4eca3
:steps:
1. Export Red Hat YUM repo to path which will be accessible over
HTTP.
2. Import the repository by defining the CDN URL the same as the
exported HTTP URL.
:expectedresults: All the exported YUM repo contents are imported
successfully.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
def test_positive_export_redhat_incremental_yum_repo(self):
"""Export Red Hat YUM repo in directory incrementally.
:id: be054636-629a-40a0-b414-da3964154bd1
:steps:
1. Export whole Red Hat YUM repo.
2. Add some packages to the earlier exported yum repo.
3. Incrementally export the yum repo from last exported date.
:expectedresults: Red Hat YUM repo contents have been exported
incrementally in separate directory.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
@upgrade
def test_positive_export_import_redhat_incremental_yum_repo(self):
"""Import the exported YUM repo contents incrementally.
:id: 318560d7-71f5-4646-ab5c-12a2ec22d031
:steps:
1. First, Export and Import whole Red Hat YUM repo.
2. Add some packages to the earlier exported yum repo.
3. Incrementally export the Red Hat YUM repo from last exported
date.
4. Import the exported YUM repo contents incrementally.
:expectedresults: YUM repo contents have been imported incrementally.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
def test_positive_export_redhat_yum_repo_iso(self):
"""Export Red Hat YUM repo as ISO in directory.
:id: e96a7a8c-9e71-4379-86e6-78177dfbf555
:steps: Export whole Red Hat YUM repo as ISO.
:expectedresults: Whole repo contents has been exported as ISO in
separate directory.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
@upgrade
def test_positive_export_import_redhat_yum_repo_iso(self):
"""Export Red Hat YUM repo as ISO in directory and Import.
:id: d1af556e-c622-4ca0-a617-0216d5805d45
:steps:
1. Export whole Red Hat YUM repo as ISO.
2. Mount exported ISO and explore the ISO contents on HTTP.
3. Import the repository by defining the CDN URL the same as the
exported HTTP URL.
        :expectedresults: All the exported repo contents in the ISO have been
            imported successfully.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
def test_positive_export_redhat_yum_incremental_repo_iso(self):
"""Export Red Hat YUM repo as ISO in directory and import incrementally.
:id: c54e9410-9945-4662-bea0-a4ab35e90606
:steps:
1. First, Export and Import whole Red Hat YUM repo.
2. Add some packages to the earlier exported yum repo.
3. Incrementally export the yum repo as ISO from last exported
date.
:expectedresults: Repo contents have been exported as ISO incrementally
in separate directory.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
@upgrade
def test_positive_export_import_redhat_yum_incremental_repo_iso(self):
"""Export Red Hat YUM repo as ISO in directory and import incrementally.
:id: 5e3f4013-489e-4d4e-abd9-49077f89efcd
:steps:
1. First, Export and Import whole Red Hat YUM repo.
2. Add some packages to the earlier exported yum repo.
3. Incrementally export the yum repo as ISO from last exported
date.
4. Mount incrementally exported contents ISO.
5. Import the repo contents incrementally.
:expectedresults: Repo contents have been exported as ISO and imported
incrementally.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
def test_positive_export_redhat_cv(self):
"""Export CV version having Red Hat contents in directory.
:id: 3eacbd64-e81b-455e-969d-570582616c4a
:steps: Export whole CV version having Red Hat contents to a directory
specified in settings.
:expectedresults: Whole CV version contents has been exported to
directory specified in settings.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
@upgrade
def test_positive_export_import_redhat_cv(self):
"""Export CV version having Red Hat contents in directory and Import
them.
:id: 0c9f1a9b-a166-4b9a-a9c4-099f3a45d552
:steps:
1. Export whole CV version having Red Hat contents to a path
accessible over HTTP.
2. Import the repository by defining the CDN URL from the exported
HTTP URL.
:expectedresults: The repo from an exported CV contents has been
imported successfully.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
@upgrade
def test_positive_export_import_redhat_mix_cv(self):
"""Export CV version having Red Hat and custom repo in directory
and Import them.
:id: a38cf67d-563c-46f0-a263-4825b26faf2b
:steps:
1. Export whole CV version having mixed repos to a path accessible
over HTTP.
2. Import the Red Hat repository by defining the CDN URL from the
exported HTTP URL.
3. Import custom repo by creating new repo and setting yum repo url
to exported HTTP url.
:expectedresults: Both custom and Red Hat repos are imported
successfully.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
@upgrade
def test_positive_export_redhat_cv_iso(self):
"""Export CV version having Red Hat contents as ISO.
:id: 7a35b76b-046f-402b-ba0d-4336e1757b8b
:steps: Export whole CV version having Red Hat contents as ISO.
:expectedresults: Whole CV version contents has been exported as ISO.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
@upgrade
def test_positive_export_import_redhat_cv_iso(self):
"""Export CV version having Red Hat contents as ISO and Import them.
:id: 44b3d4b7-2da2-4db0-afd7-6c696a444915
:steps:
1. Export whole CV version having Red Hat contents as ISO.
2. Mount ISO to local filesystem and explore iso contents over
HTTP.
3. Import the Red Hat repository by defining the CDN URL from the
exported HTTP URL.
:expectedresults: The repo is imported successfully from exported CV
ISO contents.
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier3
@upgrade
def test_positive_install_package_from_imported_repos(self):
"""Install packages in client from imported repo of Downstream satellite.
:id: a81ffb55-398d-4ad0-bcae-5ed48f504ded
:steps:
1. Export whole Red Hat YUM repo to a path accessible over HTTP.
2. Import the Red Hat repository by defining the CDN URL from the
exported HTTP URL.
3. In downstream satellite create CV, AK with this imported repo.
4. Register/Subscribe a client with a downstream satellite.
5. Attempt to install a package on a client from imported repo of
downstream.
:expectedresults: The package is installed on client from imported repo
of downstream satellite.
:CaseAutomation: notautomated
:CaseLevel: System
"""
| gpl-3.0 | -2,365,239,478,329,593,300 | 34.56727 | 99 | 0.603378 | false |
xhqu1981/custodian | custodian/qchem/handlers.py | 1 | 25246 | # coding: utf-8
"""
This module implements error handlers for QChem runs. Currently tested only
for B3LYP DFT jobs.
"""
from __future__ import unicode_literals, division
import copy
import glob
import json
import logging
import os
import re
import shutil
import tarfile
import time
from pymatgen.core.structure import Molecule
from pymatgen.io.qchem import QcOutput, QcInput, QcTask
from custodian.custodian import ErrorHandler
__author__ = "Xiaohui Qu"
__version__ = "0.1"
__maintainer__ = "Xiaohui Qu"
__email__ = "[email protected]"
__status__ = "Alpha"
__date__ = "12/04/13"
class QChemErrorHandler(ErrorHandler):
"""
Error handler for QChem Jobs. Currently tested only for B3LYP DFT jobs
generated by pymatgen.
"""
def __init__(self, input_file="mol.qcinp", output_file="mol.qcout",
ex_backup_list=(), rca_gdm_thresh=1.0E-3,
scf_max_cycles=200, geom_max_cycles=200, qchem_job=None):
"""
Initializes the error handler from a set of input and output files.
Args:
input_file (str): Name of the QChem input file.
output_file (str): Name of the QChem output file.
ex_backup_list ([str]): List of the files to backup in addition
to input and output file.
rca_gdm_thresh (float): The threshold for the prior scf algorithm.
If last deltaE is larger than the threshold try RCA_DIIS
first, else, try DIIS_GDM first.
scf_max_cycles (int): The max iterations to set to fix SCF failure.
geom_max_cycles (int): The max iterations to set to fix geometry
optimization failure.
qchem_job (QchemJob): the managing object to run qchem.
"""
self.input_file = input_file
self.output_file = output_file
self.ex_backup_list = ex_backup_list
self.rca_gdm_thresh = rca_gdm_thresh
self.scf_max_cycles = scf_max_cycles
self.geom_max_cycles = geom_max_cycles
self.outdata = None
self.qcinp = None
self.error_step_id = None
self.errors = None
self.fix_step = None
self.qchem_job = qchem_job
def check(self):
# Checks output file for errors.
self.outdata = QcOutput(self.output_file).data
self.qcinp = QcInput.from_file(self.input_file)
self.error_step_id = None
self.errors = None
self.fix_step = None
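        # Remember the first job step that reported an error and the matching
        # input task; correct() will modify that task's parameters.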
for i, od in enumerate(self.outdata):
if od["has_error"]:
self.error_step_id = i
self.fix_step = self.qcinp.jobs[i]
self.errors = sorted(list(set(od["errors"])))
return True
return False
def correct(self):
self.backup()
actions = []
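        # Errors are ranked by priority; the highest-ranked error present in
        # this step is corrected first (falling back to the first reported
        # error if none of them appear in the ranking).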
error_rankings = ("pcm_solvent deprecated",
"autoz error",
"No input text",
"Killed",
"Insufficient static memory",
"Not Enough Total Memory",
"NAN values",
"Bad SCF convergence",
"Geometry optimization failed",
"Freq Job Too Small",
"Exit Code 134",
"Molecular charge is not found",
"Molecular spin multipilicity is not found"
)
e = self.errors[0]
for prio_error in error_rankings:
if prio_error in self.errors:
e = prio_error
break
if e == "autoz error":
if "sym_ignore" not in self.fix_step.params["rem"]:
self.fix_step.disable_symmetry()
actions.append("disable symmetry")
else:
return {"errors": self.errors, "actions": None}
elif e == "Bad SCF convergence":
act = self.fix_scf()
if act:
actions.append(act)
else:
return {"errors": self.errors, "actions": None}
elif e == "Geometry optimization failed":
act = self.fix_geom_opt()
if act:
actions.append(act)
else:
return {"errors": self.errors, "actions": None}
elif e == "NAN values":
if "xc_grid" not in self.fix_step.params["rem"]:
self.fix_step.set_dft_grid(128, 302)
actions.append("use tighter grid")
else:
return {"errors": self.errors, "actions": None}
elif e == "No input text":
if "sym_ignore" not in self.fix_step.params["rem"]:
self.fix_step.disable_symmetry()
actions.append("disable symmetry")
else:
                # This indicates something strange occurred on the
                # compute node. Wait for 30 minutes so that it won't
                # fail too quickly and make all the jobs fail.
if "PBS_JOBID" in os.environ and ("edique" in os.environ["PBS_JOBID"]
or "hopque" in os.environ["PBS_JOBID"]):
time.sleep(30.0 * 60.0)
return {"errors": self.errors, "actions": None}
elif e == "Freq Job Too Small":
natoms = len(self.fix_step.mol)
if "cpscf_nseg" not in self.fix_step.params["rem"] or \
self.fix_step.params["rem"]["cpscf_nseg"] != natoms:
self.fix_step.params["rem"]["cpscf_nseg"] = natoms
actions.append("use {} segment in CPSCF".format(natoms))
else:
return {"errors": self.errors, "actions": None}
elif e == "pcm_solvent deprecated":
solvent_params = self.fix_step.params.pop("pcm_solvent", None)
if solvent_params is not None:
self.fix_step.params["solvent"] = solvent_params
actions.append("use keyword solvent instead")
else:
return {"errors": self.errors, "actions": None}
elif e == "Exit Code 134":
act = self.fix_error_code_134()
if act:
actions.append(act)
else:
return {"errors": self.errors, "actions": None}
elif e == "Killed":
act = self.fix_error_killed()
if act:
actions.append(act)
else:
return {"errors": self.errors, "actions": None}
elif e == "Insufficient static memory":
act = self.fix_insufficient_static_memory()
if act:
actions.append(act)
else:
return {"errors": self.errors, "actions": None}
elif e == "Not Enough Total Memory":
act = self.fix_not_enough_total_memory()
if act:
actions.append(act)
else:
return {"errors": self.errors, "actions": None}
elif e == "Molecular charge is not found":
return {"errors": self.errors, "actions": None}
elif e == "Molecular spin multipilicity is not found":
return {"errors": self.errors, "actions": None}
else:
return {"errors": self.errors, "actions": None}
self.qcinp.write_file(self.input_file)
return {"errors": self.errors, "actions": actions}
def fix_not_enough_total_memory(self):
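        # For freq/nmr jobs, split the CPSCF problem into more segments (a
        # multiple of the CPU count) so that each segment needs less memory;
        # on a repeat failure, also drop to half of the CPUs. Other job types
        # that cannot use OpenMP simply fall back to half of the CPUs.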
if self.fix_step.params['rem']["jobtype"] in ["freq", "nmr"]:
ncpu = 1
if "-np" in self.qchem_job.current_command:
cmd = self.qchem_job.current_command
ncpu = int(cmd[cmd.index("-np") + 1])
natoms = len(self.qcinp.jobs[0].mol)
times_ncpu_full = int(natoms/ncpu)
nsegment_full = ncpu * times_ncpu_full
times_ncpu_half = int(natoms/(ncpu/2))
nsegment_half = int((ncpu/2) * times_ncpu_half)
if "cpscf_nseg" not in self.fix_step.params["rem"]:
self.fix_step.params["rem"]["cpscf_nseg"] = nsegment_full
return "Use {} CPSCF segments".format(nsegment_full)
elif self.fix_step.params["rem"]["cpscf_nseg"] < nsegment_half:
self.qchem_job.select_command("half_cpus", self.qcinp)
self.fix_step.params["rem"]["cpscf_nseg"] = nsegment_half
return "Use half CPUs and {} CPSCF segments".format(nsegment_half)
return None
elif not self.qchem_job.is_openmp_compatible(self.qcinp):
if self.qchem_job.current_command_name != "half_cpus":
self.qchem_job.select_command("half_cpus", self.qcinp)
return "half_cpus"
else:
return None
def fix_error_code_134(self):
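        # Exit code 134 (SIGABRT) escalation: first tighten the integral
        # threshold, then reduce per-core memory pressure via half_cpus or
        # OpenMP, and for freq/nmr jobs raise the number of CPSCF segments.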
if "thresh" not in self.fix_step.params["rem"]:
self.fix_step.set_integral_threshold(thresh=12)
return "use tight integral threshold"
elif not (self.qchem_job.is_openmp_compatible(self.qcinp) and
self.qchem_job.command_available("openmp")):
if self.qchem_job.current_command_name != "half_cpus":
self.qchem_job.select_command("half_cpus", self.qcinp)
return "half_cpus"
else:
if self.fix_step.params['rem']["jobtype"] in ["freq", "nmr"]:
act = self.fix_not_enough_total_memory()
return act
return None
elif self.qchem_job.current_command_name != "openmp":
self.qchem_job.select_command("openmp", self.qcinp)
return "openmp"
else:
if self.fix_step.params['rem']["jobtype"] in ["freq", "nmr"]:
act = self.fix_not_enough_total_memory()
return act
return None
def fix_insufficient_static_memory(self):
if not (self.qchem_job.is_openmp_compatible(self.qcinp)
and self.qchem_job.command_available("openmp")):
if self.qchem_job.current_command_name != "half_cpus":
self.qchem_job.select_command("half_cpus", self.qcinp)
return "half_cpus"
elif not self.qchem_job.large_static_mem:
self.qchem_job.large_static_mem = True
# noinspection PyProtectedMember
self.qchem_job._set_qchem_memory(self.qcinp)
return "Increase Static Memory"
else:
return None
elif self.qchem_job.current_command_name != "openmp":
self.qchem_job.select_command("openmp", self.qcinp)
return "Use OpenMP"
elif not self.qchem_job.large_static_mem:
self.qchem_job.large_static_mem = True
# noinspection PyProtectedMember
self.qchem_job._set_qchem_memory(self.qcinp)
return "Increase Static Memory"
else:
return None
def fix_error_killed(self):
if not (self.qchem_job.is_openmp_compatible(self.qcinp)
and self.qchem_job.command_available("openmp")):
if self.qchem_job.current_command_name != "half_cpus":
self.qchem_job.select_command("half_cpus", self.qcinp)
return "half_cpus"
else:
return None
elif self.qchem_job.current_command_name != "openmp":
self.qchem_job.select_command("openmp", self.qcinp)
return "Use OpenMP"
else:
return None
def fix_scf(self):
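        # The SCF fix history is stored as a JSON blob inside the job comment
        # ("<SCF Fix Strategy>...</SCF Fix Strategy>"), so repeated failures
        # advance through a fixed list of SCF algorithms one method at a time.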
comments = self.fix_step.params.get("comment", "")
scf_pattern = re.compile(r"<SCF Fix Strategy>(.*)</SCF Fix "
r"Strategy>", flags=re.DOTALL)
old_strategy_text = re.findall(scf_pattern, comments)
if len(old_strategy_text) > 0:
old_strategy_text = old_strategy_text[0]
od = self.outdata[self.error_step_id]
if "Negative Eigen" in self.errors:
if "thresh" not in self.fix_step.params["rem"]:
self.fix_step.set_integral_threshold(thresh=12)
return "use tight integral threshold"
elif int(self.fix_step.params["rem"]["thresh"]) < 14:
self.fix_step.set_integral_threshold(thresh=14)
return "use even tighter integral threshold"
if len(od["scf_iteration_energies"]) == 0 \
or len(od["scf_iteration_energies"][-1]) <= 10:
if 'Exit Code 134' in self.errors:
                # premature termination of SCF
return self.fix_error_code_134()
else:
return None
if od["jobtype"] in ["opt", "ts", "aimd"] \
and len(od["molecules"]) >= 2:
strategy = "reset"
elif len(old_strategy_text) > 0:
strategy = json.loads(old_strategy_text)
strategy["current_method_id"] += 1
else:
strategy = dict()
scf_iters = od["scf_iteration_energies"][-1]
if scf_iters[-1][1] >= self.rca_gdm_thresh:
strategy["methods"] = ["increase_iter", "rca_diis", "gwh",
"gdm", "rca", "core+rca", "fon"]
strategy["current_method_id"] = 0
else:
strategy["methods"] = ["increase_iter", "diis_gdm", "gwh",
"rca", "gdm", "core+gdm", "fon"]
strategy["current_method_id"] = 0
strategy["version"] = 2.0
# noinspection PyTypeChecker
if strategy == "reset":
self.fix_step.set_scf_algorithm_and_iterations(
algorithm="diis", iterations=self.scf_max_cycles)
if self.error_step_id > 0:
self.set_scf_initial_guess("read")
else:
self.set_scf_initial_guess("sad")
if od["jobtype"] in ["opt", "ts"]:
self.set_last_input_geom(od["molecules"][-1])
else:
assert od["jobtype"] == "aimd"
from pymatgen.io.qchem import QcNucVeloc
from pymatgen.io.xyz import XYZ
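                # Resume the AIMD run from where it stopped: read the nuclear
                # velocities and trajectory from the scratch directory, reduce
                # the remaining aimd_steps, and back up the trajectory files.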
scr_dir = od["scratch_dir"]
qcnv_filepath = os.path.join(scr_dir, "AIMD", "NucVeloc")
qc_md_view_filepath = os.path.join(scr_dir, "AIMD", "View.xyz")
qcnv = QcNucVeloc(qcnv_filepath)
qc_md_view = XYZ.from_file(qc_md_view_filepath)
assert len(qcnv.velocities) == len(qc_md_view.all_molecules)
aimd_steps = self.fix_step.params["rem"]["aimd_steps"]
elapsed_steps = len(qc_md_view.all_molecules)
remaining_steps = aimd_steps - elapsed_steps + 1
self.fix_step.params["rem"]["aimd_steps"] = remaining_steps
self.set_last_input_geom(qc_md_view.molecule)
self.fix_step.set_velocities(qcnv.velocities[-1])
self.fix_step.params["rem"].pop("aimd_init_veloc", None)
traj_num = max([0] + [int(f.split(".")[1])
for f in glob.glob("traj_View.*.xyz")])
dest_view_filename = "traj_View.{}.xyz".format(traj_num + 1)
dest_nv_filename = "traj_NucVeloc.{}.txt".format(traj_num + 1)
logging.info("Backing up trajectory files to {} and {}."
.format(dest_view_filename, dest_nv_filename))
shutil.copy(qc_md_view_filepath, dest_view_filename)
shutil.copy(qcnv_filepath, dest_nv_filename)
if len(old_strategy_text) > 0:
comments = scf_pattern.sub("", comments)
self.fix_step.params["comment"] = comments
if len(comments.strip()) == 0:
self.fix_step.params.pop("comment")
return "reset"
elif strategy["current_method_id"] > len(strategy["methods"])-1:
return None
else:
# noinspection PyTypeChecker
method = strategy["methods"][strategy["current_method_id"]]
if method == "increase_iter":
self.fix_step.set_scf_algorithm_and_iterations(
algorithm="diis", iterations=self.scf_max_cycles)
self.set_scf_initial_guess("sad")
elif method == "rca_diis":
self.fix_step.set_scf_algorithm_and_iterations(
algorithm="rca_diis", iterations=self.scf_max_cycles)
self.set_scf_initial_guess("sad")
elif method == "gwh":
self.fix_step.set_scf_algorithm_and_iterations(
algorithm="diis", iterations=self.scf_max_cycles)
self.set_scf_initial_guess("gwh")
elif method == "gdm":
self.fix_step.set_scf_algorithm_and_iterations(
algorithm="gdm", iterations=self.scf_max_cycles)
self.set_scf_initial_guess("sad")
elif method == "rca":
self.fix_step.set_scf_algorithm_and_iterations(
algorithm="rca", iterations=self.scf_max_cycles)
self.set_scf_initial_guess("sad")
elif method == "core+rca":
self.fix_step.set_scf_algorithm_and_iterations(
algorithm="rca", iterations=self.scf_max_cycles)
self.set_scf_initial_guess("core")
elif method == "diis_gdm":
self.fix_step.set_scf_algorithm_and_iterations(
algorithm="diis_gdm", iterations=self.scf_max_cycles)
self.fix_step.set_scf_initial_guess("sad")
elif method == "core+gdm":
self.fix_step.set_scf_algorithm_and_iterations(
algorithm="gdm", iterations=self.scf_max_cycles)
self.set_scf_initial_guess("core")
elif method == "fon":
self.fix_step.set_scf_algorithm_and_iterations(
algorithm="diis", iterations=self.scf_max_cycles)
self.set_scf_initial_guess("sad")
natoms = len(od["molecules"][-1])
self.fix_step.params["rem"]["occupations"] = 2
self.fix_step.params["rem"]["fon_norb"] = int(natoms * 0.618)
self.fix_step.params["rem"]["fon_t_start"] = 300
self.fix_step.params["rem"]["fon_t_end"] = 300
self.fix_step.params["rem"]["fon_e_thresh"] = 6
self.fix_step.set_integral_threshold(14)
self.fix_step.set_scf_convergence_threshold(7)
else:
raise ValueError("fix method " + method + " is not supported")
strategy_text = "<SCF Fix Strategy>"
strategy_text += json.dumps(strategy, indent=4, sort_keys=True)
strategy_text += "</SCF Fix Strategy>"
if len(old_strategy_text) > 0:
comments = scf_pattern.sub(strategy_text, comments)
else:
comments += "\n" + strategy_text
self.fix_step.params["comment"] = comments
return method
def set_last_input_geom(self, new_mol):
for i in range(self.error_step_id, -1, -1):
qctask = self.qcinp.jobs[i]
if isinstance(qctask.mol, Molecule):
qctask.mol = copy.deepcopy(new_mol)
def set_scf_initial_guess(self, guess="sad"):
if "scf_guess" not in self.fix_step.params["rem"] \
or self.error_step_id > 0 \
or self.fix_step.params["rem"]["scf_guess"] != "read":
self.fix_step.set_scf_initial_guess(guess)
def fix_geom_opt(self):
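        # Like fix_scf, the geometry-optimization fix history is kept as a
        # JSON blob in the job comment and advances one method per failure:
        # increase_iter -> GDIIS -> Cartesian coordinates.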
comments = self.fix_step.params.get("comment", "")
geom_pattern = re.compile(r"<Geom Opt Fix Strategy>(.*)"
r"</Geom Opt Fix Strategy>",
flags=re.DOTALL)
old_strategy_text = re.findall(geom_pattern, comments)
if len(old_strategy_text) > 0:
old_strategy_text = old_strategy_text[0]
od = self.outdata[self.error_step_id]
        if 'Lamda Determination Failed' in self.errors and len(od["molecules"]) >= 2:
self.fix_step.set_scf_algorithm_and_iterations(
algorithm="diis", iterations=self.scf_max_cycles)
if self.error_step_id > 0:
self.set_scf_initial_guess("read")
else:
self.set_scf_initial_guess("sad")
self.set_last_input_geom(od["molecules"][-1])
if od["jobtype"] == "aimd":
aimd_steps = self.fix_step.params["rem"]["aimd_steps"]
elapsed_steps = len(od["molecules"]) - 1
remaining_steps = aimd_steps - elapsed_steps + 1
self.fix_step.params["rem"]["aimd_steps"] = remaining_steps
if len(old_strategy_text) > 0:
comments = geom_pattern.sub("", comments)
self.fix_step.params["comment"] = comments
if len(comments.strip()) == 0:
self.fix_step.params.pop("comment")
return "reset"
if len(od["molecules"]) <= 10:
            # premature termination of geometry optimization
if 'Exit Code 134' in self.errors:
return self.fix_error_code_134()
else:
return None
if len(old_strategy_text) > 0:
strategy = json.loads(old_strategy_text)
strategy["current_method_id"] += 1
else:
strategy = dict()
strategy["methods"] = ["increase_iter", "GDIIS", "CartCoords"]
strategy["current_method_id"] = 0
if strategy["current_method_id"] > len(strategy["methods"]) - 1:
return None
else:
method = strategy["methods"][strategy["current_method_id"]]
if method == "increase_iter":
self.fix_step.set_geom_max_iterations(self.geom_max_cycles)
self.set_last_input_geom(od["molecules"][-1])
elif method == "GDIIS":
self.fix_step.set_geom_opt_use_gdiis(subspace_size=5)
self.fix_step.set_geom_max_iterations(self.geom_max_cycles)
self.set_last_input_geom(od["molecules"][-1])
elif method == "CartCoords":
self.fix_step.set_geom_opt_coords_type("cartesian")
self.fix_step.set_geom_max_iterations(self.geom_max_cycles)
self.fix_step.set_geom_opt_use_gdiis(0)
self.set_last_input_geom(od["molecules"][-1])
else:
raise ValueError("fix method" + method + "is not supported")
strategy_text = "<Geom Opt Fix Strategy>"
strategy_text += json.dumps(strategy, indent=4, sort_keys=True)
strategy_text += "</Geom Opt Fix Strategy>"
if len(old_strategy_text) > 0:
comments = geom_pattern.sub(strategy_text, comments)
else:
comments += "\n" + strategy_text
self.fix_step.params["comment"] = comments
return method
def backup(self):
error_num = max([0] + [int(f.split(".")[1])
for f in glob.glob("error.*.tar.gz")])
filename = "error.{}.tar.gz".format(error_num + 1)
logging.info("Backing up run to {}.".format(filename))
tar = tarfile.open(filename, "w:gz")
bak_list = [self.input_file, self.output_file] + \
list(self.ex_backup_list)
for f in bak_list:
if os.path.exists(f):
tar.add(f)
tar.close()
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"input_file": self.input_file,
"output_file": self.output_file,
"ex_backup_list": tuple(self.ex_backup_list),
"rca_gdm_thresh": self.rca_gdm_thresh,
"scf_max_cycles": self.scf_max_cycles,
"geom_max_cycles": self.geom_max_cycles,
"outdata": self.outdata,
"qcinp": self.qcinp.as_dict() if self.qcinp else None,
"error_step_id": self.error_step_id,
"errors": self.errors,
"fix_step": self.fix_step.as_dict() if self.fix_step else None}
@classmethod
def from_dict(cls, d):
h = QChemErrorHandler(input_file=d["input_file"],
output_file=d["output_file"],
ex_backup_list=d["ex_backup_list"],
rca_gdm_thresh=d["rca_gdm_thresh"],
scf_max_cycles=d["scf_max_cycles"],
geom_max_cycles=d["geom_max_cycles"])
h.outdata = d["outdata"]
h.qcinp = QcInput.from_dict(d["qcinp"]) if d["qcinp"] else None
h.error_step_id = d["error_step_id"]
h.errors = d["errors"]
h.fix_step = QcTask.from_dict(d["fix_step"]) if d["fix_step"] else None
return h
| mit | -107,415,132,109,779,500 | 44.735507 | 90 | 0.528202 | false |