# === christianlundkvist/pybitcointools | bitcoin/main.py | license: mit ===
#!/usr/bin/python
import hashlib, re, sys, os, base64, time, random, hmac
import ripemd
### Elliptic curve parameters (secp256k1)
P = 2**256-2**32-2**9-2**8-2**7-2**6-2**4-1
N = 115792089237316195423570985008687907852837564279074904382605163141518161494337
A = 0
B = 7
Gx = 55066263022277343669578718895168534326250603453777594175500187360389116729240
Gy = 32670510020758816978083085130507043184471273380659243275938904335757337482424
G = (Gx,Gy)
def change_curve(p, n, a, b, gx, gy):
global P, N, A, B, Gx, Gy, G
P, N, A, B, Gx, Gy = p, n, a, b, gx, gy
G = (Gx, Gy)
def getG():
return G
### Extended Euclidean Algorithm
def inv(a,n):
lm, hm = 1,0
low, high = a%n,n
while low > 1:
r = high/low
nm, new = hm-lm*r, high-low*r
lm, low, hm, high = nm, new, lm, low
return lm % n
### Base switching
def get_code_string(base):
if base == 2: return '01'
elif base == 10: return '0123456789'
elif base == 16: return '0123456789abcdef'
elif base == 32: return 'abcdefghijklmnopqrstuvwxyz234567'
elif base == 58: return '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
elif base == 256: return ''.join([chr(x) for x in range(256)])
else: raise ValueError("Invalid base!")
def lpad(msg,symbol,length):
if len(msg) >= length: return msg
return symbol * (length - len(msg)) + msg
def encode(val,base,minlen=0):
base, minlen = int(base), int(minlen)
code_string = get_code_string(base)
result = ""
while val > 0:
result = code_string[val % base] + result
val /= base
return lpad(result,code_string[0],minlen)
def decode(string,base):
base = int(base)
code_string = get_code_string(base)
result = 0
if base == 16: string = string.lower()
while len(string) > 0:
result *= base
result += code_string.find(string[0])
string = string[1:]
return result
def changebase(string,frm,to,minlen=0):
if frm == to: return lpad(string,get_code_string(frm)[0],minlen)
return encode(decode(string,frm),to,minlen)
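# For example, changebase('1a', 16, 10) == '26' and changebase('26', 10, 16) == '1a';
# encode/decode above do the conversion, with base 58 used later for Bitcoin addresses.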
### JSON access (for pybtctool convenience)
def access(obj,prop):
if isinstance(obj,dict):
if prop in obj: return obj[prop]
elif '.' in prop: return obj[float(prop)]
else: return obj[int(prop)]
else:
return obj[int(prop)]
def multiaccess(obj,prop):
return [access(o,prop) for o in obj]
def slice(obj,start=0,end=2**200):
return obj[int(start):int(end)]
def count(obj):
return len(obj)
_sum = sum
def sum(obj):
return _sum(obj)
### Elliptic Curve functions
def isinf(p): return p[0] == 0 and p[1] == 0
def base10_add(a,b):
if isinf(a): return b[0],b[1]
if isinf(b): return a[0],a[1]
if a[0] == b[0]:
if a[1] == b[1]: return base10_double((a[0],a[1]))
else: return (0,0)
m = ((b[1]-a[1]) * inv(b[0]-a[0],P)) % P
x = (m*m-a[0]-b[0]) % P
y = (m*(a[0]-x)-a[1]) % P
return (x,y)
def base10_double(a):
if isinf(a): return (0,0)
m = ((3*a[0]*a[0]+A)*inv(2*a[1],P)) % P
x = (m*m-2*a[0]) % P
y = (m*(a[0]-x)-a[1]) % P
return (x,y)
def base10_multiply(a,n):
if isinf(a) or n == 0: return (0,0)
if n == 1: return a
if n < 0 or n >= N: return base10_multiply(a,n%N)
if (n%2) == 0: return base10_double(base10_multiply(a,n/2))
if (n%2) == 1: return base10_add(base10_double(base10_multiply(a,n/2)),a)
# Functions for handling pubkey and privkey formats
def get_pubkey_format(pub):
if isinstance(pub,(tuple,list)): return 'decimal'
elif len(pub) == 65 and pub[0] == '\x04': return 'bin'
elif len(pub) == 130 and pub[0:2] == '04': return 'hex'
elif len(pub) == 33 and pub[0] in ['\x02','\x03']: return 'bin_compressed'
elif len(pub) == 66 and pub[0:2] in ['02','03']: return 'hex_compressed'
elif len(pub) == 64: return 'bin_electrum'
elif len(pub) == 128: return 'hex_electrum'
else: raise Exception("Pubkey not in recognized format")
def encode_pubkey(pub,formt):
if not isinstance(pub,(tuple,list)):
pub = decode_pubkey(pub)
if formt == 'decimal': return pub
elif formt == 'bin': return '\x04' + encode(pub[0],256,32) + encode(pub[1],256,32)
elif formt == 'bin_compressed': return chr(2+(pub[1]%2)) + encode(pub[0],256,32)
elif formt == 'hex': return '04' + encode(pub[0],16,64) + encode(pub[1],16,64)
elif formt == 'hex_compressed': return '0'+str(2+(pub[1]%2)) + encode(pub[0],16,64)
elif formt == 'bin_electrum': return encode(pub[0],256,32) + encode(pub[1],256,32)
elif formt == 'hex_electrum': return encode(pub[0],16,64) + encode(pub[1],16,64)
else: raise Exception("Invalid format!")
def decode_pubkey(pub,formt=None):
if not formt: formt = get_pubkey_format(pub)
if formt == 'decimal': return pub
elif formt == 'bin': return (decode(pub[1:33],256),decode(pub[33:65],256))
elif formt == 'bin_compressed':
x = decode(pub[1:33],256)
beta = pow(x*x*x+A*x+B,(P+1)/4,P)
y = (P-beta) if ((beta + ord(pub[0])) % 2) else beta
return (x,y)
elif formt == 'hex': return (decode(pub[2:66],16),decode(pub[66:130],16))
elif formt == 'hex_compressed':
return decode_pubkey(pub.decode('hex'),'bin_compressed')
elif formt == 'bin_electrum':
return (decode(pub[:32],256),decode(pub[32:64],256))
elif formt == 'hex_electrum':
return (decode(pub[:64],16),decode(pub[64:128],16))
else: raise Exception("Invalid format!")
def get_privkey_format(priv):
if isinstance(priv,(int,long)): return 'decimal'
elif len(priv) == 32: return 'bin'
elif len(priv) == 33: return 'bin_compressed'
elif len(priv) == 64: return 'hex'
elif len(priv) == 66: return 'hex_compressed'
else:
bin_p = b58check_to_bin(priv)
if len(bin_p) == 32: return 'wif'
elif len(bin_p) == 33: return 'wif_compressed'
else: raise Exception("WIF does not represent privkey")
def encode_privkey(priv,formt,vbyte=0):
if not isinstance(priv,(int,long)):
return encode_privkey(decode_privkey(priv),formt,vbyte)
if formt == 'decimal': return priv
elif formt == 'bin': return encode(priv,256,32)
elif formt == 'bin_compressed': return encode(priv,256,32)+'\x01'
elif formt == 'hex': return encode(priv,16,64)
elif formt == 'hex_compressed': return encode(priv,16,64)+'01'
elif formt == 'wif':
return bin_to_b58check(encode(priv,256,32),128+int(vbyte))
elif formt == 'wif_compressed':
return bin_to_b58check(encode(priv,256,32)+'\x01',128+int(vbyte))
else: raise Exception("Invalid format!")
def decode_privkey(priv,formt=None):
if not formt: formt = get_privkey_format(priv)
if formt == 'decimal': return priv
elif formt == 'bin': return decode(priv,256)
elif formt == 'bin_compressed': return decode(priv[:32],256)
elif formt == 'hex': return decode(priv,16)
elif formt == 'hex_compressed': return decode(priv[:64],16)
else:
bin_p = b58check_to_bin(priv)
if len(bin_p) == 32: return decode(bin_p,256)
elif len(bin_p) == 33: return decode(bin_p[:32],256)
else: raise Exception("WIF does not represent privkey")
def add_pubkeys(p1,p2):
f1,f2 = get_pubkey_format(p1), get_pubkey_format(p2)
return encode_pubkey(base10_add(decode_pubkey(p1,f1),decode_pubkey(p2,f2)),f1)
def add_privkeys(p1,p2):
f1,f2 = get_privkey_format(p1), get_privkey_format(p2)
return encode_privkey((decode_privkey(p1,f1) + decode_privkey(p2,f2)) % N,f1)
def multiply(pubkey,privkey):
f1,f2 = get_pubkey_format(pubkey), get_privkey_format(privkey)
pubkey, privkey = decode_pubkey(pubkey,f1), decode_privkey(privkey,f2)
# http://safecurves.cr.yp.to/twist.html
if not isinf(pubkey) and (pubkey[0]**3+B-pubkey[1]*pubkey[1]) % P != 0:
raise Exception("Point not on curve")
return encode_pubkey(base10_multiply(pubkey,privkey),f1)
def divide(pubkey,privkey):
factor = inv(decode_privkey(privkey),N)
return multiply(pubkey,factor)
def compress(pubkey):
f = get_pubkey_format(pubkey)
if 'compressed' in f: return pubkey
elif f == 'bin': return encode_pubkey(decode_pubkey(pubkey,f),'bin_compressed')
elif f == 'hex' or f == 'decimal':
return encode_pubkey(decode_pubkey(pubkey,f),'hex_compressed')
def decompress(pubkey):
f = get_pubkey_format(pubkey)
if 'compressed' not in f: return pubkey
elif f == 'bin_compressed': return encode_pubkey(decode_pubkey(pubkey,f),'bin')
elif f == 'hex_compressed' or f == 'decimal':
return encode_pubkey(decode_pubkey(pubkey,f),'hex')
def privkey_to_pubkey(privkey):
f = get_privkey_format(privkey)
privkey = decode_privkey(privkey,f)
if privkey == 0 or privkey >= N:
raise Exception("Invalid privkey")
if f in ['bin','bin_compressed','hex','hex_compressed','decimal']:
return encode_pubkey(base10_multiply(G,privkey),f)
else:
return encode_pubkey(base10_multiply(G,privkey),f.replace('wif','hex'))
privtopub = privkey_to_pubkey
def privkey_to_address(priv,magicbyte=0):
return pubkey_to_address(privkey_to_pubkey(priv),magicbyte)
privtoaddr = privkey_to_address
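# Illustrative use (the seed string below is a made-up example, not from this repo):
#   priv = sha256('some seed text')   # 64-char hex string, i.e. the 'hex' privkey format
#   addr = privtoaddr(priv)           # base58check-encoded address for that key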
def neg_pubkey(pubkey):
f = get_pubkey_format(pubkey)
pubkey = decode_pubkey(pubkey,f)
return encode_pubkey((pubkey[0],(P-pubkey[1]) % P),f)
def neg_privkey(privkey):
f = get_privkey_format(privkey)
privkey = decode_privkey(privkey,f)
return encode_privkey((N - privkey) % N,f)
def subtract_pubkeys(p1, p2):
f1,f2 = get_pubkey_format(p1), get_pubkey_format(p2)
k2 = decode_pubkey(p2,f2)
return encode_pubkey(base10_add(decode_pubkey(p1,f1),(k2[0],(P - k2[1]) % P)),f1)
def subtract_privkeys(p1, p2):
f1,f2 = get_privkey_format(p1), get_privkey_format(p2)
k2 = decode_privkey(p2,f2)
return encode_privkey((decode_privkey(p1,f1) - k2) % N,f1)
### Hashes
def bin_hash160(string):
intermed = hashlib.sha256(string).digest()
digest = ''
try:
digest = hashlib.new('ripemd160',intermed).digest()
except:
digest = ripemd.RIPEMD160(intermed).digest()
return digest
def hash160(string):
return bin_hash160(string).encode('hex')
def bin_sha256(string):
return hashlib.sha256(string).digest()
def sha256(string):
return bin_sha256(string).encode('hex')
def bin_dbl_sha256(string):
return hashlib.sha256(hashlib.sha256(string).digest()).digest()
def dbl_sha256(string):
return bin_dbl_sha256(string).encode('hex')
def bin_slowsha(string):
orig_input = string
for i in range(100000):
string = hashlib.sha256(string + orig_input).digest()
return string
def slowsha(string):
return bin_slowsha(string).encode('hex')
def hash_to_int(x):
if len(x) in [40,64]: return decode(x,16)
else: return decode(x,256)
def num_to_var_int(x):
x = int(x)
if x < 253: return chr(x)
elif x < 65536: return chr(253) + encode(x,256,2)[::-1]
elif x < 4294967296: return chr(254) + encode(x,256,4)[::-1]
else: return chr(255) + encode(x,256,8)[::-1]
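# num_to_var_int emits Bitcoin's variable-length integer encoding: a single byte for
# values below 253, otherwise a 0xfd/0xfe/0xff marker followed by the value in
# 2, 4 or 8 little-endian bytes (e.g. num_to_var_int(253) == '\xfd\xfd\x00').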
# WTF, Electrum?
def electrum_sig_hash(message):
padded = "\x18Bitcoin Signed Message:\n" + num_to_var_int( len(message) ) + message
return bin_dbl_sha256(padded)
def random_key():
# Gotta be secure after that java.SecureRandom fiasco...
entropy = os.urandom(32)+str(random.randrange(2**256))+str(int(time.time())**7)
return sha256(entropy)
def random_electrum_seed():
entropy = os.urandom(32)+str(random.randrange(2**256))+str(int(time.time())**7)
return sha256(entropy)[:32]
### Encodings
def bin_to_b58check(inp,magicbyte=0):
inp_fmtd = chr(int(magicbyte)) + inp
leadingzbytes = len(re.match('^\x00*',inp_fmtd).group(0))
checksum = bin_dbl_sha256(inp_fmtd)[:4]
return '1' * leadingzbytes + changebase(inp_fmtd+checksum,256,58)
def b58check_to_bin(inp):
leadingzbytes = len(re.match('^1*',inp).group(0))
data = '\x00' * leadingzbytes + changebase(inp,58,256)
assert bin_dbl_sha256(data[:-4])[:4] == data[-4:]
return data[1:-4]
def get_version_byte(inp):
leadingzbytes = len(re.match('^1*',inp).group(0))
data = '\x00' * leadingzbytes + changebase(inp,58,256)
assert bin_dbl_sha256(data[:-4])[:4] == data[-4:]
return ord(data[0])
def hex_to_b58check(inp,magicbyte=0):
return bin_to_b58check(inp.decode('hex'),magicbyte)
def b58check_to_hex(inp): return b58check_to_bin(inp).encode('hex')
def pubkey_to_address(pubkey,magicbyte=0):
if isinstance(pubkey,(list,tuple)):
pubkey = encode_pubkey(pubkey,'bin')
if len(pubkey) in [66,130]:
return bin_to_b58check(bin_hash160(pubkey.decode('hex')),magicbyte)
return bin_to_b58check(bin_hash160(pubkey),magicbyte)
pubtoaddr = pubkey_to_address
### ECDSA
def encode_sig(v,r,s):
vb, rb, sb = chr(v), encode(r,256), encode(s,256)
return base64.b64encode(vb+'\x00'*(32-len(rb))+rb+'\x00'*(32-len(sb))+sb)
def decode_sig(sig):
bytez = base64.b64decode(sig)
return ord(bytez[0]), decode(bytez[1:33],256), decode(bytez[33:],256)
# https://tools.ietf.org/html/rfc6979#section-3.2
def deterministic_generate_k(msghash,priv):
v = '\x01' * 32
k = '\x00' * 32
priv = encode_privkey(priv,'bin')
msghash = encode(hash_to_int(msghash),256,32)
k = hmac.new(k, v+'\x00'+priv+msghash, hashlib.sha256).digest()
v = hmac.new(k, v, hashlib.sha256).digest()
k = hmac.new(k, v+'\x01'+priv+msghash, hashlib.sha256).digest()
v = hmac.new(k, v, hashlib.sha256).digest()
return decode(hmac.new(k, v, hashlib.sha256).digest(),256)
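# The HMAC-SHA256 chain above derives the ECDSA nonce k deterministically from the
# private key and message hash, so ecdsa_raw_sign below produces the same signature
# for the same key and message on every run (no RNG is involved at signing time).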
def ecdsa_raw_sign(msghash,priv):
z = hash_to_int(msghash)
k = deterministic_generate_k(msghash,priv)
r,y = base10_multiply(G,k)
s = inv(k,N) * (z + r*decode_privkey(priv)) % N
return 27+(y%2),r,s
def ecdsa_sign(msg,priv):
return encode_sig(*ecdsa_raw_sign(electrum_sig_hash(msg),priv))
def ecdsa_raw_verify(msghash,vrs,pub):
v,r,s = vrs
w = inv(s,N)
z = hash_to_int(msghash)
u1, u2 = z*w % N, r*w % N
x,y = base10_add(base10_multiply(G,u1), base10_multiply(decode_pubkey(pub),u2))
return r == x
def ecdsa_verify(msg,sig,pub):
return ecdsa_raw_verify(electrum_sig_hash(msg),decode_sig(sig),pub)
def ecdsa_raw_recover(msghash,vrs):
v,r,s = vrs
x = r
beta = pow(x*x*x+A*x+B,(P+1)/4,P)
y = beta if v%2 ^ beta%2 else (P - beta)
z = hash_to_int(msghash)
Qr = base10_add(neg_pubkey(base10_multiply(G,z)),base10_multiply((x,y),s))
Q = base10_multiply(Qr,inv(r,N))
if ecdsa_raw_verify(msghash,vrs,Q): return Q
return False
def ecdsa_recover(msg,sig):
return encode_pubkey(ecdsa_raw_recover(electrum_sig_hash(msg),decode_sig(sig)),'hex')

# === robmcmullen/peppy | i18n.in/make-podict.py | license: gpl-2.0 ===
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
"""Generate python dictionaries catalog from textual translation description.
This program converts a textual Uniforum-style message catalog (.po file) into
a python dictionary
Based on msgfmt.py by Martin v. Löwis <[email protected]>
"""
import sys, re, os, glob
from optparse import OptionParser
def normalize(s):
# This converts the utf-8 string into a format that is appropriate for .po
# files, namely much closer to C style.
lines = s.split('\n')
if len(lines) == 1:
s = '"' + s + '"'
else:
if not lines[-1]:
del lines[-1]
lines[-1] = lines[-1] + '\\n'
print lines
lineterm = '\\n"\n"'
s = '""\n"' + lineterm.join(lines) + '"'
print lines
print s
return s
class MessageCatalog(object):
def __init__(self, template, use_fuzzy=False):
self.messages = {}
self.template = {}
self.encoding = None
self.current_encoding = None
self.loadTemplate(template)
self.use_fuzzy = use_fuzzy
def addAlways(self, id, str, fuzzy):
"Add a non-fuzzy translation to the dictionary."
if not fuzzy or self.use_fuzzy:
print ("adding template for %s" % id)
self.template[id] = True
if str:
self.addCheck(id, str, fuzzy)
def addCheck(self, id, str, fuzzy):
"Add a non-fuzzy translation to the dictionary if it's a valid msgid."
if id == "":
match = re.search(r'charset=(\S*)\n', str)
if match:
self.current_encoding = match.group(1).lower()
#print("Found encoding %s" % self.current_encoding)
if not self.encoding:
self.encoding = self.current_encoding
if str and (not fuzzy or self.use_fuzzy):
if id in self.template and id not in self.messages:
print ("adding translation for %s" % id)
if self.current_encoding != self.encoding:
str = str.decode(self.current_encoding).encode(self.encoding)
if id != str:
# Don't include translation if it's the same as the source
self.messages[id] = str
else:
for prefix in [u'', u'&']:
for suffix in [u'', u'...', u':']:
if not prefix and not suffix:
continue
keyword = prefix + id.decode(self.current_encoding) + suffix
if keyword in self.template and keyword not in self.messages:
print ("adding pre/suffixed translation for %s" % keyword)
if self.current_encoding != self.encoding:
str = str.decode(self.current_encoding).encode(self.encoding)
str = prefix.encode(self.encoding) + str + suffix.encode(self.encoding)
self.messages[keyword] = str
def generateDict(self):
"Return the generated dictionary"
metadata = self.messages['']
del self.messages['']
msgids = self.messages.keys()
msgids.sort()
messages = '\n'.join(["%s: %s," % (repr(a), repr(self.messages[a])) for a in msgids])
return "# -*- coding: %s -*-\n#This is generated code - do not edit\nencoding = '%s'\ndict = {\n%s\n}\n"%(self.encoding, self.encoding, messages)
def loadTemplate(self, filename):
self.add = self.addAlways
self.addPO(filename)
self.add = self.addCheck
def addPO(self, filename):
ID = 1
STR = 2
print("loading translations from %s" % filename)
try:
lines = open(filename).readlines()
except IOError, msg:
print >> sys.stderr, msg
sys.exit(1)
self.current_encoding = 'utf-8'
section = None
fuzzy = 0
# Parse the catalog
lno = 0
for l in lines:
lno += 1
# If we get a comment line after a msgstr, this is a new entry
if l[0] == '#' and section == STR:
self.add(msgid, msgstr, fuzzy)
section = None
fuzzy = 0
# Record a fuzzy mark
if l[:2] == '#,' and l.find('fuzzy'):
fuzzy = 1
# Skip comments
if l[0] == '#':
continue
# Now we are in a msgid section, output previous section
if l.startswith('msgid'):
if section == STR:
self.add(msgid, msgstr, fuzzy)
section = ID
l = l[5:]
msgid = msgstr = ''
# Now we are in a msgstr section
elif l.startswith('msgstr'):
section = STR
l = l[6:]
# Skip empty lines
l = l.strip()
if not l:
continue
# XXX: Does this always follow Python escape semantics?
try:
l = eval(l)
if section == ID:
msgid += l
elif section == STR:
msgstr += l
else:
print >> sys.stderr, 'Syntax error on %s:%d' % (filename, lno), \
'before:'
print >> sys.stderr, l
sys.exit(1)
except SyntaxError:
print >> sys.stderr, 'Syntax error on %s:%d' % (filename, lno)
# Add last entry
if section == STR:
self.add(msgid, msgstr, fuzzy)
def addMO(self, filename):
temp = "converted-%s.po" % os.path.basename(filename)
os.system('msgunfmt -o %s %s' % (temp, filename))
# if it's a blank catalog, it won't be written, so check for it.
if os.path.exists(temp):
self.addPO(temp)
os.remove(temp)
def addFile(self, filename):
if filename.endswith('mo'):
self.addMO(filename)
else:
self.addPO(filename)
def addDir(self, dir, canonical):
if not canonical:
return
# If it's a LC_MESSAGES format directory, use all the files in
# the directory
choices = [canonical]
if "_" in canonical:
choices.append(canonical[0:2])
for locale in choices:
lcdir = "%s/%s/LC_MESSAGES" % (dir, locale)
ldir = "%s/%s" % (dir, locale)
if os.path.isdir(lcdir):
files = glob.glob("%s/*" % lcdir)
print files
for file in files:
self.addFile(file)
elif os.path.isdir(ldir):
files = glob.glob("%s/*" % ldir)
print files
for file in files:
self.addFile(file)
else:
# not LC_MESSAGES format; only look for canonical format files
files = glob.glob("%s/*%s*" % (dir, locale))
print files
for file in files:
if os.path.isfile(file):
self.addFile(file)
def save(self, outfile):
# Compute output
output = self.generateDict()
try:
open(outfile,"wb").write(output)
except IOError,msg:
print >> sys.stderr, msg
def save_po_entry(self, fh, msgid, msgstr):
if not msgstr and msgid in self.messages:
msgstr = self.messages[msgid]
if not msgid:
print repr(msgstr)
fh.write("msgid %s\n" % normalize(msgid))
fh.write("msgstr %s\n" % normalize(msgstr))
def save_po(self, template, outfile):
ID = 1
STR = 2
print("reading template from %s" % template)
try:
lines = open(template).readlines()
except IOError, msg:
print >> sys.stderr, msg
sys.exit(1)
self.current_encoding = 'utf-8'
section = None
fuzzy = 0
fh = open(outfile, 'wb')
# Parse the catalog
lno = 0
for l in lines:
lno += 1
# If we get a comment line after a msgstr, this is a new entry
if l[0] == '#':
if section == STR:
self.save_po_entry(fh, msgid, msgstr)
section = None
# Immediately write comments
fh.write("%s" % l)
continue
# Now we are in a msgid section, output previous section
if l.startswith('msgid'):
if section == STR:
self.save_po_entry(fh, msgid, msgstr)
section = ID
l = l[5:]
msgid = msgstr = ''
# Now we are in a msgstr section
elif l.startswith('msgstr'):
section = STR
l = l[6:]
# Skip empty lines
l = l.strip()
if not l:
if section == STR:
self.save_po_entry(fh, msgid, msgstr)
section = None
fh.write("\n")
continue
# XXX: Does this always follow Python escape semantics?
try:
l = eval(l)
if section == ID:
msgid += l
elif section == STR:
msgstr += l
else:
                    print >> sys.stderr, 'Syntax error on %s:%d' % (template, lno), \
'before:'
print >> sys.stderr, l
sys.exit(1)
except SyntaxError:
                print >> sys.stderr, 'Syntax error on %s:%d' % (template, lno)
# Add last entry
if section == STR:
self.save_po_entry(fh, msgid, msgstr)
if __name__ == "__main__":
usage="usage: %prog [-o file] template po-files"
parser=OptionParser(usage=usage)
parser.add_option("-a", action="store", dest="all",
default='', help="process all po files in this directory as locales to be generated")
parser.add_option("-c", action="store", dest="canonical",
default=None, help="canonical name of the locale")
parser.add_option("-f", action="store_true", dest="fuzzy",
default=False, help="use fuzzy strings")
parser.add_option("-o", action="store", dest="output",
default=None, help="output file or directory")
parser.add_option("-s", action="store_true", dest="system",
default=False, help="check system locale directories")
parser.add_option("-p", action="store", dest="make_po",
default=None, help="using the template, create po file containing the merged data")
(options, args) = parser.parse_args()
if len(args) < 1:
parser.usage()
if options.all:
if not options.output:
options.output = ""
catalogs = ['en_US']
files = glob.glob(os.path.join(options.all, "*.po"))
for file in files:
canonical = os.path.basename(file)[0:-3]
if canonical.startswith('merged-'):
continue
if canonical.startswith('peppy-'):
canonical = canonical[6:]
print("Processing locale %s" % canonical)
po = MessageCatalog(args[0], use_fuzzy=options.fuzzy)
po.addFile(file)
for pofile in args[1:]:
print("checking %s" % pofile)
if os.path.isdir(pofile):
po.addDir(pofile, canonical)
else:
po.addFile(pofile)
po.save(os.path.join(options.output, canonical + ".py"))
catalogs.append(canonical)
if options.make_po:
po.save_po(options.make_po, "merged-%s.po" % canonical)
fh = open(os.path.join(options.output, 'peppy_message_catalogs.py'), 'w')
fh.write("supplied_translations = %s" % str(catalogs))
fh.write("\n\nif False:\n # Dummy imports to trick py2exe into including these\n")
for catalog in catalogs[1:]:
# skip en_US
fh.write(" import %s\n" % catalog)
elif options.canonical:
po = MessageCatalog(args[0], use_fuzzy=options.fuzzy)
for pofile in args[1:]:
print("checking %s" % pofile)
if os.path.isdir(pofile):
po.addDir(pofile, options.canonical)
else:
po.addFile(pofile)
if options.output:
po.save(options.output)
elif options.canonical:
po.save(options.canonical + ".py")
if options.make_po:
po.save_po(options.make_po, "merged-%s.po" % options.canonical)

# === fkie-cad/FACT_core | src/test/yara_signature_testing.py | license: gpl-3.0 ===
import logging
import os
from tempfile import TemporaryDirectory
from typing import List
from common_helper_files import get_files_in_dir
from common_helper_yara import compile_rules, get_all_matched_strings, scan
class SignatureTestingMatching:
def __init__(self):
self.tmp_dir = TemporaryDirectory(prefix='fact_software_signature_test')
self.signature_file_path = os.path.join(self.tmp_dir.name, 'test_sig.yc')
self.matches = []
self.test_file = None
self.signature_path = None
self.strings_to_match = None
def check(self, signature_path, test_file):
self.test_file = test_file
self.signature_path = signature_path
self._get_list_of_test_data()
self._execute_yara_matching()
return self._intersect_lists()
def _execute_yara_matching(self):
compile_rules(self.signature_path, self.signature_file_path, external_variables={'test_flag': 'true'})
scan_result = scan(self.signature_file_path, self.test_file)
self.matches = get_all_matched_strings(scan_result)
def _get_list_of_test_data(self):
with open(self.test_file, mode='r', encoding='utf8') as pointer:
self.strings_to_match = pointer.read().split('\n')
self.strings_to_match.pop()
def _intersect_lists(self):
strings_to_match = set(self.strings_to_match)
return strings_to_match.difference(self.matches)
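# Typical use (illustrative paths): check() compiles the rules with test_flag=true,
# scans the test file and returns the expected strings that were NOT matched, e.g.
#   missing = SignatureTestingMatching().check('signatures/', 'test_data/strings.txt')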
class SignatureTestingMeta:
META_FIELDS = ['software_name', 'open_source', 'website', 'description']
missing_meta_fields = []
def check_meta_fields(self, sig_path):
sig_dir = sig_path
list_of_files = get_files_in_dir(sig_dir)
for file in list_of_files:
self.check_for_file(file)
return self.missing_meta_fields
def check_for_file(self, file_path):
with open(file_path, 'r') as fd:
raw = fd.read()
rules = self._split_rules(raw)
for rule in rules:
self.check_meta_fields_of_rule(rule)
@staticmethod
def _split_rules(raw_rules: str) -> List[str]:
rule_lines = raw_rules.splitlines()
rule_start_indices = [
i
for i in range(len(rule_lines))
if rule_lines[i].startswith('rule ')
]
rules = [
''.join(rule_lines[start:end])
for start, end in zip(rule_start_indices, rule_start_indices[1:] + [len(rule_lines)])
]
return rules
def check_meta_fields_of_rule(self, rule: str):
rule_components = [s.strip() for s in rule.split()]
rule_name = rule_components[1].replace('{', '')
if 'meta:' not in rule_components:
self._register_missing_field('ALL', rule_name)
return
for required_field in self.META_FIELDS:
if required_field not in rule_components:
self._register_missing_field(required_field, rule_name)
def _register_missing_field(self, missing_field: str, rule_name: str):
self.missing_meta_fields.append('{} in {}'.format(missing_field, rule_name))
logging.error('CST: No meta field {} for rule {}.'.format(missing_field, rule_name))

# === JWDebelius/scikit-bio | skbio/stats/_subsample/subsample.py | license: bsd-3-clause ===
r"""
Subsampling (:mod:`skbio.math.subsample`)
=========================================
.. currentmodule:: skbio.math.subsample
This module provides functionality for subsampling from vectors of counts.
Functions
---------
.. autosummary::
:toctree: generated/
subsample
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from warnings import warn
import numpy as np
from skbio.util import EfficiencyWarning
try:
from ._subsample import _subsample_without_replacement
except ImportError:
pass
def subsample(counts, n, replace=False):
"""Randomly subsample from a vector of counts, with or without replacement.
Parameters
----------
counts : 1-D array_like
Vector of counts (integers) to randomly subsample from.
n : int
Number of items to subsample from `counts`. Must be less than or equal
to the sum of `counts`.
replace : bool, optional
If ``True``, subsample with replacement. If ``False`` (the default),
subsample without replacement.
Returns
-------
subsampled : ndarray
Subsampled vector of counts where the sum of the elements equals `n`
(i.e., ``subsampled.sum() == n``). Will have the same shape as
`counts`.
Raises
------
TypeError
If `counts` cannot be safely converted to an integer datatype.
ValueError
If `n` is less than zero or greater than the sum of `counts`.
    Warns
    -----
EfficiencyWarning
If the accelerated code isn't present or hasn't been compiled.
Notes
-----
If subsampling is performed without replacement (``replace=False``), a copy
of `counts` is returned if `n` is equal to the number of items in `counts`,
as all items will be chosen from the original vector.
If subsampling is performed with replacement (``replace=True``) and `n` is
equal to the number of items in `counts`, the subsampled vector that is
returned may not necessarily be the same vector as `counts`.
Examples
--------
Subsample 4 items (without replacement) from a vector of counts:
>>> import numpy as np
>>> from skbio.math.subsample import subsample
>>> a = np.array([4, 5, 0, 2, 1])
>>> sub = subsample(a, 4)
>>> sub.sum()
4
>>> sub.shape
(5,)
Trying to subsample an equal number of items (without replacement) results
in the same vector as our input:
>>> subsample([0, 3, 0, 1], 4)
array([0, 3, 0, 1])
Subsample 5 items (with replacement):
>>> sub = subsample([1, 0, 1, 2, 2, 3, 0, 1], 5, replace=True)
>>> sub.sum()
5
>>> sub.shape
(8,)
"""
if n < 0:
raise ValueError("n cannot be negative.")
counts = np.asarray(counts)
counts = counts.astype(int, casting='safe')
if counts.ndim != 1:
raise ValueError("Only 1-D vectors are supported.")
counts_sum = counts.sum()
if n > counts_sum:
raise ValueError("Cannot subsample more items than exist in input "
"counts vector.")
if replace:
probs = counts / counts_sum
result = np.random.multinomial(n, probs)
else:
if counts_sum == n:
result = counts
else:
try:
result = _subsample_without_replacement(counts, n, counts_sum)
except NameError:
warn("Accelerated subsampling without replacement isn't"
" available.", EfficiencyWarning)
nz = counts.nonzero()[0]
unpacked = np.concatenate([np.repeat(np.array(i,), counts[i])
for i in nz])
permuted = np.random.permutation(unpacked)[:n]
result = np.zeros(len(counts), dtype=int)
for p in permuted:
result[p] += 1
return result

# === yl565/statsmodels | statsmodels/tsa/vector_ar/tests/test_var.py | license: bsd-3-clause ===
"""
Test VAR Model
"""
from __future__ import print_function
# pylint: disable=W0612,W0231
from statsmodels.compat.python import (iteritems, StringIO, lrange, BytesIO,
range)
from nose.tools import assert_raises
import nose
import os
import sys
import numpy as np
import statsmodels.api as sm
import statsmodels.tsa.vector_ar.util as util
import statsmodels.tools.data as data_util
from statsmodels.tsa.vector_ar.var_model import VAR
from numpy.testing import (assert_almost_equal, assert_equal, assert_,
assert_allclose)
DECIMAL_12 = 12
DECIMAL_6 = 6
DECIMAL_5 = 5
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
class CheckVAR(object):
# just so pylint won't complain
res1 = None
res2 = None
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_3)
def test_neqs(self):
assert_equal(self.res1.neqs, self.res2.neqs)
def test_nobs(self):
assert_equal(self.res1.avobs, self.res2.nobs)
def test_df_eq(self):
assert_equal(self.res1.df_eq, self.res2.df_eq)
def test_rmse(self):
results = self.res1.results
for i in range(len(results)):
assert_almost_equal(results[i].mse_resid**.5,
eval('self.res2.rmse_'+str(i+1)), DECIMAL_6)
def test_rsquared(self):
results = self.res1.results
for i in range(len(results)):
assert_almost_equal(results[i].rsquared,
eval('self.res2.rsquared_'+str(i+1)), DECIMAL_3)
def test_llf(self):
results = self.res1.results
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_2)
for i in range(len(results)):
assert_almost_equal(results[i].llf,
eval('self.res2.llf_'+str(i+1)), DECIMAL_2)
def test_aic(self):
assert_almost_equal(self.res1.aic, self.res2.aic)
def test_bic(self):
assert_almost_equal(self.res1.bic, self.res2.bic)
def test_hqic(self):
assert_almost_equal(self.res1.hqic, self.res2.hqic)
def test_fpe(self):
assert_almost_equal(self.res1.fpe, self.res2.fpe)
def test_detsig(self):
assert_almost_equal(self.res1.detomega, self.res2.detsig)
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
def get_macrodata():
data = sm.datasets.macrodata.load().data[['realgdp','realcons','realinv']]
names = data.dtype.names
nd = data.view((float,3), type=np.ndarray)
nd = np.diff(np.log(nd), axis=0)
return nd.ravel().view(data.dtype, type=np.ndarray)
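# get_macrodata returns log-differenced realgdp/realcons/realinv from the statsmodels
# macrodata dataset as a structured array, which is what the VAR fixtures below expect.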
def generate_var():
from rpy2.robjects import r
import pandas.rpy.common as prp
r.source('tests/var.R')
return prp.convert_robj(r['result'], use_pandas=False)
def write_generate_var():
result = generate_var()
np.savez('tests/results/vars_results.npz', **result)
class RResults(object):
"""
Simple interface with results generated by "vars" package in R.
"""
def __init__(self):
#data = np.load(resultspath + 'vars_results.npz')
from .results.results_var_data import var_results
data = var_results.__dict__
self.names = data['coefs'].dtype.names
self.params = data['coefs'].view((float, len(self.names)), type=np.ndarray)
self.stderr = data['stderr'].view((float, len(self.names)), type=np.ndarray)
self.irf = data['irf'].item()
self.orth_irf = data['orthirf'].item()
self.nirfs = int(data['nirfs'][0])
self.nobs = int(data['obs'][0])
self.totobs = int(data['totobs'][0])
crit = data['crit'].item()
self.aic = crit['aic'][0]
self.sic = self.bic = crit['sic'][0]
self.hqic = crit['hqic'][0]
self.fpe = crit['fpe'][0]
self.detomega = data['detomega'][0]
self.loglike = data['loglike'][0]
self.nahead = int(data['nahead'][0])
self.ma_rep = data['phis']
self.causality = data['causality']
def close_plots():
try:
import matplotlib.pyplot as plt
plt.close('all')
except ImportError:
pass
_orig_stdout = None
def setup_module():
global _orig_stdout
_orig_stdout = sys.stdout
sys.stdout = StringIO()
def teardown_module():
sys.stdout = _orig_stdout
close_plots()
def have_matplotlib():
try:
import matplotlib
return True
except ImportError:
return False
class CheckIRF(object):
ref = None; res = None; irf = None
k = None
#---------------------------------------------------------------------------
# IRF tests
def test_irf_coefs(self):
self._check_irfs(self.irf.irfs, self.ref.irf)
self._check_irfs(self.irf.orth_irfs, self.ref.orth_irf)
def _check_irfs(self, py_irfs, r_irfs):
for i, name in enumerate(self.res.names):
ref_irfs = r_irfs[name].view((float, self.k), type=np.ndarray)
res_irfs = py_irfs[:, :, i]
assert_almost_equal(ref_irfs, res_irfs)
def test_plot_irf(self):
if not have_matplotlib():
raise nose.SkipTest
import matplotlib.pyplot as plt
self.irf.plot()
plt.close('all')
self.irf.plot(plot_stderr=False)
plt.close('all')
self.irf.plot(impulse=0, response=1)
plt.close('all')
self.irf.plot(impulse=0)
plt.close('all')
self.irf.plot(response=0)
plt.close('all')
self.irf.plot(orth=True)
plt.close('all')
self.irf.plot(impulse=0, response=1, orth=True)
close_plots()
def test_plot_cum_effects(self):
if not have_matplotlib():
raise nose.SkipTest
# I need close after every plot to avoid segfault, see #3158
import matplotlib.pyplot as plt
plt.close('all')
self.irf.plot_cum_effects()
plt.close('all')
self.irf.plot_cum_effects(plot_stderr=False)
plt.close('all')
self.irf.plot_cum_effects(impulse=0, response=1)
plt.close('all')
self.irf.plot_cum_effects(orth=True)
plt.close('all')
self.irf.plot_cum_effects(impulse=0, response=1, orth=True)
close_plots()
class CheckFEVD(object):
fevd = None
#---------------------------------------------------------------------------
# FEVD tests
def test_fevd_plot(self):
if not have_matplotlib():
raise nose.SkipTest
self.fevd.plot()
close_plots()
def test_fevd_repr(self):
self.fevd
def test_fevd_summary(self):
self.fevd.summary()
def test_fevd_cov(self):
# test does not crash
# not implemented
# covs = self.fevd.cov()
pass
class TestVARResults(CheckIRF, CheckFEVD):
@classmethod
def setupClass(cls):
cls.p = 2
cls.data = get_macrodata()
cls.model = VAR(cls.data)
cls.names = cls.model.endog_names
cls.ref = RResults()
cls.k = len(cls.ref.names)
cls.res = cls.model.fit(maxlags=cls.p)
cls.irf = cls.res.irf(cls.ref.nirfs)
cls.nahead = cls.ref.nahead
cls.fevd = cls.res.fevd()
def test_constructor(self):
# make sure this works with no names
ndarr = self.data.view((float, 3), type=np.ndarray)
model = VAR(ndarr)
res = model.fit(self.p)
def test_names(self):
assert_equal(self.model.endog_names, self.ref.names)
model2 = VAR(self.data)
assert_equal(model2.endog_names, self.ref.names)
def test_get_eq_index(self):
assert(type(self.res.names) is list)
for i, name in enumerate(self.names):
idx = self.res.get_eq_index(i)
idx2 = self.res.get_eq_index(name)
assert_equal(idx, i)
assert_equal(idx, idx2)
assert_raises(Exception, self.res.get_eq_index, 'foo')
def test_repr(self):
# just want this to work
foo = str(self.res)
bar = repr(self.res)
def test_params(self):
assert_almost_equal(self.res.params, self.ref.params, DECIMAL_3)
def test_cov_params(self):
# do nothing for now
self.res.cov_params
def test_cov_ybar(self):
self.res.cov_ybar()
def test_tstat(self):
self.res.tvalues
def test_pvalues(self):
self.res.pvalues
def test_summary(self):
summ = self.res.summary()
def test_detsig(self):
assert_almost_equal(self.res.detomega, self.ref.detomega)
def test_aic(self):
assert_almost_equal(self.res.aic, self.ref.aic)
def test_bic(self):
assert_almost_equal(self.res.bic, self.ref.bic)
def test_hqic(self):
assert_almost_equal(self.res.hqic, self.ref.hqic)
def test_fpe(self):
assert_almost_equal(self.res.fpe, self.ref.fpe)
def test_lagorder_select(self):
ics = ['aic', 'fpe', 'hqic', 'bic']
for ic in ics:
res = self.model.fit(maxlags=10, ic=ic, verbose=True)
assert_raises(Exception, self.model.fit, ic='foo')
def test_nobs(self):
assert_equal(self.res.nobs, self.ref.nobs)
def test_stderr(self):
assert_almost_equal(self.res.stderr, self.ref.stderr, DECIMAL_4)
def test_loglike(self):
assert_almost_equal(self.res.llf, self.ref.loglike)
def test_ma_rep(self):
ma_rep = self.res.ma_rep(self.nahead)
assert_almost_equal(ma_rep, self.ref.ma_rep)
#--------------------------------------------------
# Lots of tests to make sure stuff works...need to check correctness
def test_causality(self):
causedby = self.ref.causality['causedby']
for i, name in enumerate(self.names):
variables = self.names[:i] + self.names[i + 1:]
result = self.res.test_causality(name, variables, kind='f')
assert_almost_equal(result['pvalue'], causedby[i], DECIMAL_4)
rng = lrange(self.k)
rng.remove(i)
result2 = self.res.test_causality(i, rng, kind='f')
assert_almost_equal(result['pvalue'], result2['pvalue'], DECIMAL_12)
# make sure works
result = self.res.test_causality(name, variables, kind='wald')
# corner cases
_ = self.res.test_causality(self.names[0], self.names[1])
_ = self.res.test_causality(0, 1)
assert_raises(Exception,self.res.test_causality, 0, 1, kind='foo')
def test_select_order(self):
result = self.model.fit(10, ic='aic', verbose=True)
result = self.model.fit(10, ic='fpe', verbose=True)
# bug
model = VAR(self.model.endog)
model.select_order()
def test_is_stable(self):
# may not necessarily be true for other datasets
assert(self.res.is_stable(verbose=True))
def test_acf(self):
# test that it works...for now
acfs = self.res.acf(10)
# defaults to nlags=lag_order
acfs = self.res.acf()
assert(len(acfs) == self.p + 1)
def test_acorr(self):
acorrs = self.res.acorr(10)
def test_forecast(self):
point = self.res.forecast(self.res.y[-5:], 5)
def test_forecast_interval(self):
y = self.res.y[:-self.p:]
point, lower, upper = self.res.forecast_interval(y, 5)
def test_plot_sim(self):
if not have_matplotlib():
raise nose.SkipTest
self.res.plotsim(steps=100)
close_plots()
def test_plot(self):
if not have_matplotlib():
raise nose.SkipTest
self.res.plot()
close_plots()
def test_plot_acorr(self):
if not have_matplotlib():
raise nose.SkipTest
self.res.plot_acorr()
close_plots()
def test_plot_forecast(self):
if not have_matplotlib():
raise nose.SkipTest
self.res.plot_forecast(5)
close_plots()
def test_reorder(self):
#manually reorder
data = self.data.view((float,3), type=np.ndarray)
names = self.names
data2 = np.append(np.append(data[:,2,None], data[:,0,None], axis=1), data[:,1,None], axis=1)
names2 = []
names2.append(names[2])
names2.append(names[0])
names2.append(names[1])
res2 = VAR(data2).fit(maxlags=self.p)
#use reorder function
res3 = self.res.reorder(['realinv','realgdp', 'realcons'])
#check if the main results match
assert_almost_equal(res2.params, res3.params)
assert_almost_equal(res2.sigma_u, res3.sigma_u)
assert_almost_equal(res2.bic, res3.bic)
assert_almost_equal(res2.stderr, res3.stderr)
def test_pickle(self):
fh = BytesIO()
#test wrapped results load save pickle
self.res.save(fh)
fh.seek(0,0)
res_unpickled = self.res.__class__.load(fh)
assert_(type(res_unpickled) is type(self.res))
class E1_Results(object):
"""
Results from Lutkepohl (2005) using E2 dataset
"""
def __init__(self):
# Lutkepohl p. 120 results
# I asked the author about these results and there is probably rounding
# error in the book, so I adjusted these test results to match what is
# coming out of the Python (double-checked) calculations
self.irf_stderr = np.array([[[.125, 0.546, 0.664 ],
[0.032, 0.139, 0.169],
[0.026, 0.112, 0.136]],
[[0.129, 0.547, 0.663],
[0.032, 0.134, 0.163],
[0.026, 0.108, 0.131]],
[[0.084, .385, .479],
[.016, .079, .095],
[.016, .078, .103]]])
self.cum_irf_stderr = np.array([[[.125, 0.546, 0.664 ],
[0.032, 0.139, 0.169],
[0.026, 0.112, 0.136]],
[[0.149, 0.631, 0.764],
[0.044, 0.185, 0.224],
[0.033, 0.140, 0.169]],
[[0.099, .468, .555],
[.038, .170, .205],
[.033, .150, .185]]])
self.lr_stderr = np.array([[.134, .645, .808],
[.048, .230, .288],
[.043, .208, .260]])
basepath = os.path.split(sm.__file__)[0]
resultspath = basepath + '/tsa/vector_ar/tests/results/'
def get_lutkepohl_data(name='e2'):
lut_data = basepath + '/tsa/vector_ar/data/'
path = lut_data + '%s.dat' % name
return util.parse_lutkepohl_data(path)
def test_lutkepohl_parse():
files = ['e%d' % i for i in range(1, 7)]
for f in files:
get_lutkepohl_data(f)
class TestVARResultsLutkepohl(object):
"""
Verify calculations using results from Lutkepohl's book
"""
def __init__(self):
self.p = 2
sdata, dates = get_lutkepohl_data('e1')
data = data_util.struct_to_ndarray(sdata)
adj_data = np.diff(np.log(data), axis=0)
# est = VAR(adj_data, p=2, dates=dates[1:], names=names)
self.model = VAR(adj_data[:-16], dates=dates[1:-16], freq='Q')
self.res = self.model.fit(maxlags=self.p)
self.irf = self.res.irf(10)
self.lut = E1_Results()
def test_approx_mse(self):
# 3.5.18, p. 99
mse2 = np.array([[25.12, .580, 1.300],
[.580, 1.581, .586],
[1.300, .586, 1.009]]) * 1e-4
assert_almost_equal(mse2, self.res.forecast_cov(3)[1],
DECIMAL_3)
def test_irf_stderr(self):
irf_stderr = self.irf.stderr(orth=False)
for i in range(1, 1 + len(self.lut.irf_stderr)):
assert_almost_equal(np.round(irf_stderr[i], 3),
self.lut.irf_stderr[i-1])
def test_cum_irf_stderr(self):
stderr = self.irf.cum_effect_stderr(orth=False)
for i in range(1, 1 + len(self.lut.cum_irf_stderr)):
assert_almost_equal(np.round(stderr[i], 3),
self.lut.cum_irf_stderr[i-1])
def test_lr_effect_stderr(self):
stderr = self.irf.lr_effect_stderr(orth=False)
orth_stderr = self.irf.lr_effect_stderr(orth=True)
assert_almost_equal(np.round(stderr, 3), self.lut.lr_stderr)
def test_get_trendorder():
results = {
'c' : 1,
'nc' : 0,
'ct' : 2,
'ctt' : 3
}
for t, trendorder in iteritems(results):
assert(util.get_trendorder(t) == trendorder)
def test_var_constant():
# see 2043
import datetime
from pandas import DataFrame, DatetimeIndex
series = np.array([[2., 2.], [1, 2.], [1, 2.], [1, 2.], [1., 2.]])
data = DataFrame(series)
d = datetime.datetime.now()
delta = datetime.timedelta(days=1)
index = []
for i in range(data.shape[0]):
index.append(d)
d += delta
data.index = DatetimeIndex(index)
model = VAR(data)
assert_raises(ValueError, model.fit, 1)
def test_var_trend():
# see 2271
data = get_macrodata().view((float,3), type=np.ndarray)
model = sm.tsa.VAR(data)
results = model.fit(4) #, trend = 'c')
irf = results.irf(10)
data_nc = data - data.mean(0)
model_nc = sm.tsa.VAR(data_nc)
results_nc = model_nc.fit(4, trend = 'nc')
assert_raises(ValueError, model.fit, 4, trend='t')
def test_irf_trend():
# test for irf with different trend see #1636
# this is a rough comparison by adding trend or subtracting mean to data
# to get similar AR coefficients and IRF
data = get_macrodata().view((float,3), type=np.ndarray)
model = sm.tsa.VAR(data)
results = model.fit(4) #, trend = 'c')
irf = results.irf(10)
data_nc = data - data.mean(0)
model_nc = sm.tsa.VAR(data_nc)
results_nc = model_nc.fit(4, trend = 'nc')
irf_nc = results_nc.irf(10)
assert_allclose(irf_nc.stderr()[1:4], irf.stderr()[1:4], rtol=0.01)
trend = 1e-3 * np.arange(len(data)) / (len(data) - 1)
# for pandas version, currently not used, if data is a pd.DataFrame
#data_t = pd.DataFrame(data.values + trend[:,None], index=data.index, columns=data.columns)
data_t = data + trend[:,None]
model_t = sm.tsa.VAR(data_t)
results_t = model_t.fit(4, trend = 'ct')
irf_t = results_t.irf(10)
assert_allclose(irf_t.stderr()[1:4], irf.stderr()[1:4], rtol=0.03)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
exit=False)

# === alexforencich/xfcp | python/xfcp/gty_node.py | license: mit ===
"""
Copyright (c) 2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from . import node
PRBS_MODE_OFF = 0x0
PRBS_MODE_PRBS7 = 0x1
PRBS_MODE_PRBS9 = 0x2
PRBS_MODE_PRBS15 = 0x3
PRBS_MODE_PRBS23 = 0x4
PRBS_MODE_PRBS31 = 0x5
PRBS_MODE_PCIE = 0x8
PRBS_MODE_SQ_2UI = 0x9
PRBS_MODE_SQ = 0xA
prbs_mode_mapping = {
'off': PRBS_MODE_OFF,
'prbs7': PRBS_MODE_PRBS7,
'prbs9': PRBS_MODE_PRBS9,
'prbs15': PRBS_MODE_PRBS15,
'prbs23': PRBS_MODE_PRBS23,
'prbs31': PRBS_MODE_PRBS31,
'pcie': PRBS_MODE_PCIE,
'sq_2ui': PRBS_MODE_SQ_2UI,
'sq': PRBS_MODE_SQ
}
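# These string keys can be passed directly to set_tx_prbs_mode()/set_rx_prbs_mode()
# on a channel node below, e.g. (illustrative) ch.set_tx_prbs_mode('prbs31').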
class GTYE3CommonNode(node.MemoryNode):
def masked_read(self, addr, mask):
return self.read_word(addr) & mask
def masked_write(self, addr, mask, val):
return self.write_word(addr, (self.read_word(addr) & ~mask) | (val & mask))
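    # masked_write is a read-modify-write: only the bits set in `mask` are changed.
    # All of the register accessors below are thin wrappers around these two helpers.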
def get_common_cfg0(self):
return self.read_word(0x0009*2)
def set_common_cfg0(self, val):
self.write_word(0x0009*2, val)
def get_common_cfg1(self):
return self.read_word(0x0089*2)
def set_common_cfg1(self, val):
self.write_word(0x0089*2, val)
def get_qpll0_cfg0(self):
return self.read_word(0x0008*2)
def set_qpll0_cfg0(self, val):
self.write_word(0x0008*2, val)
def get_qpll0_cfg1(self):
return self.read_word(0x0010*2)
def set_qpll0_cfg1(self, val):
self.write_word(0x0010*2, val)
def get_qpll0_cfg2(self):
return self.read_word(0x0011*2)
def set_qpll0_cfg2(self, val):
self.write_word(0x0011*2, val)
def get_qpll0_cfg3(self):
return self.read_word(0x0015*2)
def set_qpll0_cfg3(self, val):
self.write_word(0x0015*2, val)
def get_qpll0_cfg4(self):
return self.read_word(0x0030*2)
def set_qpll0_cfg4(self, val):
self.write_word(0x0030*2, val)
def get_qpll0_lock_cfg(self):
return self.read_word(0x0012*2)
def set_qpll0_lock_cfg(self, val):
self.write_word(0x0012*2, val)
def get_qpll0_init_cfg0(self):
return self.read_word(0x0013*2)
def set_qpll0_init_cfg0(self, val):
self.write_word(0x0013*2, val)
def get_qpll0_init_cfg1(self):
return self.masked_read(0x0014*2, 0xff00) >> 8
def set_qpll0_init_cfg1(self, val):
self.masked_write(0x0014*2, 0xff00, val << 8)
def get_qpll0_clkout_rate(self):
return bool(self.masked_read(0x000e*2, 0x0001))
def set_qpll0_clkout_rate(self, val):
self.masked_write(0x000e*2, 0x0001, 0x0001 if val else 0x0000)
def get_qpll0_fbdiv(self):
return self.masked_read(0x0014*2, 0x00ff)+2
def set_qpll0_fbdiv(self, val):
self.masked_write(0x0014*2, 0x00ff, val-2)
def get_qpll0_cp(self):
return self.masked_read(0x0014*2, 0x03ff)
def set_qpll0_cp(self, val):
self.masked_write(0x0014*2, 0x03ff, val)
def get_qpll0_refclk_div(self):
return self.masked_read(0x0018*2, 0x0780) >> 7
def set_qpll0_refclk_div(self, val):
self.masked_write(0x0018*2, 0x00780, val << 7)
def get_qpll0_ips_refclk_sel(self):
return self.masked_read(0x0018*2, 0x0038) >> 3
def set_qpll0_ips_refclk_sel(self, val):
self.masked_write(0x0018*2, 0x00038, val << 3)
def get_qpll0_ips_en(self):
return bool(self.masked_read(0x0018*2, 0x0001))
def set_qpll0_ips_en(self, val):
self.masked_write(0x0018*2, 0x0001, 0x0001 if val else 0x0000)
def get_qpll0_lpf(self):
return self.masked_read(0x0019*2, 0x03ff)
def set_qpll0_lpf(self, val):
self.masked_write(0x0019*2, 0x03ff, val)
def get_qpll0_cfg1_g3(self):
return self.read_word(0x001a*2)
def set_qpll0_cfg1_g3(self, val):
self.write_word(0x001a*2, val)
def get_qpll0_cfg2_g3(self):
return self.read_word(0x001b*2)
def set_qpll0_cfg2_g3(self, val):
self.write_word(0x001b*2, val)
def get_qpll0_lpf_g3(self):
return self.masked_read(0x001c*2, 0x03ff)
def set_qpll0_lpf_g3(self, val):
self.masked_write(0x001c*2, 0x03ff, val)
def get_qpll0_lock_cfg_g3(self):
return self.read_word(0x001d*2)
def set_qpll0_lock_cfg_g3(self, val):
self.write_word(0x001d*2, val)
def get_qpll0_fbdiv_g3(self):
return self.masked_read(0x001f*2, 0x00ff)+2
def set_qpll0_fbdiv_g3(self, val):
self.masked_write(0x001f*2, 0x00ff, val-2)
def get_rx_rec_clk_out0_sel(self):
return self.masked_read(0x001f*2, 0x0003)
def set_rx_rec_clk_out0_sel(self, val):
self.masked_write(0x001f*2, 0x0003, val)
def get_qpll0_sdm_cfg0(self):
return self.read_word(0x0020*2)
def set_qpll0_sdm_cfg0(self, val):
self.write_word(0x0020*2, val)
def get_qpll0_sdm_cfg1(self):
return self.read_word(0x0021*2)
def set_qpll0_sdm_cfg1(self, val):
self.write_word(0x0021*2, val)
def get_qpll0_sdm_cfg2(self):
return self.read_word(0x0024*2)
def set_qpll0_sdm_cfg2(self, val):
self.write_word(0x0024*2, val)
def get_qpll0_cp_g3(self):
return self.masked_read(0x0025*2, 0x03ff)
def set_qpll0_cp_g3(self, val):
self.masked_write(0x0025*2, 0x03ff, val)
def get_qpll1_cfg0(self):
return self.read_word(0x0088*2)
def set_qpll1_cfg0(self, val):
self.write_word(0x0088*2, val)
def get_qpll1_cfg1(self):
return self.read_word(0x0090*2)
def set_qpll1_cfg1(self, val):
self.write_word(0x0090*2, val)
def get_qpll1_cfg2(self):
return self.read_word(0x0091*2)
def set_qpll1_cfg2(self, val):
self.write_word(0x0091*2, val)
def get_qpll1_cfg3(self):
return self.read_word(0x0095*2)
def set_qpll1_cfg3(self, val):
self.write_word(0x0095*2, val)
def get_qpll1_cfg4(self):
return self.read_word(0x00b0*2)
def set_qpll1_cfg4(self, val):
self.write_word(0x00b0*2, val)
def get_qpll1_lock_cfg(self):
return self.read_word(0x0092*2)
def set_qpll1_lock_cfg(self, val):
self.write_word(0x0092*2, val)
def get_qpll1_init_cfg0(self):
return self.read_word(0x0093*2)
def set_qpll1_init_cfg0(self, val):
self.write_word(0x0093*2, val)
def get_qpll1_init_cfg1(self):
return self.masked_read(0x0094*2, 0xff00) >> 8
def set_qpll1_init_cfg1(self, val):
self.masked_write(0x0094*2, 0xff00, val << 8)
def get_qpll1_clkout_rate(self):
return bool(self.masked_read(0x008e*2, 0x0001))
def set_qpll1_clkout_rate(self, val):
self.masked_write(0x008e*2, 0x0001, 0x0001 if val else 0x0000)
def get_qpll1_fbdiv(self):
return self.masked_read(0x0094*2, 0x00ff)+2
def set_qpll1_fbdiv(self, val):
self.masked_write(0x0094*2, 0x00ff, val-2)
def get_qpll1_cp(self):
return self.masked_read(0x0096*2, 0x03ff)
def set_qpll1_cp(self, val):
self.masked_write(0x0096*2, 0x03ff, val)
def get_qpll1_refclk_div(self):
return self.masked_read(0x0098*2, 0x0780) >> 7
def set_qpll1_refclk_div(self, val):
self.masked_write(0x0098*2, 0x00780, val << 7)
def get_qpll1_ips_refclk_sel(self):
return self.masked_read(0x0098*2, 0x0038) >> 3
def set_qpll1_ips_refclk_sel(self, val):
self.masked_write(0x0098*2, 0x00038, val << 3)
def get_qpll1_ips_en(self):
return bool(self.masked_read(0x0098*2, 0x0040))
def set_qpll1_ips_en(self, val):
self.masked_write(0x0098*2, 0x0040, 0x0040 if val else 0x0000)
def get_qpll1_lpf(self):
return self.masked_read(0x0099*2, 0x03ff)
def set_qpll1_lpf(self, val):
self.masked_write(0x0099*2, 0x03ff, val)
def get_qpll1_cfg1_g3(self):
return self.read_word(0x009a*2)
def set_qpll1_cfg1_g3(self, val):
self.write_word(0x009a*2, val)
def get_qpll1_cfg2_g3(self):
return self.read_word(0x009b*2)
def set_qpll1_cfg2_g3(self, val):
self.write_word(0x009b*2, val)
def get_qpll1_lpf_g3(self):
return self.masked_read(0x009c*2, 0x03ff)
def set_qpll1_lpf_g3(self, val):
self.masked_write(0x009c*2, 0x03ff, val)
def get_qpll1_lock_cfg_g3(self):
return self.read_word(0x009d*2)
def set_qpll1_lock_cfg_g3(self, val):
self.write_word(0x009d*2, val)
def get_qpll1_fbdiv_g3(self):
return self.masked_read(0x009f*2, 0x00ff)+2
def set_qpll1_fbdiv_g3(self, val):
self.masked_write(0x009f*2, 0x00ff, val-2)
def get_rx_rec_clk_out1_sel(self):
return self.masked_read(0x009f*2, 0x0003)
def set_rx_rec_clk_out1_sel(self, val):
self.masked_write(0x009f*2, 0x0003, val)
def get_qpll1_sdm_cfg0(self):
return self.read_word(0x00a0*2)
def set_qpll1_sdm_cfg0(self, val):
self.write_word(0x00a0*2, val)
def get_qpll1_sdm_cfg1(self):
return self.read_word(0x00a1*2)
def set_qpll1_sdm_cfg1(self, val):
self.write_word(0x00a1*2, val)
def get_qpll1_sdm_cfg2(self):
return self.read_word(0x00a4*2)
def set_qpll1_sdm_cfg2(self, val):
self.write_word(0x00a4*2, val)
def get_qpll1_cp_g3(self):
return self.masked_read(0x00a5*2, 0x03ff)
def set_qpll1_cp_g3(self, val):
self.masked_write(0x00a5*2, 0x03ff, val)
node.register(GTYE3CommonNode, 0x8A82)
class GTYE4CommonNode(GTYE3CommonNode):
pass
node.register(GTYE4CommonNode, 0x8A92)
class GTYE3ChannelNode(node.MemoryNode):
def __init__(self, obj=None):
self.rx_prbs_error = False
super().__init__(obj)
def masked_read(self, addr, mask):
return self.read_word(addr) & mask
def masked_write(self, addr, mask, val):
return self.write_word(addr, (self.read_word(addr) & ~mask) | (val & mask))
def reset(self):
self.masked_write(0xfe00, 0x0001, 0x0001)
def tx_reset(self):
self.masked_write(0xfe00, 0x0002, 0x0002)
def rx_reset(self):
self.masked_write(0xfe00, 0x0004, 0x0004)
def get_tx_reset_done(self):
return bool(self.masked_read(0xfe00, 0x0200))
def get_rx_reset_done(self):
return bool(self.masked_read(0xfe00, 0x0400))
def get_tx_polarity(self):
return bool(self.masked_read(0xfe02, 0x0001))
def set_tx_polarity(self, val):
self.masked_write(0xfe02, 0x0001, 0x0001 if val else 0x0000)
def get_rx_polarity(self):
return bool(self.masked_read(0xfe02, 0x0002))
def set_rx_polarity(self, val):
self.masked_write(0xfe02, 0x0002, 0x0002 if val else 0x0000)
def get_tx_prbs_mode(self):
return self.masked_read(0xfe04, 0x000f)
def set_tx_prbs_mode(self, val):
if type(val) is str:
val = prbs_mode_mapping[val]
self.masked_write(0xfe04, 0x000f, val)
def get_rx_prbs_mode(self):
return self.masked_read(0xfe04, 0x00f0) >> 4
def set_rx_prbs_mode(self, val):
if type(val) is str:
val = prbs_mode_mapping[val]
self.masked_write(0xfe04, 0x00f0, val << 4)
def tx_prbs_force_error(self):
self.masked_write(0xfe06, 0x0001, 0x0001)
def rx_err_count_reset(self):
self.masked_write(0xfe06, 0x0002, 0x0002)
def is_rx_prbs_error(self):
val = self.rx_prbs_error
self.rx_prbs_error = False
return val | bool(self.masked_read(0xfe06, 0x0004))
def is_rx_prbs_locked(self):
w = self.masked_read(0xfe06, 0x000c)
self.rx_prbs_error |= bool(w & 0x0004)
return bool(w & 0x0008)
def get_tx_elecidle(self):
return bool(self.masked_read(0xfe08, 0x0001))
def set_tx_elecidle(self, val):
self.masked_write(0xfe08, 0x0001, 0x0001 if val else 0x0000)
def get_tx_inhibit(self):
return bool(self.masked_read(0xfe08, 0x0002))
def set_tx_inhibit(self, val):
self.masked_write(0xfe08, 0x0002, 0x0002 if val else 0x0000)
def get_tx_diffctrl(self):
return self.masked_read(0xfe0a, 0x001f)
def set_tx_diffctrl(self, val):
self.masked_write(0xfe0a, 0x001f, val)
def get_tx_maincursor(self):
return self.masked_read(0xfe0c, 0x007f)
def set_tx_maincursor(self, val):
self.masked_write(0xfe0c, 0x007f, val)
def get_tx_postcursor(self):
return self.masked_read(0xfe0c, 0x001f)
def set_tx_postcursor(self, val):
self.masked_write(0xfe0c, 0x001f, val)
def get_tx_precursor(self):
return self.masked_read(0xfe0e, 0x001f)
def set_tx_precursor(self, val):
self.masked_write(0xfe0e, 0x001f, val)
def get_rx_prbs_err_count(self):
return self.read_dword(0x025e*2)
node.register(GTYE3ChannelNode, 0x8A83)
class GTYE4ChannelNode(GTYE3ChannelNode):
pass
node.register(GTYE4ChannelNode, 0x8A93)
| mit | -4,510,853,304,519,722,500 | 27.105051 | 83 | 0.643186 | false |
dabeaz/bitey | test/test_ctest.py | 1 | 2308 | import sys
sys.path.insert(0,"..")
import unittest
import bitey
import ctest
import ctypes
class TestTypes(unittest.TestCase):
def test_char(self):
r = ctest.add_char(4,5)
self.assertEqual(r,9)
def test_short(self):
r = ctest.add_short(4,5)
self.assertEqual(r,9)
def test_int(self):
r = ctest.add_int(4,5)
self.assertEqual(r,9)
def test_long(self):
r = ctest.add_long(4,5)
self.assertEqual(r,9)
def test_longlong(self):
r = ctest.add_longlong(4,5)
self.assertEqual(r,9)
def test_float(self):
r = ctest.add_float(2.1, 4.2)
self.assertAlmostEqual(r, 6.3, 5)
def test_double(self):
r = ctest.add_double(2.1, 4.2)
self.assertEqual(r, 2.1 + 4.2)
class TestPointers(unittest.TestCase):
def test_mutate_short(self):
a = ctypes.c_short()
a.value = 2
r = ctest.mutate_short(a)
self.assertEqual(a.value, 4)
def test_mutate_int(self):
a = ctypes.c_int()
a.value = 2
r = ctest.mutate_int(a)
self.assertEqual(a.value, 4)
def test_mutate_long(self):
a = ctypes.c_long()
a.value = 2
r = ctest.mutate_long(a)
self.assertEqual(a.value, 4)
def test_mutate_longlong(self):
a = ctypes.c_longlong()
a.value = 2
r = ctest.mutate_longlong(a)
self.assertEqual(a.value, 4)
def test_mutate_float(self):
a = ctypes.c_float()
a.value = 2
r = ctest.mutate_float(a)
self.assertEqual(a.value, 4)
def test_mutate_double(self):
a = ctypes.c_double()
a.value = 2
r = ctest.mutate_double(a)
self.assertEqual(a.value, 4)
class TestArrays(unittest.TestCase):
def test_int(self):
a = (ctypes.c_int * 4)(1,2,3,4)
r = ctest.arr_sum_int(a)
self.assertEqual(r,10)
def test_double(self):
a = (ctypes.c_double *4)(1,2,3,4)
r = ctest.arr_sum_double(a)
self.assertEqual(r, 10.0)
class TestStructure(unittest.TestCase):
def test_Point(self):
a = ctest.Point(3,4)
b = ctest.Point(6,8)
d = ctest.distance(a,b)
self.assertEqual(d,5.0)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -36,004,984,067,493,956 | 23.041667 | 41 | 0.558059 | false |
phatblat/AbletonLiveMIDIRemoteScripts | Push2/item_lister_component.py | 1 | 9031 | # Source Generated with Decompyle++
# File: item_lister_component.pyc (Python 2.5)
from __future__ import absolute_import
from ableton.v2.base import forward_property, index_if, listens, SlotManager, Subject
from ableton.v2.control_surface import Component, CompoundComponent
from ableton.v2.control_surface.control import control_list, ButtonControl, RadioButtonControl
class SimpleItemSlot(SlotManager, Subject):
__events__ = ('name',)
def __init__(self, item = None, name = '', nesting_level = -1, icon = '', *a, **k):
        super(SimpleItemSlot, self).__init__(*a, **k)
self._item = item
self._name = name
self._nesting_level = nesting_level
self._icon = icon
        # Only listen for renames when the item actually supports name listeners.
        self._SimpleItemSlot__on_name_changed.subject = self._item if hasattr(self._item, 'name_has_listener') else None
def __on_name_changed(self):
self.notify_name()
self._name = self._item.name
_SimpleItemSlot__on_name_changed = listens('name')(__on_name_changed)
def name(self):
return self._name
name = property(name)
def item(self):
return self._item
item = property(item)
def nesting_level(self):
return self._nesting_level
nesting_level = property(nesting_level)
def icon(self):
return self._icon
icon = property(icon)
class ItemSlot(SimpleItemSlot):
def __init__(self, item = None, nesting_level = 0, **k):
        assert item is not None
        super(ItemSlot, self).__init__(item = item, name = item.name, nesting_level = nesting_level, **k)
def __eq__(self, other):
        return id(self) == id(other) or self._item == other
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(self._item)
_live_ptr = forward_property('_item')('_live_ptr')
class ItemProvider(Subject):
''' General interface to implement for providers used in ItemListerComponent '''
__events__ = ('items', 'selected_item')
def items(self):
'''
Returns a list of tuples, each of which contains an item
followed by its nesting level
'''
return []
items = property(items)
def selected_item(self):
pass
selected_item = property(selected_item)
class ItemListerComponentBase(CompoundComponent):
__events__ = ('items',)
def __init__(self, item_provider = ItemProvider(), num_visible_items = 8, *a, **k):
        super(ItemListerComponentBase, self).__init__(*a, **k)
self._item_offset = 0
self._item_provider = item_provider
self._items = []
self._num_visible_items = num_visible_items
self._ItemListerComponentBase__on_items_changed.subject = item_provider
self.update_items()
def reset_offset(self):
self._item_offset = 0
def items(self):
return self._items
items = property(items)
def item_provider(self):
return self._item_provider
item_provider = property(item_provider)
def _get_item_offset(self):
return self._item_offset
def _set_item_offset(self, offset):
self._item_offset = offset
self.update_items()
item_offset = property(_get_item_offset, _set_item_offset)
def can_scroll_left(self):
return self.item_offset > 0
def can_scroll_right(self):
items = self._item_provider.items[self.item_offset:]
return len(items) > self._num_visible_items
def scroll_left(self):
self.item_offset -= 1
def scroll_right(self):
self.item_offset += 1
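    # _adjust_offset() clamps the scroll offset so that the window of visible
    # items never extends past the end of the provider's item list.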
def _adjust_offset(self):
num_raw_items = len(self._item_provider.items)
list_length = self._num_visible_items
if list_length >= num_raw_items or self._item_offset >= num_raw_items - list_length:
self._item_offset = max(0, num_raw_items - list_length)
def update_items(self):
for item in self._items:
self.disconnect_disconnectable(item)
self._adjust_offset()
items = self._item_provider.items[self.item_offset:]
num_slots = min(self._num_visible_items, len(items))
def create_slot(index, item, nesting_level):
slot = None
if index == 0 and self.can_scroll_left():
slot = SimpleItemSlot(icon = 'page_left.svg')
slot.is_scrolling_indicator = True
elif index == num_slots - 1 and self.can_scroll_right():
slot = SimpleItemSlot(icon = 'page_right.svg')
slot.is_scrolling_indicator = True
else:
slot = ItemSlot(item = item, nesting_level = nesting_level)
slot.is_scrolling_indicator = False
return slot
new_items = []
        if num_slots > 0:
            # Rebuilt from the decompiled list comprehension: one slot per visible item.
            new_items = [ create_slot(index, item, nesting_level) for index, (item, nesting_level) in enumerate(items[:num_slots]) ]
self._items = map(self.register_disconnectable, new_items)
self.notify_items()
def __on_items_changed(self):
self.update_items()
_ItemListerComponentBase__on_items_changed = listens('items')(__on_items_changed)
class ScrollComponent(Component):
__events__ = ('scroll',)
button = ButtonControl(color = 'ItemNavigation.ItemNotSelected', repeat = True)
    @button.pressed
    def button(self, button):
        self.notify_scroll()
class ScrollOverlayComponent(CompoundComponent):
def __init__(self, *a, **k):
        super(ScrollOverlayComponent, self).__init__(*a, **k)
(self._scroll_left_component, self._scroll_right_component) = self.register_components(ScrollComponent(is_enabled = False), ScrollComponent(is_enabled = False))
self._ScrollOverlayComponent__on_scroll_left.subject = self._scroll_left_component
self._ScrollOverlayComponent__on_scroll_right.subject = self._scroll_right_component
scroll_left_layer = forward_property('_scroll_left_component')('layer')
scroll_right_layer = forward_property('_scroll_right_component')('layer')
def can_scroll_left(self):
raise NotImplementedError
def can_scroll_right(self):
raise NotImplementedError
def scroll_left(self):
raise NotImplementedError
def scroll_right(self):
raise NotImplementedError
def update_scroll_buttons(self):
if self.is_enabled():
self._scroll_left_component.set_enabled(self.can_scroll_left())
self._scroll_right_component.set_enabled(self.can_scroll_right())
def __on_scroll_left(self):
self.scroll_left()
_ScrollOverlayComponent__on_scroll_left = listens('scroll')(__on_scroll_left)
def __on_scroll_right(self):
self.scroll_right()
_ScrollOverlayComponent__on_scroll_right = listens('scroll')(__on_scroll_right)
def update(self):
super(ScrollOverlayComponent, self).update()
if self.is_enabled():
self.update_scroll_buttons()
class ItemListerComponent(ItemListerComponentBase):
select_buttons = control_list(RadioButtonControl, checked_color = 'ItemNavigation.ItemSelected', unchecked_color = 'ItemNavigation.ItemNotSelected', unavailable_color = 'ItemNavigation.NoItem')
def __init__(self, *a, **k):
        super(ItemListerComponent, self).__init__(*a, **k)
self._scroll_overlay = self.register_component(ScrollOverlayComponent(is_enabled = True))
self._scroll_overlay.can_scroll_left = self.can_scroll_left
self._scroll_overlay.can_scroll_right = self.can_scroll_right
self._scroll_overlay.scroll_left = self.scroll_left
self._scroll_overlay.scroll_right = self.scroll_right
self._ItemListerComponent__on_items_changed.subject = self
self._ItemListerComponent__on_selection_changed.subject = self._item_provider
scroll_left_layer = forward_property('_scroll_overlay')('scroll_left_layer')
scroll_right_layer = forward_property('_scroll_overlay')('scroll_right_layer')
def __on_items_changed(self):
self.select_buttons.control_count = len(self.items)
self._update_button_selection()
self._scroll_overlay.update_scroll_buttons()
_ItemListerComponent__on_items_changed = listens('items')(__on_items_changed)
def __on_selection_changed(self):
self._update_button_selection()
_ItemListerComponent__on_selection_changed = listens('selected_item')(__on_selection_changed)
def _update_button_selection(self):
selected_item = self._item_provider.selected_item
items = self.items
        selected_index = index_if(lambda item: item == selected_item, items)
if selected_index >= len(items):
selected_index = -1
self.select_buttons.checked_index = selected_index
| mit | 8,494,214,371,004,027,000 | 30.034364 | 197 | 0.618758 | false |
Azure/azure-sdk-for-python | sdk/databoxedge/azure-mgmt-databoxedge/azure/mgmt/databoxedge/v2021_02_01_preview/aio/operations/_users_operations.py | 1 | 21068 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class UsersOperations:
"""UsersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.databoxedge.v2021_02_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_data_box_edge_device(
self,
device_name: str,
resource_group_name: str,
expand: Optional[str] = None,
**kwargs
) -> AsyncIterable["_models.UserList"]:
"""Gets all the users registered on a Data Box Edge/Data Box Gateway device.
:param device_name: The device name.
:type device_name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param expand: Specify $expand=details to populate additional fields related to the resource or
Specify $skipToken=:code:`<token>` to populate the next page in the list.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either UserList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.databoxedge.v2021_02_01_preview.models.UserList]
:raises: ~azure.core.exceptions.HttpResponseError
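
        Example (an illustrative sketch only; ``client`` is an already-constructed
        management client and the device/resource-group names are placeholders)::

            async for user in client.users.list_by_data_box_edge_device(
                device_name="testedgedevice",
                resource_group_name="GroupForEdgeAutomation",
            ):
                print(user.name)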
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UserList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_data_box_edge_device.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('UserList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_data_box_edge_device.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/users'} # type: ignore
async def get(
self,
device_name: str,
name: str,
resource_group_name: str,
**kwargs
) -> "_models.User":
"""Gets the properties of the specified user.
:param device_name: The device name.
:type device_name: str
:param name: The user name.
:type name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: User, or the result of cls(response)
:rtype: ~azure.mgmt.databoxedge.v2021_02_01_preview.models.User
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.User"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('User', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/users/{name}'} # type: ignore
async def _create_or_update_initial(
self,
device_name: str,
name: str,
resource_group_name: str,
user: "_models.User",
**kwargs
) -> Optional["_models.User"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.User"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(user, 'User')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('User', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/users/{name}'} # type: ignore
async def begin_create_or_update(
self,
device_name: str,
name: str,
resource_group_name: str,
user: "_models.User",
**kwargs
) -> AsyncLROPoller["_models.User"]:
"""Creates a new user or updates an existing user's information on a Data Box Edge/Data Box
Gateway device.
:param device_name: The device name.
:type device_name: str
:param name: The user name.
:type name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param user: The user details.
:type user: ~azure.mgmt.databoxedge.v2021_02_01_preview.models.User
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either User or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.databoxedge.v2021_02_01_preview.models.User]
:raises ~azure.core.exceptions.HttpResponseError:
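
        Example (an illustrative sketch only; the names and the ``user_details``
        model instance are placeholders)::

            poller = await client.users.begin_create_or_update(
                device_name="testedgedevice",
                name="user1",
                resource_group_name="GroupForEdgeAutomation",
                user=user_details,
            )
            user = await poller.result()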
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.User"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
device_name=device_name,
name=name,
resource_group_name=resource_group_name,
user=user,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('User', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/users/{name}'} # type: ignore
async def _delete_initial(
self,
device_name: str,
name: str,
resource_group_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01-preview"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/users/{name}'} # type: ignore
async def begin_delete(
self,
device_name: str,
name: str,
resource_group_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the user on a databox edge/gateway device.
:param device_name: The device name.
:type device_name: str
:param name: The user name.
:type name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
device_name=device_name,
name=name,
resource_group_name=resource_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/users/{name}'} # type: ignore
| mit | 2,500,892,677,157,854,000 | 47.432184 | 211 | 0.635039 | false |
sigmike/ganeti | test/ganeti.rapi.rlib2_unittest.py | 1 | 11483 | #!/usr/bin/python
#
# Copyright (C) 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Script for unittesting the RAPI rlib2 module
"""
import unittest
import tempfile
from ganeti import constants
from ganeti import opcodes
from ganeti import compat
from ganeti import http
from ganeti.rapi import rlib2
import testutils
class TestParseInstanceCreateRequestVersion1(testutils.GanetiTestCase):
def setUp(self):
testutils.GanetiTestCase.setUp(self)
self.Parse = rlib2._ParseInstanceCreateRequestVersion1
def test(self):
disk_variants = [
# No disks
[],
# Two disks
[{"size": 5, }, {"size": 100, }],
# Disk with mode
[{"size": 123, "mode": constants.DISK_RDWR, }],
# With unknown setting
[{"size": 123, "unknown": 999 }],
]
nic_variants = [
# No NIC
[],
# Three NICs
[{}, {}, {}],
# Two NICs
[
{ "ip": "192.0.2.6", "mode": constants.NIC_MODE_ROUTED,
"mac": "01:23:45:67:68:9A",
},
{ "mode": constants.NIC_MODE_BRIDGED, "link": "n0", "bridge": "br1", },
],
# Unknown settings
[{ "unknown": 999, }, { "foobar": "Hello World", }],
]
beparam_variants = [
None,
{},
{ constants.BE_VCPUS: 2, },
{ constants.BE_MEMORY: 123, },
{ constants.BE_VCPUS: 2,
constants.BE_MEMORY: 1024,
constants.BE_AUTO_BALANCE: True, }
]
hvparam_variants = [
None,
{ constants.HV_BOOT_ORDER: "anc", },
{ constants.HV_KERNEL_PATH: "/boot/fookernel",
constants.HV_ROOT_PATH: "/dev/hda1", },
]
for mode in [constants.INSTANCE_CREATE, constants.INSTANCE_IMPORT]:
for nics in nic_variants:
for disk_template in constants.DISK_TEMPLATES:
for disks in disk_variants:
for beparams in beparam_variants:
for hvparams in hvparam_variants:
data = {
"name": "inst1.example.com",
"hypervisor": constants.HT_FAKE,
"disks": disks,
"nics": nics,
"mode": mode,
"disk_template": disk_template,
"os": "debootstrap",
}
if beparams is not None:
data["beparams"] = beparams
if hvparams is not None:
data["hvparams"] = hvparams
for dry_run in [False, True]:
op = self.Parse(data, dry_run)
self.assert_(isinstance(op, opcodes.OpCreateInstance))
self.assertEqual(op.mode, mode)
self.assertEqual(op.disk_template, disk_template)
self.assertEqual(op.dry_run, dry_run)
self.assertEqual(len(op.disks), len(disks))
self.assertEqual(len(op.nics), len(nics))
for opdisk, disk in zip(op.disks, disks):
for key in constants.IDISK_PARAMS:
self.assertEqual(opdisk.get(key), disk.get(key))
self.assertFalse("unknown" in opdisk)
for opnic, nic in zip(op.nics, nics):
for key in constants.INIC_PARAMS:
self.assertEqual(opnic.get(key), nic.get(key))
self.assertFalse("unknown" in opnic)
self.assertFalse("foobar" in opnic)
if beparams is None:
self.assertEqualValues(op.beparams, {})
else:
self.assertEqualValues(op.beparams, beparams)
if hvparams is None:
self.assertEqualValues(op.hvparams, {})
else:
self.assertEqualValues(op.hvparams, hvparams)
def testErrors(self):
# Test all required fields
reqfields = {
"name": "inst1.example.com",
"disks": [],
"nics": [],
"mode": constants.INSTANCE_CREATE,
"disk_template": constants.DT_PLAIN,
"os": "debootstrap",
}
for name in reqfields.keys():
self.assertRaises(http.HttpBadRequest, self.Parse,
dict(i for i in reqfields.iteritems() if i[0] != name),
False)
# Invalid disks and nics
for field in ["disks", "nics"]:
invalid_values = [None, 1, "", {}, [1, 2, 3], ["hda1", "hda2"]]
if field == "disks":
invalid_values.append([
# Disks without size
{},
{ "mode": constants.DISK_RDWR, },
])
for invvalue in invalid_values:
data = reqfields.copy()
data[field] = invvalue
self.assertRaises(http.HttpBadRequest, self.Parse, data, False)
class TestParseExportInstanceRequest(testutils.GanetiTestCase):
def setUp(self):
testutils.GanetiTestCase.setUp(self)
self.Parse = rlib2._ParseExportInstanceRequest
def test(self):
name = "instmoo"
data = {
"mode": constants.EXPORT_MODE_REMOTE,
"destination": [(1, 2, 3), (99, 99, 99)],
"shutdown": True,
"remove_instance": True,
"x509_key_name": ("name", "hash"),
"destination_x509_ca": ("x", "y", "z"),
}
op = self.Parse(name, data)
self.assert_(isinstance(op, opcodes.OpExportInstance))
self.assertEqual(op.instance_name, name)
self.assertEqual(op.mode, constants.EXPORT_MODE_REMOTE)
self.assertEqual(op.shutdown, True)
self.assertEqual(op.remove_instance, True)
self.assertEqualValues(op.x509_key_name, ("name", "hash"))
self.assertEqualValues(op.destination_x509_ca, ("x", "y", "z"))
def testDefaults(self):
name = "inst1"
data = {
"destination": "node2",
"shutdown": False,
}
op = self.Parse(name, data)
self.assert_(isinstance(op, opcodes.OpExportInstance))
self.assertEqual(op.instance_name, name)
self.assertEqual(op.mode, constants.EXPORT_MODE_LOCAL)
self.assertEqual(op.remove_instance, False)
def testErrors(self):
self.assertRaises(http.HttpBadRequest, self.Parse, "err1",
{ "remove_instance": "True", })
self.assertRaises(http.HttpBadRequest, self.Parse, "err1",
{ "remove_instance": "False", })
class TestParseMigrateInstanceRequest(testutils.GanetiTestCase):
def setUp(self):
testutils.GanetiTestCase.setUp(self)
self.Parse = rlib2._ParseMigrateInstanceRequest
def test(self):
name = "instYooho6ek"
for cleanup in [False, True]:
for mode in constants.HT_MIGRATION_MODES:
data = {
"cleanup": cleanup,
"mode": mode,
}
op = self.Parse(name, data)
self.assert_(isinstance(op, opcodes.OpMigrateInstance))
self.assertEqual(op.instance_name, name)
self.assertEqual(op.mode, mode)
self.assertEqual(op.cleanup, cleanup)
def testDefaults(self):
name = "instnohZeex0"
op = self.Parse(name, {})
self.assert_(isinstance(op, opcodes.OpMigrateInstance))
self.assertEqual(op.instance_name, name)
self.assertEqual(op.mode, None)
self.assertFalse(op.cleanup)
class TestParseRenameInstanceRequest(testutils.GanetiTestCase):
def setUp(self):
testutils.GanetiTestCase.setUp(self)
self.Parse = rlib2._ParseRenameInstanceRequest
def test(self):
name = "instij0eeph7"
for new_name in ["ua0aiyoo", "fai3ongi"]:
for ip_check in [False, True]:
for name_check in [False, True]:
data = {
"new_name": new_name,
"ip_check": ip_check,
"name_check": name_check,
}
op = self.Parse(name, data)
self.assert_(isinstance(op, opcodes.OpRenameInstance))
self.assertEqual(op.instance_name, name)
self.assertEqual(op.new_name, new_name)
self.assertEqual(op.ip_check, ip_check)
self.assertEqual(op.name_check, name_check)
def testDefaults(self):
name = "instahchie3t"
for new_name in ["thag9mek", "quees7oh"]:
data = {
"new_name": new_name,
}
op = self.Parse(name, data)
self.assert_(isinstance(op, opcodes.OpRenameInstance))
self.assertEqual(op.instance_name, name)
self.assertEqual(op.new_name, new_name)
self.assert_(op.ip_check)
self.assert_(op.name_check)
class TestParseModifyInstanceRequest(testutils.GanetiTestCase):
def setUp(self):
testutils.GanetiTestCase.setUp(self)
self.Parse = rlib2._ParseModifyInstanceRequest
def test(self):
name = "instush8gah"
test_disks = [
[],
[(1, { constants.IDISK_MODE: constants.DISK_RDWR, })],
]
for osparams in [{}, { "some": "value", "other": "Hello World", }]:
for hvparams in [{}, { constants.HV_KERNEL_PATH: "/some/kernel", }]:
for beparams in [{}, { constants.BE_MEMORY: 128, }]:
for force in [False, True]:
for nics in [[], [(0, { constants.INIC_IP: "192.0.2.1", })]]:
for disks in test_disks:
for disk_template in constants.DISK_TEMPLATES:
data = {
"osparams": osparams,
"hvparams": hvparams,
"beparams": beparams,
"nics": nics,
"disks": disks,
"force": force,
"disk_template": disk_template,
}
op = self.Parse(name, data)
self.assert_(isinstance(op, opcodes.OpSetInstanceParams))
self.assertEqual(op.instance_name, name)
self.assertEqual(op.hvparams, hvparams)
self.assertEqual(op.beparams, beparams)
self.assertEqual(op.osparams, osparams)
self.assertEqual(op.force, force)
self.assertEqual(op.nics, nics)
self.assertEqual(op.disks, disks)
self.assertEqual(op.disk_template, disk_template)
self.assert_(op.remote_node is None)
self.assert_(op.os_name is None)
self.assertFalse(op.force_variant)
def testDefaults(self):
name = "instir8aish31"
op = self.Parse(name, {})
self.assert_(isinstance(op, opcodes.OpSetInstanceParams))
self.assertEqual(op.instance_name, name)
self.assertEqual(op.hvparams, {})
self.assertEqual(op.beparams, {})
self.assertEqual(op.osparams, {})
self.assertFalse(op.force)
self.assertEqual(op.nics, [])
self.assertEqual(op.disks, [])
self.assert_(op.disk_template is None)
self.assert_(op.remote_node is None)
self.assert_(op.os_name is None)
self.assertFalse(op.force_variant)
if __name__ == '__main__':
testutils.GanetiTestProgram()
| gpl-2.0 | -8,935,929,096,729,442,000 | 30.720994 | 79 | 0.579727 | false |
pslacerda/GromacsWrapper | gromacs/manager.py | 1 | 26171 | # GromacsWrapper
# Copyright (c) 2009-2011 Oliver Beckstein <[email protected]>
# Released under the GNU Public License 3 (or higher, your choice)
# See the file COPYING for details.
"""
Managing jobs remotely
======================
.. Warning:: Experimental code, use at your own risk.
The :class:`Manager` encapsulates knowledge of how to run a job on a
remote system. This basically means the host name, where are the
scratch directories, what queuing system is running, etc. It simply
uses :program:`ssh` to communicate with the remote system and thus it
is necessary to set up :program:`ssh` with public key authentication
to make this work smoothly.
* The manager can move files between the local file system and the
remote scratch directories back and forth (using :program:`scp`).
* It can remotely launch a job (by running :program:`qsub`).
* It can also check on the progress by inspecting (in a primitive
fashion) the log file of :program:`mdrun` on the remote system.
The remote directory name is constructed in the following way:
1. *topdir* is stripped from the local working directory to give WDIR
2. *scratchdir*/WDIR is the directory on the remote system
.. _manager-config-file:
Configuration file
------------------
See :class:`Manager` for how the values in the configuration file are
used.
Example::
[DEFAULT]
name = leviathan
[local]
topdir = ~
[remote]
hostname = leviathan.petagrid.org
scratchdir = /scratch/username/Projects
[queuing_system]
name = PBS
qscript = leviathan.pbs
walltime = 24.0
start_cwd = True
All entries except *walltime* and *start_cwd* are required; *walltime*
can be omitted or set to ``None``.
*DEFAULT* section
~~~~~~~~~~~~~~~~~
*name*
identifier of the configuration; should also be the name of the
configuration file, i.e. `name.cfg`
*local* section
~~~~~~~~~~~~~~~
*topdir*
path component under which the local files are stored; see below
*remote* section
~~~~~~~~~~~~~~~~
*hostname*
fully qualified domain name of the host; used for running ``ssh
hostname`` or ``scp FILES hostname:DIR``
*scratchdir*
    top-level directory on the remote host under which the working
directories are constructed; see below for how this is done
*queuing_system* section
~~~~~~~~~~~~~~~~~~~~~~~~
*name*
identifier for the queuing system (should be a valid python
identifier)
*qscript*
default queuing system script template; store it in ``~/.gromacswrapper/qscripts``
*walltime*
maximum allowed run time on the system; job files are written in
    such a way that Gromacs stops the run at 0.99 of the walltime. If omitted
then the job runs until it is done (provided the queuing system
policy allows that)
*start_cwd*
Set to ``True`` means that the queuing system requires the queuing
system script to ``cd`` into the job directory; this seems to be a
    bug in some versions of PBS, which we can work around in
:meth:`Manager.qsub`
Queuing system Manager
----------------------
The configuration files are stored in the `~.gromacswrapper/manager`
directory. A file named "foo.cfg" corresponds to the manager named
"foo".
The :class:`Manager` class must be customized for each system such as
a cluster or a super computer through a cfg file (see
:ref:`manager-config-file`). It then allows submission and control of
jobs remotely (using ssh_).
.. autoclass:: Manager
:members:
:exclude-members: job_done, qstat
.. method:: job_done
alias for :meth:`get_status`
.. method:: qstat
alias for :meth:`get_status`
The actual config file contents can be retrieved with
:func:`get_manager_config`.
.. autofunction:: get_manager_config
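
Example usage (a minimal sketch: the manager name ``leviathan``, the job
directory and the run parameters are placeholders that must match your own
configuration and files)::

   from gromacs.manager import Manager

   m = Manager("leviathan")          # reads the "leviathan" cfg file
   dirname = m.setup_MD(1, struct="MD_POSRES/md.pdb", runtime=5000)
   m.qsub(dirname)                   # submit the queuing script on the remote host
   m.waitfor(dirname, seconds=300)   # poll the remote log until the run is done
   m.get(dirname)                    # copy the results back
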
Helper classes and functions
----------------------------
The following classes and functions are mainly documented for developers.
.. autofunction:: find_manager_config
.. autoclass:: ManagerConfigParser
:members:
:inherited-members:
.. autoclass:: Job
.. _ssh: http://www.openssh.com/
.. _~/.ssh/config: http://linux.die.net/man/5/ssh_config
"""
from __future__ import absolute_import, with_statement
import os
import errno
from subprocess import call, Popen, PIPE
import shutil
import fnmatch
import re
import glob
from ConfigParser import SafeConfigParser
from .exceptions import MissingDataError
from . import config
from . import utilities
from . import cbook
from . import setup
import warnings
import logging
logger = logging.getLogger("gromacs.manager")
class ManagerConfigParser(SafeConfigParser):
def getpath(self, section, option, **kwargs):
"""Return option as an expanded path."""
return os.path.expanduser(os.path.expandvars(self.get(section, option, **kwargs)))
def getfloat(self, section, option, **kwargs):
"""Return as :func:`float` or ``None``."""
try:
return float(self.get(section, option, **kwargs))
except ValueError:
return None
def find_manager_config(name):
"""Find a configuration file for manager *name*."""
found = list(utilities.find_files(config.managerdir, name+".cfg"))
if len(found) == 0:
errmsg = "No configuration file found for name %r" % name
logger.error(errmsg)
raise MissingDataError(errmsg)
elif len(found) > 1:
logger.warn("Multiple configuration files found: %r. Using the first one!", found)
return found[0]
def get_manager_config(filename):
"""Load manager configuration file from *filename*."""
logger.info("Loading Manager configuration data from %(filename)r", vars())
cfg = ManagerConfigParser()
cfg.add_section("local")
cfg.set("local", "topdir", os.path.expanduser("~"))
cfg.add_section("remote")
cfg.add_section("queuing_system")
cfg.set("queuing_system", "walltime", "None")
cfg.set("queuing_system", "start_cwd", "False")
cfg.set("DEFAULT", "filename", filename)
cfg.readfp(open(filename))
return cfg
class Job(dict):
"""Properties of a job."""
class Manager(object):
""" Base class to launch simulations remotely on computers with queuing systems.
Basically, ssh into machine and run job.
The manager is configured through a cfg file "*name*.cfg", whose
format is described in :ref:`manager-config-file`.
If a special job submission procedure is required then a class
    must be derived that implements a specialized :meth:`Manager.qsub`
method.
ssh_ must be set up (via `~/.ssh/config`_) to allow access via a
commandline such as ::
ssh <hostname> <command> ...
Typically you want something such as ::
host <hostname>
hostname <hostname>.fqdn.org
user <remote_user>
in ``~/.ssh/config`` and also set up public-key authentication in
order to avoid typing your password all the time.
"""
#: Regular expression used by :meth:`Manager.get_status` to parse
#: the logfile from :program:`mdrun`.
log_RE = re.compile(r"""
Run\stime\sexceeded\s+(?P<exceeded>.*)\s+hours,\swill\sterminate\sthe\srun
# another part to come
| Performance:\s*(?P<performance>[\s\d.]+)\n # performance line (split later)
| (?P<completed>Finished\smdrun\son\snode) # this (part of a) run completed
""", re.VERBOSE)
def __init__(self, name, dirname=None, **kwargs):
"""Set up the manager.
:Arguments:
*name*
configuration name (corresponds to a store cfg file)
*dirname*
directory component under the remote scratch dir (should
be different for different jobs); the default is to strip
*topdir* from the config file from the full path of the
current directory
*prefix*
identifier for job names [MD]
"""
self.name = name
self.logger = logging.getLogger('gromacs.manager.%(name)s' % vars())
try:
cfg = get_manager_config(find_manager_config(name))
except:
logger.error("Failed to read the configuration for Manager %(name)r.", vars())
raise
attribs = {
'name': cfg.get('DEFAULT', 'name'),
'topdir': cfg.getpath('local', 'topdir'),
'hostname': cfg.get('remote', 'hostname'),
'scratchdir': cfg.get('remote', 'scratchdir'),
'queuing_system': cfg.get('queuing_system', 'name'),
'qscript': cfg.get('queuing_system', 'qscript'),
'walltime': cfg.getfloat('queuing_system', 'walltime'),
'start_cwd': cfg.getboolean('queuing_system', 'start_cwd'),
}
if attribs['name'] != self.name:
errmsg = "Sanity check failed: requested name %r does not match the Manager name %r "\
"that was recorded in the config file %r." % \
(self.name, attribs['name'], cfg.getpath('DEFAULT', 'filename'))
logger.fatal(errmsg)
raise ValueError(errmsg)
self.__dict__.update(attribs)
self.performance = None # ns/d, updated with get_status()
if dirname is None:
logger.info("Stripping %(topdir)r from current dirname to generate workdir path.", vars(self))
dirname = os.path.realpath(os.path.curdir).replace(os.path.normpath(self.topdir)+"/", "")
self.wdir = os.path.normpath(os.path.join(self.scratchdir, dirname))
self.prefix = kwargs.pop('prefix', 'MD') # for future use/examples
self.uri = self.hostname.strip()+":"+self.wdir
self.logger.info("Setting up a manager from %r.", dirname)
# test connection and make directory where we run things on the remote host
rc = call(['ssh', self.hostname, 'mkdir' , '-p', self.wdir])
if rc == 0:
self.logger.info("All good: can access %(uri)s" % vars(self))
else:
self.logger.error("Problem with ssh and path %(uri)s" % vars(self))
super(Manager, self).__init__(**kwargs)
def remotepath(self, *args):
"""Directory on the remote machine."""
return os.path.join(self.wdir,*args)
get_dir = remotepath
def remoteuri(self, *args):
"""URI of the directory on the remote machine."""
return os.path.join(self.uri,*args)
def put(self, dirname):
"""scp dirname to host.
:Arguments: dirname to be transferred
:Returns: return code from scp
"""
self.logger.info("Copying %r to %r" % (dirname, self.uri))
return call(["scp", "-r", dirname, self.uri])
def putfile(self, filename, dirname):
"""scp *filename* to host in *dirname*.
:Arguments: filename and dirname to be transferred to
:Returns: return code from scp
"""
destdir = self.remoteuri(dirname)
self.logger.info("Copying %(filename)r to %(destdir)r" % vars())
return call(["scp", filename, destdir])
def get(self, dirname, checkfile=None, targetdir=os.path.curdir):
"""``scp -r`` *dirname* from host into *targetdir*
:Arguments:
- *dirname*: dir to download
- *checkfile*: raise OSError/ENOENT if *targetdir/dirname/checkfile* was not found
- *targetdir*: put *dirname* into this directory
:Returns: return code from scp
"""
self.logger.info("Copying %r from %r" % (dirname, self.uri))
rc = call(["scp", "-r", self.remoteuri(dirname), targetdir])
#rc = call(["rsync", "-e","ssh","-avP", os.path.join(self.uri,dirname), targetdir])
if not checkfile is None:
if not os.path.exists(os.path.join(targetdir, dirname, checkfile)):
self.logger.error("Failed to get %r from %s", checkfile, self.hostname)
raise OSError(errno.ENOENT, checkfile,
"Failed to download file from %(hostname)r" % vars(self))
return rc
def local_get(self, dirname, checkfile, cattrajectories=True, cleanup=False):
"""Find *checkfile* locally if possible.
If *checkfile* is not found in *dirname* then it is transferred from the
remote host.
If needed, the trajectories are concatenated using :meth:`Manager.cat`.
:Returns: local path of *checkfile*
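
        Example (sketch; assumes a finished run in the remote job directory ``MD_001``)::

          edrfile = m.local_get('MD_001', 'md.edr')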
"""
checkpath = os.path.join(dirname, checkfile)
if not os.path.exists(checkpath):
self.get(dirname) # try downloading
if cattrajectories and not os.path.exists(checkpath):
            # try concatenating everything first (guess prefix...)
prefix = os.path.splitext(os.path.basename(checkfile))[0]
self.cat(dirname, prefix=prefix, cleanup=cleanup)
if not os.path.exists(checkpath):
self.logger.error("Failed to get %r from %s", checkfile, self.hostname)
raise OSError(errno.ENOENT, checkfile,
"Failed to download file from %(hostname)r" % vars(self))
return checkpath
def cat(self, dirname, prefix='md', cleanup=True):
"""Concatenate parts of a run in *dirname*.
Always uses :func:`gromacs.cbook.cat` with *resolve_multi* = 'guess'.
.. Note:: The default is to immediately delete the original files
(*cleanup* = ``True``).
:Keywords:
*dirname*
directory to work in
*prefix*
prefix (deffnm) of the files [md]
*cleanup* : boolean
if ``True``, remove all used files [``True``]
"""
cbook.cat(prefix, dirname=dirname, resolve_multi='guess')
# cleanup/get stuff back
full_dir = os.path.join(dirname, 'full') # default of cat
complete_files = os.path.join(full_dir, '*.*')
self.logger.info("Manager.cat(): recoverning cated files from %r", full_dir)
for f in glob.glob(complete_files):
self.logger.debug("Manager.cat(): mv %s %s", f, dirname)
shutil.move(f, dirname)
shutil.rmtree(full_dir)
if cleanup:
partsdir = os.path.join(dirname, 'parts') # default of cat
self.logger.info("Manager.cat(): Removing cat dir %r", partsdir)
shutil.rmtree(partsdir)
def qsub(self, dirname, **kwargs):
"""Submit job remotely on host.
This is the most primitive implementation: it just runs the commands ::
cd remotedir && qsub qscript
on :attr:`Manager._hostname`. *remotedir* is *dirname* under
:attr:`Manager._scratchdir` and *qscript* is the name of the
queuing system script in *remotedir*.
:Arguments:
*dirname*
            directory, relative to the current one, under which all
            the job files reside (typically, this is also where the
            queuing system script *qscript* lives)
*qscript*
name of the queuing system script; defaults to the
            queuing system script that was produced from the template
:attr:`Manager._qscript`; searched in the current
directory (``.``) and under *dirname*
*remotedir*
full path to the job directory on the remote system; in
most cases it should be sufficient to let the programme
choose the appropriate value based on *dirname* and the
configuration of the manager
"""
remotedir = kwargs.pop('remotedir', self.remotepath(dirname))
qscript = kwargs.pop('qscript', os.path.basename(self.qscript))
if self.start_cwd:
# hack for queuing systems that require hard coding of the
# start directory into the queuing system script (see setup_MD below)
qscriptpath = qscript
if not os.path.exists(qscript):
# catch the common case that the qscript resides within the job dir
qscriptpath = os.path.join(dirname, qscript)
if not os.path.exists(qscriptpath):
logger.error("Failed to find qscript %(qscript)r under %(dirname)r or current dir.", vars())
raise OSError(errno.ENOENT, "Missing qscript", qscript)
cbook.edit_txt(qscriptpath, [('^ *STARTDIR=', '(?<==)(.*)', remotedir),]) # in-place!
rc = self.putfile(qscriptpath, dirname)
if rc != 0:
errmsg = "Failed to scp updated qscript %(qscriptpath)r to remote %(remotedir)r" % vars()
logger.error(errmsg)
raise IOError(rc, errmsg)
logger.debug("Using qscript %(qscriptpath)r with hard-coded remote cd %(remotedir)r", vars())
rc = call(['ssh', self.hostname, 'cd %s && qsub %s' % (remotedir, qscript)])
if rc == 0:
self.logger.info("Submitted job %r on %s.", qscript, self.hostname )
else:
self.logger.error("Failed running job %s on %s in %r.",
qscript, self.hostname, remotedir)
return rc == 0
def get_status(self, dirname, logfilename='md*.log', silent=False):
"""Check status of remote job by looking into the logfile.
        Report on the status of the job and extract the performance in ns/d if
available (which is saved in :attr:`Manager.performance`).
:Arguments:
- *dirname*
- *logfilename* can be a shell glob pattern [md*.log]
- *silent* = True/False; True suppresses log.info messages
        :Returns: ``True`` if job is done, ``False`` if still running
``None`` if no log file found to look at
.. Note:: Also returns ``False`` if the connection failed.
.. Warning:: This is an important but somewhat **fragile** method. It
needs to be improved to be more robust.
"""
remotefile = os.path.join(self.wdir, dirname, logfilename)
def loginfo(*args, **kwargs):
if not silent:
self.logger.info(*args, **kwargs)
if not silent:
self.logger.debug("Checking status of %s:%s", self.hostname, remotefile)
# need to check if file exists to avoid infinite hangs
sshcmd = """files=$(ls %(remotefile)s); """ \
"""test -n "$files" && tail -n 500 $(echo $files | tr ' ' '\n' | sort | tail -n 1) """\
"""|| exit 255""" % vars()
p = Popen(['ssh', self.hostname, sshcmd],
stdout=PIPE, stderr=PIPE, universal_newlines=True)
out, err = p.communicate()
rc = p.returncode
status = {'exceeded': False, 'completed': False, 'started': False}
performance = None
if rc == 0:
status['started'] = True
for m in re.finditer(self.log_RE, out):
if m.group('completed'):
status['completed'] = True
elif m.group('exceeded'):
status['exceeded'] = True
elif m.group('performance'):
data = m.group('performance').split()
if len(data) == 4:
# Gromacs 4.5.x and earlier(?)
performance = dict(zip(['Mnbf/s', 'GFlops', 'ns/day', 'hour/ns'], map(float, data)))
elif len(data) == 2:
# Gromacs 4.6.x
performance = dict(zip(['ns/day', 'hour/ns'], map(float, data)))
else:
logger.warn("Cannot reliably parse the 'Performance:' line %r in the log file.", m.group('performance'))
elif rc == 255:
loginfo("No output file (yet) for job on %(hostname)s." % vars(self))
if err:
self.logger.error("remote: %r", err)
else:
self.logger.debug("get_status(): got return code %r, not sure what it means", rc)
isDone = False
if status['exceeded']:
loginfo("Job on %(hostname)s is RUNNING but waiting for next part to run." % vars(self))
elif status['completed']: # and not exceeded
isDone = True
loginfo("Job on %(hostname)s is DONE." % vars(self))
elif not status['started']:
loginfo("Job on %(hostname)s is WAITING in the queue." % vars(self))
else:
loginfo("Job on %(hostname)s is still RUNNING." % vars(self))
if err:
self.logger.error("remote: %r", err)
lines = out.split('\n').__iter__()
values = ['NAN', 'NAN', 'NAN'] # set a stupid default in case we don't find any time step
for line in lines:
if re.match('\s*Step\s+Time\s+Lambda', line):
try:
values = lines.next().split() # typically three values
except StopIteration:
pass # if we're unlucky and Step...is last line
loginfo("Last time step %f ns at step %d.", float(values[1])/1000, float(values[0]))
if performance:
self.performance = performance['ns/day'] # used for calculating ndependent()
loginfo("Performance: %(ns/day)g ns/day", performance)
return isDone
job_done = get_status
qstat = get_status
def ndependent(self, runtime, performance=None, walltime=None):
"""Calculate how many dependent (chained) jobs are required.
Uses *performance* in ns/d (gathered from :meth:`get_status`) and job max
*walltime* (in hours) from the class unless provided as keywords.
        n = ceil(runtime/(performance*0.99*walltime/24))
:Keywords:
*runtime*
length of run in ns
*performance*
ns/d with the given setup
*walltime*
maximum run length of the script (using 99% of it), in h
:Returns: *n* or 1 if walltime is unlimited
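
        Example (sketch): with a performance of 25 ns/d and a 24 h walltime, a
        100 ns run needs ``ceil(100/(25*0.99*24/24.)) = 5`` chained jobs::

          n = m.ndependent(100, performance=25, walltime=24)   # -> 5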
"""
import math
perf = performance or self.performance # in ns/d
wt = walltime or self.walltime # max runtime of job in h (None = inf)
if wt is None or wt == float('inf'):
return 1
if perf is None:
raise ValueError("No performance data available. Run get_status()?")
return int(math.ceil(runtime/(perf*0.99*wt/24.)))
def waitfor(self, dirname, **kwargs):
"""Wait until the job associated with *dirname* is done.
Super-primitive, uses a simple while ... sleep for *seconds* delay
:Arguments:
*dirname*
look for log files under the remote dir corresponding to *dirname*
*seconds*
delay in *seconds* during re-polling
"""
import sys
import time
delta_seconds = kwargs.pop('seconds', 120)
kwargs.setdefault('silent', True)
totseconds = 0
while not self.job_done(dirname, **kwargs):
sys.stderr.write("%4d min ... %s still running\r" % (totseconds/60, dirname))
time.sleep(delta_seconds)
totseconds += delta_seconds
sys.stderr.write('\n')
#------------------------------------------------------------
# example implementations for various stages
#------------------------------------------------------------
def setup_posres(self, **kwargs):
"""Set up position restraints run and transfer to host.
*kwargs* are passed to :func:`gromacs.setup.MD_restrained`
"""
dirname = 'MD_POSRES'
struct = self.local_get('em','em.pdb')
qscript = kwargs.pop('qscript', self.qscript)
setup.MD_restrained(dirname=dirname, struct=struct, qscript=qscript,
qname=self.prefix+'pr', startdir=self.remotepath(dirname),
**kwargs)
self.put(dirname)
self.logger.info("Run %s on %s in %s/%s" % (dirname, self.hostname, self.uri, dirname))
self.logger.info(">> qsub('%s')", dirname)
return dirname
def setup_MD(self, jobnumber, struct=os.path.join('MD_POSRES', 'md.pdb'), **kwargs):
"""Set up production and transfer to host.
:Arguments:
- *jobnumber*: 1,2 ...
- *struct* is the starting structure (default from POSRES
run but that is just a guess);
- kwargs are passed to :func:`gromacs.setup.MD`
"""
kwargs.setdefault('runtime', 1e4)
jobid_s = '%(jobnumber)03d' % vars()
dirname = 'MD_'+jobid_s
structure = self.local_get(os.path.dirname(struct), os.path.basename(struct))
qscript = kwargs.pop('qscript', self.qscript)
setup.MD(dirname=dirname, struct=structure, qscript=qscript,
qname=self.prefix+jobid_s, startdir=self.remotepath(dirname),
**kwargs)
self.put(dirname)
self.logger.info("Run %s on %s in %s/%s" % (dirname, self.hostname, self.uri, dirname))
self.logger.info("Or use %s.qsub(%r)" % (self.__class__.__name__, dirname))
return dirname
# :func:`get_manager` creates a :class:`Manager` from a configuration file.
# def get_manager(name):
# """Factory function that creates a new Manager class, based on a config file.
# :Arguments:
# *name*
# name of the config file, `~/.gromacswrapper/managers/name.cfg`
# """
# import qsub
# warnings.warn("Old-style, derives from qsub.Manager", DeprecationWarning)
# cfg = get_manager_config(find_manager_config(name))
# attribs = {
# 'name': cfg.get('manager', 'name'),
# '_hostname': cfg.get('manager', 'hostname'),
# '_scratchdir': cfg.get('manager', 'scratchdir'),
# 'queuing_system': cfg.get('queuing_system', 'name'),
# '_qscript': cfg.get('queuing_system', 'qscript'),
# '_walltime': cfg.getfloat('queuing_system', 'walltime'),
# }
# return type(name, (qsub.Manager,), attribs)
| gpl-3.0 | -8,149,531,292,777,102,000 | 36.440629 | 128 | 0.592641 | false |
gadsbyfly/PyBioMed | PyBioMed/PyMolecule/fingerprint.py | 1 | 20276 | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Zhijiang Yao, Jie Dong and Dongsheng Cao
# All rights reserved.
# This file is part of the PyBioMed.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the PyBioMed source tree.
"""
##############################################################################
This module computes various molecular fingerprints based on the provided
fingerprint systems. If you have any questions, please contact me via email.
2016.11.15
@author: Zhijiang Yao and Dongsheng Cao
Email: [email protected] and [email protected]
##############################################################################
"""
# Third party modules
from openbabel import pybel
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem, ChemicalFeatures, MACCSkeys
from rdkit.Chem.AtomPairs import Pairs, Torsions
from rdkit.Chem.Fingerprints import FingerprintMols
from rdkit.Chem.Pharm2D import Generate
from rdkit.Chem.Pharm2D.SigFactory import SigFactory
# First party modules
from PyBioMed.PyMolecule.estate import CalculateEstateFingerprint as EstateFingerprint
from PyBioMed.PyMolecule.ghosecrippen import GhoseCrippenFingerprint
from PyBioMed.PyMolecule.PubChemFingerprints import calcPubChemFingerAll
Version = 1.0
similaritymeasure = [i[0] for i in DataStructs.similarityFunctions]
################################################################
def CalculateFP2Fingerprint(mol):
"""
#################################################################
Calculate FP2 fingerprints (1024 bits).
Usage:
result=CalculateFP2Fingerprint(mol)
Input: mol is a molecule object.
    Output: result is a two-element tuple. The first element is the number
    of fingerprint bits. The second is a dict whose keys are the bit
    positions set for this molecule.
#################################################################
"""
res = {}
NumFinger = 1024
temp = mol.calcfp().bits
for i in temp:
res.update({i: 1})
return NumFinger, res
def CalculateFP3Fingerprint(mol):
"""
#################################################################
Calculate FP3 fingerprints (210 bits).
Usage:
result=CalculateFP3Fingerprint(mol)
Input: mol is a molecule object.
    Output: result is a two-element tuple. The first element is the number
    of fingerprint bits. The second is a dict whose keys are the bit
    positions set for this molecule.
#################################################################
"""
res = {}
NumFinger = 210
temp = mol.calcfp("FP3").bits
for i in temp:
res.update({i: 1})
return NumFinger, res
def CalculateFP4Fingerprint(mol):
"""
#################################################################
Calculate FP4 fingerprints (307 bits).
Usage:
result=CalculateFP4Fingerprint(mol)
Input: mol is a molecule object.
    Output: result is a two-element tuple. The first element is the number
    of fingerprint bits. The second is a dict whose keys are the bit
    positions set for this molecule.
#################################################################
"""
res = {}
NumFinger = 307
temp = mol.calcfp("FP4").bits
for i in temp:
res.update({i: 1})
return NumFinger, res
def CalculateDaylightFingerprint(mol):
"""
#################################################################
Calculate Daylight-like fingerprint or topological fingerprint
(2048 bits).
Usage:
result=CalculateDaylightFingerprint(mol)
Input: mol is a molecule object.
Output: result is a tuple form. The first is the number of
fingerprints. The second is a dict form whose keys are the
position which this molecule has some substructure. The third
is the DataStructs which is used for calculating the similarity.
#################################################################
"""
res = {}
NumFinger = 2048
bv = FingerprintMols.FingerprintMol(mol)
temp = tuple(bv.GetOnBits())
for i in temp:
res.update({i: 1})
return NumFinger, res, bv
def CalculateMACCSFingerprint(mol):
"""
#################################################################
Calculate MACCS keys (166 bits).
Usage:
result=CalculateMACCSFingerprint(mol)
Input: mol is a molecule object.
Output: result is a tuple form. The first is the number of
fingerprints. The second is a dict form whose keys are the
position which this molecule has some substructure. The third
is the DataStructs which is used for calculating the similarity.
#################################################################
"""
res = {}
NumFinger = 166
bv = MACCSkeys.GenMACCSKeys(mol)
temp = tuple(bv.GetOnBits())
for i in temp:
res.update({i: 1})
return NumFinger, res, bv
def CalculateEstateFingerprint(mol):
"""
#################################################################
Calculate E-state fingerprints (79 bits).
Usage:
result=CalculateEstateFingerprint(mol)
Input: mol is a molecule object.
Output: result is a tuple form. The first is the number of
fingerprints. The second is a dict form whose keys are the
position which this molecule has some substructure. The third
is the DataStructs which is used for calculating the similarity.
#################################################################
"""
NumFinger = 79
res = {}
temp = EstateFingerprint(mol)
for i in temp:
if temp[i] > 0:
res[i[7:]] = 1
return NumFinger, res, temp
def CalculateAtomPairsFingerprint(mol):
"""
#################################################################
Calculate atom pairs fingerprints
Usage:
result=CalculateAtomPairsFingerprint(mol)
Input: mol is a molecule object.
Output: result is a tuple form. The first is the number of
fingerprints. The second is a dict form whose keys are the
position which this molecule has some substructure. The third
is the DataStructs which is used for calculating the similarity.
#################################################################
"""
res = Pairs.GetAtomPairFingerprint(mol)
return res.GetLength(), res.GetNonzeroElements(), res
def CalculateTopologicalTorsionFingerprint(mol):
"""
#################################################################
Calculate Topological Torsion Fingerprints
Usage:
result=CalculateTopologicalTorsionFingerprint(mol)
Input: mol is a molecule object.
Output: result is a tuple form. The first is the number of
fingerprints. The second is a dict form whose keys are the
position which this molecule has some substructure. The third
is the DataStructs which is used for calculating the similarity.
#################################################################
"""
res = Torsions.GetTopologicalTorsionFingerprint(mol)
return res.GetLength(), res.GetNonzeroElements(), res
def CalculateMorganFingerprint(mol, radius=2):
"""
#################################################################
Calculate Morgan
Usage:
result=CalculateMorganFingerprint(mol)
Input: mol is a molecule object.
radius is a radius.
Output: result is a tuple form. The first is the number of
fingerprints. The second is a dict form whose keys are the
position which this molecule has some substructure. The third
is the DataStructs which is used for calculating the similarity.
#################################################################
"""
res = AllChem.GetMorganFingerprint(mol, radius)
return res.GetLength(), res.GetNonzeroElements(), res
def CalculateECFP2Fingerprint(mol, radius=1):
"""
#################################################################
Calculate ECFP2
Usage:
result=CalculateECFP2Fingerprint(mol)
Input: mol is a molecule object.
radius is a radius.
Output: result is a tuple form. The first is the vector of
fingerprints. The second is a dict form whose keys are the
position which this molecule has some substructure. The third
is the DataStructs which is used for calculating the similarity.
#################################################################
"""
res = AllChem.GetMorganFingerprint(mol, radius)
fp = tuple(AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=1024))
return fp, res.GetNonzeroElements(), res
def CalculateECFP4Fingerprint(mol, radius=2):
"""
#################################################################
Calculate ECFP4
Usage:
result=CalculateECFP4Fingerprint(mol)
Input: mol is a molecule object.
radius is a radius.
Output: result is a tuple form. The first is the vector of
fingerprints. The second is a dict form whose keys are the
position which this molecule has some substructure. The third
is the DataStructs which is used for calculating the similarity.
#################################################################
"""
res = AllChem.GetMorganFingerprint(mol, radius)
fp = tuple(AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=1024))
return fp, res.GetNonzeroElements(), res
def CalculateECFP6Fingerprint(mol, radius=3):
"""
#################################################################
Calculate ECFP6
Usage:
result=CalculateECFP6Fingerprint(mol)
Input: mol is a molecule object.
radius is a radius.
Output: result is a tuple form. The first is the vector of
fingerprints. The second is a dict form whose keys are the
position which this molecule has some substructure. The third
is the DataStructs which is used for calculating the similarity.
#################################################################
"""
res = AllChem.GetMorganFingerprint(mol, radius)
fp = tuple(AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=1024))
return fp, res.GetNonzeroElements(), res
def CalculateSimilarityPybel(fp1, fp2):
"""
#################################################################
Calculate Tanimoto similarity between two molecules.
Usage:
result=CalculateSimilarityPybel(fp1,fp2)
    Input: fp1 and fp2 are the tuples returned by the CalculateXXXFingerprint
    functions; the bit dictionaries at index 1 are compared.
Output: result is a Tanimoto similarity value.
#################################################################
"""
intersection = set(fp1[1].keys()) & set(fp2[1].keys())
union = set(fp1[1].keys()) | set(fp2[1].keys())
tanimoto = len(intersection) / float(len(union))
return round(tanimoto, 3)
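# Worked example of the Tanimoto computation above (made-up bit dicts, not real
# fingerprints): if fp1[1] == {1: 1, 5: 1, 9: 1} and fp2[1] == {1: 1, 9: 1, 12: 1},
# the bit sets share 2 of 4 distinct positions, so the function returns
# round(2/4.0, 3) == 0.5.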
def CalculateSimilarityRdkit(fp1, fp2, similarity="Tanimoto"):
"""
#################################################################
Calculate similarity between two molecules.
Usage:
result=CalculateSimilarity(fp1,fp2)
Users can choose 11 different types:
Tanimoto, Dice, Cosine, Sokal, Russel,
RogotGoldberg, AllBit, Kulczynski,
McConnaughey, Asymmetric, BraunBlanquet
Input: fp1 and fp2 are two DataStructs.
Output: result is a similarity value.
#################################################################
"""
    temp = DataStructs.similarityFunctions
    similarityfunction = temp[0][1]  # default to the first (Tanimoto) measure
    for i in temp:
        if similarity in i[0]:
            similarityfunction = i[1]
            break
res = similarityfunction(fp1, fp2)
return round(res, 3)
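# Sketch of typical usage (assumes RDKit is available; the fingerprints must be RDKit
# bit vectors, such as the third element returned by CalculateMACCSFingerprint):
#   >>> _, _, bv1 = CalculateMACCSFingerprint(Chem.MolFromSmiles("CCO"))
#   >>> _, _, bv2 = CalculateMACCSFingerprint(Chem.MolFromSmiles("CCN"))
#   >>> CalculateSimilarityRdkit(bv1, bv2, similarity="Dice")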
def CalculateFCFP2Fingerprint(mol, radius=1, nBits=1024):
"""
#################################################################
Calculate FCFP2
Usage:
result=CalculateFCFP2Fingerprint(mol)
Input: mol is a molecule object.
radius is a radius.
Output: result is a tuple form. The first is the vector of
fingerprints. The second is a dict form whose keys are the
position which this molecule has some substructure. The third
is the DataStructs which is used for calculating the similarity.
#################################################################
"""
res = AllChem.GetMorganFingerprint(mol, radius, useFeatures=True)
fp = tuple(
AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits, useFeatures=True)
)
return fp, res.GetNonzeroElements(), res
def CalculateFCFP4Fingerprint(mol, radius=2, nBits=1024):
"""
#################################################################
Calculate FCFP4
Usage:
result=CalculateFCFP4Fingerprint(mol)
Input: mol is a molecule object.
radius is a radius.
Output: result is a tuple form. The first is the vector of
fingerprints. The second is a dict form whose keys are the
position which this molecule has some substructure. The third
is the DataStructs which is used for calculating the similarity.
#################################################################
"""
res = AllChem.GetMorganFingerprint(mol, radius, useFeatures=True)
fp = tuple(
AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits, useFeatures=True)
)
return fp, res.GetNonzeroElements(), res
def CalculateFCFP6Fingerprint(mol, radius=3, nBits=1024):
"""
#################################################################
Calculate FCFP6
Usage:
        result=CalculateFCFP6Fingerprint(mol)
Input: mol is a molecule object.
radius is a radius.
Output: result is a tuple form. The first is the vector of
fingerprints. The second is a dict form whose keys are the
position which this molecule has some substructure. The third
is the DataStructs which is used for calculating the similarity.
#################################################################
"""
res = AllChem.GetMorganFingerprint(mol, radius, useFeatures=True)
fp = tuple(
AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits, useFeatures=True)
)
return fp, res.GetNonzeroElements(), res
################################################################
fdefstr = """
AtomType NDonor [N&!H0&v3,N&!H0&+1&v4,n&H1&+0]
AtomType ChalcDonor [O,S;H1;+0]
DefineFeature SingleAtomDonor [{NDonor},{ChalcDonor},!$([D1]-[C;D3]=[O,S,N])]
Family Donor
Weights 1
EndFeature
AtomType NAcceptor [$([N&v3;H1,H2]-[!$(*=[O,N,P,S])])]
Atomtype NAcceptor [$([N;v3;H0])]
AtomType NAcceptor [$([n;+0])]
AtomType ChalcAcceptor [$([O,S;H1;v2]-[!$(*=[O,N,P,S])])]
AtomType ChalcAcceptor [O,S;H0;v2]
Atomtype ChalcAcceptor [O,S;-]
Atomtype ChalcAcceptor [o,s;+0]
AtomType HalogenAcceptor [F]
DefineFeature SingleAtomAcceptor [{NAcceptor},{ChalcAcceptor},{HalogenAcceptor}]
Family Acceptor
Weights 1
EndFeature
# this one is delightfully easy:
DefineFeature AcidicGroup [C,S](=[O,S,P])-[O;H1,H0&-1]
Family NegIonizable
Weights 1.0,1.0,1.0
EndFeature
AtomType CarbonOrArom_NonCarbonyl [$([C,a]);!$([C,a](=O))]
AtomType BasicNH2 [$([N;H2&+0][{CarbonOrArom_NonCarbonyl}])]
AtomType BasicNH1 [$([N;H1&+0]([{CarbonOrArom_NonCarbonyl}])[{CarbonOrArom_NonCarbonyl}])]
AtomType BasicNH0 [$([N;H0&+0]([{CarbonOrArom_NonCarbonyl}])([{CarbonOrArom_NonCarbonyl}])[{CarbonOrArom_NonCarbonyl}])]
AtomType BasicNakedN [N,n;X2;+0]
DefineFeature BasicGroup [{BasicNH2},{BasicNH1},{BasicNH0},{BasicNakedN}]
Family PosIonizable
Weights 1.0
EndFeature
# aromatic rings of various sizes:
DefineFeature Arom5 a1aaaa1
Family Aromatic
Weights 1.0,1.0,1.0,1.0,1.0
EndFeature
DefineFeature Arom6 a1aaaaa1
Family Aromatic
Weights 1.0,1.0,1.0,1.0,1.0,1.0
EndFeature
DefineFeature Arom7 a1aaaaaa1
Family Aromatic
Weights 1.0,1.0,1.0,1.0,1.0,1.0,1.0
EndFeature
DefineFeature Arom8 a1aaaaaaa1
Family Aromatic
Weights 1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0
EndFeature
"""
featFactory = ChemicalFeatures.BuildFeatureFactoryFromString(fdefstr)
def CalculatePharm2D2pointFingerprint(mol, featFactory=featFactory):
"""
Calculate Pharm2D2point Fingerprints
"""
sigFactory_2point = SigFactory(featFactory, minPointCount=2, maxPointCount=2)
sigFactory_2point.SetBins(
[(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9)]
)
sigFactory_2point.Init()
res = Generate.Gen2DFingerprint(mol, sigFactory_2point)
res_keys = tuple(res.GetOnBits())
init_list = [0] * 135
for res_key in res_keys:
init_list[res_key] = 1
BitVect = tuple(init_list)
return BitVect, res_keys, res
################################################################
def CalculatePharm2D3pointFingerprint(mol, featFactory=featFactory):
"""
Calculate Pharm2D3point Fingerprints
"""
sigFactory_3point = SigFactory(featFactory, minPointCount=3, maxPointCount=3)
sigFactory_3point.SetBins([(0, 2), (2, 4), (4, 6), (6, 10)])
sigFactory_3point.Init()
res = Generate.Gen2DFingerprint(mol, sigFactory_3point)
res_keys = tuple(res.GetOnBits())
init_list = [0] * 2135
for res_key in res_keys:
init_list[res_key] = 1
BitVect = tuple(init_list)
return BitVect, res_keys, res
################################################################
def CalculateGhoseCrippenFingerprint(mol, count=False):
"""
Calculate GhoseCrippen Fingerprints
"""
res = GhoseCrippenFingerprint(mol, count=count)
return res
def CalculatePubChemFingerprint(mol):
"""
Calculate PubChem Fingerprints
"""
res = calcPubChemFingerAll(mol)
return res
_FingerprintFuncs = {
"FP2": CalculateFP2Fingerprint,
"FP3": CalculateFP3Fingerprint,
"FP4": CalculateFP4Fingerprint,
"topological": CalculateDaylightFingerprint,
"Estate": CalculateEstateFingerprint,
"atompairs": CalculateAtomPairsFingerprint,
"torsions": CalculateTopologicalTorsionFingerprint,
"morgan": CalculateMorganFingerprint,
"ECFP2": CalculateECFP2Fingerprint,
"ECFP4": CalculateECFP4Fingerprint,
"ECFP6": CalculateECFP6Fingerprint,
"MACCS": CalculateMACCSFingerprint,
"FCFP2": CalculateFCFP2Fingerprint,
"FCFP4": CalculateFCFP4Fingerprint,
"FCFP6": CalculateFCFP6Fingerprint,
"Pharm2D2point": CalculatePharm2D2pointFingerprint,
"Pharm2D3point": CalculatePharm2D3pointFingerprint,
"PubChem": CalculatePubChemFingerprint,
"GhoseCrippen": CalculateGhoseCrippenFingerprint,
}
################################################################
if __name__ == "__main__":
print("-" * 10 + "START" + "-" * 10)
ms = [
Chem.MolFromSmiles("CCOC=N"),
Chem.MolFromSmiles("NC1=NC(=CC=N1)N1C=CC2=C1C=C(O)C=C2"),
]
m2 = [pybel.readstring("smi", "CCOC=N"), pybel.readstring("smi", "CCO")]
res1 = CalculateECFP4Fingerprint(ms[0])
print(res1)
print("-" * 25)
res2 = CalculateECFP4Fingerprint(ms[1])
print(res2)
print("-" * 25)
mol = pybel.readstring("smi", "CCOC=N")
res3 = CalculateFP3Fingerprint(mol)
print(res3)
print("-" * 25)
mol = Chem.MolFromSmiles("O=C1NC(=O)NC(=O)C1(C(C)C)CC=C")
res4 = CalculatePharm2D2pointFingerprint(mol)[0]
print(res4)
print("-" * 25)
res5 = CalculatePharm2D3pointFingerprint(mol)[0]
print(res5)
print("-" * 25)
res6 = CalculateGhoseCrippenFingerprint(mol)
print(res6)
print("-" * 25)
res7 = CalculatePubChemFingerprint(mol)
print(res7)
print("-" * 10 + "END" + "-" * 10)
| bsd-3-clause | 1,747,260,672,386,670,300 | 28.090387 | 120 | 0.584435 | false |
Royal-Society-of-New-Zealand/NZ-ORCID-Hub | orcid_api_v3/models/invited_positions_v30.py | 1 | 5255 | # coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.affiliation_group_v30_invited_position_summary_v30 import AffiliationGroupV30InvitedPositionSummaryV30 # noqa: F401,E501
from orcid_api_v3.models.last_modified_date_v30 import LastModifiedDateV30 # noqa: F401,E501
class InvitedPositionsV30(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'last_modified_date': 'LastModifiedDateV30',
'affiliation_group': 'list[AffiliationGroupV30InvitedPositionSummaryV30]',
'path': 'str'
}
attribute_map = {
'last_modified_date': 'last-modified-date',
'affiliation_group': 'affiliation-group',
'path': 'path'
}
def __init__(self, last_modified_date=None, affiliation_group=None, path=None): # noqa: E501
"""InvitedPositionsV30 - a model defined in Swagger""" # noqa: E501
self._last_modified_date = None
self._affiliation_group = None
self._path = None
self.discriminator = None
if last_modified_date is not None:
self.last_modified_date = last_modified_date
if affiliation_group is not None:
self.affiliation_group = affiliation_group
if path is not None:
self.path = path
@property
def last_modified_date(self):
"""Gets the last_modified_date of this InvitedPositionsV30. # noqa: E501
:return: The last_modified_date of this InvitedPositionsV30. # noqa: E501
:rtype: LastModifiedDateV30
"""
return self._last_modified_date
@last_modified_date.setter
def last_modified_date(self, last_modified_date):
"""Sets the last_modified_date of this InvitedPositionsV30.
:param last_modified_date: The last_modified_date of this InvitedPositionsV30. # noqa: E501
:type: LastModifiedDateV30
"""
self._last_modified_date = last_modified_date
@property
def affiliation_group(self):
"""Gets the affiliation_group of this InvitedPositionsV30. # noqa: E501
:return: The affiliation_group of this InvitedPositionsV30. # noqa: E501
:rtype: list[AffiliationGroupV30InvitedPositionSummaryV30]
"""
return self._affiliation_group
@affiliation_group.setter
def affiliation_group(self, affiliation_group):
"""Sets the affiliation_group of this InvitedPositionsV30.
:param affiliation_group: The affiliation_group of this InvitedPositionsV30. # noqa: E501
:type: list[AffiliationGroupV30InvitedPositionSummaryV30]
"""
self._affiliation_group = affiliation_group
@property
def path(self):
"""Gets the path of this InvitedPositionsV30. # noqa: E501
:return: The path of this InvitedPositionsV30. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this InvitedPositionsV30.
:param path: The path of this InvitedPositionsV30. # noqa: E501
:type: str
"""
self._path = path
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(InvitedPositionsV30, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InvitedPositionsV30):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| mit | 465,823,469,642,113,860 | 30.848485 | 146 | 0.603616 | false |
BitTigerInst/MonkeyKing_crawler_recommender | crawler/xiaomi_appstore_crawler/xiaomi_appstore_crawler/spiders/xiaomi_spider.py | 1 | 1999 | import scrapy
from scrapy.spiders import Spider
from scrapy import Request
import re
from scrapy.selector import Selector
from xiaomi_appstore_crawler.items import XiaomiAppstoreCrawlerItem
class XiaomiSpider(Spider):
name = "xiaomi"
allowed_domains = ["app.mi.com"]
start_urls = [
"http://app.mi.com/topList?page=1"
]
def parse(self, response):
#import pudb; pu.db
page = Selector(response)
page_nexts = page.xpath('//div[@class="pages"]/a')
page_max = int(page_nexts[-2].xpath('text()').extract_first())
for page_id in xrange(1, 2): #xrange(1, page_max + 1):
url = '{0}{1}'.format('http://app.mi.com/topList?page=', str(page_id))
yield scrapy.Request(url, callback=self.parse_page)
def parse_page(self, response):
page = Selector(response)
lis = page.xpath('//ul[@class="applist"]/li')
if lis == None:
return
url_common = 'http://app.mi.com'
for li in lis:
item = XiaomiAppstoreCrawlerItem()
item['title'] = li.xpath('./h5/a/text()').extract_first().encode('utf-8')
url = li.xpath('./h5/a/@href').extract_first()
appid = re.match(r'/detail/(.*)', url).group(1)
item['appid'] = appid
# import pudb; pu.db
req = scrapy.Request(url_common + url, callback=self.parse_details)
req.meta["item"] = item
yield req
def parse_details(self, response):
item = response.meta["item"]
page = Selector(response)
lis = page.xpath('//div[@class="second-imgbox"]/ul/li')
recommended = []
for li in lis:
url = li.xpath('./a/@href').extract_first()
appid = re.match(r'/detail/(.*)', url).group(1)
recommended.append(appid)
item['recommended'] = recommended
#import pudb; pu.db
yield item
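# Rough usage sketch (assuming a standard Scrapy project layout around this spider):
#   scrapy crawl xiaomi -o apps.json
# would walk the top-list pages, follow each app's detail page and emit one item per
# app together with the appids of its "recommended" apps.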
| mit | -4,969,427,099,541,880,000 | 31.770492 | 85 | 0.550775 | false |
manahl/PythonTrainingExercises | Beginners/BuiltinFunctions/solution.py | 1 | 2913 | """Builtin functions problems.
It might be useful to browse here (adjust your version to suit):
https://docs.python.org/2.7/library/functions.html
1. Create the sequence [0, 3, 6, 9, ... N]. What is the problem if N is very large?
Is there a better way if N is very large?
2. Find the difference between the biggest and smallest values in the list
[4, 3, -9, 21, 0]
3. The same as 2. but use the absolute values in the list.
4. Convert a list:
['Zero', 'One', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine']
To a dictionary:
{
0 : 'Zero',
1 : 'One',
2 : 'Two',
3 : 'Three',
4 : 'Four',
5 : 'Five',
6 : 'Six',
7 : 'Seven',
8 : 'Eight',
9 : 'Nine',
}
5. I have two lists a and b. Is there a way that I can tell if they are the same
list? For example in the following case they are the same list:
a = [1, 2, 3]
b = a
And any change to b will be 'seen' by a.
However in this case a and b are not the same list in the sense that any change
to b will NOT be 'seen' by a.
a = [1, 2, 3]
b = [1, 2, 3]
Created on 22 Feb 2016
@author: paulross
"""
import sys
import pytest
def create_sequence(N):
"""Create the 3x table up to and including N."""
return range(0, N + 3, 3)
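# Note on problem 1: for a very large N the list built by range() (Python 2) is held
# entirely in memory; a lazier sketch would be a generator such as
#   def create_sequence_lazy(N):
#       return (i for i in xrange(0, N + 3, 3))   # or range() on Python 3
# which yields the values one at a time instead of materialising the whole sequence.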
def range_of_list():
"""Return the difference between the largest and smallest values in a list."""
x = [4, 3, -9, 21, 0]
return max(x) - min(x)
def range_of_list_abs():
"""Return the difference between the largest and smallest absolute values in a list."""
x = [4, 3, -9, 21, 0]
abs_x = [abs(value) for value in x]
return max(abs_x) - min(abs_x)
def list_to_sequence_dict():
"""Create a dictionary where the key is the ordinal of the object in the list
and the value is the object itself. For example: {0 : 'Zero', 1 : 'One', ...}"""
x = ['Zero', 'One', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine']
d = {}
for index, value in enumerate(x):
d[index] = value
return d
def is_same(a, b):
"""Return True is the two items are the same."""
# This is the same as:
# return a is b
return id(a) == id(b)
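# For example, is_same([1, 2, 3], [1, 2, 3]) is False even though the two lists compare
# equal with ==, because identity (id()/'is') checks whether both names point at the
# same object, not whether the values match.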
#=========== Tests ===================
def test_create_sequence():
assert create_sequence(12) == [0, 3, 6, 9, 12]
def test_range_of_list():
assert range_of_list() == 30
def test_range_of_list_abs():
assert range_of_list_abs() == 21
def test_list_to_sequence_dict():
expected = {
0 : 'Zero',
1 : 'One',
2 : 'Two',
3 : 'Three',
4 : 'Four',
5 : 'Five',
6 : 'Six',
7 : 'Seven',
8 : 'Eight',
9 : 'Nine',
}
assert list_to_sequence_dict() == expected
def test_is_same():
a = [1, 2, 3]
b = a
assert is_same(a, b)
b = [1, 2, 3]
assert not is_same(a, b)
def main():
return pytest.main(__file__)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | -5,869,194,093,357,374,000 | 24.778761 | 91 | 0.567113 | false |
RevanProdigalKnight/sublimetext-codeformatter | codeformatter/lib/coldfusionbeautifier/__init__.py | 1 | 10949 | from __future__ import print_function
import sys
import re
import sublime
try:
# Python 3
from .__version__ import __version__
except (ValueError):
# Python 2
from __version__ import __version__
class BeautifierOptions:
def __init__(self):
self.indent_size = 4
self.indent_char = ' '
self.indent_with_tabs = False
self.expand_tags = False
        self.expand_javascript = False
self.minimum_attribute_count = 2
self.first_attribute_on_new_line = False
self.reduce_empty_tags = False
self.exception_on_tag_mismatch = False
self.custom_singletons = ''
def __repr__(self):
return """indent_size = %d
indent_char = [%s]
indent_with_tabs = [%s]
expand_tags = [%s]
minimum_attribute_count = %d
first_attribute_on_new_line = [%s]
reduce_empty_tags = [%s]
exception_on_tag_mismatch = [%s]
custom_singletons = [%s]""" % (self.indent_size, self.indent_char, self.indent_with_tabs, self.expand_tags, self.minimum_attribute_count, self.first_attribute_on_new_line, self.reduce_empty_tags, self.exception_on_tag_mismatch, self.custom_singletons)
def default_options():
return BeautifierOptions()
def beautify(string, opts=default_options()):
b = Beautifier(string, opts)
return b.beautify()
def beautify_file(file_name, opts=default_options()):
if file_name == '-': # stdin
stream = sys.stdin
else:
stream = open(file_name)
content = ''.join(stream.readlines())
b = Beautifier(content, opts)
return b.beautify()
def usage(stream=sys.stdout):
print("coldfusionbeautifier.py@" + __version__ + "\nColdfusion beautifier (http://jsbeautifier.org/)\n", file=stream)
    if stream == sys.stderr: return 1
    else: return 0
class Beautifier:
def __init__(self, source_text, opts=default_options()):
self.source_text = source_text
self.opts = opts
self.exception_on_tag_mismatch = opts.exception_on_tag_mismatch
self.expand_tags = opts.expand_tags
self.expand_javascript = opts.expand_javascript
self.minimum_attribute_count = opts.minimum_attribute_count
self.first_attribute_on_new_line = opts.first_attribute_on_new_line
self.reduce_empty_tags = opts.reduce_empty_tags
self.indent_size = opts.indent_size
self.indent_char = opts.indent_char
self.indent_with_tabs = opts.indent_with_tabs
if self.indent_with_tabs:
self.indent_char = "\t"
self.indent_size = 1
self.tab_size = sublime.load_settings('Preferences.sublime-settings').get('tab_size',4)
self.indent_level = 0
# These are the tags that are currently defined as being void by the HTML5 spec, and should be self-closing (a.k.a. singletons)
self.singletons = r'<(area|base|br|col|command|embed|hr|img|input|keygen|link|meta|param|source|track|wbr|cf(?:abort|admin|applet|argument|associate|authenticate|break|content|continue|cookie|directory|document|documentitem|documentsection|dump|error|execute|exit|file|flush|header|httpparam|import|include|index|invoke|invokeargument|ldap|location|log|mailparam|object|objectcache|param|processingdirective|property|queryparam|rethrow|return|retry|schedule|set|setting|thread|throw)<%= custom %>)([^>]*?)/?>(?:\s*?</\1>)?'
if not opts.custom_singletons == '':
self.singletons = re.sub(r'<%= custom %>','|' + opts.custom_singletons,self.singletons)
else:
self.singletons = re.sub(r'<%= custom %>','',self.singletons)
self.midle_tags = r'<cf(else|elseif)([^>]*)>'
# Compile singletons regex since it's used so often (twice before the loop, then once per loop iteration)
self.singletons = re.compile(self.singletons,re.I)
self.removed_css = []
self.removed_js = []
self.removed_comments = []
def expand_tag(self,str):
_str = str.group(0) # cache the original string in a variable for faster access
s = re.findall(r'([\w\-]+(?:=(?:"[^"]*"|\'[^\']*\'))?)',_str)
# If the tag has fewer than "minimum_attribute_count" attributes, leave it alone
if len(s) <= self.minimum_attribute_count: return _str
tagEnd = re.search(r'/?>$',_str)
if not tagEnd == None: s += [tagEnd.group(0)] # Append the end of the tag to the array of attributes
tag = '<' + s[0] # The '<' at the beginning of a tag is not included in the regex match
indent = len(tag) + 1 # include the space after the tag name, this is not included in the regex
s = s[1:] # pop the tag name off of the attribute array - we don't need it any more
# Calculate how much to indent each line
if self.first_attribute_on_new_line: # If we're putting all the attributes on their own line, only use 1 indentation unit
if self.indent_with_tabs:
indent = 0
extra_tabs = 1
else:
indent = self.indent_size
extra_tabs = 0
else: # Otherwise, align the attributes with the beginning of the first attribute after the tag name
if self.indent_with_tabs:
extra_tabs = int(indent / self.tab_size)
indent = indent % self.tab_size
else:
extra_tabs = 0
tag += ' ' + s[0]
s = s[1:] # Go ahead and pop the first attribute off the array so that we don't duplicate it in the loop below
# For each attribute in the list, append a newline and indentation followed by the attribute (or the end of the tag)
for l in s:
tag += '\n' + (((self.indent_level * self.indent_size) + extra_tabs) * self.indent_char) + (indent * ' ') + l
return tag
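    # Illustrative sketch (input made up): when expand_tags is enabled and a tag has more
    # than minimum_attribute_count attributes, e.g.
    #   <cfargument name="id" type="numeric" required="true">
    # expand_tag() keeps the tag name and first attribute on one line and puts every further
    # attribute (and the closing ">") on its own line, aligned according to the indent settings.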
def remove_newlines(self,ch=''): return lambda str: re.sub(r'\n\s*',ch,str.group(0))
def remove(self,pattern,replacement,findList,raw):
pattern = re.compile(r'(?<=\n)\s*?' + pattern,re.S|re.I)
findList.extend(pattern.findall(raw))
return pattern.sub((lambda match: match.group(0)[:-len(match.group(0).lstrip())] + replacement),raw) # Preserve the indentation from the beginning of the match
def remove_js(self,raw): return self.remove(r'<script[^>]*>.*?</script>','/* SCRIPT */',self.removed_js,raw)
def remove_css(self,raw): return self.remove(r'<style[^>]*>.*?</style>','/* STYLE */',self.removed_css,raw)
def remove_comments(self,raw): return self.remove(r'<!---.*?--->','/* COMMENT */',self.removed_comments,raw)
def reindent(self,raw,match):
prev_newline = r'(?<=\n)'
lowest_indent = -1
for l in re.split(r'\n',raw):
indent = len(l) - len(l.strip())
if lowest_indent == -1 or lowest_indent > indent:
lowest_indent = indent
indent = len(match.group(1)) * self.indent_char
return indent + re.sub(prev_newline,indent,re.sub(prev_newline + (lowest_indent * self.indent_char),'',raw.lstrip())); # Force new indentation
def getNextFrom(self,_list):
it = iter(_list)
return lambda match: self.reindent(next(it),match)
def replace(self,pattern,replaceList,raw): return re.compile(r'(?<=\n)(\s*?)' + pattern,re.S|re.I).sub(self.getNextFrom(replaceList),raw)
def replace_comments(self,raw): return self.replace(r'/\* COMMENT \*/',self.removed_comments,raw)
def replace_css(self,raw): return self.replace(r'/\* STYLE \*/',self.removed_css,raw)
def replace_js(self,raw): return self.replace(r'/\* SCRIPT \*/',self.removed_js,raw)
def beautify(self):
beautiful = ''
replaceWithSpace = self.remove_newlines(' ')
raw = self.source_text
# Remove JS, CSS, and comments from raw source
raw = self.remove_js(raw)
raw = self.remove_css(raw)
raw = self.remove_comments(raw)
# Add newlines before/after tags (excluding CDATA). This separates single-line HTML comments into 3 lines as well
raw = re.sub(r'(<[^! ]|(?<!/\*|//)\]\]>|(?<!<!\[endif\])--->)',r'\n\1',raw)
raw = re.sub(r'(>|(?<!/\*|//)<!\[CDATA\[|<!---(?!\[if .+?\]>))',r'\1\n',raw)
# Fix AngularJS/Blade/etc brace ({{}}, {{::}}, etc) templates that will have been broken into multiple lines
raw = re.sub(r'(\{{2,}(?:::)?)\s?(.*?)\s?(\}{2,})',r'\1 \2 \3',re.sub(r'\{(?:\s*\{)+\s?[\s\S]*?\s?\}(?:\s*\})+',self.remove_newlines(),raw))
raw = re.sub(r'"[^"]*"',replaceWithSpace,raw) # Put all content between double-quote marks back on the same line
# Re-join start tags that are already on multiple lines (ignore end tags)
raw = re.compile(r'(?<=\n)<(?!/).*?>(?=\n)',re.S).sub(replaceWithSpace,raw)
raw = self.singletons.sub(r'<\1\2/>',raw) # Replace all singleton tags with /-delimited tags (XHTML style)
raw = self.singletons.sub(replaceWithSpace,raw)
raw = re.sub(r'(?<!\s)\s(?=/?>)','',raw)
raw = re.sub(r'\n{2,}',r'\n',raw) # Replace multiple newlines with just one
for l in re.split('\n',raw):
l = l.strip() # Trim whitespace from the line
if l == '': continue # If the line has no content, skip
# If the line starts with </, or an end CDATA/block comment tag, reduce indentation
if re.match(r'</|]]>|(?:<!\[endif\])?--->',l) or re.search(self.midle_tags,l): self.indent_level -= 1
beautiful += (self.indent_char * self.indent_level * self.indent_size)
if self.expand_tags:
beautiful += re.sub(r'^<.*>$',self.expand_tag,l)
else:
beautiful += l
beautiful += '\n'
if self.singletons.search(l): pass # If the tag is a singleton, indentation stays the same
elif re.search(self.midle_tags,l): self.indent_level += 1
else:
# If the line starts with a begin CDATA/block comment tag or a tag, indent the next line
if re.match(r'<!---|<!\[CDATA\[|<[^/?! ]',l): self.indent_level += 1
# If the end of the document is not at the same indentation as the beginning, the tags aren't matched
if not self.indent_level == 0 and self.exception_on_tag_mismatch:
raise Exception("Mismatched tags")
# Put all matched start/end tags with no content between them on the same line and return
if self.reduce_empty_tags:
beautiful = re.sub(r'<(\S+)([^>]*)>\s+</\1>',r'<\1\2></\1>',beautiful)
# Replace JS, CSS, and comments in the opposite order of their removal
beautiful = self.replace_comments(beautiful)
beautiful = self.replace_css(beautiful)
beautiful = self.replace_js(beautiful)
return beautiful
| mit | -7,164,027,467,788,904,000 | 50.646226 | 531 | 0.60179 | false |
qspin/qtaste | TestSuites/TestSuite_QTaste/EngineSuite/QTASTE_DATA/QTASTE_DATA_03/TestScript.py | 1 | 1425 | # coding=utf-8
# Copyright 2007-2009 QSpin - www.qspin.be
#
# This file is part of QTaste framework.
#
# QTaste is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# QTaste is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with QTaste. If not, see <http://www.gnu.org/licenses/>.
##
# QTaste Data driven test: Check the default TIMEOUT value.
# <p>
# This test case verifies that the TIMEOUT data value defaults to 60 seconds when it is not defined in the CSV file.<p>
# It runs a test case that takes longer than that default TIMEOUT.
# @preparation None
##
from qtaste import *
def Step1():
"""
@step Call the verb neverReturn()
@expected Test is "Failed", reason: <i>Test execution timeout.</i><p>
Script call stack is reported.<p>
Elapsed time is more or less 60 seconds.
"""
testAPI.getEngineTest().neverReturn()
doStep(Step1)
| lgpl-3.0 | -1,825,885,832,837,244,200 | 34.625 | 133 | 0.698947 | false |
spwilson2/cache-my-subreddit | cms/reddit.py | 1 | 6083 | import requests
import time
import re
import praw
from cms.util import BeautifulSoup, _FAKE_HEADERS
SUBMITTED_FMT = 'https://www.reddit.com/user/%s/submitted/'
SUBREDDIT_FMT = 'https://www.reddit.com/r/%s/'
USER_AGENT='sheenrocks\' user agent'
RATE_LIMIT = 1
class Reddit(object):
def __init__(self, username=None, password=None, client_id=None, client_secret=None):
"""Log in to reddit using the given credentials."""
self._username = username
self._password = password
self._client_id = client_id
self._client_secret = client_secret
self._cookies=None
self._authenticate()
# Allow us to request instantly after setup.
self._last_request = time.time() - RATE_LIMIT
def _authenticate(self):
response = requests.post(
'https://www.reddit.com/api/login',
{'user': self._username, 'passwd': self._password},
headers = _FAKE_HEADERS
)
self._cookies = response.cookies
if self._client_id:
self._reddit = praw.Reddit(user_agent=USER_AGENT,
client_id=self._client_id,
client_secret=self._client_secret,
username=self._username,
password=self._password)
def _get_url(self, url):
"""Return the response from getting the url as the signed in user."""
# Rate-limit by sleeping if we need to.
time_left = self._last_request + RATE_LIMIT - time.time()
if time_left > 0:
print('Rate limiting; sleeping for %ss.' % time_left)
time.sleep(time_left)
self._last_request = time.time()
print('GETting: ', str(url))
return requests.get(
url,
cookies=self._cookies,
headers=_FAKE_HEADERS
)
def list_friends(self):
friends = self._reddit.user.friends()
friends.limit = 1000
for friend in friends:
yield friend.name
def _submissions(self, url, user=None):
next_submitted_url = url
while next_submitted_url:
submissions_html = self._get_url(next_submitted_url).text
bs_obj = BeautifulSoup(submissions_html)
submissions = _get_metadata(bs_obj, user)
for submission in submissions:
yield submission
next_submitted_url = _get_next_link(bs_obj)
def subreddit_submissions(self, subreddit, limit=10):
listings = self._reddit.subreddit(subreddit).hot()
listings.limit = limit
count = 0
for listing in listings:
count += 1
if count > limit:
return
yield Post.wrap(listing)
def user_submissions(self, user, limit=10000):
count = 0
submissions = self._submissions(SUBMITTED_FMT % user, user)
for submission in submissions:
count += 1
if count > limit:
return
yield submission
class Post(object):
def __init__(self, title, author, url, shortlink, subreddit):
self.title = title
self.author = author
self.url = url
self.shortlink = shortlink
self.subreddit = subreddit
@staticmethod
def wrap(post):
return Post(post.title, post.author.name, post.url, post.shortlink, post.subreddit.display_name)
def _get_next_link(bs_obj):
possible_link = bs_obj.find('a', rel='nofollow next')
if possible_link is not None:
return possible_link['href']
class LinkParser(object):
def __init__(self):
pass
def _get_metadata(bs_obj, user=None):
title_link_url_anchors = None
if user is None:
user_anchors = bs_obj.find_all('a', class_="author friend may-blank")
subreddit_anchors = bs_obj.find_all('a', class_="subreddit hover may-blank")
post_url_anchors = bs_obj.find_all('a', class_="bylink comments may-blank")
title_link_url_anchors = bs_obj.find_all('a', class_="title may-blank outbound")
if not title_link_url_anchors:
title_link_url_anchors = bs_obj.find_all('a', class_="title may-blank loggedin outbound")
# (title, url) generator
titles_links = ((anchor.text, anchor['href']) for anchor in title_link_url_anchors)
post_urls = [anchor.text for anchor in post_url_anchors]
# Ignore the /r/.
subreddits = (anchor.text[2:].replace('/','') for anchor in subreddit_anchors)
if user is None:
users = (anchor.text for anchor in user_anchors)
else:
users = (user for _ in post_urls)
metadata_list = []
for submission in zip(titles_links, post_urls, subreddits, users):
(title, link_url), post_url, post_subreddit, user = submission
metadata_list.append(Post(title, user, link_url, post_url, post_subreddit))
#subreddit:
# <a href="https://www.reddit.com/r/GoneMild/" class="subreddit hover may-blank">/r/GoneMild</a>
#post_url:
# <a href="/r/GoneMild/comments/5jn5ao/about_to_work_out/" data-inbound-url="/r/GoneMild/comments/5jn5ao/about_to_work_out/?utm_content=comments&utm_medium=user&utm_source=reddit&utm_name=frontpage" data-href-url="/r/GoneMild/comments/5jn5ao/about_to_work_out/" data-event-action="comments" class="bylink comments may-blank" rel="nofollow">7 comments</a>
#link_url:
# <a class="title may-blank loggedin outbound" data-event-action="title" href="http://i.imgur.com/fnXnhfK.jpg" tabindex="1" data-href-url="http://i.imgur.com/fnXnhfK.jpg" data-outbound-url="https://out.reddit.com/t3_5jn5ao?url=http%3A%2F%2Fi.imgur.com%2FfnXnhfK.jpg&token=AQAAq-ZdWMcR1gXU5EWru4O3HuYimaam0xNWwa2a_pGd08Drf1wN&app_name=reddit.com" data-outbound-expiration="1482548907000" rel="">About to work out :)</a>
return metadata_list
if __name__ == '__main__':
val = requests.get('https://www.reddit.com/r/AskReddit/').text
bs_obj = BeautifulSoup(val)
res = bs_obj.find_all('a', class_="title")
| mit | 4,649,188,721,936,869,000 | 36.319018 | 432 | 0.616143 | false |
datawire/ambassador | vendor/github.com/envoyproxy/protoc-gen-validate/validate/validator.py | 1 | 42082 | import re
from validate_email import validate_email
import ipaddress
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
import uuid
import struct
from jinja2 import Template
import time
import sys
printer = ""
# Well known regex mapping.
regex_map = {
"UNKNOWN": "",
"HTTP_HEADER_NAME": r'^:?[0-9a-zA-Z!#$%&\'*+-.^_|~\x60]+$',
"HTTP_HEADER_VALUE": r'^[^\u0000-\u0008\u000A-\u001F\u007F]*$',
"HEADER_STRING": r'^[^\u0000\u000A\u000D]*$'
}
def validate(proto_message):
func = file_template(proto_message)
global printer
printer += func + "\n"
exec(func)
try:
return generate_validate
except NameError:
return locals()['generate_validate']
def print_validate(proto_message):
return "".join([s for s in printer.splitlines(True) if s.strip()])
def has_validate(field):
if field.GetOptions() is None:
return False
for option_descriptor, option_value in field.GetOptions().ListFields():
if option_descriptor.full_name == "validate.rules":
return True
return False
def byte_len(s):
try:
return len(s.encode('utf-8'))
except:
return len(s)
def _validateHostName(host):
if len(host) > 253:
return False
s = host.rsplit(".",1)[0].lower()
for part in s.split("."):
if len(part) == 0 or len(part) > 63:
return False
# Host names cannot begin or end with hyphens
if s[0] == "-" or s[len(s)-1] == '-':
return False
for r in part:
if (r < 'A' or r > 'Z') and (r < 'a' or r > 'z') and (r < '0' or r > '9') and r != '-':
return False
return True
def _validateEmail(addr):
if '<' in addr and '>' in addr: addr = addr.split("<")[1].split(">")[0]
if not validate_email(addr):
return False
if len(addr) > 254:
return False
parts = addr.split("@")
if len(parts[0]) > 64:
return False
return _validateHostName(parts[1])
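# For example, _validateEmail("Jane Doe <jane@example.com>") first strips the display
# name, then checks the overall length, the local part (max 64 chars) and finally the
# host part via _validateHostName().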
def _has_field(message_pb, property_name):
# NOTE: As of proto3, HasField() only works for message fields, not for
# singular (non-message) fields. First try to use HasField and
# if it fails (with a ValueError) we manually consult the fields.
try:
return message_pb.HasField(property_name)
except:
all_fields = set([field.name for field in message_pb.DESCRIPTOR.fields])
return property_name in all_fields
def const_template(option_value, name):
const_tmpl = """{%- if str(o.string) and o.string.HasField('const') -%}
if {{ name }} != \"{{ o.string['const'] }}\":
raise ValidationFailed(\"{{ name }} not equal to {{ o.string['const'] }}\")
{%- elif str(o.bool) and o.bool['const'] != "" -%}
if {{ name }} != {{ o.bool['const'] }}:
raise ValidationFailed(\"{{ name }} not equal to {{ o.bool['const'] }}\")
{%- elif str(o.enum) and o.enum['const'] -%}
if {{ name }} != {{ o.enum['const'] }}:
raise ValidationFailed(\"{{ name }} not equal to {{ o.enum['const'] }}\")
{%- elif str(o.bytes) and o.bytes.HasField('const') -%}
{% if sys.version_info[0] >= 3 %}
if {{ name }} != {{ o.bytes['const'] }}:
raise ValidationFailed(\"{{ name }} not equal to {{ o.bytes['const'] }}\")
{% else %}
if {{ name }} != b\"{{ o.bytes['const'].encode('string_escape') }}\":
raise ValidationFailed(\"{{ name }} not equal to {{ o.bytes['const'].encode('string_escape') }}\")
{% endif %}
{%- endif -%}
"""
return Template(const_tmpl).render(sys = sys, o = option_value, name = name, str = str)
def in_template(value, name):
in_tmpl = """
{%- if value['in'] %}
if {{ name }} not in {{ value['in'] }}:
raise ValidationFailed(\"{{ name }} not in {{ value['in'] }}\")
{%- endif -%}
{%- if value['not_in'] %}
if {{ name }} in {{ value['not_in'] }}:
raise ValidationFailed(\"{{ name }} in {{ value['not_in'] }}\")
{%- endif -%}
"""
return Template(in_tmpl).render(value = value, name = name)
def string_template(option_value, name):
if option_value.string.well_known_regex:
known_regex_type = option_value.string.DESCRIPTOR.fields_by_name['well_known_regex'].enum_type
regex_value = option_value.string.well_known_regex
regex_name = known_regex_type.values_by_number[regex_value].name
if regex_name in ["HTTP_HEADER_NAME", "HTTP_HEADER_VALUE"] and not option_value.string.strict:
option_value.string.pattern = regex_map["HEADER_STRING"]
else:
option_value.string.pattern = regex_map[regex_name]
str_templ = """
{{ const_template(o, name) -}}
{{ in_template(o.string, name) -}}
{%- set s = o.string -%}
{%- if s['len'] %}
if len({{ name }}) != {{ s['len'] }}:
raise ValidationFailed(\"{{ name }} length does not equal {{ s['len'] }}\")
{%- endif -%}
{%- if s['min_len'] %}
if len({{ name }}) < {{ s['min_len'] }}:
raise ValidationFailed(\"{{ name }} length is less than {{ s['min_len'] }}\")
{%- endif -%}
{%- if s['max_len'] %}
if len({{ name }}) > {{ s['max_len'] }}:
raise ValidationFailed(\"{{ name }} length is more than {{ s['max_len'] }}\")
{%- endif -%}
{%- if s['len_bytes'] %}
if byte_len({{ name }}) != {{ s['len_bytes'] }}:
raise ValidationFailed(\"{{ name }} length does not equal {{ s['len_bytes'] }}\")
{%- endif -%}
{%- if s['min_bytes'] %}
if byte_len({{ name }}) < {{ s['min_bytes'] }}:
raise ValidationFailed(\"{{ name }} length is less than {{ s['min_bytes'] }}\")
{%- endif -%}
{%- if s['max_bytes'] %}
if byte_len({{ name }}) > {{ s['max_bytes'] }}:
raise ValidationFailed(\"{{ name }} length is greater than {{ s['max_bytes'] }}\")
{%- endif -%}
{%- if s['pattern'] %}
if re.search(r\'{{ s['pattern'] }}\', {{ name }}) is None:
raise ValidationFailed(\"{{ name }} pattern does not match {{ s['pattern'] }}\")
{%- endif -%}
{%- if s['prefix'] %}
if not {{ name }}.startswith(\"{{ s['prefix'] }}\"):
raise ValidationFailed(\"{{ name }} does not start with prefix {{ s['prefix'] }}\")
{%- endif -%}
{%- if s['suffix'] %}
if not {{ name }}.endswith(\"{{ s['suffix'] }}\"):
raise ValidationFailed(\"{{ name }} does not end with suffix {{ s['suffix'] }}\")
{%- endif -%}
{%- if s['contains'] %}
if not \"{{ s['contains'] }}\" in {{ name }}:
raise ValidationFailed(\"{{ name }} does not contain {{ s['contains'] }}\")
{%- endif -%}
{%- if s['not_contains'] %}
if \"{{ s['not_contains'] }}\" in {{ name }}:
raise ValidationFailed(\"{{ name }} contains {{ s['not_contains'] }}\")
{%- endif -%}
{%- if s['email'] %}
if not _validateEmail({{ name }}):
raise ValidationFailed(\"{{ name }} is not a valid email\")
{%- endif -%}
{%- if s['hostname'] %}
if not _validateHostName({{ name }}):
raise ValidationFailed(\"{{ name }} is not a valid email\")
{%- endif -%}
{%- if s['address'] %}
try:
ipaddress.ip_address({{ name }})
except ValueError:
if not _validateHostName({{ name }}):
raise ValidationFailed(\"{{ name }} is not a valid address\")
{%- endif -%}
{%- if s['ip'] %}
try:
ipaddress.ip_address({{ name }})
except ValueError:
raise ValidationFailed(\"{{ name }} is not a valid ip\")
{%- endif -%}
{%- if s['ipv4'] %}
try:
ipaddress.IPv4Address({{ name }})
except ValueError:
raise ValidationFailed(\"{{ name }} is not a valid ipv4\")
{%- endif -%}
{%- if s['ipv6'] %}
try:
ipaddress.IPv6Address({{ name }})
except ValueError:
raise ValidationFailed(\"{{ name }} is not a valid ipv6\")
{%- endif %}
{%- if s['uri'] %}
url = urlparse.urlparse({{ name }})
if not all([url.scheme, url.netloc, url.path]):
raise ValidationFailed(\"{{ name }} is not a valid uri\")
{%- endif %}
{%- if s['uri_ref'] %}
url = urlparse.urlparse({{ name }})
if not all([url.scheme, url.path]) and url.fragment:
raise ValidationFailed(\"{{ name }} is not a valid uri ref\")
{%- endif -%}
{%- if s['uuid'] %}
try:
uuid.UUID({{ name }})
except ValueError:
raise ValidationFailed(\"{{ name }} is not a valid UUID\")
{%- endif -%}
"""
return Template(str_templ).render(o = option_value, name = name, const_template = const_template, in_template = in_template)
def required_template(value, name):
req_tmpl = """{%- if value['required'] -%}
if not _has_field(p, \"{{ name.split('.')[-1] }}\"):
raise ValidationFailed(\"{{ name }} is required.\")
{%- endif -%}
"""
return Template(req_tmpl).render(value = value, name = name)
def message_template(option_value, name, repeated = False):
message_tmpl = """{%- if m.message %}
{{- required_template(m.message, name) }}
{%- endif -%}
{%- if m.message and m.message['skip'] %}
# Skipping validation for {{ name }}
{%- else %}
{% if repeated %}
if {{ name }}:
{% else %}
if _has_field(p, \"{{ name.split('.')[-1] }}\"):
{% endif %}
embedded = validate(p.{{ name }})(p.{{ name }})
if embedded is not None:
return embedded
{%- endif -%}
"""
return Template(message_tmpl).render(m = option_value, name = name, required_template = required_template, repeated = repeated)
def bool_template(option_value, name):
bool_tmpl = """
{{ const_template(o, name) -}}
"""
return Template(bool_tmpl).render(o = option_value, name = name, const_template = const_template)
def num_template(option_value, name, num):
num_tmpl = """{%- if num.HasField('const') and str(o.float) == "" -%}
if {{ name }} != {{ num['const'] }}:
raise ValidationFailed(\"{{ name }} not equal to {{ num['const'] }}\")
{%- endif -%}
{%- if num.HasField('const') and str(o.float) != "" %}
if {{ name }} != struct.unpack(\"f\", struct.pack(\"f\", ({{ num['const'] }})))[0]:
raise ValidationFailed(\"{{ name }} not equal to {{ num['const'] }}\")
{%- endif -%}
{{ in_template(num, name) }}
{%- if num.HasField('lt') %}
{%- if num.HasField('gt') %}
{%- if num['lt'] > num['gt'] %}
if {{ name }} <= {{ num['gt'] }} or {{ name }} >= {{ num ['lt'] }}:
raise ValidationFailed(\"{{ name }} is not in range {{ num['lt'], num['gt'] }}\")
{%- else %}
if {{ name }} >= {{ num['lt'] }} and {{ name }} <= {{ num['gt'] }}:
raise ValidationFailed(\"{{ name }} is not in range {{ num['gt'], num['lt'] }}\")
{%- endif -%}
{%- elif num.HasField('gte') %}
{%- if num['lt'] > num['gte'] %}
if {{ name }} < {{ num['gte'] }} or {{ name }} >= {{ num ['lt'] }}:
raise ValidationFailed(\"{{ name }} is not in range {{ num['lt'], num['gte'] }}\")
{%- else %}
if {{ name }} >= {{ num['lt'] }} and {{ name }} < {{ num['gte'] }}:
raise ValidationFailed(\"{{ name }} is not in range {{ num['gte'], num['lt'] }}\")
{%- endif -%}
{%- else %}
if {{ name }} >= {{ num['lt'] }}:
raise ValidationFailed(\"{{ name }} is not lesser than {{ num['lt'] }}\")
{%- endif -%}
{%- elif num.HasField('lte') %}
{%- if num.HasField('gt') %}
{%- if num['lte'] > num['gt'] %}
if {{ name }} <= {{ num['gt'] }} or {{ name }} > {{ num ['lte'] }}:
raise ValidationFailed(\"{{ name }} is not in range {{ num['lte'], num['gt'] }}\")
{%- else %}
if {{ name }} > {{ num['lte'] }} and {{ name }} <= {{ num['gt'] }}:
raise ValidationFailed(\"{{ name }} is not in range {{ num['gt'], num['lte'] }}\")
{%- endif -%}
{%- elif num.HasField('gte') %}
{%- if num['lte'] > num['gte'] %}
if {{ name }} < {{ num['gte'] }} or {{ name }} > {{ num ['lte'] }}:
raise ValidationFailed(\"{{ name }} is not in range {{ num['lte'], num['gte'] }}\")
{%- else %}
if {{ name }} > {{ num['lte'] }} and {{ name }} < {{ num['gte'] }}:
raise ValidationFailed(\"{{ name }} is not in range {{ num['gte'], num['lte'] }}\")
{%- endif -%}
{%- else %}
if {{ name }} > {{ num['lte'] }}:
raise ValidationFailed(\"{{ name }} is not lesser than or equal to {{ num['lte'] }}\")
{%- endif -%}
{%- elif num.HasField('gt') %}
if {{ name }} <= {{ num['gt'] }}:
raise ValidationFailed(\"{{ name }} is not greater than {{ num['gt'] }}\")
{%- elif num.HasField('gte') %}
if {{ name }} < {{ num['gte'] }}:
raise ValidationFailed(\"{{ name }} is not greater than or equal to {{ num['gte'] }}\")
{%- endif -%}
"""
return Template(num_tmpl).render(o = option_value, name = name, num = num, in_template = in_template, str = str)
def dur_arr(dur):
value = 0
arr = []
for val in dur:
value += val.seconds
value += (10**-9 * val.nanos)
arr.append(value)
value = 0
return arr
def dur_lit(dur):
value = dur.seconds + (10**-9 * dur.nanos)
return value
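# e.g. a protobuf Duration with seconds=1 and nanos=500000000 becomes the float 1.5
# via dur_lit(), and dur_arr() applies the same conversion to each Duration in a list.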
def duration_template(option_value, name, repeated = False):
dur_tmpl = """
{{- required_template(o.duration, name) }}
{% if repeated %}
if {{ name }}:
{% else %}
if _has_field(p, \"{{ name.split('.')[-1] }}\"):
{% endif %}
dur = {{ name }}.seconds + round((10**-9 * {{ name }}.nanos), 9)
{%- set dur = o.duration -%}
{%- if dur.HasField('lt') %}
lt = {{ dur_lit(dur['lt']) }}
{% endif %}
{%- if dur.HasField('lte') %}
lte = {{ dur_lit(dur['lte']) }}
{% endif %}
{%- if dur.HasField('gt') %}
gt = {{ dur_lit(dur['gt']) }}
{% endif %}
{%- if dur.HasField('gte') %}
gte = {{ dur_lit(dur['gte']) }}
{% endif %}
{%- if dur.HasField('const') %}
if dur != {{ dur_lit(dur['const']) }}:
raise ValidationFailed(\"{{ name }} is not equal to {{ dur_lit(dur['const']) }}\")
{%- endif -%}
{%- if dur['in'] %}
if dur not in {{ dur_arr(dur['in']) }}:
raise ValidationFailed(\"{{ name }} is not in {{ dur_arr(dur['in']) }}\")
{%- endif -%}
{%- if dur['not_in'] %}
if dur in {{ dur_arr(dur['not_in']) }}:
raise ValidationFailed(\"{{ name }} is not in {{ dur_arr(dur['not_in']) }}\")
{%- endif -%}
{%- if dur.HasField('lt') %}
{%- if dur.HasField('gt') %}
{%- if dur_lit(dur['lt']) > dur_lit(dur['gt']) %}
if dur <= gt or dur >= lt:
raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(dur['lt']), dur_lit(dur['gt']) }}\")
{%- else -%}
if dur >= lt and dur <= gt:
raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(dur['gt']), dur_lit(dur['lt']) }}\")
{%- endif -%}
{%- elif dur.HasField('gte') %}
{%- if dur_lit(dur['lt']) > dur_lit(dur['gte']) %}
if dur < gte or dur >= lt:
raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(dur['lt']), dur_lit(dur['gte']) }}\")
{%- else -%}
if dur >= lt and dur < gte:
raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(dur['gte']), dur_lit(dur['lt']) }}\")
{%- endif -%}
{%- else -%}
if dur >= lt:
raise ValidationFailed(\"{{ name }} is not lesser than {{ dur_lit(dur['lt']) }}\")
{%- endif -%}
{%- elif dur.HasField('lte') %}
{%- if dur.HasField('gt') %}
{%- if dur_lit(dur['lte']) > dur_lit(dur['gt']) %}
if dur <= gt or dur > lte:
raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(dur['lte']), dur_lit(dur['gt']) }}\")
{%- else -%}
if dur > lte and dur <= gt:
raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(dur['gt']), dur_lit(dur['lte']) }}\")
{%- endif -%}
{%- elif dur.HasField('gte') %}
{%- if dur_lit(dur['lte']) > dur_lit(dur['gte']) %}
if dur < gte or dur > lte:
raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(dur['lte']), dur_lit(dur['gte']) }}\")
{%- else -%}
if dur > lte and dur < gte:
raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(dur['gte']), dur_lit(dur['lte']) }}\")
{%- endif -%}
{%- else -%}
if dur > lte:
raise ValidationFailed(\"{{ name }} is not lesser than or equal to {{ dur_lit(dur['lte']) }}\")
{%- endif -%}
{%- elif dur.HasField('gt') %}
if dur <= gt:
raise ValidationFailed(\"{{ name }} is not greater than {{ dur_lit(dur['gt']) }}\")
{%- elif dur.HasField('gte') %}
if dur < gte:
raise ValidationFailed(\"{{ name }} is not greater than or equal to {{ dur_lit(dur['gte']) }}\")
{%- endif -%}
"""
return Template(dur_tmpl).render(o = option_value, name = name, required_template = required_template, dur_lit = dur_lit, dur_arr = dur_arr, repeated = repeated)
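# timestamp_template mirrors duration_template (same const/in/range handling) and
# additionally supports lt_now/gt_now/within, which compare the value against
# time.time() when the generated validator runs.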
def timestamp_template(option_value, name, repeated = False):
timestamp_tmpl = """
{{- required_template(o.timestamp, name) }}
{% if repeated %}
if {{ name }}:
{% else %}
if _has_field(p, \"{{ name.split('.')[-1] }}\"):
{% endif %}
ts = {{ name }}.seconds + round((10**-9 * {{ name }}.nanos), 9)
{%- set ts = o.timestamp -%}
{%- if ts.HasField('lt') %}
lt = {{ dur_lit(ts['lt']) }}
{% endif -%}
{%- if ts.HasField('lte') %}
lte = {{ dur_lit(ts['lte']) }}
{% endif -%}
{%- if ts.HasField('gt') %}
gt = {{ dur_lit(ts['gt']) }}
{% endif -%}
{%- if ts.HasField('gte') %}
gte = {{ dur_lit(ts['gte']) }}
{% endif -%}
{%- if ts.HasField('const') %}
if ts != {{ dur_lit(ts['const']) }}:
raise ValidationFailed(\"{{ name }} is not equal to {{ dur_lit(ts['const']) }}\")
{% endif %}
{%- if ts['in'] %}
if ts not in {{ dur_arr(ts['in']) }}:
raise ValidationFailed(\"{{ name }} is not in {{ dur_arr(ts['in']) }}\")
{%- endif %}
{%- if ts['not_in'] %}
if ts in {{ dur_arr(ts['not_in']) }}:
raise ValidationFailed(\"{{ name }} is not in {{ dur_arr(ts['not_in']) }}\")
{%- endif %}
{%- if ts.HasField('lt') %}
{%- if ts.HasField('gt') %}
{%- if dur_lit(ts['lt']) > dur_lit(ts['gt']) %}
if ts <= gt or ts >= lt:
raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(ts['lt']), dur_lit(ts['gt']) }}\")
{%- else -%}
if ts >= lt and ts <= gt:
raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(ts['gt']), dur_lit(ts['lt']) }}\")
{%- endif -%}
{%- elif ts.HasField('gte') %}
{%- if dur_lit(ts['lt']) > dur_lit(ts['gte']) %}
if ts < gte or ts >= lt:
raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(ts['lt']), dur_lit(ts['gte']) }}\")
{%- else -%}
if ts >= lt and ts < gte:
raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(ts['gte']), dur_lit(ts['lt']) }}\")
{%- endif -%}
{%- else -%}
if ts >= lt:
raise ValidationFailed(\"{{ name }} is not lesser than {{ dur_lit(ts['lt']) }}\")
{%- endif -%}
{%- elif ts.HasField('lte') %}
{%- if ts.HasField('gt') %}
{%- if dur_lit(ts['lte']) > dur_lit(ts['gt']) %}
if ts <= gt or ts > lte:
raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(ts['lte']), dur_lit(ts['gt']) }}\")
{%- else -%}
if ts > lte and ts <= gt:
raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(ts['gt']), dur_lit(ts['lte']) }}\")
{%- endif -%}
{%- elif ts.HasField('gte') %}
{%- if dur_lit(ts['lte']) > dur_lit(ts['gte']) %}
if ts < gte or ts > lte:
raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(ts['lte']), dur_lit(ts['gte']) }}\")
{%- else -%}
if ts > lte and ts < gte:
raise ValidationFailed(\"{{ name }} is not in range {{ dur_lit(ts['gte']), dur_lit(ts['lte']) }}\")
{%- endif -%}
{%- else -%}
if ts > lte:
raise ValidationFailed(\"{{ name }} is not lesser than or equal to {{ dur_lit(ts['lte']) }}\")
{%- endif -%}
{%- elif ts.HasField('gt') %}
if ts <= gt:
raise ValidationFailed(\"{{ name }} is not greater than {{ dur_lit(ts['gt']) }}\")
{%- elif ts.HasField('gte') %}
if ts < gte:
raise ValidationFailed(\"{{ name }} is not greater than or equal to {{ dur_lit(ts['gte']) }}\")
{%- elif ts.HasField('lt_now') %}
now = time.time()
{%- if ts.HasField('within') %}
within = {{ dur_lit(ts['within']) }}
if ts >= now or ts >= now - within:
raise ValidationFailed(\"{{ name }} is not within range {{ dur_lit(ts['within']) }}\")
{%- else %}
if ts >= now:
raise ValidationFailed(\"{{ name }} is not lesser than now\")
{%- endif -%}
{%- elif ts.HasField('gt_now') %}
now = time.time()
{%- if ts.HasField('within') %}
within = {{ dur_lit(ts['within']) }}
if ts <= now or ts <= now + within:
raise ValidationFailed(\"{{ name }} is not within range {{ dur_lit(ts['within']) }}\")
{%- else %}
if ts <= now:
raise ValidationFailed(\"{{ name }} is not greater than now\")
{%- endif -%}
{%- elif ts.HasField('within') %}
now = time.time()
within = {{ dur_lit(ts['within']) }}
if ts >= now + within or ts <= now - within:
raise ValidationFailed(\"{{ name }} is not within range {{ dur_lit(ts['within']) }}\")
{%- endif -%}
"""
return Template(timestamp_tmpl).render(o = option_value, name = name, required_template = required_template, dur_lit = dur_lit, dur_arr = dur_arr, repeated = repeated)
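# wrapper_template handles wrapper-style messages such as google.protobuf.FloatValue:
# when the field is present it applies the matching scalar template to "<name>.value",
# and it raises if a wrapper marked as required is missing.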
def wrapper_template(option_value, name, repeated = False):
wrapper_tmpl = """
{% if repeated %}
if {{ name }}:
{% else %}
if p.HasField(\"{{ name[2:] }}\"):
{% endif %}
{%- if str(option_value.float) %}
{{- num_template(option_value, name + ".value", option_value.float)|indent(4,True) -}}
{% endif -%}
{%- if str(option_value.double) %}
{{- num_template(option_value, name + ".value", option_value.double)|indent(4,True) -}}
{% endif -%}
{%- if str(option_value.int32) %}
{{- num_template(option_value, name + ".value", option_value.int32)|indent(4,True) -}}
{% endif -%}
{%- if str(option_value.int64) %}
{{- num_template(option_value, name + ".value", option_value.int64)|indent(4,True) -}}
{% endif -%}
{%- if str(option_value.uint32) %}
{{- num_template(option_value, name + ".value", option_value.uint32)|indent(4,True) -}}
{% endif -%}
{%- if str(option_value.uint64) %}
{{- num_template(option_value, name + ".value", option_value.uint64)|indent(4,True) -}}
{% endif -%}
{%- if str(option_value.bool) %}
{{- bool_template(option_value, name + ".value")|indent(4,True) -}}
{% endif -%}
{%- if str(option_value.string) %}
{{- string_template(option_value, name + ".value")|indent(4,True) -}}
{% endif -%}
{%- if str(option_value.bytes) %}
{{- bytes_template(option_value, name + ".value")|indent(4,True) -}}
{% endif -%}
{%- if str(option_value.message) and option_value.message['required'] %}
else:
raise ValidationFailed(\"{{ name }} is required.\")
{%- endif %}
"""
return Template(wrapper_tmpl).render(option_value = option_value, name = name, str = str, num_template = num_template, bool_template = bool_template, string_template = string_template, bytes_template = bytes_template, repeated = repeated)
def enum_values(field):
return [x.number for x in field.enum_type.values]
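# enum_template reuses the const and in/not_in checks and, when defined_only is set,
# rejects any value that is not among the numbers declared by the field's enum type.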
def enum_template(option_value, name, field):
enum_tmpl = """
{{ const_template(option_value, name) -}}
{{ in_template(option_value.enum, name) -}}
{% if option_value.enum['defined_only'] %}
if {{ name }} not in {{ enum_values(field) }}:
raise ValidationFailed(\"{{ name }} is not defined\")
{% endif %}
"""
return Template(enum_tmpl).render(option_value = option_value, name = name, const_template = const_template, in_template = in_template, field = field, enum_values = enum_values)
def any_template(option_value, name, repeated = False):
any_tmpl = """
{{- required_template(o, name) }}
{%- if o['in'] %}
{% if repeated %}
if {{ name }}:
{% else %}
if _has_field(p, \"{{ name.split('.')[-1] }}\"):
{% endif %}
if {{ name }}.type_url not in {{ o['in'] }}:
raise ValidationFailed(\"{{ name }} not in {{ o['in'] }}\")
{%- endif %}
{%- if o['not_in'] %}
{% if repeated %}
if {{ name }}:
{% else %}
if _has_field(p, \"{{ name.split('.')[-1] }}\"):
{% endif %}
if {{ name }}.type_url in {{ o['not_in'] }}:
raise ValidationFailed(\"{{ name }} in {{ o['not_in'] }}\")
{%- endif %}
"""
return Template(any_tmpl).render(o = option_value.any, name = name, required_template = required_template, repeated = repeated)
def bytes_template(option_value, name):
bytes_tmpl = """
{{ const_template(o, name) -}}
{{ in_template(o.bytes, name) -}}
{%- if b['len'] %}
if len({{ name }}) != {{ b['len'] }}:
raise ValidationFailed(\"{{ name }} length does not equal {{ b['len'] }}\")
{%- endif -%}
{%- if b['min_len'] %}
if len({{ name }}) < {{ b['min_len'] }}:
raise ValidationFailed(\"{{ name }} length is less than {{ b['min_len'] }}\")
{%- endif -%}
{%- if b['max_len'] %}
if len({{ name }}) > {{ b['max_len'] }}:
raise ValidationFailed(\"{{ name }} length is more than {{ b['max_len'] }}\")
{%- endif -%}
{%- if b['ip'] %}
try:
ipaddress.ip_address({{ name }})
except ValueError:
raise ValidationFailed(\"{{ name }} is not a valid ip\")
{%- endif -%}
{%- if b['ipv4'] %}
try:
ipaddress.IPv4Address({{ name }})
except ValueError:
raise ValidationFailed(\"{{ name }} is not a valid ipv4\")
{%- endif -%}
{%- if b['ipv6'] %}
try:
ipaddress.IPv6Address({{ name }})
except ValueError:
raise ValidationFailed(\"{{ name }} is not a valid ipv6\")
{%- endif -%}
{% if b['pattern'] %}
{% if sys.version_info[0] >= 3%}
if re.search({{ b['pattern'].encode('unicode-escape') }}, {{ name }}) is None:
raise ValidationFailed(\"{{ name }} pattern does not match b['pattern'].encode('unicode-escape')\")
{% else %}
if re.search(b\"{{ b['pattern'].encode('unicode-escape') }}\", {{ name }}) is None:
raise ValidationFailed(\"{{ name }} pattern does not match \")
{% endif %}
{% endif %}
{% if b['contains'] %}
{% if sys.version_info[0] >= 3 %}
if not {{ b['contains'] }} in {{ name }}:
raise ValidationFailed(\"{{ name }} does not contain {{ b['contains'] }}\")
{% else %}
if not b\"{{ b['contains'].encode('string_escape') }}\" in {{ name }}:
raise ValidationFailed(\"{{ name }} does not contain \")
{% endif %}
{% endif %}
{% if b['prefix'] %}
{% if sys.version_info[0] >= 3 %}
if not {{ name }}.startswith({{ b['prefix'] }}):
raise ValidationFailed(\"{{ name }} does not start with prefix {{ b['prefix'] }}\")
{% else %}
if not {{name}}.startswith(b\"{{ b['prefix'].encode('string_escape') }}\"):
raise ValidationFailed(\"{{ name }} does not start with prefix {{ b['prefix'].encode('string_escape') }}\")
{% endif %}
{% endif %}
{% if b['suffix'] %}
{% if sys.version_info[0] >= 3 %}
if not {{ name }}.endswith({{ b['suffix'] }}):
raise ValidationFailed(\"{{ name }} does not end with suffix {{ b['suffix'] }}\")
{% else %}
if not {{name}}.endswith(b\"{{ b['suffix'].encode('string_escape') }}\"):
raise ValidationFailed(\"{{ name }} does not end with suffix {{ b['suffix'] }}\")
{% endif %}
{% endif %}
"""
return Template(bytes_tmpl).render(sys=sys,o = option_value, name = name, const_template = const_template, in_template = in_template, b = option_value.bytes)
def switcher_template(accessor, name, field, map = False):
switcher_tmpl = """
{%- if str(accessor.float) %}
{{- num_template(accessor, name, accessor.float)|indent(4,True) -}}
{%- elif str(accessor.double) %}
{{- num_template(accessor, name, accessor.double)|indent(4,True) -}}
{%- elif str(accessor.int32) %}
{{- num_template(accessor, name, accessor.int32)|indent(4,True) -}}
{%- elif str(accessor.int64) %}
{{- num_template(accessor, name, accessor.int64)|indent(4,True) -}}
{%- elif str(accessor.uint32) %}
{{- num_template(accessor, name, accessor.uint32)|indent(4,True) -}}
{%- elif str(accessor.uint64) %}
{{- num_template(accessor, name, accessor.uint64)|indent(4,True) -}}
{%- elif str(accessor.sint32) %}
{{- num_template(accessor, name, accessor.sint32)|indent(4,True) -}}
{%- elif str(accessor.sint64) %}
{{- num_template(accessor, name, accessor.sint64)|indent(4,True) -}}
{%- elif str(accessor.fixed32) %}
{{- num_template(accessor, name, accessor.fixed32)|indent(4,True) -}}
{%- elif str(accessor.fixed64) %}
{{- num_template(accessor, name, accessor.fixed64)|indent(4,True) -}}
{%- elif str(accessor.sfixed32) %}
{{- num_template(accessor, name, accessor.sfixed32)|indent(4,True) -}}
{%- elif str(accessor.sfixed64) %}
{{- num_template(accessor, name, accessor.sfixed64)|indent(4,True) -}}
{%- elif str(accessor.bool) %}
{{- bool_template(accessor, name)|indent(4,True) -}}
{%- elif str(accessor.string) %}
{{- string_template(accessor, name)|indent(4,True) -}}
{%- elif str(accessor.enum) and map %}
{{- enum_template(accessor, name, field.message_type.fields[1])|indent(4,True) -}}
{%- elif str(accessor.enum) and not map %}
{{- enum_template(accessor, name, field)|indent(4,True) -}}
{%- elif str(accessor.duration) %}
{{- duration_template(accessor, name, True)|indent(4,True) -}}
{%- elif str(accessor.timestamp) %}
{{- timestamp_template(accessor, name, True)|indent(4,True) -}}
{%- elif str(accessor.message) %}
{{- message_template(accessor, name, True)|indent(4,True) -}}
{%- elif str(accessor.any) %}
{{- any_template(accessor, name, True)|indent(4,True) -}}
{%- elif str(accessor.message) %}
{{- message_template(accessor, name, True)|indent(4,True) -}}
{%- endif %}
"""
return Template(switcher_tmpl).render(accessor = accessor, name = name, str = str, num_template = num_template, bool_template = bool_template, string_template = string_template, enum_template = enum_template, duration_template = duration_template, timestamp_template = timestamp_template, any_template = any_template, message_template = message_template, field = field, map = map)
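# repeated_template enforces min_items/max_items/unique on the list itself, recursively
# validates embedded messages, and applies the per-item "items" rules to every element
# through switcher_template.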
def repeated_template(option_value, name, field):
rep_tmpl = """
{%- if o and o.repeated['min_items'] %}
if len({{ name }}) < {{ o.repeated['min_items'] }}:
raise ValidationFailed(\"{{ name }} needs to contain at least {{ o.repeated['min_items'] }} items\")
{%- endif %}
{%- if o and o.repeated['max_items'] %}
if len({{ name }}) > {{ o.repeated['max_items'] }}:
raise ValidationFailed(\"{{ name }} needs to contain at most {{ o.repeated['max_items'] }} items\")
{%- endif %}
{%- if o and o.repeated['unique'] %}
seen = set()
for item in {{ name }}:
if item in seen:
raise ValidationFailed(\"{{ name }} must contain unique items. %s has been repeated.\" %item)
else:
seen.add(item)
{%- endif %}
{%- if message_type %}
for item in {{ name }}:
{%- if o and o.repeated and o.repeated.items.message.skip %}
pass
{% else %}
validate(item)(item)
{% endif %}
{%- endif %}
{%- if o and str(o.repeated['items']) %}
for item in {{ name }}:
{%- set accessor = o.repeated['items'] -%}
{{ switcher_template(accessor, 'item', field) }}
pass
{%- endif %}
"""
return Template(rep_tmpl).render(o = option_value, name = name, message_type = field.message_type, str = str, field = field, switcher_template = switcher_template)
def is_map(field):
return field.label == 3 and field.message_type and len(field.message_type.fields) == 2 and \
field.message_type.fields[0].name == "key" and field.message_type.fields[1].name == "value"
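# map_template checks min_pairs/max_pairs, emits an UnimplementedException for the
# unsupported no_sparse option, validates keys with the scalar templates and values
# through switcher_template, and recurses into message-typed values.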
def map_template(option_value, name, field):
map_tmpl = """
{%- if o and o.map['min_pairs'] %}
if len({{ name }}) < {{ o.map['min_pairs'] }}:
raise ValidationFailed(\"{{ name }} needs to contain at least {{ o.map['min_pairs'] }} items\")
{%- endif %}
{%- if o and o.map['max_pairs'] %}
if len({{ name }}) > {{ o.map['max_pairs'] }}:
raise ValidationFailed(\"{{ name }} can contain at most {{ o.map['max_pairs'] }} items\")
{%- endif %}
{%- if o and o.map['no_sparse'] -%}
raise UnimplementedException(\"no_sparse validation is not implemented because protobuf maps cannot be sparse in Python\")
{%- endif %}
{%- if o and (str(o.map['keys']) or str(o.map['values']))%}
for key in {{ name }}:
{%- set keys = o.map['keys'] -%}
{%- set values = o.map['values'] -%}
{%- if str(keys.double) %}
{{- num_template(keys, 'key', keys.double)|indent(4,True) -}}
{%- elif str(keys.int32) %}
{{- num_template(keys, 'key', keys.int32)|indent(4,True) -}}
{%- elif str(keys.int64) %}
{{- num_template(keys, 'key', keys.int64)|indent(4,True) -}}
{%- elif str(keys.uint32) %}
{{- num_template(keys, 'key', keys.uint32)|indent(4,True) -}}
{%- elif str(keys.uint64) %}
{{- num_template(keys, 'key', keys.uint64)|indent(4,True) -}}
{%- elif str(keys.sint32) %}
{{- num_template(keys, 'key', keys.sint32)|indent(4,True) -}}
{%- elif str(keys.sint64) %}
{{- num_template(keys, 'key', keys.sint64)|indent(4,True) -}}
{%- elif str(keys.fixed32) %}
{{- num_template(keys, 'key', keys.fixed32)|indent(4,True) -}}
{%- elif str(keys.fixed64) %}
{{- num_template(keys, 'key', keys.fixed64)|indent(4,True) -}}
{%- elif str(keys.sfixed32) %}
{{- num_template(keys, 'key', keys.sfixed32)|indent(4,True) -}}
{%- elif str(keys.sfixed64) %}
{{- num_template(keys, 'key', keys.sfixed64)|indent(4,True) -}}
{%- elif str(keys.bool) %}
{{- bool_template(keys, 'key')|indent(4,True) -}}
{%- elif str(keys.string) %}
{{- string_template(keys, 'key')|indent(4,True) -}}
{%- endif %}
{%- set values = o.map['values'] -%}
{{ switcher_template(values, name +'[key]', field, True) }}
pass
{%- elif field.message_type.fields[1].message_type %}
for key in {{ name }}:
validate({{ name }}[key])({{ name }}[key])
{%- endif %}
"""
return Template(map_tmpl).render(o = option_value, name = name, message_type = field.message_type, str = str, field = field, switcher_template = switcher_template, num_template = num_template, string_template = string_template, bool_template = bool_template)
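# rule_type is the dispatcher: it reads the validate.rules option attached to a field
# descriptor and returns the snippet rendered by the matching template above (scalar,
# enum, bytes, repeated, map, wrapper, duration, timestamp, any or message).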
def rule_type(field):
name = "p."+ field.name
if has_validate(field) and field.message_type is None:
for option_descriptor, option_value in field.GetOptions().ListFields():
if option_descriptor.full_name == "validate.rules":
if str(option_value.string):
return string_template(option_value, name )
elif str(option_value.message):
return message_template(option_value, field.name)
elif str(option_value.bool):
return bool_template(option_value, name)
elif str(option_value.float):
return num_template(option_value, name, option_value.float)
elif str(option_value.double):
return num_template(option_value, name, option_value.double)
elif str(option_value.int32):
return num_template(option_value, name, option_value.int32)
elif str(option_value.int64):
return num_template(option_value, name, option_value.int64)
elif str(option_value.uint32):
return num_template(option_value, name, option_value.uint32)
elif str(option_value.uint64):
return num_template(option_value, name, option_value.uint64)
elif str(option_value.sint32):
return num_template(option_value, name, option_value.sint32)
elif str(option_value.sint64):
return num_template(option_value, name, option_value.sint64)
elif str(option_value.fixed32):
return num_template(option_value, name, option_value.fixed32)
elif str(option_value.fixed64):
return num_template(option_value, name, option_value.fixed64)
elif str(option_value.sfixed32):
return num_template(option_value, name, option_value.sfixed32)
elif str(option_value.sfixed64):
return num_template(option_value, name, option_value.sfixed64)
elif str(option_value.enum):
return enum_template(option_value, name, field)
elif str(option_value.bytes):
return bytes_template(option_value, name)
elif str(option_value.repeated):
return repeated_template(option_value, name, field)
elif str(option_value.map):
return map_template(option_value, name, field)
elif str(option_value.required):
return required_template(option_value, name)
if field.message_type:
for option_descriptor, option_value in field.GetOptions().ListFields():
if option_descriptor.full_name == "validate.rules":
if str(option_value.duration):
return duration_template(option_value, name)
elif str(option_value.timestamp):
return timestamp_template(option_value, name)
elif str(option_value.float) or str(option_value.int32) or str(option_value.int64) or \
str(option_value.double) or str(option_value.uint32) or str(option_value.uint64) or \
str(option_value.bool) or str(option_value.string) or str(option_value.bytes):
return wrapper_template(option_value, name)
                elif str(option_value.message) != "":
return message_template(option_value, field.name)
elif str(option_value.any):
return any_template(option_value, name)
elif str(option_value.repeated):
return repeated_template(option_value, name, field)
elif str(option_value.map):
return map_template(option_value, name, field)
elif str(option_value.required):
return required_template(option_value, name)
if field.message_type.full_name.startswith("google.protobuf"):
return ""
elif is_map(field):
return map_template(None, name, field)
elif field.label == 3:
return repeated_template(None, name, field)
else:
return message_template(None, field.name)
return ""
def file_template(proto_message):
file_tmp = """
# Validates {{ p.DESCRIPTOR.name }}
def generate_validate(p):
{%- for option_descriptor, option_value in p.DESCRIPTOR.GetOptions().ListFields() %}
{%- if option_descriptor.full_name == "validate.disabled" and option_value %}
return None
{%- endif -%}
{%- endfor -%}
{%- for oneof in p.DESCRIPTOR.oneofs %}
present = False
{%- for field in oneof.fields %}
if _has_field(p, \"{{ field.name }}\"):
present = True
{{ rule_type(field)|indent(4,True) }}
{%- endfor %}
{% for option in oneof.GetOptions().ListFields() %}
{% if option[0].name == 'required' and option[1] %}
if not present:
raise ValidationFailed(\"Oneof {{ oneof.name }} is required\")
{% endif %}
{% endfor %}
{%- endfor %}
{%- for field in p.DESCRIPTOR.fields -%}
{%- if not field.containing_oneof %}
{{ rule_type(field) -}}
{%- endif %}
{%- endfor %}
return None"""
return Template(file_tmp).render(rule_type = rule_type, p = proto_message)
class UnimplementedException(Exception):
pass
class ValidationFailed(Exception):
pass
| apache-2.0 | -1,249,293,611,258,870,500 | 43.43717 | 384 | 0.519557 | false |
Connexions/nebuchadnezzar | nebu/tests/models/test_document.py | 1 | 5799 | from copy import copy
from lxml import etree
from cnxepub.html_parsers import HTML_DOCUMENT_NAMESPACES
from nebu.models.document import Document
REFERENCE_MARKER = '#!--testing--'
M46882_METADATA = {
'authors': [{'id': 'OpenStaxCollege',
'name': 'OpenStaxCollege',
'type': 'cnx-id'}],
'cnx-archive-shortid': None,
'cnx-archive-uri': '[email protected]',
'copyright_holders': [{'id': 'OSCRiceUniversity',
'name': 'OSCRiceUniversity',
'type': 'cnx-id'}],
'created': '2013/07/19 00:42:23 -0500',
'derived_from_title': None,
'derived_from_uri': None,
'editors': [],
'illustrators': [],
'keywords': ('cumulative relative frequency', 'frequency'),
'language': 'en',
'license_text': 'CC BY',
'license_url': 'http://creativecommons.org/licenses/by/4.0/',
'print_style': None,
'publishers': [{'id': 'OpenStaxCollege',
'name': 'OpenStaxCollege',
'type': 'cnx-id'},
{'id': 'cnxstats', 'name': 'cnxstats', 'type': 'cnx-id'}],
'revised': '2019/02/08 09:37:55.846 US/Central',
'subjects': ('Mathematics and Statistics',),
'summary': None,
'title': 'Frequency, Frequency Tables, and Levels of Measurement',
'translators': [],
'version': '1.17',
}
def mock_reference_resolver(reference, resource):
"""Used for testing reference resolution during model tests"""
if resource:
reference.bind(resource, '{}/{{}}'.format(REFERENCE_MARKER))
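# The mock resolver binds every resolvable reference under REFERENCE_MARKER, so the
# tests below can spot rewritten resource URLs simply by looking for
# '#!--testing--/<filename>' in the generated content.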
class TestDocument(object):
def test_sanatize_content(self, assembled_data):
with (assembled_data / 'm46913.xhtml').open('rb') as fb:
html = etree.parse(fb)
# And parse a second copy for verification
fb.seek(0)
expected_html = etree.parse(fb)
# Hit the target
results = Document._sanatize_content(html)
# Construct expected results
body = expected_html.xpath(
"//xhtml:body",
namespaces=HTML_DOCUMENT_NAMESPACES,
)[0]
metadata_elm = body.xpath(
"//xhtml:div[@data-type='metadata']",
namespaces=HTML_DOCUMENT_NAMESPACES,
)[0]
body.remove(metadata_elm)
body.attrib.pop('itemtype')
body.attrib.pop('itemscope')
expected_results = etree.tostring(expected_html)
assert results == expected_results
def test_find_resources(self, collection_data):
loc = collection_data / 'm46909'
# Hit the target
resources = Document._find_resources(loc)
# Verify we discovered the resource files
expected_filenames = [
'Prev_m16020_DotPlot.png',
'fig-ch01_02_01n.png',
'm16020_DotPlot_description.html',
'm16020_DotPlot_download.pdf',
]
assert sorted([r.id for r in resources]) == expected_filenames
assert sorted([r.filename for r in resources]) == expected_filenames
def test_from_index_cnxml(self, collection_data):
filepath = collection_data / 'm46882' / 'index.cnxml'
# Hit the target
doc = Document.from_index_cnxml(filepath, mock_reference_resolver)
# Verify the metadata
assert doc.id == 'm46882'
expected_metadata = M46882_METADATA
assert doc.metadata == expected_metadata
# Verify the content is content'ish
assert doc._xml.xpath(
"/xhtml:body/*[@data-type='metadata']",
namespaces=HTML_DOCUMENT_NAMESPACES,
) == []
assert len(doc._xml.xpath(
"//*[@id='fs-idm20141232']",
namespaces=HTML_DOCUMENT_NAMESPACES,
)) == 1
# Verify the resources are attached to the object
expected_filenames = [
'CNX_Stats_C01_M10_001.jpg',
'CNX_Stats_C01_M10_002.jpg',
'CNX_Stats_C01_M10_003.jpg',
]
filenames = [r.filename for r in doc.resources]
assert sorted(filenames) == expected_filenames
# Verify the references have been rewritten
ref = '{}/CNX_Stats_C01_M10_003.jpg'.format(REFERENCE_MARKER).encode()
assert ref in doc.content
# Verify external and non-existent resource references remain
assert b'src="foobar.png"' in doc.content
assert b'ef="/[email protected]"' in doc.content # rewritten in cnxml->html
assert b'ef="http://en.wikibooks.org/"' in doc.content
def test_from_filepath(self, assembled_data):
filepath = assembled_data / 'm46882.xhtml'
# Hit the target
doc = Document.from_filepath(filepath)
# Verify the metadata
assert doc.id == 'm46882'
expected_metadata = copy(M46882_METADATA)
# cnx-epub metadata is mutable, so sequences are lists rather than
# tuples.
expected_metadata['keywords'] = list(expected_metadata['keywords'])
expected_metadata['subjects'] = list(expected_metadata['subjects'])
assert doc.metadata == expected_metadata
# Verify the content is content'ish
assert doc._xml.xpath(
"/xhtml:body/*[@data-type='metadata']",
namespaces=HTML_DOCUMENT_NAMESPACES,
) == []
assert len(doc._xml.xpath(
"//*[@id='fs-idm20141232']",
namespaces=HTML_DOCUMENT_NAMESPACES,
)) == 1
# Verify the resources are attached to the object
expected_filenames = []
filenames = [r.filename for r in doc.resources]
assert sorted(filenames) == expected_filenames
# Verify the references have been rewritten
ref = '{}/CNX_Stats_C01_M10_003.jpg'.format(REFERENCE_MARKER).encode()
assert ref in doc.content
| agpl-3.0 | 3,357,162,293,099,597,300 | 34.796296 | 78 | 0.591999 | false |
Abhino/GamifiedTodoList | apis/userInfoApi.py | 1 | 1973 | import logging
from flask_restplus import Namespace, Resource, fields
from flask import jsonify, request
from Service.userInfoService import *
api = Namespace('user', description='User Info API related operations')
LOG = logging.getLogger("userInfoApi")
user_fields = api.model('UserModel', {
'lastLogin': fields.Date,
'totalScore':fields.Integer(description='Total Score for the day'),
'userName': fields.String
})
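# user_fields describes the JSON payload accepted by the POST/PUT handlers below; it
# is wired in through @api.expect, mainly so the generated API docs show the schema.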
@api.route('')
class UserInfo(Resource):
@api.expect(user_fields)
def post(self):
json_data = request.get_json()
LOG.debug("Request JSON : %s " % json_data)
createUserInfo(json_data)
return ["POST Request Complete"]
@api.doc(params={'userName': 'Get userInfo by UserName'})
def get(self):
userName = request.args.get('userName')
obj = getUserInfoByUserName(userName)
return jsonify(row2dict(obj))
@api.expect(user_fields)
def put(self):
json_data = request.get_json()
LOG.debug("Request JSON : %s " % json_data)
updateUserInfoByUserName(json_data)
return ["PUT Request Complete"]
@api.route('/calculate')
@api.doc(params={'amount': 'Amount Value to update','operation': 'Add or Sub', 'userName':'userName to be updated'})
class scoresTotal(Resource):
def put(self):
amount = request.args.get('amount')
operation = request.args.get('operation')
userName = request.args.get('userName')
evaluateScoreTotal(amount,operation,userName)
return "Evaluated Total successfully"
@api.route('/calculate/bonus')
class scoresDate(Resource):
@api.doc(params={'userName':'userName to be updated'})
def put(self):
userName = request.args.get('userName')
evaluateBonus(userName)
return "Evaluated Bonus successfully"
def row2dict(row):
d = {}
for column in row.__table__.columns:
d[column.name] = str(getattr(row, column.name))
return d | mit | -959,042,493,399,824,600 | 33.034483 | 116 | 0.660922 | false |
sigurdga/nidarholm | nidarholm/urls.py | 1 | 2433 | from django.conf.urls.defaults import *
from django.contrib import admin
from django.conf import settings
from django.contrib.auth import views as auth_views
from accounts.views import groups, group_object_detail, user_groups, member_list, edit_profile, new_profile, all_members
from news.views import story_list
from pages.views import edit_flatpage, new_flatpage, flatpage_list
from accounts.forms import ProfileForm, LoginForm
admin.autodiscover()
urlpatterns = patterns('',
(r'^$', story_list, (), 'main'),
(r'^c/', include('s7n.threaded_comments.urls')),
(r'^f/', include('s7n.forum.urls')),
(r'^t/', include('s7n.timeline.urls')),
(r'^accounts/login/', 'django.contrib.auth.views.login', {'authentication_form': LoginForm}, 'auth_login'),
(r'^accounts/', include('registration.urls')),
(r'^forum/', include('forum.urls')),
(r'^news/', include('news.urls')),
(r'^events/', include('events.urls')),
(r'^projects/', include('projects.urls')),
(r'^files/', include('vault.urls')),
(r'^sitemap/', include('navigation.urls')),
(r'^users/all/$', all_members, (), 'all-members'),
(r'^users/new/$', new_profile, (), 'new-profile'),
(r'^users/(?P<id>\d+)/edit/$', edit_profile, (), 'edit-profile'),
#(r'^users/new/$', 'profiles.views.create_profile', {'form_class': ProfileForm}, 'create-profile'),
(r'^users/(?P<username>\w+)/groups$', user_groups, (), 'user-groups'),
(r'^members$', member_list, (), 'member-list'),
(r'^users/', include('profiles.urls')),
(r'^groups$', groups, (), 'groups'),
(r'^groups/(?P<id>\d+)$', group_object_detail, (), 'groups-group'),
(r'^organization/', include('organization.urls')),
(r'^pages/(?P<id>\d+)/edit$', edit_flatpage, (), 'edit-flatpage'),
(r'^pages/new$', new_flatpage, (), 'new-flatpage'),
(r'^pages/$', flatpage_list, (), 'flatpage-list'),
(r'^admin/doc/', include('django.contrib.admindocs.urls')),
(r'^admin/', include(admin.site.urls)),
(r'^avatar/', include('avatar.urls')),
(r'^search/', include('search.urls')),
(r'^tagging_autocomplete/', include('tagging_autocomplete.urls')),
(r'^markitup/', include('markitup.urls')),
)
if settings.DEVELOPMENT_MODE:
import os
urlpatterns += patterns('',
(r'^m/(.*)$', 'django.views.static.serve', {'document_root': os.path.join(os.path.abspath(os.path.dirname(__file__)), 'media')}),
)
| agpl-3.0 | 1,904,305,003,041,845,500 | 44.055556 | 141 | 0.621455 | false |
Johnzero/erp | openerp/addons/base/ir/ir_cron.py | 1 | 15239 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import calendar
import time
import logging
import threading
import psycopg2
from datetime import datetime
from dateutil.relativedelta import relativedelta
import netsvc
import openerp
import pooler
import tools
from openerp.cron import WAKE_UP_NOW
from osv import fields, osv
from tools import DEFAULT_SERVER_DATETIME_FORMAT
from tools.safe_eval import safe_eval as eval
from tools.translate import _
_logger = logging.getLogger(__name__)
def str2tuple(s):
return eval('tuple(%s)' % (s or ''))
_intervalTypes = {
'work_days': lambda interval: relativedelta(days=interval),
'days': lambda interval: relativedelta(days=interval),
'hours': lambda interval: relativedelta(hours=interval),
'weeks': lambda interval: relativedelta(days=7*interval),
'months': lambda interval: relativedelta(months=interval),
'minutes': lambda interval: relativedelta(minutes=interval),
}
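# Maps an ir.cron interval_type value to the relativedelta added between two runs.
# Note that 'work_days' is handled exactly like 'days' here (no weekend skipping).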
class ir_cron(osv.osv):
""" Model describing cron jobs (also called actions or tasks).
"""
# TODO: perhaps in the future we could consider a flag on ir.cron jobs
# that would cause database wake-up even if the database has not been
# loaded yet or was already unloaded (e.g. 'force_db_wakeup' or something)
# See also openerp.cron
_name = "ir.cron"
_order = 'name'
_columns = {
'name': fields.char('Name', size=60, required=True),
'user_id': fields.many2one('res.users', 'User', required=True),
'active': fields.boolean('Active'),
'interval_number': fields.integer('Interval Number',help="Repeat every x."),
'interval_type': fields.selection( [('minutes', 'Minutes'),
('hours', 'Hours'), ('work_days','Work Days'), ('days', 'Days'),('weeks', 'Weeks'), ('months', 'Months')], 'Interval Unit'),
'numbercall': fields.integer('Number of Calls', help='How many times the method is called,\na negative number indicates no limit.'),
'doall' : fields.boolean('Repeat Missed', help="Specify if missed occurrences should be executed when the server restarts."),
'nextcall' : fields.datetime('Next Execution Date', required=True, help="Next planned execution date for this job."),
'model': fields.char('Object', size=64, help="Model name on which the method to be called is located, e.g. 'res.partner'."),
'function': fields.char('Method', size=64, help="Name of the method to be called when this job is processed."),
'args': fields.text('Arguments', help="Arguments to be passed to the method, e.g. (uid,)."),
'priority': fields.integer('Priority', help='The priority of the job, as an integer: 0 means higher priority, 10 means lower priority.')
}
_defaults = {
'nextcall' : lambda *a: time.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'priority' : lambda *a: 5,
'user_id' : lambda obj,cr,uid,context: uid,
'interval_number' : lambda *a: 1,
'interval_type' : lambda *a: 'months',
'numbercall' : lambda *a: 1,
'active' : lambda *a: 1,
'doall' : lambda *a: 1
}
def _check_args(self, cr, uid, ids, context=None):
try:
for this in self.browse(cr, uid, ids, context):
str2tuple(this.args)
except Exception:
return False
return True
_constraints = [
(_check_args, 'Invalid arguments', ['args']),
]
def _handle_callback_exception(self, cr, uid, model_name, method_name, args, job_id, job_exception):
""" Method called when an exception is raised by a job.
Simply logs the exception and rollback the transaction.
:param model_name: model name on which the job method is located.
:param method_name: name of the method to call when this job is processed.
:param args: arguments of the method (without the usual self, cr, uid).
:param job_id: job id.
:param job_exception: exception raised by the job.
"""
cr.rollback()
_logger.exception("Call of self.pool.get('%s').%s(cr, uid, *%r) failed in Job %s" % (model_name, method_name, args, job_id))
def _callback(self, cr, uid, model_name, method_name, args, job_id):
""" Run the method associated to a given job
It takes care of logging and exception handling.
:param model_name: model name on which the job method is located.
:param method_name: name of the method to call when this job is processed.
:param args: arguments of the method (without the usual self, cr, uid).
:param job_id: job id.
"""
args = str2tuple(args)
model = self.pool.get(model_name)
if model and hasattr(model, method_name):
method = getattr(model, method_name)
try:
log_depth = (None if _logger.isEnabledFor(logging.DEBUG) else 1)
netsvc.log(_logger, logging.DEBUG, 'cron.object.execute', (cr.dbname,uid,'*',model_name,method_name)+tuple(args), depth=log_depth)
if _logger.isEnabledFor(logging.DEBUG):
start_time = time.time()
method(cr, uid, *args)
if _logger.isEnabledFor(logging.DEBUG):
end_time = time.time()
_logger.debug('%.3fs (%s, %s)' % (end_time - start_time, model_name, method_name))
except Exception, e:
self._handle_callback_exception(cr, uid, model_name, method_name, args, job_id, e)
def _run_job(self, cr, job, now):
""" Run a given job taking care of the repetition.
The cursor has a lock on the job (aquired by _run_jobs_multithread()) and this
method is run in a worker thread (spawned by _run_jobs_multithread())).
:param job: job to be run (as a dictionary).
:param now: timestamp (result of datetime.now(), no need to call it multiple time).
"""
try:
nextcall = datetime.strptime(job['nextcall'], DEFAULT_SERVER_DATETIME_FORMAT)
numbercall = job['numbercall']
ok = False
while nextcall < now and numbercall:
if numbercall > 0:
numbercall -= 1
if not ok or job['doall']:
self._callback(cr, job['user_id'], job['model'], job['function'], job['args'], job['id'])
if numbercall:
nextcall += _intervalTypes[job['interval_type']](job['interval_number'])
ok = True
addsql = ''
if not numbercall:
addsql = ', active=False'
cr.execute("UPDATE ir_cron SET nextcall=%s, numbercall=%s"+addsql+" WHERE id=%s",
(nextcall.strftime(DEFAULT_SERVER_DATETIME_FORMAT), numbercall, job['id']))
if numbercall:
# Reschedule our own main cron thread if necessary.
# This is really needed if this job runs longer than its rescheduling period.
nextcall = calendar.timegm(nextcall.timetuple())
openerp.cron.schedule_wakeup(nextcall, cr.dbname)
finally:
cr.commit()
cr.close()
openerp.cron.release_thread_slot()
def _run_jobs_multithread(self):
# TODO remove 'check' argument from addons/base_action_rule/base_action_rule.py
""" Process the cron jobs by spawning worker threads.
This selects in database all the jobs that should be processed. It then
tries to lock each of them and, if it succeeds, spawns a thread to run
the cron job (if it doesn't succeed, it means the job was already
locked to be taken care of by another thread).
The cursor used to lock the job in database is given to the worker
thread (which has to close it itself).
"""
db = self.pool.db
cr = db.cursor()
db_name = db.dbname
try:
jobs = {} # mapping job ids to jobs for all jobs being processed.
now = datetime.utcnow()
# Careful to compare timestamps with 'UTC' - everything is UTC as of v6.1.
cr.execute("""SELECT * FROM ir_cron
WHERE numbercall != 0
AND active AND nextcall <= (now() at time zone 'UTC')
ORDER BY priority""")
for job in cr.dictfetchall():
if not openerp.cron.get_thread_slots():
break
jobs[job['id']] = job
task_cr = db.cursor()
try:
# Try to grab an exclusive lock on the job row from within the task transaction
acquired_lock = False
task_cr.execute("""SELECT *
FROM ir_cron
WHERE id=%s
FOR UPDATE NOWAIT""",
(job['id'],), log_exceptions=False)
acquired_lock = True
except psycopg2.OperationalError, e:
if e.pgcode == '55P03':
# Class 55: Object not in prerequisite state; 55P03: lock_not_available
_logger.debug('Another process/thread is already busy executing job `%s`, skipping it.', job['name'])
continue
else:
# Unexpected OperationalError
raise
finally:
if not acquired_lock:
                        # we're exiting due to an exception while acquiring the lock
task_cr.close()
                    # dummy strptime call to work around the thread-safety issue
                    # described in http://bugs.python.org/issue7980
datetime.strptime('2012-01-01', '%Y-%m-%d')
# Got the lock on the job row, now spawn a thread to execute it in the transaction with the lock
task_thread = threading.Thread(target=self._run_job, name=job['name'], args=(task_cr, job, now))
# force non-daemon task threads (the runner thread must be daemon, and this property is inherited by default)
task_thread.setDaemon(False)
openerp.cron.take_thread_slot()
task_thread.start()
_logger.debug('Cron execution thread for job `%s` spawned', job['name'])
# Find next earliest job ignoring currently processed jobs (by this and other cron threads)
find_next_time_query = """SELECT min(nextcall) AS min_next_call
FROM ir_cron WHERE numbercall != 0 AND active"""
if jobs:
cr.execute(find_next_time_query + " AND id NOT IN %s", (tuple(jobs.keys()),))
else:
cr.execute(find_next_time_query)
next_call = cr.dictfetchone()['min_next_call']
if next_call:
next_call = calendar.timegm(time.strptime(next_call, DEFAULT_SERVER_DATETIME_FORMAT))
else:
# no matching cron job found in database, re-schedule arbitrarily in 1 day,
# this delay will likely be modified when running jobs complete their tasks
next_call = time.time() + (24*3600)
openerp.cron.schedule_wakeup(next_call, db_name)
except Exception, ex:
_logger.warning('Exception in cron:', exc_info=True)
finally:
cr.commit()
cr.close()
def update_running_cron(self, cr):
""" Schedule as soon as possible a wake-up for this database. """
# Verify whether the server is already started and thus whether we need to commit
# immediately our changes and restart the cron agent in order to apply the change
# immediately. The commit() is needed because as soon as the cron is (re)started it
# will query the database with its own cursor, possibly before the end of the
# current transaction.
# This commit() is not an issue in most cases, but we must absolutely avoid it
# when the server is only starting or loading modules (hence the test on pool._init).
if not self.pool._init:
cr.commit()
openerp.cron.schedule_wakeup(WAKE_UP_NOW, self.pool.db.dbname)
def _try_lock(self, cr, uid, ids, context=None):
"""Try to grab a dummy exclusive write-lock to the rows with the given ids,
to make sure a following write() or unlink() will not block due
to a process currently executing those cron tasks"""
try:
cr.execute("""SELECT id FROM "%s" WHERE id IN %%s FOR UPDATE NOWAIT""" % self._table,
(tuple(ids),), log_exceptions=False)
except psycopg2.OperationalError:
cr.rollback() # early rollback to allow translations to work for the user feedback
raise osv.except_osv(_("Record cannot be modified right now"),
_("This cron task is currently being executed and may not be modified, "
"please try again in a few minutes"))
def create(self, cr, uid, vals, context=None):
res = super(ir_cron, self).create(cr, uid, vals, context=context)
self.update_running_cron(cr)
return res
def write(self, cr, uid, ids, vals, context=None):
self._try_lock(cr, uid, ids, context)
res = super(ir_cron, self).write(cr, uid, ids, vals, context=context)
self.update_running_cron(cr)
return res
def unlink(self, cr, uid, ids, context=None):
self._try_lock(cr, uid, ids, context)
res = super(ir_cron, self).unlink(cr, uid, ids, context=context)
self.update_running_cron(cr)
return res
ir_cron()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -7,828,882,223,435,639,000 | 46.072555 | 146 | 0.57425 | false |
olivier-m/Tamia | tamia/api.py | 1 | 9186 | # -*- coding: utf-8 -*-
#
# This file is part of Tamia released under the MIT license.
# See the LICENSE for more information.
from __future__ import (print_function, division, absolute_import, unicode_literals)
from datetime import datetime
import os.path
from StringIO import StringIO
import pygit2
from .errors import RepositoryNotFound, NodeNotFound, RevisionNotFound
from .index import Index
from .utils import clean_path, TZ
class Repository(object):
def __init__(self, repo_path, create=False, **kwargs):
try:
self._repo = pygit2.Repository(repo_path)
except KeyError:
if not create:
raise RepositoryNotFound('Repository "{0}" does not exist'.format(repo_path))
self._repo = pygit2.init_repository(repo_path, **kwargs)
self.path = self._repo.path
self.is_empty = self._repo.is_empty
self.is_bare = self._repo.is_bare
self._ref_map = {}
self._set_refs()
def __repr__(self):
return b'<{0}: {1}>'.format(self.__class__.__name__, self.path.encode('UTF-8'))
def _set_refs(self):
self._ref_map = {}
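        # Build {commit_hex: {'heads': [...], 'tags': [...]}} from refs/heads/* and
        # refs/tags/* so Revision objects can expose their branches and tags without
        # extra reference lookups.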
for r in self._repo.listall_references():
if not r.startswith('refs'):
continue
parts = r.split('/', 2)
if len(parts) != 3:
continue
parts.pop(0)
reftype = parts[0]
refname = parts[1]
refid = self._repo.revparse_single(r).hex
if refid not in self._ref_map:
self._ref_map[refid] = {}
if reftype not in self._ref_map[refid]:
self._ref_map[refid][reftype] = []
self._ref_map[refid][reftype].append(refname)
@property
def branches(self):
return self._repo.listall_branches()
@property
def tags(self):
return tuple([x[10:] for x in self._repo.listall_references() if x.startswith('refs/tags/')])
def get_revision(self, revision=None):
try:
instance = self._repo.revparse_single(revision or 'HEAD')
except KeyError:
raise RevisionNotFound('Revision "{0}" does not exist'.format(revision))
return Revision(self, instance)
def history(self, revision=None, reverse=False):
initial = self.get_revision(revision)._commit
sort = reverse and pygit2.GIT_SORT_REVERSE or pygit2.GIT_SORT_TIME
for instance in self._repo.walk(initial.oid, sort):
yield Revision(self, instance)
def diff(self, rev1, rev2):
return self.get_revision(rev1).node().diff(rev2)
def index(self, revision=None):
index = Index(self)
if revision:
index.set_revision(revision)
return index
def __iter__(self):
return self.history()
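# Illustrative usage (the path is hypothetical, not part of this module):
#   repo = Repository('/path/to/repo.git')
#   head = repo.get_revision()          # defaults to HEAD
#   for node in head.node().children(recursive=True):
#       print(node.name)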
class Revision(object):
def __init__(self, repository, commit):
self._repository = repository
self._commit = commit
self.id = commit.hex
self.short_id = self.id[:7]
self.author = Signature(commit.author)
self.committer = Signature(commit.committer)
self.message = commit.message
self.offset = self._commit.commit_time_offset
self.date = datetime.fromtimestamp(self._commit.commit_time, TZ(self.offset))
self._parents = None
self.tags = self._repository._ref_map.get(commit.hex, {}).get('tags', [])
self.branches = self._repository._ref_map.get(commit.hex, {}).get('heads', [])
def __repr__(self):
return b'<{0}: {1}>'.format(self.__class__.__name__, self.id)
@property
def parents(self):
        if self._parents is None:
self._parents = [Revision(self._repository, x) for x in self._commit.parents]
return self._parents
def node(self, path=None):
path = clean_path(path or '')
return Node(self, path)
class Signature(object):
def __init__(self, sig):
self.name = sig.name
self.email = sig.email
self.offset = sig.offset
self.date = datetime.fromtimestamp(sig.time, TZ(self.offset))
def __unicode__(self):
return '{name} <{email}> {date}{offset}'.format(**self.__dict__)
def __repr__(self):
return '<{0}> {1}'.format(self.__class__.__name__, self.name).encode('UTF-8')
class Node(object):
DIR = 1
FILE = 2
def __init__(self, revision, path=None):
self._revision = revision
if path in (None, '', '.'):
self._obj = revision._commit.tree
self.name = ''
self.type = self.DIR
else:
try:
entry = revision._commit.tree[path]
except KeyError:
raise NodeNotFound('Node "{0}" does not exist'.format(path))
self._obj = revision._repository._repo.get(entry.oid)
self.name = path
self.type = entry.filemode in (16384, 57344) and self.DIR or self.FILE
def __unicode__(self):
return self.name
def __repr__(self):
suffix = self.isdir() and '/' or ''
return '<{0}: {1}{2}>'.format(self.__class__.__name__, self.name, suffix).encode('UTF-8')
def isdir(self):
return self.type == self.DIR
def isfile(self):
return self.type == self.FILE
@property
def dirname(self):
return os.path.dirname(self.name)
@property
def basename(self):
return os.path.basename(self.name)
def children(self, recursive=False):
obj = self._obj
if isinstance(obj, pygit2.Tree):
for entry in obj:
dirname = self.isdir() and self.name or self.dirname
node = Node(self._revision, os.path.join(dirname, entry.name.decode('UTF-8')))
yield node
if recursive and node.isdir() and node._obj is not None:
for x in node.children(recursive=True):
yield x
def open(self):
blob = self._obj
if not isinstance(blob, pygit2.Blob):
            raise TypeError('Node is not a file')
return FileBlob(blob)
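    # history() walks the commit graph from the given revision (this node's revision
    # by default) and yields a Revision each time the entry at self.name changed,
    # ending with the oldest reachable commit that still contains it.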
def history(self, revision=None):
initial = self._revision._repository.get_revision(revision or self._revision.id)._commit
walker = self._revision._repository._repo.walk(initial.oid, pygit2.GIT_SORT_TIME)
last = None
c0 = walker.next()
try:
e0 = c0.tree[self.name]
last = c0
except KeyError:
e0 = None
for c1 in walker:
try:
e1 = c1.tree[self.name]
if e0 and e0.oid != e1.oid:
yield Revision(self._revision._repository, c0)
except KeyError:
e1 = None
c0 = c1
e0 = e1
if e1:
last = c1
if last:
yield Revision(self._revision._repository, last)
def diff(self, revision):
return Diff(self, revision)
class FileBlob(object):
def __init__(self, blob):
self._blob = blob
self._data = None
def _get_data(self):
if not self._data:
self._data = StringIO(self._blob.data)
return self._data
def read(self, size=None):
return self._get_data().read(size)
def write(self, data):
return self._get_data().write(data)
def close(self):
self._data = None
class Diff(object):
def __init__(self, node, revision, reversed=False):
self._node = node
self._t0 = node._revision._commit.tree
self._rev = node._revision._repository.get_revision(revision)
self._t1 = self._rev._commit.tree
self._diff = None
def __repr__(self):
return '<{0}: {1}..{2}>'.format(self.__class__.__name__,
self._node._revision.short_id,
self._rev.short_id
)
def __iter__(self):
if self._diff is None:
self._init_diff()
for f in self._files:
yield f
@property
def patch(self):
if self._diff is None:
self._init_diff()
return self._diff.patch
def _init_diff(self):
self._diff = self._t1.diff_to_tree(self._t0)
files = {}
for p in self._diff:
if self._node.name and not (
p.old_file_path.startswith(self._node.name.encode('UTF-8')) or
p.new_file_path.startswith(self._node.name.encode('UTF-8'))
):
continue
_id = '%s@%s' % (p.old_file_path.decode('UTF-8'), p.new_file_path.decode('UTF-8'))
if not _id in files:
files[_id] = Patch(p)
for h in p.hunks:
files[_id].hunks.append(Hunk(h))
self._files = files.values()
class Patch(object):
def __init__(self, patch):
self.old_path = patch.old_file_path.decode('UTF-8')
self.new_path = patch.new_file_path.decode('UTF-8')
self.hunks = []
class Hunk(object):
def __init__(self, hunk):
self.old_start = hunk.old_start
self.new_start = hunk.new_start
self.lines = hunk.lines
| mit | -8,687,526,702,763,049,000 | 27.70625 | 101 | 0.554213 | false |
IMIO/django-fixmystreet | django_fixmystreet/fixmystreet/migrations/0045_migrate_mark_as_done_motivation_to_comment.py | 1 | 45757 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
to_migrate = orm['fixmystreet.Report'].objects.filter(mark_as_done_motivation__isnull=False)
for report in to_migrate:
contact_user = report.mark_as_done_user
print 'Report', report.id
print ' Motivation', report.mark_as_done_motivation
print ' User', contact_user
# If it's a citizen who did the action, no comment
if report.mark_as_done_motivation and contact_user:
comment = orm['fixmystreet.ReportComment']()
comment.text = report.mark_as_done_motivation
comment.user = contact_user
comment.report = report
comment.save()
report.mark_as_done_comment = comment
report.save()
print 'mark_as_done_comment migrated:', len(to_migrate)
def backwards(self, orm):
"Write your backwards methods here."
to_delete = orm['fixmystreet.Report'].objects.filter(mark_as_done_motivation__isnull=False, mark_as_done_comment__isnull=False)
for report in to_delete:
contact_user = report.mark_as_done_user
print 'Report', report.id
print ' Motivation', report.mark_as_done_comment
print ' User', contact_user
report.mark_as_done_comment.delete()
print 'mark_as_done_comment deleted:', len(to_delete)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'default': "'!'", 'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'fixmystreet.faqentry': {
'Meta': {'ordering': "['order']", 'object_name': 'FaqEntry'},
'a_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'a_nl': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'q_fr': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'q_nl': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
u'fixmystreet.fmsuser': {
'Meta': {'ordering': "['last_name']", 'object_name': 'FMSUser', '_ormbases': [u'auth.User']},
'agent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'applicant': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'type'", 'blank': 'True', 'to': u"orm['fixmystreet.ReportCategory']"}),
'contractor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fmsuser_created'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'last_used_language': ('django.db.models.fields.CharField', [], {'default': "'FR'", 'max_length': '10', 'null': 'True'}),
'leader': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'logical_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fmsuser_modified'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'organisation': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'team'", 'null': 'True', 'to': u"orm['fixmystreet.OrganisationEntity']"}),
'quality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
u'fixmystreet.historicalfmsuser': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalFMSUser'},
'agent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'applicant': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'contractor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'created_by_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_used_language': ('django.db.models.fields.CharField', [], {'default': "'FR'", 'max_length': '10', 'null': 'True'}),
'leader': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'logical_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'modified_by_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'organisation_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'default': "'!'", 'max_length': '128'}),
'quality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
u'user_ptr_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '75', 'db_index': 'True'})
},
u'fixmystreet.historicalorganisationentity': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalOrganisationEntity'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'applicant': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'commune': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'created_by_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'department': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'dependency_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'feature_id': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'modified_by_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'region': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug_fr': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'slug_nl': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'subcontractor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1'})
},
u'fixmystreet.historicalpage': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalPage'},
'content_fr': ('ckeditor.fields.RichTextField', [], {}),
'content_nl': ('ckeditor.fields.RichTextField', [], {'null': 'True', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'slug_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'fixmystreet.historicalreport': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalReport'},
'accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'address_fr': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address_nl': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address_number': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address_number_as_int': ('django.db.models.fields.IntegerField', [], {'max_length': '255'}),
'address_regional': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'category_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'citizen_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'close_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'contractor_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'created_by_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_planned': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'false_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fixed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gravity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'hash_code': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
u'mark_as_done_comment_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'mark_as_done_motivation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'mark_as_done_user_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'merged_with_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'modified_by_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'pending': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'photo': ('django.db.models.fields.TextField', [], {'max_length': '100', 'blank': 'True'}),
'planned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'point': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '31370', 'null': 'True', 'blank': 'True'}),
'postalcode': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'probability': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'quality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'refusal_comment_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'refusal_motivation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'responsible_department_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'responsible_entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'responsible_manager_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'responsible_manager_validated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'secondary_category_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.TextField', [], {'default': "'web'"}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'terms_of_use_validated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'thumbnail': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'thumbnail_pro': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'fixmystreet.listitem': {
'Meta': {'object_name': 'ListItem'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'label_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'model_class': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'model_field': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'fixmystreet.mailnotificationtemplate': {
'Meta': {'object_name': 'MailNotificationTemplate'},
'content_fr': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'content_nl': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'title_fr': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'title_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'fixmystreet.organisationentity': {
'Meta': {'ordering': "['name_fr']", 'object_name': 'OrganisationEntity'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'applicant': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'commune': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'organisationentity_created'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'department': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'dependency': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'associates'", 'null': 'True', 'to': u"orm['fixmystreet.OrganisationEntity']"}),
'dispatch_categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'assigned_to_department'", 'blank': 'True', 'to': u"orm['fixmystreet.ReportCategory']"}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'feature_id': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'organisationentity_modified'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'region': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug_fr': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'slug_nl': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'subcontractor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1'})
},
u'fixmystreet.page': {
'Meta': {'object_name': 'Page'},
'content_fr': ('ckeditor.fields.RichTextField', [], {}),
'content_nl': ('ckeditor.fields.RichTextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'fixmystreet.report': {
'Meta': {'object_name': 'Report'},
'accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'address_fr': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address_nl': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address_number': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address_number_as_int': ('django.db.models.fields.IntegerField', [], {'max_length': '255'}),
'address_regional': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['fixmystreet.ReportMainCategoryClass']", 'null': 'True', 'blank': 'True'}),
'citizen': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'citizen_reports'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'close_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'contractor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'assigned_reports'", 'null': 'True', 'to': u"orm['fixmystreet.OrganisationEntity']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'report_created'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'date_planned': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'false_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fixed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gravity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'hash_code': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mark_as_done_comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'report_mark_as_done'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['fixmystreet.ReportComment']"}),
'mark_as_done_motivation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'mark_as_done_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'reports_solved'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'merged_with': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'merged_reports'", 'null': 'True', 'to': u"orm['fixmystreet.Report']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'report_modified'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'pending': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'photo': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'planned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'point': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '31370', 'null': 'True', 'blank': 'True'}),
'postalcode': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'previous_managers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'previous_reports'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['fixmystreet.FMSUser']"}),
'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'probability': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'quality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'refusal_comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'report_refusal'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['fixmystreet.ReportComment']"}),
'refusal_motivation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'responsible_department': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reports_in_department'", 'null': 'True', 'to': u"orm['fixmystreet.OrganisationEntity']"}),
'responsible_entity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'reports_in_charge'", 'null': 'True', 'to': u"orm['fixmystreet.OrganisationEntity']"}),
'responsible_manager': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'reports_in_charge'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'responsible_manager_validated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'secondary_category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['fixmystreet.ReportCategory']", 'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.TextField', [], {'default': "'web'"}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'terms_of_use_validated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'thumbnail': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'thumbnail_pro': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'fixmystreet.reportattachment': {
'Meta': {'object_name': 'ReportAttachment'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reportattachment_created'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logical_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reportattachment_modified'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'report': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': u"orm['fixmystreet.Report']"}),
'security_level': ('django.db.models.fields.IntegerField', [], {'default': '2'})
},
u'fixmystreet.reportcategory': {
'Meta': {'object_name': 'ReportCategory'},
'category_class': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'categories'", 'to': u"orm['fixmystreet.ReportMainCategoryClass']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reportcategory_created'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reportcategory_modified'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'secondary_category_class': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'categories'", 'to': u"orm['fixmystreet.ReportSecondaryCategoryClass']"}),
'slug_fr': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'slug_nl': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'fixmystreet.reportcategoryhint': {
'Meta': {'object_name': 'ReportCategoryHint'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_fr': ('django.db.models.fields.TextField', [], {}),
'label_nl': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'fixmystreet.reportcomment': {
'Meta': {'object_name': 'ReportComment', '_ormbases': [u'fixmystreet.ReportAttachment']},
u'reportattachment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['fixmystreet.ReportAttachment']", 'unique': 'True', 'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
},
u'fixmystreet.reporteventlog': {
'Meta': {'ordering': "['event_at']", 'object_name': 'ReportEventLog'},
'event_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'merged_with_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'organisation': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activities'", 'to': u"orm['fixmystreet.OrganisationEntity']"}),
'related_content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True'}),
'related_new_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'related_old_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'report': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activities'", 'to': u"orm['fixmystreet.Report']"}),
'status_new': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'status_old': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activities'", 'null': 'True', 'to': u"orm['auth.User']"}),
'value_old': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'})
},
u'fixmystreet.reportfile': {
'Meta': {'object_name': 'ReportFile', '_ormbases': [u'fixmystreet.ReportAttachment']},
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'file_creation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'file_type': ('django.db.models.fields.IntegerField', [], {}),
'image': ('django_fixmystreet.fixmystreet.utils.FixStdImageField', [], {'max_length': '100', 'name': "'image'", 'blank': 'True'}),
u'reportattachment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['fixmystreet.ReportAttachment']", 'unique': 'True', 'primary_key': 'True'}),
'title': ('django.db.models.fields.TextField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
},
u'fixmystreet.reportmaincategoryclass': {
'Meta': {'object_name': 'ReportMainCategoryClass'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reportmaincategoryclass_created'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'hint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['fixmystreet.ReportCategoryHint']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reportmaincategoryclass_modified'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug_fr': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'slug_nl': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'fixmystreet.reportnotification': {
'Meta': {'object_name': 'ReportNotification'},
'content_template': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'error_msg': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'notifications'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'recipient_mail': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'related_content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'related_object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'reply_to': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'sent_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'success': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'fixmystreet.reportsecondarycategoryclass': {
'Meta': {'object_name': 'ReportSecondaryCategoryClass'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reportsecondarycategoryclass_created'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reportsecondarycategoryclass_modified'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug_fr': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'slug_nl': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'fixmystreet.reportsubscription': {
'Meta': {'unique_together': "(('report', 'subscriber'),)", 'object_name': 'ReportSubscription'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'report': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscriptions'", 'to': u"orm['fixmystreet.Report']"}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['fixmystreet.FMSUser']"})
},
u'fixmystreet.streetsurface': {
'Meta': {'object_name': 'StreetSurface'},
'administrator': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.GeometryField', [], {'srid': '31370'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pw_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ssft': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'sslv': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'urbis_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'version_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'fixmystreet.userorganisationmembership': {
'Meta': {'unique_together': "(('user', 'organisation'),)", 'object_name': 'UserOrganisationMembership'},
'contact_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'userorganisationmembership_created'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'userorganisationmembership_modified'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'organisation': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'memberships'", 'null': 'True', 'to': u"orm['fixmystreet.OrganisationEntity']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'memberships'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"})
},
u'fixmystreet.zipcode': {
'Meta': {'object_name': 'ZipCode'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'commune': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'zipcode'", 'to': u"orm['fixmystreet.OrganisationEntity']"}),
'hide': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['fixmystreet']
symmetrical = True
| agpl-3.0 | 8,207,752,784,111,992,000 | 93.34433 | 238 | 0.566558 | false |
openpli-arm/bitbake | lib/bb/parse/parse_py/BBHandler.py | 1 | 7448 | #!/usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
   Module for handling .bb files
Reads a .bb file and obtains its metadata
"""
# Copyright (C) 2003, 2004 Chris Larson
# Copyright (C) 2003, 2004 Phil Blundell
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import re, bb, os, sys, time, string
import bb.fetch, bb.build, bb.utils
from bb import data, fetch
from ConfHandler import include, init
from bb.parse import ParseError, resolve_file, ast
# For compatibility
from bb.parse import vars_from_file
__func_start_regexp__ = re.compile( r"(((?P<py>python)|(?P<fr>fakeroot))\s*)*(?P<func>[\w\.\-\+\{\}\$]+)?\s*\(\s*\)\s*{$" )
__inherit_regexp__ = re.compile( r"inherit\s+(.+)" )
__export_func_regexp__ = re.compile( r"EXPORT_FUNCTIONS\s+(.+)" )
__addtask_regexp__ = re.compile("addtask\s+(?P<func>\w+)\s*((before\s*(?P<before>((.*(?=after))|(.*))))|(after\s*(?P<after>((.*(?=before))|(.*)))))*")
__addhandler_regexp__ = re.compile( r"addhandler\s+(.+)" )
__def_regexp__ = re.compile( r"def\s+(\w+).*:" )
__python_func_regexp__ = re.compile( r"(\s+.*)|(^$)" )
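# Illustrative examples (annotation added here, not in the original file) of the
# kind of .bb metadata lines the regular expressions above are meant to match:
#   __func_start_regexp__   : "do_install() {", "python do_patch() {", "fakeroot do_install() {"
#   __inherit_regexp__      : "inherit autotools pkgconfig"
#   __export_func_regexp__  : "EXPORT_FUNCTIONS do_configure do_compile"
#   __addtask_regexp__      : "addtask compile before do_build after do_patch"
#   __addhandler_regexp__   : "addhandler my_event_handler"
#   __def_regexp__          : "def my_python_helper(d):"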
__infunc__ = ""
__inpython__ = False
__body__ = []
__classname__ = ""
classes = [ None, ]
cached_statements = {}
# We need to indicate EOF to the feeder. This code is so messy that
# factoring it out to a close_parse_file method is out of the question.
# We will use the IN_PYTHON_EOF as an indicator to just close the method
#
# The two parts using it are tightly integrated anyway
IN_PYTHON_EOF = -9999999999999
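# Annotation (not in the original file): get_statements() feeds this sentinel as
# the "line number" once the real file is exhausted, so that feeder() flushes a
# trailing python `def` body through ast.handlePythonMethod() even when the
# file ends while still inside that definition.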
def supports(fn, d):
return fn[-3:] == ".bb" or fn[-8:] == ".bbclass" or fn[-4:] == ".inc"
def inherit(files, d):
__inherit_cache = data.getVar('__inherit_cache', d) or []
fn = ""
lineno = 0
files = data.expand(files, d)
for file in files:
if file[0] != "/" and file[-8:] != ".bbclass":
file = os.path.join('classes', '%s.bbclass' % file)
if not file in __inherit_cache:
bb.msg.debug(2, bb.msg.domain.Parsing, "BB %s:%d: inheriting %s" % (fn, lineno, file))
__inherit_cache.append( file )
data.setVar('__inherit_cache', __inherit_cache, d)
include(fn, file, d, "inherit")
__inherit_cache = data.getVar('__inherit_cache', d) or []
def get_statements(filename, absolsute_filename, base_name):
global cached_statements
try:
return cached_statements[absolsute_filename]
except KeyError:
file = open(absolsute_filename, 'r')
statements = ast.StatementGroup()
lineno = 0
while 1:
lineno = lineno + 1
s = file.readline()
if not s: break
s = s.rstrip()
feeder(lineno, s, filename, base_name, statements)
if __inpython__:
# add a blank line to close out any python definition
feeder(IN_PYTHON_EOF, "", filename, base_name, statements)
if filename.endswith(".bbclass") or filename.endswith(".inc"):
cached_statements[absolsute_filename] = statements
return statements
def handle(fn, d, include):
global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __infunc__, __body__, __residue__
__body__ = []
__infunc__ = ""
__classname__ = ""
__residue__ = []
if include == 0:
bb.msg.debug(2, bb.msg.domain.Parsing, "BB " + fn + ": handle(data)")
else:
bb.msg.debug(2, bb.msg.domain.Parsing, "BB " + fn + ": handle(data, include)")
(root, ext) = os.path.splitext(os.path.basename(fn))
base_name = "%s%s" % (root,ext)
init(d)
if ext == ".bbclass":
__classname__ = root
classes.append(__classname__)
__inherit_cache = data.getVar('__inherit_cache', d) or []
if not fn in __inherit_cache:
__inherit_cache.append(fn)
data.setVar('__inherit_cache', __inherit_cache, d)
if include != 0:
oldfile = data.getVar('FILE', d)
else:
oldfile = None
abs_fn = resolve_file(fn, d)
if include:
bb.parse.mark_dependency(d, abs_fn)
# actual loading
statements = get_statements(fn, abs_fn, base_name)
# DONE WITH PARSING... time to evaluate
if ext != ".bbclass":
data.setVar('FILE', fn, d)
statements.eval(d)
if ext == ".bbclass":
classes.remove(__classname__)
else:
if include == 0:
return ast.multi_finalize(fn, d)
if oldfile:
bb.data.setVar("FILE", oldfile, d)
# we have parsed the bb class now
if ext == ".bbclass" or ext == ".inc":
bb.methodpool.get_parsed_dict()[base_name] = 1
return d
def feeder(lineno, s, fn, root, statements):
global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__,__infunc__, __body__, classes, bb, __residue__
if __infunc__:
if s == '}':
__body__.append('')
ast.handleMethod(statements, __infunc__, lineno, fn, __body__)
__infunc__ = ""
__body__ = []
else:
__body__.append(s)
return
if __inpython__:
m = __python_func_regexp__.match(s)
if m and lineno != IN_PYTHON_EOF:
__body__.append(s)
return
else:
ast.handlePythonMethod(statements, root, __body__, fn)
__body__ = []
__inpython__ = False
if lineno == IN_PYTHON_EOF:
return
# fall through
if s == '' or s[0] == '#': return # skip comments and empty lines
if s[-1] == '\\':
__residue__.append(s[:-1])
return
s = "".join(__residue__) + s
__residue__ = []
m = __func_start_regexp__.match(s)
if m:
__infunc__ = m.group("func") or "__anonymous"
ast.handleMethodFlags(statements, __infunc__, m)
return
m = __def_regexp__.match(s)
if m:
__body__.append(s)
__inpython__ = True
return
m = __export_func_regexp__.match(s)
if m:
ast.handleExportFuncs(statements, m, classes)
return
m = __addtask_regexp__.match(s)
if m:
ast.handleAddTask(statements, m)
return
m = __addhandler_regexp__.match(s)
if m:
ast.handleBBHandlers(statements, m)
return
m = __inherit_regexp__.match(s)
if m:
ast.handleInherit(statements, m)
return
from bb.parse import ConfHandler
return ConfHandler.feeder(lineno, s, fn, statements)
# Add us to the handlers list
from bb.parse import handlers
handlers.append({'supports': supports, 'handle': handle, 'init': init})
del handlers
| gpl-2.0 | 2,966,996,180,090,194,000 | 30.033333 | 220 | 0.575054 | false |
KVM-VMI/nitro | nitro/backends/linux/backend.py | 1 | 7468 | """
Backend for extracting information about system calls from Linux guests.
"""
import logging
import re
from ctypes import sizeof, c_void_p
from libvmi import LibvmiError
from nitro.syscall import Syscall
from nitro.event import SyscallDirection
from nitro.backends.linux.process import LinuxProcess
from nitro.backends.backend import Backend
from nitro.backends.linux.arguments import LinuxArgumentMap
# Technically, I do not think using this the way
# I do is correct since it might be different for the VM
VOID_P_SIZE = sizeof(c_void_p)
HANDLER_NAME_REGEX = re.compile(r"^(SyS|sys)_(?P<name>.+)")
MAX_SYSTEM_CALL_COUNT = 1024
class LinuxBackend(Backend):
"""Extract information about system calls produced by the guest. This backend
    supports 64-bit Linux guests."""
__slots__ = (
"sys_call_table_addr",
"nb_vcpu",
"syscall_stack",
"tasks_offset",
"syscall_names",
"mm_offset",
"pgd_offset",
)
def __init__(self, domain, libvmi, listener, syscall_filtering=True):
super().__init__(domain, libvmi, listener, syscall_filtering)
self.sys_call_table_addr = self.libvmi.translate_ksym2v("sys_call_table")
logging.debug("sys_call_table at %s", hex(self.sys_call_table_addr))
vcpus_info = self.domain.vcpus()
self.nb_vcpu = len(vcpus_info[0])
self.syscall_stack = tuple([] for _ in range(self.nb_vcpu))
self.syscall_names = self.build_syscall_name_map()
self.tasks_offset = self.libvmi.get_offset("linux_tasks")
self.mm_offset = self.libvmi.get_offset("linux_mm")
self.pgd_offset = self.libvmi.get_offset("linux_pgd")
def process_event(self, event):
"""
        Process a ``NitroEvent`` and return a matching ``Syscall``. This function
        analyzes system state and, based on it, produces a new ``Syscall``
that contains higher-level information about the system call that is
being processed.
:param NitroEvent event: event to be analyzed
:returns: system call based on ``event``.
        :rtype: Syscall
"""
# Clearing these caches is really important since otherwise we will end
        # up with incorrect memory references. Unfortunately, this will also
# make the backend slow. In my limited testing it seems that only
# clearing v2p cache works most of the time but I am sure issues will
# arise.
self.libvmi.v2pcache_flush()
self.libvmi.pidcache_flush()
self.libvmi.rvacache_flush()
self.libvmi.symcache_flush()
process = self.associate_process(event.sregs.cr3)
if event.direction == SyscallDirection.exit:
try:
syscall = self.syscall_stack[event.vcpu_nb].pop()
syscall.event = event
except IndexError:
syscall = Syscall(event, "Unknown", "Unknown", process, None)
else:
# Maybe we should catch errors from associate_process
name = self.get_syscall_name(event.regs.rax)
args = LinuxArgumentMap(event, process)
cleaned = clean_name(name) if name is not None else None
syscall = Syscall(event, name, cleaned, process, args)
self.syscall_stack[event.vcpu_nb].append(syscall)
self.dispatch_hooks(syscall)
return syscall
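    # Illustrative usage of process_event() (annotation added here, not part of
    # the original module; the surrounding wiring is an assumption):
    #   backend = LinuxBackend(domain, libvmi, listener)
    #   syscall = backend.process_event(nitro_event)   # nitro_event: a NitroEvent
    #   print(syscall.name, syscall.process)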
def get_syscall_name(self, rax):
"""
Return name of the system call handler associated with ``rax``.
:param int rax: index into system call table.
:returns: system call handler name
:rtype: str
"""
# address of the pointer within the sys_call_table array
p_addr = self.sys_call_table_addr + (rax * VOID_P_SIZE)
# get the address of the procedure
addr = self.libvmi.read_addr_va(p_addr, 0)
# translate the address into a name
return self.libvmi.translate_v2ksym(addr)
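    # Worked example for get_syscall_name() (annotation, not in the original
    # file): on x86-64 Linux the syscall number arrives in rax, e.g. rax == 2 is
    # open(2).  With VOID_P_SIZE == 8 the handler pointer is read from
    # sys_call_table + 2 * 8, and translate_v2ksym() typically resolves it to a
    # symbol such as "SyS_open" or "sys_open", depending on the kernel version.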
def build_syscall_name_map(self):
        # It's a bit difficult to know where the system call table ends; here we
# do something kind of risky and read as long as translate_v2ksym
# returns something that looks like a system call handler.
mapping = {}
for i in range(0, MAX_SYSTEM_CALL_COUNT):
p_addr = self.sys_call_table_addr + (i * VOID_P_SIZE)
try:
addr = self.libvmi.read_addr_va(p_addr, 0)
symbol = self.libvmi.translate_v2ksym(addr)
except LibvmiError as error:
logging.critical("Failed to build syscall name map")
raise error
else:
if symbol is not None:
mapping[symbol] = i
else:
break
return mapping
def find_syscall_nb(self, syscall_name):
        # What about those compat_* handlers?
handler_regexp = re.compile(r"^(SyS|sys)_{}$".format(re.escape(syscall_name)))
for full_name, ind in self.syscall_names.items():
if handler_regexp.match(full_name) is not None:
return ind
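    # Example for find_syscall_nb() (annotation, not in the original file):
    # find_syscall_nb("open") matches a handler named "sys_open" or "SyS_open"
    # in the map built by build_syscall_name_map() and returns its index in
    # sys_call_table; compat_* entry points are not matched (see the comment above).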
def associate_process(self, cr3):
"""
Get ``LinuxProcess`` associated with ``cr3``
        :param int cr3: cr3 value
:returns: process associated with ``cr3``
:rtype: LinuxProcess
"""
head = self.libvmi.translate_ksym2v("init_task") # get the address of swapper's task_struct
next_ = head
while True: # Maybe this should have a sanity check stopping it
mm = self.libvmi.read_addr_va(next_ + self.mm_offset, 0)
if not mm:
mm = self.libvmi.read_addr_va(next_ + self.mm_offset + VOID_P_SIZE, 0)
if mm:
pgd = self.libvmi.read_addr_va(mm + self.pgd_offset, 0)
pgd_phys_addr = self.libvmi.translate_kv2p(pgd)
if cr3 == pgd_phys_addr:
# Eventually, I would like to look for the executable name from mm->exe_file->f_path
return LinuxProcess(self.libvmi, cr3, next_)
else:
#logging.debug("missing mm")
pass
next_ = self.libvmi.read_addr_va(next_ + self.tasks_offset, 0) - self.tasks_offset
if next_ == head:
break
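    # Annotation (not in the original file): associate_process() above walks the
    # kernel's circular task list starting at init_task, reads each
    # task_struct's mm pointer (falling back to the next pointer-sized slot,
    # in practice active_mm, which kernel threads use), translates mm->pgd to a
    # physical address and compares it with the trapped CR3 value; the first
    # match is wrapped in a LinuxProcess.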
def define_hook(self, name, callback, direction=SyscallDirection.enter):
super().define_hook(name, callback, direction)
if self.syscall_filtering:
self.add_syscall_filter(name)
def undefine_hook(self, name, direction=SyscallDirection.enter):
super().undefine_hook(name, direction)
if self.syscall_filtering:
self.remove_syscall_filter(name)
def add_syscall_filter(self, syscall_name):
syscall_nb = self.find_syscall_nb(syscall_name)
if syscall_nb is None:
raise RuntimeError(
'Unable to find syscall number for %s' % syscall_name)
self.listener.add_syscall_filter(syscall_nb)
def remove_syscall_filter(self, syscall_name):
syscall_nb = self.find_syscall_nb(syscall_name)
if syscall_nb is None:
raise RuntimeError(
'Unable to find syscall number for %s' % syscall_name)
self.listener.remove_syscall_filter(syscall_nb)
def clean_name(name):
matches = HANDLER_NAME_REGEX.search(name)
return matches.group("name") if matches is not None else name
| gpl-3.0 | -2,482,266,364,801,742,000 | 38.513228 | 104 | 0.615158 | false |
HPCGISLab/pcml | pcml/core/Decomposition.py | 1 | 12072 | """
Copyright (c) 2014 High-Performance Computing and GIS (HPCGIS) Laboratory. All rights reserved.
Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
Authors and contributors: Eric Shook ([email protected]); Zhengliang Feng ([email protected], [email protected])
"""
from .Layer import *
from .Subdomain import *
import pcml.core.PCMLConfig as PCMLConfig
from .PCMLPrims import *
import math
def globalpointlistdecomposition(layer, buffersize):
# Row decomposition supports pointlist only for globalclass operations
if layer.data_structure == Datastructure.pointlist:
if buffersize >= 0: # Then it is not globalclass operation
raise PCMLNotSupported("Currently globalpointlistdecomposition only supports globalclass+pointlist")
# If this layer is a pointlist, then it is assumed to be global operation
# so just copy layer information and duplicate the subdomain
subdomain = Subdomain(layer.y, layer.x, layer.h, layer.w, layer.title + " subdomain pointlist")
subdomain.set_pointlist(layer.get_pointlist())
subdomainlist = []
        for sdind in xrange(PCMLConfig.numsubdomains):
subdomainlist.append(subdomain)
return subdomainlist
else:
raise PCMLNotSupported("globalpointlistdecomposition only supports pointlist datastructures")
# Take a layer and return a list of subdomains
def rowdecomposition(layer, buffersize):
# Row decomposition supports pointlist only for globalclass operations
if layer.data_structure == Datastructure.pointlist:
        return globalpointlistdecomposition(layer, buffersize)
assert(layer.data_structure == Datastructure.array), "Data structure is not an array"
# If global then buffer size is infinite as all subdomains will have all data
if buffersize < 0: # This indicates the buffer should be infinite sized (global/zonal operation)
buffersize = 9999999999999
# FIXME: I should do the same global subdomain as pointlist here
# Sanity check nrows and ncols
# FIXME: In the future this check will happen in operation._decompositioninit
assert(layer.nrows is not None), "Layer number of rows (nrows) is None"
assert(layer.ncols is not None), "Layer number of columns (ncols) is None"
subdomainlist = []
# Numer of rows per subdomain given suggested decomposition granularity (think number of chunks)
# rowspersubdomain = int(math.ceil(float(layer.nrows)/float(PCMLConfig.decomposition_granularity)))
# Number of subdomains to create when given rowspersubdomain
numsubdomains = int(math.ceil(float(layer.nrows) / float(PCMLConfig.decomposition_granularity)))
# For each subdomain indexed by sdind, calculate the size
for sdind in xrange(numsubdomains):
# First row in the subdomain
r = PCMLConfig.decomposition_granularity * sdind
# Default number of rows for this subdomain
        nrows = PCMLConfig.decomposition_granularity # Number of rows for this subdomain
if buffersize > 0: # If we have a buffer (e.g., focal operation), then add the buffer
# A buffer will generally reduce r by buffersize and increase nrows by buffersize*2
# However, r and r+nrows must be contained within the range 0-layer.nrows
new_r = max(0, r - buffersize) # Calculate new r value making sure it is not negative
new_h = min(layer.nrows, r + nrows + buffersize) # calculate new height making sure it is <= layer.nrows
# Replace original r and nrows with new values
nrows = new_h - new_r
r = new_r
# print("new_r",new_r,"new_h",new_h)
else: # Ensure that we don't allocate more rows past the number of layer rows
nrows = min(layer.nrows - r, nrows)
# Sanity check
# print("r",r,"nrows",nrows,"layer.nrows",layer.nrows)
assert(r + nrows <= layer.nrows), "Number of rows for layer is less than total for subdomains"
# In row decomposition, column index is always 0 and ncols never changes
c = 0
ncols = layer.ncols
# Now derive y, x, h, w
y = layer.y + r * layer.cellsize
h = nrows * layer.cellsize
# In row decomposition: x and w always remain the same
x = layer.x
w = layer.w
# Create a subdomain and populate it with the correct attribute values
subdomain = Subdomain(y, x, h, w, layer.title+" subdomain "+str(sdind))
subdomain.cellsize = layer.cellsize
subdomain.nodata_value = layer.nodata_value
subdomain.r = r
subdomain.c = c
subdomain.nrows = nrows
subdomain.ncols = ncols
# Extract an array slice (reference to data in a layer for lower memory overhead)
# from the layer and set the data reference for the subdomain to use
arrslice = layer.slice_nparray(r, 0, nrows, ncols)
subdomain.set_data_ref(arrslice)
# Add the subdomain to the list
subdomainlist.append(subdomain)
return subdomainlist
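# Worked example for rowdecomposition() (annotation, not part of the original
# module), assuming PCMLConfig.decomposition_granularity == 40: a layer with
# nrows == 100 and buffersize == 2 yields ceil(100/40) == 3 subdomains covering
# rows 0-41, 38-81 and 78-99 -- each 40-row block grows by up to `buffersize`
# rows on each side, clamped to the layer bounds.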
# Take a layer and return a list of subdomains
def columndecomposition(layer, buffersize):
# print("Column decomposition")
# Col decomposition supports pointlist only for globalclass operations
if layer.data_structure == Datastructure.pointlist:
        return globalpointlistdecomposition(layer, buffersize)
assert(layer.data_structure == Datastructure.array), "Data structure is not an array"
# If global then buffer size is infinite as all subdomains will have all data
if buffersize < 0: # This indicates the buffer should be infinite sized (global/zonal operation)
buffersize = 9999999999999
# FIXME: I should do the same global subdomain as pointlist here
# Sanity check nrows and ncols
# FIXME: In the future this check will happen in operation._decompositioninit
assert(layer.nrows is not None), "Layer number of rows (nrows) is None"
assert(layer.ncols is not None), "Layer number of columns (ncols) is None"
subdomainlist = []
    # Number of columns per subdomain given suggested decomposition granularity (think number of chunks)
# colspersubdomain = int(math.ceil(float(layer.ncols)/float(PCMLConfig.decomposition_granularity)))
# Number of subdomains to create when given colspersubdomain
numsubdomains = int(math.ceil(float(layer.ncols)/float(PCMLConfig.decomposition_granularity)))
# For each subdomain indexed by sdind, calculate the size
for sdind in xrange(numsubdomains):
# First col in the subdomain
c = PCMLConfig.decomposition_granularity*sdind
# Default number of columns for this subdomain
        ncols = PCMLConfig.decomposition_granularity # Number of columns for this subdomain
if buffersize > 0: # If we have a buffer (e.g., focal operation), then add the buffer
# A buffer will generally reduce c by buffersize and increase ncols by buffersize*2
# However, c and c+ncols must be contained within the range 0-layer.ncols
new_c = max(0, c - buffersize) # Calculate new c value making sure it is not negative
new_w = min(layer.ncols, c+ncols + buffersize) # calculate new width making sure it is <= layer.ncols
# Replace original c and ncols with new values
ncols = new_w - new_c
c = new_c
else: # Ensure that we don't allocate more cols than the cols in a layer
ncols = min(layer.ncols - c, ncols)
# Sanity check
assert(c + ncols <= layer.ncols), "Number of columns in layer is less than total for subdomains"
# In column decomposition, row index is always 0 and nrows never changes
r = 0
nrows = layer.nrows
# Now derive y, x, h, w
x = layer.x + c * layer.cellsize
w = ncols * layer.cellsize
# In column decomposition: y and h always remain the same
y = layer.y
h = layer.h
# Create a subdomain and populate it with the correct attribute values
subdomain = Subdomain(y, x, h, w, layer.title+" subdomain "+str(sdind))
subdomain.cellsize = layer.cellsize
subdomain.nodata_value = layer.nodata_value
subdomain.r = r
subdomain.c = c
subdomain.nrows = nrows
subdomain.ncols = ncols
# Extract an array slice (reference to data in a layer for lower memory overhead)
# from the layer and set the data reference for the subdomain to use
arrslice = layer.slice_nparray(0, c, nrows, ncols)
subdomain.set_data_ref(arrslice)
# Add the subdomain to the list
subdomainlist.append(subdomain)
return subdomainlist
# point decomposition using row strategy
def pointrowdecomposition(layer, buffersize):
subdomainlist = []
totalsubdomains = PCMLConfig.numsubdomains
currenty = layer.y
currentwithbuffy = layer.y
layerblockheight = layer.h / totalsubdomains
for subdindex in xrange(totalsubdomains):
buffh = buffy = 0
if buffersize > 0:
buffh = min(layer.h, currenty + layerblockheight + buffersize)
buffy = currentwithbuffy
else:
buffh = layerblockheight
buffy = currenty
subdomain = Subdomain(currenty, layer.x, layerblockheight, layer.w, layer.title + " subdomain " + str(subdindex))
subdomain.buffx = layer.x
subdomain.buffw = layer.w
subdomain.buffh = buffh
subdomain.buffy = buffy
pointlist = []
for point in layer.get_pointlist():
if subdomain.isinsidebounds(point, usehalo=True):
pointlist.append(point.copy())
        # If executing serially, the subdomain only needs an ordinary list; otherwise use a multiprocessing-backed list implementation
if PCMLConfig.exectype == ExecutorType.serialpython:
subdomain.set_pointlist(pointlist)
else:
subdomain.set_pointlist(pointlist, ref=True)
subdomainlist.append(subdomain)
currenty = currenty + layerblockheight
currentwithbuffy = max(currenty - buffersize, layer.y)
return subdomainlist
# Create point subdomains from a point layer, using the raster layer's decomposition as the model
def pointsubdomainsfromrastersubdomains(pointlayer, rasterlayer, buffersize):
subdomainlist = []
rowspersubdomain = float(PCMLConfig.decomposition_granularity)
numsubdomains = int(math.ceil(float(rasterlayer.nrows) / float(rowspersubdomain)))
for sdind in xrange(numsubdomains):
r = rowspersubdomain * sdind
nrows = rowspersubdomain
hwithoutbuff = min(rasterlayer.nrows - r, nrows) * rasterlayer.cellsize
ywithoutbuff = rasterlayer.y + r * rasterlayer.cellsize
if buffersize > 0:
new_r = max(0, r - buffersize)
new_h = min(rasterlayer.nrows, r + nrows + buffersize)
nrows = new_h - new_r
r = new_r
else:
nrows = min(rasterlayer.nrows - r, nrows)
y = rasterlayer.y + r * rasterlayer.cellsize
h = nrows * rasterlayer.cellsize
x = rasterlayer.x
w = rasterlayer.w
subdomain = Subdomain(ywithoutbuff, x, hwithoutbuff, w, pointlayer.title+" subdomain "+str(sdind))
subdomain.buffx = x
subdomain.buffw = w
subdomain.buffh = h
subdomain.buffy = y
pointlist = []
for point in pointlayer.get_pointlist():
if subdomain.isinsidebounds(point, usehalo=True):
pointlist.append(point.copy())
subdomain.set_pointlist(pointlist)
subdomainlist.append(subdomain)
return subdomainlist
def pointrasterrowdecomposition(layer, buffersize, layerlist=None):
if layer.data_structure == Datastructure.array:
return rowdecomposition(layer, buffersize)
elif layer.data_structure == Datastructure.pointlist and layerlist is not None:
return pointsubdomainsfromrastersubdomains(layer, layerlist[1], buffersize)
| bsd-3-clause | -8,558,338,686,517,079,000 | 43.382353 | 121 | 0.677601 | false |
ttreeagency/PootleTypo3Org | pootle/apps/pootle_misc/dispatch.py | 1 | 2131 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009-2012 Zuza Software Foundation
#
# This file is part of Pootle.
#
# Pootle is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Pootle is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pootle; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from pootle_misc.baseurl import l
def translate(path_obj, state=None, check=None, user=None):
# In Pootle, URLs ending in translate.html are used when the user
# translates all files in a directory (for example, if the user is
# going through all fuzzy translations in a directory).
path = path_obj.pootle_path
if path.endswith('/'):
path += 'translate.html'
else:
path += '/translate/'
if state:
path += '#filter=%s' % state
if user:
path += '&user=%s' % user
elif check:
path += '#filter=checks,%s' % check
return l(path)
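# Illustrative sketch (not part of the original module): the path fragments built by
# translate() above for a hypothetical directory whose pootle_path is '/af/project/'.
# The literals below are assumptions for illustration only; l() may additionally
# prepend a deployment-specific prefix to each value.
EXAMPLE_TRANSLATE_PATHS = {
    'plain': '/af/project/translate.html',
    'by_state': '/af/project/translate.html#filter=incomplete',
    'by_state_and_user': '/af/project/translate.html#filter=incomplete&user=admin',
    'by_check': '/af/project/translate.html#filter=checks,accelerators',
}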
def download_zip(path_obj):
if path_obj.is_dir:
current_folder = path_obj.pootle_path
else:
current_folder = path_obj.parent.pootle_path
# FIXME: ugly URL, django.core.urlresolvers.reverse() should work
archive_name = "%sexport/zip" % current_folder
return l(archive_name)
def export(pootle_path, format):
return l('/export-file/%s%s' % (format, pootle_path))
def commit(path_obj):
return l(path_obj.pootle_path + '/commit')
def update(path_obj):
return l(path_obj.pootle_path + '/update')
def commit_all(path_obj):
return l(path_obj.pootle_path + 'commit_all')
def update_all(path_obj):
return l(path_obj.pootle_path + 'update_all')
| gpl-2.0 | -5,951,156,004,610,860,000 | 28.597222 | 75 | 0.678085 | false |
sekikn/ambari | ambari-agent/src/main/python/ambari_agent/listeners/AgentActionsListener.py | 2 | 2270 | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import logging
import ambari_stomp
from ambari_agent.listeners import EventListener
from ambari_agent.Utils import Utils
from ambari_agent import Constants
logger = logging.getLogger(__name__)
class AgentActionsListener(EventListener):
"""
Listener of Constants.AGENT_ACTIONS_TOPIC events from server.
"""
ACTION_NAME = 'actionName'
RESTART_AGENT_ACTION = 'RESTART_AGENT'
def __init__(self, initializer_module):
super(AgentActionsListener, self).__init__(initializer_module)
self.stop_event = initializer_module.stop_event
def on_event(self, headers, message):
"""
    Triggered when an event on the Constants.AGENT_ACTIONS_TOPIC topic is received from the server.
    It contains small actions which the server can ask the agent to perform.
    Bigger actions that carry a lot of information or need a special workflow would require a
    dedicated topic. Small actions like restart_agent/clean_cache make sense as a general event.
@param headers: headers dictionary
@param message: message payload dictionary
"""
action_name = message[self.ACTION_NAME]
if action_name == self.RESTART_AGENT_ACTION:
self.restart_agent()
else:
logger.warn("Unknown action '{0}' requested by server. Ignoring it".format(action_name))
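  # Illustrative sketch (not part of the original listener): a payload handled by
  # on_event() above is assumed to look roughly like
  #   {'actionName': 'RESTART_AGENT'}
  # with any other actionName value being logged and ignored.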
def restart_agent(self):
logger.warn("Restarting the agent by the request from server")
Utils.restartAgent(self.stop_event)
def get_handled_path(self):
return Constants.AGENT_ACTIONS_TOPIC
| apache-2.0 | -5,151,192,752,702,581,000 | 33.393939 | 94 | 0.753304 | false |
corpnewt/CorpBot.py | Cogs/DL.py | 1 | 2147 | import asyncio, aiohttp, json
def setup(bot):
# Not a cog
pass
async def async_post_json(url, data = None, headers = None):
async with aiohttp.ClientSession(headers=headers) as session:
async with session.post(url, data=data) as response:
return await response.json()
async def async_post_text(url, data = None, headers = None):
async with aiohttp.ClientSession(headers=headers) as session:
async with session.post(url, data=data) as response:
res = await response.read()
return res.decode("utf-8", "replace")
async def async_post_bytes(url, data = None, headers = None):
async with aiohttp.ClientSession(headers=headers) as session:
async with session.post(url, data=data) as response:
return await response.read()
async def async_head_json(url, headers = None):
async with aiohttp.ClientSession(headers=headers) as session:
async with session.head(url) as response:
return await response.json()
async def async_dl(url, headers = None):
# print("Attempting to download {}".format(url))
total_size = 0
data = b""
async with aiohttp.ClientSession(headers=headers) as session:
async with session.get(url) as response:
assert response.status == 200
while True:
chunk = await response.content.read(4*1024) # 4k
data += chunk
total_size += len(chunk)
if not chunk:
break
if total_size > 8000000:
# Too big...
# print("{}\n - Aborted - file too large.".format(url))
return None
return data
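# Illustrative usage sketch (not part of the original helpers): async_dl() above caps
# downloads at roughly 8 MB and returns None when the cap is exceeded, e.g.
#   raw = await async_dl("https://example.com/file.bin")  # bytes, or None if too large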
async def async_text(url, headers = None):
data = await async_dl(url, headers)
if data != None:
return data.decode("utf-8", "replace")
else:
return data
async def async_json(url, headers = None):
data = await async_dl(url, headers)
if data != None:
return json.loads(data.decode("utf-8", "replace"))
else:
return data
| mit | -6,453,324,823,601,828,000 | 34.389831 | 75 | 0.588728 | false |
fangeugene/the-blue-alliance | controllers/main_controller.py | 1 | 13263 | import datetime
import logging
import os
import webapp2
from google.appengine.api import memcache
from google.appengine.ext import ndb
from google.appengine.ext.webapp import template
import tba_config
from base_controller import CacheableHandler
from consts.event_type import EventType
from helpers.event_helper import EventHelper
from helpers.firebase.firebase_pusher import FirebasePusher
from models.event import Event
from models.insight import Insight
from models.team import Team
from template_engine import jinja2_engine
def render_static(page):
memcache_key = "main_%s" % page
html = memcache.get(memcache_key)
if html is None:
path = os.path.join(os.path.dirname(__file__), "../templates/%s.html" % page)
html = template.render(path, {})
if tba_config.CONFIG["memcache"]:
memcache.set(memcache_key, html, 86400)
return html
def handle_404(request, response, exception):
response.write(render_static("404"))
response.set_status(404)
def handle_500(request, response, exception):
logging.exception(exception)
response.write(render_static("500"))
response.set_status(500)
class TwoChampsHandler(CacheableHandler):
CACHE_VERSION = 0
CACHE_KEY_FORMAT = "two_champs_{}_{}"
def __init__(self, *args, **kw):
super(TwoChampsHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24
self._team_key_a = self.request.get('team_a', None)
self._team_key_b = self.request.get('team_b', None)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self._team_key_a, self._team_key_b)
def _render(self, *args, **kw):
team_a = Team.get_by_id(self._team_key_a) if self._team_key_a else None
team_b = Team.get_by_id(self._team_key_b) if self._team_key_b else None
self.template_values.update({
'team_a': team_a,
'team_b': team_b,
})
return jinja2_engine.render('2champs.html', self.template_values)
class MainKickoffHandler(CacheableHandler):
CACHE_VERSION = 3
CACHE_KEY_FORMAT = "main_kickoff"
def __init__(self, *args, **kw):
super(MainKickoffHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24
def _render(self, *args, **kw):
kickoff_datetime_est = datetime.datetime(2017, 1, 7, 10, 00)
kickoff_datetime_utc = kickoff_datetime_est + datetime.timedelta(hours=5)
is_kickoff = datetime.datetime.now() >= kickoff_datetime_est - datetime.timedelta(days=1) # turn on 1 day before
self.template_values.update({
'is_kickoff': is_kickoff,
'kickoff_datetime_est': kickoff_datetime_est,
'kickoff_datetime_utc': kickoff_datetime_utc,
})
path = os.path.join(os.path.dirname(__file__), "../templates/index_kickoff.html")
return template.render(path, self.template_values)
class MainBuildseasonHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "main_buildseason"
def __init__(self, *args, **kw):
super(MainBuildseasonHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
endbuild_datetime_est = datetime.datetime(2017, 2, 21, 23, 59)
endbuild_datetime_utc = endbuild_datetime_est + datetime.timedelta(hours=5)
week_events = EventHelper.getWeekEvents()
self.template_values.update({
'endbuild_datetime_est': endbuild_datetime_est,
'endbuild_datetime_utc': endbuild_datetime_utc,
'events': week_events,
})
path = os.path.join(os.path.dirname(__file__), "../templates/index_buildseason.html")
return template.render(path, self.template_values)
class MainChampsHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "main_champs"
def __init__(self, *args, **kw):
super(MainChampsHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 5
def _render(self, *args, **kw):
year = datetime.datetime.now().year
hou_event_keys_future = Event.query(
Event.year == year,
Event.event_type_enum.IN(EventType.CMP_EVENT_TYPES),
Event.start_date <= datetime.datetime(2017, 4, 22)).fetch_async(keys_only=True)
stl_event_keys_future = Event.query(
Event.year == year,
Event.event_type_enum.IN(EventType.CMP_EVENT_TYPES),
Event.start_date > datetime.datetime(2017, 4, 22)).fetch_async(keys_only=True)
hou_events_futures = ndb.get_multi_async(hou_event_keys_future.get_result())
stl_events_futures = ndb.get_multi_async(stl_event_keys_future.get_result())
self.template_values.update({
"hou_events": [e.get_result() for e in hou_events_futures],
"stl_events": [e.get_result() for e in stl_events_futures],
"year": year,
})
path = os.path.join(os.path.dirname(__file__), '../templates/index_champs.html')
return template.render(path, self.template_values)
class MainCompetitionseasonHandler(CacheableHandler):
CACHE_VERSION = 5
CACHE_KEY_FORMAT = "main_competitionseason"
def __init__(self, *args, **kw):
super(MainCompetitionseasonHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 5
def _render(self, *args, **kw):
week_events = EventHelper.getWeekEvents()
special_webcasts = FirebasePusher.get_special_webcasts()
self.template_values.update({
"events": week_events,
"any_webcast_online": any(w.get('status') == 'online' for w in special_webcasts),
"special_webcasts": special_webcasts,
})
path = os.path.join(os.path.dirname(__file__), '../templates/index_competitionseason.html')
return template.render(path, self.template_values)
class MainInsightsHandler(CacheableHandler):
CACHE_VERSION = 3
CACHE_KEY_FORMAT = "main_insights"
def __init__(self, *args, **kw):
super(MainInsightsHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 5
def _render(self, *args, **kw):
week_events = EventHelper.getWeekEvents()
year = datetime.datetime.now().year
special_webcasts = FirebasePusher.get_special_webcasts()
self.template_values.update({
"events": week_events,
"year": year,
"any_webcast_online": any(w.get('status') == 'online' for w in special_webcasts),
"special_webcasts": special_webcasts,
})
insights = ndb.get_multi([ndb.Key(Insight, Insight.renderKeyName(year, insight_name)) for insight_name in Insight.INSIGHT_NAMES.values()])
for insight in insights:
if insight:
self.template_values[insight.name] = insight
path = os.path.join(os.path.dirname(__file__), '../templates/index_insights.html')
return template.render(path, self.template_values)
class MainOffseasonHandler(CacheableHandler):
CACHE_VERSION = 2
CACHE_KEY_FORMAT = "main_offseason"
def __init__(self, *args, **kw):
super(MainOffseasonHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24
def _render(self, *args, **kw):
week_events = EventHelper.getWeekEvents()
self.template_values.update({
"events": week_events,
})
path = os.path.join(os.path.dirname(__file__), '../templates/index_offseason.html')
return template.render(path, self.template_values)
class ContactHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "main_contact"
def __init__(self, *args, **kw):
super(ContactHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
path = os.path.join(os.path.dirname(__file__), "../templates/contact.html")
return template.render(path, self.template_values)
class HashtagsHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "main_hashtags"
def __init__(self, *args, **kw):
super(HashtagsHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
path = os.path.join(os.path.dirname(__file__), "../templates/hashtags.html")
return template.render(path, self.template_values)
class AboutHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "main_about"
def __init__(self, *args, **kw):
super(AboutHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
path = os.path.join(os.path.dirname(__file__), "../templates/about.html")
return template.render(path, self.template_values)
class ThanksHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "main_thanks"
def __init__(self, *args, **kw):
super(ThanksHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
path = os.path.join(os.path.dirname(__file__), "../templates/thanks.html")
return template.render(path, self.template_values)
class OprHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "main_opr"
def __init__(self, *args, **kw):
super(OprHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
path = os.path.join(os.path.dirname(__file__), "../templates/opr.html")
return template.render(path, self.template_values)
class PredictionsHandler(CacheableHandler):
CACHE_VERSION = 0
CACHE_KEY_FORMAT = "main_predictions"
def __init__(self, *args, **kw):
super(PredictionsHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
path = os.path.join(os.path.dirname(__file__), "../templates/predictions.html")
return template.render(path, self.template_values)
class SearchHandler(webapp2.RequestHandler):
def get(self):
try:
q = self.request.get("q")
logging.info("search query: %s" % q)
if q.isdigit():
team_id = "frc%s" % q
team = Team.get_by_id(team_id)
if team:
self.redirect(team.details_url)
return None
elif q[:4].isdigit(): # Check for event key
event = Event.get_by_id(q)
if event:
self.redirect(event.details_url)
return None
else: # Check for event short
year = datetime.datetime.now().year # default to current year
event = Event.get_by_id('{}{}'.format(year, q))
if event:
self.redirect(event.details_url)
return None
except Exception, e:
logging.warning("warning: %s" % e)
finally:
self.response.out.write(render_static("search"))
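# Illustrative sketch (not part of the original handler): how SearchHandler.get() above
# is expected to route a few hypothetical queries. The keys and targets below are
# assumptions for illustration only; the short-code case uses the current year.
EXAMPLE_SEARCH_ROUTES = {
    '254': 'redirect to the frc254 team page',
    '2017casj': 'redirect to the 2017casj event page',
    'casj': 'redirect to the <current year>casj event page',
}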
class WebcastsHandler(CacheableHandler):
CACHE_VERSION = 2
CACHE_KEY_FORMAT = "main_webcasts"
def __init__(self, *args, **kw):
super(WebcastsHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
year = datetime.datetime.now().year
event_keys = Event.query(Event.year == year).order(Event.start_date).fetch(500, keys_only=True)
events = ndb.get_multi(event_keys)
self.template_values.update({
'events': events,
'year': year,
})
path = os.path.join(os.path.dirname(__file__), '../templates/webcasts.html')
return template.render(path, self.template_values)
class RecordHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "main_record"
def __init__(self, *args, **kw):
super(RecordHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
path = os.path.join(os.path.dirname(__file__), "../templates/record.html")
return template.render(path, self.template_values)
class ApiWriteHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "api_write"
def __init__(self, *args, **kw):
super(ApiWriteHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
path = os.path.join(os.path.dirname(__file__), "../templates/apiwrite.html")
return template.render(path, self.template_values)
class MatchInputHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "match_input"
def __init__(self, *args, **kw):
super(MatchInputHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60
def _render(self, *args, **kw):
path = os.path.join(os.path.dirname(__file__), "../templates/matchinput.html")
return template.render(path, self.template_values)
| mit | -4,248,073,567,319,141,400 | 34.087302 | 146 | 0.61238 | false |
luwei0917/awsemmd_script | AAWSEM/getSS.py | 1 | 1701 | #!/usr/bin/env python3
import glob
import os
os.system("mkdir -p _data")
os.chdir("_output")
folder_list = glob.glob("*.jnet")
# print(folder_list, len(folder_list))
# folder_list = ['T0759.jnet']
ssdata = "../_data/"
for protein in folder_list:
with open(protein) as input_data:
protein_name = protein.split(".")[0]
print(protein_name)
out = open(ssdata+protein_name, 'w')
for line in input_data:
# print(line)
top, *data = line.split(",")
if(top == "jnetpred:-"):
# print("hello")
# print(line)
print(data)
a = 0.0
b = 0.0
out.write(str(round(a, 1)))
out.write(' ')
out.write(str(round(b, 1)))
out.write('\n')
for i in data[:-1]: # last one is \n
a = 0.0
b = 0.0
if(i == "E"):
b = 1.0
elif(i == "H"):
a = 1.0
out.write(str(round(a, 1)))
out.write(' ')
out.write(str(round(b, 1)))
out.write('\n')
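# Illustrative note (not part of the original script): the loop above turns each
# predicted residue into a "<helix> <sheet>" weight pair, e.g. H -> "1.0 0.0",
# E -> "0.0 1.0" and anything else -> "0.0 0.0", after the single leading
# "0.0 0.0" padding row written before the per-residue rows.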
# if len(sys.argv)!=3:
# print "\n" + sys.argv[0] + " inpute_file output_file\n"
# exit()
#
# input_file = sys.argv[1]
# output_file = sys.argv[2]
#
# inp = open(input_file, 'r')
# st = inp.read().strip()
# inp.close()
#
# out = open(output_file, 'w')
# for s in st:
# a = 0.0
# b = 0.0
# if s=='H':
# a = 1.0
# elif s=='E':
# b = 1.0
# out.write(str(round(a,1)))
# out.write(' ')
# out.write(str(round(b,1)))
# out.write('\n')
# out.close()
| mit | 2,126,273,118,975,896,800 | 25.578125 | 58 | 0.429747 | false |
cheungpat/sqlalchemy-utils | tests/types/test_weekdays.py | 1 | 1608 | import pytest
import sqlalchemy as sa
from sqlalchemy_utils import i18n
from sqlalchemy_utils.primitives import WeekDays
from sqlalchemy_utils.types import WeekDaysType
from sqlalchemy_utils.types.weekdays import babel
from tests import TestCase
@pytest.mark.skipif('babel is None')
class WeekDaysTypeTestCase(TestCase):
def setup_method(self, method):
TestCase.setup_method(self, method)
i18n.get_locale = lambda: babel.Locale('en')
def create_models(self):
class Schedule(self.Base):
__tablename__ = 'schedule'
id = sa.Column(sa.Integer, primary_key=True)
working_days = sa.Column(WeekDaysType)
def __repr__(self):
return 'Schedule(%r)' % self.id
self.Schedule = Schedule
    def test_weekdays_parameter_processing(self):
schedule = self.Schedule(
working_days='0001111'
)
self.session.add(schedule)
self.session.commit()
schedule = self.session.query(self.Schedule).first()
assert isinstance(schedule.working_days, WeekDays)
def test_scalar_attributes_get_coerced_to_objects(self):
schedule = self.Schedule(working_days=u'1010101')
assert isinstance(schedule.working_days, WeekDays)
class TestWeekDaysTypeOnSQLite(WeekDaysTypeTestCase):
dns = 'sqlite:///:memory:'
class TestWeekDaysTypeOnPostgres(WeekDaysTypeTestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
class TestWeekDaysTypeOnMySQL(WeekDaysTypeTestCase):
dns = 'mysql+pymysql://travis@localhost/sqlalchemy_utils_test'
| bsd-3-clause | 4,132,694,631,884,193,300 | 29.339623 | 66 | 0.692164 | false |
malept/js-sphinx-inventory | sphinx_inventory/js/mdn.py | 1 | 1705 | # -*- coding: utf-8 -*-
from collections import defaultdict
import json
import logging
from ._compat import ElementTree, urlopen
MDN_SITEMAP = 'https://developer.mozilla.org/sitemaps/en-US/sitemap.xml'
SITEMAP_NS = 'http://www.sitemaps.org/schemas/sitemap/0.9'
log = logging.getLogger(__name__)
def parse():
"""
Generate a cross-reference dictionary for the MDN JavaScript Reference.
:rtype: dict
"""
with urlopen(MDN_SITEMAP) as f:
xml = ElementTree.parse(f)
refs = defaultdict(dict)
for loc in xml.iterfind('{{{ns}}}url/{{{ns}}}loc'.format(ns=SITEMAP_NS)):
url = loc.text
if 'JavaScript/Reference/Global_Objects/' not in url:
continue
url_suffix = url[81:]
parts = url_suffix.split('/')
if len(parts) == 1:
name = parts[0]
if name[0].isupper():
ref_type = 'class'
else:
ref_type = 'data'
elif len(parts) == 2:
cls, attr = parts
with urlopen('{url}$json'.format(url=url)) as f:
metadata = json.loads(f.read().decode('utf-8'))
name = '{0}.{1}'.format(cls, attr)
if 'Method' in metadata['tags']:
ref_type = 'function'
elif 'Property' in metadata['tags']:
ref_type = 'attribute'
else:
fmt = 'Unknown ref_type for {0}. Tags: {1}'
log.warning(fmt.format(url, ', '.join(metadata['tags'])))
continue
else:
log.warning('Skipping URL (too many parts): {0}'.format(url))
continue
refs[ref_type][name] = url_suffix
return dict(refs)
| apache-2.0 | -7,696,636,293,475,174,000 | 31.788462 | 77 | 0.539589 | false |
gitcoinco/web | app/grants/migrations/0014_matchpledge.py | 1 | 1354 | # Generated by Django 2.1.2 on 2019-02-25 03:13
from django.db import migrations, models
import django.db.models.deletion
import economy.models
class Migration(migrations.Migration):
dependencies = [
('grants', '0013_subscription_amount_per_period_usdt'),
]
operations = [
migrations.CreateModel(
name='MatchPledge',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(db_index=True, default=economy.models.get_time)),
('modified_on', models.DateTimeField(default=economy.models.get_time)),
('active', models.BooleanField(default=False, help_text='Whether or not the MatchingPledge is active.')),
('amount', models.DecimalField(decimal_places=4, default=1, help_text='The matching pledge amount in DAI.', max_digits=50)),
('comments', models.TextField(blank=True, default='', help_text='The comments.')),
('profile', models.ForeignKey(help_text='The MatchingPledgers profile.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='matchPledges', to='dashboard.Profile')),
],
options={
'abstract': False,
},
),
]
| agpl-3.0 | -1,993,627,749,603,662,300 | 44.133333 | 199 | 0.6226 | false |
carver/ens.py | ens/abis.py | 1 | 22493 | ENS = [
{
"constant": True,
"inputs": [
{
"name": "node",
"type": "bytes32"
}
],
"name": "resolver",
"outputs": [
{
"name": "",
"type": "address"
}
],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "node",
"type": "bytes32"
}
],
"name": "owner",
"outputs": [
{
"name": "",
"type": "address"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "node",
"type": "bytes32"
},
{
"name": "label",
"type": "bytes32"
},
{
"name": "owner",
"type": "address"
}
],
"name": "setSubnodeOwner",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "node",
"type": "bytes32"
},
{
"name": "ttl",
"type": "uint64"
}
],
"name": "setTTL",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "node",
"type": "bytes32"
}
],
"name": "ttl",
"outputs": [
{
"name": "",
"type": "uint64"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "node",
"type": "bytes32"
},
{
"name": "resolver",
"type": "address"
}
],
"name": "setResolver",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "node",
"type": "bytes32"
},
{
"name": "owner",
"type": "address"
}
],
"name": "setOwner",
"outputs": [],
"payable": False,
"type": "function"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "node",
"type": "bytes32"
},
{
"indexed": False,
"name": "owner",
"type": "address"
}
],
"name": "Transfer",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "node",
"type": "bytes32"
},
{
"indexed": True,
"name": "label",
"type": "bytes32"
},
{
"indexed": False,
"name": "owner",
"type": "address"
}
],
"name": "NewOwner",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "node",
"type": "bytes32"
},
{
"indexed": False,
"name": "resolver",
"type": "address"
}
],
"name": "NewResolver",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "node",
"type": "bytes32"
},
{
"indexed": False,
"name": "ttl",
"type": "uint64"
}
],
"name": "NewTTL",
"type": "event"
}
]
AUCTION_REGISTRAR = [
{
"constant": False,
"inputs": [
{
"name": "_hash",
"type": "bytes32"
}
],
"name": "releaseDeed",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "_hash",
"type": "bytes32"
}
],
"name": "getAllowedTime",
"outputs": [
{
"name": "timestamp",
"type": "uint256"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "unhashedName",
"type": "string"
}
],
"name": "invalidateName",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "hash",
"type": "bytes32"
},
{
"name": "owner",
"type": "address"
},
{
"name": "value",
"type": "uint256"
},
{
"name": "salt",
"type": "bytes32"
}
],
"name": "shaBid",
"outputs": [
{
"name": "sealedBid",
"type": "bytes32"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "bidder",
"type": "address"
},
{
"name": "seal",
"type": "bytes32"
}
],
"name": "cancelBid",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "_hash",
"type": "bytes32"
}
],
"name": "entries",
"outputs": [
{
"name": "",
"type": "uint8"
},
{
"name": "",
"type": "address"
},
{
"name": "",
"type": "uint256"
},
{
"name": "",
"type": "uint256"
},
{
"name": "",
"type": "uint256"
}
],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "ens",
"outputs": [
{
"name": "",
"type": "address"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "_hash",
"type": "bytes32"
},
{
"name": "_value",
"type": "uint256"
},
{
"name": "_salt",
"type": "bytes32"
}
],
"name": "unsealBid",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "_hash",
"type": "bytes32"
}
],
"name": "transferRegistrars",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "",
"type": "address"
},
{
"name": "",
"type": "bytes32"
}
],
"name": "sealedBids",
"outputs": [
{
"name": "",
"type": "address"
}
],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "_hash",
"type": "bytes32"
}
],
"name": "state",
"outputs": [
{
"name": "",
"type": "uint8"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "_hash",
"type": "bytes32"
},
{
"name": "newOwner",
"type": "address"
}
],
"name": "transfer",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "_hash",
"type": "bytes32"
},
{
"name": "_timestamp",
"type": "uint256"
}
],
"name": "isAllowed",
"outputs": [
{
"name": "allowed",
"type": "bool"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "_hash",
"type": "bytes32"
}
],
"name": "finalizeAuction",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "registryStarted",
"outputs": [
{
"name": "",
"type": "uint256"
}
],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "launchLength",
"outputs": [
{
"name": "",
"type": "uint32"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "sealedBid",
"type": "bytes32"
}
],
"name": "newBid",
"outputs": [],
"payable": True,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "labels",
"type": "bytes32[]"
}
],
"name": "eraseNode",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "_hashes",
"type": "bytes32[]"
}
],
"name": "startAuctions",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "hash",
"type": "bytes32"
},
{
"name": "deed",
"type": "address"
},
{
"name": "registrationDate",
"type": "uint256"
}
],
"name": "acceptRegistrarTransfer",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "_hash",
"type": "bytes32"
}
],
"name": "startAuction",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "rootNode",
"outputs": [
{
"name": "",
"type": "bytes32"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "hashes",
"type": "bytes32[]"
},
{
"name": "sealedBid",
"type": "bytes32"
}
],
"name": "startAuctionsAndBid",
"outputs": [],
"payable": True,
"type": "function"
},
{
"inputs": [
{
"name": "_ens",
"type": "address"
},
{
"name": "_rootNode",
"type": "bytes32"
},
{
"name": "_startDate",
"type": "uint256"
}
],
"payable": False,
"type": "constructor"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "hash",
"type": "bytes32"
},
{
"indexed": False,
"name": "registrationDate",
"type": "uint256"
}
],
"name": "AuctionStarted",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "hash",
"type": "bytes32"
},
{
"indexed": True,
"name": "bidder",
"type": "address"
},
{
"indexed": False,
"name": "deposit",
"type": "uint256"
}
],
"name": "NewBid",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "hash",
"type": "bytes32"
},
{
"indexed": True,
"name": "owner",
"type": "address"
},
{
"indexed": False,
"name": "value",
"type": "uint256"
},
{
"indexed": False,
"name": "status",
"type": "uint8"
}
],
"name": "BidRevealed",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "hash",
"type": "bytes32"
},
{
"indexed": True,
"name": "owner",
"type": "address"
},
{
"indexed": False,
"name": "value",
"type": "uint256"
},
{
"indexed": False,
"name": "registrationDate",
"type": "uint256"
}
],
"name": "HashRegistered",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "hash",
"type": "bytes32"
},
{
"indexed": False,
"name": "value",
"type": "uint256"
}
],
"name": "HashReleased",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "hash",
"type": "bytes32"
},
{
"indexed": True,
"name": "name",
"type": "string"
},
{
"indexed": False,
"name": "value",
"type": "uint256"
},
{
"indexed": False,
"name": "registrationDate",
"type": "uint256"
}
],
"name": "HashInvalidated",
"type": "event"
}
]
DEED = [
{
"constant": True,
"inputs": [],
"name": "creationDate",
"outputs": [
{
"name": "",
"type": "uint256"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [],
"name": "destroyDeed",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "newOwner",
"type": "address"
}
],
"name": "setOwner",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "registrar",
"outputs": [
{
"name": "",
"type": "address"
}
],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "owner",
"outputs": [
{
"name": "",
"type": "address"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "refundRatio",
"type": "uint256"
}
],
"name": "closeDeed",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "newRegistrar",
"type": "address"
}
],
"name": "setRegistrar",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "newValue",
"type": "uint256"
}
],
"name": "setBalance",
"outputs": [],
"payable": True,
"type": "function"
},
{
"inputs": [],
"type": "constructor"
},
{
"payable": True,
"type": "fallback"
},
{
"anonymous": False,
"inputs": [
{
"indexed": False,
"name": "newOwner",
"type": "address"
}
],
"name": "OwnerChanged",
"type": "event"
},
{
"anonymous": False,
"inputs": [],
"name": "DeedClosed",
"type": "event"
}
]
FIFS_REGISTRAR = [
{
"constant": True,
"inputs": [],
"name": "ens",
"outputs": [
{
"name": "",
"type": "address"
}
],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "",
"type": "bytes32"
}
],
"name": "expiryTimes",
"outputs": [
{
"name": "",
"type": "uint256"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "subnode",
"type": "bytes32"
},
{
"name": "owner",
"type": "address"
}
],
"name": "register",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "rootNode",
"outputs": [
{
"name": "",
"type": "bytes32"
}
],
"payable": False,
"type": "function"
},
{
"inputs": [
{
"name": "ensAddr",
"type": "address"
},
{
"name": "node",
"type": "bytes32"
}
],
"type": "constructor"
}
]
RESOLVER = [
{
"constant": True,
"inputs": [
{
"name": "interfaceID",
"type": "bytes4"
}
],
"name": "supportsInterface",
"outputs": [
{
"name": "",
"type": "bool"
}
],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "node",
"type": "bytes32"
},
{
"name": "contentTypes",
"type": "uint256"
}
],
"name": "ABI",
"outputs": [
{
"name": "contentType",
"type": "uint256"
},
{
"name": "data",
"type": "bytes"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "node",
"type": "bytes32"
},
{
"name": "x",
"type": "bytes32"
},
{
"name": "y",
"type": "bytes32"
}
],
"name": "setPubkey",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "node",
"type": "bytes32"
}
],
"name": "content",
"outputs": [
{
"name": "ret",
"type": "bytes32"
}
],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "node",
"type": "bytes32"
}
],
"name": "addr",
"outputs": [
{
"name": "ret",
"type": "address"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "node",
"type": "bytes32"
},
{
"name": "contentType",
"type": "uint256"
},
{
"name": "data",
"type": "bytes"
}
],
"name": "setABI",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "node",
"type": "bytes32"
}
],
"name": "name",
"outputs": [
{
"name": "ret",
"type": "string"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "node",
"type": "bytes32"
},
{
"name": "name",
"type": "string"
}
],
"name": "setName",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "node",
"type": "bytes32"
},
{
"name": "hash",
"type": "bytes32"
}
],
"name": "setContent",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "node",
"type": "bytes32"
}
],
"name": "pubkey",
"outputs": [
{
"name": "x",
"type": "bytes32"
},
{
"name": "y",
"type": "bytes32"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "node",
"type": "bytes32"
},
{
"name": "addr",
"type": "address"
}
],
"name": "setAddr",
"outputs": [],
"payable": False,
"type": "function"
},
{
"inputs": [
{
"name": "ensAddr",
"type": "address"
}
],
"payable": False,
"type": "constructor"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "node",
"type": "bytes32"
},
{
"indexed": False,
"name": "a",
"type": "address"
}
],
"name": "AddrChanged",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "node",
"type": "bytes32"
},
{
"indexed": False,
"name": "hash",
"type": "bytes32"
}
],
"name": "ContentChanged",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "node",
"type": "bytes32"
},
{
"indexed": False,
"name": "name",
"type": "string"
}
],
"name": "NameChanged",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "node",
"type": "bytes32"
},
{
"indexed": True,
"name": "contentType",
"type": "uint256"
}
],
"name": "ABIChanged",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "node",
"type": "bytes32"
},
{
"indexed": False,
"name": "x",
"type": "bytes32"
},
{
"indexed": False,
"name": "y",
"type": "bytes32"
}
],
"name": "PubkeyChanged",
"type": "event"
}
]
REVERSE_REGISTRAR = [
{
"constant": False,
"inputs": [
{
"name": "owner",
"type": "address"
},
{
"name": "resolver",
"type": "address"
}
],
"name": "claimWithResolver",
"outputs": [
{
"name": "node",
"type": "bytes32"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "owner",
"type": "address"
}
],
"name": "claim",
"outputs": [
{
"name": "node",
"type": "bytes32"
}
],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "ens",
"outputs": [
{
"name": "",
"type": "address"
}
],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "defaultResolver",
"outputs": [
{
"name": "",
"type": "address"
}
],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "addr",
"type": "address"
}
],
"name": "node",
"outputs": [
{
"name": "ret",
"type": "bytes32"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "name",
"type": "string"
}
],
"name": "setName",
"outputs": [
{
"name": "node",
"type": "bytes32"
}
],
"payable": False,
"type": "function"
},
{
"inputs": [
{
"name": "ensAddr",
"type": "address"
},
{
"name": "resolverAddr",
"type": "address"
}
],
"payable": False,
"type": "constructor"
}
]
| mit | 5,709,209,485,505,465,000 | 15.158764 | 38 | 0.365447 | false |
sekikn/ambari | contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/hive.py | 2 | 20516 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import glob
from urlparse import urlparse
from resource_management.libraries.script.script import Script
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.core.resources.service import ServiceConfig
from resource_management.core.resources.system import File, Execute, Directory
from resource_management.core.source import StaticFile, Template, DownloadSource, InlineTemplate
from resource_management.core.shell import as_user
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.libraries.functions.format import format
from resource_management.core.exceptions import Fail
from resource_management.core.shell import as_sudo
from resource_management.core.shell import quote_bash_args
from resource_management.core.logger import Logger
from resource_management.core import utils
from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook
from ambari_commons.constants import SERVICE
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons import OSConst
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def hive(name=None):
import params
XmlConfig("hive-site.xml",
conf_dir = params.hive_conf_dir,
configurations = params.config['configurations']['hive-site'],
owner=params.hive_user,
configuration_attributes=params.config['configuration_attributes']['hive-site']
)
if name in ["hiveserver2","metastore"]:
# Manually overriding service logon user & password set by the installation package
service_name = params.service_map[name]
ServiceConfig(service_name,
action="change_user",
username = params.hive_user,
password = Script.get_password(params.hive_user))
Execute(format("cmd /c hadoop fs -mkdir -p {hive_warehouse_dir}"), logoutput=True, user=params.hadoop_user)
if name == 'metastore':
if params.init_metastore_schema:
check_schema_created_cmd = format('cmd /c "{hive_bin}\\hive.cmd --service schematool -info '
'-dbType {hive_metastore_db_type} '
'-userName {hive_metastore_user_name} '
'-passWord {hive_metastore_user_passwd!p}'
'&set EXITCODE=%ERRORLEVEL%&exit /B %EXITCODE%"', #cmd "feature", propagate the process exit code manually
hive_bin=params.hive_bin,
hive_metastore_db_type=params.hive_metastore_db_type,
hive_metastore_user_name=params.hive_metastore_user_name,
hive_metastore_user_passwd=params.hive_metastore_user_passwd)
try:
Execute(check_schema_created_cmd)
except Fail:
create_schema_cmd = format('cmd /c {hive_bin}\\hive.cmd --service schematool -initSchema '
'-dbType {hive_metastore_db_type} '
'-userName {hive_metastore_user_name} '
'-passWord {hive_metastore_user_passwd!p}',
hive_bin=params.hive_bin,
hive_metastore_db_type=params.hive_metastore_db_type,
hive_metastore_user_name=params.hive_metastore_user_name,
hive_metastore_user_passwd=params.hive_metastore_user_passwd)
Execute(create_schema_cmd,
user = params.hive_user,
logoutput=True
)
if name == "hiveserver2":
if params.hive_execution_engine == "tez":
# Init the tez app dir in hadoop
script_file = __file__.replace('/', os.sep)
cmd_file = os.path.normpath(os.path.join(os.path.dirname(script_file), "..", "files", "hiveTezSetup.cmd"))
Execute("cmd /c " + cmd_file, logoutput=True, user=params.hadoop_user)
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def hive(name=None):
import params
if name == 'hiveserver2':
# copy tarball to HDFS feature not supported
if not (params.stack_version_formatted_major and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.stack_version_formatted_major)):
params.HdfsResource(params.webhcat_apps_dir,
type="directory",
action="create_on_execute",
owner=params.webhcat_user,
mode=0755
)
# Create webhcat dirs.
if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
params.HdfsResource(params.hcat_hdfs_user_dir,
type="directory",
action="create_on_execute",
owner=params.hcat_user,
mode=params.hcat_hdfs_user_mode
)
params.HdfsResource(params.webhcat_hdfs_user_dir,
type="directory",
action="create_on_execute",
owner=params.webhcat_user,
mode=params.webhcat_hdfs_user_mode
)
# ****** Begin Copy Tarballs ******
# *********************************
# if copy tarball to HDFS feature supported copy mapreduce.tar.gz and tez.tar.gz to HDFS
if params.stack_version_formatted_major and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.stack_version_formatted_major):
copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user)
copy_to_hdfs("tez", params.user_group, params.hdfs_user)
# Always copy pig.tar.gz and hive.tar.gz using the appropriate mode.
      # This can use a different source and dest location to account for the custom source and destination file locations
copy_to_hdfs("pig",
params.user_group,
params.hdfs_user,
file_mode=params.tarballs_mode,
custom_source_file=params.pig_tar_source,
custom_dest_file=params.pig_tar_dest_file)
copy_to_hdfs("hive",
params.user_group,
params.hdfs_user,
file_mode=params.tarballs_mode,
custom_source_file=params.hive_tar_source,
custom_dest_file=params.hive_tar_dest_file)
wildcard_tarballs = ["sqoop", "hadoop_streaming"]
for tarball_name in wildcard_tarballs:
source_file_pattern = eval("params." + tarball_name + "_tar_source")
dest_dir = eval("params." + tarball_name + "_tar_dest_dir")
if source_file_pattern is None or dest_dir is None:
continue
source_files = glob.glob(source_file_pattern) if "*" in source_file_pattern else [source_file_pattern]
for source_file in source_files:
src_filename = os.path.basename(source_file)
dest_file = os.path.join(dest_dir, src_filename)
copy_to_hdfs(tarball_name,
params.user_group,
params.hdfs_user,
file_mode=params.tarballs_mode,
custom_source_file=source_file,
custom_dest_file=dest_file)
# ******* End Copy Tarballs *******
# *********************************
# if warehouse directory is in DFS
if not params.whs_dir_protocol or params.whs_dir_protocol == urlparse(params.default_fs).scheme:
# Create Hive Metastore Warehouse Dir
params.HdfsResource(params.hive_apps_whs_dir,
type="directory",
action="create_on_execute",
owner=params.hive_user,
mode=0777
)
else:
Logger.info(format("Not creating warehouse directory '{hive_apps_whs_dir}', as the location is not in DFS."))
# Create Hive User Dir
params.HdfsResource(params.hive_hdfs_user_dir,
type="directory",
action="create_on_execute",
owner=params.hive_user,
mode=params.hive_hdfs_user_mode
)
if not is_empty(params.hive_exec_scratchdir) and not urlparse(params.hive_exec_scratchdir).path.startswith("/tmp"):
params.HdfsResource(params.hive_exec_scratchdir,
type="directory",
action="create_on_execute",
owner=params.hive_user,
group=params.hdfs_user,
mode=0777) # Hive expects this dir to be writeable by everyone as it is used as a temp dir
params.HdfsResource(None, action="execute")
Directory(params.hive_etc_dir_prefix,
mode=0755
)
# We should change configurations for client as well as for server.
# The reason is that stale-configs are service-level, not component.
Logger.info("Directories to fill with configs: %s" % str(params.hive_conf_dirs_list))
for conf_dir in params.hive_conf_dirs_list:
fill_conf_dir(conf_dir)
XmlConfig("hive-site.xml",
conf_dir=params.hive_config_dir,
configurations=params.hive_site_config,
configuration_attributes=params.config['configuration_attributes']['hive-site'],
owner=params.hive_user,
group=params.user_group,
mode=0644)
# Generate atlas-application.properties.xml file
if has_atlas_in_cluster():
atlas_hook_filepath = os.path.join(params.hive_config_dir, params.atlas_hook_filename)
setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.hive_user, params.user_group)
if name == 'hiveserver2':
XmlConfig("hiveserver2-site.xml",
conf_dir=params.hive_server_conf_dir,
configurations=params.config['configurations']['hiveserver2-site'],
configuration_attributes=params.config['configuration_attributes']['hiveserver2-site'],
owner=params.hive_user,
group=params.user_group,
mode=0644)
if params.hive_metastore_site_supported and name == 'metastore':
XmlConfig("hivemetastore-site.xml",
conf_dir=params.hive_server_conf_dir,
configurations=params.config['configurations']['hivemetastore-site'],
configuration_attributes=params.config['configuration_attributes']['hivemetastore-site'],
owner=params.hive_user,
group=params.user_group,
mode=0644)
File(format("{hive_config_dir}/hive-env.sh"),
owner=params.hive_user,
group=params.user_group,
content=InlineTemplate(params.hive_env_sh_template)
)
  # On some OSes this folder may not exist, so create it before pushing files there
Directory(params.limits_conf_dir,
create_parents = True,
owner='root',
group='root'
)
File(os.path.join(params.limits_conf_dir, 'hive.conf'),
owner='root',
group='root',
mode=0644,
content=Template("hive.conf.j2")
)
if name == 'metastore' or name == 'hiveserver2':
if params.hive_jdbc_target is not None and not os.path.exists(params.hive_jdbc_target):
jdbc_connector(params.hive_jdbc_target, params.hive_previous_jdbc_jar)
if params.hive2_jdbc_target is not None and not os.path.exists(params.hive2_jdbc_target):
jdbc_connector(params.hive2_jdbc_target, params.hive2_previous_jdbc_jar)
File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
content = DownloadSource(format("{jdk_location}/{check_db_connection_jar_name}")),
mode = 0644,
)
if name == 'metastore':
File(os.path.join(params.hive_server_conf_dir, "hadoop-metrics2-hivemetastore.properties"),
owner=params.hive_user,
group=params.user_group,
content=Template("hadoop-metrics2-hivemetastore.properties.j2")
)
File(params.start_metastore_path,
mode=0755,
content=StaticFile('startMetastore.sh')
)
if params.init_metastore_schema:
create_schema_cmd = format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
"{hive_schematool_bin}/schematool -initSchema "
"-dbType {hive_metastore_db_type} "
"-userName {hive_metastore_user_name} "
"-passWord {hive_metastore_user_passwd!p} -verbose")
check_schema_created_cmd = as_user(format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
"{hive_schematool_bin}/schematool -info "
"-dbType {hive_metastore_db_type} "
"-userName {hive_metastore_user_name} "
"-passWord {hive_metastore_user_passwd!p} -verbose"), params.hive_user)
# HACK: in cases with quoted passwords and as_user (which does the quoting as well) !p won't work for hiding passwords.
# Fixing it with the hack below:
quoted_hive_metastore_user_passwd = quote_bash_args(quote_bash_args(params.hive_metastore_user_passwd))
if quoted_hive_metastore_user_passwd[0] == "'" and quoted_hive_metastore_user_passwd[-1] == "'" \
or quoted_hive_metastore_user_passwd[0] == '"' and quoted_hive_metastore_user_passwd[-1] == '"':
quoted_hive_metastore_user_passwd = quoted_hive_metastore_user_passwd[1:-1]
Logger.sensitive_strings[repr(check_schema_created_cmd)] = repr(check_schema_created_cmd.replace(
format("-passWord {quoted_hive_metastore_user_passwd}"), "-passWord " + utils.PASSWORDS_HIDE_STRING))
Execute(create_schema_cmd,
not_if = check_schema_created_cmd,
user = params.hive_user
)
elif name == 'hiveserver2':
File(params.start_hiveserver2_path,
mode=0755,
content=Template(format('{start_hiveserver2_script}'))
)
File(os.path.join(params.hive_server_conf_dir, "hadoop-metrics2-hiveserver2.properties"),
owner=params.hive_user,
group=params.user_group,
content=Template("hadoop-metrics2-hiveserver2.properties.j2")
)
if name != "client":
Directory(params.hive_pid_dir,
create_parents = True,
cd_access='a',
owner=params.hive_user,
group=params.user_group,
mode=0755)
Directory(params.hive_log_dir,
create_parents = True,
cd_access='a',
owner=params.hive_user,
group=params.user_group,
mode=0755)
Directory(params.hive_var_lib,
create_parents = True,
cd_access='a',
owner=params.hive_user,
group=params.user_group,
mode=0755)
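# Illustrative sketch (not part of the original recipe): the schematool command assembled
# in hive() above is expected to expand to roughly the following shell invocation. The
# paths, db type and credentials shown are assumptions, not real values; Ambari masks the
# password when logging the schema-check command via utils.PASSWORDS_HIDE_STRING.
#   export HIVE_CONF_DIR=/etc/hive/conf.server ; \
#     /usr/hdp/current/hive-metastore/bin/schematool -initSchema \
#     -dbType mysql -userName hive -passWord '***' -verbose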
"""
Writes configuration files required by Hive.
"""
def fill_conf_dir(component_conf_dir):
import params
Directory(component_conf_dir,
owner=params.hive_user,
group=params.user_group,
create_parents = True
)
XmlConfig("mapred-site.xml",
conf_dir=component_conf_dir,
configurations=params.config['configurations']['mapred-site'],
configuration_attributes=params.config['configuration_attributes']['mapred-site'],
owner=params.hive_user,
group=params.user_group,
mode=0644)
File(format("{component_conf_dir}/hive-default.xml.template"),
owner=params.hive_user,
group=params.user_group
)
File(format("{component_conf_dir}/hive-env.sh.template"),
owner=params.hive_user,
group=params.user_group
)
# Create hive-log4j.properties and hive-exec-log4j.properties
# in /etc/hive/conf and not in /etc/hive2/conf
if params.log4j_version == '1':
log4j_exec_filename = 'hive-exec-log4j.properties'
if (params.log4j_exec_props != None):
File(format("{component_conf_dir}/{log4j_exec_filename}"),
mode=0644,
group=params.user_group,
owner=params.hive_user,
content=params.log4j_exec_props
)
elif (os.path.exists("{component_conf_dir}/{log4j_exec_filename}.template")):
File(format("{component_conf_dir}/{log4j_exec_filename}"),
mode=0644,
group=params.user_group,
owner=params.hive_user,
content=StaticFile(format("{component_conf_dir}/{log4j_exec_filename}.template"))
)
log4j_filename = 'hive-log4j.properties'
if (params.log4j_props != None):
File(format("{component_conf_dir}/{log4j_filename}"),
mode=0644,
group=params.user_group,
owner=params.hive_user,
content=params.log4j_props
)
elif (os.path.exists("{component_conf_dir}/{log4j_filename}.template")):
File(format("{component_conf_dir}/{log4j_filename}"),
mode=0644,
group=params.user_group,
owner=params.hive_user,
content=StaticFile(format("{component_conf_dir}/{log4j_filename}.template"))
)
pass # if params.log4j_version == '1'
def jdbc_connector(target, hive_previous_jdbc_jar):
"""
Shared by Hive Batch, Hive Metastore, and Hive Interactive
:param target: Target of jdbc jar name, which could be for any of the components above.
"""
import params
if not params.jdbc_jar_name:
return
if params.hive_jdbc_driver in params.hive_jdbc_drivers_list and params.hive_use_existing_db:
environment = {
"no_proxy": format("{ambari_server_hostname}")
}
if hive_previous_jdbc_jar and os.path.isfile(hive_previous_jdbc_jar):
File(hive_previous_jdbc_jar, action='delete')
    # TODO: should be removed once ranger_hive_plugin no longer provides the jdbc jar
if params.prepackaged_jdbc_name != params.jdbc_jar_name:
Execute(('rm', '-f', params.prepackaged_ojdbc_symlink),
path=["/bin", "/usr/bin/"],
sudo = True)
File(params.downloaded_custom_connector,
content = DownloadSource(params.driver_curl_source))
# maybe it would be more correct to use the db type here
if params.sqla_db_used:
untar_sqla_type2_driver = ('tar', '-xvf', params.downloaded_custom_connector, '-C', params.tmp_dir)
Execute(untar_sqla_type2_driver, sudo = True)
Execute(format("yes | {sudo} cp {jars_path_in_archive} {hive_lib}"))
Directory(params.jdbc_libs_dir,
create_parents = True)
Execute(format("yes | {sudo} cp {libs_path_in_archive} {jdbc_libs_dir}"))
Execute(format("{sudo} chown -R {hive_user}:{user_group} {hive_lib}/*"))
else:
Execute(('cp', '--remove-destination', params.downloaded_custom_connector, target),
#creates=target, TODO: uncomment after ranger_hive_plugin will not provide jdbc
path=["/bin", "/usr/bin/"],
sudo = True)
else:
#for default hive db (Mysql)
Execute(('cp', '--remove-destination', format('/usr/share/java/{jdbc_jar_name}'), target),
#creates=target, TODO: uncomment after ranger_hive_plugin will not provide jdbc
path=["/bin", "/usr/bin/"],
sudo=True
)
pass
File(target,
mode = 0644,
)
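# Usage sketch (hypothetical call sites, following the docstring of jdbc_connector above;
# the exact params attribute names are assumptions, not taken from this file):
#
#   jdbc_connector(params.hive_jdbc_target, params.hive_previous_jdbc_jar)
#   jdbc_connector(params.hive_metastore_jdbc_target, params.hive_previous_jdbc_jar)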
| apache-2.0 | -3,109,588,745,240,251,400 | 41.920502 | 149 | 0.61859 | false |
graphite-server/graphite-web | webapp/graphite/render/views.py | 1 | 16273 | """Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import csv
import math
import pytz
from datetime import datetime
from time import time
from random import shuffle
from httplib import CannotSendRequest
from urllib import urlencode
from urlparse import urlsplit, urlunsplit
from cgi import parse_qs
from cStringIO import StringIO
try:
import cPickle as pickle
except ImportError:
import pickle
from graphite.compat import HttpResponse
from graphite.util import getProfileByUsername, json, unpickle
from graphite.remote_storage import HTTPConnectionWithTimeout
from graphite.logger import log
from graphite.render.evaluator import evaluateTarget
from graphite.render.attime import parseATTime
from graphite.render.functions import PieFunctions
from graphite.render.hashing import hashRequest, hashData
from graphite.render.glyph import GraphTypes
from django.http import HttpResponseServerError, HttpResponseRedirect
from django.template import Context, loader
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from django.utils.cache import add_never_cache_headers, patch_response_headers
def renderView(request):
start = time()
(graphOptions, requestOptions) = parseOptions(request)
useCache = 'noCache' not in requestOptions
cacheTimeout = requestOptions['cacheTimeout']
requestContext = {
'startTime' : requestOptions['startTime'],
'endTime' : requestOptions['endTime'],
'localOnly' : requestOptions['localOnly'],
'template' : requestOptions['template'],
'data' : []
}
data = requestContext['data']
# First we check the request cache
if useCache:
requestKey = hashRequest(request)
cachedResponse = cache.get(requestKey)
if cachedResponse:
log.cache('Request-Cache hit [%s]' % requestKey)
log.rendering('Returned cached response in %.6f' % (time() - start))
return cachedResponse
else:
log.cache('Request-Cache miss [%s]' % requestKey)
# Now we prepare the requested data
if requestOptions['graphType'] == 'pie':
for target in requestOptions['targets']:
if target.find(':') >= 0:
try:
name,value = target.split(':',1)
value = float(value)
except:
raise ValueError("Invalid target '%s'" % target)
data.append( (name,value) )
else:
seriesList = evaluateTarget(requestContext, target)
for series in seriesList:
func = PieFunctions[requestOptions['pieMode']]
color = series.color if hasattr(series, 'color') else None
data.append( (series.name, func(requestContext, series) or 0, color))
elif requestOptions['graphType'] == 'line':
# Let's see if at least our data is cached
if useCache:
targets = requestOptions['targets']
startTime = requestOptions['startTime']
endTime = requestOptions['endTime']
dataKey = hashData(targets, startTime, endTime)
cachedData = cache.get(dataKey)
if cachedData:
log.cache("Data-Cache hit [%s]" % dataKey)
else:
log.cache("Data-Cache miss [%s]" % dataKey)
else:
cachedData = None
if cachedData is not None:
requestContext['data'] = data = cachedData
else: # Have to actually retrieve the data now
for target in requestOptions['targets']:
if not target.strip():
continue
t = time()
seriesList = evaluateTarget(requestContext, target)
log.rendering("Retrieval of %s took %.6f" % (target, time() - t))
data.extend(seriesList)
if useCache:
cache.add(dataKey, data, cacheTimeout)
# If data is all we needed, we're done
format = requestOptions.get('format')
if format == 'csv':
response = HttpResponse(content_type='text/csv')
writer = csv.writer(response, dialect='excel')
for series in data:
for i, value in enumerate(series):
timestamp = datetime.fromtimestamp(series.start + (i * series.step), requestOptions['tzinfo'])
writer.writerow((series.name, timestamp.strftime("%Y-%m-%d %H:%M:%S"), value))
return response
if format == 'json':
series_data = []
if 'maxDataPoints' in requestOptions and any(data):
startTime = min([series.start for series in data])
endTime = max([series.end for series in data])
timeRange = endTime - startTime
maxDataPoints = requestOptions['maxDataPoints']
for series in data:
numberOfDataPoints = timeRange/series.step
if maxDataPoints < numberOfDataPoints:
valuesPerPoint = math.ceil(float(numberOfDataPoints) / float(maxDataPoints))
secondsPerPoint = int(valuesPerPoint * series.step)
# Nudge start over a little bit so that the consolidation bands align with each call
# removing 'jitter' seen when refreshing.
nudge = secondsPerPoint + (series.start % series.step) - (series.start % secondsPerPoint)
series.start = series.start + nudge
valuesToLose = int(nudge/series.step)
for r in range(1, valuesToLose):
del series[0]
series.consolidate(valuesPerPoint)
timestamps = range(int(series.start), int(series.end) + 1, int(secondsPerPoint))
else:
timestamps = range(int(series.start), int(series.end) + 1, int(series.step))
datapoints = zip(series, timestamps)
series_data.append(dict(target=series.name, datapoints=datapoints))
else:
for series in data:
timestamps = range(int(series.start), int(series.end) + 1, int(series.step))
datapoints = zip(series, timestamps)
series_data.append(dict(target=series.name, datapoints=datapoints))
if 'jsonp' in requestOptions:
response = HttpResponse(
content="%s(%s)" % (requestOptions['jsonp'], json.dumps(series_data)),
content_type='text/javascript')
else:
response = HttpResponse(content=json.dumps(series_data),
content_type='application/json')
if useCache:
patch_response_headers(response, cache_timeout=cacheTimeout)
else:
add_never_cache_headers(response)
return response
if format == 'raw':
response = HttpResponse(content_type='text/plain')
for series in data:
response.write( "%s,%d,%d,%d|" % (series.name, series.start, series.end, series.step) )
response.write( ','.join(map(str,series)) )
response.write('\n')
log.rendering('Total rawData rendering time %.6f' % (time() - start))
return response
if format == 'svg':
graphOptions['outputFormat'] = 'svg'
if format == 'pickle':
response = HttpResponse(content_type='application/pickle')
seriesInfo = [series.getInfo() for series in data]
pickle.dump(seriesInfo, response, protocol=-1)
log.rendering('Total pickle rendering time %.6f' % (time() - start))
return response
# We've got the data, now to render it
graphOptions['data'] = data
if settings.REMOTE_RENDERING: # Rendering on other machines is faster in some situations
image = delegateRendering(requestOptions['graphType'], graphOptions)
else:
image = doImageRender(requestOptions['graphClass'], graphOptions)
useSVG = graphOptions.get('outputFormat') == 'svg'
if useSVG and 'jsonp' in requestOptions:
response = HttpResponse(
content="%s(%s)" % (requestOptions['jsonp'], json.dumps(image)),
content_type='text/javascript')
else:
response = buildResponse(image, 'image/svg+xml' if useSVG else 'image/png')
if useCache:
cache.add(requestKey, response, cacheTimeout)
patch_response_headers(response, cache_timeout=cacheTimeout)
else:
add_never_cache_headers(response)
log.rendering('Total rendering time %.6f seconds' % (time() - start))
return response
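# Note on the format handling above: 'csv', 'json', 'raw' and 'pickle' each return a
# response directly, while 'svg' only switches the output format and falls through
# (together with the default case) to the image rendering code at the end of renderView.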
def parseOptions(request):
queryParams = request.REQUEST
# Start with some defaults
graphOptions = {'width' : 330, 'height' : 250}
requestOptions = {}
graphType = queryParams.get('graphType','line')
assert graphType in GraphTypes, "Invalid graphType '%s', must be one of %s" % (graphType,GraphTypes.keys())
graphClass = GraphTypes[graphType]
# Fill in the requestOptions
requestOptions['graphType'] = graphType
requestOptions['graphClass'] = graphClass
requestOptions['pieMode'] = queryParams.get('pieMode', 'average')
requestOptions['cacheTimeout'] = int( queryParams.get('cacheTimeout', settings.DEFAULT_CACHE_DURATION) )
requestOptions['targets'] = []
# Extract the targets out of the queryParams
mytargets = []
# Normal format: ?target=path.1&target=path.2
if len(queryParams.getlist('target')) > 0:
mytargets = queryParams.getlist('target')
# Rails/PHP/jQuery common practice format: ?target[]=path.1&target[]=path.2
elif len(queryParams.getlist('target[]')) > 0:
mytargets = queryParams.getlist('target[]')
# Collect the targets
for target in mytargets:
requestOptions['targets'].append(target)
template = dict()
for key, val in queryParams.items():
if key.startswith("template["):
template[key[9:-1]] = val
requestOptions['template'] = template
if 'pickle' in queryParams:
requestOptions['format'] = 'pickle'
if 'rawData' in queryParams:
requestOptions['format'] = 'raw'
if 'format' in queryParams:
requestOptions['format'] = queryParams['format']
if 'jsonp' in queryParams:
requestOptions['jsonp'] = queryParams['jsonp']
if 'noCache' in queryParams:
requestOptions['noCache'] = True
if 'maxDataPoints' in queryParams and queryParams['maxDataPoints'].isdigit():
requestOptions['maxDataPoints'] = int(queryParams['maxDataPoints'])
requestOptions['localOnly'] = queryParams.get('local') == '1'
# Fill in the graphOptions
for opt in graphClass.customizable:
if opt in queryParams:
val = queryParams[opt]
if (val.isdigit() or (val.startswith('-') and val[1:].isdigit())) and 'color' not in opt.lower():
val = int(val)
elif '.' in val and (val.replace('.','',1).isdigit() or (val.startswith('-') and val[1:].replace('.','',1).isdigit())):
val = float(val)
elif val.lower() in ('true','false'):
val = val.lower() == 'true'
elif val.lower() == 'default' or val == '':
continue
graphOptions[opt] = val
tzinfo = pytz.timezone(settings.TIME_ZONE)
if 'tz' in queryParams:
try:
tzinfo = pytz.timezone(queryParams['tz'])
except pytz.UnknownTimeZoneError:
pass
requestOptions['tzinfo'] = tzinfo
# Get the time interval for time-oriented graph types
if graphType == 'line' or graphType == 'pie':
if 'until' in queryParams:
untilTime = parseATTime(queryParams['until'], tzinfo)
else:
untilTime = parseATTime('now', tzinfo)
if 'from' in queryParams:
fromTime = parseATTime(queryParams['from'], tzinfo)
else:
fromTime = parseATTime('-1d', tzinfo)
startTime = min(fromTime, untilTime)
endTime = max(fromTime, untilTime)
assert startTime != endTime, "Invalid empty time range"
requestOptions['startTime'] = startTime
requestOptions['endTime'] = endTime
return (graphOptions, requestOptions)
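# Example request (hypothetical target and parameter values) that parseOptions accepts:
#
#   /render/?target=app.requests.count&from=-6h&until=now&format=json&maxDataPoints=60&tz=UTC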
connectionPools = {}
def delegateRendering(graphType, graphOptions):
start = time()
postData = graphType + '\n' + pickle.dumps(graphOptions)
servers = settings.RENDERING_HOSTS[:] #make a copy so we can shuffle it safely
shuffle(servers)
for server in servers:
start2 = time()
try:
# Get a connection
try:
pool = connectionPools[server]
except KeyError: #happens the first time
pool = connectionPools[server] = set()
try:
connection = pool.pop()
except KeyError: #No available connections, have to make a new one
connection = HTTPConnectionWithTimeout(server)
connection.timeout = settings.REMOTE_RENDER_CONNECT_TIMEOUT
# Send the request
try:
connection.request('POST','/render/local/', postData)
except CannotSendRequest:
connection = HTTPConnectionWithTimeout(server) #retry once
connection.timeout = settings.REMOTE_RENDER_CONNECT_TIMEOUT
connection.request('POST', '/render/local/', postData)
# Read the response
response = connection.getresponse()
assert response.status == 200, "Bad response code %d from %s" % (response.status,server)
contentType = response.getheader('Content-Type')
imageData = response.read()
assert contentType == 'image/png', "Bad content type: \"%s\" from %s" % (contentType,server)
assert imageData, "Received empty response from %s" % server
# Wrap things up
log.rendering('Remotely rendered image on %s in %.6f seconds' % (server,time() - start2))
log.rendering('Spent a total of %.6f seconds doing remote rendering work' % (time() - start))
pool.add(connection)
return imageData
except:
log.exception("Exception while attempting remote rendering request on %s" % server)
log.rendering('Exception while remotely rendering on %s wasted %.6f' % (server,time() - start2))
continue
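# Wire format note: delegateRendering POSTs the graph type, a newline and the pickled
# graphOptions to /render/local/ on one of settings.RENDERING_HOSTS; renderLocalView
# below is the matching server-side handler.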
def renderLocalView(request):
try:
start = time()
reqParams = StringIO(request.body)
graphType = reqParams.readline().strip()
optionsPickle = reqParams.read()
reqParams.close()
graphClass = GraphTypes[graphType]
options = unpickle.loads(optionsPickle)
image = doImageRender(graphClass, options)
log.rendering("Delegated rendering request took %.6f seconds" % (time() - start))
response = buildResponse(image)
add_never_cache_headers(response)
return response
except:
log.exception("Exception in graphite.render.views.rawrender")
return HttpResponseServerError()
def renderMyGraphView(request,username,graphName):
profile = getProfileByUsername(username)
if not profile:
return errorPage("No such user '%s'" % username)
try:
graph = profile.mygraph_set.get(name=graphName)
except ObjectDoesNotExist:
return errorPage("User %s doesn't have a MyGraph named '%s'" % (username,graphName))
request_params = dict(request.REQUEST.items())
if request_params:
url_parts = urlsplit(graph.url)
query_string = url_parts[3]
if query_string:
url_params = parse_qs(query_string)
# Remove lists so that we can do an update() on the dict
for param, value in url_params.items():
if isinstance(value, list) and param != 'target':
url_params[param] = value[-1]
url_params.update(request_params)
# Handle 'target' being a list - we want duplicate &target params out of it
url_param_pairs = []
for key,val in url_params.items():
if isinstance(val, list):
for v in val:
url_param_pairs.append( (key,v) )
else:
url_param_pairs.append( (key,val) )
query_string = urlencode(url_param_pairs)
url = urlunsplit(url_parts[:3] + (query_string,) + url_parts[4:])
else:
url = graph.url
return HttpResponseRedirect(url)
def doImageRender(graphClass, graphOptions):
pngData = StringIO()
t = time()
img = graphClass(**graphOptions)
img.output(pngData)
log.rendering('Rendered PNG in %.6f seconds' % (time() - t))
imageData = pngData.getvalue()
pngData.close()
return imageData
def buildResponse(imageData, content_type="image/png"):
return HttpResponse(imageData, content_type=content_type)
def errorPage(message):
template = loader.get_template('500.html')
context = Context(dict(message=message))
return HttpResponseServerError( template.render(context) )
| apache-2.0 | 521,230,743,486,547,700 | 36.068337 | 125 | 0.676212 | false |
rcocetta/kano-toolset | kano/xwindow.py | 1 | 1767 | #!/usr/bin/env python
# xwindow.py
#
# Copyright (C) 2015 Kano Computing Ltd.
# License: GNU General Public License v2 http://www.gnu.org/licenses/gpl-2.0.txt
#
# low level Xlib utilities
#
# Be careful mixing calls to this with Gtk.
from contextlib import contextmanager
import Xlib.display
from kano.logging import logger
def handle_uncaught_errors(err, req):
# req is always None in the default error handler
logger.error("error from Xlib {}".format(err))
@contextmanager
def display():
'''
A context manager for display
'''
d = Xlib.display.Display()
d.set_error_handler(handle_uncaught_errors)
yield d
d.close()
def find_xwindow_by_id(xid, parent):
'''
Given a parent Xlib Window, find a window with given xid
returning Xlib window object
'''
try:
for c in parent.query_tree().children:
if c.id == xid:
return c
r = find_xwindow_by_id(xid, c)
if r is not None:
return r
except:
return None
def xid_to_str(xid):
''' make a string suitable for passing on the command line'''
return hex(xid).rstrip('L')
# NB this function opens its own X connection. This is only safe because
# we don't return any objects, only xids. Normally Xlib objects must be used
# in the context of a Display().
def get_child_windows_from_xid(xid):
'''
Given an X window id, return the xid's of its children
'''
try:
with display() as d:
root = d.screen().root
xw = find_xwindow_by_id(xid, root)
children = []
for c in xw.query_tree().children:
children.append(xid_to_str(c.id))
return children
except:
return []
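# Minimal usage sketch (the window id below is a made-up example; a real xid would
# come from another X client or a window manager):
#
#   children = get_child_windows_from_xid(0x1400005)
#   for xid in children:
#       print(xid)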
| gpl-2.0 | -7,121,769,970,949,436,000 | 23.205479 | 80 | 0.614601 | false |
massixone/mma8451 | rss_client.py | 1 | 5980 | #!/usr/bin/env python
# -*- coding:utf-8, indent=tab, tabstop=4 -*-
#
# See 'LICENSE' for copying
#
# This file contains the code for the client module of 'accel.py'
#
# Revision history
# Date Author Version Details
# ----------------------------------------------------------------------------------
# 2018-01-18 Massimo Di Primio 0.06 1st file implementation
"""Client thread worker - This is a simple client code example for 'accel'.py' program"""
import logging
import time
import datetime
import socket
import json
import rss_client_messages as climsg
import rss_cli_config as ccfg
import raspidata
#def cli_connect(params):
def cli_connect():
"""Open connection to the server"""
server_address = (str(ccfg.serveraddress), int(ccfg.servertcpport))
logging.debug('Trying to connect to server ' + str(server_address))
# Create a TCP/IP socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# Connect the socket to the port where the server is listening
s.connect(server_address)
logging.debug('Connection Established to server ' + str(server_address))
except:
logging.debug(
"Failed to open connection: " + str(ccfg.serverprotocol) +
", to IP: " + str(ccfg.serveraddress) +
", on port: " + str(ccfg.servertcpport)
)
return(-1)
return(s)
def cli_close(s):
"""Close the server connection"""
if s > -1:
s.close()
# def cli_worker(stopEvent, config, accelBuffer):
def cli_worker(stopEvent, accelBuffer):
"""A client worker as thread"""
logging.debug('Thread Starting')
s = cli_connect() # s = cli_connect(config)
send_client_hello(s)
time.sleep(0.5)
send_config_affirm_message(s)
ts = int(time.time())
te = ts
while not stopEvent.wait(0.3):
if len(accelBuffer) > 0:
send_accel_data(s, accelBuffer)
te = int(time.time())
if (te - ts) > 10:
send_client_heartbit(s)
ts = int(time.time())
time.sleep(0.5)
send_zap_message(s)
cli_close(s)
logging.debug("Thread cliWorker is terminating as per your request.")
def send_accel_data(s, accelBuffer):
"""Send acceleration data to the server"""
#msg = dict(cmd='ADM', timestamp=str(datetime.datetime.now()), clid=raspidata.get_serial())
#a = 123 #str(datetime.datetime.now())
pbuf = parse_accel_data(accelBuffer)
msg = dict(cmd = 'ADM', timestamp = str(datetime.datetime.now()), clid = raspidata.get_serial(), data = pbuf)
# if len(pbuf) > 0: # this sometimes returns error (when buf is empty, it has None type)
if (pbuf is not None) and (len(pbuf) > 0):
#str = climsg.accel_data_message(pbuf)
try:
logging.debug("Sending Acceleration data to the server")
s.sendall(str(json.dumps(msg)) + "\n") #s.sendall(json.dumps(str) + "\n")
except:
logging.debug("Failed to send Acceleration-Data to the server")
def send_client_hello(s):
"""Send Hello message to the server"""
msg = dict(cmd = 'CHM', timestamp = str(datetime.datetime.now()), clid = raspidata.get_serial())
try:
logging.debug("Sending Hello to the server")
s.sendall(str(json.dumps(msg)) + "\n") #s.sendall(json.dumps(climsg.hello_message()) + "\n")
except:
logging.debug("Failed to send Hello to the server")
def send_zap_message(s):
"""Send Zap message to the server"""
msg = dict(cmd = 'CZM', timestamp = str(datetime.datetime.now()), clid = raspidata.get_serial())
try:
logging.debug("Sending Zap to the server")
s.sendall(str(json.dumps(msg)) + "\n") #s.sendall(json.dumps(climsg.zap_message()) + "\n")
except:
logging.debug("Failed to send Zap to the server")
def send_config_affirm_message(s): #def send_config_affirm_message(s, config):
msg_data = dict(city = ccfg.cityname, latitude = ccfg.latitude,longitude = ccfg.longitude)
msg = dict(cmd = 'CCA', timestamp = str(datetime.datetime.now()), clid = raspidata.get_serial(), config = msg_data)
try:
logging.debug("Sending client configuration to the server")
s.sendall(str(json.dumps(msg)) + "\n") #s.sendall(climsg.config_affirm_message(cfg_data))
except:
logging.debug("Failed to send client configuration to the server")
def send_client_heartbit(s):
"""Send Heartbit to the server"""
msg = dict(cmd = 'CHB', timestamp = str(datetime.datetime.now()), clid=raspidata.get_serial())
try:
logging.debug("Sending Heartbit to the server")
s.sendall(str(json.dumps(msg)) + "\n") #s.sendall(json.dumps(climsg.heart_bit()) + "\n")
except:
logging.debug("Failed to send Heartbit to the server")
def parse_accel_data(b):
"""Parse acceleration data to make sure we only send meaningfull data to the server"""
tsh = 10
tbuf = []
# tbuf.append([0, 0, 0, 0, 0])
# bLength = len(b)
# logging.debug("parseAccelData(b) # of elements = " + str(len(b)))
if len(b) > 1:
logging.debug("parseAccelData: In AccelData/BufLen: " + str(len(b)) + "/" +str(len(tbuf)))
firstTime = 1
prow = None
while len(b) > 0: # drain the buffer one record at a time (avoids skipping entries while popping)
crow = b.pop(0) # Get the oldest record
if firstTime == 1:
prow = crow
firstTime = 0
if ( (abs(abs(int(crow[1])) - abs(int(prow[1]))) > tsh) or
(abs(abs(int(crow[2])) - abs(int(prow[2]))) > tsh) or
(abs(abs(int(crow[3])) - abs(int(prow[3]))) > tsh)
):
tbuf.append(crow)
prow = crow
print ("Again PROW/CROW/TBUFLEN:" + str(prow) + " / " + str(crow) + " / " + str(len(tbuf)))
logging.debug("parseAccelData: Out AccelData/BufLen: " + str(len(b)) + "/" +str(len(tbuf)))
return(tbuf)
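# Note (inferred from the indexing above): each buffer row is expected to look like
# [timestamp, x, y, z, ...]; crow[1..3] are the acceleration components, and a row is
# kept only when at least one component differs from the reference row 'prow' by more
# than the threshold 'tsh'.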
| gpl-3.0 | -4,353,461,017,884,567,000 | 36.610063 | 119 | 0.593478 | false |
dmccloskey/python_statistics | python_statistics/calculate_importantFeatures.py | 1 | 5116 | from .calculate_dependencies import *
from .calculate_base import calculate_base
class calculate_importantFeatures(calculate_base):
def extract_importantFeatures(self,data_model_I=None,raise_I=False):
'''
INPUT:
data_model_I = classification or regression model
'''
if data_model_I: data_model=data_model_I;
else: data_model = self.data_model;
data_model = self.get_finalEstimator(data_model);
important_features_O = None;
try:
if hasattr(data_model, "feature_importances_"):
important_features_O = data_model.feature_importances_;
except Exception as e:
if raise_I: raise;
else: print(e);
return important_features_O;
def calculate_importantFeatures_std(self,important_features_I,data_model_I=None,raise_I=False):
'''
calculate the standard deviation of the important features
based on the feature importances of the estimators
NOTE: ensemble models only
INPUT:
important_features_I = array of important features
data_model_I = classification or regression model
OUTPUT:
n_O = number of estimators
std_O = standard deviation of the important feature
'''
if data_model_I: data_model=data_model_I;
else: data_model = self.data_model;
data_model = self.get_finalEstimator(data_model);
n_O,std_O = np.full(important_features_I.shape,0.),np.full(important_features_I.shape,0.);
try:
if hasattr(data_model, "estimators_"):
std_O = np.std([estimator.feature_importances_ for estimator in data_model.estimators_],
axis=0);
n_O = np.full(std_O.shape,len(data_model.estimators_));
except Exception as e:
if raise_I: raise;
else: print(e);
return n_O,std_O;
def calculate_ZScoreAndPValue(self,value_I,n_I,std_I):
'''
calculate the Z-score and p-value
INPUT:
value_I = important feature value
n_I = number of estimators
std_I = standard deviation of the important feature
'''
if not value_I is None:
zscore_O,pvalue_O=np.full(value_I.shape,0.),np.full(value_I.shape,0.);
if not n_I is None and not 0.0 in n_I:
#calculate the standard error
se_O = std_I/np.sqrt(n_I);
#calculate the zscore
if 0.0 in se_O:
zscore_O=np.full(se_O.shape,1e3); #fill with an arbitrarily large value
for i in range(se_O.shape[0]):
if se_O[i] != 0.0:
zscore_O[i] = value_I[i]/se_O[i];
else:
zscore_O = value_I/se_O;
#calculate the pvalue
pvalue_O = scipy.stats.norm.sf(abs(zscore_O));
return zscore_O,pvalue_O;
def calculate_importantFeature_jackknife(self,data_model_I=None):
''' '''
pass;
def calculate_importantFeature_bootstrap(self,data_model_I=None):
''' '''
pass;
def calculate_VIP(self,data_model_I=None):
''' '''
pass;
def extract_dataFeatureSelection_ranking(self,
data_model_I=None,
raise_I=False):
'''
extract out the ranking from a feature selection data model
INPUT:
data_model_I = feature selection model
'''
if data_model_I: data_model=data_model_I;
else: data_model = self.data_featureSelection;
data_model = self.get_finalEstimator(data_model);
impfeat_values_O,impfeat_scores_O = None,None
try:
impfeat_values_O = data_model.ranking_;
if hasattr(data_model, "grid_scores_"):
impfeat_scores_O = data_model.grid_scores_;
else:
impfeat_scores_O = np.full(impfeat_values_O.shape,0.)
except Exception as e:
if raise_I: raise;
else: print(e);
return impfeat_values_O,impfeat_scores_O;
def extract_coefficientsSVM(self,data_model_I=None,raise_I=False):
'''
INPUT:
data_model_I = support vector machine
OUTPUT:
coefficients_sum_O = sum of the absolute value of the coefficients
for each feature along the n-1 class axis
'''
if data_model_I: data_model=data_model_I;
else: data_model = self.data_model;
data_model = self.get_finalEstimator(data_model);
coefficients_n_O,coefficients_sum_O,coefficients_mean_O,coefficients_std_O = None,None,None,None;
try:
coefficients_n_O = data_model.coef_.shape[0];
coefficients_sum_O = np.abs(data_model.coef_).sum(axis=0);
coefficients_mean_O = np.abs(data_model.coef_).mean(axis=0);
coefficients_std_O = np.abs(data_model.coef_).std(axis=0);
except Exception as e:
if raise_I: raise;
else: print(e);
return coefficients_n_O,coefficients_sum_O,coefficients_mean_O,coefficients_std_O;
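# Minimal usage sketch (assumes calculate_base provides self.data_model and that it holds
# an already fitted scikit-learn ensemble, e.g. a RandomForestClassifier; the attribute
# wiring shown here is illustrative only):
#
#   calc = calculate_importantFeatures()
#   calc.data_model = fitted_forest
#   imp = calc.extract_importantFeatures()
#   n, std = calc.calculate_importantFeatures_std(imp)
#   zscores, pvalues = calc.calculate_ZScoreAndPValue(imp, n, std)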
| mit | 5,074,370,880,379,480,000 | 38.658915 | 105 | 0.584246 | false |
maxhutch/nek-workflow | ui.py | 1 | 4622 | """
User interfaces for the nek-workflow script
Currently, there is only a command line interface
"""
def command_line_ui():
"""
Command line interface for nek-workflow
Uses python's ArgumentParser to read the command line and then creates
shortcuts for common argument combinations
"""
# grab defaults from config files
from os.path import exists, expanduser, join
import json
defaults = {
'start' : 1,
'nodes' : 1,
'archive' : False,
'upload' : False,
'sync' : True,
'process' : False,
'analysis' : "RTI",
'arch_end' : "alcf#dtn_hpss/~/pub/",
'outp_end' : "maxhutch#alpha-admin/pub/",
'home_end' : "maxhutch#edoras/home/maxhutch/science/RTI/",
'foo' : "bar"
}
if exists(join(expanduser("~"), ".nek-workflow.json")):
with open(join(expanduser("~"), ".nek-workflow.json")) as f:
defaults.update(json.load(f))
# Define arguments
from argparse import ArgumentParser
p = ArgumentParser()
p.add_argument("name",
help="Name and path of Nek output files")
p.add_argument("-a", "--archive_end",
help="Archive endpoint", dest="arch_end")
p.add_argument("-m", "--home_end",
help="Home endpoint")
p.add_argument("-o", "--output_end",
help="Output endpoint", dest="outp_end")
p.add_argument("-f", "--frame", type=int,
help="[Starting] Frame number")
p.add_argument("-e", "--frame_end", type=int, default=-1,
help="Ending frame number")
p.add_argument("--analysis", help="Anaysis package to use for post-processing")
p.add_argument("--sync", action="store_true", help="Sync params and chest", dest="sync")
p.add_argument("--no-sync", action="store_false", help="Sync params and chest", dest="sync")
p.add_argument("--process", action="store_true", help="Process the frames", dest="process")
p.add_argument("--no-process", action="store_false", help="Process the frames", dest="process")
p.add_argument("--archive", action="store_true", help="Archive raw", dest="archive")
p.add_argument("--no-archive", action="store_false", help="Archive raw", dest="archive")
p.add_argument("--upload", action="store_true", help="Upload results", dest="upload")
p.add_argument("--no-upload", action="store_false", help="Upload results", dest="upload")
p.add_argument("-n", "--nodes", type=int,
help="Number of nodes to run on")
p.set_defaults(**defaults)
"""
p.add_argument("-s", "--slice", action="store_true",
help="Display slice")
p.add_argument("-c", "--contour", action="store_true",
help="Display contour")
p.add_argument("-n", "--ninterp", type=float, default = 1.,
help="Interpolating order")
p.add_argument("-z", "--mixing_zone", action="store_true",
help="Compute mixing zone width")
p.add_argument("-m", "--mixing_cdf", action="store_true",
help="Plot CDF of box temps")
p.add_argument("-F", "--Fourier", action="store_true",
help="Plot Fourier spectrum in x-y")
p.add_argument("-b", "--boxes", action="store_true",
help="Compute box covering numbers")
p.add_argument("-nb", "--block", type=int, default=65536,
help="Number of elements to process at a time")
p.add_argument("-nt", "--thread", type=int, default=1,
help="Number of threads to spawn")
p.add_argument("-d", "--display", action="store_true", default=False,
help="Display plots with X")
p.add_argument("-p", "--parallel", action="store_true", default=False,
help="Use parallel map (IPython)")
p.add_argument( "--series", action="store_true", default=False,
help="Apply time-series analyses")
p.add_argument("--mapreduce", default=defaults["mapreduce"],
help="Module containing Map and Reduce implementations")
p.add_argument("--post", default=defaults["post"],
help="Module containing post_frame and post_series")
p.add_argument("-v", "--verbose", action="store_true", default=False,
help="Should I be really verbose, that is: wordy?")
"""
# Load the arguments
args = p.parse_args()
if args.frame_end == -1:
args.frame_end = args.frame
args.root = "/" + args.home_end.partition("/")[2]
return args
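# Example invocation (hypothetical front-end script name, run name and frame numbers):
#
#   nek-workflow RT_run1 -f 1 -e 10 --process --no-archive --analysis RTI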
| gpl-3.0 | 3,446,579,655,806,012,000 | 43.873786 | 98 | 0.577239 | false |
me4502/tandr | tandr.py | 1 | 2132 | import random
import json
from flask import Flask, render_template, url_for, request, redirect, session, flash
from flask_oauthlib.client import OAuth
app = Flask(__name__)
app.secret_key = "Ayy lmao"
oauth_key="763519b5c8d1c478f44296c2b6c82dcb772dc4b0fbafa66b68d889cd41da4d71"
oauth_secret="6ecb90487aaf53377f8a0e536c7a4a4ba3b142bb972d838b1034858bd5e670e5"
tanda_url = "https://my.tanda.co/{}"
oauth = OAuth()
tanda_login = oauth.remote_app('tanda',
base_url=tanda_url.format(""),
request_token_url=None,
access_token_url=tanda_url.format("api/oauth/token"),
access_token_method='POST',
authorize_url=tanda_url.format("api/oauth/authorize"),
request_token_params={'scope':'me user'},
consumer_key=oauth_key,
consumer_secret=oauth_secret
)
@app.route("/")
def index():
return render_template("index.html")
@app.route("/rate")
def rate():
return render_template("rate.html")
@app.route("/api/user")
def give_pair():
request = tanda_login.request('/api/v2/users').data
item1 = random.choice(request)
item2 = random.choice(request)
return "[{},{}]".format(json.dumps(item1), json.dumps(item2))
@app.route("/api/result", methods=["POST"])
def result_handler():
return ""
@app.route("/login")
def login():
return tanda_login.authorize(
#callback="https://aqueous-anchorage-15078.herokuapp.com/login/callback",
callback=url_for('oauth_authorized',_external=True),
state={ 'next': request.args.get('next') or request.referrer or None }
)
@app.route("/login/callback")
def oauth_authorized():
next_url = request.args.get('next') or url_for('index')
resp = tanda_login.authorized_response()
if resp is None:
flash(u'Invalid login')
return redirect(next_url)
session['tanda_token'] = (resp['access_token'], '')
session['user'] = tanda_login.request('api/v2/users/me').data
return redirect(next_url)
@tanda_login.tokengetter
def get_tanda_token(token=None):
return session.get('tanda_token')
if __name__ == "__main__":
app.run(debug=True)
| mit | 6,138,273,881,019,834,000 | 29.028169 | 85 | 0.665572 | false |
irgangla/pntools | pntools/partialorder_renderer.py | 1 | 7581 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
""" This program implements a renderer for LPO files. """
from PIL import Image, ImageDraw, ImageFont, ImageFilter # Python image library (Pillow)
from pntools import partialorder # LPO data structure
import math
import sys
import os
def calculate_size(lpo):
""" This function calculates the size and minimum coordinate
of the LPO.
return: ((width, height), (min_x, min_y))
"""
minmax = [0, 0, 0, 0]
for id, event in lpo.events.items():
x, y = event.position
if x < minmax[0]:
minmax[0] = x
if x > minmax[2]:
minmax[2] = x
if y < minmax[1]:
minmax[1] = y
if y > minmax[3]:
minmax[3] = y
width = minmax[2] - minmax[0] + 100
height = minmax[3] - minmax[1] + 100
offx = minmax[0]
if offx != 0:
offx -= 50
offy = minmax[1]
if offy != 0:
offy -= 50
return ((width, height), (offx, offy))
def create_image(size):
""" This function creates a new image with RGB color space,
white background color and the given size.
size: Size of the image.
return: new Image
"""
return Image.new('RGB', size, color=(255,255,255))
def draw_event(event, draw, doffset):
""" Helper method for event drawing. """
scale = 4
x, y = event.position
x = (x + doffset[0]) * scale
y = (y + doffset[1]) * scale
halfside = 8 * scale
linewidth = 2 * scale
distance = 2 * scale
offset = event.offset[0] * scale, event.offset[1] * scale
font = ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', 12 * scale)
draw.rectangle([x - halfside, y - halfside, x + halfside, y + halfside],
fill=(0, 0, 0), outline=(0, 0, 0))
draw.rectangle([x - halfside + linewidth, y - halfside + linewidth,
x + halfside - linewidth, y + halfside - linewidth],
fill=(255, 255, 255), outline=(255, 255, 255))
fontsize = font.getsize(event.label)
draw.text((x - fontsize[0] / 2 + offset[0], y + halfside + distance + offset[1]),
event.label, font=font, fill=(0, 0, 0))
def draw_arc(arc, draw, doffset, color):
""" Helper method for arc drawing. """
scale = 4
width = 2 * scale
tipsize = 10 * scale
halfside = 8
start_event = arc.lpo.events[arc.source] # get start event
end_event = arc.lpo.events[arc.target] # get end event
intersections = calculate_intersections(start_event, end_event, halfside) # calculate intersection points
# start point of arc
start = start_event.position[0] + intersections[0][0], start_event.position[1] + intersections[0][1]
# end point of arc
end = end_event.position[0] + intersections[1][0], end_event.position[1] + intersections[1][1]
vector = (float(start_event.position[0] - end_event.position[0]),
float(start_event.position[1] - end_event.position[1]))
vector_length = math.sqrt(vector[0] ** 2 + vector[1] ** 2)
tipcenter = (vector[0] * (tipsize / 2) / vector_length,
vector[1] * (tipsize / 2) / vector_length)
start = (start[0] + doffset[0]) * scale, (start[1] + doffset[1]) * scale
end = (end[0] + doffset[0]) * scale, (end[1] + doffset[1]) * scale
draw.line([start[0], start[1], end[0] + tipcenter[0], end[1] + tipcenter[1]],
fill=color, width=width)
tip = calculate_tip(start_event, end_event, tipsize)
draw.polygon([end, (end[0] + tip[0][0], end[1] + tip[0][1]),
(end[0] + tip[1][0], end[1] + tip[1][1])], outline=color, fill=color)
def calculate_tip(start, end, tipsize):
""" Helper function for tip point calculation.
return ((vector1_x, vector1_y), (vector2_x, vector2_y))
"""
vector = float(start.position[0] - end.position[0]), float(start.position[1] - end.position[1])
vector_length = math.sqrt(vector[0] ** 2 + vector[1] ** 2)
vector_sized = vector[0] * tipsize / vector_length, vector[1] * tipsize / vector_length
alpha = 30 * 2 * math.pi / 360
sin_alpha = math.sin(alpha)
cos_alpha = math.cos(alpha)
tip1 = (vector_sized[0] * cos_alpha - vector_sized[1] * sin_alpha,
vector_sized[0] * sin_alpha + vector_sized[1] * cos_alpha)
sin_alpha = math.sin(-alpha)
cos_alpha = math.cos(-alpha)
tip2 = (vector_sized[0] * cos_alpha - vector_sized[1] * sin_alpha,
vector_sized[0] * sin_alpha + vector_sized[1] * cos_alpha)
return tip1, tip2
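# Note: the two tip points above are the arc's direction vector scaled to 'tipsize' and
# rotated by +30 and -30 degrees (a plain 2D rotation), giving the two back corners of
# the arrow head relative to the arc's end point.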
def calculate_intersections(start, end, halfside):
""" Helper function for arc intersection point calculation. """
# vector from the center of the start event to the center of the end event
vector = float(end.position[0] - start.position[0]), float(end.position[1] - start.position[1])
start_vector = calculate_intersection_event(vector, halfside)
#calculate intersection for arc end
end_vector = calculate_intersection_event((-vector[0], -vector[1]), halfside)
return start_vector, end_vector
def calculate_intersection_event(vector, halfside):
""" Helper function, calculates intersection of arc and edge. """
#calculate a factor to scale the x-component to 10px (half of side length)
fact = 1
if vector[0] != 0:
fact = halfside / math.fabs(vector[0])
# scale the vector
intersection_vector = vector[0] * fact, vector[1] * fact
# if y-component of vector is larger than halfside or
# x-component is 0, scale with y-component
if math.fabs(intersection_vector[1]) > halfside or vector[0] == 0:
fact = halfside / math.fabs(vector[1])
intersection_vector = vector[0] * fact, vector[1] * fact
return intersection_vector[0], intersection_vector[1]
def draw_lpo(lpo, skeleton=False, transitive=False, skeleton_color=(0,0,255), transitive_color=(220, 220, 220)):
""" This method renders the given labelled partial order as an Image object. """
size, off = calculate_size(lpo)
doffset = -off[0], -off[1]
w, h = size
image = create_image((w * 4, h * 4))
d = ImageDraw.Draw(image)
if transitive:
for arc in lpo.arcs:
if not arc.user_drawn and not arc.skeleton:
draw_arc(arc, d, doffset, transitive_color)
for arc in lpo.arcs:
if arc.user_drawn:
draw_arc(arc, d, doffset, (0, 0, 0))
if skeleton:
for arc in lpo.arcs:
if arc.skeleton:
draw_arc(arc, d, doffset, skeleton_color)
for id, event in lpo.events.items():
draw_event(event, d, doffset)
return image
def antialias(image, factor):
""" This method applies an anti alias filter to the given image. Therefore
the image size is reduced by the given factor.
"""
x, y = image.size
img = image.resize((int(x / factor), int(y / factor)), Image.ANTIALIAS)
return img
if __name__ == "__main__":
if len(sys.argv) > 1: # load Petri net if file is given as parameter
lpos = partialorder.parse_lpo_file(sys.argv[1])
i = 1
for lpo in lpos:
img = draw_lpo(lpo)
img = antialias(img, 2)
img.show()
img.save("lpo-%d.png" % i)
i += 1
if os.path.exists("../abcabc.lpo"): # debug/demo file
lpos = partialorder.parse_lpo_file("../abcabc.lpo")
img = draw_lpo(lpos[0])
img = antialias(img, 2)
img.show()
img.save("../abcabc.png")
| mit | -4,004,009,900,292,507,000 | 32.396476 | 112 | 0.593589 | false |
gkc1000/pyscf | pyscf/mcscf/mc2step.py | 1 | 6534 | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <[email protected]>
#
import time
from functools import reduce
import numpy
import pyscf.lib.logger as logger
from pyscf.mcscf import mc1step
def kernel(casscf, mo_coeff, tol=1e-7, conv_tol_grad=None,
ci0=None, callback=None, verbose=None, dump_chk=True):
if verbose is None:
verbose = casscf.verbose
if callback is None:
callback = casscf.callback
log = logger.Logger(casscf.stdout, verbose)
cput0 = (time.clock(), time.time())
log.debug('Start 2-step CASSCF')
mo = mo_coeff
nmo = mo.shape[1]
ncore = casscf.ncore
ncas = casscf.ncas
nocc = ncore + ncas
eris = casscf.ao2mo(mo)
e_tot, e_cas, fcivec = casscf.casci(mo, ci0, eris, log, locals())
if ncas == nmo and not casscf.internal_rotation:
if casscf.canonicalization:
log.debug('CASSCF canonicalization')
mo, fcivec, mo_energy = casscf.canonicalize(mo, fcivec, eris,
casscf.sorting_mo_energy,
casscf.natorb, verbose=log)
else:
mo_energy = None
return True, e_tot, e_cas, fcivec, mo, mo_energy
if conv_tol_grad is None:
conv_tol_grad = numpy.sqrt(tol)
logger.info(casscf, 'Set conv_tol_grad to %g', conv_tol_grad)
conv_tol_ddm = conv_tol_grad * 3
conv = False
de, elast = e_tot, e_tot
totmicro = totinner = 0
casdm1 = 0
r0 = None
t2m = t1m = log.timer('Initializing 2-step CASSCF', *cput0)
imacro = 0
while not conv and imacro < casscf.max_cycle_macro:
imacro += 1
njk = 0
t3m = t2m
casdm1_old = casdm1
casdm1, casdm2 = casscf.fcisolver.make_rdm12(fcivec, ncas, casscf.nelecas)
norm_ddm = numpy.linalg.norm(casdm1 - casdm1_old)
t3m = log.timer('update CAS DM', *t3m)
max_cycle_micro = 1 # casscf.micro_cycle_scheduler(locals())
max_stepsize = casscf.max_stepsize_scheduler(locals())
for imicro in range(max_cycle_micro):
rota = casscf.rotate_orb_cc(mo, lambda:fcivec, lambda:casdm1, lambda:casdm2,
eris, r0, conv_tol_grad*.3, max_stepsize, log)
u, g_orb, njk1, r0 = next(rota)
rota.close()
njk += njk1
norm_t = numpy.linalg.norm(u-numpy.eye(nmo))
norm_gorb = numpy.linalg.norm(g_orb)
if imicro == 0:
norm_gorb0 = norm_gorb
de = numpy.dot(casscf.pack_uniq_var(u), g_orb)
t3m = log.timer('orbital rotation', *t3m)
eris = None
u = u.copy()
g_orb = g_orb.copy()
mo = casscf.rotate_mo(mo, u, log)
eris = casscf.ao2mo(mo)
t3m = log.timer('update eri', *t3m)
log.debug('micro %d ~dE=%5.3g |u-1|=%5.3g |g[o]|=%5.3g |dm1|=%5.3g',
imicro, de, norm_t, norm_gorb, norm_ddm)
if callable(callback):
callback(locals())
t2m = log.timer('micro iter %d'%imicro, *t2m)
if norm_t < 1e-4 or abs(de) < tol*.4 or norm_gorb < conv_tol_grad*.2:
break
totinner += njk
totmicro += imicro + 1
e_tot, e_cas, fcivec = casscf.casci(mo, fcivec, eris, log, locals())
log.timer('CASCI solver', *t3m)
t2m = t1m = log.timer('macro iter %d'%imacro, *t1m)
de, elast = e_tot - elast, e_tot
if (abs(de) < tol and
norm_gorb < conv_tol_grad and norm_ddm < conv_tol_ddm):
conv = True
else:
elast = e_tot
if dump_chk:
casscf.dump_chk(locals())
if callable(callback):
callback(locals())
if conv:
log.info('2-step CASSCF converged in %d macro (%d JK %d micro) steps',
imacro, totinner, totmicro)
else:
log.info('2-step CASSCF not converged, %d macro (%d JK %d micro) steps',
imacro, totinner, totmicro)
if casscf.canonicalization:
log.info('CASSCF canonicalization')
mo, fcivec, mo_energy = \
casscf.canonicalize(mo, fcivec, eris, casscf.sorting_mo_energy,
casscf.natorb, casdm1, log)
if casscf.natorb and dump_chk: # dump_chk may save casdm1
occ, ucas = casscf._eig(-casdm1, ncore, nocc)
casdm1 = numpy.diag(-occ)
if dump_chk:
casscf.dump_chk(locals())
log.timer('2-step CASSCF', *cput0)
return conv, e_tot, e_cas, fcivec, mo, mo_energy
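# Note: each macro iteration above alternates a small number of orbital-rotation micro
# steps (rotate_orb_cc) with a CASCI solve, and convergence requires the energy change,
# the orbital gradient norm and the CAS density-matrix change to all fall below their
# respective thresholds.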
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
mol = gto.Mole()
mol.verbose = 0
mol.output = None#"out_h2o"
mol.atom = [
['H', ( 1.,-1. , 0. )],
['H', ( 0.,-1. ,-1. )],
['H', ( 1.,-0.5 ,-1. )],
['H', ( 0.,-0.5 ,-1. )],
['H', ( 0.,-0.5 ,-0. )],
['H', ( 0.,-0. ,-1. )],
['H', ( 1.,-0.5 , 0. )],
['H', ( 0., 1. , 1. )],
]
mol.basis = {'H': 'sto-3g',
'O': '6-31g',}
mol.build()
m = scf.RHF(mol)
ehf = m.scf()
emc = kernel(mc1step.CASSCF(m, 4, 4), m.mo_coeff, verbose=4)[1]
print(ehf, emc, emc-ehf)
print(emc - -3.22013929407)
mol.atom = [
['O', ( 0., 0. , 0. )],
['H', ( 0., -0.757, 0.587)],
['H', ( 0., 0.757 , 0.587)],]
mol.basis = {'H': 'cc-pvdz',
'O': 'cc-pvdz',}
mol.build()
m = scf.RHF(mol)
ehf = m.scf()
mc = mc1step.CASSCF(m, 6, 4)
mc.verbose = 4
mo = m.mo_coeff.copy()
mo[:,2:5] = m.mo_coeff[:,[4,2,3]]
emc = mc.mc2step(mo)[0]
print(ehf, emc, emc-ehf)
#-76.0267656731 -76.0873922924 -0.0606266193028
print(emc - -76.0873923174, emc - -76.0926176464)
| apache-2.0 | 3,943,637,371,586,924,000 | 32.167513 | 88 | 0.536425 | false |
madaari/ti_summer_internship_BBB17 | Smart door bell/lcd.py | 1 | 1817 | #!/usr/bin/python
# Example using a character LCD connected to a Raspberry Pi or BeagleBone Black.
import time
import Adafruit_CharLCD as LCD
# Raspberry Pi pin configuration:
# lcd_rs = 27 # Note this might need to be changed to 21 for older revision Pi's.
# lcd_en = 22
# lcd_d4 = 25
# lcd_d5 = 24
# lcd_d6 = 23
# lcd_d7 = 18
# lcd_backlight = 4
# BeagleBone Black configuration:
lcd_rs = 'P9_15'
lcd_en = 'P9_29'
lcd_d4 = 'P9_31"'
lcd_d5 = 'P9_26'
lcd_d6 = 'P9_30'
lcd_d7 = 'P9_42'
lcd_backlight = 'P9_7'
# Define LCD column and row size for 16x2 LCD.
lcd_columns = 16
lcd_rows = 2
# Alternatively specify a 20x4 LCD.
# lcd_columns = 20
# lcd_rows = 4
# Initialize the LCD using the pins above.
lcd = LCD.Adafruit_CharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7, lcd_columns, lcd_rows, lcd_backlight)
# Print a two line message
lcd.message('Hello\nworld!')
# Wait 5 seconds
time.sleep(5.0)
# Demo showing the cursor.
lcd.clear()
lcd.show_cursor(True)
lcd.message('Show cursor')
time.sleep(5.0)
# Demo showing the blinking cursor.
lcd.clear()
lcd.blink(True)
lcd.message('Blink cursor')
time.sleep(5.0)
# Stop blinking and showing cursor.
lcd.show_cursor(False)
lcd.blink(False)
# Demo scrolling message right/left.
lcd.clear()
message = 'Scroll'
lcd.message(message)
for i in range(lcd_columns-len(message)):
time.sleep(0.5)
lcd.move_right()
for i in range(lcd_columns-len(message)):
time.sleep(0.5)
lcd.move_left()
# Demo turning backlight off and on.
lcd.clear()
lcd.message('Flash backlight\nin 5 seconds...')
time.sleep(5.0)
# Turn backlight off.
lcd.set_backlight(0)
time.sleep(2.0)
# Change message.
lcd.clear()
lcd.message('Goodbye!')
# Turn backlight on.
lcd.set_backlight(1)
| gpl-3.0 | 1,803,859,612,244,337,000 | 20.891566 | 112 | 0.663731 | false |
libretees/libreshop | libreshop/inventory/migrations/0020_auto_20161023_2045.py | 1 | 1095 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-10-23 20:45
from __future__ import unicode_literals
from decimal import Decimal
import django.core.validators
from django.db import migrations
import django_measurement.models
class Migration(migrations.Migration):
dependencies = [
('inventory', '0019_auto_20161022_2348'),
]
operations = [
migrations.RemoveField(
model_name='inventory',
name='cost',
),
migrations.AlterField(
model_name='inventory',
name='packed_weight',
field=django_measurement.models.MeasurementField(default=Decimal('0'), measurement_class='Mass', validators=[django.core.validators.MinValueValidator(Decimal('0.00'))], verbose_name='Packed Weight'),
),
migrations.AlterField(
model_name='inventory',
name='weight',
field=django_measurement.models.MeasurementField(default=Decimal('0'), measurement_class='Mass', validators=[django.core.validators.MinValueValidator(Decimal('0.00'))]),
),
]
| gpl-3.0 | -927,113,509,204,794,900 | 33.21875 | 211 | 0.647489 | false |
bholmgren/pycligen | cligen.py | 1 | 32802 | #
# PyCLIgen module
#
# Copyright (C) 2014-2015 Benny Holmgren
#
# This file is part of PyCLIgen.
#
# PyCLIgen is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# PyCLIgen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyCLIgen; see the file LICENSE.
"""Python binding for the CLIgen library by Olof Hagsand
This module implements a Python API to CLIgen, allowing the developer
to utilize the power of CLIgen without coding in C
"""
# Make python2 behave more like python 3.
from __future__ import unicode_literals, absolute_import, print_function
__version__ = '0.1'
import sys
import copy
import ipaddress
if sys.version_info.major < 3:
from urlparse import urlparse
else:
from urllib.parse import urlparse
import _cligen
from _cligen import *
#
# obsolete: done by CgVar constructor
#
#def cv_parse(str, type, name=None):
# """Parse a string into a CgVar object.
#
# Args:
# str: A string representing a value to be parsed
# type: The CLIgen variable type
# name: The name of the variable (optional)
#
# Returns:
# A CgVar object
#
# Raises:
# ValueError: If the string passed cannot be parsed successfully
# MemoryError: If needed memory was not available
# """
#
# cv = CgVar(type)
# if name is not None:
# cv.name_set(name)
# cv.parse(str)
# return cv
#
# CLIgen
#
class CLIgen (_cligen.CLIgen):
'The CLIgen class'
def __init__(self, *args, **kwargs):
"""
Args:
None
or
syntax-spec: An optional argument specifying a CLIgen syntax format
that will be parsed and activated. The specification can
be provided as a normal string format or as named
arguments containing the specification or a filename of
a file containing the specification. For example:
syntax='myvar="myval";\nhello-world("Greet the world");'
file='/usr/local/myapp/myapp.cli'
Raises:
TypeError: If invalid arguments are provided.
MemoryError: If memory allocation fails
ValueError: If the string passed cannot be parsed successfully
"""
numargs = len(args) + len(kwargs)
if numargs > 1:
raise TypeError("function takes at most 1 argument ({:d} given)".format(numargs))
# Call parent to setup CLIgen structures.
super(CLIgen, self).__init__(*args, **kwargs)
if numargs == 1:
if len(kwargs) > 0: # named argument
if "file" in kwargs:
pt = ParseTree(self, file=kwargs['file'])
elif "syntax" in kwargs:
pt = ParseTree(self, syntax=kwargs['syntax'])
else:
raise TypeError("'{:s}' is an invalid keyword argument for this function".format(list(kwargs.keys())[0]))
elif len(args) > 0:
pt = ParseTree(self, syntax=args[0])
if pt:
self.tree_add("__CLIgen__", pt)
self.tree_active_set("__CLIgen__")
def output(self, file, out):
"""
CLIgen output function. All terminal output should be made via this method
Args:
file: The IO object to direct the output to, such as a file or sys.stdout
out: The output string/object
Returns:
None
Raises:
IOError: If there is a problem writing to the file object
"""
return super(CLIgen, self)._output(file, str(out))
def _cligen_cb(self, name, vr, arg):
# module_name, class_name = name.rsplit(".", 1)
# m = importlib.import_module(module_name)
if hasattr(sys.modules['__main__'], name) is True:
return getattr(sys.modules['__main__'], name)(self, vr, arg)
return None
def _cligen_expand(self, name, vr, arg):
if hasattr(sys.modules['__main__'], name) is True:
cmds = getattr(sys.modules['__main__'], name)(self, name, vr, arg)
if type(cmds) != list:
raise TypeError("expand callback must return list of dicts")
return cmds
else:
return None
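# Minimal usage sketch (assumptions: the CLIgen syntax string below follows the format
# shown in the __init__ docstring, and the callback lookup relies on _cligen_cb above
# finding 'hello_cb' in the application's __main__ module):
#
#   def hello_cb(cli, cvars, arg):
#       cli.output(sys.stdout, "Hello world")
#       return 0
#
#   c = CLIgen('hello-world("Greet the world"), hello_cb();')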
#
# CgVar
#
class CgVar (_cligen.CgVar):
'CLIgen variable object'
def __init__(self, *args, **kwargs):
""" Optional args:
type: The CLIgen variable type
name: The name of the variable
value: A string representing a value to be parsed
Returns:
A new CgVar object
Raises:
ValueError: If the string passed cannot be parsed successfully
MemoryError: If needed memory was not available
"""
return super(CgVar, self).__init__(*args, **kwargs)
def __repr__(self):
return super(CgVar, self).__repr__()
def __str__(self):
return super(CgVar, self).__str__()
def __int__(self):
return int(self._getnumber())
def __float__(self):
return float(self._getnumber())
def __cmp__(self, other):
return super(CgVar, self).__cmp__(other)
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return self.__cmp__(other) == 0
def __ne__(self, other):
return self.__cmp__(other) != 0
def __lt__(self, other):
return self.__cmp__(other) < 0
def __gt__(self, other):
return self.__cmp__(other) > 0
def __le__(self, other):
return self.__cmp__(other) <= 0
def __ge__(self, other):
return self.__cmp__(other) >= 0
def __add__(self, other):
val = None
stype = self.type_get()
if isinstance(other, CgVar):
otype = other.type_get()
else:
otype = stype;
if self.isnumeric():
if isinstance(other, CgVar):
if other.isnumeric():
val = self._getnumber() + other._getnumber()
elif other.isstring():
val = str(self) + str(other)
elif isinstance(other, int):
val = self._getnumber() + other
elif isinstance(other, str):
val = str(self) + other
elif self.isstring():
val = str(self) + str(other)
if val is None:
raise TypeError("unsupported operand type(s) for +: 'CgVar({:s})' and CgVar({:s})".format(self.type2str(), other.type2str()))
if (stype < otype):
type = otype
else:
type = stype;
return CgVar(type, self.name_get(), val)
def __iadd__(self, other):
result = self.__add__(other)
self.parse(str(result))
return self
def __sub__(self, other):
val = None
stype = self.type_get()
if isinstance(other, CgVar):
otype = other.type_get()
else:
otype = stype;
if self.isnumeric():
if isinstance(other, CgVar):
if other.isnumeric():
val = self._getnumber() - other._getnumber()
elif other.isstring():
val = str(self) - str(other)
elif isinstance(other, int):
val = self._getnumber() - other
elif isinstance(other, str):
val = str(self) - other
elif self.isstring():
val = str(self) - str(other)
if val is None:
raise TypeError("unsupported operand type(s) for -: 'CgVar({:s})' and CgVar({:s})".format(self.type2str(), other.type2str()))
if (stype < otype):
type = otype
else:
type = stype;
return CgVar(type, self.name_get(), val)
def __isub__(self, other):
result = self.__sub__(other)
self.parse(str(result))
return self
def __mul__(self, other):
stype = self.type_get()
if isinstance(other, CgVar):
otype = other.type_get()
else:
otype = stype;
if self.isnumeric():
factor1 = self._getnumber()
else:
raise TypeError("unsupported operand type for *: 'CgVar({:s})'".format(self.type2str()))
if isinstance(other, CgVar):
if other.isnumeric():
factor2 = other._getnumber()
else:
raise TypeError("unsupported operand type for *: 'CgVar({:s})'".format(other.type2str()))
elif isinstance(other, int) or isinstance(other, float):
factor2 = other
else:
raise TypeError("unsupported operand type for *: '{:s}'".format(other.__class__.__name__))
val = factor1 * factor2
if (stype < otype):
type = otype
else:
type = stype;
return CgVar(type, self.name_get(), val)
def __imul__(self, other):
result = self.__mul__(other)
self.parse(str(result))
return self
def __truediv__(self, other):
precision = self._dec64_n_get()
stype = self.type_get()
if isinstance(other, CgVar):
otype = other.type_get()
else:
otype = stype;
if self.isnumeric():
factor1 = self._getnumber()
else:
raise TypeError("unsupported operand type for /: 'CgVar({:s})'".format(self.type2str()))
if isinstance(other, CgVar):
if other.isnumeric():
factor2 = other._getnumber()
else:
raise TypeError("unsupported operand type for /: 'CgVar({:s})'".format(other.type2str()))
elif isinstance(other, int) or isinstance(other, float):
factor2 = other
else:
raise TypeError("unsupported operand type for /: '{:s}'".format(other.__class__.__name__))
val = factor1 / factor2
if (stype < otype):
type = otype
else:
type = stype;
fmt = '{:.'+str(precision)+'f}'
return CgVar(type, self.name_get(), fmt.format(val))
def __itruediv__(self, other):
result = self.__div__(other)
self.parse(str(result))
return self
def __div__(self, other):
return self.__truediv__(other)
def __idiv__(self, other):
return self.__itruediv__(other)
def __copy__(self):
return super(CgVar, self).__copy__()
def _getnumber(self):
if self.isnumeric() is False:
raise ValueError("not a number object")
type = self.type_get()
if type is CGV_INT8:
return self.int8_get()
elif type is CGV_INT16:
return self.int16_get()
elif type is CGV_INT32:
return self.int32_get()
elif type is CGV_INT64:
return self.int64_get()
elif type is CGV_UINT8:
return self.uint8_get()
elif type is CGV_UINT16:
return self.uint16_get()
elif type is CGV_UINT32:
return self.uint32_get()
elif type is CGV_UINT64:
return self.uint64_get()
elif type is CGV_DEC64:
return self.dec64_get()
    def _setnumber(self, num):
        if self.isnumeric() is False:
            raise ValueError("not a number object")
        type = self.type_get()
        # Dispatch to the setter matching the current numeric type; this
        # mirrors the types accepted by isnumeric()/_getnumber().
        setters = {
            CGV_INT8: self.int8_set, CGV_INT16: self.int16_set,
            CGV_INT32: self.int32_set, CGV_INT64: self.int64_set,
            CGV_UINT8: self.uint8_set, CGV_UINT16: self.uint16_set,
            CGV_UINT32: self.uint32_set, CGV_UINT64: self.uint64_set,
            CGV_DEC64: self.dec64_set,
        }
        return setters[type](num)
def isnumeric(self):
if self.type_get() in [CGV_INT8, CGV_INT16, CGV_INT32, CGV_INT64, CGV_UINT8, CGV_UINT16, CGV_UINT32, CGV_UINT64, CGV_DEC64]:
return True
return False
def isstring(self):
if self.type_get() in [CGV_STRING, CGV_REST, CGV_INTERFACE, CGV_URL]:
return True
return False
def name_get(self):
"""Get CgVar variable name
Args:
None
Returns:
Name as a string if available or None otherwise
Raises:
MemoryError: If needed memory was not available
"""
return super(CgVar, self)._name_get()
def name_set(self, name):
"""Set CgVar variable name
Args:
'name': New name of variable
Returns:
New name as a string
Raises:
MemoryError: If needed memory was not available
"""
return super(CgVar, self)._name_set(name)
def type_get(self):
"""Get CgVar variable type
Args:
None
Returns:
Variable type as int
Raises:
None
"""
return int(super(CgVar, self)._type_get())
def type_set(self, type):
"""Set CgVar variable type
Args:
'type': New type as int
Returns:
New type
Raises:
MemoryError: If needed memory was not available
"""
return super(CgVar, self)._type_set(type)
def type2str(self, *args):
""""Get string name of CgVar type
Args:
'type': Optionally specify type, otherwise self's current type is used
Returns:
MemoryError: If needed memory was not available
"""
return super(CgVar, self)._type2str(*args)
def int8_get(self):
"""Get CgVar variable 8-bit int value
Args:
None
Returns:
The int value
Raises:
            ValueError: If 'self' is not an 8-bit int
"""
return super(CgVar, self)._int8_get()
def int8_set(self, value):
"""Set CgVar variable 8-bit int value
Args:
'value': The new value
Returns:
The new value
Raises:
            ValueError: If 'self' is not an 8-bit int
"""
return super(CgVar, self)._int8_set(value)
def int16_get(self):
"""Get CgVar variable 16-bit int value
Args:
None
Returns:
The int value
Raises:
ValueError: If 'self' is not a 16-bit int
"""
return super(CgVar, self)._int16_get()
def int16_set(self, value):
"""Set CgVar variable 16-bit int value
Args:
'value': The new value
Returns:
The new value
Raises:
ValueError: If 'self' is not a 16-bit int
"""
return super(CgVar, self)._int16_set(value)
def int32_get(self):
"""Get CgVar variable 32-bit int value
Args:
None
Returns:
The int value
Raises:
ValueError: If 'self' is not a 32-bit int
"""
return super(CgVar, self)._int32_get()
def int32_set(self, value):
"""Set CgVar variable 32-bit int value
Args:
'value': The new value
Returns:
The new value
Raises:
ValueError: If 'self' is not a 32-bit int
"""
return super(CgVar, self)._int32_set(value)
def int64_get(self):
"""Get CgVar variable 64-bit int value
Args:
None
Returns:
The int value
Raises:
ValueError: If 'self' is not a 64-bit int
"""
return super(CgVar, self)._int64_get()
def int64_set(self, value):
"""Set CgVar variable 64-bit int value
Args:
'value': The new value
Returns:
The new value
Raises:
ValueError: If 'self' is not a 64-bit int
"""
return super(CgVar, self)._int64_set(value)
def uint8_get(self):
"""Get CgVar variable 8-bit unsigned int value
Args:
None
Returns:
The unsigned int value
Raises:
            ValueError: If 'self' is not an 8-bit unsigned int
"""
return super(CgVar, self)._uint8_get()
def uint8_set(self, value):
"""Set CgVar variable 8-bit unsigned int value
Args:
'value': The new value
Returns:
The new value
Raises:
            ValueError: If 'self' is not an 8-bit unsigned int
"""
return super(CgVar, self)._uint8_set(value)
def uint16_get(self):
"""Get CgVar variable 16-bit unsigned int value
Args:
None
Returns:
The unsigned int value
Raises:
ValueError: If 'self' is not a 16-bit unsigned int
"""
return super(CgVar, self)._uint16_get()
def uint16_set(self, value):
"""Set CgVar variable 16-bit unsigned int value
Args:
'value': The new value
Returns:
The new value
Raises:
ValueError: If 'self' is not a 16-bit unsigned int
"""
return super(CgVar, self)._uint16_set(value)
def uint32_get(self):
"""Get CgVar variable 32-bit unsigned int value
Args:
None
Returns:
The unsigned int value
Raises:
ValueError: If 'self' is not a 32-bit unsigned int
"""
return super(CgVar, self)._uint32_get()
def uint32_set(self, value):
"""Set CgVar variable 32-bit unsigned int value
Args:
'value': The new value
Returns:
The new value
Raises:
ValueError: If 'self' is not a 32-bit unsigned int
"""
return super(CgVar, self)._uint32_set(value)
def uint64_get(self):
"""Get CgVar variable 64-bit unsigned int value
Args:
None
Returns:
The unsigned int value
Raises:
ValueError: If 'self' is not a 64-bit unsigned int
"""
return super(CgVar, self)._uint64_get()
def uint64_set(self, value):
"""Set CgVar variable 64-bit unsigned int value
Args:
'value': The new value
Returns:
The new value
Raises:
ValueError: If 'self' is not a 64-bit unsigned int
"""
return super(CgVar, self)._uint64_set(value)
def int_get(self):
"""Get CgVar variable int value
Args:
None
Returns:
The int value
Raises:
            ValueError: If 'self' is not an int
"""
return super(CgVar, self)._int_get()
def int_set(self, value):
"""Set CgVar variable int value
Args:
'value': The new value
Returns:
The new value
Raises:
            ValueError: If 'self' is not an int
"""
return super(CgVar, self)._int_set(value)
def long_get(self):
"""Get CgVar variable long value
Args:
None
Returns:
The long value
Raises:
ValueError: If 'self' is not a long
"""
return super(CgVar, self)._long_get()
def long_set(self, value):
"""Set CgVar variable long value
Args:
'value': The new value
Returns:
The new value
Raises:
ValueError: If 'self' is not a long
"""
return super(CgVar, self)._long_set(value)
def dec64_get(self):
"""Get CgVar variable 64-bit decimal value
Args:
None
Returns:
The decimal value
Raises:
ValueError: If 'self' is not a decimal value
"""
return float(super(CgVar, self)._dec64_get())
def dec64_set(self, value):
"""Set CgVar variable 64-bit decimal value
Args:
'value': The new value
Returns:
The new value
Raises:
ValueError: If 'self' is not a decimal value
"""
        return super(CgVar, self)._dec64_set(str(value))
def bool_get(self):
"""Get CgVar variable boolean value
Args:
None
Returns:
True or False
Raises:
ValueError: If 'self' is not a boolean
"""
return super(CgVar, self)._bool_get()
def bool_set(self, boolean):
"""Set CgVar variable boolean value
Args:
'boolean': The status as a boolean
Returns:
The new value
Raises:
ValueError: If 'self' is not a boolean
"""
return super(CgVar, self)._bool_set(boolean)
def string_get(self):
"""Get CgVar variable string value
Args:
None
Returns:
The string value or None if not set
Raises:
MemoryError: If needed memory was not available
"""
return super(CgVar, self)._string_get()
def string_set(self, string):
"""Set CgVar variable string value
Args:
'string': New value of variable
Returns:
The new value
Raises:
MemoryError: If needed memory was not available
"""
return super(CgVar, self)._string_set(string)
def ipv4addr_get(self):
"""Get IPv4 address value from CgVar object.
Args:
None
Returns:
An ipaddress.IPv4Address object
Raises:
TypeError: If the CgVar object is not of the types CGV_IPV4ADDR
            or CGV_IPV4PFX.
"""
return ipaddress.IPv4Address(super(CgVar, self)._ipv4addr_get())
def ipv4masklen_get(self):
"""Get mask length of IPv4 prefix value from CgVar object.
Args:
None
Returns:
The mask length as an int
Raises:
TypeError: If the CgVar object is not of the types CGV_IPV4ADDR
            or CGV_IPV4PFX.
"""
return super(CgVar, self)._ipv4masklen_get()
# def ipv4addr_set(self, addr):
# """Get IPv4 address value from CgVar object.
#
# Args:
# addr: An ipaddress.IPv4Address object
#
# Returns:
# True if successful
#
# Raises:
# TypeError: If the CgVar object is not of the type CGV_IPV4ADDR
# MemoryError: If needed memory was not available
#
# """
# if self.type_get() is not CGV_IPV4ADDR:
# raise TypeError("invalid type")
# return self.parse(str(addr))
# def ipv4prefix_set(self, pfx):
# """Get IPv4 address value from CgVar object.
#
# Args:
# pfx: An ipaddress.IPv4Network object
#
# Returns:
# True if successful
#
# Raises:
# TypeError: If the CgVar object is not of the types CGV_IPV4PFX.
# MemoryError: If needed memory was not available
#
# """
# if self.type_get() is not CGV_IPV4PFX:
# raise TypeError("invalid type")
# return self.parse(str(pfx))
def ipv6addr_get(self):
"""Get IP v6 address value from CgVar object.
Args:
None
Returns:
An ipaddress.IPv6Address object
Raises:
TypeError: If the CgVar object is not of the types CGV_IPV6ADDR
            or CGV_IPV6PFX.
"""
return ipaddress.IPv6Address(super(CgVar, self)._ipv6addr_get())
def ipv6masklen_get(self):
"""Get mask length of IPv6 prefix value from CgVar object.
Args:
None
Returns:
The mask length as an int
Raises:
TypeError: If the CgVar object is not of the types CGV_IPV6ADDR
            or CGV_IPV6PFX.
"""
return super(CgVar, self)._ipv6masklen_get()
# def ipv6addr_set(self, addr):
# """Get IPv6 address value from CgVar object.
#
# Args:
# addr: An ipaddress.IPv6Address object
#
# Returns:
# True if successful
#
# Raises:
# TypeError: If the CgVar object is not of the type CGV_IPV6ADDR
# MemoryError: If needed memory was not available
#
# """
# if self.type_get() is not CGV_IPV6ADDR:
# raise TypeError("invalid type")
# return self.parse(str(addr))
# def ipv6prefix_set(self, pfx):
# """Get IPv6 address value from CgVar object.
#
# Args:
# pfx: An ipaddress.IPv6Network object
#
# Returns:
# True if successful
#
# Raises:
# TypeError: If the CgVar object is not of the types CGV_IPV6PFX.
# MemoryError: If needed memory was not available
#
# """
# if self.type_get() is not CGV_IPV6PFX:
# raise TypeError("invalid type")
# return self.parse(str(pfx))
def mac_get(self):
"""Get CgVar variable MAC address value
Args:
None
Returns:
The MAC address value as a 'long'
Raises:
MemoryError: If needed memory was not available
"""
return super(CgVar, self)._mac_get()
def uuid_get(self):
"""Get CgVar variable UUID value
Args:
None
Returns:
The UUID as an 'uuid' object
Raises:
MemoryError: If needed memory was not available
"""
return super(CgVar, self)._uuid_get()
def time_get(self):
"""Get time value of CgVar object.
Args:
None
Returns:
The time since the epoch as a 'float' object
Raises:
MemoryError: If needed memory was not available
"""
return super(CgVar, self)._time_get()
def time_set(self, timespec):
"""Set time value of CgVar object.
Args:
timespec: The time specification which can either be a Python 'float' or
'int' object specifying seconds since the epoch or a 'string' to be
parsed by CLIgen.
Returns:
The new value as a 'float' object
Raises:
TypeError: If self is not a CGV_TIME or timespec is an invalid type
ValueError: If timespec could not be parsed
MemoryError: If needed memory was not available
"""
if self.type_get() is not CGV_TIME:
raise TypeError("'self' is of invalid type")
        if isinstance(timespec, int) or isinstance(timespec, float):
            super(CgVar, self)._time_set(str(float(timespec)))
        elif isinstance(timespec, str):
            self.parse(timespec)
        else:
            raise TypeError("'timespec' is of invalid type")
        return self.time_get()
def url_get(self):
"""Get URL value from CgVar object.
Args:
None
Returns:
A populated urlib/urlparse ParseResult object
Raises:
TypeError: If self is not CGV_URL type
"""
if self.type_get() is not CGV_URL:
raise TypeError("'self' is of invalid type")
return urlparse(str(self))
def url_set(self, url):
"""Set URL value to CgVar object.
Args:
'url': A string representing the url
Returns:
A populated urlib/urlparse ParseResult object
Raises:
ValueError: If 'string' could not be parsed
MemoryError: If needed memory was not available
"""
if self.type_get() is not CGV_URL:
raise TypeError("'self' is of invalid type")
self.parse(url)
return self.url_get()
def parse(self, string):
"""Parse a string representation of a value and assign the result
to 'self'. The parsing is based on the current type set.
Args:
'string': The string representation of the value. Ex: "1.2.3.4" or
"01:00:af:43:fd:aa"
Returns:
True on success
Raises:
ValueError: If 'string' could not be parsed
MemoryError: If needed memory was not available
"""
return super(CgVar, self)._parse(string)
#
# Cvec
#
class Cvec (_cligen.Cvec):
'A vector of CgVar'
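    # The elements are kept in the plain Python list self._cvec; items can be
    # looked up either by integer position or by CgVar name (see __getitem__).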
def __init__(self, arg=None):
self._cvec = []
if arg is not None:
if (isinstance(arg, list)):
for cv in arg:
self.append(cv)
else:
raise ValueError
def __str__(self):
return self._cvec.__str__()
# str = "["
# for cv in self._cvec:
# str += "$%s=%s" % (cv.name_get(), cv)
# str += "]"
#
# return str
def __repr__(self):
return self._cvec.__repr__()
def __getitem__(self, key):
if isinstance(key, int):
return self._cvec[key]
elif isinstance(key, str):
for cv in self._cvec:
if cv.name_get() == key:
return cv
raise IndexError("element not found")
else:
raise TypeError('key must be int or str')
def __setitem__(self, key, cv):
if isinstance(key, int):
pass
elif isinstance(key, str):
for idx, c in enumerate(self._cvec):
if c.name_get() == key:
key = idx
break
if isinstance(key, str): # not found
raise IndexError("element not found")
else:
raise TypeError('key must be int or str')
if (isinstance(cv, CgVar)):
self._cvec[key] = cv;
elif isinstance(cv, str):
self._cvec[key].parse(cv)
else:
raise TypeError('cv must be CgVar or str')
return self._cvec[key]
def __iter__(self):
for cv in self._cvec:
yield cv
def __len__(self):
return len(self._cvec)
def __contains__(self, key):
for cv in self._cvec:
if cv.name_get() == key:
return True
return False
def __add__(self, other):
        if isinstance(other, Cvec):
            new = copy.copy(self)
            new._cvec += other._cvec
            return new
elif isinstance(other, CgVar):
new = copy.copy(self)
new.append(other)
return new
else:
raise TypeError("unsupported operand type for +: '{:s}'".format(other.__class__.__name__))
def __iadd__(self, other):
if isinstance(other, Cvec):
self._cvec += other._cvec
elif isinstance(other, CgVar):
self.append(other)
else:
raise TypeError("unsupported operand type for +: '{:s}'".format(other.__class__.__name__))
return self
def __copy__(self):
new = self.__class__()
for cv in self._cvec:
new.append(copy.copy(cv))
return new
def __deepcopy__(self, memo):
return self.__copy__()
def index(self, val):
for idx, cv in enumerate(self._cvec):
if isinstance(val, str):
if cv.name_get() == val:
return idx
elif isinstance(val, CgVar):
if cv == val:
return idx
else:
raise ValueError
def remove(self, val):
del self[self.index(val)]
def append(self, arg):
if isinstance(arg, int):
cv = CgVar(arg)
elif isinstance(arg, CgVar):
cv = arg
else:
raise TypeError("argument must be int or CgVar")
self._cvec.append(cv)
return cv;
def keys(self):
keys = []
for cv in self._cvec:
if cv.name_get() != None:
keys.append(cv.name_get())
return keys
#
# ParseTree
#
#class ParseTree():
# 'CLIgen parse tree'
#
# """ParseTree constructor.
#
#Takes one named argument:
# string='<syntax format>' - String will be parsed as CLIgen syntax.
# file='<filename>' - Argument specifies a file containing CLIgen syntax.
#If argument is unnamed, it implies a "string" argument.
#"""
## super(ParseTree, self).__init__(*args, **kwargs)
# if len(kwargs) == 1:
# if "file" in kwargs:
# with open(kwargs['file'], 'r') as f:
# self.syntax = f.read()
# elif "string" in kwargs:
# self.syntax = kwargs['string']
# else:
# raise AttributeError("Argument named 'string' or 'file' expected")
#
# elif len(args) == 1:
# self.syntax = args[0]
#
# else:
# raise TypeError("__init__() takes 1 positional argument")
#
# if isinstance(self.syntax, str) is False:
# raise AttributeError("Argument must be string")
#
#
def type2str(type):
"""Get string representation of a CLIgen type
Args:
'type': The CLIgen type
    Returns:
        The string representation of the type
    Raises:
        MemoryError: If needed memory was not available
"""
return CgVar().type2str(type)
# Testing..
#_types = {
# type2str(CGV_INT) : CGV_INT,
# type2str(CGV_LONG) : CGV_LONG,
# type2str(CGV_BOOL) : CGV_BOOL,
# type2str(CGV_STRING) : CGV_STRING,
# type2str(CGV_REST) : CGV_REST,
# type2str(CGV_INTERFACE) : CGV_INTERFACE,
# type2str(CGV_IPV4ADDR) : CGV_IPV4ADDR,
# type2str(CGV_IPV4PFX) : CGV_IPV4PFX,
# type2str(CGV_IPV6ADDR) : CGV_IPV6ADDR,
# type2str(CGV_IPV6PFX) : CGV_IPV6PFX,
# type2str(CGV_MACADDR) : CGV_MACADDR,
# type2str(CGV_URL) : CGV_URL,
# type2str(CGV_UUID) : CGV_UUID,
# type2str(CGV_TIME) : CGV_TIME,
#}
| gpl-3.0 | 8,613,711,472,700,645,000 | 22.247342 | 137 | 0.550546 | false |
erigones/esdc-ce | pdns/validators.py | 1 | 6625 | import re
from django.core.exceptions import ValidationError
from django.core.validators import validate_ipv4_address, validate_ipv6_address, MaxLengthValidator
from django.utils.translation import ugettext_lazy as _
from pdns.models import Record
re_record_name_valid_chars = re.compile(r'^((([a-z\d_])|(\*\.))(-*[a-z\d_/])*)(\.([a-z\d_](-*[a-z\d/])*))*$',
re.IGNORECASE)
re_record_name_valid_labels = re.compile(r'^[^.]{1,63}(\.[^.]{1,63})*$', re.IGNORECASE)
re_fqdn_parts = re.compile(r'^(?![-/])[a-z\d/-]{1,63}(?<![-/])$', re.IGNORECASE)
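# The patterns above enforce RFC 1035-style limits: the allowed character set
# (plus an optional leading "*." wildcard), labels of at most 63 characters,
# and FQDN parts that do not start or end with '-' or '/'.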
err_field_required = _('This field is required.')
class RecordValidationError(ValidationError):
"""
Exception raised for invalid DNS records.
"""
attr = None
def __init__(self, message):
super(RecordValidationError, self).__init__({self.attr: [message]})
class RecordNameValidationError(RecordValidationError):
attr = 'name'
class RecordContentValidationError(RecordValidationError):
attr = 'content'
def is_valid_fqdn(value):
"""Validate hostname"""
if not value or len(value) > 255:
return False
if value[-1] == '.':
value = value[:-1]
return all(re_fqdn_parts.match(i) for i in value.split('.'))
def validate_dns_name(value):
"""Validate DNS domain/record name"""
if not re_record_name_valid_chars.match(value):
raise RecordNameValidationError(_('Invalid characters detected. Enter a valid DNS name.'))
if not re_record_name_valid_labels.match(value):
raise RecordNameValidationError(_('Invalid label detected. Enter a valid DNS name.'))
def validate_fqdn(value):
if not is_valid_fqdn(value):
raise RecordContentValidationError(_('Invalid fully qualified domain name.'))
class BaseRecordValidator(object):
"""
DNS record validation base class.
"""
check_name_suffix_against_domain = True
def __init__(self, domain, name, content):
if name:
validate_dns_name(name)
else:
raise RecordNameValidationError(err_field_required)
if self.check_name_suffix_against_domain:
if name != domain.name and not name.endswith('.' + domain.name):
raise RecordNameValidationError(_('Name does not end with domain name. Enter a valid DNS name.'))
if content is None:
content = ''
self.domain = domain
self.name = name
self.content = content
def __call__(self):
pass
class ARecordValidator(BaseRecordValidator):
def __call__(self):
try:
validate_ipv4_address(self.content)
except ValidationError as exc:
raise RecordContentValidationError(exc.message)
class AAAARecordValidator(BaseRecordValidator):
def __call__(self):
try:
validate_ipv6_address(self.content)
except ValidationError as exc:
raise RecordContentValidationError(exc.message)
class CNAMERecordValidator(BaseRecordValidator):
def __call__(self):
validate_fqdn(self.content)
class MXRecordValidator(BaseRecordValidator):
def __call__(self):
validate_fqdn(self.content)
class NSRecordValidator(BaseRecordValidator):
def __call__(self):
validate_fqdn(self.content)
class TXTRecordValidator(BaseRecordValidator):
def __call__(self):
try:
MaxLengthValidator(64000)(self.content)
except ValidationError as exc:
raise RecordContentValidationError(exc.message)
class PTRRecordValidator(BaseRecordValidator):
def __call__(self):
validate_fqdn(self.content)
class SRVRecordValidator(BaseRecordValidator):
def __call__(self):
# TODO: name = '_service._protocol.name.'
# content = 'weight port target'
content = self.content.strip().split(' ')
if len(content) != 3:
raise RecordContentValidationError(_('Invalid number of SRV fields.'))
try:
if not (0 < int(content[0]) < 65536):
raise ValueError
except ValueError:
raise RecordContentValidationError(_('Invalid weight field.'))
try:
if not (0 < int(content[1]) < 65536):
raise ValueError
except ValueError:
raise RecordContentValidationError(_('Invalid port field.'))
if not is_valid_fqdn(content[2]):
raise RecordContentValidationError(_('Invalid target field.'))
class SOARecordValidator(BaseRecordValidator):
    re_email_addr = re.compile(r'^[a-z0-9_][a-z0-9_.-]*$', re.IGNORECASE)
check_name_suffix_against_domain = False
def __call__(self):
if self.name != self.domain.name:
raise RecordContentValidationError(_('Name has to be the same as domain name for a SOA record.'))
# content = 'example.com. hostmaster.example.com. 1 7200 900 1209600 86400'
content = self.content.strip().split(' ')
if len(content) != 7:
raise RecordContentValidationError(_('Invalid number of SOA fields.'))
if not is_valid_fqdn(content[0]):
raise RecordContentValidationError(_('Invalid name server field.'))
if not is_valid_fqdn(content[1]):
raise RecordContentValidationError(_('Invalid email address field.'))
try:
if not(0 <= int(content[2]) <= 4294967295):
raise ValueError
except ValueError:
raise RecordContentValidationError(_('Invalid serial number field.'))
interval_fields = content[3:]
for i, field_name in enumerate(('refresh', 'retry', 'expiry', 'min-ttl')):
try:
if not(-2147483647 <= int(interval_fields[i]) <= 2147483647):
raise ValueError
except ValueError:
raise RecordContentValidationError(_('Invalid %(field_name)s field.') % {'field_name': field_name})
DNS_RECORD_VALIDATORS = {
Record.A: ARecordValidator,
Record.AAAA: AAAARecordValidator,
Record.CNAME: CNAMERecordValidator,
Record.MX: MXRecordValidator,
Record.NS: NSRecordValidator,
Record.TXT: TXTRecordValidator,
Record.PTR: PTRRecordValidator,
Record.SRV: SRVRecordValidator,
Record.SOA: SOARecordValidator,
}
def get_dns_record_validator(record_type):
return DNS_RECORD_VALIDATORS.get(record_type, BaseRecordValidator)
def run_record_validator(domain, record_type, record_name, record_content):
validator_class = get_dns_record_validator(record_type)
return validator_class(domain, record_name, record_content)()
| apache-2.0 | -6,718,438,184,819,827,000 | 30.398104 | 115 | 0.640453 | false |
Azure/azure-sdk-for-python | sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/operations/_sql_pool_metadata_sync_configs_operations.py | 1 | 9326 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class SqlPoolMetadataSyncConfigsOperations(object):
"""SqlPoolMetadataSyncConfigsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.synapse.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
workspace_name, # type: str
sql_pool_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.MetadataSyncConfig"]
"""Get SQL pool metadata sync config.
Get the metadata sync configuration for a SQL pool.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MetadataSyncConfig, or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.MetadataSyncConfig or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.MetadataSyncConfig"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'sqlPoolName': self._serialize.url("sql_pool_name", sql_pool_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('MetadataSyncConfig', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/metadataSync/config'} # type: ignore
def create(
self,
resource_group_name, # type: str
workspace_name, # type: str
sql_pool_name, # type: str
metadata_sync_configuration, # type: "_models.MetadataSyncConfig"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.MetadataSyncConfig"]
"""Set SQL pool metadata sync config.
Set the metadata sync configuration for a SQL pool.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:param metadata_sync_configuration: Metadata sync configuration.
:type metadata_sync_configuration: ~azure.mgmt.synapse.models.MetadataSyncConfig
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MetadataSyncConfig, or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.MetadataSyncConfig or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.MetadataSyncConfig"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'sqlPoolName': self._serialize.url("sql_pool_name", sql_pool_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(metadata_sync_configuration, 'MetadataSyncConfig')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('MetadataSyncConfig', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/metadataSync/config'} # type: ignore
| mit | 7,013,729,555,727,071,000 | 48.343915 | 213 | 0.658374 | false |
pombredanne/anvil | anvil/actions/test.py | 1 | 2048 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import colorizer
from anvil import log
from anvil.actions import base as action
LOG = log.getLogger(__name__)
class TestAction(action.Action):
@property
def lookup_name(self):
return 'test'
def _run(self, persona, component_order, instances):
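        # Two phases: first install all dependencies through the distro's
        # dependency handler (driven by the "general" pseudo-component), then
        # invoke run_tests() on each component in the given order.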
dependency_handler_class = self.distro.dependency_handler_class
dependency_handler = dependency_handler_class(self.distro,
self.root_dir,
instances.values())
general_package = "general"
self._run_phase(
action.PhaseFunctors(
start=lambda i: LOG.info("Installing packages"),
run=lambda i: dependency_handler.install_all_deps(),
end=None,
),
[general_package],
{general_package: instances[general_package]},
"package-install-all-deps"
# no removals
)
self._run_phase(
action.PhaseFunctors(
start=lambda i: LOG.info('Running tests of component %s.',
colorizer.quote(i.name)),
run=lambda i: i.run_tests(),
end=None,
),
component_order,
instances,
None,
)
| apache-2.0 | -9,080,190,932,409,066,000 | 34.929825 | 78 | 0.576172 | false |
escapewindow/mozharness | configs/unittests/win_unittest.py | 1 | 7148 | import os
import sys
# OS Specifics
ABS_WORK_DIR = os.path.join(os.getcwd(), "build")
BINARY_PATH = os.path.join(ABS_WORK_DIR, "firefox", "firefox.exe")
INSTALLER_PATH = os.path.join(ABS_WORK_DIR, "installer.zip")
XPCSHELL_NAME = 'xpcshell.exe'
EXE_SUFFIX = '.exe'
DISABLE_SCREEN_SAVER = False
ADJUST_MOUSE_AND_SCREEN = True
#####
config = {
"buildbot_json_path": "buildprops.json",
"exes": {
'python': sys.executable,
'virtualenv': [sys.executable, 'c:/mozilla-build/buildbotve/virtualenv.py'],
'hg': 'c:/mozilla-build/hg/hg',
'mozinstall': ['%s/build/venv/scripts/python' % os.getcwd(),
'%s/build/venv/scripts/mozinstall-script.py' % os.getcwd()],
},
###
"installer_path": INSTALLER_PATH,
"binary_path": BINARY_PATH,
"xpcshell_name": XPCSHELL_NAME,
"virtualenv_path": 'venv',
"virtualenv_python_dll": os.path.join(os.path.dirname(sys.executable), "python27.dll"),
"find_links": [
"http://pypi.pvt.build.mozilla.org/pub",
"http://pypi.pub.build.mozilla.org/pub",
],
"pip_index": False,
"exe_suffix": EXE_SUFFIX,
"run_file_names": {
"mochitest": "runtests.py",
"webapprt": "runtests.py",
"reftest": "runreftest.py",
"xpcshell": "runxpcshelltests.py",
"cppunittest": "runcppunittests.py",
"jittest": "jit_test.py",
"mozbase": "test.py"
},
"minimum_tests_zip_dirs": ["bin/*", "certs/*", "modules/*", "mozbase/*", "config/*"],
"specific_tests_zip_dirs": {
"mochitest": ["mochitest/*"],
"webapprt": ["mochitest/*"],
"reftest": ["reftest/*", "jsreftest/*"],
"xpcshell": ["xpcshell/*"],
"cppunittest": ["cppunittests/*"],
"jittest": ["jit-test/*"],
"mozbase": ["mozbase/*"]
},
# test harness options are located in the gecko tree
"in_tree_config": "config/mozharness/windows_config.py",
# local mochi suites
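    # each suite name maps to the extra command-line options appended to the
    # harness invocation (runtests.py, runreftest.py, ...)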
"all_mochitest_suites":
{
"plain1": ["--total-chunks=5", "--this-chunk=1", "--chunk-by-dir=4"],
"plain2": ["--total-chunks=5", "--this-chunk=2", "--chunk-by-dir=4"],
"plain3": ["--total-chunks=5", "--this-chunk=3", "--chunk-by-dir=4"],
"plain4": ["--total-chunks=5", "--this-chunk=4", "--chunk-by-dir=4"],
"plain5": ["--total-chunks=5", "--this-chunk=5", "--chunk-by-dir=4"],
"plain": [],
"plain-chunked": ["--chunk-by-dir=4"],
"chrome": ["--chrome"],
"browser-chrome": ["--browser-chrome"],
"browser-chrome-1": ["--browser-chrome", "--chunk-by-dir=5", "--total-chunks=3", "--this-chunk=1"],
"browser-chrome-2": ["--browser-chrome", "--chunk-by-dir=5", "--total-chunks=3", "--this-chunk=2"],
"browser-chrome-3": ["--browser-chrome", "--chunk-by-dir=5", "--total-chunks=3", "--this-chunk=3"],
"browser-chrome-chunked": ["--browser-chrome", "--chunk-by-dir=5"],
"mochitest-devtools-chrome": ["--browser-chrome", "--subsuite=devtools"],
"mochitest-devtools-chrome-1": ["--browser-chrome", "--subsuite=devtools", "--chunk-by-dir=5", "--total-chunks=3", "--this-chunk=1"],
"mochitest-devtools-chrome-2": ["--browser-chrome", "--subsuite=devtools", "--chunk-by-dir=5", "--total-chunks=3", "--this-chunk=2"],
"mochitest-devtools-chrome-3": ["--browser-chrome", "--subsuite=devtools", "--chunk-by-dir=5", "--total-chunks=3", "--this-chunk=3"],
"mochitest-devtools-chrome-chunked": ["--browser-chrome", "--subsuite=devtools", "--chunk-by-dir=5"],
"mochitest-metro-chrome": ["--browser-chrome", "--metro-immersive"],
"a11y": ["--a11y"],
"plugins": ['--setpref=dom.ipc.plugins.enabled=false',
'--setpref=dom.ipc.plugins.enabled.x86_64=false',
'--ipcplugins']
},
# local webapprt suites
"all_webapprt_suites": {
"chrome": ["--webapprt-chrome", "--browser-arg=-test-mode"],
"content": ["--webapprt-content"]
},
# local reftest suites
"all_reftest_suites": {
"reftest": ["tests/reftest/tests/layout/reftests/reftest.list"],
"crashtest": ["tests/reftest/tests/testing/crashtest/crashtests.list"],
"jsreftest": ["--extra-profile-file=tests/jsreftest/tests/user.js", "tests/jsreftest/tests/jstests.list"],
"reftest-ipc": ['--setpref=browser.tabs.remote=true',
'--setpref=browser.tabs.remote.autostart=true',
'--setpref=layers.async-pan-zoom.enabled=true',
'tests/reftest/tests/layout/reftests/reftest-sanity/reftest.list'],
"reftest-no-accel": ["--setpref=gfx.direct2d.disabled=true", "--setpref=layers.acceleration.disabled=true",
"tests/reftest/tests/layout/reftests/reftest.list"],
"reftest-omtc": ["--setpref=layers.offmainthreadcomposition.enabled=true",
"tests/reftest/tests/layout/reftests/reftest.list"],
"crashtest-ipc": ['--setpref=browser.tabs.remote=true',
'--setpref=browser.tabs.remote.autostart=true',
'--setpref=layers.async-pan-zoom.enabled=true',
'tests/reftest/tests/testing/crashtest/crashtests.list'],
},
"all_xpcshell_suites": {
"xpcshell": ["--manifest=tests/xpcshell/tests/all-test-dirs.list",
"%(abs_app_dir)s/" + XPCSHELL_NAME]
},
"all_cppunittest_suites": {
"cppunittest": ['tests/cppunittests']
},
"all_jittest_suites": {
"jittest": []
},
"all_mozbase_suites": {
"mozbase": []
},
"run_cmd_checks_enabled": True,
"preflight_run_cmd_suites": [
# NOTE 'enabled' is only here while we have unconsolidated configs
{
"name": "disable_screen_saver",
"cmd": ["xset", "s", "off", "s", "reset"],
"architectures": ["32bit", "64bit"],
"halt_on_failure": False,
"enabled": DISABLE_SCREEN_SAVER
},
{
"name": "run mouse & screen adjustment script",
"cmd": [
# when configs are consolidated this python path will only show
# for windows.
sys.executable,
"../scripts/external_tools/mouse_and_screen_resolution.py",
"--configuration-url",
"https://hg.mozilla.org/%(repo_path)s/raw-file/%(revision)s/" +
"testing/machine-configuration.json"],
"architectures": ["32bit"],
"halt_on_failure": True,
"enabled": ADJUST_MOUSE_AND_SCREEN
},
],
"repos": [{"repo": "https://hg.mozilla.org/build/tools"}],
"vcs_output_timeout": 1000,
"minidump_stackwalk_path": "%(abs_work_dir)s/tools/breakpad/win32/minidump_stackwalk.exe",
"minidump_save_path": "%(abs_work_dir)s/../minidumps",
"buildbot_max_log_size": 52428800,
"default_blob_upload_servers": [
"https://blobupload.elasticbeanstalk.com",
],
"blob_uploader_auth_file": os.path.join(os.getcwd(), "oauth.txt"),
}
| mpl-2.0 | -4,124,509,830,676,141,000 | 46.026316 | 141 | 0.563095 | false |
airtonix/django-server-status | server_status/__init__.py | 1 | 1363 |
def autodiscover():
import copy
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
from .conf import settings
from .registry import plugins
"""
Auto-discover INSTALLED_APPS plugin modules and fail silently when
not present. This forces an import on them to register the plugin.
"""
for app in settings.INSTALLED_APPS:
mod = import_module(app)
# Attempt to import the app's plugin module.
try:
before_import_registry = copy.copy(plugins._registry)
name = '{}.{}'.format(app, settings.SERVER_STATUS_PLUGIN_MODULE_NAME)
import_module(name)
except Exception as error:
# Reset the model registry to the state before the last import as
# this import will have to reoccur on the next request and this
# could raise NotRegistered and AlreadyRegistered exceptions
# (see #8245).
plugins._registry = before_import_registry
# Decide whether to bubble up this error. If the app just
# doesn't have a plugin module, we can ignore the error
# attempting to import it, otherwise we want it to bubble up.
if module_has_submodule(mod, settings.SERVER_STATUS_PLUGIN_MODULE_NAME):
raise | mit | -2,577,932,855,770,941,000 | 40.333333 | 84 | 0.648569 | false |
Nic30/hwtLib | hwtLib/peripheral/usb/usb2/utmi_agent_test.py | 1 | 1608 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from hwt.interfaces.utils import addClkRstn
from hwt.synthesizer.unit import Unit
from hwtLib.peripheral.usb.usb2.ulpi_agent_test import UlpiAgentTC, \
UlpiUsbAgentTC
from hwtLib.peripheral.usb.usb2.utmi import Utmi_8b
from hwt.simulator.simTestCase import SimTestCase
from hwtLib.peripheral.usb.usb2.utmi_usb_agent import UtmiUsbAgent
from hwtLib.peripheral.usb.descriptors.cdc import get_default_usb_cdc_vcp_descriptors
class UtmiWire(Unit):
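    """
    Simple pass-through unit: the host-side UTMI interface is forwarded
    unchanged to the device side.
    """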
def _declr(self):
addClkRstn(self)
self.host = Utmi_8b()
self.dev = Utmi_8b()._m()
def _impl(self):
self.dev(self.host)
class UtmiAgentTC(UlpiAgentTC):
@classmethod
def setUpClass(cls):
cls.u = UtmiWire()
cls.compileSim(cls.u)
def format_pid_before_tx(self, pid: int):
return int(pid)
class UtmiUsbAgentTC(UlpiUsbAgentTC):
@classmethod
def setUpClass(cls):
cls.u = u = UtmiWire()
cls.compileSim(u)
def setUp(self):
SimTestCase.setUp(self)
u = self.u
u.host._ag = UtmiUsbAgent(self.rtl_simulator, u.host)
u.dev._ag = UtmiUsbAgent(self.rtl_simulator, u.dev)
u.dev._ag.descriptors = get_default_usb_cdc_vcp_descriptors()
UtmiAgentTCs = [
UtmiAgentTC,
UtmiUsbAgentTC,
]
if __name__ == '__main__':
import unittest
suite = unittest.TestSuite()
# suite.addTest(UtmiAgentTC("test_link_to_phy"))
for tc in UtmiAgentTCs:
suite.addTest(unittest.makeSuite(tc))
runner = unittest.TextTestRunner(verbosity=3)
runner.run(suite)
| mit | -7,891,177,194,865,482,000 | 24.52381 | 85 | 0.669776 | false |
linkinwong/word2vec | src/crf-paper-script/chunker12_ntancestor_sgd.py | 1 | 1524 | #!/usr/bin/env python
"""
A feature extractor for chunking.
__author__ = linlin
"""
# Separator of field values.
separator = ' '
# Field names of the input data.
fields = 'y w pos token sem part pp p n nn nont'
# Attribute templates.
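# Each template is a tuple of (field, offset) pairs; crfutils expands every
# template into one attribute per token by joining the values of the listed
# fields at the given offsets relative to the current token.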
templates = (
(('w', -2), ),
(('w', -1), ),
(('w', 0), ),
(('w', 1), ),
(('w', 2), ),
(('w', -1), ('w', 0)),
(('w', 0), ('w', 1)),
(('pos', -2), ),
(('pos', -1), ),
(('pos', 0), ),
(('pos', 1), ),
(('pos', 2), ),
(('pos', -2), ('pos', -1)),
(('pos', -1), ('pos', 0)),
(('pos', 0), ('pos', 1)),
(('pos', 1), ('pos', 2)),
(('pos', -2), ('pos', -1), ('pos', 0)),
(('pos', -1), ('pos', 0), ('pos', 1)),
(('pos', 0), ('pos', 1), ('pos', 2)),
(('sem', -2),),
(('sem', -1),),
(('sem', 0),),
(('sem', 1),),
(('sem', 2),),
(('sem', -2), ('sem', -1)),
(('sem', -1), ('sem', 0)),
(('sem', 0), ('sem', 1)),
(('sem', 1), ('sem', 2)),
(('part', 0),),
(('pp', 0),),
(('p', 0),),
(('n', 0),),
(('nn', 0),),
(('nont', 0),),
)
import crfutils
def feature_extractor(X):
# Apply attribute templates to obtain features (in fact, attributes)
crfutils.apply_templates(X, templates)
if X:
# Append BOS and EOS features manually
X[0]['F'].append('__BOS__') # BOS feature
X[-1]['F'].append('__EOS__') # EOS feature
if __name__ == '__main__':
crfutils.main(feature_extractor, fields=fields, sep=separator)
| apache-2.0 | -3,359,384,484,029,679,000 | 22.8125 | 72 | 0.399606 | false |
freakboy3742/django | tests/queries/test_q.py | 8 | 4747 | from django.db.models import BooleanField, Exists, F, OuterRef, Q
from django.db.models.expressions import RawSQL
from django.test import SimpleTestCase
from .models import Tag
class QTests(SimpleTestCase):
def test_combine_and_empty(self):
q = Q(x=1)
self.assertEqual(q & Q(), q)
self.assertEqual(Q() & q, q)
q = Q(x__in={}.keys())
self.assertEqual(q & Q(), q)
self.assertEqual(Q() & q, q)
def test_combine_and_both_empty(self):
self.assertEqual(Q() & Q(), Q())
def test_combine_or_empty(self):
q = Q(x=1)
self.assertEqual(q | Q(), q)
self.assertEqual(Q() | q, q)
q = Q(x__in={}.keys())
self.assertEqual(q | Q(), q)
self.assertEqual(Q() | q, q)
def test_combine_empty_copy(self):
base_q = Q(x=1)
tests = [
base_q | Q(),
Q() | base_q,
base_q & Q(),
Q() & base_q,
]
for i, q in enumerate(tests):
with self.subTest(i=i):
self.assertEqual(q, base_q)
self.assertIsNot(q, base_q)
def test_combine_or_both_empty(self):
self.assertEqual(Q() | Q(), Q())
def test_combine_not_q_object(self):
obj = object()
q = Q(x=1)
with self.assertRaisesMessage(TypeError, str(obj)):
q | obj
with self.assertRaisesMessage(TypeError, str(obj)):
q & obj
def test_combine_negated_boolean_expression(self):
tagged = Tag.objects.filter(category=OuterRef('pk'))
tests = [
Q() & ~Exists(tagged),
Q() | ~Exists(tagged),
]
for q in tests:
with self.subTest(q=q):
self.assertIs(q.negated, True)
def test_deconstruct(self):
q = Q(price__gt=F('discounted_price'))
path, args, kwargs = q.deconstruct()
self.assertEqual(path, 'django.db.models.Q')
self.assertEqual(args, (('price__gt', F('discounted_price')),))
self.assertEqual(kwargs, {})
def test_deconstruct_negated(self):
q = ~Q(price__gt=F('discounted_price'))
path, args, kwargs = q.deconstruct()
self.assertEqual(args, (('price__gt', F('discounted_price')),))
self.assertEqual(kwargs, {'_negated': True})
def test_deconstruct_or(self):
q1 = Q(price__gt=F('discounted_price'))
q2 = Q(price=F('discounted_price'))
q = q1 | q2
path, args, kwargs = q.deconstruct()
self.assertEqual(args, (
('price__gt', F('discounted_price')),
('price', F('discounted_price')),
))
self.assertEqual(kwargs, {'_connector': 'OR'})
def test_deconstruct_and(self):
q1 = Q(price__gt=F('discounted_price'))
q2 = Q(price=F('discounted_price'))
q = q1 & q2
path, args, kwargs = q.deconstruct()
self.assertEqual(args, (
('price__gt', F('discounted_price')),
('price', F('discounted_price')),
))
self.assertEqual(kwargs, {})
def test_deconstruct_multiple_kwargs(self):
q = Q(price__gt=F('discounted_price'), price=F('discounted_price'))
path, args, kwargs = q.deconstruct()
self.assertEqual(args, (
('price', F('discounted_price')),
('price__gt', F('discounted_price')),
))
self.assertEqual(kwargs, {})
def test_deconstruct_nested(self):
q = Q(Q(price__gt=F('discounted_price')))
path, args, kwargs = q.deconstruct()
self.assertEqual(args, (Q(price__gt=F('discounted_price')),))
self.assertEqual(kwargs, {})
def test_deconstruct_boolean_expression(self):
expr = RawSQL('1 = 1', BooleanField())
q = Q(expr)
_, args, kwargs = q.deconstruct()
self.assertEqual(args, (expr,))
self.assertEqual(kwargs, {})
def test_reconstruct(self):
q = Q(price__gt=F('discounted_price'))
path, args, kwargs = q.deconstruct()
self.assertEqual(Q(*args, **kwargs), q)
def test_reconstruct_negated(self):
q = ~Q(price__gt=F('discounted_price'))
path, args, kwargs = q.deconstruct()
self.assertEqual(Q(*args, **kwargs), q)
def test_reconstruct_or(self):
q1 = Q(price__gt=F('discounted_price'))
q2 = Q(price=F('discounted_price'))
q = q1 | q2
path, args, kwargs = q.deconstruct()
self.assertEqual(Q(*args, **kwargs), q)
def test_reconstruct_and(self):
q1 = Q(price__gt=F('discounted_price'))
q2 = Q(price=F('discounted_price'))
q = q1 & q2
path, args, kwargs = q.deconstruct()
self.assertEqual(Q(*args, **kwargs), q)
| bsd-3-clause | -6,501,565,920,460,780,000 | 32.195804 | 75 | 0.544765 | false |
frol/django-thumbnail-works | src/thumbnail_works/images.py | 1 | 7573 |
import os
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
from PIL import Image, ImageFilter
except ImportError:
import Image
import ImageFilter
from cropresize2 import CM_AUTO, crop_resize
from django.core.files.base import ContentFile
from thumbnail_works import settings
from thumbnail_works.exceptions import ThumbnailOptionError, ThumbnailWorksError, NoAccessToImage
from thumbnail_works.utils import get_width_height_from_string
class ImageProcessor:
"""Adds image processing support to ImageFieldFile or derived classes.
Required instance attributes::
self.identifier
self.proc_opts
self.name
self.storage
"""
DEFAULT_OPTIONS = {
'size': None,
'sharpen': False,
'detail': False,
'upscale': False,
'crop': CM_AUTO,
'format': settings.THUMBNAILS_FORMAT,
}
def setup_image_processing_options(self, proc_opts):
"""Sets the image processing options as an attribute of the
ImageFieldFile instance.
If ``proc_opts`` is ``None``, then ``self.proc_opts`` is also set to
``None``. This is allowed in favor of the source image which may not be
processed.
This method checks the provided options and also ensures that the
final dictionary contains all the supported options with a default
or a user-defined value.
"""
if proc_opts is None:
if self.identifier is not None: # self is a thumbnail
raise ThumbnailOptionError('It is not possible to set the \
image processing options to None on thumbnails')
self.proc_opts = None
elif not isinstance(proc_opts, dict):
raise ThumbnailOptionError('A dictionary object is required')
else:
for option in proc_opts.keys():
if option not in self.DEFAULT_OPTIONS.keys():
raise ThumbnailOptionError('Invalid thumbnail option `%s`' % option)
self.proc_opts = self.DEFAULT_OPTIONS.copy()
self.proc_opts.update(proc_opts)
def get_image_extension(self):
"""Returns the extension in accordance to the image format.
If the image processing options ``self.proc_opts`` is not a dict,
None is returned.
"""
if not isinstance(self.proc_opts, dict):
return
ext = self.proc_opts['format'].lower()
if ext == 'jpeg':
return '.jpg'
return '.%s' % ext
def generate_image_name(self, name, force_ext=None):
"""Generates a path for the image file taking the format into account.
This method should be used by both the source image and thumbnails
to get their ``name`` attribute.
Arguments
---------
``name``
A relative path to MEDIA_ROOT. ``name`` cannot be empty or None.
In such a case a ``ThumbnailWorksError`` is raised.
``force_ext``
Can be used to force a specific extension. By default, the extension
is derived from the user-specified image format and is generated by
the ``get_image_extension()`` method.
Path transformation logic
-------------------------
Path transformation logic for source image and thumbnails.
- Assuming ``name = 'images/photo.jpg'``:
- source: images/photo.<extension>
- thumbnail: images/<THUMBNAILS_DIRNAME>/photo.<identifier>.<extension>
"""
if not name:
            raise ThumbnailWorksError('The provided name is not usable: "%s"' % name)
root_dir = os.path.dirname(name) # images
filename = os.path.basename(name) # photo.jpg
base_filename, default_ext = os.path.splitext(filename)
if force_ext is not None:
ext = force_ext
else:
ext = self.get_image_extension()
if ext is None:
ext = default_ext
if self.identifier is None: # For source images
image_filename = '%s%s' % (base_filename, ext)
return os.path.join(root_dir, image_filename)
else: # For thumbnails
image_filename = '%s.%s%s' % (base_filename, self.identifier, ext)
if settings.THUMBNAILS_DIRNAME:
return os.path.join(root_dir, settings.THUMBNAILS_DIRNAME, image_filename)
return os.path.join(root_dir, image_filename)
def get_image_content(self):
"""Returns the image data as a ContentFile."""
try:
content = ContentFile(self.storage.open(self.name).read())
except IOError:
raise NoAccessToImage()
else:
return content
def process_image(self, content=None):
"""Processes and returns the image data."""
if content is None:
content = self.get_image_content()
        # Image.open() accepts a file-like object, but the buffer has to be
        # rewound to the start so the full data can be read.
content.seek(0)
im = Image.open(content)
# Convert to RGB format
if im.mode not in ('L', 'RGB', 'RGBA'):
im = im.convert('RGB')
# Process
im = self._fix_orientation(im)
size = self.proc_opts['size']
upscale = self.proc_opts['upscale']
crop = self.proc_opts['crop']
if size is not None:
new_size = get_width_height_from_string(size)
im = self._resize(im, new_size, upscale, crop)
sharpen = self.proc_opts['sharpen']
if sharpen:
im = self._sharpen(im)
detail = self.proc_opts['detail']
if detail:
im = self._detail(im)
# Save image data
format = self.proc_opts['format']
buffer = StringIO()
if format == 'JPEG':
im.save(buffer, format, quality=settings.THUMBNAILS_QUALITY)
else:
im.save(buffer, format)
data = buffer.getvalue()
return ContentFile(data)
# Processors
def _fix_orientation(self, im):
"""
Rotate (and/or flip) the thumbnail to respect the image EXIF orientation
data.
"""
try:
exif = im._getexif()
except AttributeError:
exif = None
if exif:
orientation = exif.get(0x0112)
if orientation == 2:
im = im.transpose(im.FLIP_LEFT_RIGHT)
elif orientation == 3:
im = im.rotate(180)
elif orientation == 4:
im = im.transpose(im.FLIP_TOP_BOTTOM)
elif orientation == 5:
im = im.rotate(-90).transpose(im.FLIP_LEFT_RIGHT)
elif orientation == 6:
im = im.rotate(-90)
elif orientation == 7:
im = im.rotate(90).transpose(im.FLIP_LEFT_RIGHT)
elif orientation == 8:
im = im.rotate(90)
return im
def _resize(self, im, size, upscale, crop_mode):
return crop_resize(im, size, exact_size=upscale, crop_mode=crop_mode)
def _sharpen(self, im):
return im.filter(ImageFilter.SHARPEN)
def _detail(self, im):
return im.filter(ImageFilter.DETAIL)
| apache-2.0 | 2,111,310,946,451,512,800 | 32.50885 | 97 | 0.564902 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2016_12_01/operations/_express_route_circuit_peerings_operations.py | 1 | 21909 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitPeeringsOperations(object):
"""ExpressRouteCircuitPeeringsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2016_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified peering from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCircuitPeering"
"""Gets the specified authorization from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitPeering, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2016_12_01.models.ExpressRouteCircuitPeering
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
peering_parameters, # type: "_models.ExpressRouteCircuitPeering"
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCircuitPeering"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(peering_parameters, 'ExpressRouteCircuitPeering')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
peering_parameters, # type: "_models.ExpressRouteCircuitPeering"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ExpressRouteCircuitPeering"]
"""Creates or updates a peering in the specified express route circuits.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param peering_parameters: Parameters supplied to the create or update express route circuit
peering operation.
:type peering_parameters: ~azure.mgmt.network.v2016_12_01.models.ExpressRouteCircuitPeering
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ExpressRouteCircuitPeering or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2016_12_01.models.ExpressRouteCircuitPeering]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeering"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
peering_parameters=peering_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
circuit_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ExpressRouteCircuitPeeringListResult"]
"""Gets all peerings in a specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCircuitPeeringListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2016_12_01.models.ExpressRouteCircuitPeeringListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeeringListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitPeeringListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings'} # type: ignore
| mit | 4,481,814,720,706,058,000 | 48.906606 | 220 | 0.643845 | false |
cscanlin/munger-builder | munger_builder/urls.py | 1 | 1769 | """munger_builder URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url, patterns
from django.contrib import admin
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url(r'^$', views.home_page, name='home_page'),
url(r'^app_index/$', views.app_index, name='app_index'),
url(r'^admin/', include('smuggler.urls')), # before admin url patterns!
url(r'^admin/', include(admin.site.urls)),
url(r'^contact/', include('contact_form.urls')),
url(r'^script_builder/', include('script_builder.urls')),
url(r'^script_runner_index/', include('script_runner.urls')),
url(r'^register/$', views.register, name='register'),
url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}),
url('^', include('django.contrib.auth.urls')),
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.MEDIA_ROOT,
}),
url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.STATIC_ROOT,
}),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| mit | -621,726,591,384,239,400 | 43.358974 | 78 | 0.651781 | false |
n3wb13/OpenNfrGui-5.0-1 | lib/python/Plugins/Extensions/NFR4XBoot/ubi_reader/ubifs/output.py | 5 | 4034 | import os
import struct
from ubifs.defines import *
from ubifs.misc import decompress
def dents(ubifs, inodes, dent_node, path = '', perms = False):
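    # Extract a single UBIFS directory entry: look up its inode and, based on the
    # entry type, recreate a directory (recursing into its child dentries), a
    # regular file (re-using os.link for additional hard links), a symlink, a
    # block/char device node, a FIFO or a socket under `path`, optionally
    # restoring ownership and permissions.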
inode = inodes[dent_node.inum]
dent_path = os.path.join(path, dent_node.name)
if dent_node.type == UBIFS_ITYPE_DIR:
try:
if not os.path.exists(dent_path):
os.mkdir(dent_path)
if perms:
set_file_perms(dent_path, inode)
except Exception as e:
ubifs.log.write('DIR Fail: %s' % e)
if 'dent' in inode:
for dnode in inode['dent']:
dents(ubifs, inodes, dnode, dent_path, perms)
elif dent_node.type == UBIFS_ITYPE_REG:
try:
if inode['ino'].nlink > 1:
if 'hlink' not in inode:
inode['hlink'] = dent_path
buf = process_reg_file(ubifs, inode, dent_path)
write_reg_file(dent_path, buf)
else:
os.link(inode['hlink'], dent_path)
else:
buf = process_reg_file(ubifs, inode, dent_path)
write_reg_file(dent_path, buf)
if perms:
set_file_perms(dent_path, inode)
except Exception as e:
ubifs.log.write('FILE Fail: %s' % e)
elif dent_node.type == UBIFS_ITYPE_LNK:
try:
os.symlink('%s' % inode['ino'].data, dent_path)
except Exception as e:
ubifs.log.write('SYMLINK Fail: %s : %s' % (inode['ino'].data, dent_path))
elif dent_node.type in [UBIFS_ITYPE_BLK, UBIFS_ITYPE_CHR]:
try:
dev = struct.unpack('<II', inode['ino'].data)[0]
if perms:
os.mknod(dent_path, inode['ino'].mode, dev)
if perms:
set_file_perms(path, inode)
else:
write_reg_file(dent_path, str(dev))
if perms:
set_file_perms(dent_path, inode)
except Exception as e:
ubifs.log.write('DEV Fail: %s : %s' % (dent_path, e))
elif dent_node.type == UBIFS_ITYPE_FIFO:
try:
os.mkfifo(dent_path, inode['ino'].mode)
if perms:
set_file_perms(dent_path, inode)
except Exception as e:
ubifs.log.write('FIFO Fail: %s : %s' % (dent_path, e))
elif dent_node.type == UBIFS_ITYPE_SOCK:
try:
write_reg_file(dent_path, '')
if perms:
set_file_perms(dent_path, inode)
except Exception as e:
ubifs.log.write('SOCK Fail: %s' % dent_path)
def set_file_perms(path, inode):
try:
os.chmod(path, inode['ino'].mode)
os.chown(path, inode['ino'].uid, inode['ino'].gid)
except:
raise Exception('Failed File Permissions: %s' % path)
def write_reg_file(path, data):
with open(path, 'wb') as f:
f.write(data)
def process_reg_file(ubifs, inode, path):
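    # Rebuild a regular file's contents from its data nodes: nodes are sorted by
    # block key, gaps between consecutive keys are sparse holes filled with zero
    # blocks, and each node's payload is decompressed before being appended.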
try:
buf = ''
if 'data' in inode:
compr_type = 0
sorted_data = sorted(inode['data'], key=lambda x: x.key['khash'])
last_khash = sorted_data[0].key['khash'] - 1
for data in sorted_data:
if data.key['khash'] - last_khash != 1:
while 1 != data.key['khash'] - last_khash:
buf += '\x00' * UBIFS_BLOCK_SIZE
last_khash += 1
compr_type = data.compr_type
ubifs.file.seek(data.offset)
d = ubifs.file.read(data.compr_len)
buf += decompress(compr_type, data.size, d)
last_khash = data.key['khash']
except Exception as e:
raise Exception('inode num:%s :%s' % (inode['ino'].key['ino_num'], e))
if inode['ino'].size > len(buf):
buf += '\x00' * (inode['ino'].size - len(buf))
return buf | gpl-2.0 | 2,170,450,015,451,508,500 | 33.716814 | 85 | 0.49182 | false |
scrapinghub/flatson | docs/conf.py | 1 | 8392 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# flatson documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import flatson
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Flatson'
copyright = u'2015, ScrapingHub'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = flatson.__version__
# The full version, including alpha/beta/rc tags.
release = flatson.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'flatsondoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'flatson.tex',
u'Flatson Documentation',
u'ScrapingHub', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'flatson',
u'Flatson Documentation',
[u'ScrapingHub'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'flatson',
u'Flatson Documentation',
u'ScrapingHub',
'flatson',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| bsd-3-clause | 8,846,287,868,338,261,000 | 29.516364 | 76 | 0.70448 | false |
karimbahgat/PyAgg | __private__/temp_dev/rendererclass.py | 1 | 8714 |
# Check dependencies
try:
import PIL, PIL.Image, PIL.ImageTk
except ImportError:
raise Exception("The renderer requires PIL or Pillow but could not find it")
try:
import aggdraw
except ImportError:
raise Exception("The renderer requires aggdraw but could not find it")
import itertools
def grouper(iterable, n):
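    # chunk a flat sequence into n-tuples, e.g. [x1, y1, x2, y2] -> (x1, y1), (x2, y2)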
args = [iter(iterable)] * n
return itertools.izip_longest(fillvalue=None, *args)
# Main class
class AggRenderer:
"""
This class is used to draw each feature with aggdraw as long as
it is given instructions via a color/size/options dictionary.
"""
def __init__(self):
self.sysfontfolders = dict([("windows","C:/Windows/Fonts/")])
self.fontfilenames = dict([("default", "TIMES.TTF"),
("times new roman","TIMES.TTF"),
("arial","ARIAL.TTF")])
def new_image(self, width, height, background=None, mode="RGBA"):
"""
This must be called before doing any rendering.
        Note: this replaces any previously drawn image, so be sure to
        retrieve the old image before calling this again to avoid losing work.
"""
self.img = PIL.Image.new(mode, (width, height), background)
self.width,self.height = width,height
self.drawer = aggdraw.Draw(self.img)
def coordinate_space(self, bbox):
"""
Check link for how to do this: http://docs.opencv.org/doc/tutorials/imgproc/imgtrans/warp_affine/warp_affine.html
"""
        # Build an affine transform that maps world/bbox coordinates to pixels:
        # pixel = scale * world + offset on each axis, where scale is the
        # pixels-per-world-unit ratio and offset shifts the bbox origin to (0, 0).
xleft,ytop,xright,ybottom = bbox
x2x = (xleft,xright)
y2y = (ybottom,ytop)
xwidth = max(x2x)-min(x2x)
yheight = max(y2y)-min(y2y)
#xprod = self.width * self.width / float(xwidth * self.width)
#yprod = self.height * self.height / float(yheight * self.height)
xprod = self.width / float(xwidth)
yprod = self.height / float(yheight)
xoffset = (-xleft / float(xwidth) * self.width) #- (xwidth * xprod)
yoffset = (-ytop / float(yheight) * self.height) #- (yheight * yprod)
transcoeffs = (xprod, 0, xoffset,
0, yprod, yoffset)
print transcoeffs
a,b,c,d,e,f = transcoeffs
for x,y in grouper([100,100, 900,100, 900,500, 100,500, 100,100], 2):
print (x,y),"=",(x*a + y*b + c, x*d + y*e + f)
self.drawer.settransform(transcoeffs)
def draw_point(self, xy, symbol="circle", **options):
"""
Draw a point as one of several symbol types.
"""
args = []
fillsize = options["fillsize"]
if options["outlinecolor"]:
pen = aggdraw.Pen(options["outlinecolor"], options["outlinewidth"])
args.append(pen)
if options["fillcolor"]:
brush = aggdraw.Brush(options["fillcolor"])
args.append(brush)
x,y = xy
bbox = [x-fillsize, y-fillsize, x+fillsize, y+fillsize]
if symbol == "circle":
self.drawer.ellipse(bbox, *args)
elif symbol == "square":
self.drawer.rectangle(bbox, *args)
def draw_lines(self, coords, **options):
"""
Connect a series of flattened coordinate points with one or more lines.
"""
path = aggdraw.Path()
def traverse_ring(coords):
# begin
coords = grouper(coords, 2)
startx,starty = next(coords)
path.moveto(startx, starty)
# connect to each successive point
for nextx,nexty in coords:
                path.lineto(nextx, nexty)
        # trace the supplied coordinates into the path; without this call the
        # path stays empty and nothing gets drawn
        traverse_ring(coords)
# get drawing tools from options
args = []
if options["outlinecolor"]:
pen = aggdraw.Pen(options["outlinecolor"], options["outlinewidth"])
args.append(pen)
if options["fillcolor"]:
brush = aggdraw.Brush(options["fillcolor"])
args.append(brush)
# draw the constructed path
self.drawer.path((0,0), path, *args)
def draw_polygon(self, coords, holes=[], **options):
"""
Draw polygon and holes with color fill.
Note: holes must be counterclockwise.
"""
path = aggdraw.Path()
def traverse_ring(coords):
# begin
coords = grouper(coords, 2)
startx,starty = next(coords)
path.moveto(startx, starty)
# connect to each successive point
for nextx,nexty in coords:
path.lineto(nextx, nexty)
path.close()
# first exterior
traverse_ring(coords)
# then holes
for hole in holes:
# !!! need to test for ring direction !!!
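            # reverse the hole's point order so it winds opposite to the exterior
            # ring, then flatten it back to x,y,x,y... for traverse_ring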
hole = (xory for point in reversed(tuple(grouper(hole, 2))) for xory in point)
traverse_ring(hole)
# options
args = []
if options["fillcolor"]:
fillbrush = aggdraw.Brush(options["fillcolor"])
args.append(fillbrush)
if options["outlinecolor"]:
outlinepen = aggdraw.Pen(options["outlinecolor"], options["outlinewidth"])
args.append(outlinepen)
self.drawer.path((0,0), path, *args)
def get_tkimage(self):
self.drawer.flush()
return PIL.ImageTk.PhotoImage(self.img)
def save(self, filepath):
self.drawer.flush()
self.img.save(filepath)
## def draw_text(self, relx, rely, text, options):
## """
## draws basic text, no effects
## """
## fontlocation = self.sysfontfolders[OSSYSTEM]+self.fontfilenames[options["textfont"]]
## font = aggdraw.Font(color=options["textcolor"], file=fontlocation, size=options["textsize"], opacity=options["textopacity"])
## fontwidth, fontheight = self.drawer.textsize(text, font)
## textanchor = options.get("textanchor")
## if textanchor:
## textanchor = textanchor.lower()
## if textanchor == "center":
## x = int(MAPWIDTH*relx) - int(fontwidth/2.0)
## y = int(MAPHEIGHT*rely) - int(fontheight/2.0)
## else:
## x = int(MAPWIDTH*relx) - int(fontwidth/2.0)
## y = int(MAPHEIGHT*rely) - int(fontheight/2.0)
## if "n" in textanchor:
## y = int(MAPHEIGHT*rely)
## elif "s" in textanchor:
## y = int(MAPHEIGHT*rely) - int(fontheight)
## if "e" in textanchor:
## x = int(MAPWIDTH*relx) - int(fontwidth)
## elif "w" in textanchor:
## x = int(MAPWIDTH*relx)
## if options.get("textboxfillcolor") or options.get("textboxoutlinecolor"):
## relfontwidth, relfontheight = (fontwidth/float(MAPWIDTH), fontheight/float(MAPHEIGHT))
## relxmid,relymid = (x/float(MAPWIDTH)+relfontwidth/2.0,y/float(MAPHEIGHT)+relfontheight/2.0)
## relupperleft = (relxmid-relfontwidth*options["textboxfillsize"]/2.0, relymid-relfontheight*options["textboxfillsize"]/2.0)
## relbottomright = (relxmid+relfontwidth*options["textboxfillsize"]/2.0, relymid+relfontheight*options["textboxfillsize"]/2.0)
## options["fillcolor"] = options["textboxfillcolor"]
## options["outlinecolor"] = options["textboxoutlinecolor"]
## options["outlinewidth"] = options["textboxoutlinewidth"]
## self.RenderRectangle(relupperleft, relbottomright, options)
## self.drawer.text((x,y), text, font)
if __name__ == "__main__":
# Test
import Tkinter as tk
window = tk.Tk()
label = tk.Label(window)
label.pack()
import random
def random_n(minval, maxval, n=1):
ns = (random.randrange(minval,maxval) for _ in xrange(n))
return tuple(ns)
renderer = AggRenderer()
renderer.new_image(300, 300)
renderer.coordinate_space([0,0,1000,600])
renderer.draw_polygon(coords=[100,100, 900,100, 900,500, 100,500, 100,100],
holes=[[400,400, 600,400, 600,450, 400,450, 400,400]],
fillcolor=random_n(0,222,n=3),
outlinecolor=random_n(0,222,n=3),
outlinewidth=10)
renderer.draw_point(xy=random_n(0, 300, n=2),
fillsize=22,
fillcolor=random_n(0,222,n=3),
outlinecolor=random_n(0,222,n=3),
outlinewidth=10)
img = renderer.get_tkimage()
label["image"] = label.img = img
window.mainloop()
| mit | 6,361,390,834,902,520,000 | 33.995984 | 138 | 0.56415 | false |
crossroadchurch/paul | openlp/plugins/alerts/__init__.py | 1 | 1670 | # -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2015 OpenLP Developers #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`alerts` module provides the Alerts plugin for producing impromptu on-screen announcements during a service.
"""
| gpl-2.0 | -5,792,317,587,671,682,000 | 68.583333 | 117 | 0.443713 | false |
nathanielksmith/done | parsedatetime/tests/TestUnits.py | 1 | 5406 | #!/usr/bin/env python
"""
Test parsing of units
"""
import unittest, time, datetime
import parsedatetime.parsedatetime as pt
# a special compare function is used to allow us to ignore the seconds as
# the running of the test could cross a minute boundary
def _compareResults(result, check):
target, t_flag = result
value, v_flag = check
t_yr, t_mth, t_dy, t_hr, t_min, _, _, _, _ = target
v_yr, v_mth, v_dy, v_hr, v_min, _, _, _, _ = value
return ((t_yr == v_yr) and (t_mth == v_mth) and (t_dy == v_dy) and
(t_hr == v_hr) and (t_min == v_min)) and (t_flag == v_flag)
class test(unittest.TestCase):
def setUp(self):
self.cal = pt.Calendar()
self.yr, self.mth, self.dy, self.hr, self.mn, self.sec, self.wd, self.yd, self.isdst = time.localtime()
def testMinutes(self):
s = datetime.datetime.now()
t = s + datetime.timedelta(minutes=1)
start = s.timetuple()
target = t.timetuple()
self.assertTrue(_compareResults(self.cal.parse('1 minute', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('1 minutes', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('1 min', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('1min', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('1 m', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('1m', start), (target, 2)))
def testHours(self):
s = datetime.datetime.now()
t = s + datetime.timedelta(hours=1)
start = s.timetuple()
target = t.timetuple()
self.assertTrue(_compareResults(self.cal.parse('1 hour', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('1 hours', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('1 hr', start), (target, 2)))
def testDays(self):
s = datetime.datetime.now()
t = s + datetime.timedelta(days=1)
start = s.timetuple()
target = t.timetuple()
self.assertTrue(_compareResults(self.cal.parse('1 day', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('1 days', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('1days', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('1 dy', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('1 d', start), (target, 1)))
def testNegativeDays(self):
s = datetime.datetime.now()
t = s + datetime.timedelta(days=-1)
start = s.timetuple()
target = t.timetuple()
self.assertTrue(_compareResults(self.cal.parse('-1 day', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('-1 days', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('-1days', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('-1 dy', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('-1 d', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('- 1 day', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('- 1 days', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('- 1days', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('- 1 dy', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('- 1 d', start), (target, 1)))
def testWeeks(self):
s = datetime.datetime.now()
t = s + datetime.timedelta(weeks=1)
start = s.timetuple()
target = t.timetuple()
self.assertTrue(_compareResults(self.cal.parse('1 week', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('1week', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('1 weeks', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('1 wk', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('1 w', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('1w', start), (target, 1)))
def testMonths(self):
s = datetime.datetime.now()
t = self.cal.inc(s, month=1)
start = s.timetuple()
target = t.timetuple()
self.assertTrue(_compareResults(self.cal.parse('1 month', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('1 months', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('1month', start), (target, 1)))
def testYears(self):
s = datetime.datetime.now()
t = self.cal.inc(s, year=1)
start = s.timetuple()
target = t.timetuple()
self.assertTrue(_compareResults(self.cal.parse('1 year', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('1 years', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('1 yr', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('1 y', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('1y', start), (target, 1)))
if __name__ == "__main__":
unittest.main()
| gpl-3.0 | -5,020,984,759,974,754,000 | 40.906977 | 111 | 0.608028 | false |
jefftc/changlab | Betsy/Betsy/modules/annotate_probes.py | 1 | 3787 | from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, in_data, out_attributes, user_options, num_cores,
outfile):
import StringIO
import arrayio
from genomicode import arrayplatformlib
from genomicode import parallel
from genomicode import filelib
from genomicode import AnnotationMatrix
from Betsy import module_utils as mlib
M = arrayio.read(in_data.identifier)
metadata = {}
# Add GENE_ID, GENE_SYMBOL, and DESCRIPTION. Figure out which
        # platforms provide each one of these.
CATEGORIES = [
arrayplatformlib.GENE_ID,
arrayplatformlib.GENE_SYMBOL,
# biomaRt doesn't convert description. So just ignore it
# for now.
# TODO: implement DESCRIPTION.
#arrayplatformlib.DESCRIPTION,
]
#all_platforms = arrayplatformlib.identify_all_platforms_of_matrix(M)
#assert all_platforms, "Unknown platform: %s" % in_data.identifier
#header, platform_name = all_platforms[0]
scores = arrayplatformlib.score_matrix(M)
scores = [x for x in scores if x.max_score >= 0.75]
assert scores, "I could not identify any platforms."
# Find all the platforms not in the matrix.
platforms = [
arrayplatformlib.find_platform_by_name(x.platform_name) for
x in scores]
categories = [x.category for x in platforms]
missing = [x for x in CATEGORIES if x not in categories]
score = scores[0]
platform = platforms[0]
to_add = [] # list of platform names
for category in missing:
x = arrayplatformlib.PLATFORMS
x = [x for x in x if x.category == category]
x = [x for x in x if x.bm_organism == platform.bm_organism]
x = [x for x in x if x.name != score.platform_name]
# Take the first one, if any.
if x:
to_add.append(x[0].name)
if to_add:
annotate = mlib.get_config(
"annotate_matrix", which_assert_file=True)
sq = parallel.quote
cmd = [
"python",
sq(annotate),
"--no_na",
"--header", sq(score.header),
]
for x in to_add:
x = ["--platform", sq(x)]
cmd.extend(x)
cmd.append(in_data.identifier)
cmd = " ".join(cmd)
data = parallel.sshell(cmd)
metadata["commands"] = [cmd]
assert data.find("Traceback") < 0, data
else:
data = open(in_data.identifier).read()
# Clean up the headers.
platform2pretty = {
"Entrez_ID_human" : "Gene ID",
"Entrez_Symbol_human" : "Gene Symbol",
"Entrez_ID_mouse" : "Gene ID",
"Entrez_Symbol_mouse" : "Gene Symbol",
}
handle = open(outfile, 'w')
header_written = False
for cols in filelib.read_cols(StringIO.StringIO(data)):
if not header_written:
cols = [platform2pretty.get(x, x) for x in cols]
cols = AnnotationMatrix.uniquify_headers(cols)
header_written = True
print >>handle, "\t".join(cols)
return metadata
def name_outfile(self, antecedents, user_options):
return "signal_annot.tdf"
#from Betsy import module_utils
#original_file = module_utils.get_inputid(antecedents.identifier)
#filename = 'signal_annot_' + original_file + '.tdf'
#return filename
| mit | -6,616,411,264,953,687,000 | 35.76699 | 77 | 0.552416 | false |
Azure/azure-sdk-for-python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/aio/operations/_disk_encryption_sets_operations.py | 1 | 35493 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DiskEncryptionSetsOperations:
"""DiskEncryptionSetsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2020_06_30.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
disk_encryption_set_name: str,
disk_encryption_set: "_models.DiskEncryptionSet",
**kwargs: Any
) -> "_models.DiskEncryptionSet":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskEncryptionSet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-30"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskEncryptionSetName': self._serialize.url("disk_encryption_set_name", disk_encryption_set_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(disk_encryption_set, 'DiskEncryptionSet')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
disk_encryption_set_name: str,
disk_encryption_set: "_models.DiskEncryptionSet",
**kwargs: Any
) -> AsyncLROPoller["_models.DiskEncryptionSet"]:
"""Creates or updates a disk encryption set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_encryption_set_name: The name of the disk encryption set that is being created. The
name can't be changed after the disk encryption set is created. Supported characters for the
name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters.
:type disk_encryption_set_name: str
:param disk_encryption_set: disk encryption set object supplied in the body of the Put disk
encryption set operation.
:type disk_encryption_set: ~azure.mgmt.compute.v2020_06_30.models.DiskEncryptionSet
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DiskEncryptionSet or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2020_06_30.models.DiskEncryptionSet]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskEncryptionSet"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
disk_encryption_set_name=disk_encryption_set_name,
disk_encryption_set=disk_encryption_set,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskEncryptionSetName': self._serialize.url("disk_encryption_set_name", disk_encryption_set_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
disk_encryption_set_name: str,
disk_encryption_set: "_models.DiskEncryptionSetUpdate",
**kwargs: Any
) -> "_models.DiskEncryptionSet":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskEncryptionSet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-30"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskEncryptionSetName': self._serialize.url("disk_encryption_set_name", disk_encryption_set_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(disk_encryption_set, 'DiskEncryptionSetUpdate')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
disk_encryption_set_name: str,
disk_encryption_set: "_models.DiskEncryptionSetUpdate",
**kwargs: Any
) -> AsyncLROPoller["_models.DiskEncryptionSet"]:
"""Updates (patches) a disk encryption set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_encryption_set_name: The name of the disk encryption set that is being created. The
name can't be changed after the disk encryption set is created. Supported characters for the
name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters.
:type disk_encryption_set_name: str
:param disk_encryption_set: disk encryption set object supplied in the body of the Patch disk
encryption set operation.
:type disk_encryption_set: ~azure.mgmt.compute.v2020_06_30.models.DiskEncryptionSetUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DiskEncryptionSet or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2020_06_30.models.DiskEncryptionSet]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskEncryptionSet"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
disk_encryption_set_name=disk_encryption_set_name,
disk_encryption_set=disk_encryption_set,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskEncryptionSetName': self._serialize.url("disk_encryption_set_name", disk_encryption_set_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'} # type: ignore
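    # Illustrative usage sketch, not part of the generated client. It assumes the
    # async multi-API client from azure.mgmt.compute.aio and an azure-identity
    # credential; the resource group, set name and tag values below are made up.
    #
    #   from azure.identity.aio import DefaultAzureCredential
    #   from azure.mgmt.compute.aio import ComputeManagementClient
    #   from azure.mgmt.compute.v2020_06_30.models import DiskEncryptionSetUpdate
    #
    #   async def update_des_tags(subscription_id: str) -> None:
    #       async with DefaultAzureCredential() as credential:
    #           async with ComputeManagementClient(credential, subscription_id) as client:
    #               poller = await client.disk_encryption_sets.begin_update(
    #                   resource_group_name="example-rg",
    #                   disk_encryption_set_name="example-des",
    #                   disk_encryption_set=DiskEncryptionSetUpdate(tags={"env": "test"}),
    #               )
    #               updated = await poller.result()  # DiskEncryptionSet once the LRO finishes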
async def get(
self,
resource_group_name: str,
disk_encryption_set_name: str,
**kwargs: Any
) -> "_models.DiskEncryptionSet":
"""Gets information about a disk encryption set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_encryption_set_name: The name of the disk encryption set that is being created. The
name can't be changed after the disk encryption set is created. Supported characters for the
name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters.
:type disk_encryption_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DiskEncryptionSet, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_06_30.models.DiskEncryptionSet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskEncryptionSet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-30"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskEncryptionSetName': self._serialize.url("disk_encryption_set_name", disk_encryption_set_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'} # type: ignore
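    # Illustrative usage sketch (hypothetical names): unlike begin_update, get is a
    # plain coroutine with no poller involved; it returns the DiskEncryptionSet
    # model directly.
    #
    #   des = await client.disk_encryption_sets.get(
    #       resource_group_name="example-rg",
    #       disk_encryption_set_name="example-des",
    #   )
    #   print(des.name, des.encryption_type)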
async def _delete_initial(
self,
resource_group_name: str,
disk_encryption_set_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-30"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskEncryptionSetName': self._serialize.url("disk_encryption_set_name", disk_encryption_set_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
disk_encryption_set_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a disk encryption set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_encryption_set_name: The name of the disk encryption set that is being created. The
name can't be changed after the disk encryption set is created. Supported characters for the
name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters.
:type disk_encryption_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
disk_encryption_set_name=disk_encryption_set_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskEncryptionSetName': self._serialize.url("disk_encryption_set_name", disk_encryption_set_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'} # type: ignore
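    # Illustrative usage sketch (hypothetical names): begin_delete returns an
    # AsyncLROPoller[None]; awaiting result() simply blocks until the long-running
    # deletion completes and yields None.
    #
    #   poller = await client.disk_encryption_sets.begin_delete(
    #       resource_group_name="example-rg",
    #       disk_encryption_set_name="example-des",
    #   )
    #   await poller.result()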
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.DiskEncryptionSetList"]:
"""Lists all the disk encryption sets under a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DiskEncryptionSetList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2020_06_30.models.DiskEncryptionSetList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskEncryptionSetList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-30"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('DiskEncryptionSetList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets'} # type: ignore
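    # Illustrative usage sketch (hypothetical names): the list_* methods are not
    # coroutines; they return an AsyncItemPaged immediately, and iterating it with
    # `async for` fetches further pages on demand via the nextLink URL.
    #
    #   async for des in client.disk_encryption_sets.list_by_resource_group("example-rg"):
    #       print(des.name, des.location)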
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.DiskEncryptionSetList"]:
"""Lists all the disk encryption sets under a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DiskEncryptionSetList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2020_06_30.models.DiskEncryptionSetList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskEncryptionSetList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-30"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('DiskEncryptionSetList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskEncryptionSets'} # type: ignore
def list_associated_resources(
self,
resource_group_name: str,
disk_encryption_set_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ResourceUriList"]:
"""Lists all resources that are encrypted with this disk encryption set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_encryption_set_name: The name of the disk encryption set that is being created. The
name can't be changed after the disk encryption set is created. Supported characters for the
name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters.
:type disk_encryption_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceUriList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2020_06_30.models.ResourceUriList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceUriList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-30"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_associated_resources.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskEncryptionSetName': self._serialize.url("disk_encryption_set_name", disk_encryption_set_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ResourceUriList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_associated_resources.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}/associatedResources'} # type: ignore
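    # Illustrative usage sketch (hypothetical names): this pager yields the entries
    # of ResourceUriList.value, i.e. ARM resource ID strings of the resources that
    # are encrypted with the given disk encryption set.
    #
    #   async for resource_id in client.disk_encryption_sets.list_associated_resources(
    #           "example-rg", "example-des"):
    #       print(resource_id)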
| mit | 2,431,537,871,508,831,000 | 50.588663 | 225 | 0.647564 | false |
SymbiFlow/prjxray | fuzzers/015-clb-nffmux/top.py | 1 | 9314 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import os, random
random.seed(0)
from prjxray import util
from prjxray import verilog
# INCREMENT is the amount of additional CLBN to be instantiated in the design.
# This makes the fuzzer compilation more robust against failures.
INCREMENT = os.getenv('CLBN', 0)
CLBN = 600 + int(INCREMENT)
print('//Requested CLBs: %s' % str(CLBN))
def gen_slicels():
for _tile_name, site_name, _site_type in util.get_roi().gen_sites(
['SLICEL']):
yield site_name
def gen_slicems():
for _tile_name, site_name, _site_type in util.get_roi().gen_sites(
['SLICEM']):
yield site_name
DIN_N = CLBN * 8
DOUT_N = CLBN * 8
verilog.top_harness(DIN_N, DOUT_N)
f = open('params.csv', 'w')
f.write('module,loc,n\n')
slicels = gen_slicels()
slicems = gen_slicems()
print(
'module roi(input clk, input [%d:0] din, output [%d:0] dout);' %
(DIN_N - 1, DOUT_N - 1))
for i in range(CLBN):
use_slicem = (i % 2) == 0
if use_slicem:
loc = next(slicems)
variants = ['AX', 'CY', 'F78', 'O5', 'O6', 'XOR', 'MC31']
else:
loc = next(slicels)
variants = ['AX', 'CY', 'F78', 'O5', 'O6', 'XOR']
modules = ['clb_NFFMUX_' + x for x in variants]
module = random.choice(modules)
if module == 'clb_NFFMUX_MC31':
n = 3 # Only DOUTMUX has MC31 input
elif module == 'clb_NFFMUX_F78':
n = random.randint(0, 2)
else:
n = random.randint(0, 3)
print(' %s' % module)
print(' #(.LOC("%s"), .N(%d))' % (loc, n))
print(
' clb_%d (.clk(clk), .din(din[ %d +: 8]), .dout(dout[ %d +: 8]));'
% (i, 8 * i, 8 * i))
f.write('%s,%s,%s\n' % (module, loc, n))
f.close()
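# For illustration only: each loop iteration above emits one instantiation of the
# chosen variant module (the actual LOC/N values depend on the ROI and the seeded
# RNG), e.g. for i == 7 something along the lines of
#
#   clb_NFFMUX_O6
#      #(.LOC("SLICE_X12Y100"), .N(2))
#      clb_7 (.clk(clk), .din(din[ 56 +: 8]), .dout(dout[ 56 +: 8]));
#
# and writes the matching "module,loc,n" row to params.csv for later correlation.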
print(
'''endmodule
// ---------------------------------------------------------------------
''')
print(
'''
module myLUT8 (input clk, input [7:0] din,
output lut8o, output lut7bo, output lut7ao,
//caro: XOR additional result (main output)
//carco: CLA result (carry module additional output)
output caro, output carco,
output bo5, output bo6,
output wire mc31,
output wire ff_q, //always connect to output
input wire ff_d); //mux output net
parameter LOC="SLICE_FIXME";
parameter N=-1;
parameter ALUT_SRL=0;
wire [3:0] caro_all;
assign caro = caro_all[N];
wire [3:0] carco_all;
assign carco = carco_all[N];
wire [3:0] lutno6;
wire [3:0] lutno5;
assign bo5 = lutno5[N];
assign bo6 = lutno6[N];
    //Outputs do not have to be used, will stay without them
(* LOC=LOC, BEL="F8MUX", KEEP, DONT_TOUCH *)
MUXF8 mux8 (.O(lut8o), .I0(lut7bo), .I1(lut7ao), .S(din[6]));
(* LOC=LOC, BEL="F7BMUX", KEEP, DONT_TOUCH *)
MUXF7 mux7b (.O(lut7bo), .I0(lutno6[3]), .I1(lutno6[2]), .S(din[6]));
(* LOC=LOC, BEL="F7AMUX", KEEP, DONT_TOUCH *)
MUXF7 mux7a (.O(lut7ao), .I0(lutno6[1]), .I1(lutno6[0]), .S(din[6]));
(* LOC=LOC, BEL="D6LUT", KEEP, DONT_TOUCH *)
LUT6_2 #(
.INIT(64'h8000_DEAD_0000_0001)
) lutd (
.I0(din[0]),
.I1(din[1]),
.I2(din[2]),
.I3(din[3]),
.I4(din[4]),
.I5(din[5]),
.O5(lutno5[3]),
.O6(lutno6[3]));
(* LOC=LOC, BEL="C6LUT", KEEP, DONT_TOUCH *)
LUT6_2 #(
.INIT(64'h8000_BEEF_0000_0001)
) lutc (
.I0(din[0]),
.I1(din[1]),
.I2(din[2]),
.I3(din[3]),
.I4(din[4]),
.I5(din[5]),
.O5(lutno5[2]),
.O6(lutno6[2]));
(* LOC=LOC, BEL="B6LUT", KEEP, DONT_TOUCH *)
LUT6_2 #(
.INIT(64'h8000_CAFE_0000_0001)
) lutb (
.I0(din[0]),
.I1(din[1]),
.I2(din[2]),
.I3(din[3]),
.I4(din[4]),
.I5(din[5]),
.O5(lutno5[1]),
.O6(lutno6[1]));
generate if (ALUT_SRL != 0) begin
(* LOC=LOC, BEL="A6LUT", KEEP, DONT_TOUCH *)
SRLC32E #(
.INIT(64'h8000_1CE0_0000_0001)
) srla (
.CLK(clk),
.CE(din[6]),
.D(din[5]),
.A(din[4:0]),
.Q(lutno6[0]),
.Q31(mc31));
assign lutno5[0] = din[6];
end else begin
(* LOC=LOC, BEL="A6LUT", KEEP, DONT_TOUCH *)
LUT6_2 #(
.INIT(64'h8000_1CE0_0000_0001)
) luta (
.I0(din[0]),
.I1(din[1]),
.I2(din[2]),
.I3(din[3]),
.I4(din[4]),
.I5(din[5]),
.O5(lutno5[0]),
.O6(lutno6[0]));
end endgenerate
//Outputs do not have to be used, will stay without them
(* LOC=LOC, KEEP, DONT_TOUCH *)
CARRY4 carry4(.O(caro_all), .CO(carco_all), .DI(lutno5), .S(lutno6), .CYINIT(1'b0), .CI());
generate
if (N == 3) begin
(* LOC=LOC, BEL="DFF", KEEP, DONT_TOUCH *)
FDPE bff (
.C(clk),
.Q(ff_q),
.CE(1'b1),
.PRE(1'b0),
.D(ff_d));
end
if (N == 2) begin
(* LOC=LOC, BEL="CFF", KEEP, DONT_TOUCH *)
FDPE bff (
.C(clk),
.Q(ff_q),
.CE(1'b1),
.PRE(1'b0),
.D(ff_d));
end
if (N == 1) begin
(* LOC=LOC, BEL="BFF", KEEP, DONT_TOUCH *)
FDPE bff (
.C(clk),
.Q(ff_q),
.CE(1'b1),
.PRE(1'b0),
.D(ff_d));
end
if (N == 0) begin
(* LOC=LOC, BEL="AFF", KEEP, DONT_TOUCH *)
FDPE bff (
.C(clk),
.Q(ff_q),
.CE(1'b1),
.PRE(1'b0),
.D(ff_d));
end
endgenerate
endmodule
//******************************************************************************
//BFFMUX tests
module clb_NFFMUX_AX (input clk, input [7:0] din, output [7:0] dout);
parameter LOC="SLICE_FIXME";
parameter N=-1;
/*
D: DX
drawn a little differently
not a mux control
becomes a dedicated external signal
C: CX
B: BX
A: AX
*/
wire ax = din[6]; //used on MUX8:S, MUX7A:S, and MUX7B:S
myLUT8 #(.LOC(LOC), .N(N))
myLUT8(.clk(clk), .din(din),
.lut8o(),
.caro(), .carco(),
.bo5(), .bo6(),
.ff_q(dout[0]),
.ff_d(ax));
endmodule
module clb_NFFMUX_CY (input clk, input [7:0] din, output [7:0] dout);
parameter LOC="SLICE_FIXME";
parameter N=-1;
wire carco;
myLUT8 #(.LOC(LOC), .N(N))
myLUT8(.clk(clk), .din(din),
.lut8o(),
.caro(), .carco(carco),
.bo5(), .bo6(),
.ff_q(dout[0]),
.ff_d(carco));
endmodule
module clb_NFFMUX_F78 (input clk, input [7:0] din, output [7:0] dout);
parameter LOC="SLICE_FIXME";
parameter N=-1;
wire lut8o, lut7bo, lut7ao;
/*
D: N/A (no such mux position)
C: F7B:O
B: F8:O
A: F7A:O
*/
wire ff_d;
generate
if (N == 3) begin
//No muxes, so this is undefined
invalid_configuration invalid_configuration3();
end else if (N == 2) begin
assign ff_d = lut7bo;
end else if (N == 1) begin
assign ff_d = lut8o;
end else if (N == 0) begin
assign ff_d = lut7ao;
end
endgenerate
myLUT8 #(.LOC(LOC), .N(N))
myLUT8(.clk(clk), .din(din),
.lut8o(lut8o), .lut7bo(lut7bo), .lut7ao(lut7ao),
.caro(), .carco(),
.bo5(), .bo6(),
.ff_q(dout[0]),
.ff_d(ff_d));
endmodule
module clb_NFFMUX_O5 (input clk, input [7:0] din, output [7:0] dout);
parameter LOC="SLICE_FIXME";
parameter N=-1;
wire bo5;
myLUT8 #(.LOC(LOC), .N(N))
myLUT8(.clk(clk), .din(din),
.lut8o(),
.caro(), .carco(),
.bo5(bo5), .bo6(),
.ff_q(dout[0]),
.ff_d(bo5));
endmodule
module clb_NFFMUX_O6 (input clk, input [7:0] din, output [7:0] dout);
parameter LOC="SLICE_FIXME";
parameter N=-1;
wire bo6;
myLUT8 #(.LOC(LOC), .N(N))
myLUT8(.clk(clk), .din(din),
.lut8o(),
.caro(), .carco(),
.bo5(), .bo6(bo6),
.ff_q(dout[0]),
.ff_d(bo6));
endmodule
module clb_NFFMUX_XOR (input clk, input [7:0] din, output [7:0] dout);
parameter LOC="SLICE_FIXME";
parameter N=-1;
    wire caro;
    wire bo6;
myLUT8 #(.LOC(LOC), .N(N))
myLUT8(.clk(clk), .din(din),
.lut8o(),
.caro(caro), .carco(),
.bo5(), .bo6(bo6),
.ff_q(dout[0]),
.ff_d(caro));
endmodule
module clb_NFFMUX_MC31 (input clk, input [7:0] din, output [7:0] dout);
parameter LOC="SLICE_FIXME";
parameter N=-1; // Dummy
    wire mc31;
    wire caro;
    wire bo6;
myLUT8 #(.LOC(LOC), .N(3), .ALUT_SRL(1))
myLUT8(.clk(clk), .din(din),
.lut8o(),
.caro(caro), .carco(),
.bo5(), .bo6(bo6),
.mc31(mc31),
.ff_q(dout[0]),
.ff_d(mc31));
endmodule
''')
| isc | -5,168,153,440,399,580,000 | 24.587912 | 95 | 0.476916 | false |
bolkedebruin/airflow | tests/providers/amazon/aws/sensors/test_sagemaker_endpoint.py | 1 | 3408 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import mock
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.sagemaker import SageMakerHook
from airflow.providers.amazon.aws.sensors.sagemaker_endpoint import SageMakerEndpointSensor
DESCRIBE_ENDPOINT_CREATING_RESPONSE = {
'EndpointStatus': 'Creating',
'ResponseMetadata': {
'HTTPStatusCode': 200,
}
}
DESCRIBE_ENDPOINT_INSERVICE_RESPONSE = {
'EndpointStatus': 'InService',
'ResponseMetadata': {
'HTTPStatusCode': 200,
}
}
DESCRIBE_ENDPOINT_FAILED_RESPONSE = {
'EndpointStatus': 'Failed',
'ResponseMetadata': {
'HTTPStatusCode': 200,
},
'FailureReason': 'Unknown'
}
DESCRIBE_ENDPOINT_UPDATING_RESPONSE = {
'EndpointStatus': 'Updating',
'ResponseMetadata': {
'HTTPStatusCode': 200,
}
}
class TestSageMakerEndpointSensor(unittest.TestCase):
@mock.patch.object(SageMakerHook, 'get_conn')
@mock.patch.object(SageMakerHook, 'describe_endpoint')
def test_sensor_with_failure(self, mock_describe, mock_get_conn):
mock_describe.side_effect = [DESCRIBE_ENDPOINT_FAILED_RESPONSE]
sensor = SageMakerEndpointSensor(
task_id='test_task',
poke_interval=1,
aws_conn_id='aws_test',
endpoint_name='test_job_name'
)
self.assertRaises(AirflowException, sensor.execute, None)
mock_describe.assert_called_once_with('test_job_name')
@mock.patch.object(SageMakerHook, 'get_conn')
@mock.patch.object(SageMakerHook, '__init__')
@mock.patch.object(SageMakerHook, 'describe_endpoint')
def test_sensor(self, mock_describe, hook_init, mock_get_conn):
hook_init.return_value = None
mock_describe.side_effect = [
DESCRIBE_ENDPOINT_CREATING_RESPONSE,
DESCRIBE_ENDPOINT_UPDATING_RESPONSE,
DESCRIBE_ENDPOINT_INSERVICE_RESPONSE
]
sensor = SageMakerEndpointSensor(
task_id='test_task',
poke_interval=1,
aws_conn_id='aws_test',
endpoint_name='test_job_name'
)
sensor.execute(None)
        # make sure we called it 3 times (polling terminates when it's completed)
self.assertEqual(mock_describe.call_count, 3)
# make sure the hook was initialized with the specific params
calls = [
mock.call(aws_conn_id='aws_test'),
mock.call(aws_conn_id='aws_test'),
mock.call(aws_conn_id='aws_test')
]
hook_init.assert_has_calls(calls)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -7,590,471,882,298,991,000 | 31.769231 | 91 | 0.670481 | false |
m87/pyEM | offline.py | 1 | 5995 | import utils as uts
import numpy as np
from scipy.misc import logsumexp
from config import *
from thirdparty import log_mvnpdf, log_mvnpdf_diag
from scipy.stats import multivariate_normal
EPS = np.finfo(float).eps
class BatchGauss(object):
def __init__(self, param):
self.n = param[CLUSTERS]
self.th = float(param['th'])
self.IT = int(param['iter'])
self.covt = param['cov']
self.param = int(param['n'])
self.mvnpdf = {'full': log_mvnpdf, 'diag': log_mvnpdf_diag}
self.hist = []
def __e(self, X):
lg = self.mvnpdf[self.covt](np.array(X[:self.param]), self.means, self.COV[self.covt])
logResps = lg + np.log(self.weights)
self.hist.append(-np.sum(logsumexp(logResps,axis=1))/self.N)
maxLg = np.max(logResps)
logResps -= maxLg
self.resps = np.exp(logResps)
np.clip(self.resps, 10*EPS, np.inf, out=self.resps)
self.resps /= np.sum(self.resps, axis=1)[:, None]
def __m(self, X):
tmpResps = self.resps.sum(axis=0)
respsMeanSum = np.dot(self.resps.T, X[:self.param])
invRMS = 1.0 / (tmpResps[:, np.newaxis] + 10 * EPS)
self.weights = (tmpResps / (np.sum(tmpResps) + 10 * EPS) + EPS)
self.means = respsMeanSum * invRMS
for c in range(self.n):
post = self.resps[:,c]
diff = X[:self.param] - self.means[c]
av = np.dot(post * diff.T, diff) / np.sum(post)
self.covars[c] = av
self.diagCovars[c] = np.diag(self.covars[c])
def predict(self, X):
        lg = self.mvnpdf[self.covt](np.array([X]), self.means, self.COV[self.covt])
logResps = lg + np.log(self.weights)
maxLg = np.max(logResps)
logResps -= maxLg
self.resps = np.exp(logResps)
self.resps /= np.sum(self.resps, axis=1)[:, None]
return np.argmax(self.resps)
def load(self, weights, means, covars):
self.weights = np.load(weights)
self.means = np.load(means)
self.covars = np.load(covars)
        self.dim = self.means.shape[1]
        self.diagCovars = np.zeros((len(self.covars), self.dim))
        for c in range(len(self.covars)):
            self.diagCovars[c] = np.diag(self.covars[c])
        self.COV = {'full': self.covars, 'diag': self.diagCovars}
def fit(self, dataset):
self.__prepare(dataset)
j=0
for i in range(2):
print(i)
self.__e(dataset)
self.__m(dataset)
while True:
print(j)
j+=1
self.__e(dataset)
if abs(self.hist[-1] - self.hist[-2]) <= self.th:
return
if j > self.IT:
return
self.__m(dataset)
def __prepare(self, dataset):
shape = dataset.shape()
self.dim = shape[0][0]
self.N = len(dataset);
self.weights = np.ones((self.n,))
self.weights /= self.n
self.means = np.zeros((self.n,self.dim))
for it in range(self.n):
self.means[it] = dataset[it]
self.covars = np.array([np.identity(self.dim) for x in range(self.n)])
self.diagCovars = np.ones((self.n,self.dim))
self.COV = {'full' : self.covars, 'diag' : self.diagCovars}
self.I ={'full': 1.0, 'diag': np.identity(self.dim)}
def __str__(self):
out = ""
np.set_printoptions(threshold=np.nan)
out += 'w: ' + str(self.weights) + '\nm: ' + str(self.means) + '\nc: ' + str(self.covars)
return out
def save(self, path):
np.save(path+"/weights", self.weights)
np.save(path+"/means", self.means)
np.save(path+"/covars", self.covars)
np.save(path+"/hist", self.hist)
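# Rough usage sketch (assumptions flagged inline): `params` mirrors the keys read in
# __init__ (CLUSTERS from config, plus 'th', 'iter', 'cov', 'n'), and `dataset` is a
# hypothetical loader exposing __len__, integer indexing/slicing and a shape() method,
# as required by fit()/__prepare().
#
#   params = {CLUSTERS: 5, 'th': 1e-4, 'iter': 200, 'cov': 'diag', 'n': 1000}
#   gmm = BatchGauss(params)
#   gmm.fit(dataset)
#   label = gmm.predict(x)   # x is a single observation vector
#   gmm.save('./model')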
class BatchEntropy(object):
    def __init__(self, n_clusters, param=None):
        self.n = n_clusters
        self.param = int(param)
self.lam=0.9
self.hist = []
def __e(self, X):
self.resps = self.weights * np.exp(uts.log_mvnpdf(np.array(X[:self.param]), self.means, self.covars))
np.clip(self.resps, 0.00000000000000000001, np.inf, out=self.resps)
self.resps /= np.sum(self.resps, axis=1)[:, None]
def __m(self, X):
self.sumResps = np.sum(self.resps, axis=0)
self.weights = self.weights * np.exp(np.dot(self.sumResps, -self.lam/self.param))
self.weights /= np.sum(self.weights)
self.sumMeans = np.zeros((self.n, self.dim))
for c in range(self.n):
for it,i in enumerate(X[:self.param]):
diff = i - self.means[c]
self.sumMeans[c] += np.dot(self.resps[it][c],diff)
self.means[c] += np.dot(self.lam/self.param, self.sumMeans[c])
iC =np.linalg.pinv(self.covars[c])
nkCov = np.zeros((self.dim, self.dim))
for it, instance in enumerate(X[:self.param]):
diff = instance - self.means[c]
nkCov += self.resps[it][c] * (iC - np.dot(np.dot(iC,diff[:,None])*diff,iC ))
iC +=np.dot(self.lam/self.param , nkCov)
self.covars[c] = np.linalg.inv(iC)
def fit(self, dataset):
#print(np.exp(uts.log_mvnpdf(np.array([[1,1]]), np.array([[1,1]]), np.array([[[1,0],[0,1]]]))))
#print(dataset.shape())
self.__prepare(dataset)
for i in range(10):
print(i)
self.__e(dataset)
self.__m(dataset)
def __prepare(self, dataset):
shape = dataset.shape()
self.dim = shape[0][0]
self.N = 0;
self.weights = np.ones((self.n,))
self.weights /= self.n
self.means = np.zeros((self.n,self.dim))
for it in range(self.n):
self.means[it] = dataset[it]
self.covars = np.array([np.identity(self.dim) for x in range(self.n)])
def __str__(self):
out = ""
np.set_printoptions(threshold=np.nan)
out += 'w: ' + str(self.weights) + '\nm: ' + str(self.means) + '\nc: ' + str(self.covars)
return out
| mit | -7,289,077,316,482,555,000 | 33.454023 | 109 | 0.541118 | false |
AWinterman/api_object_specification | docs/conf.py | 1 | 9402 | # -*- coding: utf-8 -*-
#
# api_object_specification documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 20 09:52:22 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('./api_object_spec/'))
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('.'))
import api_object_spec.parser.model as model
model.DefinitionList
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'toc'
# General information about the project.
project = u'api_object_specification'
copyright = u'2015, Andrew Winterman'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'api_object_specificationdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'api_object_specification.tex', u'api\\_object\\_specification Documentation',
u'Andrew Winterman', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'api_object_specification', u'api_object_specification Documentation',
[u'Andrew Winterman'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'api_object_specification', u'api_object_specification Documentation',
u'Andrew Winterman', 'api_object_specification', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| apache-2.0 | -7,082,252,508,944,450,000 | 31.309278 | 90 | 0.708892 | false |
Alzon/senlin | senlin/tests/unit/db/test_node_api.py | 1 | 21627 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import six
from oslo_utils import timeutils as tu
from senlin.common import exception
from senlin.db.sqlalchemy import api as db_api
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
from senlin.tests.unit.db import shared
UUID1 = shared.UUID1
UUID2 = shared.UUID2
UUID3 = shared.UUID3
class DBAPINodeTest(base.SenlinTestCase):
def setUp(self):
super(DBAPINodeTest, self).setUp()
self.ctx = utils.dummy_context()
self.profile = shared.create_profile(self.ctx)
self.cluster = shared.create_cluster(self.ctx, self.profile)
def test_node_create(self):
nodes = db_api.node_get_all_by_cluster(self.ctx, self.cluster.id)
self.assertEqual(0, len(nodes))
res = shared.create_node(self.ctx, self.cluster, self.profile)
node = db_api.node_get(self.ctx, res.id)
self.assertIsNotNone(node)
self.assertEqual('test_node_name', node.name)
self.assertEqual(UUID1, node.physical_id)
self.assertEqual(1, node.index)
self.assertIsNone(node.role)
self.assertIsNone(node.created_time)
self.assertIsNone(node.updated_time)
self.assertIsNone(node.deleted_time)
self.assertEqual('ACTIVE', node.status)
self.assertEqual('create complete', node.status_reason)
self.assertEqual('{"foo": "123"}', json.dumps(node.metadata))
self.assertEqual('{"key1": "value1"}', json.dumps(node.data))
self.assertEqual(self.cluster.id, node.cluster_id)
self.assertEqual(self.profile.id, node.profile_id)
nodes = db_api.node_get_all_by_cluster(self.ctx, self.cluster.id)
self.assertEqual(1, len(nodes))
def test_node_get(self):
res = shared.create_node(self.ctx, self.cluster, self.profile)
node = db_api.node_get(self.ctx, res.id)
self.assertIsNotNone(node)
node = db_api.node_get(self.ctx, UUID2)
self.assertIsNone(node)
nodes = db_api.node_get_all_by_cluster(self.ctx, self.cluster.id)
self.assertEqual(1, len(nodes))
def test_node_get_show_deleted(self):
res = shared.create_node(self.ctx, self.cluster, self.profile)
node_id = res.id
node = db_api.node_get(self.ctx, node_id)
self.assertIsNotNone(node)
db_api.node_delete(self.ctx, node_id)
node = db_api.node_get(self.ctx, node_id)
self.assertIsNone(node)
node = db_api.node_get(self.ctx, node_id, show_deleted=False)
self.assertIsNone(node)
node = db_api.node_get(self.ctx, node_id, show_deleted=True)
self.assertEqual(node_id, node.id)
def test_node_get_by_name(self):
shared.create_node(self.ctx, self.cluster, self.profile)
node = db_api.node_get_by_name(self.ctx, 'test_node_name')
self.assertIsNotNone(node)
self.assertEqual('test_node_name', node.name)
self.assertEqual(self.cluster.id, node.cluster_id)
res = db_api.node_get_by_name(self.ctx, 'BogusName')
self.assertIsNone(res)
def test_node_get_by_name_show_deleted(self):
node_name = 'test_node_name'
shared.create_node(self.ctx, self.cluster, self.profile,
name=node_name)
node = db_api.node_get_by_name(self.ctx, node_name)
self.assertIsNotNone(node)
node_id = node.id
db_api.node_delete(self.ctx, node_id)
res = db_api.node_get_by_name(self.ctx, node_name)
self.assertIsNone(res)
res = db_api.node_get_by_name(self.ctx, node_name, show_deleted=False)
self.assertIsNone(res)
res = db_api.node_get_by_name(self.ctx, node_name, show_deleted=True)
self.assertEqual(node_id, res.id)
def test_node_get_by_short_id(self):
node_id1 = 'same-part-unique-part'
node_id2 = 'same-part-part-unique'
shared.create_node(self.ctx, None, self.profile,
id=node_id1, name='node-1')
shared.create_node(self.ctx, None, self.profile,
id=node_id2, name='node-2')
for x in range(len('same-part-')):
self.assertRaises(exception.MultipleChoices,
db_api.node_get_by_short_id,
self.ctx, node_id1[:x])
res = db_api.node_get_by_short_id(self.ctx, node_id1[:11])
self.assertEqual(node_id1, res.id)
res = db_api.node_get_by_short_id(self.ctx, node_id2[:11])
self.assertEqual(node_id2, res.id)
res = db_api.node_get_by_short_id(self.ctx, 'non-existent')
self.assertIsNone(res)
def test_node_get_by_short_id_show_deleted(self):
node_id = 'this-is-a-unique-id'
shared.create_node(self.ctx, None, self.profile, id=node_id)
res = db_api.node_get_by_short_id(self.ctx, node_id[:5])
self.assertEqual(node_id, res.id)
res = db_api.node_get_by_short_id(self.ctx, node_id[:7])
self.assertEqual(node_id, res.id)
db_api.node_delete(self.ctx, node_id)
res = db_api.node_get_by_short_id(self.ctx, node_id[:5])
self.assertIsNone(res)
res = db_api.node_get_by_short_id(self.ctx, node_id[:5],
show_deleted=False)
self.assertIsNone(res)
res = db_api.node_get_by_short_id(self.ctx, node_id[:5],
show_deleted=True)
self.assertEqual(node_id, res.id)
def test_node_get_all(self):
values = [{'name': 'node1'}, {'name': 'node2'}, {'name': 'node3'}]
[shared.create_node(self.ctx, None, self.profile, **v) for v in values]
nodes = db_api.node_get_all(self.ctx)
self.assertEqual(3, len(nodes))
names = [node.name for node in nodes]
[self.assertIn(val['name'], names) for val in values]
def test_node_get_all_with_cluster_id(self):
values = [{'name': 'node1'}, {'name': 'node2'}, {'name': 'node3'}]
for v in values:
shared.create_node(self.ctx, self.cluster, self.profile, **v)
shared.create_node(self.ctx, None, self.profile, name='node0')
nodes = db_api.node_get_all(self.ctx, cluster_id=self.cluster.id)
self.assertEqual(3, len(nodes))
names = [node.name for node in nodes]
[self.assertIn(val['name'], names) for val in values]
def test_node_get_all_show_deleted(self):
values = [{'id': 'node1'}, {'id': 'node2'}, {'id': 'node3'}]
for v in values:
shared.create_node(self.ctx, self.cluster, self.profile, **v)
db_api.node_delete(self.ctx, 'node2')
nodes = db_api.node_get_all(self.ctx)
self.assertEqual(2, len(nodes))
nodes = db_api.node_get_all(self.ctx, show_deleted=False)
self.assertEqual(2, len(nodes))
nodes = db_api.node_get_all(self.ctx, show_deleted=True)
self.assertEqual(3, len(nodes))
def test_node_get_all_with_limit_marker(self):
node_ids = ['node1', 'node2', 'node3']
for v in node_ids:
shared.create_node(self.ctx, self.cluster, self.profile,
id=v, init_time=tu.utcnow())
nodes = db_api.node_get_all(self.ctx, limit=1)
self.assertEqual(1, len(nodes))
nodes = db_api.node_get_all(self.ctx, limit=2)
self.assertEqual(2, len(nodes))
nodes = db_api.node_get_all(self.ctx, limit=5)
self.assertEqual(3, len(nodes))
nodes = db_api.node_get_all(self.ctx, marker='node1')
self.assertEqual(2, len(nodes))
nodes = db_api.node_get_all(self.ctx, marker='node2')
self.assertEqual(1, len(nodes))
nodes = db_api.node_get_all(self.ctx, marker='node3')
self.assertEqual(0, len(nodes))
nodes = db_api.node_get_all(self.ctx, limit=1, marker='node1')
self.assertEqual(1, len(nodes))
def test_node_get_all_used_sort_keys(self):
node_ids = ['node1', 'node2', 'node3']
for v in node_ids:
shared.create_node(self.ctx, self.cluster, self.profile, id=v)
mock_paginate = self.patchobject(db_api.utils, 'paginate_query')
sort_keys = ['index', 'name', 'created_time', 'updated_time',
'deleted_time', 'status']
db_api.node_get_all(self.ctx, sort_keys=sort_keys)
args = mock_paginate.call_args[0]
used_sort_keys = set(args[3])
expected_keys = set(['index', 'name', 'created_time', 'updated_time',
'deleted_time', 'status', 'id'])
self.assertEqual(expected_keys, used_sort_keys)
def test_node_get_all_sort_keys_wont_change(self):
sort_keys = ['id']
db_api.node_get_all(self.ctx, sort_keys=sort_keys)
self.assertEqual(['id'], sort_keys)
def test_node_get_all_sort_keys_and_dir(self):
values = [{'id': '001', 'name': 'node1', 'status': 'ACTIVE'},
{'id': '002', 'name': 'node3', 'status': 'ERROR'},
{'id': '003', 'name': 'node2', 'status': 'UPDATING'}]
for v in values:
shared.create_node(self.ctx, self.cluster, self.profile, **v)
nodes = db_api.node_get_all(self.ctx, sort_keys=['name', 'status'],
sort_dir='asc')
self.assertEqual(3, len(nodes))
# Sorted by name
self.assertEqual('001', nodes[0].id)
self.assertEqual('003', nodes[1].id)
self.assertEqual('002', nodes[2].id)
nodes = db_api.node_get_all(self.ctx, sort_keys=['status', 'name'],
sort_dir='asc')
self.assertEqual(3, len(nodes))
# Sorted by statuses (ascending)
self.assertEqual('001', nodes[0].id)
self.assertEqual('002', nodes[1].id)
self.assertEqual('003', nodes[2].id)
nodes = db_api.node_get_all(self.ctx, sort_keys=['status', 'name'],
sort_dir='desc')
self.assertEqual(3, len(nodes))
# Sorted by statuses (descending)
self.assertEqual('003', nodes[0].id)
self.assertEqual('002', nodes[1].id)
self.assertEqual('001', nodes[2].id)
def test_node_get_all_default_sort_dir(self):
nodes = [shared.create_node(self.ctx, None, self.profile,
init_time=tu.utcnow())
for x in range(3)]
results = db_api.node_get_all(self.ctx, sort_dir='asc')
self.assertEqual(3, len(results))
self.assertEqual(nodes[0].id, results[0].id)
self.assertEqual(nodes[1].id, results[1].id)
self.assertEqual(nodes[2].id, results[2].id)
def test_node_get_all_with_filters(self):
shared.create_node(self.ctx, None, self.profile, name='node1')
shared.create_node(self.ctx, None, self.profile, name='node2')
filters = {'name': ['node1', 'nodex']}
results = db_api.node_get_all(self.ctx, filters=filters)
self.assertEqual(1, len(results))
self.assertEqual('node1', results[0]['name'])
filters = {'name': 'node1'}
results = db_api.node_get_all(self.ctx, filters=filters)
self.assertEqual(1, len(results))
self.assertEqual('node1', results[0]['name'])
def test_node_get_all_with_empty_filters(self):
shared.create_node(self.ctx, None, self.profile, name='node1')
shared.create_node(self.ctx, None, self.profile, name='node2')
filters = None
results = db_api.node_get_all(self.ctx, filters=filters)
self.assertEqual(2, len(results))
def test_node_get_all_with_project_safe(self):
shared.create_node(self.ctx, None, self.profile, name='node1')
shared.create_node(self.ctx, None, self.profile, name='node2')
self.ctx.project = 'a-different-project'
results = db_api.node_get_all(self.ctx, project_safe=False)
self.assertEqual(2, len(results))
self.ctx.project = 'a-different-project'
results = db_api.node_get_all(self.ctx)
self.assertEqual(0, len(results))
results = db_api.node_get_all(self.ctx, project_safe=True)
self.assertEqual(0, len(results))
def test_node_get_by_cluster(self):
cluster1 = shared.create_cluster(self.ctx, self.profile)
node0 = shared.create_node(self.ctx, None, self.profile)
node1 = shared.create_node(self.ctx, self.cluster, self.profile)
node2 = shared.create_node(self.ctx, self.cluster, self.profile)
node3 = shared.create_node(self.ctx, cluster1, self.profile)
nodes = db_api.node_get_all_by_cluster(self.ctx, self.cluster.id)
self.assertEqual(2, len(nodes))
self.assertEqual(set([node1.id, node2.id]),
set([nodes[0].id, nodes[1].id]))
nodes = db_api.node_get_all_by_cluster(self.ctx, None)
self.assertEqual(1, len(nodes))
self.assertEqual(node0.id, nodes[0].id)
nodes = db_api.node_get_all_by_cluster(self.ctx, cluster1.id)
self.assertEqual(1, len(nodes))
self.assertEqual(node3.id, nodes[0].id)
def test_node_get_by_cluster_show_deleted(self):
node0 = shared.create_node(self.ctx, self.cluster, self.profile)
node1 = shared.create_node(self.ctx, self.cluster, self.profile)
nodes = db_api.node_get_all_by_cluster(self.ctx,
self.cluster.id)
self.assertEqual(2, len(nodes))
self.assertEqual(set([node0.id, node1.id]),
set([nodes[0].id, nodes[1].id]))
db_api.node_delete(self.ctx, node1.id)
nodes = db_api.node_get_all_by_cluster(self.ctx,
self.cluster.id)
self.assertEqual(1, len(nodes))
self.assertEqual(node0.id, nodes[0].id)
nodes = db_api.node_get_all_by_cluster(self.ctx,
self.cluster.id,
show_deleted=True)
self.assertEqual(2, len(nodes))
self.assertEqual(set([node0.id, node1.id]),
set([nodes[0].id, nodes[1].id]))
def test_node_get_by_name_and_cluster(self):
node_name = 'test_node_007'
shared.create_node(self.ctx, self.cluster, self.profile,
name=node_name)
node = db_api.node_get_by_name_and_cluster(self.ctx,
node_name,
self.cluster.id)
self.assertIsNotNone(node)
self.assertEqual(node_name, node.name)
self.assertEqual(self.cluster.id, node.cluster_id)
node = db_api.node_get_by_name_and_cluster(self.ctx, 'not-exist',
self.cluster.id)
self.assertIsNone(node)
node = db_api.node_get_by_name_and_cluster(self.ctx, node_name,
'BogusClusterID')
self.assertIsNone(node)
def test_node_get_by_physical_id(self):
shared.create_node(self.ctx, self.cluster, self.profile,
physical_id=UUID1)
node = db_api.node_get_by_physical_id(self.ctx, UUID1)
self.assertIsNotNone(node)
self.assertEqual(UUID1, node.physical_id)
node = db_api.node_get_by_physical_id(self.ctx, UUID2)
self.assertIsNone(node)
def test_node_update(self):
node = shared.create_node(self.ctx, self.cluster, self.profile)
new_attributes = {
'name': 'new node name',
'status': 'bad status',
'role': 'a new role',
}
db_api.node_update(self.ctx, node.id, new_attributes)
node = db_api.node_get(self.ctx, node.id)
self.assertEqual('new node name', node.name)
self.assertEqual('bad status', node.status)
self.assertEqual('a new role', node.role)
def test_node_update_not_found(self):
new_attributes = {'name': 'new_name'}
ex = self.assertRaises(exception.NodeNotFound,
db_api.node_update,
self.ctx, 'BogusId', new_attributes)
self.assertEqual('The node (BogusId) could not be found.',
six.text_type(ex))
def test_node_update_cluster_status_updated(self):
cluster = db_api.cluster_get(self.ctx, self.cluster.id)
self.assertEqual('INIT', cluster.status)
node = shared.create_node(self.ctx, self.cluster, self.profile)
new_attributes = {
'name': 'new_name',
'status': 'ERROR',
'status_reason': 'Something is wrong',
}
db_api.node_update(self.ctx, node.id, new_attributes)
node = db_api.node_get(self.ctx, node.id)
self.assertEqual('new_name', node.name)
self.assertEqual('ERROR', node.status)
self.assertEqual('Something is wrong', node.status_reason)
cluster = db_api.cluster_get(self.ctx, self.cluster.id)
self.assertEqual('WARNING', cluster.status)
reason = 'Node new_name: Something is wrong'
self.assertEqual(reason, cluster.status_reason)
def test_node_migrate_from_none(self):
node_orphan = shared.create_node(self.ctx, None, self.profile)
timestamp = tu.utcnow()
node = db_api.node_migrate(self.ctx, node_orphan.id, self.cluster.id,
timestamp)
cluster = db_api.cluster_get(self.ctx, self.cluster.id)
self.assertEqual(timestamp, node.updated_time)
self.assertEqual(self.cluster.id, node.cluster_id)
self.assertEqual(2, cluster.next_index)
nodes = db_api.node_get_all_by_cluster(self.ctx, self.cluster.id)
self.assertEqual(1, len(nodes))
def test_node_migrate_to_none(self):
node = shared.create_node(self.ctx, self.cluster, self.profile)
timestamp = tu.utcnow()
node_new = db_api.node_migrate(self.ctx, node.id, None, timestamp)
self.assertEqual(timestamp, node_new.updated_time)
self.assertIsNone(node_new.cluster_id)
nodes = db_api.node_get_all_by_cluster(self.ctx, self.cluster.id)
self.assertEqual(0, len(nodes))
def test_node_migrate_between_clusters(self):
cluster1 = shared.create_cluster(self.ctx, self.profile)
cluster2 = shared.create_cluster(self.ctx, self.profile)
node = shared.create_node(self.ctx, cluster1, self.profile)
nodes = db_api.node_get_all_by_cluster(self.ctx, cluster1.id)
self.assertEqual(1, len(nodes))
nodes = db_api.node_get_all_by_cluster(self.ctx, cluster2.id)
self.assertEqual(0, len(nodes))
self.assertEqual(2, cluster1.next_index)
self.assertEqual(1, cluster2.next_index)
timestamp = tu.utcnow()
node_new = db_api.node_migrate(self.ctx, node.id, cluster2.id,
timestamp)
cluster1 = db_api.cluster_get(self.ctx, cluster1.id)
cluster2 = db_api.cluster_get(self.ctx, cluster2.id)
self.assertEqual(timestamp, node_new.updated_time)
self.assertEqual(cluster2.id, node_new.cluster_id)
nodes = db_api.node_get_all_by_cluster(self.ctx, cluster1.id)
self.assertEqual(0, len(nodes))
nodes = db_api.node_get_all_by_cluster(self.ctx, cluster2.id)
self.assertEqual(1, len(nodes))
self.assertEqual(2, cluster1.next_index)
self.assertEqual(2, cluster2.next_index)
# Migrate it back!
timestamp = tu.utcnow()
node_new = db_api.node_migrate(self.ctx, node.id, cluster1.id,
timestamp)
cluster1 = db_api.cluster_get(self.ctx, cluster1.id)
cluster2 = db_api.cluster_get(self.ctx, cluster2.id)
self.assertEqual(timestamp, node_new.updated_time)
self.assertEqual(cluster1.id, node_new.cluster_id)
nodes = db_api.node_get_all_by_cluster(self.ctx, cluster1.id)
self.assertEqual(1, len(nodes))
nodes = db_api.node_get_all_by_cluster(self.ctx, cluster2.id)
self.assertEqual(0, len(nodes))
self.assertEqual(3, cluster1.next_index)
self.assertEqual(2, cluster2.next_index)
def test_node_delete(self):
node = shared.create_node(self.ctx, self.cluster, self.profile)
node_id = node.id
nodes = db_api.node_get_all_by_cluster(self.ctx, self.cluster.id)
self.assertEqual(1, len(nodes))
db_api.node_delete(self.ctx, node_id)
res = db_api.node_get(self.ctx, node_id)
self.assertIsNone(res)
nodes = db_api.node_get_all_by_cluster(self.ctx, self.cluster.id)
self.assertEqual(0, len(nodes))
def test_node_delete_not_found(self):
node_id = 'BogusNodeID'
res = db_api.node_delete(self.ctx, node_id)
self.assertIsNone(res)
res = db_api.node_get(self.ctx, node_id)
self.assertIsNone(res)
| apache-2.0 | 8,853,146,320,183,755,000 | 40.11597 | 79 | 0.597355 | false |
r3vl1s/collagen | collagen/collagen.py | 1 | 1496 | '''
Collagen is a commandline utility that creates a collage using image segmentation.
'''
import os
import os.path
from collagen import segments
from collagen import collage
import sys
from collagen import utils
COLLAGEN_DIR = os.path.abspath(__file__ + "/../../")
def segment_images(source_image_folder,segment_path, segments_per_image, config):
print('Segmenting images...')
images = os.listdir(source_image_folder)
if images:
for image in images:
image_path = os.path.join(source_image_folder,image)
if utils.is_image(image_path):
segments.segments(image_path, segments_per_image, segment_path, config)
else:
sys.exit('No images to segment in source folder.')
def collagen(source_folder, output_folder):
config = utils.load_config('config')
num_collages = config[':collage'][':number_of_collages']
max_pastes = config[':collage'][':max_pastes']
min_pastes = config[':collage'][':min_pastes']
segments_per_image = config[':segments_per_image']
image_dim = (config[':collage'][':width'], config[':collage'][':height'])
segment_path = os.path.join(COLLAGEN_DIR, config[':segments_folder'])
segment_images(source_folder, segment_path, segments_per_image, config)
name = []
for i in range(num_collages):
name.append(collage.collage(segment_path, output_folder, min_pastes, max_pastes, image_dim))
utils.delete_folder_files(segment_path)
return name
| mit | -5,582,000,089,645,530,000 | 30.166667 | 100 | 0.673128 | false |
wdbm/abstraction | toywv-4.py | 1 | 7646 | #!/usr/bin/env python
"""
################################################################################
# #
# toywv-4 #
# #
################################################################################
# #
# LICENCE INFORMATION #
# #
# This toy program converts a specified text expression to a word vector. It #
# also converts a bank of text expressions to word vectors. It compares the #
# specified text expression word vector to the bank of text expressions word #
# vectors and returns the closest match. #
# #
# copyright (C) 2017 William Breaden Madden #
# #
# This software is released under the terms of the GNU General Public License #
# version 3 (GPLv3). #
# #
# This program is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# For a copy of the GNU General Public License, see #
# <http://www.gnu.org/licenses/>. #
# #
################################################################################
Usage:
program [options]
Options:
-h, --help Show this help message.
--version Show the version and exit.
-v, --verbose Show verbose logging.
-s, --silent silent
-u, --username=USERNAME username
--expression=TEXT text expression to convert to word vector
[default: All those moments will be lost in time.]
--wordvectormodel=NAME word vector model
[default: Brown_corpus.wvm]
"""
name = "toywv-4"
version = "2017-03-15T1527Z"
logo = None
import datetime
import docopt
import inspect
import logging
import numpy
import os
import subprocess
import sys
import time
import abstraction
import dataset
import datavision
from gensim.models import Word2Vec
import propyte
import pyprel
import shijian
import technicolor
def main(options):
global program
program = propyte.Program(
options = options,
name = name,
version = version,
logo = logo
)
global log
from propyte import log
expression = options["--expression"]
word_vector_model = options["--wordvectormodel"]
model_word2vec = abstraction.load_word_vector_model(
filename = word_vector_model
)
sentences = [
"What are you dirty hooers doing on my planet?",
"What time is it?",
"What can you do?",
"Change the color from red to black.",
"All those moments will be lost in time.",
"All of those moments will be lost in time.",
"All of those moments are to be lost in time."
]
result = most_similar_expression(
expression = expression,
expressions = sentences,
model_word2vec = model_word2vec
)
pyprel.print_line()
log.info("input expression: {expression}".format(
expression = expression
))
log.info("most similar expression: {expression}".format(
expression = result
))
pyprel.print_line()
program.terminate()
def most_similar_expression(
expression = None,
expressions = None,
model_word2vec = None,
detail = True
):
working_expression_NL = expression
# Convert the expression to a word vector.
working_expression_WV =\
abstraction.convert_sentence_string_to_word_vector(
sentence_string = working_expression_NL,
model_word2vec = model_word2vec
)
stored_expressions = dict()
for expression in expressions:
stored_expressions[expression] =\
abstraction.convert_sentence_string_to_word_vector(
sentence_string = expression,
model_word2vec = model_word2vec
)
# Define table headings.
table_contents = [[
"working expression natural language",
"stored expression natural language",
"absolute magnitude difference between working amd stored expression "
"word vectors",
"angle between working and stored expression word vectors"
]]
# Compare the expression word vector representation to existing word
# vectors.
magnitude_differences = []
angles = []
stored_expressions_NL_list = []
magnitude_working_expression_WV = datavision.magnitude(working_expression_WV)
for stored_expression_NL in stored_expressions:
stored_expression_WV = stored_expressions[stored_expression_NL]
magnitude_stored_expression_WV = datavision.magnitude(stored_expression_WV)
magnitude_difference_working_expression_WV_stored_expression_WV = abs(
magnitude_working_expression_WV - magnitude_stored_expression_WV
)
angle_working_expression_WV_stored_expression_WV = datavision.angle(
working_expression_WV,
stored_expression_WV
)
# Store comparison results in lists.
magnitude_differences.append(
magnitude_difference_working_expression_WV_stored_expression_WV
)
angles.append(
angle_working_expression_WV_stored_expression_WV
)
stored_expressions_NL_list.append(
stored_expression_NL
)
# Build table.
table_contents.append([
str(working_expression_NL),
str(stored_expression_NL),
str(magnitude_difference_working_expression_WV_stored_expression_WV),
str(angle_working_expression_WV_stored_expression_WV)]
)
if detail:
# Record table.
print(
pyprel.Table(
contents = table_contents
)
)
index_minimum_angles = angles.index(min(angles))
translation_expression_NL = stored_expressions_NL_list[index_minimum_angles]
return translation_expression_NL
if __name__ == "__main__":
options = docopt.docopt(__doc__)
if options["--version"]:
print(version)
exit()
main(options)
| gpl-3.0 | 4,299,325,012,864,996,400 | 36.116505 | 83 | 0.511771 | false |
drcraig/concorde | concorde/__init__.py | 1 | 3678 | # coding: utf-8
from __future__ import with_statement
import os
import codecs
from operator import itemgetter
from datetime import datetime
try:
    from urllib.parse import urljoin, urlparse
except ImportError:
    from urlparse import urljoin, urlparse
from markdown import Markdown
import dateutil.parser
from jinja2 import Environment, FileSystemLoader
import PyRSS2Gen
def get_source_files(paths, extensions=['.md', '.markdown'], recurse=False):
files = []
for path in paths:
if os.path.isfile(path):
files.append(path)
for root, dirs, filenames in os.walk(path):
files.extend([os.path.join(root, filename) for filename in filenames])
if not recurse:
break
return [f for f in files if os.path.splitext(f)[1] in extensions]
def parse_markdown_file(md_file, output_extension=''):
md = Markdown(extensions=['extra', 'meta', 'nl2br', 'sane_lists'])
html = md.convert(codecs.open(md_file, 'r', 'utf-8').read())
slug, _ = os.path.splitext(os.path.basename(md_file))
if not hasattr(md, 'Meta'):
md.Meta = {}
data = {}
data.update(md.Meta)
date = datetime.fromtimestamp(os.path.getmtime(md_file))
if md.Meta.get('date'):
date = dateutil.parser.parse(md.Meta.get('date')[0])
data.update({
'title': md.Meta.get('title', [''])[0] or slug.replace('-', ' ').replace('_', ' ').title(),
'date': date,
'html': html,
'slug': slug,
'source': md_file,
'link': os.path.splitext(md_file)[0] + output_extension
})
return data
def render(context, template):
env = Environment(loader=FileSystemLoader(os.path.dirname(template)),
trim_blocks=True, lstrip_blocks=True)
return env.get_template(os.path.basename(template)).render(context)
def write(content, filename):
with codecs.open(filename, 'w', 'utf-8') as f:
f.write(content)
def render_articles(md_files, template, output_extension=''):
for md_file in md_files:
context = parse_markdown_file(md_file, output_extension)
content = render(context, template)
write(content, context['link'])
def file_relpath(file1, file2):
return os.path.join(os.path.relpath(os.path.dirname(file1), os.path.dirname(file2)),
os.path.basename(file1))
def render_to_index(md_files, template, indexfile, output_extension):
articles = [parse_markdown_file(md_file, output_extension) for md_file in md_files]
articles.sort(key=itemgetter('date'), reverse=True)
for article in articles:
article['url'] = file_relpath(article['link'], indexfile)
content = render({'articles': articles}, template)
write(content, indexfile)
def generate_feed(md_files, output_extension, feedfile, feed_url, title='', description=''):
articles = [parse_markdown_file(md_file, output_extension) for md_file in md_files]
articles.sort(key=itemgetter('date'), reverse=True)
for article in articles:
relative_path = file_relpath(article['link'], feedfile)
article['url'] = urljoin(feed_url, relative_path)
rss = PyRSS2Gen.RSS2(
title=title,
link=feed_url,
description=description,
lastBuildDate=datetime.now(),
items = [
PyRSS2Gen.RSSItem(
title=article['title'],
link=article['url'],
description=article['html'],
guid=PyRSS2Gen.Guid(article['url']),
pubDate=article['date']
) for article in articles
]
)
with open(feedfile, 'w') as f:
rss.write_xml(f)
| mit | -4,614,828,912,946,080,000 | 34.028571 | 99 | 0.630778 | false |
gumpu/TSP_Animation | anim.py | 1 | 19264 | #!/usr/bin/python2
# This only runs under Python2
import math
import time
import random
import datetime
from node import Node
from node import length
from node import total_length
from node import dump
import matplotlib.pyplot as plt
import numpy
l_min = None
pic = 0
# v = [-100,4100,-100,2100]
def framec( solution, nc ):
global pic
cities = [ (n.x,n.y) for n in solution ]
cities = numpy.array( cities )
plt.axis( [-100,4100,-100,2100] )
plt.axis('off')
plt.plot(cities[:,0],cities[:,1],'ko')
plt.title('{} Cities, 5 Search Algorithms'.format(nc))
plt.savefig( ("%05d" % pic)+'.png')
plt.clf()
print pic
pic += 1
def frame0(solution, nodes, l, title):
global pic
cities = [(n.x,n.y) for n in solution]
cities.append(( solution[0].x, solution[0].y ))
cities = numpy.array( cities )
all_node = [(n.x,n.y) for n in nodes]
all_nodes = numpy.array(all_node)
plt.axis([-100, 4100, -100, 2100])
plt.axis('off')
plt.plot(cities[:,0], cities[:,1], 'bo-')
plt.plot(all_nodes[:,0], all_nodes[:,1], 'ko')
plt.title('{} Tour length {:.1f}'.format(title,l))
plt.savefig( ("%05d" % pic)+'.png')
plt.clf()
print pic
pic += 1
nn = 0
def frame(nodes, solution, sn, t, c, y, x, z, gain):
global pic
global nn
cities = [(n.x,n.y) for n in solution]
cities = numpy.array(cities)
cities2 = [(c.x,c.y), (y.x,y.y)]
cities3 = [(x.x,x.y), (z.x,z.y)]
cities2 = numpy.array(cities2)
cities3 = numpy.array(cities3)
plt.plot(cities[:,0],cities[:,1],'bo-')
#plt.scatter(cities[:,0], cities[:,1],s=50,c='k')
if gain < 0:
plt.scatter(cities2[:,0], cities2[:,1], c='r',s=180)
plt.plot(cities2[:,0],cities2[:,1], c='r',linewidth=2)
plt.scatter(cities3[:,0], cities3[:,1],c='b',s=150)
plt.plot(cities3[:,0],cities3[:,1], c='r',linewidth=2)
else:
plt.scatter(cities2[:,0], cities2[:,1], c='g',s=180)
plt.plot(cities2[:,0], cities2[:,1], c='g',linewidth=2)
plt.scatter(cities3[:,0], cities3[:,1], c='b',s=150)
plt.plot(cities3[:,0], cities3[:,1], c='g',linewidth=2)
plt.axis( [-100,4100,-100,2100] )
plt.axis('off')
# In first few frames there might not be an l_min yet
if l_min is None:
        plt.title('(4) SA Temp {:4.1f} Best Tour ---\nSwaps {} Gain {:12.2f} '.format(t, nn, gain))
else:
plt.title('(4) SA Temp {:4.1f} Best Tour {:6.1f}\nSwaps {} Gain {:12.2f} '.format(t, l_min, nn, gain))
plt.savefig( ("%05d" % pic)+'.png')
plt.clf()
pic += 1
print pic
def frame4(nodes, solution, sn, c, y, x, z, gain):
global pic
global nn
l_min = total_length( nodes, solution )
cities = [ (n.x,n.y) for n in solution ]
cities = numpy.array( cities )
cities2 = [ (c.x,c.y), (y.x,y.y) ]
cities3 = [ (x.x,x.y), (z.x,z.y) ]
cities2 = numpy.array( cities2 )
cities3 = numpy.array( cities3 )
plt.plot(cities[:,0],cities[:,1],'bo-')
#plt.scatter(cities[:,0], cities[:,1],s=50,c='k')
if gain < 0:
plt.scatter(cities2[:,0], cities2[:,1],c='r',s=180)
plt.plot(cities2[:,0],cities2[:,1],c='r',linewidth=2)
plt.scatter(cities3[:,0], cities3[:,1],c='b',s=150)
plt.plot(cities3[:,0],cities3[:,1],c='r',linewidth=2)
else:
plt.scatter(cities2[:,0], cities2[:,1],c='g',s=180)
plt.plot(cities2[:,0],cities2[:,1],c='g',linewidth=2)
plt.scatter(cities3[:,0], cities3[:,1],c='b',s=150)
plt.plot(cities3[:,0],cities3[:,1],c='g',linewidth=2)
plt.axis( [-100,4100,-100,2100] )
plt.axis('off')
plt.title('(3) 2-Opt Tour {:6.1f}'.format(l_min))
plt.savefig( ("%05d" % pic)+'.png')
plt.clf()
pic += 1
print pic
#-----------------------------------------------------------------------------
#
# Before 2opt After 2opt
# Y Z Y Z
# O O-----> O-->O---->
# / \ ^ \
# / \ | \
# / \| \
# ->O O ->O------>O
# C X C X
#
# In a 2-opt optimization step we consider two nodes, Y and X.  (Between Y
# and X there might be many more nodes, but they don't matter.)  We also
# consider the node C just before Y and the node Z just after X.
#
# For the optimization we check whether replacing the edges CY and XZ with the
# edges CX and YZ reduces the length of the path  C -> Z.  For this we only
# need to look at |CY|, |XZ|, |CX| and |YZ|; |YX| is the same in both
# configurations.
#
# If there is a length reduction we swap the edges AND reverse the direction
# of the edges between Y and X.
#
# In the following function we compute the amount of reduction in length
# (gain) for all combinations of nodes (X,Y) and do the swap for the
# combination that gave the best gain.
#
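# As a worked example (made-up edge lengths): if |CY| = 10, |XZ| = 12,
# |CX| = 6 and |YZ| = 7, the gain is (10 + 12) - (6 + 7) = 9, so this swap
# shortens the tour by 9 units and is remembered if it beats the best gain
# found so far.
#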
def optimize2opt(nodes, solution, number_of_nodes):
best = 0
best_move = None
# For all combinations of the nodes
for ci in range(0, number_of_nodes):
for xi in range(0, number_of_nodes):
yi = (ci + 1) % number_of_nodes # C is the node before Y
zi = (xi + 1) % number_of_nodes # Z is the node after X
c = solution[ ci ]
y = solution[ yi ]
x = solution[ xi ]
z = solution[ zi ]
# Compute the lengths of the four edges.
cy = length( c, y )
xz = length( x, z )
cx = length( c, x )
yz = length( y, z )
# Only makes sense if all nodes are distinct
if xi != ci and xi != yi:
# What will be the reduction in length.
gain = (cy + xz) - (cx + yz)
                # Is it any better than the best one so far?
if gain > best:
# Yup, remember the nodes involved
best_move = (ci,yi,xi,zi)
best = gain
print best_move, best
if best_move is not None:
(ci,yi,xi,zi) = best_move
# This four are needed for the animation later on.
c = solution[ ci ]
y = solution[ yi ]
x = solution[ xi ]
z = solution[ zi ]
# Create an empty solution
new_solution = range(0,number_of_nodes)
# In the new solution C is the first node.
        # This way we only need two copy loops instead of three.
new_solution[0] = solution[ci]
n = 1
# Copy all nodes between X and Y including X and Y
# in reverse direction to the new solution
while xi != yi:
new_solution[n] = solution[xi]
n = n + 1
xi = (xi-1)%number_of_nodes
new_solution[n] = solution[yi]
n = n + 1
# Copy all the nodes between Z and C in normal direction.
while zi != ci:
new_solution[n] = solution[zi]
n = n + 1
zi = (zi+1)%number_of_nodes
# Create a new animation frame
frame4(nodes, new_solution, number_of_nodes, c, y, x, z, gain)
return (True,new_solution)
else:
return (False,solution)
#-----------------------------------------------------------------------------
# This is an SA optimization step.
# It uses the same principle as the optimize2opt with the following
# differences:
#
# (1) Instead of all combinations of (X,Y) it picks a single combination
#  at random.
#
# (2) Instead of only doing an edge swap if it reduces the length, it
# sometimes (depending on chance) also does a swap that INCREASES the length.
# How often this happens depends on the temperature t and the gain.
# For high temperatures this happens often and large negative gains are accepted,
# but the lower the temperature the less often it happens and only small
# negative gains are accepted.
#
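# As an illustration (made-up numbers): a swap that lengthens the tour by 5
# units (gain = -5) is accepted with probability exp(-5/100) ~ 0.95 while
# t = 100, but only with probability exp(-5/1) ~ 0.007 once t has cooled
# down to 1.
#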
def sa_optimize_step(nodes, solution, number_of_nodes, t):
global nn
# Pick X and Y at random.
ci = random.randint(0, number_of_nodes-1)
yi = (ci + 1) % number_of_nodes
xi = random.randint(0, number_of_nodes-1)
zi = (xi + 1) % number_of_nodes
if xi != ci and xi != yi:
c = solution[ci]
y = solution[yi]
x = solution[xi]
z = solution[zi]
cy = length(c, y)
xz = length(x, z)
cx = length(c, x)
yz = length(y, z)
gain = (cy + xz) - (cx + yz)
if gain < 0:
# We only accept a negative gain conditionally
# The probability is based on the magnitude of the gain
# and the temperature.
u = math.exp( gain / t )
elif gain > 0.05:
            u = 1 # always accept a good gain.
else:
u = 0 # No idea why I did this....
# random chance, picks a number in [0,1)
if (random.random() < u):
nn = nn + 1
#print " ", gain
# Make a new solution with both edges swapped.
new_solution = range(0,number_of_nodes)
new_solution[0] = solution[ci]
n = 1
while xi != yi:
new_solution[n] = solution[xi]
n = n + 1
xi = (xi-1)%number_of_nodes
new_solution[n] = solution[yi]
n = n + 1
while zi != ci:
new_solution[n] = solution[zi]
n = n + 1
zi = (zi+1)%number_of_nodes
# Create an animation frame for this step
frame(nodes, new_solution, number_of_nodes, t, c, y, x, z, gain)
return new_solution
else:
return solution
else:
return solution
#----------------------------------------------------------------------------
def greedy_algorithm(nodes):
# Greedy Algorithm
print 'Computing greedy path'
free_nodes = nodes[:]
solution = []
n = free_nodes[0]
free_nodes.remove(n)
solution.append( n )
while len(free_nodes) > 0:
print(len(free_nodes))
min_l = None
min_n = None
for c in free_nodes:
l = length( c, n )
if min_l is None:
min_l = l
min_n = c
elif l < min_l:
min_l = l
min_n = c
solution.append(min_n)
free_nodes.remove(min_n)
n = min_n
return solution
#-----------------------------------------------------------------------------
def two_opt_algorithm(nodes, number_of_nodes):
# Create an initial solution
solution = [n for n in nodes]
go = True
# Try to optimize the solution with 2opt until
# no further optimization is possible.
while go:
(go,solution) = optimize2opt(nodes, solution, number_of_nodes)
return solution
#-----------------------------------------------------------------------------
def sa_algorithm(nodes, number_of_nodes):
# Create an initial solution that we can improve upon.
solution = [n for n in nodes]
# The temperature t. This is the most important parameter of the SA
# algorithm. It starts at a high temperature and is then slowly decreased.
# Both rate of decrease and initial values are parameters that need to be
# tuned to get a good solution.
# The initial temperature. This should be high enough to allow the
# algorithm to explore many sections of the search space. Set too high it
# will waste a lot of computation time randomly bouncing around the search
# space.
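    # As a rough illustration of the constants used below: starting at t = 100
    # and multiplying by 0.9995 once every ~200 steps, it takes about
    # ln(0.001)/ln(0.9995) ~ 13,800 coolings, i.e. roughly 2.8 million calls
    # to sa_optimize_step, before t drops below 0.1 and the loop ends.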
t = 100
# Length of the best solution so far.
l_min = total_length( nodes, solution )
best_solution = []
i = 0
while t > 0.1:
i = i + 1
# Given a solution we create a new solution
solution = sa_optimize_step(nodes, solution, number_of_nodes, t)
# every ~200 steps
if i >= 200:
i = 0
# Compute the length of the solution
l = total_length( nodes, solution )
print " ", l, t, nn
# Lower the temperature.
# The slower we do this, the better then final solution
# but also the more times it takes.
t = t*0.9995
# See if current solution is a better solution then the previous
# best one.
if l_min is None: # TODO: This can be removed, as l_min is set above.
l_min = l
elif l < l_min:
# Yup it is, remember it.
l_min = l
print "++", l, t
best_solution = solution[:]
else:
pass
return best_solution
# From: http://stackoverflow.com/questions/16625507/python-checking-if-point-is-inside-a-polygon
# (Patrick Jordan)
# Modified to work with nodes.
def point_in_poly(x, y, poly):
n = len(poly)
inside = False
p1x = poly[0].x
p1y = poly[0].y
for i in range(n+1):
p2x = poly[i % n].x
p2y = poly[i % n].y
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
def area_triangle(n1, n2, n3):
# Area of triangle via Heron's Formula
# <https://en.wikipedia.org/wiki/Heron%27s_formula>
a = length(n1, n2)
b = length(n2, n3)
c = length(n3, n1)
p = (a + b + c)/2.0
area = math.sqrt(p*(p-a)*(p-b)*(p-c))
return area
def miss_perry_s_compass(nodes, number_of_nodes):
# Compute Center of all nodes
sum_x = 0
sum_y = 0
for n in nodes:
sum_x += n.x
sum_y += n.y
c_x = sum_x / number_of_nodes
c_y = sum_y / number_of_nodes
# Make a node for this center.
center_node = Node(-1, c_x, c_y)
sorted_nodes = []
done = [False] * number_of_nodes
# Sort the nodes based on the distance from the center node.
for i in range(number_of_nodes):
max_l = -1
furthest = None
for j in range(number_of_nodes):
if done[j]:
pass
else:
l = length(center_node, nodes[j])
if l > max_l:
furthest = j
max_l = l
sorted_nodes.append(nodes[furthest])
done[furthest] = True
# Create initial polygon
solution = [sorted_nodes[0], sorted_nodes[1], sorted_nodes[2]]
for i in range(3, number_of_nodes):
new_node = sorted_nodes[i]
closest = sorted_nodes[0]
min_l = length(closest, new_node)
index_in_list = 0
for j in range(1, i):
l = length(sorted_nodes[j], new_node)
if l < min_l:
index_in_list = j
closest = sorted_nodes[j]
min_l = l
# Is the node inside or outside the polygon?
if point_in_poly(new_node.x, new_node.y, solution):
idx_before = (index_in_list - 1) % i
idx_after = (index_in_list + 1) % i
# it is Inside
area1 = area_triangle(new_node, closest, solution[idx_before])
area2 = area_triangle(new_node, closest, solution[idx_after])
if area1 < area2:
# Insert new node between closest and next
pass
else:
# Insert
pass
pass
else:
# it is outside
pass
return sorted_nodes
#-----------------------------------------------------------------------------
if False:
# Experiment with Perry Algorithm
do_greedy = False
do_intro = False
do_perry = True
do_2opt = False
do_sa = False
else:
# Production
do_greedy = True
do_intro = True
do_perry = False
do_2opt = True
do_sa = True
def create_animation(nodes):
global nn
global l_min
number_of_nodes = len( nodes )
print('Size {}'.format( number_of_nodes ))
if do_greedy:
# Greedy Algorithm
print 'Computing greedy path'
solution = greedy_algorithm(nodes)
else:
# For debugging
solution = [n for n in nodes]
if do_intro:
# Only cities
solution0 = [n for n in nodes]
for i in range(2, number_of_nodes):
s = solution0[0:i]
framec(s, number_of_nodes)
# Show all cities for an additional 20 frames.
for i in range(20):
framec(s, number_of_nodes)
# Animate the Random Search
for i in range(2, number_of_nodes):
s = solution0[0:i]
frame0(s, nodes, total_length(nodes, s), "(1) Random Path")
s = solution0
for i in range(60):
frame0(s, nodes, total_length(nodes, s), "(1) Random Path")
# Animate the Greedy Search
for i in range(2, number_of_nodes):
s = solution[0:i]
frame0(s, nodes, total_length(nodes, s), "(2) Greedy Search")
s = solution
for i in range(60):
frame0(s, nodes, total_length(nodes, s), "(2) Greedy Search")
# Under construction
if do_perry:
solution = miss_perry_s_compass(nodes, number_of_nodes)
for i in range(2, number_of_nodes):
s = solution[0:i]
frame0(s, nodes, total_length(nodes, s), "(1) Random Path")
for i in range(60):
frame0(solution, nodes, total_length(nodes, s), "(3) Miss Perry")
if do_2opt:
print("2-Opt")
# Run 2-Opt algorithm and create animation frames for each step
s = two_opt_algorithm(nodes, number_of_nodes)
# Show the best solution for an additional 60 frames.
for i in range(60):
frame0(s, nodes, total_length(nodes, s), "(4) 2-Opt")
if do_sa:
#=== Simulated Annealing
print("SA")
# Run SA algorithm and create animation frames for each step
s = sa_algorithm(nodes, number_of_nodes)
# Show the best solution for an additional 60 frames.
for i in range(60):
frame0(s, nodes, total_length(nodes, s), "(5) SA")
return s
#-----------------------------------------------------------------------------
def read_problem(problem_file_name):
nodes = []
with open(problem_file_name) as inpf:
first_line = inpf.readline()
node_count = int(first_line)
i = 0
for line in inpf:
parts = line.split()
nodes.append(Node(i, float(parts[0]), float(parts[1])))
i = i + 1
return nodes
#-----------------------------------------------------------------------------
def solve(problem_file_name):
# This it to make sure we get the same answer each time.
random.seed(8111142)
solution_string = None
nodes = read_problem(problem_file_name)
solution = create_animation(nodes)
objective = total_length(nodes, solution)
solution_string = str(objective) + ' 0\n'
solution_string += ' '.join(map(lambda x: str(x.id), solution))
return solution_string
if __name__ == '__main__':
print solve('problem3.dat')
| gpl-3.0 | -2,581,411,592,816,155,600 | 31.053245 | 112 | 0.524917 | false |
charettes/django-dynamic-choices | tests/test_models.py | 1 | 2717 | from __future__ import unicode_literals
from django.core.exceptions import FieldError, ValidationError
from django.db.models import Model
from django.test import SimpleTestCase, TestCase
from dynamic_choices.db.models import DynamicChoicesForeignKey
from .models import ALIGNMENT_EVIL, ALIGNMENT_GOOD, Enemy, Master, Puppet
class DefinitionValidationTest(SimpleTestCase):
def test_missing_method(self):
with self.assertRaises(FieldError):
class MissingChoicesCallbackModel(Model):
field = DynamicChoicesForeignKey('self', choices='missing_method')
class Meta:
app_label = 'dynamic_choices'
def test_callable(self):
class CallableChoicesCallbackModel(Model):
field = DynamicChoicesForeignKey('self', choices=lambda qs: qs)
class Meta:
app_label = 'dynamic_choices'
class DynamicForeignKeyTests(TestCase):
def setUp(self):
self.good_master = Master.objects.create(alignment=ALIGNMENT_GOOD)
self.evil_master = Master.objects.create(alignment=ALIGNMENT_EVIL)
def test_valid_value(self):
good_puppet = Puppet(master=self.good_master, alignment=ALIGNMENT_GOOD)
good_puppet.full_clean()
good_puppet.save()
evil_puppet = Puppet(master=self.evil_master, alignment=ALIGNMENT_EVIL)
evil_puppet.full_clean()
evil_puppet.save()
enemy = Enemy(puppet=evil_puppet, enemy=good_puppet, because_of=self.good_master)
enemy.full_clean(exclude=['since'])
def test_invalid_value(self):
puppet = Puppet(master=self.good_master, alignment=ALIGNMENT_EVIL)
self.assertRaises(ValidationError, puppet.full_clean)
class DynamicOneToOneFieldTests(TestCase):
fixtures = ['dynamic_choices_test_data']
def setUp(self):
self.good_puppet = Puppet.objects.get(alignment=ALIGNMENT_GOOD)
self.evil_puppet = Puppet.objects.get(alignment=ALIGNMENT_EVIL)
def test_valid_value(self):
self.evil_puppet.secret_lover = self.good_puppet
self.evil_puppet.full_clean()
self.evil_puppet.save()
self.assertEqual(self.good_puppet.secretly_loves_me, self.evil_puppet)
self.good_puppet.secret_lover = self.evil_puppet
self.good_puppet.full_clean()
def test_invalid_value(self):
self.evil_puppet.secret_lover = self.good_puppet
self.evil_puppet.save()
self.good_puppet.secret_lover = self.good_puppet
self.assertRaises(
ValidationError, self.good_puppet.full_clean,
"Since the evil puppet secretly loves the good puppet the good puppet can only secretly love the bad one."
)
| mit | 2,345,321,175,337,379,000 | 37.267606 | 118 | 0.686051 | false |
znick/anytask | anytask/anyrb/views.py | 1 | 1098 | # -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext as _
from issues.models import Issue
@csrf_exempt
def message_from_rb(request, review_id):
for one_issue in Issue.objects.filter(task__rb_integrated=True).order_by('-update_time'):
try:
if one_issue.get_byname('review_id') == review_id:
issue = one_issue
break
except: # noqa
pass
if request.method == 'POST':
value = {'files': [], 'comment': ''}
value['comment'] = u'<p><strong>{2} <a href="{1}/r/{0}">Review request {0}</a></strong>.</p>' \
.format(review_id, settings.RB_API_URL, _(u'novyj_kommentarij'))
author = get_object_or_404(User, username=request.POST.get('author', ''))
issue.set_byname('comment', value, author)
issue.save()
return HttpResponse(status=201)
| mit | -44,139,875,798,322,490 | 38.214286 | 103 | 0.631148 | false |
hjfreyer/pledgeservice | backend/model.py | 1 | 28312 | import datetime
import json
import logging
import os
import random
from rauth import OAuth2Service
from collections import namedtuple
from google.appengine.ext import db
import cache
class Error(Exception): pass
# Used to indicate which data objects were created in which version of
# the app, in case we need to special-case some logic for objects
# which came before a certain point.
#
# Versions:
# <missing>: Initial model.
# 2: Implemented sharded counter for donation total. Objects before
# this version are not included in that counter.
# 3: Include information in the User model including their name and whether
# they wish to be subscribed to the mailing list.
# 4: Pledges now have "team"s.
# 5: Reset the total sharding counter.
# 6: Pledges now have "pledge_type"s.
# 7: Adds Pledge.stripe_charge. Pledges no longer created without a successful
# charge. Thus, ChargeStatus is obsolete and deprecated.
# 8: Adds whether or not pledges are anonymous
# 9: Previous versions were not summed on demand into TeamTotal objects.
# Model 9 and newer pledges are.
# 10: SurveyResult field added. This is just a text field, one of:
# Digital voter registration ads to grow the electorate
# Video trackers to force candidate transparency
# No robocalls
# State-of-the-art digital canvassing and field tools
# No negative ads
# Whatever helps us win
# 11: Paypal support
# 12: TeamTotal.num_pledges added. This is a live total of completed pledges for
# that team.
# 13: Added 'Use for Upton' value to Pledge. This allows the pledge to be used
# for Upton campaign, if true
#
# 14: Added addressCheckPass to Pledge. This will be false if Stripe returns
#     false in the address validation field. In the Lessig project, all
#     earlier pledges can be assumed to have this set to True.
#
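# A typical version check looks like the one in TeamTotal._pledge8Count
# further down in this file, e.g.:
#
#   if pledge.model_version < 9:
#     total += pledge.amountCents
#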
MODEL_VERSION = 14
# Config singleton. Loaded once per instance and never modified. It's
# okay if try to load it multiple times, so no worries about race
# conditions.
#
# Note that this isn't really a "model", it's built up from config.json
# and the "Secrets" model.
#
# TODO(hjfreyer): Deprecate this and replace it with handlers.Environment.
class Config(object):
ConfigType = namedtuple('ConfigType',
['app_name',
'stripe_public_key', 'stripe_private_key',
'mailchimp_api_key', 'mailchimp_list_id',
'paypal_user', 'paypal_password', 'paypal_signature',
'paypal_url', 'paypal_api_url', 'bitpay_api_key'])
_instance = None
@staticmethod
def get():
if Config._instance:
return Config._instance
j = json.load(open('config.json'))
s = Secrets.get()
if j.get('hardCodeStripe'):
stripe_public_key = j['stripePublicKey']
stripe_private_key = j['stripePrivateKey']
else:
stripe_public_key = s.stripe_public_key
stripe_private_key = s.stripe_private_key
if s:
bitpay_api_key = s.bitpay_api_key
if 'productionPaypal' in j and j['productionPaypal']:
paypal_api_url = "https://api-3t.paypal.com/nvp"
paypal_url = "https://www.paypal.com/webscr"
paypal_user = s.paypal_user
paypal_password = s.paypal_password
paypal_signature = s.paypal_signature
else: # Use the Sanbox
paypal_api_url = "https://api-3t.sandbox.paypal.com/nvp"
paypal_url = "https://www.sandbox.paypal.com/webscr"
paypal_user = s.paypal_sandbox_user
paypal_password = s.paypal_sandbox_password
paypal_signature = s.paypal_sandbox_signature
Config._instance = Config.ConfigType(
app_name = j['appName'],
stripe_public_key=stripe_public_key,
stripe_private_key=stripe_private_key,
mailchimp_api_key=s.mailchimp_api_key,
mailchimp_list_id=s.mailchimp_list_id,
paypal_user = paypal_user,
paypal_password = paypal_password,
paypal_signature = paypal_signature,
paypal_api_url = paypal_api_url,
paypal_url = paypal_url,
bitpay_api_key = bitpay_api_key
)
return Config._instance
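# Typical use is a single read of the singleton wherever credentials are
# needed (illustrative sketch; the real wiring lives in the request handlers
# and payment backends, not in this module):
#
#   config = Config.get()
#   stripe.api_key = config.stripe_private_key
#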
# Secrets to store in the DB, rather than git.
#
# If you add a field to this, set the default to the empty string, and then
# after pushing the code, go to /admin and select the "Update Secrets model
# properties" command. Then you should be able to edit the new field in the
# datastore.
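# For example, a hypothetical new secret would be declared just like the
# existing ones, with an empty-string default:
#
#   some_new_api_key = db.StringProperty(default='')
#
# and becomes editable in the datastore once the admin command above has run.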
class Secrets(db.Model):
SINGLETON_KEY = 'SINGLETON'
# We include the public key so they're never out of sync.
stripe_public_key = db.StringProperty(default='')
stripe_private_key = db.StringProperty(default='')
mailchimp_api_key = db.StringProperty(default='')
mailchimp_list_id = db.StringProperty(default='')
paypal_sandbox_user = db.StringProperty(default='')
paypal_sandbox_password = db.StringProperty(default='')
paypal_sandbox_signature = db.StringProperty(default='')
paypal_user = db.StringProperty(default='')
paypal_password = db.StringProperty(default='')
paypal_signature = db.StringProperty(default='')
bitpay_api_key = db.StringProperty(default='')
nationbuilder_token = db.StringProperty(default='')
@staticmethod
def get():
return Secrets.get_or_insert(key_name=Secrets.SINGLETON_KEY)
@staticmethod
@db.transactional
def update():
s = Secrets.get_by_key_name(Secrets.SINGLETON_KEY)
if s is None:
s = Secrets(key_name=Secrets.SINGLETON_KEY)
s.put()
class User(db.Model):
model_version = db.IntegerProperty()
# a user's email is also the model key
email = db.EmailProperty(required=True)
first_name = db.StringProperty()
last_name = db.StringProperty()
# Collected in our system for Bitcoin users only
address = db.StringProperty()
city = db.StringProperty()
state = db.StringProperty()
zipCode = db.StringProperty()
# occupation and employer are logically required for all new users, but we
# don't have this data for everyone. so from a data model perspective, they
# aren't required.
occupation = db.StringProperty(required=False)
employer = db.StringProperty(required=False)
phone = db.StringProperty(required=False)
# whether or not the pledge was donated specifically for a particular
# political affiliation
target = db.StringProperty(required=False)
# the results of a survey from mayday.us/goodfight
surveyResult = db.StringProperty(required=False)
# this is the nonce for what we'll put in a url to send to people when we ask
# them to update their information. it's kind of like their password for the
# user-management part of the site.
url_nonce = db.StringProperty(required=True)
from_import = db.BooleanProperty(required=False)
# whether the user opted in to receive email from us
mail_list_optin = db.BooleanProperty(required=False)
@staticmethod
@db.transactional
def createOrUpdate(email, first_name=None, last_name=None, occupation=None,
employer=None, phone=None, target=None,
from_import=None, mail_list_optin=None, surveyResult=None,
address=None, city=None, state=None, zipCode=None
):
user = User.get_by_key_name(email)
if user is None:
user = User(model_version=MODEL_VERSION,
key_name=email,
email=email,
url_nonce=os.urandom(32).encode("hex"),
from_import=from_import,
mail_list_optin=mail_list_optin)
def choose(current, new):
# If this is an import, current data should win.
if from_import:
return current or new
else:
return new or current
user.first_name = choose(user.first_name, first_name)
user.last_name = choose(user.last_name, last_name)
user.occupation = choose(user.occupation, occupation)
user.employer = choose(user.employer, employer)
user.phone = choose(user.phone, phone)
user.target = choose(user.target, target)
user.surveyResult = choose(user.surveyResult, surveyResult)
user.address = choose(user.address, address)
user.city = choose(user.city, city)
user.state = choose(user.state, state)
user.zipCode = choose(user.zipCode, zipCode)
user.mail_list_optin = choose(user.mail_list_optin, mail_list_optin)
user.put()
return user
# for bitpay pledges, we need to store the form data while waiting for the
# response from bitpay
class TempPledge(db.Model):
model_version = db.IntegerProperty()
email = db.EmailProperty(required=True)
phone = db.StringProperty()
name = db.StringProperty()
occupation = db.StringProperty()
employer = db.StringProperty()
target = db.StringProperty()
subscribe = db.BooleanProperty(required=False, default=True)
amountCents = db.IntegerProperty(required=True)
firstName = db.StringProperty()
lastName = db.StringProperty()
address = db.StringProperty()
city = db.StringProperty()
state = db.StringProperty()
zipCode = db.StringProperty()
bitcoinConfirm=db.BooleanProperty(required=False, default=False)
keep_donation=db.BooleanProperty(default=False)
team = db.StringProperty()
source = db.StringProperty(required=False)
# all pledge_types for bitpay pledges must be "DONATION"
bitpay_invoice_id = db.StringProperty()
pledge_id = db.StringProperty(required=False)
class Pledge(db.Model):
model_version = db.IntegerProperty()
# a user's email is also the User model key
email = db.EmailProperty(required=True)
# this is the string id for the stripe api to access the customer. we are
# doing a whole stripe customer per pledge.
stripeCustomer = db.StringProperty()
# ID of a successful stripe transaction which occurred prior to creating this
# pledge.
stripe_charge_id = db.StringProperty()
# Paypal specific fields
paypalPayerID = db.StringProperty()
paypalTransactionID = db.StringProperty()
#BitPay specific field
bitpay_invoice_id = db.StringProperty()
# when the donation occurred
donationTime = db.DateTimeProperty(auto_now_add=True)
# we plan to have multiple fundraising rounds. right now we're in round "1"
fundraisingRound = db.StringProperty()
# what the user is pledging for
amountCents = db.IntegerProperty(required=True)
keepDonation = db.BooleanProperty(default=False)
# Enum for what kind of pledge this is, represented as a string for
# readability. Valid values are:
# - CONDITIONAL: only happens if we meet our goal.
# - DONATION: happens regardless
TYPE_CONDITIONAL = 'CONDITIONAL'
TYPE_DONATION = 'DONATION'
TYPE_VALUES = [TYPE_CONDITIONAL, TYPE_DONATION]
pledge_type = db.StringProperty()
note = db.TextProperty(required=False)
# Optionally, a pledge can be assigned to a "team".
team = db.StringProperty()
# Optionally, a pledge can be attributed to a "source".
source = db.StringProperty(required=False)
# If anonymous, the pledge shouldn't be displayed along with the user's name
# publically
anonymous = db.BooleanProperty(required=False, default=False)
# it's possible we'll want to let people change just their pledge. i can't
# imagine a bunch of people pledging with the same email address and then
# getting access to change a bunch of other people's credit card info, but
# maybe we should support users only changing information relating to a
# specific pledge. if so, this is their site-management password.
url_nonce = db.StringProperty(required=True)
thank_you_sent_at = db.DateTimeProperty(required=False)
# implementing recurring payment support! this is just recording if the
# payment is intended to be recurring
recurring = db.BooleanProperty(default=False)
end_date = db.DateTimeProperty(required=False)
recurrence_period = db.StringProperty(required=False)
# Allow donation to be used for Upton campaign
allowUpton = db.BooleanProperty(default=False)
# Whether recurring donation was upsold from the thank you page
  upsell = db.BooleanProperty(default=False)
  # Result of Stripe's address verification check (model version 14 and
  # later); earlier pledges are assumed to have passed it.
  addressCheckPass = db.BooleanProperty(default=True)
@staticmethod
def create(email, stripe_customer_id, stripe_charge_id,
paypal_payer_id, paypal_txn_id,
amount_cents, pledge_type, team, source, anonymous, bitpay_invoice_id,
recurring, recurrence_period, enddate, keep_donation, upsell,
addressCheckPass
):
assert pledge_type in Pledge.TYPE_VALUES
pledge = Pledge(model_version=MODEL_VERSION,
email=email,
stripeCustomer=stripe_customer_id,
stripe_charge_id=stripe_charge_id,
paypalPayerID=paypal_payer_id,
paypalTransactionID=paypal_txn_id,
amountCents=amount_cents,
pledge_type=pledge_type,
team=team,
source=source,
url_nonce=os.urandom(32).encode("hex"),
anonymous=anonymous,
bitpay_invoice_id=bitpay_invoice_id,
recurring=recurring,
end_date=enddate,
recurrence_period=recurrence_period,
                    keepDonation=keep_donation,
upsell=upsell,
addressCheckPass=addressCheckPass)
pledge.put()
if team:
TeamTotal.add(team, amount_cents)
return pledge
class TeamTotal(db.Model):
# this is also the model key
team = db.StringProperty(required=True)
totalCents = db.IntegerProperty(required=False)
num_pledges = db.IntegerProperty(required=False)
@classmethod
@db.transactional
def _create(cls, team_id, pledge_8_count, num_pledges):
tt = cls.get_by_key_name(team_id)
if tt is not None:
return tt
tt = cls(key_name=team_id, team=team_id, totalCents=pledge_8_count,
num_pledges=num_pledges)
tt.put()
return tt
@staticmethod
def _pledge8Count(team_id):
"""do this outside of a transaction"""
total = 0
for pledge in Pledge.all().filter("team =", team_id):
if pledge.model_version < 9:
total += pledge.amountCents
return total
@classmethod
def _get(cls, team_id):
tt = cls.get_by_key_name(team_id)
if tt is None:
tt = cls._create(team_id, cls._pledge8Count(team_id), 0)
return tt
@classmethod
def get(cls, team_id):
return cls._get(team_id).totalCents
@classmethod
@db.transactional
def _add(cls, team_id, amount_cents):
tt = cls.get_by_key_name(team_id)
tt.totalCents += amount_cents
try:
tt.num_pledges += 1
except:
tt.num_pledges = 1
tt.put()
@classmethod
def add(cls, team_id, amount_cents):
# make sure the team total exists first before we add
cls._get(team_id)
# okay safe to add
cls._add(team_id, amount_cents)
def addPledge(email,
amount_cents, pledge_type,
first_name, last_name, occupation, employer, phone,
target, team, source, mail_list_optin, anonymous, surveyResult=None,
stripe_customer_id=None, stripe_charge_id=None,
paypal_txn_id=None, paypal_payer_id=None,
address=None, city=None, state=None, zipCode=None,
bitpay_invoice_id = None, recurring = None,
recurrence_period = None, enddate = None, keep_donation = None,
upsell = None, addressCheckPass = True):
"""Creates a User model if one doesn't exist, finding one if one already
does, using the email as a user key. Then adds a Pledge to the User with
the given card token as a new credit card.
@return: the pledge
"""
# TODO: know if this is a bitcoin pledge and check all 3
# if not (stripe_customer_id or paypal_txn_id):
# raise Error('We must supply either stripe or Paypal ids')
# first, let's find the user by email
user = User.createOrUpdate(
email=email, first_name=first_name, last_name=last_name,
occupation=occupation, employer=employer, phone=phone, target=target,
mail_list_optin=mail_list_optin, surveyResult=surveyResult,
address=address, city=city, state=state, zipCode=zipCode )
return user, Pledge.create(email=email,
stripe_customer_id=stripe_customer_id,
stripe_charge_id=stripe_charge_id,
paypal_txn_id=paypal_txn_id,
paypal_payer_id=paypal_payer_id,
amount_cents=amount_cents,
pledge_type=pledge_type,
team=team,
source=source,
anonymous=anonymous,
bitpay_invoice_id = bitpay_invoice_id,
recurring = recurring,
enddate = enddate,
recurrence_period = recurrence_period,
keep_donation = keep_donation,
upsell = upsell,
addressCheckPass = addressCheckPass)
class WpPledge(db.Model):
# wp_post_id is also the model key
wp_post_id = db.IntegerProperty(required=True)
email = db.EmailProperty(required=True)
stripeCustomer = db.StringProperty(required=True)
amountCents = db.IntegerProperty(required=True)
donationTime = db.DateTimeProperty(required=True)
occupation = db.StringProperty(required=False)
employer = db.StringProperty(required=False)
phone = db.StringProperty(required=False)
target = db.StringProperty(required=False)
url_nonce = db.StringProperty(required=True)
keep_donation=db.BooleanProperty(default=False)
class ChargeStatus(db.Model):
"""Indicates whether a Pledge or WpPledge has been charged or not.
The key of this model must always be the child of a Pledge or WpPledge, with
key_name='SINGLETON'.
When a ChargeStatus is created, it represents permission to execute the charge
for the parent Pledge or WpPledge. When start_time is set, it indicates that
some task has attempted to execute that charge. When end_time is set, it
indicates that the charge was successfully completed, and that information
about that charge can be found in the other fields.
If start_time is sufficiently far in the past (10 minutes, say), and end_time
is as of yet unset, something went wrong which needs to be looked into
manually.
"""
SINGLETON_KEY = 'SINGLETON'
# These three times are as described in the comment above.
request_time = db.DateTimeProperty(required=True)
start_time = db.DateTimeProperty()
end_time = db.DateTimeProperty()
stripe_charge_id = db.StringProperty()
@staticmethod
@db.transactional
def request(pledge_key):
"""Indicates that we are allowed to execute the charge at our leisure."""
charge_key = ChargeStatus._get_charge_key(pledge_key)
pledge = db.get(pledge_key)
charge_status = db.get(charge_key)
if not pledge:
raise Error('No pledge found with key: %s' % pledge_key)
if charge_status:
logging.warning('Requesting already requested charge for pledge: %s',
pledge_key)
return
charge_status = ChargeStatus(key=charge_key,
request_time=datetime.datetime.now())
charge_status.put()
@staticmethod
def execute(stripe_backend, pledge_key):
"""Attempts to execute the charge.
First, sets the start_time atomically and releases the lock. Then tries to
charge the user. If successful, sets end_time and the paper trail for the
charge.
"""
charge_key = ChargeStatus._get_charge_key(pledge_key)
# First, indicate that we've started (or bail if someone else already has).
@db.transactional
def txn():
pledge = db.get(pledge_key)
charge = db.get(charge_key)
if not pledge:
raise Error('No pledge found with key: %s' % pledge_key)
if not charge:
raise Error('Cannot execute unrequested charge. No status for: %s' %
pledge_key)
if charge.start_time:
return True, None, None
else:
charge.start_time = datetime.datetime.now()
charge.put()
return False, pledge, charge
already_started, pledge, charge = txn()
if already_started:
logging.warning('Execution of charge already started for pledge %s',
pledge_key)
return
# TODO(hjfreyer): Generalize to paypal.
charge.stripe_charge_id = stripe_backend.Charge(pledge.stripeCustomer,
pledge.amountCents)
charge.end_time = datetime.datetime.now()
# Since we have the lock on this, the transaction should be unnecessary, but
# let's indulge in a little paranoia.
@db.transactional
def txn2():
charge2 = db.get(charge_key)
if charge2.end_time:
raise Error('Lock stolen while executing transaction! Pledge %s' %
pledge_key)
charge.put()
txn2()
@staticmethod
def _get_charge_key(pledge_key):
return db.Key.from_path('ChargeStatus', ChargeStatus.SINGLETON_KEY,
parent=pledge_key)
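# The intended calling sequence for ChargeStatus (sketch):
#
#   ChargeStatus.request(pledge.key())                   # authorize the charge
#   ChargeStatus.execute(stripe_backend, pledge.key())   # run it later, e.g. from a task
#
# where stripe_backend is whatever object the caller supplies; the only method
# used on it here is Charge(customer_id, amount_cents).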
class StretchCheckTotal(db.Model):
dollars = db.IntegerProperty()
@classmethod
def update(cls, newTotal):
total = cls.all().get()
if total:
total.dollars = newTotal
total.put()
else:
newTotalForDB = cls(dollars = newTotal)
newTotalForDB.put()
@classmethod
def get(cls):
total = cls.all().get()
if total:
return total.dollars
else:
logging.info('No StretchCheckTotal')
return 0
SHARD_KEY_TEMPLATE = 'shard-{}-{:d}'
SHARD_COUNT = 50
STRETCH_CACHE_MISS_TOTAL = 0
class ShardedCounter(db.Model):
count = db.IntegerProperty(default=0)
@staticmethod
def clear(name):
cache.ClearShardedCounterTotal(name)
@staticmethod
def get_count(name):
total = cache.GetShardedCounterTotal(name)
if total is None:
total = 0
all_keys = ShardedCounter._get_keys_for(name)
for counter in db.get(all_keys):
if counter is not None:
total += counter.count
logging.info("recalculated counter %s to %s", name, total)
# Add the stretch check total which is not reflected elsewhere in the counter
# And is set manually
# This is here so that is only read out on a cache miss
stretchCheckTotal = StretchCheckTotal.get()
if stretchCheckTotal < STRETCH_CACHE_MISS_TOTAL:
stretchCheckTotal = STRETCH_CACHE_MISS_TOTAL
total += stretchCheckTotal
cache.SetShardedCounterTotal(name, total)
return total
@staticmethod
def _get_keys_for(name):
shard_key_strings = [SHARD_KEY_TEMPLATE.format(name, index)
for index in range(SHARD_COUNT)]
return [db.Key.from_path('ShardedCounter', shard_key_string)
for shard_key_string in shard_key_strings]
@staticmethod
@db.transactional
def increment(name, delta):
index = random.randint(0, SHARD_COUNT - 1)
shard_key_string = SHARD_KEY_TEMPLATE.format(name, index)
counter = ShardedCounter.get_by_key_name(shard_key_string)
if counter is None:
counter = ShardedCounter(key_name=shard_key_string)
counter.count += delta
counter.put()
cache.IncrementShardedCounterTotal(name, delta)
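# Example usage (sketch; the counter name below is only illustrative, the real
# names are chosen by the callers of this module):
#
#   ShardedCounter.increment('TOTAL-5', pledge.amountCents)
#   total_cents = ShardedCounter.get_count('TOTAL-5')
#
# increment() touches a single randomly chosen shard inside a transaction, so
# concurrent writers contend on at most one of the SHARD_COUNT entities.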
class Issue(db.Model):
name = db.StringProperty(required=True)
count = db.IntegerProperty(required=False, default=1)
@staticmethod
@db.transactional
def upsert(name):
name = name.strip().title()
issue = Issue.get_by_key_name(name)
if issue is None:
issue = Issue(model_version=MODEL_VERSION,
key_name=name,
name=name,
count=1)
else:
issue.count += 1
issue.put()
return issue
class IssueVote(db.Model):
email = db.StringProperty(required=True)
name = db.StringProperty(required=True)
@staticmethod
@db.transactional
def tally(email, name):
IssueVote(model_version=MODEL_VERSION,
email=email,
name=name.strip().title()).put()
class CandidateVote(db.Model):
email = db.StringProperty(required=True)
name = db.StringProperty(required=True)
@staticmethod
@db.transactional
def tally(email, name):
CandidateVote(model_version=MODEL_VERSION,
email=email,
name=name.strip().title()).put()
class SimpleKv(db.Model):
"""A store for blobs of data that need to be temporarily stored somewhere.
Generally, using this probably isn't the right thing to do, but sometimes
you just need to persist a little data between requests.
Currently, this is used by:
* The paypal handler.
"""
value = db.StringProperty(required=True, indexed=False)
# SECONDARY MODELS
# ################
# These models are used as caches for other parts of the data model,
# and should always be regenerable. Do not make these the single
# source of truth for anything!
# Generated by commands.FindMissingDataUsers.
class MissingDataUsersSecondary(db.Model):
email = db.EmailProperty(required=True)
# amountCents never needs to be recomputed. The only way it can
# change is to go up, and if it does, it means the user pledged
# again, so they must have filled in their missing data.
amountCents = db.IntegerProperty(required=True)
def addNationBuilderDonation(email,
amount_cents, pledge_type,
first_name, last_name, occupation, employer, phone,
target, team, source, mail_list_optin, anonymous, surveyResult=None,
stripe_customer_id=None, stripe_charge_id=None,
paypal_txn_id=None, paypal_payer_id=None,
address=None, city=None, state=None, zipCode=None,
bitpay_invoice_id = None, recurring = None, enddate = None,
recurrence_period = None, nationBuilderVars = None):
nationbuilder_token = Secrets.get().nationbuilder_token
donation = {'amount_in_cents':amount_cents,
'email':email,
'succeeded_at': datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
}
donation['billing_address'] = {}
if first_name:
donation['first_name'] = first_name
if last_name:
donation['last_name'] = last_name
if occupation:
donation['occupation'] = occupation
if employer:
donation['employer'] = employer
if phone:
pass
if anonymous:
pass
if target:
pass
if team:
pass
if source:
pass
if surveyResult:
pass
if stripe_customer_id:
donation['stripe_customer_id'] = stripe_customer_id
if stripe_charge_id:
donation['stripe_charge_id'] = stripe_charge_id
if paypal_txn_id:
donation['paypal_txn_id'] = paypal_txn_id
if paypal_payer_id:
donation['paypal_payer_id'] = paypal_payer_id
if address:
donation['billing_address']['address1'] = address
if city:
donation['billing_address']['city'] = city
if state:
donation['billing_address']['state'] = state
if zipCode:
donation['billing_address']['zip'] = zipCode
if bitpay_invoice_id:
donation['bitpay_invoice_id'] = bitpay_invoice_id
if recurring:
donation['recurring'] = recurring
if enddate:
donation['end_date'] = enddate
if recurrence_period:
donation['recurrence_period'] = recurrence_period
if nationBuilderVars:
donation.update(nationBuilderVars)
nation_slug = "mayday"
access_token_url = "http://" + nation_slug + ".nationbuilder.com/oauth/token"
authorize_url = nation_slug + ".nationbuilder.com/oauth/authorize"
service = OAuth2Service(
client_id = "",
client_secret = "",
name = "anyname",
authorize_url = authorize_url,
access_token_url = access_token_url,
base_url = nation_slug + ".nationbuilder.com")
session = service.get_session(nationbuilder_token)
response = session.post('https://' + nation_slug +".nationbuilder.com/api/v1/donations",
data=json.dumps({'donation':donation}),
headers={"content-type":"application/json"}
)
| agpl-3.0 | 6,659,446,111,206,480,000 | 33.568987 | 93 | 0.662157 | false |
tvtsoft/odoo8 | addons/hr/hr.py | 1 | 18501 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.modules.module import get_module_resource
from openerp.osv import fields, osv
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class hr_employee_category(osv.Model):
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
reads = self.read(cr, uid, ids, ['name','parent_id'], context=context)
res = []
for record in reads:
name = record['name']
if record['parent_id']:
name = record['parent_id'][1]+' / '+name
res.append((record['id'], name))
return res
def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = self.name_get(cr, uid, ids, context=context)
return dict(res)
_name = "hr.employee.category"
_description = "Employee Category"
_columns = {
'name': fields.char("Employee Tag", required=True),
'complete_name': fields.function(_name_get_fnc, type="char", string='Name'),
'parent_id': fields.many2one('hr.employee.category', 'Parent Category', select=True),
'child_ids': fields.one2many('hr.employee.category', 'parent_id', 'Child Categories'),
'employee_ids': fields.many2many('hr.employee', 'employee_category_rel', 'category_id', 'emp_id', 'Employees'),
}
_constraints = [
(osv.osv._check_recursion, _('Error! You cannot create recursive category.'), ['parent_id'])
]
class hr_job(osv.Model):
def _get_nbr_employees(self, cr, uid, ids, name, args, context=None):
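        """Return, per job, the current employee count and the expected total
        (current employees plus planned recruitment)."""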
res = {}
for job in self.browse(cr, uid, ids, context=context):
nb_employees = len(job.employee_ids or [])
res[job.id] = {
'no_of_employee': nb_employees,
'expected_employees': nb_employees + job.no_of_recruitment,
}
return res
def _get_job_position(self, cr, uid, ids, context=None):
res = []
for employee in self.pool.get('hr.employee').browse(cr, uid, ids, context=context):
if employee.job_id:
res.append(employee.job_id.id)
return res
_name = "hr.job"
_description = "Job Position"
_inherit = ['mail.thread']
_columns = {
'name': fields.char('Job Name', required=True, select=True, translate=True),
'expected_employees': fields.function(_get_nbr_employees, string='Total Forecasted Employees',
help='Expected number of employees for this job position after new recruitment.',
store = {
'hr.job': (lambda self,cr,uid,ids,c=None: ids, ['no_of_recruitment'], 10),
'hr.employee': (_get_job_position, ['job_id'], 10),
}, type='integer',
multi='_get_nbr_employees'),
'no_of_employee': fields.function(_get_nbr_employees, string="Current Number of Employees",
help='Number of employees currently occupying this job position.',
store = {
'hr.employee': (_get_job_position, ['job_id'], 10),
}, type='integer',
multi='_get_nbr_employees'),
'no_of_recruitment': fields.integer('Expected New Employees', copy=False,
help='Number of new employees you expect to recruit.'),
'no_of_hired_employee': fields.integer('Hired Employees', copy=False,
help='Number of hired employees for this job position during recruitment phase.'),
'employee_ids': fields.one2many('hr.employee', 'job_id', 'Employees', groups='base.group_user'),
'description': fields.text('Job Description'),
'requirements': fields.text('Requirements'),
'department_id': fields.many2one('hr.department', 'Department'),
'company_id': fields.many2one('res.company', 'Company'),
'state': fields.selection([('recruit', 'Recruitment in Progress'), ('open', 'Recruitment Closed')],
string='Status', readonly=True, required=True,
track_visibility='always', copy=False,
help="Set whether the recruitment process is open or closed for this job position."),
'write_date': fields.datetime('Update Date', readonly=True),
}
_defaults = {
'company_id': lambda self, cr, uid, ctx=None: self.pool.get('res.company')._company_default_get(cr, uid, 'hr.job', context=ctx),
'state': 'recruit',
'no_of_recruitment' : 1,
}
_sql_constraints = [
('name_company_uniq', 'unique(name, company_id, department_id)', 'The name of the job position must be unique per department in company!'),
]
def set_recruit(self, cr, uid, ids, context=None):
for job in self.browse(cr, uid, ids, context=context):
no_of_recruitment = job.no_of_recruitment == 0 and 1 or job.no_of_recruitment
self.write(cr, uid, [job.id], {'state': 'recruit', 'no_of_recruitment': no_of_recruitment}, context=context)
return True
def set_open(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {
'state': 'open',
'no_of_recruitment': 0,
'no_of_hired_employee': 0
}, context=context)
return True
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
if 'name' not in default:
job = self.browse(cr, uid, id, context=context)
default['name'] = _("%s (copy)") % (job.name)
return super(hr_job, self).copy(cr, uid, id, default=default, context=context)
# ----------------------------------------
# Compatibility methods
# ----------------------------------------
_no_of_employee = _get_nbr_employees # v7 compatibility
job_open = set_open # v7 compatibility
job_recruitment = set_recruit # v7 compatibility
class hr_employee(osv.osv):
_name = "hr.employee"
_description = "Employee"
_order = 'name_related'
_inherits = {'resource.resource': "resource_id"}
_inherit = ['mail.thread']
_mail_post_access = 'read'
def _get_image(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = tools.image_get_resized_images(obj.image)
return result
def _set_image(self, cr, uid, id, name, value, args, context=None):
return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)
_columns = {
#we need a related field in order to be able to sort the employee by name
'name_related': fields.related('resource_id', 'name', type='char', string='Name', readonly=True, store=True),
'country_id': fields.many2one('res.country', 'Nationality (Country)'),
'birthday': fields.date("Date of Birth"),
'ssnid': fields.char('SSN No', help='Social Security Number'),
'sinid': fields.char('SIN No', help="Social Insurance Number"),
'identification_id': fields.char('Identification No'),
'gender': fields.selection([('male', 'Male'), ('female', 'Female'), ('other', 'Other')], 'Gender'),
'marital': fields.selection([('single', 'Single'), ('married', 'Married'), ('widower', 'Widower'), ('divorced', 'Divorced')], 'Marital Status'),
'department_id': fields.many2one('hr.department', 'Department'),
'address_id': fields.many2one('res.partner', 'Working Address'),
'address_home_id': fields.many2one('res.partner', 'Home Address'),
'bank_account_id': fields.many2one('res.partner.bank', 'Bank Account Number', domain="[('partner_id','=',address_home_id)]", help="Employee bank salary account"),
'work_phone': fields.char('Work Phone', readonly=False),
'mobile_phone': fields.char('Work Mobile', readonly=False),
'work_email': fields.char('Work Email', size=240),
'work_location': fields.char('Work Location'),
'notes': fields.text('Notes'),
'parent_id': fields.many2one('hr.employee', 'Manager'),
'category_ids': fields.many2many('hr.employee.category', 'employee_category_rel', 'emp_id', 'category_id', 'Tags'),
'child_ids': fields.one2many('hr.employee', 'parent_id', 'Subordinates'),
'resource_id': fields.many2one('resource.resource', 'Resource', ondelete='cascade', required=True, auto_join=True),
'coach_id': fields.many2one('hr.employee', 'Coach'),
'job_id': fields.many2one('hr.job', 'Job Title'),
# image: all image fields are base64 encoded and PIL-supported
'image': fields.binary("Photo",
help="This field holds the image used as photo for the employee, limited to 1024x1024px."),
'image_medium': fields.function(_get_image, fnct_inv=_set_image,
string="Medium-sized photo", type="binary", multi="_get_image",
store = {
'hr.employee': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Medium-sized photo of the employee. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved. "\
"Use this field in form views or some kanban views."),
'image_small': fields.function(_get_image, fnct_inv=_set_image,
string="Small-sized photo", type="binary", multi="_get_image",
store = {
'hr.employee': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Small-sized photo of the employee. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required."),
'passport_id': fields.char('Passport No'),
'color': fields.integer('Color Index'),
'city': fields.related('address_id', 'city', type='char', string='City'),
'login': fields.related('user_id', 'login', type='char', string='Login', readonly=1),
'last_login': fields.related('user_id', 'date', type='datetime', string='Latest Connection', readonly=1),
}
def _get_default_image(self, cr, uid, context=None):
image_path = get_module_resource('hr', 'static/src/img', 'default_image.png')
return tools.image_resize_image_big(open(image_path, 'rb').read().encode('base64'))
    _defaults = {
'active': 1,
'image': _get_default_image,
'color': 0,
}
def unlink(self, cr, uid, ids, context=None):
resource_ids = []
for employee in self.browse(cr, uid, ids, context=context):
resource_ids.append(employee.resource_id.id)
super(hr_employee, self).unlink(cr, uid, ids, context=context)
return self.pool.get('resource.resource').unlink(cr, uid, resource_ids, context=context)
def onchange_address_id(self, cr, uid, ids, address, context=None):
if address:
address = self.pool.get('res.partner').browse(cr, uid, address, context=context)
return {'value': {'work_phone': address.phone, 'mobile_phone': address.mobile}}
return {'value': {}}
def onchange_company(self, cr, uid, ids, company, context=None):
address_id = False
if company:
company_id = self.pool.get('res.company').browse(cr, uid, company, context=context)
address = self.pool.get('res.partner').address_get(cr, uid, [company_id.partner_id.id], ['default'])
address_id = address and address['default'] or False
return {'value': {'address_id': address_id}}
def onchange_department_id(self, cr, uid, ids, department_id, context=None):
value = {'parent_id': False}
if department_id:
department = self.pool.get('hr.department').browse(cr, uid, department_id)
value['parent_id'] = department.manager_id.id
return {'value': value}
def onchange_user(self, cr, uid, ids, user_id, context=None):
if user_id:
user = self.pool['res.users'].browse(cr, uid, user_id, context=context)
values = {
'name': user.name,
'work_email': user.email,
'image': user.image,
}
            return {'value': values}
        return {'value': {}}
def action_follow(self, cr, uid, ids, context=None):
""" Wrapper because message_subscribe_users take a user_ids=None
that receive the context without the wrapper. """
return self.message_subscribe_users(cr, uid, ids, context=context)
def action_unfollow(self, cr, uid, ids, context=None):
""" Wrapper because message_unsubscribe_users take a user_ids=None
that receive the context without the wrapper. """
return self.message_unsubscribe_users(cr, uid, ids, context=context)
def _message_get_auto_subscribe_fields(self, cr, uid, updated_fields, auto_follow_fields=None, context=None):
""" Overwrite of the original method to always follow user_id field,
even when not track_visibility so that a user will follow it's employee
"""
if auto_follow_fields is None:
auto_follow_fields = ['user_id']
user_field_lst = []
for name, field in self._fields.items():
if name in auto_follow_fields and name in updated_fields and field.comodel_name == 'res.users':
user_field_lst.append(name)
return user_field_lst
_constraints = [(osv.osv._check_recursion, _('Error! You cannot create recursive hierarchy of Employee(s).'), ['parent_id']),]
class hr_department(osv.osv):
_name = "hr.department"
_description = "HR Department"
_inherit = ['mail.thread', 'ir.needaction_mixin']
def _dept_name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = self.name_get(cr, uid, ids, context=context)
return dict(res)
_columns = {
'name': fields.char('Department Name', required=True),
'complete_name': fields.function(_dept_name_get_fnc, type="char", string='Name'),
'company_id': fields.many2one('res.company', 'Company', select=True, required=False),
'parent_id': fields.many2one('hr.department', 'Parent Department', select=True),
'child_ids': fields.one2many('hr.department', 'parent_id', 'Child Departments'),
'manager_id': fields.many2one('hr.employee', 'Manager', track_visibility='onchange'),
'member_ids': fields.one2many('hr.employee', 'department_id', 'Members', readonly=True),
'jobs_ids': fields.one2many('hr.job', 'department_id', 'Jobs'),
'note': fields.text('Note'),
'color': fields.integer('Color Index'),
}
_defaults = {
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'hr.department', context=c),
}
_constraints = [
(osv.osv._check_recursion, _('Error! You cannot create recursive departments.'), ['parent_id'])
]
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if isinstance(ids, (int, long)):
ids = [ids]
if context is None:
context = {}
reads = self.read(cr, uid, ids, ['name','parent_id'], context=context)
res = []
for record in reads:
name = record['name']
if record['parent_id']:
name = record['parent_id'][1]+' / '+name
res.append((record['id'], name))
return res
def create(self, cr, uid, vals, context=None):
# TDE note: auto-subscription of manager done by hand, because currently
# the tracking allows to track+subscribe fields linked to a res.user record
# An update of the limited behavior should come, but not currently done.
manager_id = vals.get("manager_id")
new_id = super(hr_department, self).create(cr, uid, vals, context=context)
if manager_id:
employee = self.pool.get('hr.employee').browse(cr, uid, manager_id, context=context)
if employee.user_id:
self.message_subscribe_users(cr, uid, [new_id], user_ids=[employee.user_id.id], context=context)
return new_id
def write(self, cr, uid, ids, vals, context=None):
# TDE note: auto-subscription of manager done by hand, because currently
# the tracking allows to track+subscribe fields linked to a res.user record
# An update of the limited behavior should come, but not currently done.
if isinstance(ids, (int, long)):
ids = [ids]
employee_ids = []
if 'manager_id' in vals:
manager_id = vals.get("manager_id")
if manager_id:
employee = self.pool['hr.employee'].browse(cr, uid, manager_id, context=context)
if employee.user_id:
self.message_subscribe_users(cr, uid, ids, user_ids=[employee.user_id.id], context=context)
for department in self.browse(cr, uid, ids, context=context):
employee_ids += self.pool['hr.employee'].search(
cr, uid, [
('id', '!=', manager_id),
('department_id', '=', department.id),
('parent_id', '=', department.manager_id.id)
], context=context)
self.pool['hr.employee'].write(cr, uid, employee_ids, {'parent_id': manager_id}, context=context)
return super(hr_department, self).write(cr, uid, ids, vals, context=context)
class res_users(osv.osv):
_name = 'res.users'
_inherit = 'res.users'
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
result = super(res_users, self).write(cr, uid, ids, vals, context=context)
employee_obj = self.pool.get('hr.employee')
if vals.get('name'):
for user_id in ids:
if user_id == SUPERUSER_ID:
employee_ids = employee_obj.search(cr, uid, [('user_id', '=', user_id)])
employee_obj.write(cr, uid, employee_ids, {'name': vals['name']}, context=context)
return result
| agpl-3.0 | 5,865,851,376,304,998,000 | 47.686842 | 170 | 0.591914 | false |
margulies/surfdist | examples/demo.py | 1 | 1682 | import nibabel as nib
import numpy as np
import matplotlib.pyplot as plt
import os
import surfdist as sd
# calculate and display distance from central sulcus at each node:
cmap='coolwarm'
base_dir = '/Applications/freesurfer/subjects/'
surf = nib.freesurfer.read_geometry(os.path.join(base_dir, 'bert/surf/lh.pial'))
cort = np.sort(nib.freesurfer.read_label(os.path.join(base_dir, 'bert/label/lh.cortex.label')))
sulc=nib.freesurfer.read_morph_data(os.path.join(base_dir, 'bert/surf/lh.sulc'))
# load central sulcus nodes
src = sd.load.load_freesurfer_label(os.path.join(base_dir, 'bert/label/lh.aparc.a2009s.annot'), 'S_central', cort)
# calculate distance
dist = sd.analysis.dist_calc(surf, cort, src)
# visualize
plot_med = sd.viz.viz(surf[0], surf[1], dist, bg_map=sulc, bg_on_stat=True, cmap=cmap)
plot_lat = sd.viz.viz(surf[0], surf[1], dist, azim=180, bg_map=sulc, bg_on_stat=True, cmap=cmap)
# Calculate distances on native surface and display on fsaverage
fsa4 = nib.freesurfer.read_geometry(os.path.join(base_dir,'fsaverage4/surf/lh.sphere.reg'))[0]
fsa4_sulc=nib.freesurfer.read_morph_data(os.path.join(base_dir, 'fsaverage4/surf/lh.sulc'))
native = nib.freesurfer.read_geometry(os.path.join(base_dir, 'bert/surf/lh.sphere.reg'))[0]
idx_fsa4_to_native = sd.utils.find_node_match(fsa4, native)[0]
surf_fsa4 = nib.freesurfer.read_geometry(os.path.join(base_dir, 'fsaverage4/surf/lh.pial'))
plot_fsa4_med = sd.viz.viz(surf_fsa4[0], surf_fsa4[1], dist[idx_fsa4_to_native], bg_map=fsa4_sulc, bg_on_stat=True, cmap=cmap)
plot_fsa4_lat = sd.viz.viz(surf_fsa4[0], surf_fsa4[1], dist[idx_fsa4_to_native], azim=180, bg_map=fsa4_sulc, bg_on_stat=True, cmap=cmap)
plt.show()
| mit | 3,033,224,661,328,431,000 | 48.470588 | 136 | 0.738407 | false |
shoopio/shoop | shuup_tests/core/test_order_refund_taxes.py | 1 | 7789 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from collections import defaultdict
from decimal import Decimal
import pytest
from shuup.core.defaults.order_statuses import create_default_order_statuses
from shuup.core.models import OrderLine, OrderLineType, Supplier
from shuup.core.order_creator import OrderCreator, OrderSource
from shuup.testing import factories
from shuup.utils.money import Money
from shuup.utils.numbers import bankers_round
def bround(value):
return bankers_round(value, 2)
@pytest.mark.parametrize("include_tax", [True, False])
@pytest.mark.django_db
def test_order_full_refund_with_taxes(include_tax):
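    """A full refund should zero the order total and mirror each refundable
    line's taxful/taxless price in its refund child line."""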
tax_rate = Decimal(0.2) # 20%
product_price = 100
discount_amount = 30
random_line_price = 5
shop = factories.get_shop(include_tax)
source = OrderSource(shop)
source.status = factories.get_initial_order_status()
supplier = factories.get_default_supplier()
create_default_order_statuses()
tax = factories.get_tax("sales-tax", "Sales Tax", tax_rate)
factories.create_default_tax_rule(tax)
product = factories.create_product("sku", shop=shop, supplier=supplier, default_price=product_price)
line = source.add_line(
line_id="product-line",
type=OrderLineType.PRODUCT,
product=product,
supplier=supplier,
quantity=1,
shop=shop,
base_unit_price=source.create_price(product_price),
)
discount_line = source.add_line(
line_id="discount-line",
type=OrderLineType.DISCOUNT,
supplier=supplier,
quantity=1,
base_unit_price=source.create_price(0),
discount_amount=source.create_price(discount_amount),
parent_line_id=line.line_id
)
raw_total_price = Decimal(product_price - discount_amount)
total_taxful = bround(source.taxful_total_price.value)
total_taxless = bround(source.taxless_total_price.value)
if include_tax:
assert total_taxful == bround(raw_total_price)
assert total_taxless == bround(raw_total_price / (1 + tax_rate))
else:
assert total_taxful == bround(raw_total_price * (1 + tax_rate))
assert total_taxless == bround(raw_total_price)
# Lines without quantity shouldn't affect refunds
other_line = source.add_line(
text="This random line for textual information",
line_id="other-line",
type=OrderLineType.OTHER,
quantity=0
)
# Lines with quantity again should be able to be refunded normally.
other_line_with_quantity = source.add_line(
line_id="other_line_with_quantity",
type=OrderLineType.OTHER,
text="Special service $5/h",
quantity=1,
base_unit_price=source.create_price(random_line_price)
)
raw_total_price = Decimal(product_price - discount_amount + random_line_price)
total_taxful = bround(source.taxful_total_price.value)
total_taxless = bround(source.taxless_total_price.value)
if include_tax:
assert total_taxful == bround(raw_total_price)
assert total_taxless == bround(raw_total_price / (1 + tax_rate))
else:
assert total_taxful == bround(raw_total_price * (1 + tax_rate))
assert total_taxless == bround(raw_total_price)
creator = OrderCreator()
order = creator.create_order(source)
assert order.taxful_total_price.value == total_taxful
assert order.taxless_total_price.value == total_taxless
order.create_payment(order.taxful_total_price)
assert order.is_paid()
order.create_full_refund()
assert order.taxful_total_price_value == 0
for parent_order_line in order.lines.filter(parent_line__isnull=True):
if parent_order_line.quantity == 0:
assert not parent_order_line.child_lines.exists()
else:
refund_line = parent_order_line.child_lines.filter(type=OrderLineType.REFUND).first()
assert refund_line
assert parent_order_line.taxful_price.value == -refund_line.taxful_price.value
assert parent_order_line.taxless_price.value == -refund_line.taxless_price.value
assert parent_order_line.price.value == -refund_line.price.value
@pytest.mark.parametrize("include_tax", [True, False])
@pytest.mark.django_db
def test_order_partial_refund_with_taxes(include_tax):
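    """A partial refund on the product line should reduce the taxful and
    taxless totals consistently for tax-inclusive and tax-exclusive shops."""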
tax_rate = Decimal(0.2) # 20%
product_price = 100
discount_amount = 30
random_line_price = 5
refunded_amount = 15
shop = factories.get_shop(include_tax)
source = OrderSource(shop)
source.status = factories.get_initial_order_status()
supplier = factories.get_default_supplier()
create_default_order_statuses()
tax = factories.get_tax("sales-tax", "Sales Tax", tax_rate)
factories.create_default_tax_rule(tax)
product = factories.create_product("sku", shop=shop, supplier=supplier, default_price=product_price)
line = source.add_line(
line_id="product-line",
type=OrderLineType.PRODUCT,
product=product,
supplier=supplier,
quantity=1,
shop=shop,
base_unit_price=source.create_price(product_price),
)
discount_line = source.add_line(
line_id="discount-line",
type=OrderLineType.DISCOUNT,
supplier=supplier,
quantity=1,
base_unit_price=source.create_price(0),
discount_amount=source.create_price(discount_amount),
parent_line_id=line.line_id
)
raw_total_price = Decimal(product_price - discount_amount)
total_taxful = bround(source.taxful_total_price.value)
total_taxless = bround(source.taxless_total_price.value)
if include_tax:
assert total_taxful == bround(raw_total_price)
assert total_taxless == bround(raw_total_price / (1 + tax_rate))
else:
assert total_taxful == bround(raw_total_price * (1 + tax_rate))
assert total_taxless == bround(raw_total_price)
creator = OrderCreator()
order = creator.create_order(source)
assert order.taxful_total_price.value == total_taxful
assert order.taxless_total_price.value == total_taxless
order.create_payment(order.taxful_total_price)
assert order.is_paid()
refund_data = [dict(
amount=Money(refunded_amount, shop.currency),
quantity=1,
line=order.lines.products().first(),
)]
order.create_refund(refund_data)
total_taxful = bround(order.taxful_total_price.value)
total_taxless = bround(order.taxless_total_price.value)
taxless_refunded_amount = (refunded_amount / (1 + tax_rate))
if include_tax:
raw_total_price = Decimal(product_price - discount_amount - refunded_amount)
assert total_taxful == bround(raw_total_price)
assert total_taxless == bround(raw_total_price / (1 + tax_rate))
else:
# the refunded amount it considered a taxful price internally
raw_total_price = Decimal(product_price - discount_amount)
assert total_taxful == bround((raw_total_price * (1 + tax_rate)) - refunded_amount)
assert total_taxless == bround(raw_total_price - taxless_refunded_amount)
refund_line = order.lines.refunds().filter(type=OrderLineType.REFUND).first()
if include_tax:
assert refund_line.taxful_price.value == -bround(refunded_amount)
assert refund_line.taxless_price.value == -bround(taxless_refunded_amount)
else:
assert refund_line.taxful_price.value == -bround(refunded_amount)
assert refund_line.taxless_price.value == -bround(taxless_refunded_amount)
| agpl-3.0 | 1,890,248,602,022,425,900 | 37.369458 | 104 | 0.679163 | false |
jat255/hyperspyUI | hyperspyui/signalwrapper.py | 1 | 10542 | # -*- coding: utf-8 -*-
# Copyright 2014-2016 The HyperSpyUI developers
#
# This file is part of HyperSpyUI.
#
# HyperSpyUI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpyUI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpyUI. If not, see <http://www.gnu.org/licenses/>.
"""
Created on Fri Oct 24 18:27:15 2014
@author: Vidar Tonaas Fauske
"""
from .util import fig2win
from qtpy import QtCore
from .modelwrapper import ModelWrapper
from .actionable import Actionable
class SignalWrapper(Actionable):
closing = QtCore.Signal()
model_added = QtCore.Signal(object)
model_removed = QtCore.Signal(object)
_untitled_counter = 0
def __init__(self, signal, mainwindow, name=None):
super(SignalWrapper, self).__init__()
self.signal = signal
# Override replot on Signal instance
self._old_replot = signal._replot
signal._replot = self._replot
if name is None:
if signal.metadata.General.title:
name = signal.metadata.General.title
elif signal.tmp_parameters.has_item('filename'):
name = signal.tmp_parameters.filename
else:
name = "Untitled %d" % SignalWrapper._untitled_counter
SignalWrapper._untitled_counter += 1
self.name = name
self.figures = []
self.mainwindow = mainwindow
self.models = []
self._keep_on_close = 0
self._magic_jump = (8, 30)
self.navigator_plot = None
self.signal_plot = None
self._nav_geom = None
self._sig_geom = None
self._replotargs = ((), {})
self._model_id = 1
self.add_action('plot', "&Plot", self.replot)
self.add_action('add_model', "Add &model", self.make_model)
self.add_separator()
self.add_action('close', "&Close", self.close)
@property
def keep_on_close(self):
return self._keep_on_close > 0
@keep_on_close.setter
def keep_on_close(self, value):
if value:
self._keep_on_close += 1
else:
if self._keep_on_close > 0:
self._keep_on_close -= 1
def plot(self, *args, **kwargs):
self.keep_on_close = True
self.signal.plot(*args, **kwargs)
self.keep_on_close = False
self.update_figures()
self._replotargs = (args, kwargs)
self.mainwindow.main_frame.subWindowActivated.emit(
self.mainwindow.main_frame.activeSubWindow())
# Redraw plot needed for pyqt5
if self.signal._plot and self.signal._plot.signal_plot:
self.signal._plot.signal_plot.figure.canvas.draw_idle()
def _replot(self):
if self.signal._plot is not None:
if self.signal._plot.is_active() is True:
self.replot()
def replot(self):
old = self.mainwindow.updatesEnabled()
self.mainwindow.setUpdatesEnabled(False)
try:
self.plot(*self._replotargs[0], **self._replotargs[1])
finally:
self.mainwindow.setUpdatesEnabled(old)
def switch_signal(self, new_signal):
"""
Switch the signal wrapped by this wrapper. To complete the switch, the
signal should also be replotted if previously plotted. For performance
reasons this is left as the responsibility of the caller.
"""
old_signal = self.signal
self.signal = new_signal
idx = -1
for i, s in enumerate(self.mainwindow.hspy_signals):
if s is old_signal:
idx = i
break
self.mainwindow.lut_signalwrapper[new_signal] = self
del self.mainwindow.lut_signalwrapper[old_signal]
if idx >= 0:
self.mainwindow.hspy_signals[idx] = new_signal
def update(self):
if self.navigator_plot is not None:
self.navigator_plot.update()
if self.signal_plot is not None:
self.signal_plot.update()
def update_figures(self):
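        """Re-resolve the Qt windows holding hyperspy's navigator and signal
        plots, rewiring close handlers and restoring saved window geometry."""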
old_nav = self.navigator_plot
old_sig = self.signal_plot
self.remove_figure(old_nav)
self.remove_figure(old_sig)
self.navigator_plot = None
self.signal_plot = None
atleast_one_changed = False
# If we have a navigator plot
if self.signal._plot and self.signal._plot.navigator_plot:
# Set internal `navigator_plot` to window containing it
navi = self.signal._plot.navigator_plot.figure
self.navigator_plot = fig2win(navi, self.mainwindow.figures)
# Did the window change?
if old_nav is not self.navigator_plot:
# Process the plot
title = navi.axes[0].set_title("") # remove title
title.set_visible(False)
# Wire closing event
self.navigator_plot.closing.connect(self.nav_closing)
# Set a reference on window to self
self.navigator_plot.setProperty('hyperspyUI.SignalWrapper',
self)
# Add to figures list
self.add_figure(self.navigator_plot)
# Did we have a previous window?
if old_nav is not None:
navi.tight_layout()
# Save geometry of old, and make sure it is closed
self._nav_geom = old_nav.saveGeometry()
old_nav.closing.disconnect(self.nav_closing)
old_nav.close()
atleast_one_changed = True
# If we have stored geometry, and a valid plot, restore
if self._nav_geom is not None and self.navigator_plot is not None:
self.navigator_plot.restoreGeometry(self._nav_geom)
self._nav_geom = None
if self.signal._plot and self.signal._plot.signal_plot is not None:
sigp = self.signal._plot.signal_plot.figure
self.signal_plot = fig2win(sigp, self.mainwindow.figures)
if old_sig is not self.signal_plot:
title = sigp.axes[0].set_title("")
title.set_visible(False)
self.signal_plot.closing.connect(self.sig_closing)
self.signal_plot.setProperty('hyperspyUI.SignalWrapper', self)
self.add_figure(self.signal_plot)
if old_sig is not None:
sigp.tight_layout()
self._sig_geom = old_sig.saveGeometry()
old_sig.closing.disconnect(self.sig_closing)
old_sig.close()
atleast_one_changed = True
if self._sig_geom is not None and self.signal_plot is not None:
self.signal_plot.restoreGeometry(self._sig_geom)
self._sig_geom = None
if atleast_one_changed:
self.mainwindow.check_action_selections()
def add_figure(self, fig):
self.figures.append(fig)
def remove_figure(self, fig):
if fig in self.figures:
self.figures.remove(fig)
def as_signal2D(self, axis=(0, 1)):
signal = self.signal
        self.close()  # Store geometry and close
# Swap geometries
tmp = self._sig_geom
self._sig_geom = self._nav_geom
self._nav_geom = tmp
self.signal = signal.as_signal2D(axis)
def as_signal1D(self, axis=0):
signal = self.signal
        self.close()  # Store geometry and close
# Swap geometries
tmp = self._sig_geom
self._sig_geom = self._nav_geom
self._nav_geom = tmp
self.signal = signal.as_signal1D(axis)
def make_model(self, *args, **kwargs):
m = self.signal.create_model(*args, **kwargs)
self.mainwindow.record_code("signal = ui.get_selected_signal()")
self.mainwindow.record_code("model = signal.create_model()")
# modelname = self.signal.metadata.General.title
modelname = "Model %d" % self._model_id
self._model_id += 1
mw = ModelWrapper(m, self, modelname)
self.add_model(mw)
mw.plot()
return mw
def add_model(self, model):
self.models.append(model)
self.model_added.emit(model)
def remove_model(self, model):
self.models.remove(model)
self.model_removed.emit(model)
self.plot()
def nav_closing(self):
if self.navigator_plot:
p = self.navigator_plot.pos()
self.navigator_plot.move(p.x() + self._magic_jump[0],
p.y() + self._magic_jump[1])
self._nav_geom = self.navigator_plot.saveGeometry()
self.navigator_plot = None
if self.signal_plot is None:
self._closed()
def sig_closing(self):
if self.signal_plot:
p = self.signal_plot.pos()
# For some reason the position changes -8,-30 on closing, at least
# it does on windows 7, Qt4.
self.signal_plot.move(p.x() + self._magic_jump[0],
p.y() + self._magic_jump[1])
self._sig_geom = self.signal_plot.saveGeometry()
if self.navigator_plot is not None:
self.navigator_plot.close()
self.navigator_plot = None
self.signal_plot = None
self._closed()
def close(self):
if self.signal_plot is not None:
self.signal_plot.close()
self.signal_plot = None
if self.navigator_plot is not None:
self.navigator_plot.close()
self.navigator_plot = None
self._closed()
def _closed(self):
if not self.keep_on_close:
self.closing.emit()
        # TODO: Should probably be handled by events for consistency
if self in self.mainwindow.signals and not self.keep_on_close:
self.mainwindow.signals.remove(self)
self.signal._replot = self._old_replot
self._old_replot = None
self.signal = None
| gpl-3.0 | 7,307,429,685,893,864,000 | 35.86014 | 82 | 0.580725 | false |
kenb123/Basic-Expression-Lexicon-Variation-Algorithms-BELVA | plugins/policies/mutate/policy_multiply_word_by_4.py | 1 | 2407 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#--------------------------------------------------------------------------------------------------
# pyOwaspBELVA - Contextual custom dictionary builder with character and word variations for pen-testers
# Copyright (C) 2016 OWASP Foundation / Kenneth F. Belva
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#--------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------
# This project is named after my amazing father:
# Albert Joseph BELVA
#
# And, it is dedicated to him and his memory.
#
# This dedication and project is to raise awareness for
# Lewy Body Disease / Dementia which my father lived with
# since his mid-60s until his passing at 72.
#
# More information on Lewy Body Dementia may be found here:
# https://en.wikipedia.org/wiki/Dementia_with_Lewy_bodies
#
# Please add this dedication to every file in the project.
# Thank you much. -Ken
#--------------------------------------------------------------------------------------------------
import os.path, sys
class About():
def run(self):
status_dict = {}
status_dict['active'] = True
# status_dict['active'] = False
return status_dict
class Description():
def run(self):
DescriptionDetails = {}
DescriptionDetails['name'] = "Multiply Word by Four"
return DescriptionDetails
class MutateWord():
def run(self, word):
#all words must be returned in an array
return_array = []
return_array.append(str(word) + str(word) + str(word) + str(word))
return return_array
| gpl-3.0 | -7,133,153,777,334,881,000 | 30.684211 | 108 | 0.560449 | false |
Donkyhotay/MoonPy | zope/app/publisher/browser/menumeta.py | 1 | 10323 | ##############################################################################
#
# Copyright (c) 2004 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Menu Directives Configuration Handlers
$Id: menumeta.py 67630 2006-04-27 00:54:03Z jim $
"""
import zope.component
from zope.configuration.exceptions import ConfigurationError
from zope.interface.interface import InterfaceClass
from zope.interface import Interface
from zope.publisher.interfaces.browser import IBrowserRequest
from zope.publisher.interfaces.browser import IDefaultBrowserLayer
from zope.security.checker import InterfaceChecker, CheckerPublic
from zope.component.interface import provideInterface
from zope.component.zcml import adapter, proxify, utility
from zope.app.component.contentdirective import ClassDirective
from zope.app.pagetemplate.engine import Engine
from zope.app.container.interfaces import IAdding
from zope.app.publisher.browser.menu import BrowserMenu
from zope.app.publisher.browser.menu import BrowserMenuItem, BrowserSubMenuItem
from zope.app.publisher.interfaces.browser import IBrowserMenu
from zope.app.publisher.interfaces.browser import IBrowserMenuItem
from zope.app.publisher.interfaces.browser import IMenuItemType
from zope.app.publisher.interfaces.browser import AddMenu
# Create special modules that contain all menu item types
from types import ModuleType as module
import sys
menus = module('menus')
sys.modules['zope.app.menus'] = menus
_order_counter = {}
def menuDirective(_context, id=None, class_=BrowserMenu, interface=None,
title=u'', description=u''):
"""Registers a new browser menu."""
if id is None and interface is None:
raise ConfigurationError(
"You must specify the 'id' or 'interface' attribute.")
if interface is None:
interface = InterfaceClass(id, (),
__doc__='Menu Item Type: %s' %id,
__module__='zope.app.menus')
# Add the menu item type to the `menus` module.
# Note: We have to do this immediately, so that directives using the
# MenuField can find the menu item type.
setattr(menus, id, interface)
path = 'zope.app.menus.' + id
else:
path = interface.__module__ + '.' + interface.getName()
# If an id was specified, make this menu available under this id.
# Note that the menu will be still available under its path, since it
# is an adapter, and the `MenuField` can resolve paths as well.
if id is None:
id = path
else:
# Make the interface available in the `zope.app.menus` module, so
# that other directives can find the interface under the name
# before the CA is setup.
_context.action(
discriminator = ('browser', 'MenuItemType', path),
callable = provideInterface,
args = (path, interface, IMenuItemType, _context.info)
)
setattr(menus, id, interface)
# Register the layer interface as an interface
_context.action(
discriminator = ('interface', path),
callable = provideInterface,
args = (path, interface),
kw = {'info': _context.info}
)
# Register the menu item type interface as an IMenuItemType
_context.action(
discriminator = ('browser', 'MenuItemType', id),
callable = provideInterface,
args = (id, interface, IMenuItemType, _context.info)
)
# Register the menu as a utility
utility(_context, IBrowserMenu, class_(id, title, description), name=id)
def menuItemDirective(_context, menu, for_,
action, title, description=u'', icon=None, filter=None,
permission=None, layer=IDefaultBrowserLayer, extra=None,
order=0):
"""Register a single menu item."""
return menuItemsDirective(_context, menu, for_, layer).menuItem(
_context, action, title, description, icon, filter,
permission, extra, order)
def subMenuItemDirective(_context, menu, for_, title, submenu,
action=u'', description=u'', icon=None, filter=None,
permission=None, layer=IDefaultBrowserLayer,
extra=None, order=0):
"""Register a single sub-menu menu item."""
return menuItemsDirective(_context, menu, for_, layer).subMenuItem(
_context, submenu, title, description, action, icon, filter,
permission, extra, order)
class MenuItemFactory(object):
"""generic factory for menu items."""
def __init__(self, factory, **kwargs):
self.factory = factory
if 'permission' in kwargs and kwargs['permission'] == 'zope.Public':
kwargs['permission'] = CheckerPublic
self.kwargs = kwargs
def __call__(self, context, request):
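        """Instantiate the wrapped factory, apply the configured attributes,
        and wrap the item in a security proxy when a permission is set."""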
item = self.factory(context, request)
for key, value in self.kwargs.items():
setattr(item, key, value)
if item.permission is not None:
checker = InterfaceChecker(IBrowserMenuItem, item.permission)
item = proxify(item, checker)
return item
class menuItemsDirective(object):
"""Register several menu items for a particular menu."""
def __init__(self, _context, menu, for_, layer=IDefaultBrowserLayer):
self.for_ = for_
self.menuItemType = menu
self.layer = layer
def menuItem(self, _context, action, title, description=u'',
icon=None, filter=None, permission=None, extra=None, order=0):
if filter is not None:
filter = Engine.compile(filter)
if order == 0:
order = _order_counter.get(self.for_, 1)
_order_counter[self.for_] = order + 1
factory = MenuItemFactory(
BrowserMenuItem,
title=title, description=description, icon=icon, action=action,
filter=filter, permission=permission, extra=extra, order=order,
_for=self.for_)
adapter(_context, (factory,), self.menuItemType,
(self.for_, self.layer), name=title)
def subMenuItem(self, _context, submenu, title, description=u'',
action=u'', icon=None, filter=None, permission=None,
extra=None, order=0):
if filter is not None:
filter = Engine.compile(filter)
if order == 0:
order = _order_counter.get(self.for_, 1)
_order_counter[self.for_] = order + 1
factory = MenuItemFactory(
BrowserSubMenuItem,
title=title, description=description, icon=icon, action=action,
filter=filter, permission=permission, extra=extra, order=order,
_for=self.for_, submenuId=submenu)
adapter(_context, (factory,), self.menuItemType,
(self.for_, self.layer), name=title)
def __call__(self, _context):
# Nothing to do.
pass
def _checkViewFor(for_=None, layer=None, view_name=None):
"""Check if there is a view of that name registered for IAdding
and IBrowserRequest. If not raise a ConfigurationError
It will raise a ConfigurationError if :
o view=""
o if view_name is not registred
"""
if view_name is None:
raise ConfigurationError(
"Within a addMenuItem directive the view attribut"
" is optional but can\'t be empty"
)
gsm = zope.component.getGlobalSiteManager()
if gsm.adapters.lookup((for_, layer),
Interface, view_name) is None:
raise ConfigurationError(
"view name %s not found " %view_name
)
def addMenuItem(_context, title, description='', menu=None, for_=None,
class_=None, factory=None, view=None, icon=None, filter=None,
permission=None, layer=IDefaultBrowserLayer, extra=None,
order=0):
"""Create an add menu item for a given class or factory
As a convenience, a class can be provided, in which case, a
factory is automatically defined based on the class. In this
case, the factory id is based on the class name.
"""
if for_ is not None:
_context.action(
discriminator = None,
callable = provideInterface,
args = ('', for_)
)
forname = 'For' + for_.getName()
else:
for_ = IAdding
forname = ''
if menu is not None:
if isinstance(menu, (str, unicode)):
menu = zope.component.getUtility(IMenuItemType, menu)
if menu is None:
raise ValueError("Missing menu id '%s'" % menu)
if class_ is None:
if factory is None:
raise ValueError("Must specify either class or factory")
else:
if factory is not None:
raise ValueError("Can't specify both class and factory")
if permission is None:
raise ValueError(
"A permission must be specified when a class is used")
factory = "BrowserAdd%s__%s.%s" % (
forname, class_.__module__, class_.__name__)
ClassDirective(_context, class_).factory(_context, id=factory)
extra = {'factory': factory}
if view:
action = view
# This action will check if the view exists
_context.action(
discriminator = None,
callable = _checkViewFor,
args = (for_, layer, view),
order=999999
)
else:
action = factory
    if menu is None:
menu = AddMenu
return menuItemsDirective(_context, menu, for_, layer).menuItem(
_context, action, title, description, icon, filter,
permission, extra, order)
| gpl-3.0 | -2,425,772,843,166,543,400 | 36.402174 | 79 | 0.6161 | false |
neuroailab/tfutils | tfutils/tests/mnist_data.py | 1 | 4588 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""tf.data.Dataset interface to the MNIST dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import shutil
import tempfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
def read32(bytestream):
"""Read 4 bytes from bytestream as an unsigned 32-bit integer."""
dt = np.dtype(np.uint32).newbyteorder('>')
return np.frombuffer(bytestream.read(4), dtype=dt)[0]
def check_image_file_header(filename):
"""Validate that filename corresponds to images for the MNIST dataset."""
with tf.gfile.Open(filename, 'rb') as f:
magic = read32(f)
read32(f) # num_images, unused
rows = read32(f)
cols = read32(f)
if magic != 2051:
raise ValueError('Invalid magic number %d in MNIST file %s' % (magic,
f.name))
if rows != 28 or cols != 28:
raise ValueError(
'Invalid MNIST file %s: Expected 28x28 images, found %dx%d' %
(f.name, rows, cols))
def check_labels_file_header(filename):
"""Validate that filename corresponds to labels for the MNIST dataset."""
with tf.gfile.Open(filename, 'rb') as f:
magic = read32(f)
read32(f) # num_items, unused
if magic != 2049:
raise ValueError('Invalid magic number %d in MNIST file %s' % (magic,
f.name))
def download(directory, filename):
"""Download (and unzip) a file from the MNIST dataset if not already done."""
filepath = os.path.join(directory, filename)
if tf.gfile.Exists(filepath):
return filepath
if not tf.gfile.Exists(directory):
tf.gfile.MakeDirs(directory)
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'
_, zipped_filepath = tempfile.mkstemp(suffix='.gz')
print('Downloading %s to %s' % (url, zipped_filepath))
urllib.request.urlretrieve(url, zipped_filepath)
with gzip.open(zipped_filepath, 'rb') as f_in, \
tf.gfile.Open(filepath, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(zipped_filepath)
return filepath
def dataset(directory, images_file, labels_file):
"""Download and parse MNIST dataset."""
images_file = download(directory, images_file)
labels_file = download(directory, labels_file)
check_image_file_header(images_file)
check_labels_file_header(labels_file)
def decode_image(image):
# Normalize from [0, 255] to [0.0, 1.0]
image = tf.decode_raw(image, tf.uint8)
image = tf.cast(image, tf.float32)
image = tf.reshape(image, [784])
return image / 255.0 - 0.5
def decode_label(label):
label = tf.decode_raw(label, tf.uint8) # tf.string -> [tf.uint8]
label = tf.reshape(label, []) # label is a scalar
return tf.to_int32(label)
images = tf.data.FixedLengthRecordDataset(
images_file, 28 * 28, header_bytes=16).map(decode_image)
labels = tf.data.FixedLengthRecordDataset(
labels_file, 1, header_bytes=8).map(decode_label)
return tf.data.Dataset.zip({'images': images, 'labels': labels})
def train(directory):
"""tf.data.Dataset object for MNIST training data."""
return dataset(directory, 'train-images-idx3-ubyte',
'train-labels-idx1-ubyte')
def test(directory):
"""tf.data.Dataset object for MNIST test data."""
return dataset(directory, 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte')
def build_data(directory, batch_size, group):
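  """Return a one-shot-iterator element dict with batched 'images' and 'labels'."""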
if group == 'train':
dataset = train(directory).apply(
tf.contrib.data.shuffle_and_repeat(
10000))
else:
dataset = test(directory).repeat()
# Batch it
dataset = dataset.apply(
tf.contrib.data.batch_and_drop_remainder(batch_size))
next_element = dataset.make_one_shot_iterator().get_next()
return next_element
| mit | 2,490,599,746,435,539,000 | 33.757576 | 80 | 0.66456 | false |
AsymmetricVentures/asym-fields | setup.py | 1 | 1826 | # -*- coding: utf-8 -*-
# Asymmetric Base Framework :: Fields
# Copyright (C) 2013-2014 Asymmetric Ventures Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from datetime import datetime
from setuptools import setup, find_packages
classifiers = """
Development Status :: 4 - Beta
Framework :: Django
Programming Language :: Python
Intended Audience :: Developers
Natural Language :: English
Operating System :: OS Independent
Topic :: Software Development :: Libraries
Topic :: Utilities
License :: OSI Approved :: GNU General Public License v2 (GPLv2)
Topic :: Software Development :: Libraries :: Application Frameworks
"""
version = '0.2.0'
url = 'https://github.com/AsymmetricVentures/asym-fields'
setup(
name = 'asymm-fields',
version = '{}-{}'.format(version, datetime.now().strftime('%Y%m%d%H%M')),
url = url,
download_url = '{}/archive/v{}.tar.gz'.format(url, version),
author = 'Richard Eames',
author_email = '[email protected]',
packages = find_packages(),
classifiers = list(filter(None, classifiers.split('\n'))),
install_requires = (
'django>=1.4.5',
'jinja2>=2.7',
'pytz', # most recent
),
test_suite = 'run_tests.main'
)
| gpl-2.0 | 9,190,243,004,921,041,000 | 32.814815 | 76 | 0.710296 | false |
mpi-sws-rse/thingflow-python | examples/kalman_model.py | 1 | 4244 | import asyncio
import numpy as np
from sklearn import linear_model
# For Kalman filtering
from filterpy.kalman import KalmanFilter
from filterpy.common import Q_discrete_white_noise
from thingflow.base import OutputThing, InputThing, from_iterable, Scheduler
class SGDLinearRegressionModel(OutputThing, InputThing):
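    """Streaming linear-regression thing: (X, y) pairs on the 'train' port
    update the SGD model incrementally, and inputs on the 'observe' port
    produce predictions emitted on the 'predict' port."""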
def __init__(self):
OutputThing.__init__(self, ports=['train', 'observe', 'predict'])
self.clf = linear_model.SGDRegressor()
def on_train_next(self, x):
print("On train next called")
# training input: train the model
xx = np.asarray(x[0])
yy = np.asarray(x[1])
self.clf.partial_fit(xx, yy)
def on_train_error(self, x):
print("On train error called")
self.on_error(x)
def on_train_completed(self):
print("On train completed called")
self.on_completed()
def on_observe_next(self, x):
print("On observe next called")
xx = np.asarray(x)
p = self.clf.predict(xx)
self._dispatch_next(p, port='predict')
def on_observe_error(self, x):
self.on_error(x)
def on_observe_completed(self):
self.on_completed()
class FilterModel(OutputThing, InputThing):
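    """Wrap a filterpy-style filter: each measurement arriving on the
    'observe' port triggers a predict/update cycle and the updated state
    estimate is emitted on the 'predict' port."""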
def __init__(self, filter):
OutputThing.__init__(self, ports=['observe', 'predict'])
self.filter = filter
def on_observe_next(self, measurement):
print("On observerain next called")
        # measurement input: run a predict/update cycle on the filter
self.filter.predict()
self.filter.update(measurement)
self._dispatch_next(self.filter.x, port='predict')
def on_observe_error(self, x):
print("On observe error called")
self.on_error(x)
def on_observe_completed(self):
print("On observe completed called")
self.on_completed()
class KalmanFilterModel(FilterModel):
"""Implements Kalman filters using filterpy.
x' = Fx + Bu + w
y = H x + ww
"""
def __init__(self, dim_state, dim_control, dim_measurement,
initial_state_mean, initial_state_covariance,
matrix_F, matrix_B,
process_noise_Q,
matrix_H, measurement_noise_R):
filter = KalmanFilter(dim_x=dim_state, dim_u=dim_control, dim_z=dim_measurement)
filter.x = initial_state_mean
filter.P = initial_state_covariance
filter.Q = process_noise_Q
filter.F = matrix_F
filter.B = matrix_B
filter.H = matrix_H
filter.R = measurement_noise_R # covariance matrix
super().__init__(filter)
def main_linear():
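    """Feed two (X, y) training batches to the SGD model on a schedule and
    print the prediction for a periodically emitted query point."""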
obs_stream = from_iterable(iter([ [ [ [1.0, 1.0], [2.0, 2.0]], [1.0, 2.0] ], [ [ [6.0, 6.0], [9.0, 9.0]], [6.0, 9.0] ] ]))
pred_stream = from_iterable(iter([ [3.0, 3.0] ]))
model = SGDLinearRegressionModel()
obs_stream.connect(model, port_mapping=('default', 'train'))
obs_stream.connect(print)
pred_stream.connect(model, port_mapping=('default', 'observe'))
model.connect(print, port_mapping=('predict', 'default'))
scheduler = Scheduler(asyncio.get_event_loop())
scheduler.schedule_periodic(obs_stream, 1)
scheduler.schedule_periodic(pred_stream, 5)
scheduler.run_forever()
def main_kalman():
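    """Build a 2-state/1-measurement constant-velocity Kalman filter and run
    it over a short measurement stream, printing each state estimate."""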
dim_x = 2
dim_u = 1
dim_z = 1
initial_state_mean = np.array([ [1.0] , [0.0] ])
initial_state_covariance = 1000 * np.eye(dim_x)
F = np.array([ [ 1., 1.], [0., 1.] ])
B = np.zeros((2, 1) )
Q = Q_discrete_white_noise(dim=2, dt=0.1, var=0.13)
H = np.array([[1.,0.]])
R = 5 * np.eye(1)
model = KalmanFilterModel(dim_x, dim_u, dim_z, initial_state_mean, initial_state_covariance,
F, B, Q, H, R)
measurement_stream = from_iterable(iter([ [ 1.0 ], [0.0] ]))
# measurement_stream = from_iterable(iter([ np.array([ [1.0, 1.0] ]) ]))
measurement_stream.connect(model, port_mapping=('default', 'observe'))
model.connect(print, port_mapping=('predict', 'default'))
scheduler = Scheduler(asyncio.get_event_loop())
scheduler.schedule_periodic(measurement_stream, 1)
scheduler.run_forever()
def main():
main_kalman()
if __name__ == '__main__':
main()
| apache-2.0 | -5,613,145,215,875,922,000 | 31.396947 | 128 | 0.602733 | false |
louargantb/onectl | onectl/sources/includes/xmlparser.py | 1 | 8015 | from includes import xmltodic
import re
import xml.etree.ElementTree as ET
# INI XML paths
XML_INFO = '/plugins/plugin[name={0}]/info'
XML_PLUGIN_FILE = '/plugins/file'
XML_PLUGIN_FILE_TYPE = '/plugins/file_type'
# INI XML PATHS
XML_INI_KEY = '/plugins/plugin[name={0}]/key'
XML_INI_SECTION = '/plugins/plugin[name={0}]/section'
XML_INI_KEY_FORMAT = '/plugins/plugin[name={0}]/input/format'
XML_INI_KEY_SEPARATOR = '/plugins/plugin[name={0}]/input/separator'
XML_INI_KEYTYPE = '/plugins/plugin[name={0}]/input/type'
XML_INI_LIVE = '/plugins/plugin[name={0}]/live'
XML_INI_REGEXP = '/plugins/plugin[name={0}]/input/validation/regexp'
XML_INI_VAL_MIN = '/plugins/plugin[name={0}]/input/validation/min'
XML_INI_VAL_MAX = '/plugins/plugin[name={0}]/input/validation/max'
# XML files
XML_PLUGIN_DESTINATION = '/plugins/destination'
LOG4j_XML_PATH = "appender[@name={0}]/param[@name={1}]"
XML_FILE_TYPES = ['ini', 'service', 'cache', 'log4j']
XML_DYNC_FILE_TYPES = ['log4j']
def get_xml_field_from_dict(dct, path):
xml_tags = path.strip('/').split('/')
for tag in xml_tags:
try:
if('[' in tag) and (']' in tag):
# get valuew between []
match = tag.split('[')[1].split(']')[0]
tag=tag.split('[',1)[0]
#get the value to mach key=ivalue @param=value
key=None
keyvalue=None
if re.search("=", match) :
keys = match.split('=',1)
key=keys[0]
keyvalue=keys[1]
else:
key=match
dct = dct[tag]
bIsValidKey=False
if type(dct)is list:
for entry in dct:
if keyvalue:
if (entry[key] == keyvalue):
dct=entry
bIsValidKey=True
break
elif key in entry:
dct=entry
bIsValidKey=True
break
if not bIsValidKey:
return None
else:
dct = dct[tag]
except (KeyError, TypeError):
return None
return dct
def create_xml_dict(xml_file_name):
# open the xml file
xml_file = open(xml_file_name, "r")
# take contents
xml_string = xml_file.read()
# create dictionary
xml_dict = xmltodic.parse(xml_string)
return xml_dict
def get_xml_field_from_xmlfile(xml_file_name, tag):
# get the dictionary
xml_dict = create_xml_dict(xml_file_name)
if not xml_dict:
return None
# get the value
res = get_xml_field_from_dict(xml_dict, tag)
return res
def validatePluginFromXml(xml_file, file_type, plugin_dict):
	''' Validate if xml was correctly written'''
if not plugin_dict:
raise ValueError('Empty plugin in XML %s.Please check documentation' %xml_file)
if not file_type:
raise ValueError('Missing file_type in xml ' + xml_file)
if not 'name' in plugin_dict:
raise ValueError('Missing plugin name in xml ' + xml_file)
if not 'info' in plugin_dict:
raise ValueError('Missing <info> field for plugin %s in xml %s.Please change the xml and execute onectl -load-plugins' %(plugin_dict['name'], xml_file))
if file_type not in XML_FILE_TYPES:
raise ValueError('Unknown filetype %s in xml %s. Valid types: %s' %(file_type,xml_file, ' '.join(XML_FILE_TYPES)))
if file_type == 'service':
pass
elif file_type == 'ini':
if not 'name' in plugin_dict:
raise ValueError('Missing plugin name in xml ' + xml_file)
if not 'key' in plugin_dict:
raise ValueError('Missing <key> field for plugin %s in xml %s.Please change the xml and execute onectl -load-plugins' %(plugin_dict['name'], xml_file))
if not 'info' in plugin_dict:
raise ValueError('Missing <info> field for plugin %s in xml %s.Please change the xml and execute onectl -load-plugins' %(plugin_dict['name'], xml_file))
if not 'input' in plugin_dict:
raise ValueError('Missing <input> field for plugin %s in xml %s.Please change the xml and execute onectl -load-plugins' %(plugin_dict['name'], xml_file))
else:
if not 'type' in plugin_dict['input']:
raise ValueError('Missing <input/type> field for plugin %s in xml %s.Please change the xml and execute onectl -load-plugins' %(plugin_dict['name'], xml_file))
else:
input_type = plugin_dict['input']['type']
if (input_type.lower() != 'list') and (input_type.lower() != 'integer-list') and (input_type.lower() != 'string') and (input_type.lower() != 'integer'):
raise ValueError('Field <input/type>:%s for plugin %s in xml %s can be one of the following: STRING,INTEGER,LIST,INTEGER-LIST' %(input_type, plugin_dict['name'], xml_file))
if 'validation' in plugin_dict['input']:
if not plugin_dict['input']['validation']:
return
# in case of a digit
if (input_type.lower() == 'integer') or (input_type.lower() == 'integer-list'):
if 'min' in plugin_dict['input']['validation']:
min_check = plugin_dict['input']['validation']['min']
if min_check is not None:
if not re.match("^-?\d*\.{0,1}\d+$", min_check):
raise ValueError('Field <input/validation/min>:%s for plugin %s in xml %s should be a digit' %(min_check, plugin_dict['name'], xml_file))
if 'max' in plugin_dict['input']['validation']:
max_check = plugin_dict['input']['validation']['max']
if max_check is not None:
if not re.match("^-?\d*\.{0,1}\d+$", max_check):
raise ValueError('Field <input/validation/max>:%s for plugin %s in xml %s should be a digit' %(max_check, plugin_dict['name'], xml_file))
else:
if 'min' in plugin_dict['input']['validation']:
raise ValueError('Field validation/min in plugin %s in xml %s can be used with input/type INTEGER or INTEGER-LIST only' %(plugin_dict['name'], xml_file))
if 'max' in plugin_dict['input']['validation']:
raise ValueError('Field validation/max in plugin %s in xml %s can be used with input/type INTEGER or INTEGER_LIST only' %(plugin_dict['name'], xml_file))
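# Illustrative sketch (not part of the original module): an 'ini' type plugin
# definition consistent with the checks above would contain roughly
#
#   <plugin>                      <!-- enclosing tag name is hypothetical -->
#     <name>net.hostname</name>
#     <info>Sets the system hostname</info>
#     <key>HOSTNAME</key>
#     <input>
#       <type>STRING</type>
#     </input>
#   </plugin>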
# XML plugin element object
def get_log4j_plugins_tocreate(xml_file_name, tag):
tree = ET.parse(xml_file_name)
root = tree.getroot()
elems = root
res_list = {}
for parent in root.findall('appender'):
parent_name = parent.attrib['name']
child_list = []
for child in parent.findall('param'):
child_name = child.attrib['name']
child_list.append(child_name)
#child_list = get_attrib_list(parent,param,attrib)
if child_list:
res_list[parent_name]=child_list
return res_list
def get_xml_tag_values(tag):
param=None
	attrib=None
	attribvalue=None
	if('[' in tag) and (']' in tag):
		# get value between []
		match = tag.split('[')[1].split(']')[0]
		param=tag.split('[',1)[0]
		# get the value to match key=value or @param=value
attribs = match.split('=',1)
attrib=attribs[0]
if attrib.startswith('@'):
attrib = attrib.strip('@')
attribvalue=attribs[1]
else:
attrib = attribs[0]
attribvalue=attribs[1]
else:
param=tag
return param, attrib, attribvalue
def get_element_tree_elem( elems, tag):
''' get element from tree '''
try:
param, attrib, attribvalue = get_xml_tag_values(tag)
out_list = []
if param and attrib and attribvalue:
out_list = []
for elem in elems:
if attrib:
if attribvalue and attribvalue == elem.get(attrib,None):
out_list=elem
#elif param and attrib:
elif param:
out_list = elems.findall(param)
return out_list
except:
raise
def get_elem_tree(xml_file, path):
try:
tags = path.split('/')
tree = ET.parse(xml_file)
root = tree.getroot()
#elems = root.findall(param)
elems=[]
for tag in tags:
if not elems:
param, attrib, attribvalue = get_xml_tag_values(tag)
elems = root.findall(param)
elems = get_element_tree_elem(elems,tag)
return tree,elems
except:
raise
def get_xml_elem_value(xml_file, path, attrib):
''' Get attribute from xml file and path'''
try:
tree, elem = get_elem_tree(xml_file, path)
if attrib:
return elem.get(attrib,None)
else:
return elem
except:
raise
def set_xml_elem_value(xml_file, path, attrib, new_value):
''' Set attribute from xml file and path '''
try:
tree, elem = get_elem_tree(xml_file, path)
if attrib:
elem.attrib['value']=new_value
tree.write(xml_file)
except:
raise
| gpl-2.0 | 3,797,542,777,021,214,700 | 29.945946 | 177 | 0.655646 | false |
ktan2020/legacy-automation | win/Lib/site-packages/wx-3.0-msw/wx/lib/pubsub/core/kwargs/listenerimpl.py | 1 | 3654 | """
:copyright: Copyright since 2006 by Oliver Schoenborn, all rights reserved.
:license: BSD, see LICENSE_BSD_Simple.txt for details.
"""
from .listenerbase import ListenerBase, ValidatorBase
from .callables import ListenerMismatchError
class Listener(ListenerBase):
"""
Wraps a callable so it can be stored by weak reference and introspected
to verify that it adheres to a topic's MDS.
A Listener instance
has the same hash value as the callable that it wraps.
Callables that have 'argName=pub.AUTO_TOPIC' as a kwarg will
be given the Topic object for the message sent by sendMessage().
Such a Listener will have wantsTopicObjOnCall() True.
    Callables that have a '\**kwargs' argument will receive all message
data, not just that for the topic they are subscribed to. Such a listener
will have wantsAllMessageData() True.
"""
def __call__(self, kwargs, actualTopic, allKwargs=None):
"""Call the listener with **kwargs. Note that it raises RuntimeError
if listener is dead. Should always return True (False would require
the callable_ be dead but self hasn't yet been notified of it...)."""
if self.acceptsAllKwargs:
kwargs = allKwargs or kwargs # if allKwargs is None then use kwargs
if self._autoTopicArgName is not None:
kwargs = kwargs.copy()
kwargs[self._autoTopicArgName] = actualTopic
cb = self._callable()
if cb is None:
self._calledWhenDead()
cb(**kwargs)
return True
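# Illustrative sketch (not part of the original module): a callable showing the
# two behaviours described in the Listener docstring above. The topic name and
# message data are made up; the standard ``pub`` facade is assumed.
#
#   from wx.lib.pubsub import pub
#
#   def onMessage(msgTopic=pub.AUTO_TOPIC, **kwargs):
#       # receives the Topic object plus all message data
#       print('%s -> %r' % (msgTopic.getName(), kwargs))
#
#   pub.subscribe(onMessage, 'rootTopic')
#   pub.sendMessage('rootTopic', arg1=123)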
class ListenerValidator(ValidatorBase):
"""
Do not accept any required args or *args; accept any **kwarg,
and require that the Listener have at least all the kwargs (can
have extra) of Topic.
"""
def _validateArgs(self, listener, paramsInfo):
# accept **kwargs
# accept *args
# check if listener missing params (only possible if
# paramsInfo.acceptsAllKwargs is False)
allTopicMsgArgs = self._topicArgs | self._topicKwargs
allParams = set(paramsInfo.allParams)
if not paramsInfo.acceptsAllKwargs:
missingParams = allTopicMsgArgs - allParams
if missingParams:
msg = 'needs to accept %s more args (%s)' \
% (len(missingParams), ''.join(missingParams))
raise ListenerMismatchError(msg, listener, missingParams)
else:
# then can accept that some parameters missing from listener
# signature
pass
# check if there are unknown parameters in listener signature:
extraArgs = allParams - allTopicMsgArgs
if extraArgs:
if allTopicMsgArgs:
msg = 'args (%s) not allowed, should be (%s)' \
% (','.join(extraArgs), ','.join(allTopicMsgArgs))
else:
msg = 'no args allowed, has (%s)' % ','.join(extraArgs)
raise ListenerMismatchError(msg, listener, extraArgs)
        # we accept listener that has fewer required params than TMS
# since all args passed by name (previous showed that spec met
# for all parameters).
# now make sure listener doesn't require params that are optional in TMS:
extraArgs = set( paramsInfo.getRequiredArgs() ) - self._topicArgs
if extraArgs:
msg = 'params (%s) missing default values' % (','.join(extraArgs),)
raise ListenerMismatchError(msg, listener, extraArgs)
| mit | 1,942,347,132,885,969,200 | 37.290323 | 81 | 0.617953 | false |
liosha2007/temporary-groupdocs-python-sdk | setup.py | 1 | 1037 | #!/usr/bin/env python
from distutils.core import setup
if __name__ == '__main__':
import sys
execfile("groupdocs/version.py")
setup(
name = __pkgname__,
version = __version__,
author = "GroupDocs Team",
author_email = "[email protected]",
description = "A Python interface to the GroupDocs API",
keywords = "groupdocs, document management, viewer, annotation, signature",
license = "Apache License (2.0)",
long_description = open('README.rst').read(),
platforms = 'any',
packages = ['groupdocs', 'groupdocs.models'],
url = "http://groupdocs.com/",
download_url = "https://github.com/groupdocs/groupdocs-python",
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Topic :: Software Development :: Libraries :: Python Modules",
"License :: OSI Approved :: Apache Software License"
],
data_files=[('', ['README.rst'])]
)
| apache-2.0 | -1,423,879,829,098,188,000 | 30.424242 | 77 | 0.656702 | false |
efiop/dvc | dvc/compare.py | 1 | 8887 | from collections import abc
from itertools import chain, repeat, zip_longest
from operator import itemgetter
from typing import (
TYPE_CHECKING,
Any,
Dict,
ItemsView,
Iterable,
Iterator,
List,
Mapping,
MutableSequence,
Sequence,
Tuple,
Union,
overload,
)
from funcy import reraise
if TYPE_CHECKING:
from dvc.ui.table import CellT
class Column(List["CellT"]):
pass
def with_value(value, default):
return default if value is None else value
class TabularData(MutableSequence[Sequence["CellT"]]):
def __init__(self, columns: Sequence[str], fill_value: str = ""):
self._columns: Dict[str, Column] = {name: Column() for name in columns}
self._keys: List[str] = list(columns)
self._fill_value = fill_value
@property
def columns(self) -> List[Column]:
return list(map(self.column, self.keys()))
def column(self, name: str) -> Column:
return self._columns[name]
def items(self) -> ItemsView[str, Column]:
projection = {k: self.column(k) for k in self.keys()}
return projection.items()
def keys(self) -> List[str]:
return self._keys
def _iter_col_row(
self, row: Sequence["CellT"]
) -> Iterator[Tuple["CellT", Column]]:
for val, col in zip_longest(row, self.columns):
if col is None:
break
yield with_value(val, self._fill_value), col
def append(self, value: Sequence["CellT"]) -> None:
for val, col in self._iter_col_row(value):
col.append(val)
def extend(self, values: Iterable[Sequence["CellT"]]) -> None:
for row in values:
self.append(row)
def insert(self, index: int, value: Sequence["CellT"]) -> None:
for val, col in self._iter_col_row(value):
col.insert(index, val)
def __iter__(self) -> Iterator[List["CellT"]]:
return map(list, zip(*self.columns))
def __getattr__(self, item: str) -> Column:
with reraise(KeyError, AttributeError):
return self.column(item)
def __getitem__(self, item: Union[int, slice]):
func = itemgetter(item)
it = map(func, self.columns)
if isinstance(item, slice):
it = map(list, zip(*it))
return list(it)
@overload
def __setitem__(self, item: int, value: Sequence["CellT"]) -> None:
...
@overload
def __setitem__(
self, item: slice, value: Iterable[Sequence["CellT"]]
) -> None:
...
def __setitem__(self, item, value) -> None:
it = value
if isinstance(item, slice):
n = len(self.columns)
normalized_rows = (
chain(val, repeat(self._fill_value, n - len(val)))
for val in value
)
# we need to transpose those rows into columnar format
# as we work in terms of column-based arrays
it = zip(*normalized_rows)
for i, col in self._iter_col_row(it):
col[item] = i
def __delitem__(self, item: Union[int, slice]) -> None:
for col in self.columns:
del col[item]
def __len__(self) -> int:
return len(self.columns[0])
@property
def shape(self) -> Tuple[int, int]:
return len(self.columns), len(self)
def drop(self, *col_names: str) -> None:
for col_name in col_names:
self._keys.remove(col_name)
self._columns.pop(col_name)
def rename(self, from_col_name: str, to_col_name: str) -> None:
self._columns[to_col_name] = self._columns.pop(from_col_name)
self._keys[self._keys.index(from_col_name)] = to_col_name
def project(self, *col_names: str) -> None:
self.drop(*(set(self._keys) - set(col_names)))
self._keys = list(col_names)
def to_csv(self) -> str:
import csv
from io import StringIO
buff = StringIO()
writer = csv.writer(buff)
writer.writerow(self.keys())
for row in self:
writer.writerow(row)
return buff.getvalue()
def add_column(self, name: str) -> None:
self._columns[name] = Column([self._fill_value] * len(self))
self._keys.append(name)
def row_from_dict(self, d: Mapping[str, "CellT"]) -> None:
keys = self.keys()
for key in d:
if key not in keys:
self.add_column(key)
row: List["CellT"] = [
with_value(d.get(key), self._fill_value) for key in self.keys()
]
self.append(row)
def render(self, **kwargs: Any):
from dvc.ui import ui
ui.table(self, headers=self.keys(), **kwargs)
def as_dict(
self, cols: Iterable[str] = None
) -> Iterable[Dict[str, "CellT"]]:
keys = self.keys() if cols is None else set(cols)
return [
{k: self._columns[k][i] for k in keys} for i in range(len(self))
]
def _normalize_float(val: float, precision: int):
return f"{val:.{precision}g}"
def _format_field(
val: Any, precision: int = None, round_digits: bool = False
) -> str:
def _format(_val):
if isinstance(_val, float) and precision:
func = round if round_digits else _normalize_float
return func(_val, precision)
if isinstance(_val, abc.Mapping):
return {k: _format(v) for k, v in _val.items()}
if isinstance(_val, list):
return [_format(x) for x in _val]
return _val
return str(_format(val))
def diff_table(
diff,
title: str,
old: bool = True,
no_path: bool = False,
show_changes: bool = True,
precision: int = None,
round_digits: bool = False,
on_empty_diff: str = None,
) -> TabularData:
headers: List[str] = ["Path", title, "Old", "New", "Change"]
fill_value = "-"
td = TabularData(headers, fill_value=fill_value)
for fname, diff_in_file in diff.items():
for item, change in sorted(diff_in_file.items()):
old_value = with_value(change.get("old"), fill_value)
new_value = with_value(change.get("new"), fill_value)
diff_value = with_value(
change.get("diff", on_empty_diff), fill_value
)
td.append(
[
fname,
str(item),
_format_field(old_value, precision, round_digits),
_format_field(new_value, precision, round_digits),
_format_field(diff_value, precision, round_digits),
]
)
if no_path:
td.drop("Path")
if not show_changes:
td.drop("Change")
if not old:
td.drop("Old")
td.rename("New", "Value")
return td
def show_diff(
diff,
title: str,
old: bool = True,
no_path: bool = False,
show_changes: bool = True,
precision: int = None,
round_digits: bool = False,
on_empty_diff: str = None,
markdown: bool = False,
) -> None:
td = diff_table(
diff,
title=title,
old=old,
no_path=no_path,
show_changes=show_changes,
precision=precision,
round_digits=round_digits,
on_empty_diff=on_empty_diff,
)
td.render(markdown=markdown)
def metrics_table(
metrics,
all_branches: bool = False,
all_tags: bool = False,
all_commits: bool = False,
precision: int = None,
round_digits: bool = False,
):
from dvc.utils.diff import format_dict
from dvc.utils.flatten import flatten
td = TabularData(["Revision", "Path"], fill_value="-")
for branch, val in metrics.items():
for fname, metric in val.items():
row_data: Dict[str, str] = {"Revision": branch, "Path": fname}
flattened = (
flatten(format_dict(metric))
if isinstance(metric, dict)
else {"": metric}
)
row_data.update(
{
k: _format_field(v, precision, round_digits)
for k, v in flattened.items()
}
)
td.row_from_dict(row_data)
rev, path, *metrics_headers = td.keys()
td.project(rev, path, *sorted(metrics_headers))
if not any([all_branches, all_tags, all_commits]):
td.drop("Revision")
return td
def show_metrics(
metrics,
markdown: bool = False,
all_branches: bool = False,
all_tags: bool = False,
all_commits: bool = False,
precision: int = None,
round_digits: bool = False,
) -> None:
td = metrics_table(
metrics,
all_branches=all_branches,
all_tags=all_tags,
all_commits=all_commits,
precision=precision,
round_digits=round_digits,
)
td.render(markdown=markdown)
| apache-2.0 | 1,916,319,566,862,563,300 | 26.685358 | 79 | 0.554518 | false |
chriswmackey/UWG_Python | tests/test_psychrometrics.py | 1 | 2082 | import pytest
from uwg import psychrometrics
def test_psychrometric_float_point():
# Input values
Tdb_in = 297.5311337413935
w_in = 0.018576773131376
P = 10090
Tdb, w, phi, h, Tdp, v = psychrometrics.psychrometrics(Tdb_in, w_in, P)
assert Tdb == pytest.approx(24.381133741393512, abs=1e-15) # Tdb [C]
assert w == pytest.approx(0.018576773131376, abs=1e-15) # W [kgv/kgd]
assert phi == pytest.approx(9.581555922752541, abs=1e-15) # RH Pw/Pws*100
assert h == pytest.approx(7.183036653518451e+04, abs=1e-15) # Enthalpy [J/kgd]
assert Tdp == pytest.approx(-10.012150181172135, abs=1e-15) # Wet bulb temp [C]
assert v == pytest.approx(8.717031296493113, abs=1e-15) # Spec. vol [m3/kga]
def test_psychrometric_simple_1():
# Really simple, coarse tests
# Input values
Tdb_in = 20.0 + 273.15
w_in = 0.002
P = 101325.0
Tdb, w, phi, h, Tdp, v = psychrometrics.psychrometrics(Tdb_in, w_in, P)
# Tests for 20, 0.002, atmosphere
assert Tdb + 273.15 == pytest.approx(Tdb_in, True, 1e-14) # Tdb [C]
assert w == pytest.approx(w_in, 1e-14) # W [kgv/kga]
assert phi == pytest.approx(13., 1) # RH [%}
assert h / 1000. == pytest.approx(25., 1) # Enthalpy [J/kga]
assert v == pytest.approx(0.83, 1e-2) # Spec. vol [m^3 kg-1]
def test_psychrometric_simple_2():
# Really simple, coarse tests
# Input values
Tdb_in = 40.0 + 273.15
w_in = 0.009
P = 101325.0
Tdb, w, phi, h, Tdp, v = psychrometrics.psychrometrics(Tdb_in, w_in, P)
# Tests for 40, 0.009, atmosphere
assert Tdb+273.15 == pytest.approx(Tdb_in, True, 1e-14) # Tdb [C]
assert w == pytest.approx(w_in, 1e-14) # W [kgv/kga]
assert phi == pytest.approx(19.5, 1e-1) # RH [%}
assert h / 1000. == pytest.approx(63., 1) # Enthalpy [J/kga]
assert v == pytest.approx(0.9, 1e-1) # Spec. vol [m^3 kg-1]
| gpl-3.0 | 8,035,852,966,113,686,000 | 37.555556 | 87 | 0.564361 | false |
arth-co/saleor | saleor/api/views.py | 1 | 3143 | from django.shortcuts import render
# Create your views here.
from rest_framework import viewsets, permissions
from rest_framework.authentication import TokenAuthentication, SessionAuthentication, BasicAuthentication
from rest_framework.response import Response
import django_filters
from rest_framework import parsers, filters
from rest_framework.renderers import JSONRenderer, TemplateHTMLRenderer
from rest_framework_xml.parsers import XMLParser
from rest_framework_xml.renderers import XMLRenderer
from rest_framework.authtoken.views import ObtainAuthToken
from saleor.product.models.base import Product, ProductVariant
from .serializers import ProductSerializer, ProductVariantSerializer
class ObtainAuthTokenXML(ObtainAuthToken):
parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,XMLParser)
renderer_classes = (XMLRenderer,)
obtain_auth_token_xml = ObtainAuthTokenXML.as_view()
class ProductFilter(django_filters.FilterSet):
# Defining Filter on Product Price
min_price = django_filters.NumberFilter(name="price", lookup_type='gte')
max_price = django_filters.NumberFilter(name="price", lookup_type='lte')
# Defining Filter on Product Category
category = django_filters.CharFilter(name="categories__name")
#
class Meta:
model = Product
# Filters on name, category and price
fields = ['name','categories','price','weight']
class ProductViewSet(viewsets.ReadOnlyModelViewSet):
"""
This viewset automatically provides `list` and `detail` actions.
"""
queryset = Product.objects.all()
authentication_classes = (BasicAuthentication, TokenAuthentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
renderer_classes = (XMLRenderer, JSONRenderer,)
filter_backends = (filters.DjangoFilterBackend,filters.SearchFilter, filters.OrderingFilter)
search_fields = ('name','categories__name')
ordering_fields = ('name')
filter_class = ProductFilter
serializer_class = ProductSerializer
class ProductVariantFilter (django_filters.FilterSet):
# Defining Filter on Product Price
min_price = django_filters.NumberFilter(name="price_override", lookup_type='gte')
max_price = django_filters.NumberFilter(name="price_override", lookup_type='lte')
# Defining Filter on Product Category
product = django_filters.CharFilter(name="product__name")
#
class Meta:
model = ProductVariant
# Filters on name, category and price
fields = ['name','product','price_override','weight_override']
class ProductVariantViewSet(viewsets.ReadOnlyModelViewSet):
"""
This viewset automatically provides `list` and `detail` actions.
"""
queryset = ProductVariant.objects.all()
authentication_classes = (BasicAuthentication, TokenAuthentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
renderer_classes = (XMLRenderer, JSONRenderer,)
filter_backends = (filters.DjangoFilterBackend,)
filter_class = ProductVariantFilter
serializer_class = ProductVariantSerializer
| bsd-3-clause | -7,556,553,129,753,698,000 | 35.976471 | 105 | 0.757875 | false |
zacernst/bayes_tensorflow | bayes_tensorflow.py | 1 | 35543 | """
Work in progress.
Bayes network classes that will convert a network specification into
a set of TensorFlow ops and a computation graph.
The main work of the classes is to move from a directed graph to a
set of formulas expressing what's known about the probability distribution
of the events represented in the graph. When those formulas are derived,
they can be associated with the corresponding nodes in the graph, and used
for local message passing, Monte Carlo simulations, and translation into
Tensorflow ops to be sent to the cpu or gpu.
The flow of information through the classes is as follows: The user will
create ``BayesNodes`` and connect them into the desired graph structure.
Depending on the way in which the graph will be queried, a set of
probability statements will be required. These take the form of ``Probability``
objects, which associate a ``Statement`` (which looks like a formula from
propositional logic, where the variables correspond to graph nodes) with
a value, e.g. ``P(a & ~b) = .5``. The statements are stored in a container
called a ``FactBook`` which can be queried. At that point, we have enough
information to automatically generate an abstract syntax tree of all the
functions that are necessary for computing probabilities on the graph. The
AST can be transformed into Python functions dynamically, or into
Tensorflow ops.
Current state: We can define a graph, and we can define what's known about
direct causal influences (using the ``FactBook`` class). Basic facts about
the graph can be calculated, such as which nodes d-separate arbitrary pairs
of nodes. Message passing is underway, we can generate the expressions for
any node's ``alpha`` and ``lambda`` messages (following Pearl 1988). The
portions of the AST which have been defined can be traversed with the
usual ``__iter__`` method.
Next step is to be able to traverse the trees of all the message passing
functions and conditionally replace subexpressions based on the structure
of the tree -- for example, rewriting expressions of the form ``P(a & b | c)``
if ``c`` d-separates ``a`` from ``b`` in the directed graph.
"""
from types import *
import itertools
import hashlib
import copy
import functools
import random
from tabulate import tabulate
import tensorflow as tf
class BayesNetValidationError(Exception):
pass
def dict_to_function(arg_dict):
"""
We need functions for Tensorflow ops, so we will use this function
to dynamically create functions from dictionaries.
"""
def inner_function(lookup, **inner_dict):
return inner_dict[lookup]
new_function = functools.partial(inner_function, **arg_dict)
return new_function
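# Illustrative sketch (not in the original source): the dictionary and values
# below are made up, purely to show what dict_to_function returns.
#
#   lookup = dict_to_function({'rain': 0.3, 'sun': 0.7})
#   lookup('rain')   # -> 0.3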
class Arithmetic(object):
"""
This is a mix-in class for enabling arithmetic or algebraic functions over
the objects.
"""
def __add__(self, other):
"""
Allows us to use familiar ``+`` to denote addition.
"""
return Add(self, other)
def __mul__(self, other):
"""
Allows us to use familiar ``*`` for multiplication.
"""
return Multiply(self, other)
class Sigma(Arithmetic):
"""
Summation over a list of ``Arithmetic`` objects.
"""
def __init__(self, *values):
self.values = values
def __repr__(self):
return '(Sigma: ' + ', '.join([str(value) for value in self.values]) + ')'
def __iter__(self):
for value in self.values:
yield value
if hasattr(value, '__iter__'):
for i in value:
yield i
class Pi(Arithmetic):
"""
Multiplication over a list of ``Arithmetic`` objects.
"""
def __init__(self, *values):
self.values = values
def __repr__(self):
return '(Pi: ' + ', '.join([str(value) for value in self.values]) + ')'
def __iter__(self):
for value in self.values:
yield value
if hasattr(value, '__iter__'):
for i in value:
yield i
def bayes(given_probability):
"""
Takes P(a | b) and returns equivalent probability using Bayes Theorem.
Result is alpha * P(a) * Likelihood(a).
"""
given = given_probability.statement
return (
alpha(given.event, given.given) * Probability(given.event) *
Probability(Given(given.given, given.event)))
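# For reference (not in the original source), the identity implemented above is
# Bayes' theorem: P(a | b) = (1 / P(b)) * P(a) * P(b | a), with 1 / P(b) playing
# the role of the normalising factor ``alpha``.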
class One(Arithmetic):
"""
Could be handy for base case in recursive multiplications.
"""
pass
def __repr__(self):
return '1'
class Number(Arithmetic):
def __init__(self, value):
self.value = value
def __repr__(self):
return str(self.value)
class Inverse(Arithmetic):
def __init__(self, expression):
if not isinstance(expression, Arithmetic):
raise BayesNetValidationError('Inverse applies only to ``Arithmetic`` objects')
self.expression = expression
def __repr__(self):
return '1 / ' + str(self.expression)
def __iter__(self):
yield self.expression
if hasattr(self.expression, '__iter__'):
for i in self.expression:
yield i
class Add(Arithmetic):
def __init__(self, addend_1, addend_2):
if not isinstance(addend_1, Arithmetic) or not isinstance(addend_2, Arithmetic):
raise BayesNetValidationError('Add only defined for ``Arithmetic`` objects')
self.addend_1 = addend_1
self.addend_2 = addend_2
def __repr__(self):
return '({addend_1} + {addend_2})'.format(
addend_1=str(self.addend_1), addend_2=str(self.addend_2))
def __iter__(self):
yield self.addend_1
if hasattr(self.addend_1, '__iter__'):
for i in self.addend_1:
yield i
yield self.addend_2
if hasattr(self.addend_2, '__iter__'):
for i in self.addend_2:
yield i
class Multiply(Arithmetic):
def __init__(self, multiplicand_1, multiplicand_2):
if (not isinstance(multiplicand_1, Arithmetic) or
not isinstance(multiplicand_2, Arithmetic)):
raise BayesNetValidationError('Multiply only defined for ``Arithmetic`` objects')
self.multiplicand_1 = multiplicand_1
self.multiplicand_2 = multiplicand_2
def __repr__(self):
return '({multiplicand_1} * {multiplicand_2})'.format(
multiplicand_1=str(self.multiplicand_1), multiplicand_2=str(self.multiplicand_2))
def __iter__(self):
for multiplicand in [self.multiplicand_1, self.multiplicand_2]:
yield multiplicand
if hasattr(multiplicand, '__iter__'):
for i in multiplicand:
yield i
class Probability(Arithmetic):
"""
``Probability`` objects have ``Statement`` objects as attributes and are related
to floats in the range ``[0, 1]``.
"""
def __init__(self, statement):
if not isinstance(statement, Statement):
raise BayesNetValidationError('Probability applies only to ``Statement``s')
self.statement = statement
def __repr__(self):
return 'P({statement})'.format(statement=str(self.statement))
def __iter__(self):
yield self.statement
if hasattr(self.statement, '__iter__'):
for i in self.statement:
yield i
def __eq__(self, other):
try:
return self.statement == other.statement
except AttributeError: # attributes are different, not equal
return False
class Statement(object):
"""
This is a mix-in class for allowing boolean connectives to be used.
Any connective other than conjunction and negation is immediately
translated to one using only conjunction and negation.
"""
def __and__(self, other):
if self is other:
return self
return Conjunction(self, other)
def __or__(self, other):
return ~(~self & ~other)
def __invert__(self):
return Negation(self)
def __gt__(self, other):
return ~(self & ~other)
def __eq__(self, other):
"""
Equality test.
Not all cases are accounted for yet.
"""
if not isinstance(other, Statement):
return False
if isinstance(self, Conjunction) and len(self.conjuncts) == 1:
left = self.conjuncts[0]
else:
left = self
if isinstance(other, Conjunction) and len(other.conjuncts) == 1:
right = other.conjuncts[0]
else:
right = other
if isinstance(left, Negation) != isinstance(right, Negation):
return False
if isinstance(left, Conjunction) and not isinstance(right, Conjunction):
return False
if left is right:
return True
if isinstance(left, Negation) and isinstance(right, Negation):
return left.statement is right.statement
return False # This is sketchy -- there might be other cases to check
def is_literal(self):
"""
A ``literal`` is an atomic formula or the negation of an atomic formula.
"""
return self.is_atomic() or (
isinstance(self, Negation) and self.statement.is_atomic())
def is_atomic(self):
"""
Although you can define a new statement using connectives other than
conjunction and negation, they are immediately transformed into
conjunctions and negations upon instantiation. Thus, we can test
whether a statement is atomic by checking whether it is of type
``Negation`` or ``Conjunction``.
"""
return not isinstance(self, (Negation, Conjunction,))
def truth_value(self):
"""
Recursively evaluate ``self`` to see if it's True or False relative
to the graph.
"""
if self.is_atomic():
return self.state
elif isinstance(self, Conjunction):
return all(conjunction.truth_value() for conjunction in self.conjuncts)
elif isinstance(self, Negation):
return not self.statement.truth_value()
else:
raise BayesNetValidationError('This should not happen.')
class FactBook(object):
"""
Holds a list of facts.
"""
def __init__(self):
self.facts = []
def __lt__(self, other):
self.facts.append(other)
def __contains__(self, other):
return other in self.facts
def __iadd__(self, other):
self.facts.append(other)
return self
def __repr__(self):
return '\n'.join([str(i) for i in self.facts])
def __iter__(self):
for fact in self.facts:
yield fact
class Equals(object):
"""
An ``Equals`` is an assertion that an event has a probability of being true.
"""
def __init__(self, statement, probability):
self.statement = statement
self.probability = probability
def __eq__(self, other):
return self.statement == other.statement and self.probability == other.probability
def __repr__(self):
return str(self.statement) + ' = ' + str(self.probability)
class Given(Statement):
"""
Expressions like ``x|y`` are not events or states; they're used only in
probability assignments. So they get their own class.
"""
def __init__(self, event, given):
self.event = event
self.given = given
def __repr__(self):
return ' '.join([str(self.event), '|', str(self.given)])
def __eq__(self, other):
if not isinstance(other, Given):
return False
return self.event == other.event and self.given == other.given
def __iter__(self):
yield self.event
if hasattr(self.event, '__iter__'):
for i in self.event:
yield i
yield self.given
if hasattr(self.given, '__iter__'):
for i in self.given:
yield i
def event_combinations(*events):
"""
Returns a list of lists of statements. Each sublist is a complete description
    of the set of ``*events``.
"""
number_of_events = len(events)
out = []
for boolean_combination in itertools.product(
*([[True, False]] * number_of_events)):
out.append(
[events[i] if boolean else Negation(events[i])
for i, boolean in enumerate(boolean_combination)])
return out
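# Illustrative sketch (not in the original source): for two nodes ``a`` and
# ``b``, event_combinations(a, b) returns the four truth-value assignments,
# roughly
#
#   [[a, b], [a, Negation(b)], [Negation(a), b], [Negation(a), Negation(b)]]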
class Negation(Statement):
"""
A negated statement.
"""
def __init__(self, statement):
self.statement = statement
def __repr__(self):
return '~' + str(self.statement)
def __iter__(self):
yield self.statement
if hasattr(self.statement, '__iter__'):
for i in self.statement:
yield i
class Conjunction(Statement):
"""
A list of conjuncts.
"""
def __init__(self, *args):
"""
The user will likely define conjunctions like ``a & b & c``, which
would typically yield ``(a & b) & c``, which is correct but
inconvenient. Better to have ``(a & b & c)`` for easier enumeration
through the conjuncts. So the ``__init__`` function checks each
conjunct to see if it's a conjunction, and appends those conjuncts
to a "flattened" list.
"""
self.conjuncts = []
for arg in args:
if isinstance(arg, Conjunction):
self.conjuncts += arg.conjuncts
else:
self.conjuncts.append(arg)
def __repr__(self):
return (
'(' + ' & '.join(
[str(conjunct) for conjunct in self.conjuncts]) + ')')
def __eq__(self, other):
if not isinstance(other, Conjunction):
return False
return self.conjuncts == other.conjuncts
def __iter__(self):
for conjunct in self.conjuncts:
yield conjunct
if hasattr(conjunct, '__iter__'):
for i in conjunct:
yield i
class BayesNode(Statement):
"""
This is the main class for the module.
It represents a vertex in the Bayes network.
"""
def __init__(
self,
activation_probability=None,
state=None,
fact_book=None,
pinned=False,
name=None):
self.fact_book = fact_book
self.incoming_edges = []
self.outgoing_edges = []
self.activation_probability = activation_probability
self.state = state
self.pinned = pinned
self.name = name or hashlib.md5(str(id(self))).hexdigest()
self.parent_fact_lookup = None
# For now, we are not calling the parent class's ``__init__`` method.
# super(BayesNode, self).__init__()
def _alpha(self, *children):
"""
Normalization factor for node with children.
"""
if len(children) == 0:
children = self.children
general_case = (
(Probability(self) * Pi(
*[Probability(Given(child, self)) for child in children])) +
(Probability(~self) * Pi(
*[Probability(Given(child, ~self)) for child in children])))
return general_case
def _pi(self, value=True):
"""
Computes message propagated from the parents of ``self`` to ``self``.
TODO: Make this take d-separation into account -- i.e. graphs that are
DAGs but not causal polytrees.
"""
parents = self.parents
if self.is_source():
return Probability(self)
else:
# TODO: Take into account parent._alpha() negated
return Pi(
*[((Probability(Given(self, parent)) * parent._pi(value=True)) +
(Probability(Given(self, ~parent)) * parent._pi(value=False))) for parent in parents])
def _lambda(self, value=True): # I wish lambda weren't a reserved word
"""
Likelihood of ``self``. Recursively called for each descendant of ``self``
until a sink is reached, in which case it returns an object of type
``One``.
"""
children = self.children
target = self if value else ~self
if self.is_sink():
return One()
else:
# TODO: Take into account child._lambda() negated
general_case = Pi(
*[((Probability(Given(child, target)) * child._lambda(value=True)) +
(Probability(Given(~child, target)) * child._lambda(value=False))) for child in children])
return general_case
def top_down_eval(self):
# evaluate the value of self, given the parents only
pass
def check_satisfied_parent_requirements(self):
return len(self.missing_parent_requirements()) == 0
def value_in_book(self, fact):
"""
Return the relevant ``Equals`` object for the ``fact``.
"""
for book_fact in self.fact_book:
if not isinstance(book_fact, Equals):
continue
fact_book_statement = book_fact.statement
if not isinstance(fact_book_statement, Probability):
raise BayesNetValidationError('This should not happen.')
if fact_book_statement.statement == fact:
return book_fact
return None # if the fact isn't present
def fact_requirements_satisfied(self, facts):
satisfied_requirements = []
unsatisfied_requirements = []
for fact in facts:
book_value = self.value_in_book(fact)
if book_value is None:
unsatisfied_requirements.append(fact)
else:
satisfied_requirements.append(book_value)
return satisfied_requirements, unsatisfied_requirements
def satisfied_child_requirements(self):
child_requirements = self.child_fact_requirements()
satisfied, _ = self.fact_requirements_satisfied(child_requirements)
return satisfied
def satisfied_parent_requirements(self):
parent_requirements = self.parent_fact_requirements()
satisfied, _ = self.fact_requirements_satisfied(parent_requirements)
return satisfied
def __repr__(self):
return str(self.name)
def __rshift__(self, other):
"""
Create an edge from self to other.
"""
edge = BayesEdge(self, other)
self.outgoing_edges.append(edge)
other.incoming_edges.append(edge)
def connected_nodes(self):
"""
Returns a list of all the nodes connected (directly or indirectly) to
the node. In other words, it returns all the nodes in the graph.
"""
node_list = []
def recurse(node):
if node in node_list:
return
node_list.append(node)
for child in node.children:
recurse(child)
for parent in node.parents:
recurse(parent)
recurse(self)
return node_list
def associate_fact_book(self, fact_book):
"""
When we associate a ``FactBook`` with a specific node, then we need
to propagate it across all the nodes in the graph.
"""
for node in self.connected_nodes():
node.fact_book = fact_book
self.fact_book = fact_book
def descendants(self):
"""
Return a list of all the descendants of the node.
"""
node_list = []
def recurse(node):
if node in node_list:
return
node_list.append(node)
for child in node.children:
recurse(child)
recurse(self)
return node_list
def iter_undirected_paths(self, target=None):
"""
Returns a list of lists, which are paths connecting self to other,
ignoring the directionality of the edges.
"""
def recurse(step_list):
current_node = step_list[-1]
if current_node is target:
yield step_list
else:
next_steps = current_node.children + current_node.parents
next_steps = [i for i in next_steps if i not in step_list]
if len(next_steps) == 0 and step_list[-1] is target or target is None:
yield step_list
for next_step in next_steps:
for i in recurse(copy.copy(step_list) + [next_step]):
yield i
for path in recurse([self]):
yield path
def undirected_paths(self, target=None):
return list(self.iter_undirected_paths(target=target))
def is_source(self):
"""
Tests whether there is no incoming edge.
"""
return len(self.parents) == 0
def annotate_path(self, *path):
"""
Examines each pair nodes in a path and annotates them with the directionality
of the edges in the original graph. To be used for testing d-separation.
"""
annotated_path = []
for index, node in enumerate(path):
if index == len(path) - 1:
continue
next_node = path[index + 1]
path_triple = (
node, '->' if next_node in node.children
else '<-', next_node,)
annotated_path.append(path_triple)
return annotated_path
def annotated_paths(self, target=None):
return [
self.annotate_path(*path) for path in
self.iter_undirected_paths(target=target)]
@staticmethod
def path_patterns(annotated_path):
"""
The d-separation criterion requires us to check whether paths have
arrows converging on nodes, diverging from them, or chains of arrows
pointing in the same direction.
"""
if len(annotated_path) < 2:
return None
path_pattern_list = []
for index, first_triple in enumerate(annotated_path[:-1]):
second_triple = annotated_path[index + 1]
quintuple = (
first_triple[0], first_triple[1], first_triple[2],
second_triple[1], second_triple[2],)
first_arrow = first_triple[1]
second_arrow = second_triple[1]
pattern = None
if first_arrow == '<-' and second_arrow == '->':
pattern = 'diverge'
elif first_arrow == '->' and second_arrow == '<-':
pattern = 'converge'
elif first_arrow == second_arrow:
pattern = 'chain'
else:
raise BayesNetValidationError('This should not happen.')
path_pattern_list.append((pattern, quintuple,))
return path_pattern_list
def all_path_patterns(self, target=None):
"""
Return all patterns, labeled with 'converge', 'diverge', etc. from ``self``
to (optional) ``target``.
"""
return [
self.path_patterns(path) for path in
self.annotated_paths(target=target)]
def is_sink(self):
"""
Tests whether there is no outgoing edge.
"""
return len(self.children) == 0
@property
def parents(self):
"""
Return the parent nodes of the current node.
"""
return [edge.source for edge in self.incoming_edges]
@property
def children(self):
"""
Return the child nodes of the current node.
"""
return [edge.target for edge in self.outgoing_edges]
def parent_fact_requirements(self):
"""
This looks at all parents of ``self`` and returns a list of lists.
Each sublist is a boolean combination of each of the upstream nodes.
Each combination (e.g. ``a & b``, ``a & ~b``, ``~a & b``, ``~a & ~b``)
has to be represented in the ``FactBook`` if we are to accurately
determine the influence of parent nodes on their child nodes. In
the Bayes net literature, the messages conveying this information
        from parents to children are denoted ``pi``, whereas the
information transmitted from children to parents is denoted ``lambda``.
"""
incoming_nodes = self.parents
if len(incoming_nodes) == 0:
return []
event_tuples = event_combinations(*incoming_nodes)
print event_tuples
positive = [
Given(self,
Conjunction(*event_tuple) if len(event_tuple) > 1 else
event_tuple[0])
for event_tuple in event_tuples]
negative = [
Given(~self,
Conjunction(*event_tuple) if len(event_tuple) > 1 else
event_tuple[0])
for event_tuple in event_tuples]
return positive + negative
def child_fact_requirements(self):
"""
Returns list of all facts required for lambda messages.
"""
outgoing_nodes = self.children
return (
[Given(child, self) for child in outgoing_nodes] +
[Given(child, Negation(self))
for child in outgoing_nodes] +
[Given(~child, self) for child in outgoing_nodes] +
[Given(~child, Negation(self))
for child in outgoing_nodes])
def missing_parent_requirements(self):
requirements = self.parent_fact_requirements()
_, missing = self.fact_requirements_satisfied(requirements)
return missing
def missing_child_requirements(self):
requirements = self.child_fact_requirements()
_, missing = self.fact_requirements_satisfied(requirements)
return missing
def relevant_parent_fact_dict(self):
parent_requirements = self.fact_requirements()
relevant_fact_requirements = [
fact for fact in self.fact_book
if fact.statement in parent_requirements]
relevant_fact_dict = {
fact.statement: fact.probability for fact in
relevant_fact_requirements}
return relevant_fact_dict
def create_parent_fact_function(self):
"""
Retrieves all the relevant facts from ``self.fact_book``,
creates a dictionary for lookups, then returns a function
that replaces dictionary lookups with function calls.
"""
return dict_to_function(self.relevant_parent_fact_dict())
def d_separated(self, z, y):
"""
Test whether ``z`` d-separates node ``self`` from node ``y``.
The concept of d-separation is central to Bayes networks. If ``y``
d-separates ``x`` from ``z``, then ``x`` and ``z`` are probabilistically
independent, given ``y``. In other parlance, it's a "screening-off"
condition. For example, coffee drinkers get lung cancer at a higher rate
than non-coffee drinkers. But that's because smokers are more likely
to be coffee drinkers, and smoking causes cancer. So smoking "screens off"
coffee from cancer. In the language of Bayes nets, smoking d-separates
coffee and cancer. That is, if you know already whether someone is a
smoker, then learning about their coffee consumption doesn't give you
any information about the probability that they will get cancer.
"""
def path_d_separated(path_pattern, z):
"""
Test whether the ``path_pattern`` is d-separated by ``z``.
"""
# Verify that we're handling the None case correctly
if path_pattern is None: # Degenerate case
return False
for category, quintuple in path_pattern:
w = quintuple[2]
if category == 'converge':
if w is z or w in z.descendants():
return True
elif category == 'chain':
if w is z:
return True
elif category == 'diverge':
if w is z:
return True
else:
raise BayesNetValidationError('This should never happen.')
return False # No w satisfying d-separation was found
path_patterns = self.all_path_patterns(target=y)
return all(
path_d_separated(path_pattern, z) for
path_pattern in path_patterns)
def d_separates_all(self, list_of_nodes):
"""
Tests whether each pair of nodes in ``list_of_nodes`` is d-separated
from each other by self. This will be used (e.g.) to determine how to
evaluate ``Given`` statements where the ``statement`` is a conjunction.
Specifically, if ``x`` d-separates ``y1``, ``y2``, and ``y3`` then
``P(y1, y2, y3 | x) == P(y1 | x) * P(y2 | x) * P(y3 | x)``.
"""
        return all(
            node_pair[0].d_separated(self, node_pair[1]) for node_pair in
            itertools.combinations(list_of_nodes, 2))
def event_combinations_satisfied(self, node_list):
for i in range(len(node_list)):
i += 1
for combo in event_combinations(node_list, combination_length=i):
                given_combo = [Given(self, event) for event in combo]
                satisfied_requirements, unsatisfied_requirements = (
                    self.fact_requirements_satisfied(given_combo))
if (len(satisfied_requirements) + len(unsatisfied_requirements) !=
len(given_combo)):
raise BayesNetValidationError('What?!')
elif len(satisfied_requirements) == len(given_combo):
yield combo, satisfied_requirements
def audit(self, print_table=True):
"""
Return a table of facts about the graph, which facts
are missing, etc.
"""
audit_list = []
for node in self.connected_nodes():
info_dict = {}
info_dict['sink'] = node.is_sink()
info_dict['source'] = node.is_source()
info_dict['number_of_parents'] = len(node.parents)
info_dict['number_of_children'] = len(node.children)
info_dict['satisfied_parent_requirements'] = node.satisfied_parent_requirements()
audit_list.append(info_dict)
if print_table:
print tabulate(audit_list, headers='keys')
else:
return audit_list
class BayesEdge(object):
"""
An edge connecting source to target. This shouldn't be called
directly -- ``BayesNode`` objects should be connected to each
other, and this constructor will be called by the ``__add__``
method in ``BayesNode``.
"""
def __init__(self, source, target):
self.source = source
self.target = target
def sandbox():
"""
just for testing
"""
x = tf.Variable(3, name='x')
y = tf.Variable(4, name='y')
f = x * x * y + y + 2
with tf.Session() as sess:
init = tf.global_variables_initializer() # node to initialize the rest
init.run() # Run the initializer for all variables
result = f.eval()
print result
a = BayesNode(name='a')
b = BayesNode(name='b')
c = BayesNode(name='c')
# d = BayesNode(name='d')
# e = BayesNode(name='e')
a >> b
b >> c
# b >> d
c >> b
# d >> e
fact_book = FactBook()
fact_list = [
Equals(Probability(Given(b, a)), .2),
Equals(Probability(Given(b, ~a)), .5),
Equals(Probability(Given(~b, a)), .8),
Equals(Probability(Given(~b, ~a)), .5),
Equals(Probability(Given(c, b)), .8),
Equals(Probability(Given(c, ~b)), .1),
Equals(Probability(a), .8)]
for fact in fact_list:
fact_book += fact
b.associate_fact_book(fact_book)
for node in a.connected_nodes():
node.state = random.choice([True, False])
random_node = random.choice(a.connected_nodes())
print b.value_in_book(b.parent_fact_requirements()[1])
if random_node.is_source():
pi_values = None
print '--------------'
def parent_messages_multiplicands(some_node, target_self_truth_value=True):
"""
Get the values from each parent which we will multiply together to get
the probability that ``some_node`` is ``target_self_truth_value``.
"""
multiplicands = []
for parent_fact_requirement in some_node.parent_fact_requirements():
fact = some_node.value_in_book(parent_fact_requirement)
if fact is None:
raise BayesNetValidationError('missing fact!')
parent_state = fact.statement.statement.given
self_event_state = fact.statement.statement.event
state_probability = fact.probability
print self_event_state, parent_state, state_probability, parent_state.truth_value()
# Check that ``self``'s truth value is the same as target; and
# the parent truth value is true. If so, append the multiplicand.
if (self_event_state.is_atomic() == target_self_truth_value and
parent_state.truth_value()):
multiplicands.append(state_probability)
print multiplicands
parent_messages_multiplicands(b, target_self_truth_value=True)
b.state = False
c.state = True
print (a & b).truth_value()
import pdb; pdb.set_trace()
def conjunction_factory(*conjuncts):
if len(conjuncts) == 1:
return conjuncts[0]
else:
return Conjunction(*conjuncts)
def event_combinations(event_list, combination_length=None):
"""
For all combinations of events in ``event_list`` of length
``combination_length``, yield list of all possible truth value
assignments in those combinations (as a ``Conjunction).
"""
combination_length = combination_length or len(event_list)
if combination_length == 0:
raise BayesNetValidationError('combination_length must be > 0.')
combination_length = combination_length or len(event_list)
for sublist in itertools.combinations(event_list, combination_length):
inner_sublists = []
for boolean_combination in itertools.product(
*([[True, False]] * combination_length)):
inner_sublist = [
item if boolean_combination[index] else ~item
for index, item in enumerate(sublist)]
inner_sublists.append(conjunction_factory(*inner_sublist))
yield inner_sublists
if __name__ == '__main__':
a = BayesNode(name='a')
b = BayesNode(name='b')
c = BayesNode(name='c')
a >> c
b >> c
l = [a, b, c]
a.state = True
b.state = True
c.state = False
fact_book = FactBook()
fact_list = [
Equals(Probability(Given(b, a)), .2),
Equals(Probability(Given(b, ~a)), .5),
Equals(Probability(Given(~b, a)), .8),
Equals(Probability(Given(~b, ~a)), .5),
Equals(Probability(Given(c, b)), .8),
Equals(Probability(Given(c, ~b)), .1),
Equals(Probability(a), .8)]
for fact in fact_list:
fact_book += fact
b.associate_fact_book(fact_book)
print list(b.event_combinations_satisfied(l))
| mit | -2,955,960,630,564,812,300 | 31.370674 | 109 | 0.584306 | false |
Chaffelson/whoville | whoville/cloudbreak/models/rds_config_response.py | 1 | 15043 | # coding: utf-8
"""
Cloudbreak API
Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. Abstracts the provisioning and ease management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing to span up Hadoop clusters of arbitary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class RDSConfigResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'connection_url': 'str',
'type': 'str',
'connector_jar_url': 'str',
'id': 'int',
'creation_date': 'int',
'public_in_account': 'bool',
'cluster_names': 'list[str]',
'stack_version': 'str',
'database_engine': 'str',
'connection_driver': 'str',
'database_engine_display_name': 'str',
'workspace': 'WorkspaceResourceResponse'
}
attribute_map = {
'name': 'name',
'connection_url': 'connectionURL',
'type': 'type',
'connector_jar_url': 'connectorJarUrl',
'id': 'id',
'creation_date': 'creationDate',
'public_in_account': 'publicInAccount',
'cluster_names': 'clusterNames',
'stack_version': 'stackVersion',
'database_engine': 'databaseEngine',
'connection_driver': 'connectionDriver',
'database_engine_display_name': 'databaseEngineDisplayName',
'workspace': 'workspace'
}
def __init__(self, name=None, connection_url=None, type=None, connector_jar_url=None, id=None, creation_date=None, public_in_account=False, cluster_names=None, stack_version=None, database_engine=None, connection_driver=None, database_engine_display_name=None, workspace=None):
"""
RDSConfigResponse - a model defined in Swagger
"""
self._name = None
self._connection_url = None
self._type = None
self._connector_jar_url = None
self._id = None
self._creation_date = None
self._public_in_account = None
self._cluster_names = None
self._stack_version = None
self._database_engine = None
self._connection_driver = None
self._database_engine_display_name = None
self._workspace = None
self.name = name
self.connection_url = connection_url
self.type = type
if connector_jar_url is not None:
self.connector_jar_url = connector_jar_url
if id is not None:
self.id = id
if creation_date is not None:
self.creation_date = creation_date
if public_in_account is not None:
self.public_in_account = public_in_account
if cluster_names is not None:
self.cluster_names = cluster_names
if stack_version is not None:
self.stack_version = stack_version
self.database_engine = database_engine
self.connection_driver = connection_driver
self.database_engine_display_name = database_engine_display_name
if workspace is not None:
self.workspace = workspace
@property
def name(self):
"""
Gets the name of this RDSConfigResponse.
Name of the RDS configuration resource
:return: The name of this RDSConfigResponse.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this RDSConfigResponse.
Name of the RDS configuration resource
:param name: The name of this RDSConfigResponse.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
@property
def connection_url(self):
"""
Gets the connection_url of this RDSConfigResponse.
JDBC connection URL in the form of jdbc:<db-type>://<address>:<port>/<db>
:return: The connection_url of this RDSConfigResponse.
:rtype: str
"""
return self._connection_url
@connection_url.setter
def connection_url(self, connection_url):
"""
Sets the connection_url of this RDSConfigResponse.
JDBC connection URL in the form of jdbc:<db-type>://<address>:<port>/<db>
:param connection_url: The connection_url of this RDSConfigResponse.
:type: str
"""
if connection_url is None:
raise ValueError("Invalid value for `connection_url`, must not be `None`")
self._connection_url = connection_url
@property
def type(self):
"""
Gets the type of this RDSConfigResponse.
Type of RDS, aka the service name that will use the RDS like HIVE, DRUID, SUPERSET, RANGER, etc.
:return: The type of this RDSConfigResponse.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this RDSConfigResponse.
Type of RDS, aka the service name that will use the RDS like HIVE, DRUID, SUPERSET, RANGER, etc.
:param type: The type of this RDSConfigResponse.
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`")
self._type = type
@property
def connector_jar_url(self):
"""
Gets the connector_jar_url of this RDSConfigResponse.
URL that points to the jar of the connection driver(connector)
:return: The connector_jar_url of this RDSConfigResponse.
:rtype: str
"""
return self._connector_jar_url
@connector_jar_url.setter
def connector_jar_url(self, connector_jar_url):
"""
Sets the connector_jar_url of this RDSConfigResponse.
URL that points to the jar of the connection driver(connector)
:param connector_jar_url: The connector_jar_url of this RDSConfigResponse.
:type: str
"""
self._connector_jar_url = connector_jar_url
@property
def id(self):
"""
Gets the id of this RDSConfigResponse.
id of the resource
:return: The id of this RDSConfigResponse.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this RDSConfigResponse.
id of the resource
:param id: The id of this RDSConfigResponse.
:type: int
"""
self._id = id
@property
def creation_date(self):
"""
Gets the creation_date of this RDSConfigResponse.
creation time of the resource in long
:return: The creation_date of this RDSConfigResponse.
:rtype: int
"""
return self._creation_date
@creation_date.setter
def creation_date(self, creation_date):
"""
Sets the creation_date of this RDSConfigResponse.
creation time of the resource in long
:param creation_date: The creation_date of this RDSConfigResponse.
:type: int
"""
self._creation_date = creation_date
@property
def public_in_account(self):
"""
Gets the public_in_account of this RDSConfigResponse.
resource is visible in account
:return: The public_in_account of this RDSConfigResponse.
:rtype: bool
"""
return self._public_in_account
@public_in_account.setter
def public_in_account(self, public_in_account):
"""
Sets the public_in_account of this RDSConfigResponse.
resource is visible in account
:param public_in_account: The public_in_account of this RDSConfigResponse.
:type: bool
"""
self._public_in_account = public_in_account
@property
def cluster_names(self):
"""
Gets the cluster_names of this RDSConfigResponse.
list of clusters which use config
:return: The cluster_names of this RDSConfigResponse.
:rtype: list[str]
"""
return self._cluster_names
@cluster_names.setter
def cluster_names(self, cluster_names):
"""
Sets the cluster_names of this RDSConfigResponse.
list of clusters which use config
:param cluster_names: The cluster_names of this RDSConfigResponse.
:type: list[str]
"""
self._cluster_names = cluster_names
@property
def stack_version(self):
"""
Gets the stack_version of this RDSConfigResponse.
(HDP, HDF)Stack version for the RDS configuration
:return: The stack_version of this RDSConfigResponse.
:rtype: str
"""
return self._stack_version
@stack_version.setter
def stack_version(self, stack_version):
"""
Sets the stack_version of this RDSConfigResponse.
(HDP, HDF)Stack version for the RDS configuration
:param stack_version: The stack_version of this RDSConfigResponse.
:type: str
"""
self._stack_version = stack_version
@property
def database_engine(self):
"""
Gets the database_engine of this RDSConfigResponse.
Name of the external database engine (MYSQL, POSTGRES...)
:return: The database_engine of this RDSConfigResponse.
:rtype: str
"""
return self._database_engine
@database_engine.setter
def database_engine(self, database_engine):
"""
Sets the database_engine of this RDSConfigResponse.
Name of the external database engine (MYSQL, POSTGRES...)
:param database_engine: The database_engine of this RDSConfigResponse.
:type: str
"""
if database_engine is None:
raise ValueError("Invalid value for `database_engine`, must not be `None`")
self._database_engine = database_engine
@property
def connection_driver(self):
"""
Gets the connection_driver of this RDSConfigResponse.
Name of the JDBC connection driver (for example: 'org.postgresql.Driver')
:return: The connection_driver of this RDSConfigResponse.
:rtype: str
"""
return self._connection_driver
@connection_driver.setter
def connection_driver(self, connection_driver):
"""
Sets the connection_driver of this RDSConfigResponse.
Name of the JDBC connection driver (for example: 'org.postgresql.Driver')
:param connection_driver: The connection_driver of this RDSConfigResponse.
:type: str
"""
if connection_driver is None:
raise ValueError("Invalid value for `connection_driver`, must not be `None`")
self._connection_driver = connection_driver
@property
def database_engine_display_name(self):
"""
Gets the database_engine_display_name of this RDSConfigResponse.
Display name of the external database engine (Mysql, PostgreSQL...)
:return: The database_engine_display_name of this RDSConfigResponse.
:rtype: str
"""
return self._database_engine_display_name
@database_engine_display_name.setter
def database_engine_display_name(self, database_engine_display_name):
"""
Sets the database_engine_display_name of this RDSConfigResponse.
Display name of the external database engine (Mysql, PostgreSQL...)
:param database_engine_display_name: The database_engine_display_name of this RDSConfigResponse.
:type: str
"""
if database_engine_display_name is None:
raise ValueError("Invalid value for `database_engine_display_name`, must not be `None`")
self._database_engine_display_name = database_engine_display_name
@property
def workspace(self):
"""
Gets the workspace of this RDSConfigResponse.
workspace of the resource
:return: The workspace of this RDSConfigResponse.
:rtype: WorkspaceResourceResponse
"""
return self._workspace
@workspace.setter
def workspace(self, workspace):
"""
Sets the workspace of this RDSConfigResponse.
workspace of the resource
:param workspace: The workspace of this RDSConfigResponse.
:type: WorkspaceResourceResponse
"""
self._workspace = workspace
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
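    # Note: to_dict() above recursively serialises nested swagger models --
    # lists and dicts of models are converted element by element through their
    # own to_dict(). Illustrative round trip (hypothetical values, assumes the
    # generated keyword-argument constructor; not part of the original file):
    #
    #   cfg = RDSConfigResponse()
    #   cfg.database_engine = 'POSTGRES'
    #   cfg.connection_driver = 'org.postgresql.Driver'
    #   cfg.to_dict()['database_engine']   # -> 'POSTGRES'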
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, RDSConfigResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| apache-2.0 | -8,205,625,027,101,043,000 | 31.211991 | 984 | 0.610384 | false |
rlpy/rlpy | rlpy/Domains/__init__.py | 1 | 1308 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
#from Domain import Domain
from future import standard_library
standard_library.install_aliases()
from .HelicopterHover import HelicopterHover, HelicopterHoverExtended
from .HIVTreatment import HIVTreatment
from .PuddleWorld import PuddleWorld
from .GridWorld import GridWorld
from .BlocksWorld import BlocksWorld
from .MountainCar import MountainCar
from .ChainMDP import ChainMDP
from .SystemAdministrator import SystemAdministrator
from .PST import PST
from .Pacman import Pacman
from .IntruderMonitoring import IntruderMonitoring
from .FiftyChain import FiftyChain
from .FlipBoard import FlipBoard
from .RCCar import RCCar
from .Acrobot import Acrobot, AcrobotLegacy
from .Bicycle import BicycleBalancing, BicycleRiding
from .Swimmer import Swimmer
from .Pinball import Pinball
from .FiniteTrackCartPole import (FiniteCartPoleBalance,
FiniteCartPoleBalanceOriginal,
FiniteCartPoleBalanceModern,
FiniteCartPoleSwingUp,
FiniteCartPoleSwingUpFriction)
from .InfiniteTrackCartPole import InfCartPoleBalance, InfCartPoleSwingUp
| bsd-3-clause | 7,672,595,396,540,913,000 | 41.193548 | 73 | 0.769878 | false |
roberzguerra/scout | registration/admin.py | 1 | 3065 | # -*- coding:utf-8 -*-
from django.contrib import admin
from django.contrib.sites.models import RequestSite
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from registration.models import RegistrationProfile
class RegistrationAdmin(admin.ModelAdmin):
list_display = ('user', 'activation_key_expired')
raw_id_fields = ['user']
search_fields = ('user__username', 'user__first_name', 'user__last_name')
actions = ['activate_users', 'resend_activation_email',]
def activate_users(self, request, queryset):
"""
        Activates the selected users, if they are not already
activated.
"""
for profile in queryset:
RegistrationProfile.objects.activate_user(profile.activation_key)
activate_users.short_description = _(u"Ativar Usuários")
def resend_activation_email(self, request, queryset):
"""
Re-sends activation emails for the selected users.
Note that this will *only* send activation emails for users
who are eligible to activate; emails will not be sent to users
whose activation keys have expired or who have already
activated.
"""
if Site._meta.installed:
site = Site.objects.get_current()
else:
site = RequestSite(request)
for profile in queryset:
if not profile.activation_key_expired():
profile.send_activation_email(site)
resend_activation_email.short_description = _("Re-send activation emails")
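# The two actions above ("activate_users" and "resend_activation_email") are
# standard Django admin actions: once RegistrationAdmin is registered further
# down, they show up in the changelist's action drop-down so selected
# RegistrationProfile entries can be activated or re-mailed in bulk.
# (Descriptive note added for clarity; behaviour is unchanged.)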
class RegistrationUserAdmin(UserAdmin):
list_display = ('username', 'get_full_name', 'email', 'is_staff', 'get_registration_profile', 'get_group_scout')
search_fields = ('username', 'first_name', 'last_name', 'email')
#ordering = ('is_staff')
list_filter = UserAdmin.list_filter + ('is_active',)
def get_full_name(self, obj):
return obj.get_full_name()
get_full_name.allow_tags = True
get_full_name.short_description = _(u"Nome Completo")
def get_group_scout(self, obj):
name = u''
profile = obj.registrationprofile_set.get()
if profile:
name = profile.scout_group
return name
get_group_scout.allow_tags = True
get_group_scout.short_description = _(u"Grupo Escoteiro")
def get_registration_profile(self, obj):
link = u''
profile = obj.registrationprofile_set.get()
if profile:
link = '<a href="%s">%s</a>' % (reverse('admin:registration_registrationprofile_change', args=(profile.pk,)), profile.user)
return link
get_registration_profile.allow_tags = True
get_registration_profile.short_description = _(u"Perfil")
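# The registrations below swap Django's stock User admin for the customised
# RegistrationUserAdmin, so the extra profile and scout-group columns defined
# above appear in the user changelist; the default ModelAdmin must be
# unregistered first, hence the unregister/register pair.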
admin.site.register(RegistrationProfile, RegistrationAdmin)
admin.site.unregister(User)
admin.site.register(User, RegistrationUserAdmin) | gpl-2.0 | 8,738,751,509,169,948,000 | 35.488095 | 135 | 0.669713 | false |
astrofrog/astrodendro | setup.py | 1 | 2023 | #!/usr/bin/env python
from setuptools import setup, Command
try: # Python 3.x
from distutils.command.build_py import build_py_2to3 as build_py
except ImportError: # Python 2.x
from distutils.command.build_py import build_py
class DendroTest(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import os
import shutil
import tempfile
# First ensure that we build the package so that 2to3 gets executed
self.reinitialize_command('build', inplace=False)
self.run_command('build')
build_cmd = self.get_finalized_command('build')
new_path = os.path.abspath(build_cmd.build_lib)
# Copy the build to a temporary directory for the purposes of testing
# - this avoids creating pyc and __pycache__ directories inside the
# build directory
tmp_dir = tempfile.mkdtemp(prefix='astrodendro-test-')
testing_path = os.path.join(tmp_dir, os.path.basename(new_path))
shutil.copytree(new_path, testing_path)
import sys
import subprocess
errno = subprocess.call([sys.executable, os.path.abspath('runtests.py')], cwd=testing_path)
raise SystemExit(errno)
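# Via the cmdclass mapping below this command is invoked as
# "python setup.py test" (presumably the intended entry point): it builds the
# package -- letting build_py_2to3 run under Python 3 -- copies the build into
# a throwaway temporary directory, and runs runtests.py there so no .pyc or
# __pycache__ files end up inside the build tree.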
setup(name='dendro-core',
version='0.0.1',
description='Python package for computation of astronomical dendrograms',
author='Braden MacDonald and Thomas Robitaille',
author_email='[email protected]',
packages=['astrodendro', 'astrodendro.io', 'astrodendro.test'],
package_data={'astrodendro.test':['*.npz']},
provides=['astrodendro'],
requires=['numpy'],
cmdclass={'build_py': build_py, 'test': DendroTest},
keywords=['Scientific/Engineering'],
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python",
"License :: OSI Approved :: MIT License",
],
)
| mit | -6,794,361,530,192,240,000 | 31.111111 | 99 | 0.630252 | false |
dseomn/cohydra | cohydra/util.py | 1 | 1625 | import os
import shutil
def recursive_scandir(top_dir, dir_first=True):
"""Recursively scan a path.
Args:
top_dir: The path to scan.
dir_first: If true, yield a directory before its contents.
Otherwise, yield a directory's contents before the
directory itself.
Returns:
A generator of tuples of a path relative to the top path, and an
os.DirEntry object of the file or directory at that path. The
top_dir itself is not included.
"""
def f(relpath, dir_entry):
if dir_first and dir_entry is not None:
yield relpath, dir_entry
path = os.path.join(top_dir, relpath)
for entry in os.scandir(path):
entry_relpath = os.path.join(relpath, entry.name)
if entry.is_dir():
for item in f(entry_relpath, entry):
yield item
else:
yield entry_relpath, entry
if not dir_first and dir_entry is not None:
yield relpath, dir_entry
return f('', None)
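# Illustrative use of recursive_scandir (hypothetical path, not part of the
# original module):
#
#   for relpath, entry in recursive_scandir('/srv/photos', dir_first=False):
#       print(relpath, '[dir]' if entry.is_dir() else '[file]')
#
# With dir_first=False every directory is yielded only after its contents,
# which is the order needed for bottom-up operations such as rmdir.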
def fix_dir_stats(profile):
"""Fix directory stats for a profile.
This function assumes that every directory in the output corresponds
to a directory in the input with the same relative path. If that is
not true for the profile, do not use this function.
For each directory in profile.dst_path(), this will copy stats from
the corresponding directory in profile.src_path().
"""
for dst_relpath, dst_entry in recursive_scandir(profile.dst_path()):
if not dst_entry.is_dir():
continue
src_relpath = dst_relpath
shutil.copystat(
profile.src_path(src_relpath),
profile.dst_path(dst_relpath),
)
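# Sketch of how fix_dir_stats is meant to be called (assumes a profile object
# exposing src_path()/dst_path() as described in the docstring; illustrative
# only):
#
#   fix_dir_stats(profile)  # re-applies mode and timestamps from each source
#                           # directory onto the matching output directory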
| apache-2.0 | 3,305,364,171,830,099,000 | 25.639344 | 70 | 0.667692 | false |