''' lol, i really really wasn't expecting this to turn into a serious disassembler... '''
from . import optable,decoder,modrm
# XXX: figure out how to add these explicit imports to the doc output
# for this module. (without having to use __all__)
from .decoder import isprefix,consume,decodeInteger,encodeInteger
lookup = optable.Lookup
# equivalent to decoder.consume(iter(string)) ->
# (prefix, opcode, modrm, sib, disp, immediate)
decode = lambda string: consume(iter(string))
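# Usage sketch (assuming the decoder splits instructions the way the tests
# below expect): a 5-byte relative call decodes into its six components, with
# the immediate holding the little-endian displacement, e.g.
#   decode(b'\xe8\x47\x4f\x37\x00')
#   -> (b'', b'\xe8', b'', b'', b'', b'\x47\x4f\x37\x00')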
def extractmodrm(instruction):
'''Return the (Mod, Reg, r/m) components of an instruction'''
modrm = getModrm(instruction)
return decoder.extractmodrm( decodeInteger(modrm) )
def extractsib(instruction):
'''Returns (scale,index,base) of an instruction'''
sib = getSIB(instruction)
return decoder.extractsib( decodeInteger(sib) )
def disassemble(codeblock):
'''Disassembles string into a list of instruction tuples'''
result = []
code = iter(codeblock)
try:
while True:
result.append( consume(code) )
except StopIteration:
pass
return result
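# Usage sketch: feeding two back-to-back instructions should yield two tuples,
# e.g. disassemble(b'\x75\x09\xc3') -> [<jnz rel8>, <ret>], assuming the
# decoder consumes them in sequence.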
def new():
'''A new empty instruction'''
return (b'', b'', b'', b'', b'', b'')
def length(instruction):
return len(b''.join(instruction))
def stringToNumber(string):
'''This function name is deprecated in favor of decodeInteger'''
return decodeInteger(string)
def numberToString(number, bytes):
'''This function name is deprecated in favor of encodeInteger'''
return encodeInteger(number, bytes)
def getPrefix(instruction): return instruction[0]
def getOpcode(instruction): return instruction[1]
def getModrm(instruction): return instruction[2]
def getSIB(instruction): return instruction[3]
def getDisplacement(instruction): return instruction[4]
def getImmediate(instruction): return instruction[5]
#instruction = (prefix, opcode, modrm, sib, disp, immediate)
def setPrefix(instruction, value):
n = instruction
return (value, n[1], n[2], n[3], n[4], n[5])
def setOpcode(instruction, value):
n = instruction
return (n[0], value, n[2], n[3], n[4], n[5])
def setModrm(instruction, value):
n = instruction
return (n[0], n[1], value, n[3], n[4], n[5])
def setSIB(instruction, value):
n = instruction
return (n[0], n[1], n[2], value, n[4], n[5])
def setDisplacement(instruction, value):
n = instruction
return (n[0], n[1], n[2], n[3], value, n[5])
def setImmediate(instruction, value):
n = instruction
return (n[0], n[1], n[2], n[3], n[4], value)
def isInstruction(value):
'''returns true if provided a valid instruction'''
return isinstance(value, tuple) and len(value) == 6
def promoteBranch(instruction, size):
return { 1 : promoteBranch_8, 2 : promoteBranch_16, 4 : promoteBranch_32 }[size](instruction)
def promoteBranch_8(instruction):
'''Promote(?) instruction to an 8-bit branch'''
imm = getImmediate(instruction)
offset = decodeInteger(imm, True)
prefix = bytes([x for x in getPrefix(instruction) if x != 0x66])
if isConditionalBranch8(instruction) or isUnconditionalBranch8(instruction) or isRelativeCall(instruction):
result = instruction
offset += length(result)
elif isUnconditionalBranch(instruction):
result = setOpcode(instruction, b'\xeb')
elif isConditionalBranch(instruction):
column = getOpcode(instruction)[1] & 0xf
result = setOpcode(instruction, bytes([column | 0x70]))
else:
raise NotImplementedError('Unable to promote a non-branch instruction to 8-bits: {!r}'.format(instruction))
result = setPrefix(setImmediate(result, b'\x00'), prefix)
return setImmediate(result, encodeInteger(offset-length(result), 1))
def promoteBranch_32(instruction):
imm = getImmediate(instruction)
offset = decodeInteger(imm, True) + length(instruction)
prefix = bytes([x for x in getPrefix(instruction) if x != 0x66])
if isConditionalBranch8(instruction):
column = getOpcode(instruction)[0] & 0xf
result = setOpcode(instruction, b'\x0f' + bytes([column | 0x80]))
elif isUnconditionalBranch8(instruction):
result = setOpcode(instruction, b'\xe9')
elif isRelativeCall(instruction) or isUnconditionalBranch(instruction) or isConditionalBranch(instruction):
result = instruction
else:
raise NotImplementedError('Unable to promote a non-branch instruction to 32-bits: {!r}'.format(instruction))
result = setPrefix(setImmediate(result, b'\x00\x00\x00\x00'), prefix)
return setImmediate(result, encodeInteger(offset-length(result), 4))
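# Worked example (mirrors the promote_5 test below): promoting b'\xeb\xe1'
# (jmp rel8, displacement -0x1f) grows the encoding from 2 to 5 bytes, so the
# immediate is rebiased to -0x22 and the result is b'\xe9\xde\xff\xff\xff',
# which still lands on the same target.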
def promoteBranch_16(instruction):
raise NotImplementedError("16-bit absolute branches not implemented really")
result = promoteBranch_32(instruction)
imm = getImmediate(result)
offset = decodeInteger(imm, True) - length(result)
# downgrade the opcode
prefix = getPrefix(result)
if b'\x66' not in prefix:
prefix += b'\x66'
result = setPrefix(result, prefix)
offset += length(result)
return setImmediate(result, encodeInteger(offset, 2))
def getRelativeAddress(pc, instruction):
'''Given the specified instruction and address, will return the target branch address'''
imm = getImmediate(instruction)
l = len(imm)
ofs = decodeInteger(imm)
pc += length(instruction)
if (l == 4) and (ofs & 0x80000000):
return pc - (0x100000000 - ofs)
elif (l == 2) and (ofs & 0x8000):
return pc - (0x10000 - ofs)
elif (l == 1) and (ofs & 0x80):
return pc - (0x100 - ofs)
# otherwise we're just jumping forward
return pc + ofs
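# Worked example (taken from the relative_2 test below): for b'\x75\xdb' at
# 0x100EED78, the immediate 0xdb has its sign bit set, so the target is
# (0x100EED78 + 2) - (0x100 - 0xdb) = 0x100EED55.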
def setRelativeAddress(source, instruction, target):
# subtract the old instruction length
instructionlength = length(instruction)
res = target - source - instructionlength
if res >= -0x80 and res < 0x80:
result = promoteBranch_8(instruction)
sz = length(result) - length(instruction)
return setImmediate(result, encodeInteger(res-sz, 1))
# elif res >= -0x8000 and res < 0x8000:
# result = promoteBranch_16(instruction)
# sz = length(result) - length(instruction)
# return setImmediate(result, encodeInteger(res-sz, 2))
elif res >= -0x80000000 and res < 0x80000000:
result = promoteBranch_32(instruction)
sz = length(result) - length(instruction)
return setImmediate(result, encodeInteger(res-sz, 4))
raise NotImplementedError("Unable to figure out immediate value size for %x"% res)
def isConditionalBranch8(instruction):
opcode = getOpcode(instruction)
if len(opcode) == 1:
ch = opcode[0]
return ch & 0xf0 == 0x70
return False
def isConditionalBranch32(instruction):
opcode = getOpcode(instruction)
if len(opcode) == 2:
ch = opcode[1]
return ch & 0xf0 == 0x80
return False
def isConditionalBranch(instruction):
return isConditionalBranch8(instruction) or isConditionalBranch32(instruction)
## regular branches
def isUnconditionalBranch8(instruction):
'''jmp Jb'''
return getOpcode(instruction) == b'\xeb'
def isUnconditionalBranch32(instruction):
'''jmp Jz'''
return getOpcode(instruction) == b'\xe9'
def isUnconditionalBranch(instruction):
return isUnconditionalBranch8(instruction) or isUnconditionalBranch32(instruction)
def isJmpFF(instruction):
opcode = getOpcode(instruction)
if opcode == b'\xff':
modrm = getModrm(instruction)
mod,reg,rm = decoder.extractmodrm(ord(modrm))
return reg in [4,5]
return False
def isShortJmp(instruction):
opcode = getOpcode(instruction)
if opcode == b'\xff':
modrm = getModrm(instruction)
mod,reg,rm = decoder.extractmodrm(ord(modrm))
return reg == 4
return False
def isFarJmp(instruction):
opcode = getOpcode(instruction)
if opcode == b'\xff':
modrm = getModrm(instruction)
mod,reg,rm = decoder.extractmodrm(ord(modrm))
return reg == 5
return False
### XXX: these branch tests will need to be legitimately tested
def isRegisterBranch(instruction):
if isJmpFF(instruction):
modrm = getModrm(instruction)
mod,reg,rm = decoder.extractmodrm(ord(modrm))
return mod == 3
return False
def isMemoryBranch(instruction):
if isJmpFF(instruction):
modrm = getModrm(instruction)
mod,reg,rm = decoder.extractmodrm(ord(modrm))
return mod < 3
return False
def isDispBranch(instruction):
if isJmpFF(instruction):
modrm = getModrm(instruction)
mod,reg,rm = decoder.extractmodrm(ord(modrm))
return rm == 5 and mod in [1,2]
return False
def isSibBranch(instruction):
if isJmpFF(instruction):
modrm = getModrm(instruction)
mod,reg,rm = decoder.extractmodrm(ord(modrm))
return rm == 4 and mod < 3
return False
def isAbsoluteBranch(instruction):
'''jmp Ap'''
opcode = getOpcode(instruction)
return opcode == b'\xea'
def isRelativeBranch(instruction):
return isUnconditionalBranch(instruction) or isConditionalBranch(instruction)
def isBranch(instruction):
return isRelativeBranch(instruction) or isAbsoluteBranch(instruction) or \
isRegisterBranch(instruction) or isMemoryBranch(instruction) or \
isDispBranch(instruction) or isSibBranch(instruction)
## calls
def isAbsoluteCall(instruction):
'''call Ap'''
return getOpcode(instruction) == b'\x9a'
def isRelativeCall(instruction):
'''call Jz'''
return getOpcode(instruction) == b'\xe8'
def isRegisterCall(instruction):
'''call Ev'''
if getOpcode(instruction) == b'\xff':
modrm = getModrm(instruction)
mod,reg,rm = decoder.extractmodrm(ord(modrm))
return reg == 2 and mod == 3
return False
def isMemoryCall(instruction):
'''call Mp'''
if getOpcode(instruction) == b'\xff':
modrm = getModrm(instruction)
mod,reg,rm = decoder.extractmodrm(ord(modrm))
return reg in [2,3] and mod < 3
return False
def isCall(instruction):
return isRelativeCall(instruction) or isMemoryCall(instruction) or isRegisterCall(instruction) or isAbsoluteCall(instruction)
def isReturn(instruction):
'''retn and friends'''
return getOpcode(instruction) in [b'\xc2', b'\xc3', b'\xca', b'\xcb', b'\xcf']
if __name__ == '__main__':
import ia32
class Result(Exception): pass
class Success(Result): pass
class Failure(Result): pass
TestCaseList = []
def TestCase(fn):
def harness(**kwds):
name = fn.__name__
try:
res = fn(**kwds)
except Success:
print('%s: Success'% name)
return True
except Failure as E:
pass
print('%s: Failure'% name)
return False
TestCaseList.append(harness)
return fn
# relative
if False:
code = b'\xE8\x72\xFB\xFF\xFF'
insn = decode(code)
print('rel',isRelativeCall(insn))
# register
# 11 010 110
if False:
code = b'\xff\xd6'
insn = decode(code)
print('reg',isRegisterCall(insn))
# memory
# 00 010 101
if False:
code = b'\xFF\x15\xC0\x52\x5C\x00'
insn = decode(code)
print('mem',isMemoryCall(insn))
# forgot
# 00 100 101
if False:
code = b'\xFF\x25\xB0\x51\x5C\x00'
insn = decode(code)
print(repr(insn))
print('mem',isBranch(insn),isMemoryBranch(insn))
if False:
code = b'\x0f\x0f\xe1\xb4'
insn = decode(code)
print(getOpcode(insn) == b'\x0f\x0f')
print(getModrm(insn) == b'\xe1')
print(getImmediate(insn) == b'\xb4')
@TestCase
def relative_0():
code = b'\x0f\x85\x16\x01\x00\x00'
n = decode(code)
address = 0x100ee82a
target = 0x100ee946
if getRelativeAddress(address, n) == target:
raise Success
return
@TestCase
def relative_1():
code = b'\x74\x26'
n = decode(code)
address = 0x100EE836
target = 0x100EE85E
if getRelativeAddress(address, n) == target:
raise Success
return
@TestCase
def relative_2():
code = b'\x75\xdb'
n = decode(code)
address = 0x100EED78
target = 0x100EED55
if getRelativeAddress(address, n) == target:
raise Success
return
@TestCase
def relative_3():
code = b'\x0f\x86\xa2\x05\x00\x00'
n = decode(code)
address = 0x101781a3
target = 0x1017874B
if getRelativeAddress(address, n) == target:
raise Success
return
@TestCase
def relative_4():
code = b'\x0f\x8c\x97\xfa\xff\xff'
n = decode(code)
address = 0x10178743
target = 0x101781E0
if getRelativeAddress(address, n) == target:
raise Success
return
@TestCase
def promote_0():
'''8b to 8b'''
a = decode(b'\xeb\xfe')
b = promoteBranch_8(a)
if a == b:
raise Success
return
# @TestCase
def promote_1():
'''16b to 16b'''
a = decode(b'\x66\xe9\xfe\xff')
b = promoteBranch_16(a)
if a == b:
raise Success
print(repr(a))
print(repr(b))
return
@TestCase
def promote_2():
'''32b to 32b'''
a = decode(b'\x0F\x84\x14\x2D\xFC\xFF')
b = promoteBranch_32(a)
if a == b:
raise Success
return
@TestCase
def promote_3():
'''8b to 32b forwards'''
a = decode(b'\x7f\x0c')
b = decode(b'\x0f\x8f\x08\x00\x00\x00')
if promoteBranch_32(a) == b:
raise Success
return
# @TestCase
def promote_4():
'''8b to 16b'''
# XXX: this doesn't disassemble correctly in windbg
a = decode(b'\xeb\x0b')
b = decode(b'\x66\xe9\x08\x00')
a = promoteBranch_16(a)
if a == b:
raise Success
print(repr(a))
print(repr(b))
return
@TestCase
def promote_5():
'''8b to 32b backwards'''
a = decode(b'\xeb\xe1')
b = decode(b'\xe9\xde\xff\xff\xff')
if promoteBranch_32(a) == b:
raise Success
return
@TestCase
def Test_0():
code = b'\xeb\xfe'
n = setRelativeAddress(0x77be0000, decode(code), 0x77be0000)
if b''.join(n) == b'\xeb\xfe':
raise Success
return
def test_set(source, instruction, target):
n = decode(instruction)
x = setRelativeAddress(source, n, target)
res = getRelativeAddress(source, x)
if res != target:
print('%x -- %x != %x'% (source,res,target))
raise Failure
raise Success
@TestCase
def Test_1():
code = b'\xE9\x9F\x91\xFF\xFF'
test_set(0x7deb7534, code, 0x7deb06d8)
@TestCase
def Test_2():
code = b'\x0F\x85\xEC\x6D\x00\x00'
test_set(0x7deb073f, code, 0x7deb7531)
@TestCase
def Test_3():
code = b'\x75\x18'
test_set(0x7deb0927, code, 0x7deb0941)
@TestCase
def Test_4():
code = b'\x72\xd3'
test_set(0x7deafc45, code, 0x7deafc1a)
@TestCase
def Test_5():
code = b'\x75\x09'
test_set(0x1000d6ae, code, 0x1000d6b9)
@TestCase
def Test_6():
code = b'\x75\xde'
test_set(0x1000d670, code, 0x1000d650)
@TestCase
def Test_7():
code = b'\xe8\x47\x4f\x37\x00'
test_set(0x1000d6b4, code, 0x10382600)
@TestCase
def Test_8():
code = b'\x66\xe9\x42\x03'
test_set(0x1000e1b5, code, 0x1000e4fb)
@TestCase
def Test_9():
code = b'\x66\xe9\x42\xd3'
test_set(0x10006418, code, 0x1000375e)
@TestCase
def Test_a():
code = b'\x0F\x85\xAE\xF1\xFF\xFF'
test_set(0x7deb0aca, code, 0x7deafc7e)
@TestCase
def Test_b():
code = b'\xe9\xfa\xfe\x54\x89'
test_set(0x76ec010f, code, 0x41000e)
@TestCase
def Test_c():
code = b'\xe9\xec\xff\xff\xff'
test_set(0x40000f, code, 0x400000)
@TestCase
def Test_d():
code = b'\xeb\xf0'
n = ia32.promoteBranch_32( ia32.decode(code) )
if b''.join(n) == b'\xe9\xed\xff\xff\xff':
raise Success
print(repr(decode(code)), repr(n))
raise Failure
@TestCase
def Test_e():
code = b'\xe9\xc1\xfe\xff\xff'
test_set( 0x101d70e9, code, 0x101d6faf )
@TestCase
def Test_f():
code = b'\xe9\xa6\x00\x00\x00'
test_set( 0x101dc252, code, 0x101dc2fd )
if False:
code = b'\x0f\x85\x7f\xff\xff\xff'
n = setRelativeAddress(0, decode(code), -5)
print(hex(getRelativeAddress(0, n)))
n = setRelativeAddress(0, decode(code), 0x2)
print(repr(n))
print(hex(getRelativeAddress(0, n)))
print(hex(decodeInteger(getImmediate(n)) + length(n)))
if False:
code = b'\x77\x08'
code = b'\x0f\x87\x04\x00\x00\x00'
n = decode(code)
x = promoteBranch_8(n)
print(repr(x))
print('%x:%x -> %x'% (0x70, 0x72, 0x7a))
@TestCase
def Test_10():
res = '''
b8b5000000ba0003fe7fff12c20c00b8b6000000ba0003fe7fff12c21800b8b7000000ba0003fe7fff12c22400b8b8000000ba0003fe7fff12c22400b8b9000000ba0003fe7fff12c21800b8ba000000ba0003fe7fff12c21400b8bb000000ba0003fe7fff12c20400b8bc000000ba0003fe7fff12c20800b8bd000000ba0003fe7fff12c20c00b8be000000ba0003fe7fff12c21400
6a1468082d917ce85cbcffff8a1dc0e1977c8b750c33d23bf20f85f81602008b7d103bfa740289178b4d08f7c1fcffffff0f851cbb02003bfa0f842dbb02008bc183e0020f85d416020084db754a33db4384cb0f84821d00006878e1977c3bc20f8597170200e832e3feff85f60f8536bb020064a118000000b9d8e0977cf00fc1194381e3ffff00008b402425ff0f0000c1e0100bd8891f33f68bc6e802bcffffc20c0090ffffffff43e8937c5ee8937c90909090906a0c68802d917ce8a6bbffff8b5508f7c2feffffff0f85b8bb02008b4d0c85c97436f7c1000000f00f85bebb020064a118000000c1e91033482466f7c1ff0f0f85a7bb0200f6c2010f84107a01006878e1977ce86fe3feff33c0e88ebbffffc208009090909090ffffffff23e9937c3ee9937c90909090908bff558bec8b45145333db3bc3568b75080f8c04b60200895e048bc8c1e1100bc88b
6a186868d7917ce8c411ffff33db895de4895de0a0c0e1977c895ddc895dd8381dc4e0977c0f8599db0000895dfc3ac374528d45e050ff7508e8ce56ffff84c074118b45e06639583a0f85f0e6010080483604834dfcffe8220000008b45e4e8a711ffffc20400909090909090ffffffff000000004bbe937c9090909090395ddc751cc38d45d8505353e8d954ffff8945e43bc37cbdc745dc01000000eb93ff75d86a01e87555ffffebd88b85f0fdffff56ffb5e8fdffffc603006a016a00ff7004ffb5ecfdffffe8c0e6ffff8bf885ff0f8cae550200ff36e8b5eaffffff36ffb5ecfdffffe8ebe8ffff8bf885ff0f8d76f9ffffe9c85502008b4df88b4508
ddd8d9eec3ddd8ddd8d9e8c3dbbd62ffffffdbad62fffffff68569ffffff407408c68570ffffff07c3c68570ffffff01dc05c80e987cc3d9c9dbbd62ffffffdbad62fffffff68569ffffff407409c68570ffffff07eb07c68570ffffff01dec1c3dbbd62ffffffdbad62fffffff68569ffffff407420d9c9dbbd62ffffffdbad62fffffff68569ffffff407409c68570ffffff07eb07c68570ffffff01dec1c3ddd8ddd8db2d68e0977c80bd70ffffff007f07c68570ffffff010ac9c30ac97402d9e0c390ccccccccccccc68570ffff
ddd8ddd8d9e8c3dbbd62ffffffdbad62fffffff68569ffffff407408c68570ffffff07c3c68570ffffff01dc05c80e987cc3d9c9dbbd62ffffffdbad62fffffff68569ffffff407409c68570ffffff07eb07c68570ffffff01dec1c3dbbd62ffffffdbad62fffffff68569ffffff407420d9c9dbbd62ffffffdbad62fffffff68569ffffff407409c68570ffffff07eb07c68570ffffff01dec1c3ddd8ddd8db2d68e0977c80bd70ffffff007f07c68570ffffff010ac9c30ac97402d9e0c390ccccccccccccc68570fffffffe0aed75
ddd8ddd8db2d68e0977c80bd70ffffff007f07c68570ffffff010ac9c30ac97402d9e0c390ccccccccccccc68570fffffffe0aed753fd9c9d9f1eb0dc68570fffffffe32edd9eadec9e83f010000d9e8dec1f68561ffffff017404d9e8def1f6c2407502d9fd0aed0f84e4feffffd9e0e9ddfeffffe8560100000bc0748232ed83f8027402f6d5d9c9d9e1ebabe9d3feffffe969ffffffddd8ddd8db2d80e0977cc68570ffffff02c3d9edd9c9d9e49bddbd60ffffff9bf68561ffffff410f853cffffffd9f1c3c68570ffffff02ddd8
ddd8db2d68e0977c80bd70ffffff007f07c68570ffffff010ac9c30ac97402d9e0c390ccccccccccccc68570fffffffe0aed753fd9c9d9f1eb0dc68570fffffffe32edd9eadec9e83f010000d9e8dec1f68561ffffff017404d9e8def1f6c2407502d9fd0aed0f84e4feffffd9e0e9ddfeffffe8560100000bc0748232ed83f8027402f6d5d9c9d9e1ebabe9d3feffffe969ffffffddd8ddd8db2d80e0977cc68570ffffff02c3d9edd9c9d9e49bddbd60ffffff9bf68561ffffff410f853cffffffd9f1c3c68570ffffff02ddd8db2d8ae0977cc30ac90f8523ffffffc3d9ec
c68570ffffff010ac9c30ac97402d9e0c390ccccccccccccc68570fffffffe0aed753fd9c9d9f1eb0dc68570fffffffe32edd9eadec9e83f010000d9e8dec1f68561ffffff017404d9e8def1f6c2407502d9fd0aed0f84e4feffffd9e0e9ddfeffffe8560100000bc0748232ed83f8027402f6d5d9c9d9e1ebabe9d3feffffe969ffffffddd8ddd8db2d80e0977cc68570ffffff02c3d9edd9c9d9e49bddbd60ffffff9bf68561ffffff410f853cffffffd9f1c3c68570ffffff02ddd8db2d8ae0977cc30ac90f8523ffffffc3d9eceb02d9edd9c90ac90f8510ffffffd9f1c3
d9c0d9fcd8d99bdfe09e751ad9c0dc0dea0e987cd9c0d9fcded99bdfe09e740db801000000c3b800000000ebf8b802000000ebf15683ec748bf45683ec08dd1c2483ec08dd1c249bdd7608e8d988030083c414dd6608dd0683c4745e85c00f85dffdffffeb05e9d8fdffffc3cccccccccc558bec5756538b4d100bc974408b75088b7d0cb741b35ab6208d098a26468a07473ae074183ae772063ae3770202e63ac772063ac3770202c63ae0750749740debd9eb09b9ffffffff7202f7d98bc15b5e5fc9c38b442404538b4c240c56578b5004558b70088b780c8b298bdf33de23da33df03dd8b288d9c1d78a46ad7c1c3078bc603da33c223c38b690433c603fd0556b7c7e803f8c1c70c8bc203fb33c323c78b690833c203f505db70202403
8bff558bec5151dd450856dc15f817957c33d233f6dfe0f6c4057a02d9e0b80000f07fdd5df8394514b90000f0ff753e3955107578dd45f8dc1de817957cdfe0f6c4410f8482000000dd45f8dc1de817957cdfe0f6c4058b45180f8b89000000dd05f80f987c33f646e9ef000000394d14753a3955107535dd45f8dc1de817957cdfe0f6c4417507d9eee9cb000000dd45f8dc1de817957cdfe0f6c4058b45187abedd05f00f987ce9b000000039450c753f3955080f85a4000000dd4510dc1df817957cdfe0f6c441750bdd05f00f987ce984000000dd4510dc1df817957cdf
'''.strip().split('\n')
source = iter(bytes.fromhex(res[0].strip()))
import ia32
print(repr(ia32.consume(source)))
raise NotImplementedError
if __name__ == '__main__':
results = []
for t in TestCaseList:
results.append( t() )
pass
|
|
# coding: utf-8
from __future__ import absolute_import
import functools
import re
from flask.ext import login
from flask.ext import wtf
from flask.ext.oauthlib import client as oauth
from google.appengine.ext import ndb
import flask
import unidecode
import wtforms
import cache
import config
import model
import task
import util
from main import app
_signals = flask.signals.Namespace()
###############################################################################
# Flask Login
###############################################################################
login_manager = login.LoginManager()
class AnonymousUser(login.AnonymousUserMixin):
id = 0
admin = False
name = 'Anonymous'
user_db = None
def key(self):
return None
def has_permission(self, permission):
return False
login_manager.anonymous_user = AnonymousUser
class FlaskUser(AnonymousUser):
def __init__(self, user_db):
self.user_db = user_db
self.id = user_db.key.id()
self.name = user_db.name
self.admin = user_db.admin
def key(self):
return self.user_db.key.urlsafe()
def get_id(self):
return self.user_db.key.urlsafe()
def is_authenticated(self):
return True
def is_active(self):
return self.user_db.active
def is_anonymous(self):
return False
def has_permission(self, permission):
return self.user_db.has_permission(permission)
@login_manager.user_loader
def load_user(key):
user_db = ndb.Key(urlsafe=key).get()
if user_db:
return FlaskUser(user_db)
return None
login_manager.init_app(app)
def current_user_id():
return login.current_user.id
def current_user_key():
return login.current_user.user_db.key if login.current_user.user_db else None
def current_user_db():
return login.current_user.user_db
def is_logged_in():
return login.current_user.id != 0
###############################################################################
# Decorators
###############################################################################
def login_required(f):
decorator_order_guard(f, 'auth.login_required')
@functools.wraps(f)
def decorated_function(*args, **kwargs):
if is_logged_in():
return f(*args, **kwargs)
if flask.request.path.startswith('/api/'):
return flask.abort(401)
return flask.redirect(flask.url_for('signin', next=flask.request.url))
return decorated_function
def admin_required(f):
decorator_order_guard(f, 'auth.admin_required')
@functools.wraps(f)
def decorated_function(*args, **kwargs):
if is_logged_in() and current_user_db().admin:
return f(*args, **kwargs)
if not is_logged_in() and flask.request.path.startswith('/api/'):
return flask.abort(401)
if not is_logged_in():
return flask.redirect(flask.url_for('signin', next=flask.request.url))
return flask.abort(403)
return decorated_function
permission_registered = _signals.signal('permission-registered')
def permission_required(permission=None, methods=None):
def permission_decorator(f):
decorator_order_guard(f, 'auth.permission_required')
# default to decorated function name as permission
perm = permission or f.func_name
meths = [m.upper() for m in methods] if methods else None
permission_registered.send(f, permission=perm)
@functools.wraps(f)
def decorated_function(*args, **kwargs):
if meths and flask.request.method.upper() not in meths:
return f(*args, **kwargs)
if is_logged_in() and current_user_db().has_permission(perm):
return f(*args, **kwargs)
if not is_logged_in():
if flask.request.path.startswith('/api/'):
return flask.abort(401)
return flask.redirect(flask.url_for('signin', next=flask.request.url))
return flask.abort(403)
return decorated_function
return permission_decorator
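# Illustrative usage (the 'profile' view is hypothetical, not part of this
# module); note the ordering: the auth decorator sits below app.route, which is
# exactly what decorator_order_guard checks for:
#
#   @app.route('/profile/')
#   @login_required
#   def profile():
#     return flask.render_template('profile.html')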
###############################################################################
# Sign in stuff
###############################################################################
class SignInForm(wtf.Form):
email = wtforms.StringField(
'Email',
[wtforms.validators.required()],
filters=[util.email_filter],
)
password = wtforms.StringField(
'Password',
[wtforms.validators.required()],
)
remember = wtforms.BooleanField(
'Keep me signed in',
[wtforms.validators.optional()],
)
recaptcha = wtf.RecaptchaField()
next_url = wtforms.HiddenField()
@app.route('/signin/', methods=['GET', 'POST'])
def signin():
next_url = util.get_next_url()
form = None
if config.CONFIG_DB.has_email_authentication:
form = form_with_recaptcha(SignInForm())
save_request_params()
if form.validate_on_submit():
result = get_user_db_from_email(form.email.data, form.password.data)
if result:
cache.reset_auth_attempt()
return signin_user_db(result)
if result is None:
form.email.errors.append('Email or Password do not match')
if result is False:
return flask.redirect(flask.url_for('welcome'))
if not form.errors:
form.next_url.data = next_url
if form and form.errors:
cache.bump_auth_attempt()
return flask.render_template(
'auth/auth.html',
title='Sign in',
html_class='auth',
next_url=next_url,
form=form,
form_type='signin' if config.CONFIG_DB.has_email_authentication else '',
**urls_for_oauth(next_url)
)
###############################################################################
# Sign up stuff
###############################################################################
class SignUpForm(wtf.Form):
email = wtforms.StringField(
'Email',
[wtforms.validators.required(), wtforms.validators.email()],
filters=[util.email_filter],
)
recaptcha = wtf.RecaptchaField()
@app.route('/signup/', methods=['GET', 'POST'])
def signup():
next_url = util.get_next_url()
form = None
if config.CONFIG_DB.has_email_authentication:
form = form_with_recaptcha(SignUpForm())
save_request_params()
if form.validate_on_submit():
user_db = model.User.get_by('email', form.email.data)
if user_db:
form.email.errors.append('This email is already taken.')
if not form.errors:
user_db = create_user_db(
None,
util.create_name_from_email(form.email.data),
form.email.data,
form.email.data,
)
user_db.put()
task.activate_user_notification(user_db)
cache.bump_auth_attempt()
return flask.redirect(flask.url_for('welcome'))
if form and form.errors:
cache.bump_auth_attempt()
title = 'Sign up' if config.CONFIG_DB.has_email_authentication else 'Sign in'
return flask.render_template(
'auth/auth.html',
title=title,
html_class='auth',
next_url=next_url,
form=form,
**urls_for_oauth(next_url)
)
###############################################################################
# Sign out stuff
###############################################################################
@app.route('/signout/')
def signout():
login.logout_user()
flask.flash('You have been signed out.', category='success')
return flask.redirect(util.param('next') or flask.url_for('signin'))
###############################################################################
# Helpers
###############################################################################
def url_for_signin(service_name, next_url):
return flask.url_for('signin_%s' % service_name, next=next_url)
def urls_for_oauth(next_url):
return {
'bitbucket_signin_url': url_for_signin('bitbucket', next_url),
'dropbox_signin_url': url_for_signin('dropbox', next_url),
'facebook_signin_url': url_for_signin('facebook', next_url),
'github_signin_url': url_for_signin('github', next_url),
'google_signin_url': url_for_signin('google', next_url),
'instagram_signin_url': url_for_signin('instagram', next_url),
'linkedin_signin_url': url_for_signin('linkedin', next_url),
'microsoft_signin_url': url_for_signin('microsoft', next_url),
'twitter_signin_url': url_for_signin('twitter', next_url),
'vk_signin_url': url_for_signin('vk', next_url),
'yahoo_signin_url': url_for_signin('yahoo', next_url),
}
def create_oauth_app(service_config, name):
upper_name = name.upper()
app.config[upper_name] = service_config
service_oauth = oauth.OAuth()
service_app = service_oauth.remote_app(name, app_key=upper_name)
service_oauth.init_app(app)
return service_app
def decorator_order_guard(f, decorator_name):
if f in app.view_functions.values():
raise SyntaxError(
'Do not use %s above app.route decorators as it would not be checked. '
'Instead move the line below the app.route lines.' % decorator_name
)
def save_request_params():
flask.session['auth-params'] = {
'next': util.get_next_url(),
'remember': util.param('remember', bool),
}
def signin_oauth(oauth_app, scheme='http'):
try:
flask.session.pop('oauth_token', None)
save_request_params()
return oauth_app.authorize(callback=flask.url_for(
'%s_authorized' % oauth_app.name, _external=True, _scheme=scheme
))
except oauth.OAuthException:
flask.flash(
'Something went wrong with sign in. Please try again.',
category='danger',
)
return flask.redirect(flask.url_for('signin', next=util.get_next_url()))
def form_with_recaptcha(form):
should_have_recaptcha = cache.get_auth_attempt() >= config.RECAPTCHA_LIMIT
if not (should_have_recaptcha and config.CONFIG_DB.has_recaptcha):
del form.recaptcha
return form
###############################################################################
# User related stuff
###############################################################################
def create_user_db(auth_id, name, username, email='', verified=False, **props):
email = email.lower() if email else ''
if verified and email:
user_dbs, user_cr = model.User.get_dbs(email=email, verified=True, limit=2)
if len(user_dbs) == 1:
user_db = user_dbs[0]
user_db.auth_ids.append(auth_id)
user_db.put()
task.new_user_notification(user_db)
return user_db
if isinstance(username, str):
username = username.decode('utf-8')
username = unidecode.unidecode(username.split('@')[0].lower()).strip()
username = re.sub(r'[\W_]+', '.', username)
new_username = username
n = 1
while not model.User.is_username_available(new_username):
new_username = '%s%d' % (username, n)
n += 1
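# e.g. if 'john.doe' is already taken, the loop above falls back to
# 'john.doe1', 'john.doe2', ... until is_username_available() accepts one.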
user_db = model.User(
name=name,
email=email,
username=new_username,
auth_ids=[auth_id] if auth_id else [],
verified=verified,
token=util.uuid(),
**props
)
user_db.put()
task.new_user_notification(user_db)
return user_db
@ndb.toplevel
def signin_user_db(user_db):
if not user_db:
return flask.redirect(flask.url_for('signin'))
flask_user_db = FlaskUser(user_db)
auth_params = flask.session.get('auth-params', {
'next': flask.url_for('welcome'),
'remember': False,
})
flask.session.pop('auth-params', None)
if login.login_user(flask_user_db, remember=auth_params['remember']):
user_db.put_async()
flask.flash('Hello %s, welcome to %s.' % (
user_db.name, config.CONFIG_DB.brand_name,
), category='success')
return flask.redirect(util.get_next_url(auth_params['next']))
flask.flash('Sorry, but you could not sign in.', category='danger')
return flask.redirect(flask.url_for('signin'))
def get_user_db_from_email(email, password):
user_dbs, user_cursor = model.User.get_dbs(email=email, active=True, limit=2)
if not user_dbs:
return None
if len(user_dbs) > 1:
flask.flash('''We are sorry but it looks like there is a conflict with
your account. Our support team is already informed and we will get
back to you as soon as possible.''', category='danger')
task.email_conflict_notification(email)
return False
user_db = user_dbs[0]
if user_db.password_hash == util.password_hash(user_db, password):
return user_db
return None
|
|
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+ and Openssl 1.0+
#
from __future__ import print_function
import errno
import os
import random
from azurelinuxagent.common.cgroup import CpuCgroup, MemoryCgroup, CGroup
from azurelinuxagent.common.exception import CGroupsException
from azurelinuxagent.common.utils import fileutil
from tests.tools import AgentTestCase, patch, data_dir
def consume_cpu_time():
waste = 0
for x in range(1, 200000):
waste += random.random()
return waste
class TestCGroup(AgentTestCase):
@staticmethod
def _clean_up_test_files():
with open(os.path.join(data_dir, "cgroups", "cpu_mount", "tasks"), mode="wb") as tasks:
tasks.truncate(0)
with open(os.path.join(data_dir, "cgroups", "memory_mount", "tasks"), mode="wb") as tasks:
tasks.truncate(0)
with open(os.path.join(data_dir, "cgroups", "cpu_mount", "cgroup.procs"), mode="wb") as procs:
procs.truncate(0)
with open(os.path.join(data_dir, "cgroups", "memory_mount", "cgroup.procs"), mode="wb") as procs:
procs.truncate(0)
def setUp(self):
AgentTestCase.setUp(self)
TestCGroup._clean_up_test_files()
def tearDown(self):
AgentTestCase.tearDown(self)
TestCGroup._clean_up_test_files()
def test_correct_creation(self):
test_cgroup = CGroup.create("dummy_path", "cpu", "test_extension")
self.assertIsInstance(test_cgroup, CpuCgroup)
self.assertEqual(test_cgroup.controller, "cpu")
self.assertEqual(test_cgroup.path, "dummy_path")
self.assertEqual(test_cgroup.name, "test_extension")
test_cgroup = CGroup.create("dummy_path", "memory", "test_extension")
self.assertIsInstance(test_cgroup, MemoryCgroup)
self.assertEqual(test_cgroup.controller, "memory")
self.assertEqual(test_cgroup.path, "dummy_path")
self.assertEqual(test_cgroup.name, "test_extension")
def test_is_active(self):
test_cgroup = CGroup.create(os.path.join(data_dir, "cgroups", "cpu_mount"), "cpu", "test_extension")
self.assertEqual(False, test_cgroup.is_active())
with open(os.path.join(data_dir, "cgroups", "cpu_mount", "tasks"), mode="wb") as tasks:
tasks.write(str(1000).encode())
self.assertEqual(True, test_cgroup.is_active())
test_cgroup = CGroup.create(os.path.join(data_dir, "cgroups", "memory_mount"), "memory", "test_extension")
self.assertEqual(False, test_cgroup.is_active())
with open(os.path.join(data_dir, "cgroups", "memory_mount", "tasks"), mode="wb") as tasks:
tasks.write(str(1000).encode())
self.assertEqual(True, test_cgroup.is_active())
def test_get_tracked_processes(self):
test_cgroup = CGroup.create(os.path.join(data_dir, "cgroups", "cpu_mount"), "cpu", "test_extension")
self.assertListEqual(test_cgroup.get_tracked_processes(), [])
with open(os.path.join(data_dir, "cgroups", "cpu_mount", "cgroup.procs"), mode="wb") as tasks:
tasks.write(str(1000).encode())
self.assertEqual(['1000'], test_cgroup.get_tracked_processes())
test_cgroup = CGroup.create(os.path.join(data_dir, "cgroups", "memory_mount"), "memory", "test_extension")
self.assertListEqual(test_cgroup.get_tracked_processes(), [])
with open(os.path.join(data_dir, "cgroups", "memory_mount", "cgroup.procs"), mode="wb") as tasks:
tasks.write(str(1000).encode())
self.assertEqual(['1000'], test_cgroup.get_tracked_processes())
@patch("azurelinuxagent.common.logger.periodic_warn")
def test_is_active_file_not_present(self, patch_periodic_warn):
test_cgroup = CGroup.create(os.path.join(data_dir, "cgroups", "not_cpu_mount"), "cpu", "test_extension")
self.assertEqual(False, test_cgroup.is_active())
test_cgroup = CGroup.create(os.path.join(data_dir, "cgroups", "not_memory_mount"), "memory", "test_extension")
self.assertEqual(False, test_cgroup.is_active())
self.assertEqual(0, patch_periodic_warn.call_count)
@patch("azurelinuxagent.common.logger.periodic_warn")
def test_is_active_incorrect_file(self, patch_periodic_warn):
test_cgroup = CGroup.create(os.path.join(data_dir, "cgroups", "cpu_mount", "tasks"), "cpu", "test_extension")
self.assertEqual(False, test_cgroup.is_active())
self.assertEqual(1, patch_periodic_warn.call_count)
test_cgp = CGroup.create(os.path.join(data_dir, "cgroups", "memory_mount", "tasks"), "memory", "test_extension")
self.assertEqual(False, test_cgp.is_active())
self.assertEqual(2, patch_periodic_warn.call_count)
class TestCpuCgroup(AgentTestCase):
@classmethod
def setUpClass(cls):
AgentTestCase.setUpClass()
original_read_file = fileutil.read_file
#
# Tests that need to mock the contents of /proc/stat or */cpuacct/stat can set this map from
# the file that needs to be mocked to the mock file (each test starts with an empty map). If
# an Exception is given instead of a path, the exception is raised
#
cls.mock_read_file_map = {}
def mock_read_file(filepath, **args):
if filepath in cls.mock_read_file_map:
mapped_value = cls.mock_read_file_map[filepath]
if isinstance(mapped_value, Exception):
raise mapped_value
filepath = mapped_value
return original_read_file(filepath, **args)
cls.mock_read_file = patch("azurelinuxagent.common.utils.fileutil.read_file", side_effect=mock_read_file)
cls.mock_read_file.start()
@classmethod
def tearDownClass(cls):
cls.mock_read_file.stop()
AgentTestCase.tearDownClass()
def setUp(self):
AgentTestCase.setUp(self)
TestCpuCgroup.mock_read_file_map.clear()
def test_initialize_cpu_usage_should_set_current_cpu_usage(self):
cgroup = CpuCgroup("test", "/sys/fs/cgroup/cpu/system.slice/test")
TestCpuCgroup.mock_read_file_map = {
"/proc/stat": os.path.join(data_dir, "cgroups", "proc_stat_t0"),
os.path.join(cgroup.path, "cpuacct.stat"): os.path.join(data_dir, "cgroups", "cpuacct.stat_t0")
}
cgroup.initialize_cpu_usage()
self.assertEquals(cgroup._current_cgroup_cpu, 63763)
self.assertEquals(cgroup._current_system_cpu, 5496872)
def test_get_cpu_usage_should_return_the_cpu_usage_since_its_last_invocation(self):
cgroup = CpuCgroup("test", "/sys/fs/cgroup/cpu/system.slice/test")
TestCpuCgroup.mock_read_file_map = {
"/proc/stat": os.path.join(data_dir, "cgroups", "proc_stat_t0"),
os.path.join(cgroup.path, "cpuacct.stat"): os.path.join(data_dir, "cgroups", "cpuacct.stat_t0")
}
cgroup.initialize_cpu_usage()
TestCpuCgroup.mock_read_file_map = {
"/proc/stat": os.path.join(data_dir, "cgroups", "proc_stat_t1"),
os.path.join(cgroup.path, "cpuacct.stat"): os.path.join(data_dir, "cgroups", "cpuacct.stat_t1")
}
cpu_usage = cgroup.get_cpu_usage()
self.assertEquals(cpu_usage, 0.031)
TestCpuCgroup.mock_read_file_map = {
"/proc/stat": os.path.join(data_dir, "cgroups", "proc_stat_t2"),
os.path.join(cgroup.path, "cpuacct.stat"): os.path.join(data_dir, "cgroups", "cpuacct.stat_t2")
}
cpu_usage = cgroup.get_cpu_usage()
self.assertEquals(cpu_usage, 0.045)
def test_initialize_cpu_usage_should_set_the_cgroup_usage_to_0_when_the_cgroup_does_not_exist(self):
cgroup = CpuCgroup("test", "/sys/fs/cgroup/cpu/system.slice/test")
io_error_2 = IOError()
io_error_2.errno = errno.ENOENT # "No such file or directory"
TestCpuCgroup.mock_read_file_map = {
"/proc/stat": os.path.join(data_dir, "cgroups", "proc_stat_t0"),
os.path.join(cgroup.path, "cpuacct.stat"): io_error_2
}
cgroup.initialize_cpu_usage()
self.assertEquals(cgroup._current_cgroup_cpu, 0)
self.assertEquals(cgroup._current_system_cpu, 5496872) # check the system usage just for test sanity
def test_initialize_cpu_usage_should_raise_an_exception_when_called_more_than_once(self):
cgroup = CpuCgroup("test", "/sys/fs/cgroup/cpu/system.slice/test")
TestCpuCgroup.mock_read_file_map = {
"/proc/stat": os.path.join(data_dir, "cgroups", "proc_stat_t0"),
os.path.join(cgroup.path, "cpuacct.stat"): os.path.join(data_dir, "cgroups", "cpuacct.stat_t0")
}
cgroup.initialize_cpu_usage()
with self.assertRaises(CGroupsException):
cgroup.initialize_cpu_usage()
def test_get_cpu_usage_should_raise_an_exception_when_initialize_cpu_usage_has_not_been_invoked(self):
cgroup = CpuCgroup("test", "/sys/fs/cgroup/cpu/system.slice/test")
with self.assertRaises(CGroupsException):
cpu_usage = cgroup.get_cpu_usage()
class TestMemoryCgroup(AgentTestCase):
def test_memory_cgroup_create(self):
test_mem_cg = MemoryCgroup("test_extension", os.path.join(data_dir, "cgroups", "memory_mount"))
self.assertEqual("memory", test_mem_cg.controller)
def test_get_metrics(self):
test_mem_cg = MemoryCgroup("test_extension", os.path.join(data_dir, "cgroups", "memory_mount"))
memory_usage = test_mem_cg.get_memory_usage()
self.assertEqual(100000, memory_usage)
max_memory_usage = test_mem_cg.get_max_memory_usage()
self.assertEqual(1000000, max_memory_usage)
def test_get_metrics_when_files_not_present(self):
test_mem_cg = MemoryCgroup("test_extension", os.path.join(data_dir, "cgroups"))
with self.assertRaises(IOError) as e:
test_mem_cg.get_memory_usage()
self.assertEqual(e.exception.errno, errno.ENOENT)
with self.assertRaises(IOError) as e:
test_mem_cg.get_max_memory_usage()
self.assertEqual(e.exception.errno, errno.ENOENT)
|
|
"""Let's Encrypt CLI."""
# TODO: Sanity check all input. Be sure to avoid shell code etc...
import argparse
import atexit
import functools
import logging
import logging.handlers
import os
import pkg_resources
import sys
import time
import traceback
import configargparse
import configobj
import OpenSSL
import zope.component
import zope.interface.exceptions
import zope.interface.verify
from acme import client as acme_client
from acme import jose
import letsencrypt
from letsencrypt import account
from letsencrypt import configuration
from letsencrypt import constants
from letsencrypt import client
from letsencrypt import crypto_util
from letsencrypt import errors
from letsencrypt import interfaces
from letsencrypt import le_util
from letsencrypt import log
from letsencrypt import reporter
from letsencrypt import storage
from letsencrypt.display import util as display_util
from letsencrypt.display import ops as display_ops
from letsencrypt.plugins import disco as plugins_disco
logger = logging.getLogger(__name__)
# Argparse's help formatting has a lot of unhelpful peculiarities, so we want
# to replace as much of it as we can...
# This is the stub to include in help generated by argparse
SHORT_USAGE = """
letsencrypt [SUBCOMMAND] [options] [domains]
The Let's Encrypt agent can obtain and install HTTPS/TLS/SSL certificates. By
default, it will attempt to use a webserver both for obtaining and installing
the cert. """
# This is the short help for letsencrypt --help, where we disable argparse
# altogether
USAGE = SHORT_USAGE + """Major SUBCOMMANDS are:
(default) everything Obtain & install a cert in your current webserver
auth Authenticate & obtain cert, but do not install it
install Install a previously obtained cert in a server
revoke Revoke a previously obtained certificate
rollback Rollback server configuration changes made during install
config_changes Show changes made to server config during installation
Choice of server for authentication/installation:
--apache Use the Apache plugin for authentication & installation
--nginx Use the Nginx plugin for authentication & installation
--standalone Run a standalone HTTPS server (for authentication only)
OR:
--authenticator standalone --installer nginx
More detailed help:
-h, --help [topic] print this message, or detailed help on a topic;
the available topics are:
all, apache, automation, nginx, paths, security, testing, or any of the
subcommands
"""
def _find_domains(args, installer):
if args.domains is None:
domains = display_ops.choose_names(installer)
else:
domains = args.domains
if not domains:
raise errors.Error("Please specify --domains, or --installer that "
"will help in domain names autodiscovery")
return domains
def _determine_account(args, config):
"""Determine which account to use.
In order to make the renewer (configuration de/serialization) happy,
if ``args.account`` is ``None``, it will be updated based on the
user input. Same for ``args.email``.
:param argparse.Namespace args: CLI arguments
:param letsencrypt.interface.IConfig config: Configuration object
:param .AccountStorage account_storage: Account storage.
:returns: Account and optionally ACME client API (byproduct of new
registration).
:rtype: `tuple` of `letsencrypt.account.Account` and
`acme.client.Client`
"""
account_storage = account.AccountFileStorage(config)
acme = None
if args.account is not None:
acc = account_storage.load(args.account)
else:
accounts = account_storage.find_all()
if len(accounts) > 1:
acc = display_ops.choose_account(accounts)
elif len(accounts) == 1:
acc = accounts[0]
else: # no account registered yet
if args.email is None:
args.email = display_ops.get_email()
if not args.email: # get_email might return ""
args.email = None
def _tos_cb(regr):
if args.tos:
return True
msg = ("Please read the Terms of Service at {0}. You "
"must agree in order to register with the ACME "
"server at {1}".format(
regr.terms_of_service, config.server))
return zope.component.getUtility(interfaces.IDisplay).yesno(
msg, "Agree", "Cancel")
try:
acc, acme = client.register(
config, account_storage, tos_cb=_tos_cb)
except errors.Error as error:
logger.debug(error, exc_info=True)
raise errors.Error(
"Unable to register an account with ACME server")
args.account = acc.id
return acc, acme
def _init_le_client(args, config, authenticator, installer):
if authenticator is not None:
# if authenticator was given, then we will need account...
acc, acme = _determine_account(args, config)
logger.debug("Picked account: %r", acc)
# XXX
#crypto_util.validate_key_csr(acc.key)
else:
acc, acme = None, None
return client.Client(config, acc, authenticator, installer, acme=acme)
def _find_duplicative_certs(domains, config, renew_config):
"""Find existing certs that duplicate the request."""
identical_names_cert, subset_names_cert = None, None
configs_dir = renew_config.renewal_configs_dir
cli_config = configuration.RenewerConfiguration(config)
for renewal_file in os.listdir(configs_dir):
try:
full_path = os.path.join(configs_dir, renewal_file)
rc_config = configobj.ConfigObj(renew_config.renewer_config_file)
rc_config.merge(configobj.ConfigObj(full_path))
rc_config.filename = full_path
candidate_lineage = storage.RenewableCert(
rc_config, config_opts=None, cli_config=cli_config)
except (configobj.ConfigObjError, errors.CertStorageError, IOError):
logger.warning("Renewal configuration file %s is broken. "
"Skipping.", full_path)
continue
# TODO: Handle these differently depending on whether they are
# expired or still valid?
candidate_names = set(candidate_lineage.names())
if candidate_names == set(domains):
identical_names_cert = candidate_lineage
elif candidate_names.issubset(set(domains)):
subset_names_cert = candidate_lineage
return identical_names_cert, subset_names_cert
def run(args, config, plugins): # pylint: disable=too-many-branches,too-many-locals
"""Obtain a certificate and install."""
if args.configurator is not None and (args.installer is not None or
args.authenticator is not None):
return ("Either --configurator or --authenticator/--installer"
"pair, but not both, is allowed")
if args.authenticator is not None or args.installer is not None:
installer = display_ops.pick_installer(
config, args.installer, plugins)
authenticator = display_ops.pick_authenticator(
config, args.authenticator, plugins)
else:
# TODO: this assumes that user doesn't want to pick authenticator
# and installer separately...
authenticator = installer = display_ops.pick_configurator(
config, args.configurator, plugins)
if installer is None or authenticator is None:
return "Configurator could not be determined"
domains = _find_domains(args, installer)
treat_as_renewal = False
# Considering the possibility that the requested certificate is
# related to an existing certificate. (config.duplicate, which
# is set with --duplicate, skips all of this logic and forces any
# kind of certificate to be obtained with treat_as_renewal = False.)
if not config.duplicate:
identical_names_cert, subset_names_cert = _find_duplicative_certs(
domains, config, configuration.RenewerConfiguration(config))
# I am not sure whether that correctly reads the systemwide
# configuration file.
question = None
if identical_names_cert is not None:
question = (
"You have an existing certificate that contains exactly the "
"same domains you requested (ref: {0})\n\nDo you want to "
"renew and replace this certificate with a newly-issued one?"
).format(identical_names_cert.configfile.filename)
elif subset_names_cert is not None:
question = (
"You have an existing certificate that contains a portion of "
"the domains you requested (ref: {0})\n\nIt contains these "
"names: {1}\n\nYou requested these names for the new "
"certificate: {2}.\n\nDo you want to replace this existing "
"certificate with the new certificate?"
).format(subset_names_cert.configfile.filename,
", ".join(subset_names_cert.names()),
", ".join(domains))
if question is None:
# We aren't in a duplicative-names situation at all, so we don't
# have to tell or ask the user anything about this.
pass
elif zope.component.getUtility(interfaces.IDisplay).yesno(
question, "Replace", "Cancel"):
treat_as_renewal = True
else:
reporter_util = zope.component.getUtility(interfaces.IReporter)
reporter_util.add_message(
"To obtain a new certificate that {0} an existing certificate "
"in its domain-name coverage, you must use the --duplicate "
"option.\n\nFor example:\n\n{1} --duplicate {2}".format(
"duplicates" if identical_names_cert is not None else
"overlaps with", sys.argv[0], " ".join(sys.argv[1:])),
reporter_util.HIGH_PRIORITY)
return 1
# Attempting to obtain the certificate
# TODO: Handle errors from _init_le_client?
le_client = _init_le_client(args, config, authenticator, installer)
if treat_as_renewal:
lineage = identical_names_cert if identical_names_cert is not None else subset_names_cert
# TODO: Use existing privkey instead of generating a new one
new_certr, new_chain, new_key, _ = le_client.obtain_certificate(domains)
# TODO: Check whether it worked!
lineage.save_successor(
lineage.latest_common_version(), OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_PEM, new_certr.body),
new_key.pem, crypto_util.dump_pyopenssl_chain(new_chain))
lineage.update_all_links_to(lineage.latest_common_version())
# TODO: Check return value of save_successor
# TODO: Also update lineage renewal config with any relevant
# configuration values from this attempt?
le_client.deploy_certificate(
domains, lineage.privkey, lineage.cert, lineage.chain)
display_ops.success_renewal(domains)
else:
# TREAT AS NEW REQUEST
lineage = le_client.obtain_and_enroll_certificate(
domains, authenticator, installer, plugins)
if not lineage:
return "Certificate could not be obtained"
# TODO: This treats the key as changed even when it wasn't
# TODO: We also need to pass the fullchain (for Nginx)
le_client.deploy_certificate(
domains, lineage.privkey, lineage.cert, lineage.chain)
le_client.enhance_config(domains, args.redirect)
display_ops.success_installation(domains)
def auth(args, config, plugins):
"""Authenticate & obtain cert, but do not install it."""
# XXX: Update for renewer / RenewableCert
if args.domains is not None and args.csr is not None:
# TODO: --csr could have a priority, when --domains is
# supplied, check if CSR matches given domains?
return "--domains and --csr are mutually exclusive"
authenticator = display_ops.pick_authenticator(
config, args.authenticator, plugins)
if authenticator is None:
return "Authenticator could not be determined"
if args.installer is not None:
installer = display_ops.pick_installer(config, args.installer, plugins)
else:
installer = None
# TODO: Handle errors from _init_le_client?
le_client = _init_le_client(args, config, authenticator, installer)
if args.csr is not None:
certr, chain = le_client.obtain_certificate_from_csr(le_util.CSR(
file=args.csr[0], data=args.csr[1], form="der"))
le_client.save_certificate(
certr, chain, args.cert_path, args.chain_path)
else:
domains = _find_domains(args, installer)
if not le_client.obtain_and_enroll_certificate(
domains, authenticator, installer, plugins):
return "Certificate could not be obtained"
def install(args, config, plugins):
"""Install a previously obtained cert in a server."""
# XXX: Update for renewer/RenewableCert
installer = display_ops.pick_installer(config, args.installer, plugins)
if installer is None:
return "Installer could not be determined"
domains = _find_domains(args, installer)
le_client = _init_le_client(
args, config, authenticator=None, installer=installer)
assert args.cert_path is not None # required=True in the subparser
le_client.deploy_certificate(
domains, args.key_path, args.cert_path, args.chain_path)
le_client.enhance_config(domains, args.redirect)
def revoke(args, config, unused_plugins): # TODO: coop with renewal config
"""Revoke a previously obtained certificate."""
if args.key_path is not None: # revocation by cert key
logger.debug("Revoking %s using cert key %s",
args.cert_path[0], args.key_path[0])
acme = acme_client.Client(
config.server, key=jose.JWK.load(args.key_path[1]))
else: # revocation by account key
logger.debug("Revoking %s using Account Key", args.cert_path[0])
acc, _ = _determine_account(args, config)
# pylint: disable=protected-access
acme = client._acme_from_config_key(config, acc.key)
acme.revoke(jose.ComparableX509(crypto_util.pyopenssl_load_certificate(
args.cert_path[1])[0]))
def rollback(args, config, plugins):
"""Rollback server configuration changes made during install."""
client.rollback(args.installer, args.checkpoints, config, plugins)
def config_changes(unused_args, config, unused_plugins):
"""Show changes made to server config during installation
View checkpoints and associated configuration changes.
"""
client.view_config_changes(config)
def plugins_cmd(args, config, plugins): # TODO: Use IDisplay rather than print
"""List server software plugins."""
logger.debug("Expected interfaces: %s", args.ifaces)
ifaces = [] if args.ifaces is None else args.ifaces
filtered = plugins.ifaces(ifaces)
logger.debug("Filtered plugins: %r", filtered)
if not args.init and not args.prepare:
print str(filtered)
return
filtered.init(config)
verified = filtered.verify(ifaces)
logger.debug("Verified plugins: %r", verified)
if not args.prepare:
print str(verified)
return
verified.prepare()
available = verified.available()
logger.debug("Prepared plugins: %s", available)
print str(available)
def read_file(filename, mode="rb"):
"""Returns the given file's contents.
:param str filename: Filename
:param str mode: open mode (see `open`)
:returns: A tuple of filename and its contents
:rtype: tuple
:raises argparse.ArgumentTypeError: File does not exist or is not readable.
"""
try:
return filename, open(filename, mode).read()
except IOError as exc:
raise argparse.ArgumentTypeError(exc.strerror)
def flag_default(name):
"""Default value for CLI flag."""
return constants.CLI_DEFAULTS[name]
def config_help(name, hidden=False):
"""Help message for `.IConfig` attribute."""
if hidden:
return argparse.SUPPRESS
else:
return interfaces.IConfig[name].__doc__
class SilentParser(object): # pylint: disable=too-few-public-methods
"""Silent wrapper around argparse.
A mini parser wrapper that doesn't print help for its
arguments. This is needed for the use of callbacks to define
arguments within plugins.
"""
def __init__(self, parser):
self.parser = parser
def add_argument(self, *args, **kwargs):
"""Wrap, but silence help"""
kwargs["help"] = argparse.SUPPRESS
self.parser.add_argument(*args, **kwargs)
HELP_TOPICS = ["all", "security", "paths", "automation", "testing", "plugins"]
class HelpfulArgumentParser(object):
"""Argparse Wrapper.
This class wraps argparse, adding the ability to make --help less
verbose, and to request help on specific subcategories one at a time, e.g.
'letsencrypt --help security' for security options.
"""
def __init__(self, args, plugins):
plugin_names = [name for name, _p in plugins.iteritems()]
self.help_topics = HELP_TOPICS + plugin_names + [None]
self.parser = configargparse.ArgParser(
usage=SHORT_USAGE,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
args_for_setting_config_path=["-c", "--config"],
default_config_files=flag_default("config_files"))
# This is the only way to turn off overly verbose config flag documentation
self.parser._add_config_file_help = False # pylint: disable=protected-access
self.silent_parser = SilentParser(self.parser)
self.args = self.preprocess_args(args)
help1 = self.prescan_for_flag("-h", self.help_topics)
help2 = self.prescan_for_flag("--help", self.help_topics)
assert max(True, "a") == "a", "Gravity changed direction"
help_arg = max(help1, help2)
if help_arg:
# just --help with no topic; avoid argparse altogether
print USAGE
sys.exit(0)
self.visible_topics = self.determine_help_topics(help_arg)
#print self.visible_topics
self.groups = {} # elements are added by .add_group()
def preprocess_args(self, args):
"""Work around some limitations in argparse.
Currently, this adds the default verb "run" when no verb is given.
"""
for token in args:
if token in VERBS:
return args
return ["run"] + args
def prescan_for_flag(self, flag, possible_arguments):
"""Checks cli input for flags.
Check for a flag, which accepts a fixed set of possible arguments, in
the command line; we will use this information to configure argparse's
help correctly. Return the flag's argument, if it has one that matches
the sequence @possible_arguments; otherwise return whether the flag is
present.
"""
if flag not in self.args:
return False
pos = self.args.index(flag)
try:
nxt = self.args[pos + 1]
if nxt in possible_arguments:
return nxt
except IndexError:
pass
return True
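# e.g. with self.args == ["-h", "security", ...] this returns "security";
# a bare ["-h"] yields True, and an absent flag yields False.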
def add(self, topic, *args, **kwargs):
"""Add a new command line argument.
@topic is required, to indicate which part of the help will document
it, but can be None for `always documented'.
"""
if self.visible_topics[topic]:
if topic in self.groups:
group = self.groups[topic]
group.add_argument(*args, **kwargs)
else:
self.parser.add_argument(*args, **kwargs)
else:
kwargs["help"] = argparse.SUPPRESS
self.parser.add_argument(*args, **kwargs)
def add_group(self, topic, **kwargs):
"""
This has to be called once for every topic; but we leave those calls
next to the argument definitions for clarity. Return something
arguments can be added to if necessary, either the parser or an argument
group.
"""
if self.visible_topics[topic]:
#print "Adding visible group " + topic
group = self.parser.add_argument_group(topic, **kwargs)
self.groups[topic] = group
return group
else:
#print "Invisible group " + topic
return self.silent_parser
def add_plugin_args(self, plugins):
"""
Let each of the plugins add its own command line arguments, which
may or may not be displayed as help topics.
"""
for name, plugin_ep in plugins.iteritems():
parser_or_group = self.add_group(name, description=plugin_ep.description)
#print parser_or_group
plugin_ep.plugin_cls.inject_parser_options(parser_or_group, name)
def determine_help_topics(self, chosen_topic):
"""
        The user may have requested help on a topic; return a dict of which
        topics to display. @chosen_topic has prescan_for_flag's return type.

        :returns: dict
"""
# topics maps each topic to whether it should be documented by
# argparse on the command line
if chosen_topic == "all":
return dict([(t, True) for t in self.help_topics])
elif not chosen_topic:
return dict([(t, False) for t in self.help_topics])
else:
return dict([(t, t == chosen_topic) for t in self.help_topics])
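    # Illustrative sketch (not part of the original class): with help_topics of
    # ["all", "security", "paths", ...], determine_help_topics("security")
    # yields {"all": False, "security": True, "paths": False, ...}, which add()
    # then uses to suppress the help of every other topic.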
def create_parser(plugins, args):
"""Create parser."""
helpful = HelpfulArgumentParser(args, plugins)
# --help is automatically provided by argparse
helpful.add(
None, "-v", "--verbose", dest="verbose_count", action="count",
default=flag_default("verbose_count"), help="This flag can be used "
"multiple times to incrementally increase the verbosity of output, "
"e.g. -vvv.")
helpful.add(
None, "-t", "--text", dest="text_mode", action="store_true",
help="Use the text output instead of the curses UI.")
helpful.add(None, "-m", "--email", help=config_help("email"))
# positional arg shadows --domains, instead of appending, and
# --domains is useful, because it can be stored in config
#for subparser in parser_run, parser_auth, parser_install:
# subparser.add_argument("domains", nargs="*", metavar="domain")
helpful.add(None, "-d", "--domains", metavar="DOMAIN", action="append")
helpful.add(
None, "--duplicate", dest="duplicate", action="store_true",
help="Allow getting a certificate that duplicates an existing one")
helpful.add_group(
"automation",
description="Arguments for automating execution & other tweaks")
helpful.add(
"automation", "--version", action="version",
version="%(prog)s {0}".format(letsencrypt.__version__),
help="show program's version number and exit")
helpful.add(
"automation", "--no-confirm", dest="no_confirm", action="store_true",
help="Turn off confirmation screens, currently used for --revoke")
helpful.add(
"automation", "--agree-eula", dest="eula", action="store_true",
help="Agree to the Let's Encrypt Developer Preview EULA")
helpful.add(
"automation", "--agree-tos", dest="tos", action="store_true",
help="Agree to the Let's Encrypt Subscriber Agreement")
helpful.add(
"automation", "--account", metavar="ACCOUNT_ID",
help="Account ID to use")
helpful.add_group(
"testing", description="The following flags are meant for "
"testing purposes only! Do NOT change them, unless you "
"really know what you're doing!")
helpful.add(
"testing", "--debug", action="store_true",
help="Show tracebacks if the program exits abnormally")
helpful.add(
"testing", "--no-verify-ssl", action="store_true",
help=config_help("no_verify_ssl"),
default=flag_default("no_verify_ssl"))
helpful.add( # TODO: apache plugin does NOT respect it (#479)
"testing", "--dvsni-port", type=int, default=flag_default("dvsni_port"),
help=config_help("dvsni_port"))
helpful.add("testing", "--simple-http-port", type=int,
help=config_help("simple_http_port"))
helpful.add("testing", "--no-simple-http-tls", action="store_true",
help=config_help("no_simple_http_tls"))
helpful.add_group(
"security", description="Security parameters & server settings")
helpful.add(
"security", "-B", "--rsa-key-size", type=int, metavar="N",
default=flag_default("rsa_key_size"), help=config_help("rsa_key_size"))
# TODO: resolve - assumes binary logic while client.py assumes ternary.
helpful.add(
"security", "-r", "--redirect", action="store_true",
help="Automatically redirect all HTTP traffic to HTTPS for the newly "
"authenticated vhost.")
helpful.add(
"security", "--strict-permissions", action="store_true",
help="Require that all configuration files are owned by the current "
"user; only needed if your config is somewhere unsafe like /tmp/")
_paths_parser(helpful)
# _plugins_parsing should be the last thing to act upon the main
# parser (--help should display plugin-specific options last)
_plugins_parsing(helpful, plugins)
_create_subparsers(helpful)
return helpful.parser, helpful.args
# For now unfortunately this constant just needs to match the code below;
# there isn't an elegant way to autogenerate it in time.
VERBS = ["run", "auth", "install", "revoke", "rollback", "config_changes",
"plugins"]
def _create_subparsers(helpful):
subparsers = helpful.parser.add_subparsers(metavar="SUBCOMMAND")
def add_subparser(name, func): # pylint: disable=missing-docstring
subparser = subparsers.add_parser(
name, help=func.__doc__.splitlines()[0], description=func.__doc__)
subparser.set_defaults(func=func)
return subparser
# the order of add_subparser() calls is important: it defines the
# order in which subparser names will be displayed in --help
add_subparser("run", run)
parser_auth = add_subparser("auth", auth)
parser_install = add_subparser("install", install)
parser_revoke = add_subparser("revoke", revoke)
parser_rollback = add_subparser("rollback", rollback)
add_subparser("config_changes", config_changes)
parser_plugins = add_subparser("plugins", plugins_cmd)
parser_auth.add_argument(
"--csr", type=read_file, help="Path to a Certificate Signing "
"Request (CSR) in DER format.")
parser_auth.add_argument(
"--cert-path", default=flag_default("auth_cert_path"),
help="When using --csr this is where certificate is saved.")
parser_auth.add_argument(
"--chain-path", default=flag_default("auth_chain_path"),
help="When using --csr this is where certificate chain is saved.")
parser_install.add_argument(
"--cert-path", required=True, help="Path to a certificate that "
"is going to be installed.")
parser_install.add_argument(
"--key-path", required=True, help="Accompanying private key")
parser_install.add_argument(
"--chain-path", help="Accompanying path to a certificate chain.")
parser_revoke.add_argument(
"--cert-path", type=read_file, help="Revoke a specific certificate.",
required=True)
parser_revoke.add_argument(
"--key-path", type=read_file,
help="Revoke certificate using its accompanying key. Useful if "
"Account Key is lost.")
parser_rollback.add_argument(
"--checkpoints", type=int, metavar="N",
default=flag_default("rollback_checkpoints"),
help="Revert configuration N number of checkpoints.")
parser_plugins.add_argument(
"--init", action="store_true", help="Initialize plugins.")
parser_plugins.add_argument(
"--prepare", action="store_true",
help="Initialize and prepare plugins.")
parser_plugins.add_argument(
"--authenticators", action="append_const", dest="ifaces",
const=interfaces.IAuthenticator,
help="Limit to authenticator plugins only.")
parser_plugins.add_argument(
"--installers", action="append_const", dest="ifaces",
const=interfaces.IInstaller, help="Limit to installer plugins only.")
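# Illustrative sketch (not from the original source): the subcommands and flags
# defined above correspond to invocations such as
#
#     letsencrypt auth --csr example.der --cert-path cert.pem
#     letsencrypt revoke --cert-path cert.pem
#     letsencrypt rollback --checkpoints 2
#
# where "letsencrypt" stands for however this CLI module is invoked and the
# file names are placeholders.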
def _paths_parser(helpful):
add = helpful.add
helpful.add_group(
"paths", description="Arguments changing execution paths & servers")
add("paths", "--config-dir", default=flag_default("config_dir"),
help=config_help("config_dir"))
add("paths", "--work-dir", default=flag_default("work_dir"),
help=config_help("work_dir"))
add("paths", "--logs-dir", default=flag_default("logs_dir"),
help="Logs directory.")
add("paths", "--server", default=flag_default("server"),
help=config_help("server"))
def _plugins_parsing(helpful, plugins):
helpful.add_group(
"plugins", description="Let's Encrypt client supports an "
"extensible plugins architecture. See '%(prog)s plugins' for a "
"list of all available plugins and their names. You can force "
"a particular plugin by setting options provided below. Further "
"down this help message you will find plugin-specific options "
"(prefixed by --{plugin_name}).")
helpful.add(
"plugins", "-a", "--authenticator", help="Authenticator plugin name.")
helpful.add(
"plugins", "-i", "--installer", help="Installer plugin name.")
helpful.add(
"plugins", "--configurator", help="Name of the plugin that is "
"both an authenticator and an installer. Should not be used "
"together with --authenticator or --installer.")
    # things should not be reordered before/after this comment:
# plugins_group should be displayed in --help before plugin
# specific groups (so that plugins_group.description makes sense)
helpful.add_plugin_args(plugins)
def _setup_logging(args):
level = -args.verbose_count * 10
fmt = "%(asctime)s:%(levelname)s:%(name)s:%(message)s"
if args.text_mode:
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(fmt))
else:
handler = log.DialogHandler()
        # dialog box is small, display as little as possible
handler.setFormatter(logging.Formatter("%(message)s"))
handler.setLevel(level)
# TODO: use fileConfig?
# unconditionally log to file for debugging purposes
# TODO: change before release?
log_file_name = os.path.join(args.logs_dir, 'letsencrypt.log')
file_handler = logging.handlers.RotatingFileHandler(
log_file_name, maxBytes=2 ** 20, backupCount=10)
    # rotate on each invocation; a rollover only happens when maxBytes and
    # backupCount are nonzero, so maxBytes is set large enough (1MB) that a
    # single CLI invocation will not overrun it.
file_handler.doRollover() # TODO: creates empty letsencrypt.log.1 file
file_handler.setLevel(logging.DEBUG)
file_handler_formatter = logging.Formatter(fmt=fmt)
file_handler_formatter.converter = time.gmtime # don't use localtime
file_handler.setFormatter(file_handler_formatter)
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG) # send all records to handlers
root_logger.addHandler(handler)
root_logger.addHandler(file_handler)
logger.debug("Root logging level set at %d", level)
logger.info("Saving debug log to %s", log_file_name)
def _handle_exception(exc_type, exc_value, trace, args):
"""Logs exceptions and reports them to the user.
    Args is used to determine how to display exceptions to the user. In
    general, if args.debug is True, the full exception and traceback is
    shown to the user; otherwise it is suppressed. If args itself is None,
    we attempt to write the traceback and exception to a logfile. If that
    succeeds, the traceback is suppressed; otherwise it is shown to the
    user. sys.exit is always called with a nonzero status.
"""
logger.debug(
"Exiting abnormally:\n%s",
"".join(traceback.format_exception(exc_type, exc_value, trace)))
if issubclass(exc_type, Exception) and (args is None or not args.debug):
if args is None:
logfile = "letsencrypt.log"
try:
with open(logfile, "w") as logfd:
traceback.print_exception(
exc_type, exc_value, trace, file=logfd)
except: # pylint: disable=bare-except
sys.exit("".join(
traceback.format_exception(exc_type, exc_value, trace)))
if issubclass(exc_type, errors.Error):
sys.exit(exc_value)
elif args is None:
sys.exit(
"An unexpected error occurred. Please see the logfile '{0}' "
"for more details.".format(logfile))
else:
sys.exit(
"An unexpected error occurred. Please see the logfiles in {0} "
"for more details.".format(args.logs_dir))
else:
sys.exit("".join(
traceback.format_exception(exc_type, exc_value, trace)))
def main(cli_args=sys.argv[1:]):
"""Command line argument parsing and main script execution."""
sys.excepthook = functools.partial(_handle_exception, args=None)
# note: arg parser internally handles --help (and exits afterwards)
plugins = plugins_disco.PluginsRegistry.find_all()
parser, tweaked_cli_args = create_parser(plugins, cli_args)
args = parser.parse_args(tweaked_cli_args)
config = configuration.NamespaceConfig(args)
zope.component.provideUtility(config)
# Setup logging ASAP, otherwise "No handlers could be found for
# logger ..." TODO: this should be done before plugins discovery
for directory in config.config_dir, config.work_dir:
le_util.make_or_verify_dir(
directory, constants.CONFIG_DIRS_MODE, os.geteuid(),
"--strict-permissions" in cli_args)
# TODO: logs might contain sensitive data such as contents of the
# private key! #525
le_util.make_or_verify_dir(
args.logs_dir, 0o700, os.geteuid(), "--strict-permissions" in cli_args)
_setup_logging(args)
# do not log `args`, as it contains sensitive data (e.g. revoke --key)!
logger.debug("Arguments: %r", cli_args)
logger.debug("Discovered plugins: %r", plugins)
sys.excepthook = functools.partial(_handle_exception, args=args)
# Displayer
if args.text_mode:
displayer = display_util.FileDisplay(sys.stdout)
else:
displayer = display_util.NcursesDisplay()
zope.component.provideUtility(displayer)
# Reporter
report = reporter.Reporter()
zope.component.provideUtility(report)
atexit.register(report.atexit_print_messages)
# TODO: remove developer EULA prompt for the launch
if not config.eula:
eula = pkg_resources.resource_string("letsencrypt", "EULA")
if not zope.component.getUtility(interfaces.IDisplay).yesno(
eula, "Agree", "Cancel"):
raise errors.Error("Must agree to TOS")
    if os.geteuid() != 0:
        logger.warning(
            "Root (sudo) is required to run most of letsencrypt functionality.")
        # check must be done after arg parsing as --help should work
        # w/o root; on the other hand, e.g. "letsencrypt run
        # --authenticator dns" or "letsencrypt plugins" do not
        # require root either
#return (
# "{0}Root is required to run letsencrypt. Please use sudo.{0}"
# .format(os.linesep))
return args.func(args, config, plugins)
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
#
# Copyright 2017 CNIT - Consorzio Nazionale Interuniversitario per le Telecomunicazioni
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
import copy
import json
import os.path
import yaml
from lib.etsi.etsi_parser import EtsiParser
from lib.etsi.etsi_rdcl_graph import EtsiRdclGraph
from lib.util import Util
from projecthandler.models import Project
import logging
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('EtsiModel.py')
# project_types['etsi']= projecthandler.etsi_model.EtsiProject
# project_types['click']= ClickProject
PATH_TO_SCHEMAS = 'lib/etsi/schemas/'
PATH_TO_DESCRIPTORS_TEMPLATES = 'lib/etsi/descriptor_template'
DESCRIPTOR_TEMPLATE_SUFFIX = '.json'
GRAPH_MODEL_FULL_NAME = 'lib/TopologyModels/etsi/etsi.yaml'
EXAMPLES_FOLDER = 'usecases/ETSI/'
class EtsiProject(Project):
"""Etsi Project class
The data model has the following descriptors:
'nsd'
'vnfd'
'vnffgd'
'vld'
"""
@classmethod
def data_project_from_files(cls, request):
file_dict = {}
for my_key in request.FILES.keys():
file_dict[my_key] = request.FILES.getlist(my_key)
log.info(file_dict)
data_project = EtsiParser.importprojectfiles(file_dict)
return data_project
@classmethod
def data_project_from_example(cls, request):
example_id = request.POST.get('example-etsi-id', '')
data_project = EtsiParser.importprojectdir(EXAMPLES_FOLDER + example_id + '/JSON', 'json')
return data_project
@classmethod
def get_example_list(cls):
"""Returns a list of directories, in each directory there is a project example"""
path = EXAMPLES_FOLDER
dirs = [d for d in os.listdir(path) if os.path.isdir(os.path.join(path, d))]
return {'etsi': dirs}
@classmethod
def get_json_schema_by_type(cls, type_descriptor):
schema = PATH_TO_SCHEMAS + type_descriptor + ".json"
return schema
@classmethod
def get_new_descriptor(cls, descriptor_type, request_id):
json_template = cls.get_descriptor_template(descriptor_type)
if descriptor_type == 'nsd':
json_template['nsdIdentifier'] = request_id
json_template['nsdInvariantId'] = request_id
else:
json_template['vnfdId'] = request_id
return json_template
@classmethod
def get_descriptor_template(cls, type_descriptor):
"""Returns a descriptor template for a given descriptor type"""
try:
schema = Util.loadjsonfile(os.path.join(PATH_TO_DESCRIPTORS_TEMPLATES, type_descriptor + DESCRIPTOR_TEMPLATE_SUFFIX))
# print 'type_descriptor : '+type_descriptor
return schema
except Exception as e:
log.exception(e)
return False
@classmethod
    def get_clone_descriptor(cls, descriptor, type_descriptor, new_descriptor_id):
        new_descriptor = copy.deepcopy(descriptor)
        new_extension = "_" + new_descriptor_id
        if type_descriptor == 'vnfd':
            new_descriptor['vnfdId'] = new_descriptor_id
            if new_descriptor['vnfProductName'] is not None:
                new_descriptor['vnfProductName'] += new_extension
            for vnfExtCpd in new_descriptor['vnfExtCpd']:
                if vnfExtCpd['cpdId'] is not None:
                    vnfExtCpd['cpdId'] += new_extension
        if type_descriptor == 'nsd':
            new_descriptor['nsdIdentifier'] = new_descriptor_id
            new_descriptor['nsdName'] = new_descriptor_id
            new_descriptor['nsdInvariantId'] = new_descriptor_id
            for sapd in new_descriptor['sapd']:
                if sapd['cpdId'] is not None:
                    sapd['cpdId'] += new_extension
        return new_descriptor
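    # Illustrative sketch (not part of the original class): cloning a VNFD with
    # new_descriptor_id == "vnfd2" rewrites identifiers roughly as
    #
    #     {'vnfdId': 'vnfd1', 'vnfProductName': 'fw', 'vnfExtCpd': [{'cpdId': 'cp0'}]}
    #       -> {'vnfdId': 'vnfd2', 'vnfProductName': 'fw_vnfd2',
    #           'vnfExtCpd': [{'cpdId': 'cp0_vnfd2'}]}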
def get_type(self):
return "etsi"
def __str__(self):
return self.name
def get_overview_data(self):
current_data = json.loads(self.data_project)
result = {
'owner': self.owner.__str__(),
'name': self.name,
'updated_date': self.updated_date.__str__(),
'info': self.info,
'type': 'etsi',
'nsd': len(current_data['nsd'].keys()) if 'nsd' in current_data else 0,
'vnfd': len(current_data['vnfd'].keys()) if 'vnfd' in current_data else 0,
'validated': self.validated
}
return result
def get_graph_data_json_topology(self, descriptor_id):
test_t3d = EtsiRdclGraph()
project = self.get_dataproject()
        ## FIXME: to be reviewed; this process probably needs to be reworked
topology = test_t3d.build_graph_from_project(project,
model=self.get_graph_model(GRAPH_MODEL_FULL_NAME))
return json.dumps(topology)
def create_descriptor(self, descriptor_name, type_descriptor, new_data, data_type):
"""Creates a descriptor of a given type from a json or yaml representation
Returns the descriptor id or False
"""
try:
# utility = Util()
current_data = json.loads(self.data_project)
if data_type == 'json':
new_descriptor = json.loads(new_data)
elif data_type == 'yaml':
# utility = Util()
yaml_object = yaml.load(new_data)
new_descriptor = json.loads(Util.yaml2json(yaml_object))
else:
log.debug('Create descriptor: Unknown data type')
return False
# schema = cls.loadjsonfile("lib/etsi/schemas/"+type_descriptor+".json")
reference_schema = self.get_json_schema_by_type(type_descriptor)
# validate = Util.validate_json_schema(reference_schema, new_descriptor)
validate = False
            if type_descriptor == "nsd":
                new_descriptor_id = new_descriptor['nsdIdentifier']
            else:
                new_descriptor_id = new_descriptor['vnfdId']
            if type_descriptor not in current_data:
                current_data[type_descriptor] = {}
current_data[type_descriptor][new_descriptor_id] = new_descriptor
self.data_project = current_data
self.validated = validate # TODO(stefano) not clear if this is the validation for the whole project
self.update()
result = new_descriptor_id
except Exception as e:
log.exception(e)
result = False
return result
def set_validated(self, value):
self.validated = True if value is not None and value == True else False
def get_add_element(self, request):
print "etsi add element"
result = False
group_id = request.POST.get('group_id')
element_id = request.POST.get('element_id')
element_type = request.POST.get('element_type')
existing_element = request.POST.get('existing_element')
        log.debug("%s %s %s %s", group_id, element_id, element_type, existing_element)
if element_type == 'ns_cp':
result = self.add_ns_sap(group_id, element_id)
elif element_type == 'ns_vl':
result = self.add_ns_vl(group_id, element_id)
elif element_type == 'vnf':
if existing_element:
result = self.add_ns_existing_vnf(group_id, element_id)
else:
result = self.add_ns_vnf(group_id, element_id)
elif element_type == 'vnf_vl':
result = self.add_vnf_intvl(group_id, element_id)
elif element_type == 'vnf_ext_cp':
result = self.add_vnf_vnfextcpd(group_id, element_id)
elif element_type == 'vnf_vdu':
result = self.add_vnf_vdu(group_id, element_id)
elif element_type == 'vnf_vdu_cp':
vdu_id = request.POST.get('choice')
            log.debug("vnf_vdu_cp %s", vdu_id)
result = self.add_vnf_vducp(group_id, vdu_id, element_id)
elif element_type == 'vnffg':
# log.debug("Add ") group_id, element_id
result = self.add_vnffg(group_id, element_id)
print "result etsi", result
return result
def get_remove_element(self, request):
result = False
group_id = request.POST.get('group_id')
element_id = request.POST.get('element_id')
element_type = request.POST.get('element_type')
log.debug('in get_remove_element : ' + str(element_id))
if element_type == 'ns_cp':
result = self.remove_ns_sap(group_id, element_id)
elif element_type == 'ns_vl':
result = self.remove_ns_vl(group_id, element_id)
elif element_type == 'vnf':
result = self.remove_ns_vnf(group_id, element_id)
elif element_type == 'vnf_vl':
result = self.remove_vnf_intvl(group_id, element_id)
elif element_type == 'vnf_ext_cp':
result = self.remove_vnf_vnfextcpd(group_id, element_id)
elif element_type == 'vnf_vdu':
result = self.remove_vnf_vdu(group_id, element_id)
elif element_type == 'vnf_vdu_cp':
vdu_id = request.POST.get('choice')
result = self.remove_vnf_vducp(group_id, vdu_id, element_id)
return result
def get_add_link(self, request):
result = False
parameters = request.POST.dict()
print "ets_model", parameters
source_type = parameters['source_type']
destination_type = parameters['target_type']
source_id = parameters['source']
destination_id = parameters['target']
group_id = parameters['group_id']
if (source_type, destination_type) in [('ns_vl', 'ns_cp'), ('ns_cp', 'ns_vl')]:
vl_id = source_id if source_type == 'ns_vl' else destination_id
sap_id = source_id if source_type == 'ns_cp' else destination_id
result = self.link_vl_sap(group_id, vl_id, sap_id)
elif (source_type, destination_type) in [('ns_vl', 'vnf'), ('vnf', 'ns_vl')]:
vl_id = source_id if source_type == 'ns_vl' else destination_id
vnf_id = source_id if source_type == 'vnf' else destination_id
ns_id = group_id
vnf_ext_cp = request.POST.get('choice')
result = self.link_vl_vnf(ns_id, vl_id, vnf_id, vnf_ext_cp)
if (source_type, destination_type) in [('vnf', 'ns_cp'), ('ns_cp', 'vnf')]:
vnf_id = source_id if source_type == 'vnf' else destination_id
sap_id = source_id if source_type == 'ns_cp' else destination_id
ns_id = group_id
vnf_ext_cp = request.POST.get('choice')
result = self.link_vnf_sap(ns_id, vnf_id, sap_id, vnf_ext_cp)
elif (source_type, destination_type) in [('vnf_vl', 'vnf_vdu_cp'), ('vnf_vdu_cp', 'vnf_vl')]:
vdu_id = request.POST.get('choice')
vnf_id = group_id
intvl_id = source_id if source_type == 'vnf_vl' else destination_id
vducp_id = source_id if source_type == 'vnf_vdu_cp' else destination_id
result = self.link_vducp_intvl(vnf_id, vdu_id, vducp_id, intvl_id)
elif (source_type, destination_type) in [('vnf_ext_cp', 'vnf_vl'), ('vnf_vl', 'vnf_ext_cp')]:
vnfExtCpd_id = source_id if source_type == 'vnf_ext_cp' else destination_id
intvl_id = source_id if source_type == 'vnf_vl' else destination_id
result = self.link_vnfextcpd_intvl(group_id, vnfExtCpd_id, intvl_id)
return result
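    # Illustrative sketch (not part of the original class): get_add_link expects
    # a POST dict shaped roughly like
    #
    #     {'source': 'vl1', 'target': 'vnf1', 'source_type': 'ns_vl',
    #      'target_type': 'vnf', 'group_id': 'nsd1', 'choice': 'extCp0'}
    #
    # where 'choice' carries the extra element (e.g. the VNF external CP or the
    # VDU id) needed by some link operations; the example ids are hypothetical.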
def get_remove_link(self, request):
result = False
parameters = request.POST.dict()
print "removelink", parameters
source_id = parameters['source']
destination_id = parameters['target']
source_type = parameters['source_type']
destination_type = parameters['target_type']
group_id = parameters['group_id']
if (source_type, destination_type) in [('ns_vl', 'ns_cp'), ('ns_cp', 'ns_vl')]:
vl_id = source_id if source_type == 'ns_vl' else destination_id
sap_id = source_id if source_type == 'ns_cp' else destination_id
result = self.unlink_vl_sap(group_id, vl_id, sap_id)
elif (source_type, destination_type) in [('ns_vl', 'vnf'), ('vnf', 'ns_vl')]:
vl_id = source_id if source_type == 'ns_vl' else destination_id
vnf_id = source_id if source_type == 'vnf' else destination_id
ns_id = group_id
result = self.unlink_vl_vnf(ns_id, vl_id, vnf_id)
if (source_type, destination_type) in [('vnf', 'ns_cp'), ('ns_cp', 'vnf')]:
vnf_id = source_id if source_type == 'vnf' else destination_id
sap_id = source_id if source_type == 'ns_cp' else destination_id
ns_id = group_id
result = self.unlink_vl_sap(ns_id, vnf_id, sap_id)
elif (source_type, destination_type) in [('vnf_vl', 'vnf_vdu_cp'), ('vnf_vdu_cp', 'vnf_vl')]:
intvl_id = source_id if source_type == 'vnf_vl' else destination_id
vducp_id = source_id if source_type == 'vnf_vdu_cp' else destination_id
vnf_id = group_id
result = self.unlink_vducp_intvl(vnf_id, vducp_id, intvl_id)
elif (source_type, destination_type) in [('vnf_ext_cp', 'vnf_vl'), ('vnf_vl', 'vnf_ext_cp')]:
vnfExtCpd_id = source_id if source_type == 'vnf_ext_cp' else destination_id
intvl_id = source_id if source_type == 'vnf_vl' else destination_id
result = self.unlink_vnfextcpd_intvl(group_id, vnfExtCpd_id, intvl_id)
return result
def get_unused_vnf(self, nsd_id):
try:
current_data = json.loads(self.data_project)
result = []
if 'vnfd' in current_data:
for vnf in current_data['vnfd']:
if vnf not in current_data['nsd'][nsd_id]['vnfdId']:
result.append(vnf)
except Exception as e:
log.exception(e)
result = None # TODO maybe we should use False ?
return result
def get_available_nodes(self, args):
"""Returns all available node """
log.debug('get_available_nodes')
try:
result = []
#current_data = json.loads(self.data_project)
model_graph = self.get_graph_model(GRAPH_MODEL_FULL_NAME)
for node in model_graph['layer'][args['layer']]['nodes']:
if 'addable' in model_graph['layer'][args['layer']]['nodes'][node] and model_graph['layer'][args['layer']]['nodes'][node]['addable']:
current_data = {
"id": node,
"category_name": model_graph['nodes'][node]['label'],
"types": [
{
"name": "generic",
"id": node
}
]
}
result.append(current_data)
#result = current_data[type_descriptor][descriptor_id]
except Exception as e:
log.debug(e)
result = []
return result
# NS operations: add/remove VL
def add_ns_vl(self, ns_id, vl_id):
try:
current_data = json.loads(self.data_project)
ns = self.get_descriptor_template('nsd')
vl_descriptor = ns['virtualLinkDesc'][0]
vl_descriptor['virtualLinkDescId'] = vl_id
current_data['nsd'][ns_id]['virtualLinkDesc'].append(vl_descriptor)
virtualLinkProfile = ns['nsDf'][0]['virtualLinkProfile'][0]
virtualLinkProfile['virtualLinkProfileId'] = "virtualLinkProfileId" + vl_id
virtualLinkProfile['virtualLinkDescId'] = vl_id
current_data['nsd'][ns_id]['nsDf'][0]['virtualLinkProfile'].append(virtualLinkProfile)
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
def remove_ns_vl(self, ns_id, vl_id):
try:
current_data = json.loads(self.data_project)
vl_descriptor = next(
(x for x in current_data['nsd'][ns_id]['virtualLinkDesc'] if x['virtualLinkDescId'] == vl_id), None)
if vl_descriptor is not None:
current_data['nsd'][ns_id]['virtualLinkDesc'].remove(vl_descriptor)
vl_profile = next((x for x in current_data['nsd'][ns_id]['nsDf'][0]['virtualLinkProfile'] if
x['virtualLinkDescId'] == vl_id), None)
if vl_profile is not None:
vl_profile_id = vl_profile['virtualLinkProfileId']
current_data['nsd'][ns_id]['nsDf'][0]['virtualLinkProfile'].remove(vl_profile)
for nsDf in current_data['nsd'][ns_id]['nsDf']:
for vnfProfile in nsDf["vnfProfile"]:
                        for nsVirtualLinkConnectivity in list(vnfProfile['nsVirtualLinkConnectivity']):
if nsVirtualLinkConnectivity['virtualLinkProfileId'] == vl_profile_id:
vnfProfile['nsVirtualLinkConnectivity'].remove(nsVirtualLinkConnectivity)
for sapd in current_data['nsd'][ns_id]['sapd']:
if 'nsVirtualLinkDescId' in sapd and sapd['nsVirtualLinkDescId'] == vl_id:
sapd['nsVirtualLinkDescId'] = None
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
def edit_ns_vl(self, ns_id, vl_id, vl_descriptor):
try:
current_data = json.loads(self.data_project)
self.remove_ns_vl(ns_id, vl_id)
current_data['nsd'][ns_id]['virtualLinkDesc'].append(vl_descriptor)
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
# NS operations: add/remove SAP
def add_ns_sap(self, ns_id, sap_id):
try:
current_data = json.loads(self.data_project)
# utility = Util()
ns = self.get_descriptor_template('nsd')
sap_descriptor = ns['sapd'][0]
sap_descriptor['cpdId'] = sap_id
current_data['nsd'][ns_id]['sapd'].append(sap_descriptor)
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
def remove_ns_sap(self, ns_id, sap_id):
try:
current_data = json.loads(self.data_project)
sap_descriptor = next((x for x in current_data['nsd'][ns_id]['sapd'] if x['cpdId'] == sap_id), None)
if sap_descriptor is not None:
current_data['nsd'][ns_id]['sapd'].remove(sap_descriptor)
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
def edit_ns_sap(self, ns_id, sap_id, sap_descriptor):
try:
current_data = json.loads(self.data_project)
self.remove_ns_sap(ns_id, sap_id)
current_data['nsd'][ns_id]['sapd'].append(sap_descriptor)
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
# NS operations: add/remove VNF
def add_ns_vnf(self, ns_id, vnf_id):
        # Add the id to vnfProfile, add an entry in nsDf, and create the VNF descriptor
try:
current_data = json.loads(self.data_project)
# utility = Util()
current_data['nsd'][ns_id]['vnfdId'].append(vnf_id)
vnf_profile = self.get_descriptor_template('nsd')['nsDf'][0]['vnfProfile'][0]
vnf_profile['vnfdId'] = vnf_id
current_data['nsd'][ns_id]['nsDf'][0]['vnfProfile'].append(vnf_profile)
vnf_descriptor = self.get_descriptor_template('vnfd')
vnf_descriptor['vnfdId'] = vnf_id
vnf_descriptor['vdu'] = []
vnf_descriptor['intVirtualLinkDesc'] = []
vnf_descriptor['vnfExtCpd'] = []
if 'vnfd' not in current_data:
current_data['vnfd'] = {}
current_data['vnfd'][vnf_id] = vnf_descriptor
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
def add_ns_existing_vnf(self, ns_id, vnf_id):
try:
current_data = json.loads(self.data_project)
current_data['nsd'][ns_id]['vnfdId'].append(vnf_id)
# utility = Util()
vnf_profile = self.get_descriptor_template('nsd')['nsDf'][0]['vnfProfile'][0]
vnf_profile['vnfdId'] = vnf_id
current_data['nsd'][ns_id]['nsDf'][0]['vnfProfile'].append(vnf_profile)
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
def remove_ns_vnf(self, ns_id, vnf_id):
try:
current_data = json.loads(self.data_project)
current_data['nsd'][ns_id]['vnfdId'].remove(vnf_id)
vnf_profile = next(
(x for x in current_data['nsd'][ns_id]['nsDf'][0]['vnfProfile'] if x['vnfdId'] == vnf_id), None)
if vnf_profile is not None:
current_data['nsd'][ns_id]['nsDf'][0]['vnfProfile'].remove(vnf_profile)
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
def edit_ns_vnf(self, vnf_id, vnf_descriptor):
try:
current_data = json.loads(self.data_project)
if 'vnfd' not in current_data:
current_data['vnfd'] = {}
current_data['vnfd'][vnf_id] = vnf_descriptor
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
# NS operations: add/remove Nested NS
def add_ns_nsNested(self, ns_id, nested_ns_id):
try:
current_data = json.loads(self.data_project)
current_data['nsd'][ns_id]['nestedNsdId'].append(nested_ns_id)
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
# NS operations: link/Unlink Sap with VL
def link_vl_sap(self, ns_id, vl_id, sap_id):
try:
current_data = json.loads(self.data_project)
sap_descriptor = next((x for x in current_data['nsd'][ns_id]['sapd'] if x['cpdId'] == sap_id), None)
if 'associatedCpdId' in sap_descriptor:
del sap_descriptor['associatedCpdId']
sap_descriptor['nsVirtualLinkDescId'] = vl_id
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
def unlink_vl_sap(self, ns_id, vl_id, sap_id):
try:
current_data = json.loads(self.data_project)
sap_descriptor = next((x for x in current_data['nsd'][ns_id]['sapd'] if x['cpdId'] == sap_id), None)
sap_descriptor['nsVirtualLinkDescId'] = None
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
    # NS operations: link/unlink VNF with SAP
def link_vnf_sap(self, ns_id, vnf_id, sap_id, ext_cp_id):
try:
current_data = json.loads(self.data_project)
sap_descriptor = next((x for x in current_data['nsd'][ns_id]['sapd'] if x['cpdId'] == sap_id), None)
if 'nsVirtualLinkDescId' in sap_descriptor:
del sap_descriptor['nsVirtualLinkDescId']
sap_descriptor['associatedCpdId'] = ext_cp_id
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
# NS operations: link/Unlink VNF with VL
def link_vl_vnf(self, ns_id, vl_id, vnf_id, ext_cp_id):
try:
current_data = json.loads(self.data_project)
# utility = Util()
vnf_profile = next(
(x for x in current_data['nsd'][ns_id]['nsDf'][0]['vnfProfile'] if x['vnfdId'] == vnf_id), None)
virtual_link_profile = next((x for x in current_data['nsd'][ns_id]['nsDf'][0]['virtualLinkProfile'] if
x['virtualLinkDescId'] == vl_id), None)
if virtual_link_profile is None:
virtual_link_profile = self.get_descriptor_template('nsd')['nsDf'][0]['virtualLinkProfile'][0]
virtual_link_profile['virtualLinkDescId'] = vl_id
current_data['nsd'][ns_id]['nsDf'][0]['virtualLinkProfile'].append(virtual_link_profile)
virtual_link_profile_id = virtual_link_profile['virtualLinkProfileId']
virtual_link_connectivity = next((x for x in vnf_profile['nsVirtualLinkConnectivity'] if
x['virtualLinkProfileId'] == virtual_link_profile_id), None)
if virtual_link_connectivity is not None:
virtual_link_connectivity['cpdId'].append(ext_cp_id)
else:
virtual_link_connectivity = \
self.get_descriptor_template('nsd')['nsDf'][0]['vnfProfile'][0]['nsVirtualLinkConnectivity'][0]
virtual_link_connectivity['virtualLinkProfileId'] = virtual_link_profile_id
virtual_link_connectivity['cpdId'].append(ext_cp_id)
vnf_profile['nsVirtualLinkConnectivity'].append(virtual_link_connectivity)
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
def unlink_vl_vnf(self, ns_id, vl_id, vnf_id):
try:
current_data = json.loads(self.data_project)
# utility = Util()
vnf_profile = next(
(x for x in current_data['nsd'][ns_id]['nsDf'][0]['vnfProfile'] if x['vnfdId'] == vnf_id), None)
virtual_link_profile = next((x for x in current_data['nsd'][ns_id]['nsDf'][0]['virtualLinkProfile'] if
x['virtualLinkDescId'] == vl_id), None)
virtual_link_profile_id = virtual_link_profile['virtualLinkProfileId']
virtual_link_connectivity = next((x for x in vnf_profile['nsVirtualLinkConnectivity'] if
x['virtualLinkProfileId'] == virtual_link_profile_id), None)
if virtual_link_connectivity is not None:
for vnfExtCpd in current_data['vnfd'][vnf_id]['vnfExtCpd']:
if vnfExtCpd['cpdId'] in virtual_link_connectivity['cpdId']:
log.debug("removing: %s", str(vnfExtCpd['cpdId']))
virtual_link_connectivity['cpdId'].remove(vnfExtCpd['cpdId'])
if not virtual_link_connectivity['cpdId']:
vnf_profile['nsVirtualLinkConnectivity'].remove(virtual_link_connectivity)
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
    # VNF operations: add/remove VDU
def add_vnf_vdu(self, vnf_id, vdu_id):
try:
current_data = json.loads(self.data_project)
# utility = Util()
vdu_descriptor = self.get_descriptor_template('vnfd')['vdu'][0]
vdu_descriptor['vduId'] = vdu_id
vdu_descriptor['intCpd'] = []
current_data['vnfd'][vnf_id]['vdu'].append(vdu_descriptor)
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
def remove_vnf_vdu(self, vnf_id, vdu_id):
        log.debug("remove_vnf_vdu %s %s", vnf_id, vdu_id)
try:
current_data = json.loads(self.data_project)
vdu_descriptor = next((x for x in current_data['vnfd'][vnf_id]['vdu'] if x['vduId'] == vdu_id), None)
            log.debug("vdu_descriptor %s", vdu_descriptor)
if vdu_descriptor is not None:
current_data['vnfd'][vnf_id]['vdu'].remove(vdu_descriptor)
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
def edit_vnf_vdu(self, vnf_id, vdu_id, vdu_descriptor):
try:
current_data = json.loads(self.data_project)
self.remove_vnf_vdu(vnf_id, vdu_id)
current_data['vnfd'][vnf_id]['vdu'].append(vdu_descriptor)
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
    # VNF operations: add/remove VDU CP
def add_vnf_vducp(self, vnf_id, vdu_id, vducp_id):
try:
            log.debug("%s %s %s", vnf_id, vdu_id, vducp_id)
current_data = json.loads(self.data_project)
# utility = Util()
vdu_descriptor = next((x for x in current_data['vnfd'][vnf_id]['vdu'] if x['vduId'] == vdu_id), None)
intcp_descriptor = self.get_descriptor_template('vnfd')['vdu'][0]['intCpd'][0]
intcp_descriptor['cpdId'] = vducp_id
vdu_descriptor['intCpd'].append(intcp_descriptor)
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
def remove_vnf_vducp(self, vnf_id, vdu_id, vducp_id):
try:
current_data = json.loads(self.data_project)
vdu_descriptor = next((x for x in current_data['vnfd'][vnf_id]['vdu'] if x['vduId'] == vdu_id), None)
intcp_descriptor = next((x for x in vdu_descriptor['intCpd'] if x['cpdId'] == vducp_id), None)
vdu_descriptor['intCpd'].remove(intcp_descriptor)
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
    # VNF operations: link/unlink VduCP and IntVL
def link_vducp_intvl(self, vnf_id, vdu_id, vducp_id, intvl_id):
try:
current_data = json.loads(self.data_project)
vdu_descriptor = next((x for x in current_data['vnfd'][vnf_id]['vdu'] if x['vduId'] == vdu_id), None)
intcp_descriptor = next((x for x in vdu_descriptor['intCpd'] if x['cpdId'] == vducp_id), None)
intcp_descriptor['intVirtualLinkDesc'] = intvl_id
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
def unlink_vducp_intvl(self, vnf_id, vducp_id, intvl_id):
try:
current_data = json.loads(self.data_project)
for vdu in current_data['vnfd'][vnf_id]['vdu']:
intCpd = next(
(x for x in vdu['intCpd'] if x['cpdId'] == vducp_id and x['intVirtualLinkDesc'] == intvl_id), None)
if intCpd is not None:
intCpd['intVirtualLinkDesc'] = None
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
    # VNF operations: add/remove IntVL
def add_vnf_intvl(self, vnf_id, intvl_id):
try:
current_data = json.loads(self.data_project)
# utility = Util()
intVirtualLinkDesc = self.get_descriptor_template('vnfd')['intVirtualLinkDesc'][0]
intVirtualLinkDesc['virtualLinkDescId'] = intvl_id
current_data['vnfd'][vnf_id]['intVirtualLinkDesc'].append(intVirtualLinkDesc)
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
def remove_vnf_intvl(self, vnf_id, intvl_id):
try:
current_data = json.loads(self.data_project)
# utility = Util()
intVirtualLinkDesc = next(
(x for x in current_data['vnfd'][vnf_id]['intVirtualLinkDesc'] if x['virtualLinkDescId'] == intvl_id),
None)
current_data['vnfd'][vnf_id]['intVirtualLinkDesc'].remove(intVirtualLinkDesc)
for vdu in current_data['vnfd'][vnf_id]['vdu']:
for intCpd in vdu['intCpd']:
if intCpd['intVirtualLinkDesc'] == intvl_id:
intCpd['intVirtualLinkDesc'] = None
for vnfExtCpd in current_data['vnfd'][vnf_id]['vnfExtCpd']:
if vnfExtCpd['intVirtualLinkDesc'] == intvl_id:
vnfExtCpd['intVirtualLinkDesc'] = None
for deploymentFlavour in current_data['vnfd'][vnf_id]['deploymentFlavour']:
                for virtualLinkProfile in list(deploymentFlavour['virtualLinkProfile']):
if virtualLinkProfile['vnfVirtualLinkDescId'] == intvl_id:
deploymentFlavour['virtualLinkProfile'].remove(virtualLinkProfile)
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
    # VNF operations: add/remove vnfExtCpd
def add_vnf_vnfextcpd(self, vnf_id, vnfExtCpd_id):
try:
current_data = json.loads(self.data_project)
# utility = Util()
vnfExtCpd = self.get_descriptor_template('vnfd')['vnfExtCpd'][0]
vnfExtCpd['cpdId'] = vnfExtCpd_id
current_data['vnfd'][vnf_id]['vnfExtCpd'].append(vnfExtCpd)
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
def remove_vnf_vnfextcpd(self, vnf_id, vnfExtCpd_id):
try:
current_data = json.loads(self.data_project)
# utility = Util()
vnfExtCpd = next((x for x in current_data['vnfd'][vnf_id]['vnfExtCpd'] if x['cpdId'] == vnfExtCpd_id), None)
current_data['vnfd'][vnf_id]['vnfExtCpd'].remove(vnfExtCpd)
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
    # VNF operations: link/unlink vnfExtCpd and IntVL
def link_vnfextcpd_intvl(self, vnf_id, vnfExtCpd_id, intvl_id):
try:
current_data = json.loads(self.data_project)
vnfExtCpd = next((x for x in current_data['vnfd'][vnf_id]['vnfExtCpd'] if x['cpdId'] == vnfExtCpd_id), None)
vnfExtCpd['intVirtualLinkDesc'] = intvl_id
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
def unlink_vnfextcpd_intvl(self, vnf_id, vnfExtCpd_id, intvl_id):
try:
current_data = json.loads(self.data_project)
vnfExtCpd = next((x for x in current_data['vnfd'][vnf_id]['vnfExtCpd'] if x['cpdId'] == vnfExtCpd_id), None)
vnfExtCpd['intVirtualLinkDesc'] = None
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
def add_vnffg(self, ns_id, vnffg_id):
try:
current_data = json.loads(self.data_project)
# utility = Util()
vnffg = self.get_descriptor_template('nsd')['vnffgd'][0]
vnffg['vnffgdId'] = vnffg_id
current_data['nsd'][ns_id]['vnffgd'].append(vnffg)
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
# def add_node_to_vnffg(self, ns_id, vnffg_id, element_type, element_id):
def add_node_to_vnffg(self, request):
try:
group_id = request.POST.get('group_id')
element_id = request.POST.get('element_id')
element_type = request.POST.get('element_type')
vnffg_id = request.POST.get('vnffg_id')
# print group_id, element_id, element_type, vnffg_id
current_data = json.loads(self.data_project)
vnffg = next((x for x in current_data['nsd'][group_id]['vnffgd'] if x['vnffgdId'] == vnffg_id), None)
if element_type == 'ns_vl':
vnffg['virtualLinkDescId'].append(element_id)
elif element_type == 'vnf':
vnffg['vnfdId'].append(element_id)
elif element_type == 'ns_cp':
vnffg['cpdPoolId'].append(element_id)
self.data_project = current_data
self.update()
result = True
except Exception as e:
log.exception(e)
result = False
return result
# Project.add_project_type('etsi', EtsiProject)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import secrets
import uuid
from cgi import FieldStorage
from unittest import TestCase
import pretend
import pytest
from pyramid.httpexceptions import HTTPNotFound
from sqlalchemy.orm.exc import NoResultFound
from webob.multidict import MultiDict
from warehouse.admin.interfaces import ISponsorLogoStorage
from warehouse.admin.views import sponsors as views
from warehouse.sponsors.models import Sponsor
from ....common.db.sponsors import SponsorFactory
COLOR_LOGO_FILE = FieldStorage()
COLOR_LOGO_FILE.filename = "colorlogo.png"
COLOR_LOGO_FILE.file = io.BytesIO(
(
b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x06"
b"\x00\x00\x00\x1f\x15\xc4\x89\x00\x00\x00\rIDATx\xdac\xfc\xcf\xc0P\x0f\x00"
b"\x04\x85\x01\x80\x84\xa9\x8c!\x00\x00\x00\x00IEND\xaeB`\x82"
)
)
COLOR_LOGO_FILE.type = "image/png"
WHITE_LOGO_FILE = FieldStorage()
WHITE_LOGO_FILE.filename = "whitelogo.png"
WHITE_LOGO_FILE.file = io.BytesIO(
(
b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x06"
b"\x00\x00\x00\x1f\x15\xc4\x89\x00\x00\x00\rIDATx\xdac\xfc\xcf\xc0P\x0f\x00"
b"\x04\x85\x01\x80\x84\xa9\x8c!\x00\x00\x00\x00IEND\xaeB`\x82"
)
)
WHITE_LOGO_FILE.type = "image/png"
class TestSponsorList:
def test_list_all_sponsors(self, db_request):
[SponsorFactory.create() for _ in range(5)]
sponsors = db_request.db.query(Sponsor).order_by(Sponsor.name).all()
result = views.sponsor_list(db_request)
assert result == {"sponsors": sponsors}
class TestCreateSponsor:
def test_serialize_form_to_create_sponsor(self, db_request):
result = views.create_sponsor(db_request)
assert len(result) == 1
assert isinstance(result["form"], views.SponsorForm)
def test_serialize_form_errors_if_invalid_post(self, db_request):
db_request.method = "POST"
db_request.POST["name"] = ""
db_request.POST["link_url"] = ""
db_request.POST = MultiDict(db_request.POST)
result = views.create_sponsor(db_request)
assert len(result) == 1
assert isinstance(result["form"], views.SponsorForm)
assert result["form"].errors
def test_create_sponsor(self, db_request):
db_request.method = "POST"
db_request.POST["name"] = "Sponsor"
db_request.POST["link_url"] = "https://newsponsor.com"
db_request.POST["color_logo"] = COLOR_LOGO_FILE
db_request.POST = MultiDict(db_request.POST)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.route_url = pretend.call_recorder(lambda r: "/admin/sponsors/")
storage_service = pretend.stub(
store=pretend.call_recorder(
lambda path, file_path, ct: f"http://files/sponsorlogos/{path}"
)
)
db_request.find_service = pretend.call_recorder(
lambda svc, name=None, context=None: {
ISponsorLogoStorage: storage_service,
}.get(svc)
)
resp = views.create_sponsor(db_request)
assert resp.status_code == 303
assert resp.location == "/admin/sponsors/"
assert db_request.session.flash.calls == [
pretend.call("Added new sponsor 'Sponsor'", queue="success")
]
assert db_request.route_url.calls == [pretend.call("admin.sponsor.list")]
class TestEditSponsor:
def test_serialize_form_and_sponsor(self, db_request):
sponsor = SponsorFactory.create()
db_request.matchdict["sponsor_id"] = sponsor.id
result = views.edit_sponsor(db_request)
assert len(result) == 2
assert isinstance(result["form"], views.SponsorForm)
assert result["form"].data["name"] == sponsor.name
assert result["sponsor"] == sponsor
def test_404_if_sponsor_does_not_exist(self, db_request):
db_request.matchdict["sponsor_id"] = str(uuid.uuid4())
with pytest.raises(HTTPNotFound):
views.edit_sponsor(db_request)
def test_update_sponsor(self, monkeypatch, db_request):
sponsor = SponsorFactory.create()
form = views.SponsorForm(MultiDict({}), sponsor)
data = form.data.copy()
data["name"] = "New Name"
data["white_logo"] = WHITE_LOGO_FILE
data["color_logo"] = COLOR_LOGO_FILE
db_request.matchdict["sponsor_id"] = sponsor.id
db_request.method = "POST"
db_request.POST = MultiDict(data)
db_request.current_route_path = pretend.call_recorder(
lambda: f"/admin/sponsors/{sponsor.id}/"
)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
storage_service = pretend.stub(
store=pretend.call_recorder(
lambda path, file_path, ct: f"http://files/sponsorlogos/{path}"
)
)
db_request.find_service = pretend.call_recorder(
lambda svc, name=None, context=None: {
ISponsorLogoStorage: storage_service,
}.get(svc)
)
monkeypatch.setattr(secrets, "token_urlsafe", lambda x: "deadbeef")
resp = views.edit_sponsor(db_request)
db_sponsor = db_request.db.query(Sponsor).filter(Sponsor.id == sponsor.id).one()
assert resp.status_code == 303
assert resp.location == f"/admin/sponsors/{sponsor.id}/"
assert db_sponsor.name == "New Name"
assert (
db_sponsor.white_logo_url
== "http://files/sponsorlogos/new-name-white-logo-deadbeef.png"
)
assert (
db_sponsor.color_logo_url
== "http://files/sponsorlogos/new-name-color-logo-deadbeef.png"
)
assert db_request.session.flash.calls == [
pretend.call("Sponsor updated", queue="success")
]
def test_form_errors_if_invalid_post_data(self, db_request):
sponsor = SponsorFactory.create()
form = views.SponsorForm(MultiDict({}), sponsor)
data = form.data.copy()
data["name"] = "" # name is required
db_request.matchdict["sponsor_id"] = sponsor.id
db_request.method = "POST"
db_request.POST = MultiDict(data)
result = views.edit_sponsor(db_request)
assert "name" in result["form"].errors
class TestDeleteSponsor:
def test_404_if_sponsor_does_not_exist(self, db_request):
db_request.matchdict["sponsor_id"] = str(uuid.uuid4())
with pytest.raises(HTTPNotFound):
views.delete_sponsor(db_request)
def test_delete_sponsor(self, db_request):
sponsor = SponsorFactory.create()
db_request.matchdict["sponsor_id"] = sponsor.id
db_request.params = {"sponsor": sponsor.name}
db_request.method = "POST"
db_request.route_url = pretend.call_recorder(lambda s: "/admin/sponsors/")
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
resp = views.delete_sponsor(db_request)
with pytest.raises(NoResultFound):
db_request.db.query(Sponsor).filter(Sponsor.id == sponsor.id).one()
assert resp.status_code == 303
assert resp.location == "/admin/sponsors/"
assert db_request.session.flash.calls == [
pretend.call(f"Deleted sponsor {sponsor.name}", queue="success")
]
assert db_request.route_url.calls == [pretend.call("admin.sponsor.list")]
def test_do_not_delete_sponsor_if_invalid_confirmation_param(self, db_request):
sponsor = SponsorFactory.create()
db_request.matchdict["sponsor_id"] = sponsor.id
db_request.params = {"sponsor": "not the sponsor name"}
db_request.method = "POST"
db_request.route_url = pretend.call_recorder(
lambda s, sponsor_id: f"/admin/sponsors/{sponsor_id}"
)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
resp = views.delete_sponsor(db_request)
sponsor = db_request.db.query(Sponsor).filter(Sponsor.id == sponsor.id).one()
assert resp.status_code == 303
assert resp.location == f"/admin/sponsors/{sponsor.id}"
assert db_request.session.flash.calls == [
pretend.call("Wrong confirmation input", queue="error")
]
assert db_request.route_url.calls == [
pretend.call("admin.sponsor.edit", sponsor_id=sponsor.id)
]
class TestSponsorForm(TestCase):
def setUp(self):
self.data = {
"name": "Sponsor",
"link_url": "https://newsponsor.com",
"color_logo_url": "http://domain.com/image.jpg",
}
def test_required_fields(self):
required_fields = ["name", "link_url", "color_logo_url"]
form = views.SponsorForm(data={"color_logo_url": ""})
assert form.validate() is False
assert len(form.errors) == len(required_fields)
for field in required_fields:
assert field in form.errors
def test_valid_data(self):
form = views.SponsorForm(data=self.data)
assert form.validate() is True
def test_white_logo_is_required_for_footer_display(self):
self.data["footer"] = True
        # should not validate without a white logo
form = views.SponsorForm(data=self.data)
assert form.validate() is False
assert "white_logo" in form.errors
self.data["white_logo_url"] = "http://domain.com/white-logo.jpg"
form = views.SponsorForm(data=self.data)
assert form.validate() is True
def test_white_logo_is_required_for_infra_display(self):
self.data["infra_sponsor"] = True
        # should not validate without a white logo
form = views.SponsorForm(data=self.data)
assert form.validate() is False
assert "white_logo" in form.errors
self.data["white_logo_url"] = "http://domain.com/white-logo.jpg"
form = views.SponsorForm(data=self.data)
assert form.validate() is True
"""Template helper methods for rendering strings with Home Assistant data."""
from datetime import datetime
import json
import logging
import math
import random
import base64
import re
import jinja2
from jinja2 import contextfilter
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2.utils import Namespace
from homeassistant.const import (
ATTR_LATITUDE, ATTR_LONGITUDE, ATTR_UNIT_OF_MEASUREMENT, MATCH_ALL,
STATE_UNKNOWN)
from homeassistant.core import State, valid_entity_id
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import location as loc_helper
from homeassistant.helpers.typing import TemplateVarsType
from homeassistant.loader import bind_hass
from homeassistant.util import convert
from homeassistant.util import dt as dt_util
from homeassistant.util import location as loc_util
from homeassistant.util.async_ import run_callback_threadsafe
_LOGGER = logging.getLogger(__name__)
_SENTINEL = object()
DATE_STR_FORMAT = "%Y-%m-%d %H:%M:%S"
_RE_NONE_ENTITIES = re.compile(r"distance\(|closest\(", re.I | re.M)
_RE_GET_ENTITIES = re.compile(
r"(?:(?:states\.|(?:is_state|is_state_attr|state_attr|states)"
r"\((?:[\ \'\"]?))([\w]+\.[\w]+)|([\w]+))", re.I | re.M
)
_RE_JINJA_DELIMITERS = re.compile(r"\{%|\{\{")
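# Illustrative sketch (not part of the original module): on a template such as
# "{{ is_state('light.kitchen', 'on') }}", _RE_GET_ENTITIES captures
# "light.kitchen" in its first group, while bare words land in the second group
# and are only kept by extract_entities() when they name a template variable
# holding a valid entity id.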
@bind_hass
def attach(hass, obj):
"""Recursively attach hass to all template instances in list and dict."""
if isinstance(obj, list):
for child in obj:
attach(hass, child)
elif isinstance(obj, dict):
for child in obj.values():
attach(hass, child)
elif isinstance(obj, Template):
obj.hass = hass
def render_complex(value, variables=None):
"""Recursive template creator helper function."""
if isinstance(value, list):
return [render_complex(item, variables)
for item in value]
if isinstance(value, dict):
return {key: render_complex(item, variables)
for key, item in value.items()}
return value.async_render(variables)
def extract_entities(template, variables=None):
"""Extract all entities for state_changed listener from template string."""
if template is None or _RE_JINJA_DELIMITERS.search(template) is None:
return []
if _RE_NONE_ENTITIES.search(template):
return MATCH_ALL
extraction = _RE_GET_ENTITIES.findall(template)
extraction_final = []
for result in extraction:
        if result[0] == 'trigger.entity_id' and variables and \
                'trigger' in variables and \
                'entity_id' in variables['trigger']:
extraction_final.append(variables['trigger']['entity_id'])
elif result[0]:
extraction_final.append(result[0])
if variables and result[1] in variables and \
isinstance(variables[result[1]], str) and \
valid_entity_id(variables[result[1]]):
extraction_final.append(variables[result[1]])
if extraction_final:
return list(set(extraction_final))
return MATCH_ALL
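# Illustrative sketch (not part of the original module); the entity ids used
# here are hypothetical:
#
#     extract_entities("{{ states('sensor.temp') }}")      -> ['sensor.temp']
#     extract_entities("{{ distance(states.zone.home) }}") -> MATCH_ALL
#     extract_entities("no jinja here")                     -> []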
class Template:
"""Class to hold a template and manage caching and rendering."""
def __init__(self, template, hass=None):
"""Instantiate a template."""
if not isinstance(template, str):
raise TypeError('Expected template to be a string')
self.template = template
self._compiled_code = None
self._compiled = None
self.hass = hass
def ensure_valid(self):
"""Return if template is valid."""
if self._compiled_code is not None:
return
try:
self._compiled_code = ENV.compile(self.template)
except jinja2.exceptions.TemplateSyntaxError as err:
raise TemplateError(err)
def extract_entities(self, variables=None):
"""Extract all entities for state_changed listener."""
return extract_entities(self.template, variables)
def render(self, variables: TemplateVarsType = None, **kwargs):
"""Render given template."""
if variables is not None:
kwargs.update(variables)
return run_callback_threadsafe(
self.hass.loop, self.async_render, kwargs).result()
def async_render(self, variables: TemplateVarsType = None,
**kwargs) -> str:
"""Render given template.
This method must be run in the event loop.
"""
if self._compiled is None:
self._ensure_compiled()
if variables is not None:
kwargs.update(variables)
try:
return self._compiled.render(kwargs).strip()
except jinja2.TemplateError as err:
raise TemplateError(err)
def render_with_possible_json_value(self, value, error_value=_SENTINEL):
"""Render template with value exposed.
If valid JSON will expose value_json too.
"""
return run_callback_threadsafe(
self.hass.loop, self.async_render_with_possible_json_value, value,
error_value).result()
def async_render_with_possible_json_value(self, value,
error_value=_SENTINEL,
variables=None):
"""Render template with value exposed.
If valid JSON will expose value_json too.
This method must be run in the event loop.
"""
if self._compiled is None:
self._ensure_compiled()
variables = dict(variables or {})
variables['value'] = value
try:
variables['value_json'] = json.loads(value)
except (ValueError, TypeError):
pass
try:
return self._compiled.render(variables).strip()
except jinja2.TemplateError as ex:
if error_value is _SENTINEL:
_LOGGER.error(
"Error parsing value: %s (value: %s, template: %s)",
ex, value, self.template)
return value if error_value is _SENTINEL else error_value
def _ensure_compiled(self):
"""Bind a template to a specific hass instance."""
self.ensure_valid()
assert self.hass is not None, 'hass variable not set on template'
template_methods = TemplateMethods(self.hass)
global_vars = ENV.make_globals({
'closest': template_methods.closest,
'distance': template_methods.distance,
'is_state': self.hass.states.is_state,
'is_state_attr': template_methods.is_state_attr,
'state_attr': template_methods.state_attr,
'states': AllStates(self.hass),
})
self._compiled = jinja2.Template.from_code(
ENV, self._compiled_code, global_vars, None)
return self._compiled
def __eq__(self, other):
"""Compare template with another."""
return (self.__class__ == other.__class__ and
self.template == other.template and
self.hass == other.hass)
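# Illustrative sketch: rendering a Template against a hass instance. The hass
# object and the entity id are assumptions for demonstration; async_render()
# must be called from within the Home Assistant event loop.
def _example_render(hass):
    tpl = Template("Temperature: {{ states('sensor.temperature') }}", hass)
    return tpl.async_render()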
class AllStates:
"""Class to expose all HA states as attributes."""
def __init__(self, hass):
"""Initialize all states."""
self._hass = hass
def __getattr__(self, name):
"""Return the domain state."""
return DomainStates(self._hass, name)
def __iter__(self):
"""Return all states."""
return iter(
_wrap_state(state) for state in
sorted(self._hass.states.async_all(),
key=lambda state: state.entity_id))
def __len__(self):
"""Return number of states."""
return len(self._hass.states.async_entity_ids())
def __call__(self, entity_id):
"""Return the states."""
state = self._hass.states.get(entity_id)
return STATE_UNKNOWN if state is None else state.state
class DomainStates:
"""Class to expose a specific HA domain as attributes."""
def __init__(self, hass, domain):
"""Initialize the domain states."""
self._hass = hass
self._domain = domain
def __getattr__(self, name):
"""Return the states."""
return _wrap_state(
self._hass.states.get('{}.{}'.format(self._domain, name)))
def __iter__(self):
"""Return the iteration over all the states."""
return iter(sorted(
(_wrap_state(state) for state in self._hass.states.async_all()
if state.domain == self._domain),
key=lambda state: state.entity_id))
def __len__(self):
"""Return number of states."""
return len(self._hass.states.async_entity_ids(self._domain))
class TemplateState(State):
"""Class to represent a state object in a template."""
# Inheritance is done so functions that check against State keep working
# pylint: disable=super-init-not-called
def __init__(self, state):
"""Initialize template state."""
self._state = state
@property
def state_with_unit(self):
"""Return the state concatenated with the unit if available."""
state = object.__getattribute__(self, '_state')
unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
if unit is None:
return state.state
return "{} {}".format(state.state, unit)
def __getattribute__(self, name):
"""Return an attribute of the state."""
if name in TemplateState.__dict__:
return object.__getattribute__(self, name)
return getattr(object.__getattribute__(self, '_state'), name)
def __repr__(self):
"""Representation of Template State."""
rep = object.__getattribute__(self, '_state').__repr__()
return '<template ' + rep[1:]
def _wrap_state(state):
"""Wrap a state."""
return None if state is None else TemplateState(state)
class TemplateMethods:
"""Class to expose helpers to templates."""
def __init__(self, hass):
"""Initialize the helpers."""
self._hass = hass
def closest(self, *args):
"""Find closest entity.
Closest to home:
closest(states)
closest(states.device_tracker)
closest('group.children')
closest(states.group.children)
Closest to a point:
closest(23.456, 23.456, 'group.children')
closest('zone.school', 'group.children')
closest(states.zone.school, 'group.children')
"""
if len(args) == 1:
latitude = self._hass.config.latitude
longitude = self._hass.config.longitude
entities = args[0]
elif len(args) == 2:
point_state = self._resolve_state(args[0])
if point_state is None:
_LOGGER.warning("Closest:Unable to find state %s", args[0])
return None
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Closest:State does not contain valid location: %s",
point_state)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
entities = args[1]
else:
latitude = convert(args[0], float)
longitude = convert(args[1], float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Closest:Received invalid coordinates: %s, %s",
args[0], args[1])
return None
entities = args[2]
if isinstance(entities, (AllStates, DomainStates)):
states = list(entities)
else:
if isinstance(entities, State):
gr_entity_id = entities.entity_id
else:
gr_entity_id = str(entities)
group = self._hass.components.group
states = [self._hass.states.get(entity_id) for entity_id
in group.expand_entity_ids([gr_entity_id])]
return _wrap_state(loc_helper.closest(latitude, longitude, states))
def distance(self, *args):
"""Calculate distance.
Will calculate distance from home to a point or between points.
Points can be passed in using state objects or lat/lng coordinates.
"""
locations = []
to_process = list(args)
while to_process:
value = to_process.pop(0)
point_state = self._resolve_state(value)
if point_state is None:
# We expect this and next value to be lat&lng
if not to_process:
_LOGGER.warning(
"Distance:Expected latitude and longitude, got %s",
value)
return None
value_2 = to_process.pop(0)
latitude = convert(value, float)
longitude = convert(value_2, float)
if latitude is None or longitude is None:
_LOGGER.warning("Distance:Unable to process latitude and "
"longitude: %s, %s", value, value_2)
return None
else:
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"distance:State does not contain valid location: %s",
point_state)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
if latitude is None or longitude is None:
_LOGGER.warning(
"Distance:State does not contains a location: %s",
value)
return None
locations.append((latitude, longitude))
if len(locations) == 1:
return self._hass.config.distance(*locations[0])
return self._hass.config.units.length(
loc_util.distance(*locations[0] + locations[1]), 'm')
def is_state_attr(self, entity_id, name, value):
"""Test if a state is a specific attribute."""
state_attr = self.state_attr(entity_id, name)
return state_attr is not None and state_attr == value
def state_attr(self, entity_id, name):
"""Get a specific attribute from a state."""
state_obj = self._hass.states.get(entity_id)
if state_obj is not None:
return state_obj.attributes.get(name)
return None
def _resolve_state(self, entity_id_or_state):
"""Return state or entity_id if given."""
if isinstance(entity_id_or_state, State):
return entity_id_or_state
if isinstance(entity_id_or_state, str):
return self._hass.states.get(entity_id_or_state)
return None
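# Illustrative sketch: typical template strings that exercise the closest()
# and distance() helpers above. The entity and group ids are hypothetical;
# render these via Template(...) with a configured hass instance.
_EXAMPLE_CLOSEST_TEMPLATE = \
    "{{ closest('zone.school', 'group.children').entity_id }}"
_EXAMPLE_DISTANCE_TEMPLATE = \
    "{{ distance('device_tracker.paulus', 'zone.home') | round(1) }}"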
def forgiving_round(value, precision=0):
"""Round accepted strings."""
try:
value = round(float(value), precision)
return int(value) if precision == 0 else value
except (ValueError, TypeError):
# If value can't be converted to float
return value
def multiply(value, amount):
"""Filter to convert value to float and multiply it."""
try:
return float(value) * amount
except (ValueError, TypeError):
# If value can't be converted to float
return value
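# Illustrative sketch: the forgiving numeric filters convert where possible
# and hand back the original value unchanged when conversion fails.
def _example_numeric_filters():
    assert forgiving_round("3.7") == 4
    assert forgiving_round("3.7", 1) == 3.7
    assert multiply("2.5", 4) == 10.0
    assert multiply("no number", 4) == "no number"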
def logarithm(value, base=math.e):
"""Filter to get logarithm of the value with a specific base."""
try:
return math.log(float(value), float(base))
except (ValueError, TypeError):
return value
def sine(value):
"""Filter to get sine of the value."""
try:
return math.sin(float(value))
except (ValueError, TypeError):
return value
def cosine(value):
"""Filter to get cosine of the value."""
try:
return math.cos(float(value))
except (ValueError, TypeError):
return value
def tangent(value):
"""Filter to get tangent of the value."""
try:
return math.tan(float(value))
except (ValueError, TypeError):
return value
def square_root(value):
"""Filter to get square root of the value."""
try:
return math.sqrt(float(value))
except (ValueError, TypeError):
return value
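# Illustrative sketch: the math filters behave like their math-module
# counterparts for numeric input and fall through unchanged for anything else.
def _example_math_filters():
    assert sine(0) == 0.0
    assert cosine(0) == 1.0
    assert tangent(0) == 0.0
    assert square_root(16) == 4.0
    assert logarithm("not a number") == "not a number"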
def timestamp_custom(value, date_format=DATE_STR_FORMAT, local=True):
"""Filter to convert given timestamp to format."""
try:
date = dt_util.utc_from_timestamp(value)
if local:
date = dt_util.as_local(date)
return date.strftime(date_format)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_local(value):
"""Filter to convert given timestamp to local date/time."""
try:
return dt_util.as_local(
dt_util.utc_from_timestamp(value)).strftime(DATE_STR_FORMAT)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_utc(value):
"""Filter to convert given timestamp to UTC date/time."""
try:
return dt_util.utc_from_timestamp(value).strftime(DATE_STR_FORMAT)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def forgiving_as_timestamp(value):
"""Try to convert value to timestamp."""
try:
return dt_util.as_timestamp(value)
except (ValueError, TypeError):
return None
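# Illustrative sketch: epoch second 0 formatted by the timestamp filters.
# Local output depends on the configured timezone, so only UTC results are
# asserted here.
def _example_timestamps():
    assert timestamp_utc(0) == "1970-01-01 00:00:00"
    assert timestamp_custom(0, "%Y", local=False) == "1970"
    assert timestamp_local("not a timestamp") == "not a timestamp"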
def strptime(string, fmt):
"""Parse a time string to datetime."""
try:
return datetime.strptime(string, fmt)
except (ValueError, AttributeError):
return string
def fail_when_undefined(value):
"""Filter to force a failure when the value is undefined."""
if isinstance(value, jinja2.Undefined):
value()
return value
def forgiving_float(value):
"""Try to convert value to a float."""
try:
return float(value)
except (ValueError, TypeError):
return value
def regex_match(value, find='', ignorecase=False):
"""Match value using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.match(find, value, flags))
def regex_replace(value='', find='', replace='', ignorecase=False):
"""Replace using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
regex = re.compile(find, flags)
return regex.sub(replace, value)
def regex_search(value, find='', ignorecase=False):
"""Search using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.search(find, value, flags))
def regex_findall_index(value, find='', index=0, ignorecase=False):
"""Find all matches using regex and then pick specific match index."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return re.findall(find, value, flags)[index]
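# Illustrative sketch: direct calls to the regex helpers above; in templates
# they are used as filters, e.g. "{{ value | regex_search('pattern') }}".
def _example_regex_filters():
    assert regex_match("Home Assistant", find="home", ignorecase=True)
    assert regex_replace("Home Assistant", find="Home ", replace="") == "Assistant"
    assert regex_findall_index("from JFK to LHR", find="[A-Z]{3}", index=1) == "LHR"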
def bitwise_and(first_value, second_value):
"""Perform a bitwise and operation."""
return first_value & second_value
def bitwise_or(first_value, second_value):
"""Perform a bitwise or operation."""
return first_value | second_value
def base64_encode(value):
"""Perform base64 encode."""
return base64.b64encode(value.encode('utf-8')).decode('utf-8')
def base64_decode(value):
"""Perform base64 denode."""
return base64.b64decode(value).decode('utf-8')
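# Illustrative sketch: the bitwise filters operate on plain ints and the
# base64 filters on strings.
def _example_encoding_filters():
    assert bitwise_and(0b1100, 0b1010) == 0b1000
    assert bitwise_or(0b1100, 0b1010) == 0b1110
    assert base64_encode("abc") == "YWJj"
    assert base64_decode("YWJj") == "abc"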
def ordinal(value):
"""Perform ordinal conversion."""
return str(value) + (list(['th', 'st', 'nd', 'rd'] + ['th'] * 6)
[(int(str(value)[-1])) % 10] if
int(str(value)[-2:]) % 100 not in range(11, 14)
else 'th')
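# Illustrative sketch: the ordinal filter appends the English suffix, with the
# usual special case for 11-13.
def _example_ordinal():
    assert ordinal(1) == '1st'
    assert ordinal(2) == '2nd'
    assert ordinal(11) == '11th'
    assert ordinal(21) == '21st'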
@contextfilter
def random_every_time(context, values):
"""Choose a random value.
Unlike Jinja's random filter,
this is context-dependent to avoid caching the chosen value.
"""
return random.choice(values)
class TemplateEnvironment(ImmutableSandboxedEnvironment):
"""The Home Assistant template environment."""
def is_safe_callable(self, obj):
"""Test if callback is safe."""
return isinstance(obj, AllStates) or super().is_safe_callable(obj)
def is_safe_attribute(self, obj, attr, value):
"""Test if attribute is safe."""
return isinstance(obj, Namespace) or \
super().is_safe_attribute(obj, attr, value)
ENV = TemplateEnvironment()
ENV.filters['round'] = forgiving_round
ENV.filters['multiply'] = multiply
ENV.filters['log'] = logarithm
ENV.filters['sin'] = sine
ENV.filters['cos'] = cosine
ENV.filters['tan'] = tangent
ENV.filters['sqrt'] = square_root
ENV.filters['as_timestamp'] = forgiving_as_timestamp
ENV.filters['timestamp_custom'] = timestamp_custom
ENV.filters['timestamp_local'] = timestamp_local
ENV.filters['timestamp_utc'] = timestamp_utc
ENV.filters['is_defined'] = fail_when_undefined
ENV.filters['max'] = max
ENV.filters['min'] = min
ENV.filters['random'] = random_every_time
ENV.filters['base64_encode'] = base64_encode
ENV.filters['base64_decode'] = base64_decode
ENV.filters['ordinal'] = ordinal
ENV.filters['regex_match'] = regex_match
ENV.filters['regex_replace'] = regex_replace
ENV.filters['regex_search'] = regex_search
ENV.filters['regex_findall_index'] = regex_findall_index
ENV.filters['bitwise_and'] = bitwise_and
ENV.filters['bitwise_or'] = bitwise_or
ENV.globals['log'] = logarithm
ENV.globals['sin'] = sine
ENV.globals['cos'] = cosine
ENV.globals['tan'] = tangent
ENV.globals['sqrt'] = square_root
ENV.globals['pi'] = math.pi
ENV.globals['tau'] = math.pi * 2
ENV.globals['e'] = math.e
ENV.globals['float'] = forgiving_float
ENV.globals['now'] = dt_util.now
ENV.globals['utcnow'] = dt_util.utcnow
ENV.globals['as_timestamp'] = forgiving_as_timestamp
ENV.globals['relative_time'] = dt_util.get_age
ENV.globals['strptime'] = strptime
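# Illustrative sketch: templates that only use the filters registered above
# can be rendered straight from ENV, without a hass instance.
def _example_env_filters():
    assert ENV.from_string("{{ 3 | ordinal }}").render() == '3rd'
    assert ENV.from_string("{{ 12 | bitwise_and(10) }}").render() == '8'
    assert ENV.from_string("{{ 'abc' | base64_encode }}").render() == 'YWJj'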
|
|
from common_fixtures import * # NOQA
from cattle import ApiError
from test_physical_host import disable_go_machine_service # NOQA
@pytest.fixture(scope='module')
def update_ping_settings(request, super_client):
# These settings need to be changed because they control how the ping
# handlers behave in cattle. We need to update them so that we can
# ensure the ping logic will fully run.
settings = super_client.list_setting()
originals = []
def update_setting(new_value, s):
originals.append((s, {'value': s.value}))
s = super_client.update(s, {'value': new_value})
wait_setting_active(super_client, s)
for setting in settings:
if setting.name == 'agent.ping.resources.every' and setting.value != 1:
update_setting('1', setting)
if setting.name == 'agent.resource.monitor.cache.resource.seconds' \
and setting.value != 0:
update_setting('0', setting)
def revert_settings():
for s in originals:
super_client.update(s[0], s[1])
request.addfinalizer(revert_settings)
@pytest.fixture(scope='module')
def machine_context(admin_user_client):
return create_context(admin_user_client, create_project=True,
add_host=True)
@pytest.fixture(scope='module')
def admin_client(machine_context):
return machine_context.client
@pytest.fixture(scope='module')
def admin_account(machine_context):
return machine_context.project
def test_machine_lifecycle(super_client, admin_client, admin_account,
update_ping_settings):
name = random_str()
machine = admin_client.create_machine(name=name,
virtualboxConfig={})
machine = admin_client.wait_success(machine)
assert machine.state == 'active'
assert machine.virtualboxConfig is not None
external_id = super_client.reload(machine).externalId
assert external_id is not None
# Create an agent with the externalId specified. The agent simulator will
# mimic how the go-machine-service would use this external_id to bootstrap
# an agent onto the physical host with the proper PHYSICAL_HOST_UUID set.
scope = 'io.cattle.platform.agent.connection.simulator' \
'.AgentConnectionSimulator'
uri = 'sim://{}'.format(random_str())
data = {scope: {}}
data[scope]['addPhysicalHost'] = True
data[scope]['externalId'] = external_id
account_id = get_plain_id(super_client, admin_account)
data[scope]['agentResourcesAccountId'] = account_id
data['agentResourcesAccountId'] = account_id
agent = super_client.create_agent(uri=uri, data=data)
agent = super_client.wait_success(agent)
wait_for(lambda: len(agent.hosts()) == 1)
hosts = agent.hosts()
assert len(hosts) == 1
host = hosts[0]
assert host.physicalHostId == machine.id
assert machine.accountId == host.accountId
# Need to force a ping because pings cause physical hosts to be created
# under non-machine use cases. This ensures the machine isn't overridden.
ping = one(super_client.list_task, name='agent.ping')
ping.execute()
time.sleep(.1) # The ping needs time to execute
agent = super_client.reload(agent)
hosts = agent.hosts()
assert len(hosts) == 1
host = hosts[0]
physical_hosts = host.physicalHost()
assert physical_hosts.id == machine.id
machine = admin_client.wait_success(machine.remove())
assert machine.state == 'removed'
agent = super_client.wait_success(super_client.reload(machine).agent())
assert agent.state == 'removed'
host = admin_client.wait_success(admin_client.reload(host))
assert host.state == 'removed'
def test_machine_driver_config(admin_client):
name = "test-%s" % random_str()
vbox_config = {
"memory": "2048",
"diskSize": "40000",
"boot2dockerUrl": "http://localhost/random",
}
ca = "ca-1"
key = "key-1"
host = admin_client.create_machine(name=name,
virtualboxConfig=vbox_config,
authCertificateAuthority=ca,
authKey=key)
host = admin_client.wait_success(host)
assert host.state == 'active'
assert vbox_config == host.virtualboxConfig
assert ca == host.authCertificateAuthority
assert key == host.authKey
assert host.driver == 'virtualbox'
name = "test-%s" % random_str()
digoc_config = {
"image": "img1",
"region": "reg1",
"size": "40000",
"accessToken": "ac-1",
"ipv6": True,
"privateNetworking": True,
"backups": True
}
host = admin_client.create_machine(name=name,
digitaloceanConfig=digoc_config)
host = admin_client.wait_success(host)
assert host.state == 'active'
assert digoc_config == host.digitaloceanConfig
assert host.driver == 'digitalocean'
name = "test-%s" % random_str()
ec2_config = {
"accessKey": "accesskey1",
"secretKey": "secretkey1",
"vpcId": "1234",
"subnetId": "5678",
"sessionToken": "sessiontoken1",
"ami": "ami1",
"region": "us-east-1",
"zone": "us-east-1a",
"securityGroup": "docker-machine",
"instanceType": "type1",
"rootSize": "60GB",
"iamInstanceProfile": "profile1",
}
host = admin_client.create_machine(name=name,
amazonec2Config=ec2_config)
host = admin_client.wait_success(host)
assert host.state == 'active'
assert ec2_config == host.amazonec2Config
assert host.driver == 'amazonec2'
name = "test-%s" % random_str()
packet_config = {
"apiKey": "apikey1",
"projectId": "projectId",
"os": "centos_7",
"facilityCode": "ewr1",
"plan": "baremetal_1",
"billingCycle": "hourly",
}
host = admin_client.create_machine(name=name,
packetConfig=packet_config)
host = admin_client.wait_success(host)
assert host.state == 'active'
assert packet_config == host.packetConfig
assert host.driver == 'packet'
name = "test-%s" % random_str()
azure_config = {
"dockerPort": "dockerPort",
"image": "image",
"location": "location",
"password": "password",
"publishSettingsFile": "publishSettingsFile",
"size": "size",
"subscriptionId": "subscriptionId",
"subscriptionCert": "subscriptionCert",
"username": "username",
}
host = admin_client.create_machine(name=name,
azureConfig=azure_config)
host = admin_client.wait_success(host)
assert host.state == 'active'
assert azure_config == host.azureConfig
assert host.driver == 'azure'
name = "test-%s" % random_str()
rackspace_config = {
"username": "username",
"apiKey": "apiKey",
"region": "region",
"endpointType": "endpointType",
"imageId": "imageId",
"flavorId": "flavorId",
"sshUser": "sshUser",
"sshPort": "sshPort",
"dockerInstall": "dockerInstall",
}
host = admin_client.create_machine(name=name,
rackspaceConfig=rackspace_config)
host = admin_client.wait_success(host)
assert host.state == 'active'
assert rackspace_config == host.rackspaceConfig
assert host.driver == 'rackspace'
def test_machine_validation(admin_client):
name = "test-%s" % random_str()
# Can't set two drivers
try:
admin_client.create_machine(name=name,
virtualboxConfig={},
digitaloceanConfig={"accessToken": "a"})
except ApiError as e:
assert e.error.status == 422
assert e.error.code == 'DriverConfigExactlyOneRequired'
else:
assert False, "Should not have been able to set two drivers."
# Must set at least one driver
try:
admin_client.create_machine(name=name)
except ApiError as e:
assert e.error.status == 422
assert e.error.code == 'DriverConfigExactlyOneRequired'
else:
assert False, "Should have been required to set a driver."
# Property present, but None/nil/null is acceptable
host = admin_client.create_machine(name=name,
virtualboxConfig={},
digitaloceanConfig=None)
assert host is not None
def test_digitalocean_config_validation(admin_client):
name = "test-%s" % random_str()
# accessToken is required
try:
admin_client.create_machine(name=name,
digitaloceanConfig={})
except ApiError as e:
assert e.error.status == 422
assert e.error.code == 'MissingRequired'
else:
assert False, 'Should have got MissingRequired for accessToken'
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ServiceEndpointPolicyDefinitionsOperations:
"""ServiceEndpointPolicyDefinitionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
service_endpoint_policy_definition_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'serviceEndpointPolicyDefinitionName': self._serialize.url("service_endpoint_policy_definition_name", service_endpoint_policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
service_endpoint_policy_definition_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified ServiceEndpoint policy definitions.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the Service Endpoint Policy.
:type service_endpoint_policy_name: str
:param service_endpoint_policy_definition_name: The name of the service endpoint policy
definition.
:type service_endpoint_policy_definition_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
service_endpoint_policy_definition_name=service_endpoint_policy_definition_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'serviceEndpointPolicyDefinitionName': self._serialize.url("service_endpoint_policy_definition_name", service_endpoint_policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}'} # type: ignore
async def get(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
service_endpoint_policy_definition_name: str,
**kwargs: Any
) -> "_models.ServiceEndpointPolicyDefinition":
"""Get the specified service endpoint policy definitions from service endpoint policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param service_endpoint_policy_definition_name: The name of the service endpoint policy
definition.
:type service_endpoint_policy_definition_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceEndpointPolicyDefinition, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_08_01.models.ServiceEndpointPolicyDefinition
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyDefinition"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'serviceEndpointPolicyDefinitionName': self._serialize.url("service_endpoint_policy_definition_name", service_endpoint_policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceEndpointPolicyDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
service_endpoint_policy_definition_name: str,
service_endpoint_policy_definitions: "_models.ServiceEndpointPolicyDefinition",
**kwargs: Any
) -> "_models.ServiceEndpointPolicyDefinition":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyDefinition"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'serviceEndpointPolicyDefinitionName': self._serialize.url("service_endpoint_policy_definition_name", service_endpoint_policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(service_endpoint_policy_definitions, 'ServiceEndpointPolicyDefinition')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ServiceEndpointPolicyDefinition', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ServiceEndpointPolicyDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
service_endpoint_policy_definition_name: str,
service_endpoint_policy_definitions: "_models.ServiceEndpointPolicyDefinition",
**kwargs: Any
) -> AsyncLROPoller["_models.ServiceEndpointPolicyDefinition"]:
"""Creates or updates a service endpoint policy definition in the specified service endpoint
policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param service_endpoint_policy_definition_name: The name of the service endpoint policy
definition.
:type service_endpoint_policy_definition_name: str
:param service_endpoint_policy_definitions: Parameters supplied to the create or update
service endpoint policy definition operation.
:type service_endpoint_policy_definitions: ~azure.mgmt.network.v2019_08_01.models.ServiceEndpointPolicyDefinition
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ServiceEndpointPolicyDefinition or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_08_01.models.ServiceEndpointPolicyDefinition]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyDefinition"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
service_endpoint_policy_definition_name=service_endpoint_policy_definition_name,
service_endpoint_policy_definitions=service_endpoint_policy_definitions,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicyDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'serviceEndpointPolicyDefinitionName': self._serialize.url("service_endpoint_policy_definition_name", service_endpoint_policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ServiceEndpointPolicyDefinitionListResult"]:
"""Gets all service endpoint policy definitions in a service end point policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ServiceEndpointPolicyDefinitionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_08_01.models.ServiceEndpointPolicyDefinitionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyDefinitionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicyDefinitionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions'} # type: ignore
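# Illustrative sketch (not part of the generated client): reaching this
# operation group through the versioned async management client. The import
# paths, credential type, resource names and model fields below are
# assumptions for demonstration only.
async def _example_create_or_update_definition(subscription_id: str) -> None:
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.v2019_08_01.aio import NetworkManagementClient
    async with DefaultAzureCredential() as credential:
        async with NetworkManagementClient(credential, subscription_id) as client:
            definition = _models.ServiceEndpointPolicyDefinition(
                description="Allow storage",
                service="Microsoft.Storage",
                service_resources=["/subscriptions/" + subscription_id],
            )
            poller = await client.service_endpoint_policy_definitions.begin_create_or_update(
                resource_group_name="example-rg",
                service_endpoint_policy_name="example-policy",
                service_endpoint_policy_definition_name="example-definition",
                service_endpoint_policy_definitions=definition,
            )
            result = await poller.result()
            print(result.name)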
|
|
# *******************************************
# Copyright 2010-2015, Anthony Hand
#
#
# File version 2015.05.13 (May 13, 2015)
# Updates:
# - Moved MobileESP to GitHub. https://github.com/ahand/mobileesp
# - Opera Mobile/Mini browser has the same UA string on multiple platforms and doesn't differentiate phone vs. tablet.
# - Removed DetectOperaAndroidPhone(). This method is no longer reliable.
# - Removed DetectOperaAndroidTablet(). This method is no longer reliable.
# - Added support for Windows Phone 10: variable and DetectWindowsPhone10()
# - Updated DetectWindowsPhone() to include WP10.
# - Added support for Firefox OS.
# - A variable plus DetectFirefoxOS(), DetectFirefoxOSPhone(), DetectFirefoxOSTablet()
# - NOTE: Firefox doesn't add UA tokens to definitively identify Firefox OS vs. their browsers on other mobile platforms.
# - Added support for Sailfish OS. Not enough info to add a tablet detection method at this time.
# - A variable plus DetectSailfish(), DetectSailfishPhone()
# - Added support for Ubuntu Mobile OS.
# - DetectUbuntu(), DetectUbuntuPhone(), DetectUbuntuTablet()
# - Added support for 2 smart TV OSes. They lack browsers but do have WebViews for use by HTML apps.
# - One variable for Samsung Tizen TVs, plus DetectTizenTV()
# - One variable for LG WebOS TVs, plus DetectWebOSTV()
# - Added DetectTizen(). Tests for "mobile" to disambiguate from Samsung Smart TVs.
# - Removed variables for obsolete devices: deviceHtcFlyer, deviceXoom.
# - Updated DetectAndroid(). No longer has a special test case for the HTC Flyer tablet.
# - Updated DetectAndroidPhone().
# - Updated internal detection code for Android.
# - No longer has a special test case for the HTC Flyer tablet.
# - Checks against DetectOperaMobile() on Android and reports here if relevant.
# - Updated DetectAndroidTablet().
# - No longer has a special test case for the HTC Flyer tablet.
# - Checks against DetectOperaMobile() on Android to exclude it from here.
# - DetectMeego(): Changed definition for this method. Now detects any Meego OS device, not just phones.
# - DetectMeegoPhone(): NEW. For Meego phones. Ought to detect Opera browsers on Meego, as well.
# - DetectTierIphone(): Added support for phones running Sailfish, Ubuntu and Firefox Mobile.
# - DetectTierTablet(): Added support for tablets running Ubuntu and Firefox Mobile.
# - DetectSmartphone(): Added support for Meego phones.
# - Caught this library up to the PHP, JavaScript and Java versions. Updates include:
# - Added support for Bada: a variable and DetectBada(). This detects any Bada OS device, but (almost) all are phones.
# - Refactored the Windows Phone delegate-related properties and features. Now fires for any Windows Phone, not just WP7.
# - The event fires now when DetectWindowsPhone() is true.
# - Added support for Windows Phone 8: DetectWindowsPhone8().
# - Updated DetectWindowsMobile(). Excludes any Windows Phone device, not just WP7.
# - Added support for BlackBerry 10 OS phones: DetectBlackBerry10Phone().
# - Updated DetectSmartphone() to sync with the other libraries.
# - Updated DetectTierIphone() to sync with the other libraries.
# - OnInit(EventArgs e): Fixed the user agent and httpaccept init logic.
# - Refactored the detection logic in DetectMobileQuick() and DetectMobileLong().
# - Moved a few detection tests for older browsers to Long.
#
#
#
# File version date: February 10, 2012
# Creation:
# - Cloned from http://code.google.com/p/mobileesp/source/browse/Java/UAgentInfo.java
# and http://code.google.com/p/mobileesp/source/browse/PHP/mdetect.php
# - Port to Python: Alexey Evseev ([email protected])
# - Made for www.irk.fm website
#
#
#
# LICENSE INFORMATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
# ABOUT THIS PROJECT
# Project Owner: Anthony Hand
# Email: [email protected]
# Web Site: http://www.mobileesp.com
# Source Files: https://github.com/ahand/mobileesp
#
# Versions of this code are available for:
# PHP, JavaScript, Java, ASP.NET (C#), Ruby and Python
#
# *******************************************
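# Illustrative sketch: typical use of UAgentInfo with the request headers. The
# user agent string below is a hypothetical iPhone UA for demonstration only.
def _example_uagent_usage():
    ua = ("Mozilla/5.0 (iPhone; CPU iPhone OS 9_0 like Mac OS X) "
          "AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13A344")
    agent = UAgentInfo(userAgent=ua, httpAccept="text/html")
    assert agent.detectIphone()
    assert not agent.detectAndroid()
    return agent.getIsTierIphone()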
class UAgentInfo(object):
"""The UAgentInfo class encapsulates information about
a browser's connection to your web site.
You can use it to find out whether the browser asking for
your site's content is probably running on a mobile device.
The methods were written so you can be as granular as you want:
for example, you can ask whether the device is as specific as an iPod Touch
or as general as a smartphone-class device.
The object's methods return True or False.
"""
# Initialize the smartphone string variables.
engineWebKit = "webkit"
deviceIphone = "iphone"
deviceIpod = "ipod"
deviceIpad = "ipad"
deviceMacPpc = "macintosh" #Used for disambiguation
deviceAndroid = "android"
deviceGoogleTV = "googletv"
deviceSymbian = "symbian"
deviceS60 = "series60"
deviceS70 = "series70"
deviceS80 = "series80"
deviceS90 = "series90"
deviceWinPhone7 = "windows phone os 7"
deviceWinPhone8 = "windows phone 8"
deviceWinPhone10 = "windows phone 10"
deviceWinMob = "windows ce"
deviceWindows = "windows"
deviceIeMob = "iemobile"
devicePpc = "ppc" #Stands for PocketPC
enginePie = "wm5 pie" #An old Windows Mobile
deviceBB = "blackberry"
deviceBB10 = "bb10" #For the new BB 10 OS
vndRIM = "vnd.rim" #Detectable when BB devices emulate IE or Firefox
deviceBBStorm = "blackberry95" #Storm 1 and 2
deviceBBBold = "blackberry97" #Bold 97x0 (non-touch)
deviceBBBoldTouch = "blackberry 99" #Bold 99x0 (touchscreen)
deviceBBTour = "blackberry96" #Tour
deviceBBCurve = "blackberry89" #Curve 2
deviceBBCurveTouch = "blackberry 938" #Curve Touch 9380
deviceBBTorch = "blackberry 98" #Torch
deviceBBPlaybook = "playbook" #PlayBook tablet
devicePalm = "palm"
deviceWebOS = "webos" #For Palm devices
deviceWebOStv = "web0s" #For LG TVs
deviceWebOShp = "hpwos" #For HP's line of WebOS devices
engineBlazer = "blazer" #Old Palm
engineXiino = "xiino" #Another old Palm
deviceKindle = "kindle" #Amazon Kindle, eInk one
engineSilk = "silk" #Amazon's accelerated Silk browser for Kindle Fire
deviceNuvifone = "nuvifone" #Garmin Nuvifone
deviceBada = "bada" #Samsung's Bada OS
deviceTizen = "tizen" #Tizen OS
deviceMeego = "meego" #Meego OS
deviceSailfish = "sailfish" #Sailfish OS
deviceUbuntu = "ubuntu" #Ubuntu Mobile OS
#Initialize variables for mobile-specific content.
vndwap = "vnd.wap"
wml = "wml"
#Initialize variables for other random devices and mobile browsers.
deviceTablet = "tablet" #Generic term for slate and tablet devices
deviceBrew = "brew"
deviceDanger = "danger"
deviceHiptop = "hiptop"
devicePlaystation = "playstation"
devicePlaystationVita = "vita"
deviceNintendoDs = "nitro"
deviceNintendo = "nintendo"
deviceWii = "wii"
deviceXbox = "xbox"
deviceArchos = "archos"
engineFirefox = "firefox" #For Firefox OS
engineOpera = "opera" #Popular browser
engineNetfront = "netfront" #Common embedded OS browser
engineUpBrowser = "up.browser" #common on some phones
engineOpenWeb = "openweb" #Transcoding by OpenWave server
deviceMidp = "midp" #a mobile Java technology
uplink = "up.link"
engineTelecaQ = "teleca q" #a modern feature phone browser
devicePda = "pda" #some devices report themselves as PDAs
mini = "mini" #Some mobile browsers put "mini" in their names.
mobile = "mobile" #Some mobile browsers put "mobile" in their user agent strings.
mobi = "mobi" #Some mobile browsers put "mobi" in their user agent strings.
#Smart TV strings
smartTV1 = "smart-tv" #Samsung Tizen smart TVs
smartTV2 = "smarttv" #LG WebOS smart TVs
#Use Maemo, Tablet, and Linux to test for Nokia's Internet Tablets.
maemo = "maemo"
linux = "linux"
qtembedded = "qt embedded" #for Sony Mylo
mylocom2 = "com2" #for Sony Mylo also
#In some UserAgents, the only clue is the manufacturer.
manuSonyEricsson = "sonyericsson"
manuericsson = "ericsson"
manuSamsung1 = "sec-sgh"
manuSony = "sony"
manuHtc = "htc" #Popular Android and WinMo manufacturer
#In some UserAgents, the only clue is the operator.
svcDocomo = "docomo"
svcKddi = "kddi"
svcVodafone = "vodafone"
#Disambiguation strings.
disUpdate = "update" #pda vs. update
def __init__(self, userAgent, httpAccept):
"""Initialize the __userAgent and __httpAccept variables
Keyword arguments:
userAgent -- the User-Agent header
httpAccept -- the Accept header
"""
# User-Agent and Accept HTTP request headers
self.__userAgent = userAgent.lower() if userAgent else ""
self.__httpAccept = httpAccept.lower() if httpAccept else ""
# Let's store values for quickly accessing the same info multiple times.
self.__isIphone = False
self.__isAndroidPhone = False
self.__isTierTablet = False
self.__isTierIphone = False
self.__isTierRichCss = False
self.__isTierGenericMobile = False
# Initialize key stored values.
self.initDeviceScan()
def getUserAgent(self):
"""Return the lower case HTTP_USER_AGENT"""
return self.__userAgent
def getHttpAccept(self):
"""Return the lower case HTTP_ACCEPT"""
return self.__httpAccept
def getIsIphone(self):
"""Return whether the device is an Iphone or iPod Touch"""
return self.__isIphone
def getIsTierTablet(self):
"""Return whether the device is in the Tablet Tier."""
return self.__isTierTablet
def getIsTierIphone(self):
"""Return whether the device is in the Iphone Tier."""
return self.__isTierIphone
def getIsTierRichCss(self):
"""Return whether the device is in the 'Rich CSS' tier of mobile devices."""
return self.__isTierRichCss
def getIsTierGenericMobile(self):
"""Return whether the device is a generic, less-capable mobile device."""
return self.__isTierGenericMobile
def initDeviceScan(self):
"""Initialize Key Stored Values."""
self.__isIphone = self.detectIphoneOrIpod()
self.__isAndroidPhone = self.detectAndroidPhone()
self.__isTierTablet = self.detectTierTablet()
self.__isTierIphone = self.detectTierIphone()
self.__isTierRichCss = self.detectTierRichCss()
self.__isTierGenericMobile = self.detectTierOtherPhones()
def detectIphone(self):
"""Return detection of an iPhone
Detects if the current device is an iPhone.
"""
# The iPad and iPod touch say they're an iPhone! So let's disambiguate.
return not self.detectWindowsPhone() \
and UAgentInfo.deviceIphone in self.__userAgent \
and not self.detectIpad() \
and not self.detectIpod()
def detectIpod(self):
"""Return detection of an iPod Touch
Detects if the current device is an iPod Touch.
"""
return UAgentInfo.deviceIpod in self.__userAgent
def detectIpad(self):
"""Return detection of an iPad
Detects if the current device is an iPad tablet.
"""
return UAgentInfo.deviceIpad in self.__userAgent \
and self.detectWebkit()
def detectIphoneOrIpod(self):
"""Return detection of an iPhone or iPod Touch
Detects if the current device is an iPhone or iPod Touch.
"""
#We repeat the searches here because some iPods may report themselves as an iPhone, which would be okay.
return UAgentInfo.deviceIphone in self.__userAgent \
or UAgentInfo.deviceIpod in self.__userAgent
def detectIos(self):
"""Return detection of an Apple iOS device
Detects *any* iOS device: iPhone, iPod Touch, iPad.
"""
return self.detectIphoneOrIpod() \
or self.detectIpad()
def detectAndroid(self):
"""Return detection of an Android device
Detects *any* Android OS-based device: phone, tablet, and multi-media player.
Also detects Google TV.
"""
if not self.detectWindowsPhone() \
and UAgentInfo.deviceAndroid in self.__userAgent \
or self.detectGoogleTV():
return True
return False
def detectAndroidPhone(self):
"""Return detection of an Android phone
Detects if the current device is a (small-ish) Android OS-based device
used for calling and/or multi-media (like a Samsung Galaxy Player).
Google says these devices will have 'Android' AND 'mobile' in user agent.
Ignores tablets (Honeycomb and later).
"""
#First, let's make sure we're on an Android device.
if not self.detectAndroid():
return False
#If it's Android and has 'mobile' in it, Google says it's a phone.
if UAgentInfo.mobile in self.__userAgent:
return True
#Special check for Android devices with Opera Mobile/Mini. They should report here.
if self.detectOperaMobile():
return True
return False
def detectAndroidTablet(self):
"""Return detection of an Android tablet
Detects if the current device is a (self-reported) Android tablet.
Google says these devices will have 'Android' and NOT 'mobile' in their user agent.
"""
#First, let's make sure we're on an Android device.
if not self.detectAndroid():
return False
#Special check for Android devices with Opera Mobile/Mini. They should NOT report here.
if self.detectOperaMobile():
return False
#Otherwise, if it's Android and does NOT have 'mobile' in it, Google says it's a tablet.
return UAgentInfo.mobile not in self.__userAgent
def detectAndroidWebKit(self):
"""Return detection of an Android WebKit browser
Detects if the current device is an Android OS-based device and
the browser is based on WebKit.
"""
return self.detectAndroid() \
and self.detectWebkit()
def detectGoogleTV(self):
"""Return detection of GoogleTV
Detects if the current device is a GoogleTV.
"""
return UAgentInfo.deviceGoogleTV in self.__userAgent
def detectWebkit(self):
"""Return detection of a WebKit browser
Detects if the current browser is based on WebKit.
"""
return UAgentInfo.engineWebKit in self.__userAgent
def detectS60OssBrowser(self):
"""Return detection of Symbian S60 Browser
Detects if the current browser is the Symbian S60 Open Source Browser.
"""
#First, test for WebKit, then make sure it's either Symbian or S60.
return self.detectWebkit() \
and (UAgentInfo.deviceSymbian in self.__userAgent \
or UAgentInfo.deviceS60 in self.__userAgent)
def detectSymbianOS(self):
"""Return detection of SymbianOS
Detects if the current device is any Symbian OS-based device,
including older S60, Series 70, Series 80, Series 90, and UIQ,
or other browsers running on these devices.
"""
return UAgentInfo.deviceSymbian in self.__userAgent \
or UAgentInfo.deviceS60 in self.__userAgent \
or UAgentInfo.deviceS70 in self.__userAgent \
or UAgentInfo.deviceS80 in self.__userAgent \
or UAgentInfo.deviceS90 in self.__userAgent
def detectWindowsPhone(self):
"""Return detection of a Windows Phone device
Detects if the current browser is a
Windows Phone 7, 8, or 10 device
"""
return self.detectWindowsPhone7() \
or self.detectWindowsPhone8() \
or self.detectWindowsPhone10()
def detectWindowsPhone7(self):
"""Return detection of Windows Phone 7
Detects if the current browser is a
Windows Phone 7 device.
"""
return UAgentInfo.deviceWinPhone7 in self.__userAgent
def detectWindowsPhone8(self):
"""Return detection of Windows Phone 8
Detects if the current browser is a
Windows Phone 8 device.
"""
return UAgentInfo.deviceWinPhone8 in self.__userAgent
def detectWindowsPhone10(self):
"""Return detection of Windows Phone 10
Detects if the current browser is a
Windows Phone 10 device.
"""
return UAgentInfo.deviceWinPhone10 in self.__userAgent
def detectWindowsMobile(self):
"""Return detection of Windows Mobile
Detects if the current browser is a Windows Mobile device.
Excludes Windows Phone 7 devices.
Focuses on Windows Mobile 6.xx and earlier.
"""
#Exclude new Windows Phone.
if self.detectWindowsPhone():
return False
#Most devices use 'Windows CE', but some report 'iemobile'
# and some older ones report as 'PIE' for Pocket IE.
# We also look for instances of HTC and Windows for many of their WinMo devices.
if UAgentInfo.deviceWinMob in self.__userAgent \
or UAgentInfo.deviceIeMob in self.__userAgent \
or UAgentInfo.enginePie in self.__userAgent:
return True
        # Test for certain Windows Mobile-based HTC devices.
if UAgentInfo.manuHtc in self.__userAgent \
and UAgentInfo.deviceWindows in self.__userAgent:
return True
if self.detectWapWml() \
and UAgentInfo.deviceWindows in self.__userAgent:
return True
#Test for Windows Mobile PPC but not old Macintosh PowerPC.
return UAgentInfo.devicePpc in self.__userAgent \
and UAgentInfo.deviceMacPpc not in self.__userAgent
def detectBlackBerry(self):
"""Return detection of Blackberry
Detects if the current browser is any BlackBerry.
Includes the PlayBook.
"""
return UAgentInfo.deviceBB in self.__userAgent \
or UAgentInfo.vndRIM in self.__httpAccept
def detectBlackBerry10Phone(self):
"""Return detection of a Blackberry 10 OS phone
Detects if the current browser is a BlackBerry 10 OS phone.
Excludes the PlayBook.
"""
return UAgentInfo.deviceBB10 in self.__userAgent \
and UAgentInfo.mobile in self.__userAgent
def detectBlackBerryTablet(self):
"""Return detection of a Blackberry Tablet
Detects if the current browser is on a BlackBerry tablet device.
Example: PlayBook
"""
return UAgentInfo.deviceBBPlaybook in self.__userAgent
def detectBlackBerryWebKit(self):
"""Return detection of a Blackberry device with WebKit browser
Detects if the current browser is a BlackBerry device AND uses a
WebKit-based browser. These are signatures for the new BlackBerry OS 6.
Examples: Torch. Includes the Playbook.
"""
return self.detectBlackBerry() \
and self.detectWebkit()
def detectBlackBerryTouch(self):
"""Return detection of a Blackberry touchscreen device
Detects if the current browser is a BlackBerry Touch
device, such as the Storm, Torch, and Bold Touch. Excludes the Playbook.
"""
return UAgentInfo.deviceBBStorm in self.__userAgent \
or UAgentInfo.deviceBBTorch in self.__userAgent \
or UAgentInfo.deviceBBBoldTouch in self.__userAgent \
or UAgentInfo.deviceBBCurveTouch in self.__userAgent
def detectBlackBerryHigh(self):
"""Return detection of a Blackberry device with a better browser
Detects if the current browser is a BlackBerry device AND
has a more capable recent browser. Excludes the Playbook.
        Examples: Storm, Bold, Tour, Curve2
Excludes the new BlackBerry OS 6 and 7 browser!!
"""
#Disambiguate for BlackBerry OS 6 or 7 (WebKit) browser
if self.detectBlackBerryWebKit():
return False
if not self.detectBlackBerry():
return False
return self.detectBlackBerryTouch() \
or UAgentInfo.deviceBBBold in self.__userAgent \
or UAgentInfo.deviceBBTour in self.__userAgent \
or UAgentInfo.deviceBBCurve in self.__userAgent
def detectBlackBerryLow(self):
"""Return detection of a Blackberry device with a poorer browser
Detects if the current browser is a BlackBerry device AND
has an older, less capable browser.
Examples: Pearl, 8800, Curve1
"""
if not self.detectBlackBerry():
return False
        #Assume that if it's not in the High tier or WebKit, then it's Low
        return not (self.detectBlackBerryHigh()
                    or self.detectBlackBerryWebKit())
def detectPalmOS(self):
"""Return detection of a PalmOS device
Detects if the current browser is on a PalmOS device.
"""
#Most devices nowadays report as 'Palm', but some older ones reported as Blazer or Xiino.
if UAgentInfo.devicePalm in self.__userAgent \
or UAgentInfo.engineBlazer in self.__userAgent \
or UAgentInfo.engineXiino in self.__userAgent:
# Make sure it's not WebOS
return not self.detectPalmWebOS()
return False
def detectPalmWebOS(self):
"""Return detection of a Palm WebOS device
Detects if the current browser is on a Palm device
running the new WebOS.
"""
return UAgentInfo.deviceWebOS in self.__userAgent
def detectWebOSTablet(self):
"""Return detection of an HP WebOS tablet
Detects if the current browser is on an HP tablet running WebOS.
"""
return UAgentInfo.deviceWebOShp in self.__userAgent \
and UAgentInfo.deviceTablet in self.__userAgent
def detectWebOSTV(self):
"""Return detection of a WebOS smart TV
Detects if the current browser is on a WebOS smart TV.
"""
return UAgentInfo.deviceWebOStv in self.__userAgent \
and UAgentInfo.smartTV2 in self.__userAgent
def detectGarminNuvifone(self):
"""Return detection of a Garmin Nuvifone
Detects if the current browser is a
Garmin Nuvifone.
"""
return UAgentInfo.deviceNuvifone in self.__userAgent
def detectBada(self):
"""Return detection of a Bada device
Detects a device running the Bada OS from Samsung.
"""
return UAgentInfo.deviceBada in self.__userAgent
def detectTizen(self):
"""Return detection of a Tizen device
Detects a device running the Tizen smartphone OS.
"""
return UAgentInfo.deviceTizen in self.__userAgent \
and UAgentInfo.mobile in self.__userAgent
def detectTizenTV(self):
"""Return detection of a Tizen smart TV
Detects if the current browser is on a Tizen smart TV.
"""
return UAgentInfo.deviceTizen in self.__userAgent \
and UAgentInfo.smartTV1 in self.__userAgent
def detectMeego(self):
"""Return detection of a Meego device
Detects a device running the Meego OS.
"""
return UAgentInfo.deviceMeego in self.__userAgent
def detectMeegoPhone(self):
"""Return detection of a Meego phone
Detects a phone running the Meego OS.
"""
return UAgentInfo.deviceMeego in self.__userAgent \
and UAgentInfo.mobi in self.__userAgent
def detectFirefoxOS(self):
"""Return detection of a Firefox OS mobile device
Detects a mobile device (probably) running the Firefox OS.
"""
return self.detectFirefoxOSPhone() \
or self.detectFirefoxOSTablet()
def detectFirefoxOSPhone(self):
"""Return detection of a Firefox OS phone
Detects a phone (probably) running the Firefox OS.
"""
if self.detectIos() \
or self.detectAndroid() \
or self.detectSailfish():
return False
if UAgentInfo.engineFirefox in self.__userAgent \
and UAgentInfo.mobile in self.__userAgent:
return True
return False
def detectFirefoxOSTablet(self):
"""Return detection of a Firefox OS tablet
Detects a tablet (probably) running the Firefox OS.
"""
if self.detectIos() \
or self.detectAndroid() \
or self.detectSailfish():
return False
if UAgentInfo.engineFirefox in self.__userAgent \
and UAgentInfo.deviceTablet in self.__userAgent:
return True
return False
def detectSailfish(self):
"""Return detection of a Sailfish OS device.
Detects a device running the Sailfish OS.
"""
return UAgentInfo.deviceSailfish in self.__userAgent
def detectSailfishPhone(self):
"""Return detection of a Sailfish phone
Detects a phone running the Sailfish OS.
"""
if self.detectSailfish() \
and UAgentInfo.mobile in self.__userAgent:
return True
return False
def detectUbuntu(self):
"""Return detection of an Ubuntu Mobile OS mobile device
Detects a mobile device running the Ubuntu Mobile OS.
"""
return self.detectUbuntuPhone() \
or self.detectUbuntuTablet()
def detectUbuntuPhone(self):
"""Return detection of an Ubuntu Mobile OS phone
Detects a phone running the Ubuntu Mobile OS.
"""
if UAgentInfo.deviceUbuntu in self.__userAgent \
and UAgentInfo.mobile in self.__userAgent:
return True
return False
def detectUbuntuTablet(self):
"""Return detection of an Ubuntu Mobile OS tablet
Detects a tablet running the Ubuntu Mobile OS.
"""
if UAgentInfo.deviceUbuntu in self.__userAgent \
and UAgentInfo.deviceTablet in self.__userAgent:
return True
return False
def detectBrewDevice(self):
"""Return detection of a Brew device
Detects whether the device is a Brew-powered device.
"""
return UAgentInfo.deviceBrew in self.__userAgent
def detectDangerHiptop(self):
"""Return detection of a Danger Hiptop
Detects the Danger Hiptop device.
"""
return UAgentInfo.deviceDanger in self.__userAgent \
or UAgentInfo.deviceHiptop in self.__userAgent
def detectOperaMobile(self):
"""Return detection of an Opera browser for a mobile device
Detects Opera Mobile or Opera Mini.
"""
return UAgentInfo.engineOpera in self.__userAgent \
and (UAgentInfo.mini in self.__userAgent \
or UAgentInfo.mobi in self.__userAgent)
def detectWapWml(self):
"""Return detection of a WAP- or WML-capable device
Detects whether the device supports WAP or WML.
"""
return UAgentInfo.vndwap in self.__httpAccept \
or UAgentInfo.wml in self.__httpAccept
def detectKindle(self):
"""Return detection of a Kindle
Detects if the current device is an Amazon Kindle (eInk devices only).
Note: For the Kindle Fire, use the normal Android methods.
"""
return UAgentInfo.deviceKindle in self.__userAgent \
and not self.detectAndroid()
def detectAmazonSilk(self):
"""Return detection of an Amazon Kindle Fire in Silk mode.
Detects if the current Amazon device is using the Silk Browser.
        Note: Typically used by the Kindle Fire.
"""
return UAgentInfo.engineSilk in self.__userAgent
def detectSonyPlaystation(self):
"""Return detection of Sony Playstation
Detects if the current device is a Sony Playstation.
"""
return UAgentInfo.devicePlaystation in self.__userAgent
def detectGamingHandheld(self):
"""Return detection of a gaming handheld with a modern iPhone-class browser
Detects if the current device is a handheld gaming device with
a touchscreen and modern iPhone-class browser. Includes the Playstation Vita.
"""
return UAgentInfo.devicePlaystation in self.__userAgent \
and UAgentInfo.devicePlaystationVita in self.__userAgent
def detectNintendo(self):
"""Return detection of Nintendo
Detects if the current device is a Nintendo game device.
"""
        # Check the Nintendo, Wii, and DS tokens; deviceWii and deviceNintendoDs
        # are assumed to be defined alongside deviceNintendo in UAgentInfo.
        return UAgentInfo.deviceNintendo in self.__userAgent \
            or UAgentInfo.deviceWii in self.__userAgent \
            or UAgentInfo.deviceNintendoDs in self.__userAgent
def detectXbox(self):
"""Return detection of Xbox
Detects if the current device is a Microsoft Xbox.
"""
return UAgentInfo.deviceXbox in self.__userAgent
def detectGameConsole(self):
"""Return detection of any Game Console
Detects if the current device is an Internet-capable game console.
"""
return self.detectSonyPlaystation() \
or self.detectNintendo() \
or self.detectXbox()
def detectMidpCapable(self):
"""Return detection of a MIDP mobile Java-capable device
Detects if the current device supports MIDP, a mobile Java technology.
"""
return UAgentInfo.deviceMidp in self.__userAgent \
or UAgentInfo.deviceMidp in self.__httpAccept
def detectMaemoTablet(self):
"""Return detection of a Maemo OS tablet
Detects if the current device is on one of the Maemo-based Nokia Internet Tablets.
"""
if UAgentInfo.maemo in self.__userAgent:
return True
return UAgentInfo.linux in self.__userAgent \
and UAgentInfo.deviceTablet in self.__userAgent \
and not self.detectWebOSTablet() \
and not self.detectAndroid()
def detectArchos(self):
"""Return detection of an Archos media player
Detects if the current device is an Archos media player/Internet tablet.
"""
return UAgentInfo.deviceArchos in self.__userAgent
def detectSonyMylo(self):
"""Return detection of a Sony Mylo device
Detects if the current browser is a Sony Mylo device.
"""
return UAgentInfo.manuSony in self.__userAgent \
and (UAgentInfo.qtembedded in self.__userAgent
or UAgentInfo.mylocom2 in self.__userAgent)
#*****************************
# Device Classes
#*****************************
def detectSmartphone(self):
"""Return detection of a general smartphone device
Checks to see whether the device is *any* 'smartphone'.
Note: It's better to use DetectTierIphone() for modern touchscreen devices.
"""
return self.detectTierIphone() \
or self.detectS60OssBrowser() \
or self.detectSymbianOS() \
or self.detectWindowsMobile() \
or self.detectBlackBerry() \
or self.detectMeegoPhone() \
or self.detectPalmWebOS()
def detectMobileQuick(self):
"""Return detection of any mobile device using the quicker method
Detects if the current device is a mobile device.
This method catches most of the popular modern devices.
Excludes Apple iPads and other modern tablets.
"""
#Let's exclude tablets
if self.__isTierTablet:
return False
#Most mobile browsing is done on smartphones
if self.detectSmartphone():
return True
#Catch-all for many mobile devices
if UAgentInfo.mobile in self.__userAgent:
return True
if self.detectOperaMobile():
return True
#We also look for Kindle devices
if self.detectKindle() \
or self.detectAmazonSilk():
return True
if self.detectWapWml() \
or self.detectMidpCapable() \
or self.detectBrewDevice():
return True
if UAgentInfo.engineNetfront in self.__userAgent \
or UAgentInfo.engineUpBrowser in self.__userAgent:
return True
return False
def detectMobileLong(self):
"""Return detection of any mobile device using the more thorough method
The longer and more thorough way to detect for a mobile device.
Will probably detect most feature phones,
smartphone-class devices, Internet Tablets,
Internet-enabled game consoles, etc.
This ought to catch a lot of the more obscure and older devices, also --
but no promises on thoroughness!
"""
if self.detectMobileQuick() \
or self.detectGameConsole():
return True
if self.detectDangerHiptop() \
or self.detectMaemoTablet() \
or self.detectSonyMylo() \
or self.detectArchos():
return True
if UAgentInfo.devicePda in self.__userAgent \
and UAgentInfo.disUpdate not in self.__userAgent:
return True
#detect older phones from certain manufacturers and operators.
return UAgentInfo.uplink in self.__userAgent \
or UAgentInfo.engineOpenWeb in self.__userAgent \
or UAgentInfo.manuSamsung1 in self.__userAgent \
or UAgentInfo.manuSonyEricsson in self.__userAgent \
or UAgentInfo.manuericsson in self.__userAgent \
or UAgentInfo.svcDocomo in self.__userAgent \
or UAgentInfo.svcKddi in self.__userAgent \
or UAgentInfo.svcVodafone in self.__userAgent
#*****************************
# For Mobile Web Site Design
#*****************************
def detectTierTablet(self):
"""Return detection of any device in the Tablet Tier
The quick way to detect for a tier of devices.
This method detects for the new generation of
HTML 5 capable, larger screen tablets.
Includes iPad, Android (e.g., Xoom), BB Playbook, WebOS, etc.
"""
return self.detectIpad() \
or self.detectAndroidTablet() \
or self.detectBlackBerryTablet() \
or self.detectFirefoxOSTablet() \
or self.detectUbuntuTablet() \
or self.detectWebOSTablet()
def detectTierIphone(self):
"""Return detection of any device in the iPhone/Android/WP7/WebOS Tier
The quick way to detect for a tier of devices.
This method detects for devices which can
display iPhone-optimized web content.
Includes iPhone, iPod Touch, Android, Windows Phone 7, Palm WebOS, etc.
"""
return self.__isIphone \
or self.__isAndroidPhone \
or self.detectWindowsPhone() \
or self.detectBlackBerry10Phone() \
or self.detectPalmWebOS() \
or self.detectBada() \
or self.detectTizen() \
or self.detectFirefoxOSPhone() \
or self.detectSailfishPhone() \
or self.detectUbuntuPhone() \
or self.detectGamingHandheld()
def detectTierRichCss(self):
"""Return detection of any device in the 'Rich CSS' Tier
The quick way to detect for a tier of devices.
This method detects for devices which are likely to be capable
of viewing CSS content optimized for the iPhone,
but may not necessarily support JavaScript.
Excludes all iPhone Tier devices.
"""
if not self.detectMobileQuick():
return False
#Exclude iPhone Tier and e-Ink Kindle devices
if self.detectTierIphone() \
or self.detectKindle():
return False
#The following devices are explicitly ok.
#Note: 'High' BlackBerry devices ONLY
#Older Windows 'Mobile' isn't good enough for iPhone Tier.
return self.detectWebkit() \
or self.detectS60OssBrowser() \
or self.detectBlackBerryHigh() \
or self.detectWindowsMobile() \
or UAgentInfo.engineTelecaQ in self.__userAgent
def detectTierOtherPhones(self):
"""Return detection of a mobile device in the less capable tier
The quick way to detect for a tier of devices.
This method detects for all other types of phones,
but excludes the iPhone and RichCSS Tier devices.
"""
#Exclude devices in the other 2 categories
return self.detectMobileLong() \
and not self.detectTierIphone() \
and not self.detectTierRichCss()
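
# ---------------------------------------------------------------------------
# A hedged usage sketch (added; not part of the original library). It assumes
# the enclosing class is MobileESP's UAgentInfo and that its constructor takes
# the raw User-Agent and HTTP Accept header strings, as the private
# __userAgent / __httpAccept attributes used above suggest:
#
#   agent = UAgentInfo(userAgent=request_user_agent, httpAccept=request_accept)
#   if agent.detectTierTablet():
#       layout = 'tablet'
#   elif agent.detectTierIphone():
#       layout = 'touch'
#   elif agent.detectMobileQuick():
#       layout = 'mobile'
#   else:
#       layout = 'desktop'
# ---------------------------------------------------------------------------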
|
|
"""
This is a uniform signal sampler. |br|
The module samples the given signals uniformly. |br|
*Examples*:
Please go to the *examples/acquisitions* directory for examples on how to
use the sampler. |br|
*Settings*:
Parameters of the sampler are described below.
Take a look on '__parametersDefine' function for more info on the
parameters.
Parameters of the sampler are attributes of the class which must/can
be set before the sampler is run.
Required parameters:
- a. **mSig** (*Numpy array 2D*): Input signals
- b. **tS** (*float*): time of input signals
- c. **fR** (*float*): input signals' representation sampling frequency
- d. **Tg** (*float*): patterns sampling grid
- e. **fSamp** (*float*): the requested average sampling frequency of the sampling patterns
Optional parameters:
- f. **iAlpha** (*float*): the alpha parameter (0 < iAlpha < 1).
It defines where to put the first sample as a fraction
of the sampling period.
[default = 0.5]
- g. **bMute** (*int*): mute the console output from the sampler [default = 0]
*Output*:
Description of the sampler output is below.
This is the list of attributes of the sampler class which are available
after calling the 'run' method:
Observed signals:
- a. **mObSig** (*Numpy array 2D*): The observed sampled signals
- b. **lObSig** (list): list with the observed sampled signals
Sampling patterns:
- c. **mPatts** (*Numpy array 2D*): Sampling patterns (as grid indices)
- d. **mPattsRep** (*Numpy array 2D*): Sampling patterns
(as signal representation points)
- e. **mPattsT** (*Numpy array 2D*): Sampling patterns (as time moments)
Observation matrices:
- f. **lPhi** (list): List with observation matrices.
One matrix per signal.
Additional parameters of sampling patterns:
- g. **nK_g** (*int*): the number of grid points in the sampling pattern
- h. **tTau_real** (*float*): the real time of sampling patterns
- i. **nK_s** (*int*): the expected number of sampling points in a pattern
- j. **f_s** (*float*): the expected sampling frequency
- k. **nT** (*int*): the expected sampling period (as grid pts)
- l. **tT_s** (*float*): the expected sampling period
*Author*:
Jacek Pierzchlewski, Aalborg University, Denmark. <[email protected]>
*Version*:
1.0 | 29-JAN-2015 : * Initial version. |br|
2.0 | 14-AUG-2015 : * Objectified version (2.0) |br|
2.1 | 17-AUG-2015 : * Observation matrices are gathered in list, not in 3D matrix |br|
2.1r1 | 18-AUG-2015 : * Adjusted to RxCSObject v1.0 |br|
2.1r2 | 03-OCT-2015 : * Secured against floating-point precision errors |br|
2.2 | 13-OCT-2015 : * List with observed signal is added |br|
2.2r1 | 16-JAN-2016 : * Fix in calculation of the expected average sampling period [round instead of ceil] |br|
*License*:
BSD 2-Clause
"""
from __future__ import division
import math
import rxcs
import numpy as np
class uniform(rxcs._RxCSobject):
def __init__(self, *args):
rxcs._RxCSobject.__init__(self) # Make it a RxCS object
self.strRxCSgroup = 'Acquisition' # Name of group of RxCS modules
self.strModuleName = 'Uniform sampler' # Module name
self.__inputSignals() # Define input signals
self.__parametersDefine() # Define the parameters
# Input signals
def __inputSignals(self):
        # 1d/2d array with input signals, one signal per row
self.paramAddMan('mSig', 'Input signals')
self.paramType('mSig', np.ndarray)
self.paramTypeEl('mSig', (int, float))
self.paramNDimLE('mSig', 2)
# Input signals representation sampling frequency
self.paramAddMan('fR', 'Input signals representation sampling frequency', unit='Hz')
self.paramType('fR', (int, float))
self.paramH('fR', 0)
self.paramL('fR', np.inf)
# Time of input signals
self.paramAddMan('tS', 'Time of input signals', unit='s')
self.paramType('tS', (int, float))
self.paramH('tS', 0)
self.paramL('tS', np.inf)
# Define parameters
def __parametersDefine(self):
# Patterns sampling grid
self.paramAddMan('Tg', 'Patterns sampling grid', unit='s')
self.paramType('Tg', (int, float)) # Must be of int or float type
self.paramH('Tg', 0) # Patterns sampling grid must be higher than zero
self.paramL('Tg', np.inf) # ...and lower than infinity
# Requested sampling frequency
self.paramAddMan('fSamp', 'Requested sampling frequency', unit='Hz')
self.paramType('fSamp', (int, float)) # Must be of int or float type
self.paramH('fSamp', 0) # Requested sampling frequency must be higher than zero
self.paramL('fSamp', np.inf) # ...and lower than infinity
# Alpha parameter
self.paramAddOpt('iAlpha', 'Alpha parameter', default=0.5)
self.paramType('iAlpha', (int, float)) # Must be of int or float type
        self.paramH('iAlpha', 0)      # The alpha parameter must be higher than zero
self.paramL('iAlpha', 1) # ...and lower than one
# 'Mute the output' flag
self.paramAddOpt('bMute', 'Mute the output', noprint=1, default=0)
self.paramType('bMute', int) # Must be of int type
self.paramAllowed('bMute',[0, 1]) # It can be either 1 or 0
# Run
def run(self):
        self.parametersCheck()         # Check if all the needed parameters are in place and are correct
self.parametersPrint() # Print the values of parameters
self.engineStartsInfo() # Info that the engine starts
self.__engine() # Run the engine
self.engineStopsInfo() # Info that the engine ends
return self.__dict__ # Return dictionary with the parameters
# Engine of the function
def __engine(self):
self._computeParam() # Compute parameters of sampling
self._checkConf() # Check configuration of sampling
self._generatePatterns() # Generate the sampling patterns
self._sampleSignals() # Sample the signals
self._generObser() # Generate the observation matrices
return
# Compute parameters
def _computeParam(self):
"""
This function computes parameters of sampling.
Args:
none
Returns:
none
List of variables added by function to the object:
nK_g (float): the number of grid points in the sampling pattern
tTau_real (float): the real time of sampling patterns
nK_s (float): the expected number of sampling points in a pattern
f_s (float): the expected average sampling frequency
nT (float): the expected average sampling period (as grid pts)
tT_s (float): the expected average sampling period
"""
# Calculate the number of grid points in the sampling period
nK_g = math.floor(self.tS / self.Tg)
# Calculate the real time of sampling patterns
tTau_real = nK_g * self.Tg
# Calculate the expected number of sampling points in a pattern
nK_s = int(round(tTau_real * self.fSamp))
# Calculate the expected average sampling frequency
f_s = nK_s / tTau_real
# Calculate the expected average sampling period
tT_s = 1 / f_s
# Calculate the expected average sampling period and recalculate it to
# the grid
nT = int(round(1 / (f_s * self.Tg)))
self.nK_g = nK_g # the number of grid points in the sampling pattern
self.tTau_real = tTau_real # the real time of sampling patterns
self.nK_s = nK_s # the expected number of sampling points in a pattern
self.f_s = f_s # the expected average sampling frequency
self.nT = nT # the expected average sampling period (as grid pts)
self.tT_s = tT_s # the expected average sampling period
return
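    # Worked example (added for clarity; values are illustrative only):
    # for tS = 1 s, Tg = 1e-3 s and fSamp = 100 Hz the computations above give
    #   nK_g      = floor(1 / 1e-3)          = 1000 grid points
    #   tTau_real = 1000 * 1e-3              = 1.0 s
    #   nK_s      = round(1.0 * 100)         = 100 sampling points
    #   f_s       = 100 / 1.0                = 100 Hz
    #   tT_s      = 1 / 100                  = 0.01 s
    #   nT        = round(1 / (100 * 1e-3))  = 10 grid points per sampling period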
def _checkConf(self):
"""
This function checks configuration of sampling
Args:
none
Returns:
none
"""
# -----------------------------------------------------------------
# Check if the number of grid points in patterns is higher than 0
if not self.nK_g > 0:
strError = ('Real number of grid points in patterns must be higher ')
strError = strError + ('than zero')
raise ValueError(strError)
# -----------------------------------------------------------------
# Check if the real time of patterns is higher than 0
if not self.tTau_real > 0:
strError = ('Real time of patterns must be higher than zero')
raise ValueError(strError)
# -----------------------------------------------------------------
# Check if the expected number of sampling points is higher than 0
if not self.nK_s > 0:
strError = ('The expected number of sampling points in patterns ')
strError = strError + ('must be higher than zero')
raise ValueError(strError)
# -----------------------------------------------------------------
# Check if the time of patterns is equal to the time of signals to be
# sampled
if (self.tTau_real - self.tS) > self.tS/1e12:
strError = ('The real time of patterns is different than the time ')
strError = strError + ('of signals to be sampled')
raise ValueError(strError)
# -----------------------------------------------------------------
# Check if the expected number of sampling points is lower or equal
# to the number of grid points
if not self.nK_g >= self.nK_s:
strError = ('The real number of grid points in patterns must be ')
strError = strError + ('higher or equal to the number of expected ')
strError = strError + ('sampling points')
raise ValueError(strError)
# -----------------------------------------------------------------
# Check if the signal representation sampling frequency is compatible
# with the sampling period
if np.abs(np.round(self.Tg * self.fR) - (self.Tg * self.fR)) > 1e-6:
strError = ('The chosen sampling grid period is incompatible with ')
strError = strError + ('the signals representation sampling ')
strError = strError + ('frequency')
raise ValueError(strError)
# -----------------------------------------------------------------
return
# Generate the sampling patterns
def _generatePatterns(self):
"""
This function generates the required number of uniform sampling patterns.
Args:
none
Returns:
none
List of variables added by function to the object:
nSigs (number): the number of input signals
mPatts (matrix): the sampling patterns (grid indices)
mPattsRep (matrix): the sampling patterns (signal rep. sampling points)
mPattsT (matrix): the sampling patterns (time moments)
"""
# Make the matrix with signals 2 dim, if it is 1 dim
if self.mSig.ndim == 1:
self.mSig = self.mSig.copy()
self.mSig.shape = (1, self.mSig.size)
(nSigs, _) = self.mSig.shape # The number of input signals
# Allocate the matrix for all the sampling patterns
mPatts = np.ones((nSigs, self.nK_s), dtype='int64')
# Generate a vector with a sampling pattern
vPattern = self._uniform_engine(self.nK_s, self.nT, self.iAlpha, self.nK_g)
        # Replicate the generated pattern for every signal and store it
mPatts = np.tile(vPattern,(nSigs, 1))
mPatts = mPatts.astype(int)
# The patterns engine generates patterns in range <1 ; N>, where
# N is the number of possible positions of a sampling points.
# Because Numpy indexes arrays from 0, the patterns should be represented
# in range from <0 ; N-1>
mPatts = mPatts - 1
# --------------------------------------------------------------
# Compute the number of signal representation points which equals
# one grid point
iGridvsRep = int(np.round((self.Tg * self.fR)))
# Recalculate the patterns to the signal representation sampling points
mPattsRep = iGridvsRep * mPatts
# --------------------------------------------------------------
# Recalculate the patterns to the time moments
vTSig = (1 / self.fR) * np.arange(int(np.round(self.tTau_real * self.fR))) # The time vector of the input signal
mPattsT = vTSig[mPattsRep]
self.nSigs = nSigs
self.mPatts = mPatts
self.mPattsRep = mPattsRep
self.mPattsT = mPattsT
return
# Sample the signals
def _sampleSignals(self):
"""
This function samples signals using the previously generated
sampling patterns.
Args:
none
Returns:
none
List of variables added by function to the object:
mObSig (matrix): the observed signals
"""
self.mObSig = (self.mSig[np.arange(self.nSigs), self.mPattsRep.T]).T # Sample the signals
# Put the observed signals into a list
self.lObSig = []
for inxSig in range(self.nSigs):
self.lObSig.append(self.mObSig[inxSig, :])
return
# Generate the observation matrices
def _generObser(self):
"""
This function generates the observation matrices.
Args:
none
Returns:
none
List of variables added by function to the object:
lPhi (list): list with the observation matrices
"""
nSmp = int(round(self.tS * self.fR)) # The number of representation samples in the input signals
# Generate the observation matrix for the first sampling pattern
mPhi = np.zeros((self.nK_s, nSmp)) # Allocate the first observation matrix
vPatts = self.mPattsRep[0, :] # Get the sampling pattern
inxRow = 0
        for inxCol in vPatts:    # <- loop over all sampling points in pattern
mPhi[inxRow, int(inxCol)] = 1
inxRow = inxRow + 1
lPhi = [mPhi] # Put the matrix into a list
self.lPhi = lPhi
return
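    # Worked example (added for clarity; values are illustrative only):
    # with nSmp = 6 representation samples and a sampling pattern
    # mPattsRep[0, :] = [1, 3, 4], the observation matrix built above is
    #
    #   mPhi = [[0, 1, 0, 0, 0, 0],
    #           [0, 0, 0, 1, 0, 0],
    #           [0, 0, 0, 0, 1, 0]]
    #
    # so that mPhi.dot(x) picks exactly the observed samples of a signal x.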
# =================================================================
# Uniform engine
# =================================================================
def _uniform_engine(self, nK_s, nT, iAlpha, K_g):
"""
Args:
nK_s: [int] the number of wanted sampling points in a pattern
nT: [int] the sampling period
(expressed in the number of grid points)
iAlpha: [float] the alpha parameter
K_g: [int] the number of grid points in a pattern
"""
# Product of the alpha parameter and the sampling period
# (expressed in the number of grid points)
K_alpha = np.round(iAlpha * nT)
# Generate a pattern
vPattern = np.arange(K_alpha, nK_s*nT+K_alpha, nT)
        # Clean the pattern (drop any sampling point beyond the number of grid points)
vPattern = vPattern[vPattern < K_g]
vPattern = vPattern[vPattern >= 0]
# -----------------------------------------------------------------
return vPattern
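
# ---------------------------------------------------------------------------
# A minimal usage sketch (added; assumes the 'rxcs' package is importable and,
# as the module docstring states, that parameters are set as plain attributes
# before calling run()). The values mirror the worked example in _computeParam.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    uniSamp = uniform()                      # the sampler defined above
    uniSamp.mSig = np.random.randn(1, 1000)  # 1 signal, 1000 representation samples
    uniSamp.fR = 1e3                         # representation sampling frequency [Hz]
    uniSamp.tS = 1.0                         # time of the input signal [s]
    uniSamp.Tg = 1e-3                        # sampling grid period [s]
    uniSamp.fSamp = 100                      # requested average sampling frequency [Hz]
    uniSamp.run()
    print(uniSamp.mObSig.shape)              # expected: (1, 100) observed samples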
|
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 9 14:58:39 2015
@author: jpk
This script produces a report of observations taken over the last 7 days at SALT,
prints it to the terminal, and writes it to a file.
The script runs from today and queries the sdb for data going back 7 days.
"""
import os
import sys
import getopt
import pandas as pd
import pandas.io.sql as psql
import MySQLdb
import matplotlib.pyplot as pl
import numpy as np
from datetime import datetime
import report_queries as rq
def string_header(dr):
'''
format the header to be printed and written to file
'''
s = dr.ix[0].to_string().split('\n')
txt = '''
*************** SALT Weekly Observing Stats *****************
A report for %s to
%s
''' %(s[0], s[1])
return txt
def string_weekly_total_time_breakdown(wttb):
    # determine the percentages of time, broken down into categories
t = pd.Series(wttb.stack(), index = wttb.stack().index)
t.index = t.index.get_level_values(1)
per = pd.Series(np.zeros(len(t)), index = t.index)
per['Weather':'Science'] = t['Weather':'Science'] / t.Total * 100
per['TimeLostToWeather': 'ScienceTime'] = per['Weather':'Science']
# write out the string:
txt = '''
-------------------------------------------------------------
Time Breakdown:
---------------
Science time: {} ({:,.0f}%)
Engineering time: {} ({:,.0f}%)
Weather: {} ({:,.0f}%)
Problems: {} ({:,.0f}%)
--
Total: {}
'''.format(t.ScienceTime, per.Science,
t.EngineeringTime, per.Engineering,
t.TimeLostToWeather, per.Weather,
t.TimeLostToProblems, per.Problems,
t.NightLength)
return txt
def string_weekly_priority_breakdown(wpb):
# create a percentage column
wpb['per'] = pd.Series(np.zeros(len(wpb)), index = wpb.index)
# determine the percentage from the Time column which is in seconds
wpb.per = (wpb.Tsec / wpb.Tsec.sum()) * 100
txt = wpb.to_string(columns=['Priority', 'No. Blocks', 'per'],
index=False,
header=False,
formatters={'per':'({:,.0f}%)'.format,
'Priority':' {:>5} '.format,
'No. Blocks':' {0:,.0f} '.format})
hdr = '''
-------------------------------------------------------------
Priority BreakDown:
-------------------
Priority No. Blocks
'''
ftr = '''
--
Total {0:,.0f}
'''.format(wpb['No. Blocks'].sum())
return hdr + txt + ftr
def string_weekly_subsystem_breakdown(wsb):
# calculate the percentage of time breakdown
# create a new percentage column
wsb['per'] = pd.Series(np.zeros(len(wsb)), index = wsb.index)
# determine the percentage from the Time column which is in seconds
wsb.per = (wsb.Time / wsb.Time.sum()) * 100
# create a string object to be printed and written to file
txt = wsb.to_string(columns=['SaltSubsystem', 'TotalTime', 'per'],
index=False,
header=False,
formatters={'SaltSubsystem':' {:>11} '.format,
'per':'({:,.0f}%)'.format,
'TotalTime':' {} '.format })
hdr = '''
-------------------------------------------------------------
Problems Time Breakdown
---------------------
SALT Subsystem Total Time
'''
return hdr + txt
def print_to_screen(txt):
'''
this function prints the formatted string to the terminal
'''
ftr = '''
****************** End of Weekly Report *********************
'''
print txt + ftr
return
def write_to_file(dr, txt, dirname='./logs/'):
'''
    this function writes the text to a file and names the report according
to the date range specified
'''
filename = 'weekly_report_' + datetime.strftime(dr.StartDate[0], '%Y%m%d') + \
'-' + datetime.strftime(dr.EndDate[0], '%Y%m%d') + '.txt'
ftr = '''
****************** End of Weekly Report *********************
'''
with open(dirname+filename, 'w') as f:
f.write(txt + ftr)
def commandLine(argv):
# executes if module is run from the command line
# Testing a datetime check
# if type(arg) is not datetime.date:
# raise TypeError('arg must be a datetime.date, not a %s' % type(arg))
dprint("Reading command line options")
# read command line options
try:
opts,args = getopt.getopt(sys.argv[1:],"vdct:f:i:r:o",
["verbose","debug","current", "target-id=","filter=","instrument=","radius=","ocs","help"])
except getopt.GetoptError, inst:
print inst
print 'Use --help to get a list of options'
sys.exit(2)
ra, dec, filter, ins, radius, target_id = "","","","","",""
use_current_pointing = False
use_ocs = False
global verbose
global debug
# parse them to the relevant variables
for opt, arg in opts:
if opt in ('--help'):
usage()
elif opt in ('-v','--verbose'):
verbose=True
elif opt in ('-d','--debug'):
verbose=True # implied
debug=True
elif opt in ('-f','--filter'):
filter = arg
elif opt in ('-i','--instrument'):
ins = arg
elif opt in ('-r','--radius'):
radius = float(arg)
elif opt in ('-t','--target-id'):
target_id = arg
elif opt in ('-c','--current'):
use_current_pointing = True
elif opt in ('-o','--ocs'):
use_ocs = True
else:
print 'Unknown option: ' + opt
usage()
if __name__=='__main__':
# open mysql connection to the sdb
mysql_con = MySQLdb.connect(host='sdb.cape.saao.ac.za',
port=3306, user=os.environ['SDBUSER'],
passwd=os.environ['SDBPASS'], db='sdb')
obsdate = sys.argv[1]
date = '{}-{}-{}'.format(obsdate[0:4], obsdate[4:6], obsdate[6:8])
interval = sys.argv[2]
# use the connection to get the required data: _d
dr_d = rq.date_range(mysql_con, date, interval=interval)
wpb_d = rq.weekly_priority_breakdown(mysql_con, date, interval=interval)
wtb_d = rq.weekly_time_breakdown(mysql_con, date, interval=interval)
wttb_d = rq.weekly_total_time_breakdown(mysql_con, date, interval=interval)
wsb_d = rq.weekly_subsystem_breakdown(mysql_con, date, interval=interval)
# TESTING: save the dataframes
dr_d.save('dr_d')
wpb_d.save('wpd_d')
wtb_d.save('wtb_d')
wttb_d.save('wttd_d')
wsb_d.save('wsb_d')
# format the string needed to print and write to file: _t
dr_t = string_header(dr_d)
wpd_t = string_weekly_priority_breakdown(wpb_d)
wttb_t = string_weekly_total_time_breakdown(wttb_d)
wsb_t = string_weekly_subsystem_breakdown(wsb_d)
# print the report to the terminal
print_to_screen(dr_t + wpd_t + wttb_t + wsb_t)
# write the report to file
write_to_file(dr_d, dr_t + wpd_t + wttb_t + wsb_t)
mysql_con.close()
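
# Hedged usage note (added): the script expects an observing date (YYYYMMDD)
# and an interval in days on the command line, plus SDBUSER / SDBPASS in the
# environment for the sdb connection, e.g.
#
#   SDBUSER=... SDBPASS=... python <this_script>.py 20150316 7
#
# which builds the weekly report for the given date and interval and writes it
# to ./logs/weekly_report_<start>-<end>.txt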
|
|
from datetime import timedelta
from django.contrib.auth.models import Group, Permission
from django.test import TestCase
from django.urls import reverse
from django.utils import timezone
from freezegun import freeze_time
from wagtail.admin.log_action_registry import LogActionRegistry
from wagtail.core.models import GroupPagePermission, Page, PageLogEntry, PageViewRestriction
from wagtail.tests.testapp.models import SimplePage
from wagtail.tests.utils import WagtailTestUtils
def test_hook(actions):
return actions.register_action('test.custom_action', 'Custom action', 'Tested!')
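
# Note (added): in a real project the same callable would typically be wired up
# through Wagtail's hooks mechanism, e.g. in an app's wagtail_hooks.py:
#
#   from wagtail.core import hooks
#
#   @hooks.register('register_log_actions')
#   def register_custom_log_actions(actions):
#       actions.register_action('test.custom_action', 'Custom action', 'Tested!')
#
# The tests below attach the hook temporarily via self.register_hook(...).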
class TestAuditLogHooks(TestCase, WagtailTestUtils):
def setUp(self):
self.root_page = Page.objects.get(id=2)
def test_register_log_actions_hook(self):
        # the LogActionRegistry should pick up the core log actions by default;
        # test that 'wagtail.create' can be retrieved here
log_actions = LogActionRegistry()
actions = log_actions.get_actions()
self.assertIn('wagtail.create', actions)
def test_action_format_message(self):
log_entry = PageLogEntry.objects.log_action(self.root_page, action='test.custom_action')
log_actions = LogActionRegistry()
self.assertEqual(log_actions.format_message(log_entry), "Unknown test.custom_action")
self.assertNotIn('test.custom_action', log_actions.get_actions())
with self.register_hook('register_log_actions', test_hook):
log_actions = LogActionRegistry()
self.assertIn('test.custom_action', log_actions.get_actions())
self.assertEqual(log_actions.format_message(log_entry), "Tested!")
self.assertEqual(log_actions.get_action_label('test.custom_action'), 'Custom action')
class TestAuditLogAdmin(TestCase, WagtailTestUtils):
def setUp(self):
self.root_page = Page.objects.get(id=2)
self.hello_page = SimplePage(
title="Hello world!",
slug='hello-world',
content="hello",
live=False,
)
self.root_page.add_child(instance=self.hello_page)
self.about_page = SimplePage(title="About", slug="about", content="hello")
self.root_page.add_child(instance=self.about_page)
self.administrator = self.create_superuser(
username='administrator',
email='[email protected]',
password='password'
)
self.editor = self.create_user(username='the_editor', email='[email protected]', password='password')
sub_editors = Group.objects.create(name='Sub editors')
sub_editors.permissions.add(Permission.objects.get(
content_type__app_label='wagtailadmin',
codename='access_admin'
))
self.editor.groups.add(sub_editors)
for permission_type in ['add', 'edit', 'publish']:
GroupPagePermission.objects.create(
group=sub_editors, page=self.hello_page, permission_type=permission_type
)
def _update_page(self, page):
# save revision
page.save_revision(user=self.editor, log_action=True)
# schedule for publishing
page.go_live_at = timezone.now() + timedelta(seconds=1)
revision = page.save_revision(user=self.editor, log_action=True)
revision.publish(user=self.editor)
# publish
with freeze_time(timezone.now() + timedelta(seconds=2)):
revision.publish(user=self.editor)
# lock/unlock
page.save(user=self.editor, log_action='wagtail.lock')
page.save(user=self.editor, log_action='wagtail.unlock')
# change privacy
restriction = PageViewRestriction(page=page, restriction_type=PageViewRestriction.LOGIN)
restriction.save(user=self.editor)
restriction.restriction_type = PageViewRestriction.PASSWORD
restriction.save(user=self.administrator)
restriction.delete()
def test_page_history(self):
self._update_page(self.hello_page)
history_url = reverse('wagtailadmin_pages:history', kwargs={'page_id': self.hello_page.id})
self.login(user=self.editor)
response = self.client.get(history_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Created", 1)
self.assertContains(response, "Draft saved", 2)
self.assertContains(response, "Locked", 1)
self.assertContains(response, "Unlocked", 1)
self.assertContains(response, "Page scheduled for publishing", 1)
self.assertContains(response, "Published", 1)
        self.assertContains(
            response, "Added the 'Private, accessible to logged-in users' view restriction"
        )
        self.assertContains(
            response,
            "Updated the view restriction to 'Private, accessible with the following password'"
        )
        self.assertContains(
            response,
            "Removed the 'Private, accessible with the following password' view restriction"
        )
self.assertContains(response, 'system', 2) # create without a user + remove restriction
self.assertContains(response, 'the_editor', 9) # 7 entries by editor + 1 in sidebar menu + 1 in filter
self.assertContains(response, 'administrator', 2) # the final restriction change + filter
def test_page_history_filters(self):
self.login(user=self.editor)
self._update_page(self.hello_page)
history_url = reverse('wagtailadmin_pages:history', kwargs={'page_id': self.hello_page.id})
response = self.client.get(history_url + '?action=wagtail.edit')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Draft saved", count=2)
self.assertNotContains(response, "Locked")
self.assertNotContains(response, "Unlocked")
self.assertNotContains(response, "Page scheduled for publishing")
self.assertNotContains(response, "Published")
def test_site_history(self):
self._update_page(self.hello_page)
self.about_page.save_revision(user=self.administrator, log_action=True)
self.about_page.delete(user=self.administrator)
site_history_url = reverse('wagtailadmin_reports:site_history')
# the editor has access to the root page, so should see everything
self.login(user=self.editor)
response = self.client.get(site_history_url)
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, 'About')
self.assertContains(response, "Draft saved", 2)
self.assertNotContains(response, 'Deleted')
# once a page is deleted, its log entries are only visible to super admins or users with
# permissions on the root page
self.hello_page.delete(user=self.administrator)
response = self.client.get(site_history_url)
self.assertContains(response, "No log entries found")
# add the editor user to the Editors group which has permissions on the root page
self.editor.groups.add(Group.objects.get(name='Editors'))
response = self.client.get(site_history_url)
self.assertContains(response, 'About', 3) # create, save draft, delete
self.assertContains(response, 'Created', 2)
self.assertContains(response, 'Deleted', 2)
# check with super admin
self.login(user=self.administrator)
response = self.client.get(site_history_url)
self.assertContains(response, 'About', 3) # create, save draft, delete
self.assertContains(response, 'Deleted', 2)
def test_edit_form_has_history_link(self):
self.hello_page.save_revision()
self.login(user=self.editor)
response = self.client.get(
reverse('wagtailadmin_pages:edit', args=[self.hello_page.id])
)
self.assertEqual(response.status_code, 200)
history_url = reverse('wagtailadmin_pages:history', args=[self.hello_page.id])
self.assertContains(response, history_url)
def test_create_and_publish_does_not_log_revision_save(self):
self.login(user=self.administrator)
post_data = {
'title': "New page!",
'content': "Some content",
'slug': 'hello-world-redux',
'action-publish': 'action-publish',
}
response = self.client.post(
reverse('wagtailadmin_pages:add', args=('tests', 'simplepage', self.root_page.id)),
post_data, follow=True
)
self.assertEqual(response.status_code, 200)
page_id = Page.objects.get(path__startswith=self.root_page.path, slug='hello-world-redux').id
self.assertListEqual(
list(PageLogEntry.objects.filter(page=page_id).values_list('action', flat=True)),
['wagtail.publish', 'wagtail.create']
)
def test_revert_and_publish_logs_reversion_and_publish(self):
revision = self.hello_page.save_revision(user=self.editor)
self.hello_page.save_revision(user=self.editor)
self.login(user=self.administrator)
response = self.client.post(
reverse('wagtailadmin_pages:edit', args=(self.hello_page.id, )),
{
'title': "Hello World!",
'content': "another hello",
'slug': 'hello-world',
'revision': revision.id, 'action-publish': 'action-publish'}, follow=True
)
self.assertEqual(response.status_code, 200)
entries = PageLogEntry.objects.filter(page=self.hello_page).values_list('action', flat=True)
self.assertListEqual(
list(entries),
['wagtail.publish', 'wagtail.rename', 'wagtail.revert', 'wagtail.create']
)
|
|
# -*- coding: utf-8 -*-
'''
:maintainer: Evan Borgstrom <[email protected]>
Pythonic object interface to creating state data, see the pyobjects renderer
for more documentation.
'''
from __future__ import absolute_import
import inspect
import logging
from salt.utils.odict import OrderedDict
import salt.ext.six as six
REQUISITES = ('listen', 'onchanges', 'onfail', 'require', 'watch', 'use', 'listen_in', 'onchanges_in', 'onfail_in', 'require_in', 'watch_in', 'use_in')
log = logging.getLogger(__name__)
class StateException(Exception):
pass
class DuplicateState(StateException):
pass
class InvalidFunction(StateException):
pass
class Registry(object):
'''
    The Registry holds all of the states that have been created.
'''
states = OrderedDict()
requisites = []
includes = []
extends = OrderedDict()
enabled = True
@classmethod
def empty(cls):
cls.states = OrderedDict()
cls.requisites = []
cls.includes = []
cls.extends = OrderedDict()
@classmethod
def include(cls, *args):
if not cls.enabled:
return
cls.includes += args
@classmethod
def salt_data(cls):
states = OrderedDict([
(id_, states_)
for id_, states_ in six.iteritems(cls.states)
])
if cls.includes:
states['include'] = cls.includes
if cls.extends:
states['extend'] = OrderedDict([
(id_, states_)
for id_, states_ in six.iteritems(cls.extends)
])
cls.empty()
return states
@classmethod
def add(cls, id_, state, extend=False):
if not cls.enabled:
return
if extend:
attr = cls.extends
else:
attr = cls.states
if id_ in attr:
if state.full_func in attr[id_]:
raise DuplicateState(
"A state with id '{0!r}', type '{1!r}' exists".format(
id_,
state.full_func
)
)
else:
attr[id_] = OrderedDict()
# if we have requisites in our stack then add them to the state
if len(cls.requisites) > 0:
for req in cls.requisites:
if req.requisite not in state.kwargs:
state.kwargs[req.requisite] = []
state.kwargs[req.requisite].append(req())
attr[id_].update(state())
@classmethod
def extend(cls, id_, state):
cls.add(id_, state, extend=True)
@classmethod
def make_extend(cls, name):
return StateExtend(name)
@classmethod
def push_requisite(cls, requisite):
if not cls.enabled:
return
cls.requisites.append(requisite)
@classmethod
def pop_requisite(cls):
if not cls.enabled:
return
del cls.requisites[-1]
class StateExtend(object):
def __init__(self, name):
self.name = name
class StateRequisite(object):
def __init__(self, requisite, module, id_):
self.requisite = requisite
self.module = module
self.id_ = id_
def __call__(self):
return {self.module: self.id_}
def __enter__(self):
Registry.push_requisite(self)
def __exit__(self, type, value, traceback):
Registry.pop_requisite()
class StateFactory(object):
'''
The StateFactory is used to generate new States through a natural syntax
It is used by initializing it with the name of the salt module::
File = StateFactory("file")
Any attribute accessed on the instance returned by StateFactory is a lambda
that is a short cut for generating State objects::
File.managed('/path/', owner='root', group='root')
The kwargs are passed through to the State object
'''
def __init__(self, module, valid_funcs=None):
self.module = module
if valid_funcs is None:
valid_funcs = []
self.valid_funcs = valid_funcs
def __getattr__(self, func):
if len(self.valid_funcs) > 0 and func not in self.valid_funcs:
raise InvalidFunction('The function {0!r} does not exist in the '
'StateFactory for {1!r}'.format(
func,
self.module
))
def make_state(id_, **kwargs):
return State(
id_,
self.module,
func,
**kwargs
)
return make_state
def __call__(self, id_, requisite='require'):
'''
When an object is called it is being used as a requisite
'''
# return the correct data structure for the requisite
return StateRequisite(requisite, self.module, id_)
class State(object):
'''
This represents a single item in the state tree
The id_ is the id of the state, the func is the full name of the salt
state (i.e. file.managed). All the keyword args you pass in become the
properties of your state.
'''
def __init__(self, id_, module, func, **kwargs):
self.id_ = id_
self.module = module
self.func = func
# our requisites should all be lists, but when you only have a
# single item it's more convenient to provide it without
# wrapping it in a list. transform them into a list
for attr in REQUISITES:
if attr in kwargs:
try:
iter(kwargs[attr])
except TypeError:
kwargs[attr] = [kwargs[attr]]
self.kwargs = kwargs
if isinstance(self.id_, StateExtend):
Registry.extend(self.id_.name, self)
self.id_ = self.id_.name
else:
Registry.add(self.id_, self)
self.requisite = StateRequisite('require', self.module, self.id_)
@property
def attrs(self):
kwargs = self.kwargs
# handle our requisites
for attr in REQUISITES:
if attr in kwargs:
# rebuild the requisite list transforming any of the actual
# StateRequisite objects into their representative dict
kwargs[attr] = [
req() if isinstance(req, StateRequisite) else req
for req in kwargs[attr]
]
# build our attrs from kwargs. we sort the kwargs by key so that we
# have consistent ordering for tests
return [
{k: kwargs[k]}
for k in sorted(six.iterkeys(kwargs))
]
@property
def full_func(self):
return "{0!s}.{1!s}".format(self.module, self.func)
def __str__(self):
return "{0!s} = {1!s}:{2!s}".format(self.id_, self.full_func, self.attrs)
def __call__(self):
return {
self.full_func: self.attrs
}
def __enter__(self):
Registry.push_requisite(self.requisite)
def __exit__(self, type, value, traceback):
Registry.pop_requisite()
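
# A hedged usage sketch (added; not part of the original module). Inside a
# pyobjects-rendered SLS file, states are declared through StateFactory and
# collected by the Registry, e.g.
#
#   Pkg = StateFactory('pkg')
#   File = StateFactory('file')
#
#   with Pkg.installed('nginx'):
#       File.managed('/etc/nginx/nginx.conf', source='salt://nginx.conf')
#
# Entering the ``with`` block pushes a 'require' StateRequisite, so the
# file.managed state gains ``require: [{pkg: nginx}]``; Registry.salt_data()
# then returns the accumulated states as an OrderedDict.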
class SaltObject(object):
'''
Object based interface to the functions in __salt__
.. code-block:: python
:linenos:
Salt = SaltObject(__salt__)
Salt.cmd.run(bar)
'''
def __init__(self, salt):
self._salt = salt
def __getattr__(self, mod):
class __wrapper__(object):
def __getattr__(wself, func): # pylint: disable=E0213
try:
return self._salt['{0}.{1}'.format(mod, func)]
except KeyError:
raise AttributeError
return __wrapper__()
class MapMeta(type):
'''
This is the metaclass for our Map class, used for building data maps based
off of grain data.
'''
def __init__(cls, name, bases, nmspc):
cls.__set_attributes__()
super(MapMeta, cls).__init__(name, bases, nmspc)
def __set_attributes__(cls):
match_groups = OrderedDict([])
# find all of our filters
for item in cls.__dict__:
if item[0] == '_':
continue
filt = cls.__dict__[item]
# only process classes
if not inspect.isclass(filt):
continue
# which grain are we filtering on
grain = getattr(filt, '__grain__', 'os_family')
if grain not in match_groups:
match_groups[grain] = OrderedDict([])
# does the object pointed to have a __match__ attribute?
# if so use it, otherwise use the name of the object
# this is so that you can match complex values, which the python
# class name syntax does not allow
if hasattr(filt, '__match__'):
match = filt.__match__
else:
match = item
match_groups[grain][match] = OrderedDict([])
for name in filt.__dict__:
if name[0] == '_':
continue
match_groups[grain][match][name] = filt.__dict__[name]
attrs = {}
for grain in match_groups:
filtered = Map.__salt__['grains.filter_by'](match_groups[grain],
grain=grain)
if filtered:
attrs.update(filtered)
if hasattr(cls, 'merge'):
pillar = Map.__salt__['pillar.get'](cls.merge)
if pillar:
attrs.update(pillar)
for name in attrs:
setattr(cls, name, attrs[name])
def need_salt(*a, **k):
log.error("Map needs __salt__ set before it can be used!")
return {}
class Map(six.with_metaclass(MapMeta, object)): # pylint: disable=W0232
__salt__ = {
'grains.filter_by': need_salt,
'pillar.get': need_salt
}
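
# A hedged sketch of how a Map subclass is typically written (added; assumes
# __salt__ has been injected so grains.filter_by / pillar.get are available):
#
#   class Nginx(Map):
#       merge = 'nginx:lookup'       # optional pillar key merged on top
#
#       class Debian:                # matched against the 'os_family' grain
#           server = 'nginx'
#           conf = '/etc/nginx/nginx.conf'
#
#       class RedHat:
#           __grain__ = 'os_family'  # explicit grain (the default anyway)
#           server = 'nginx'
#           conf = '/etc/nginx/nginx.conf'
#
# MapMeta groups the inner classes by their __grain__ (or __match__ value),
# filters them with grains.filter_by, optionally merges the pillar named by
# ``merge``, and sets the winning values as attributes of the subclass.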
|
|
# orm/relationships.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Heuristics related to join conditions as used in
:func:`.relationship`.
Provides the :class:`.JoinCondition` object, which encapsulates
SQL annotation and aliasing behavior focused on the `primaryjoin`
and `secondaryjoin` aspects of :func:`.relationship`.
"""
from .. import sql, util, exc as sa_exc, schema
from ..sql.util import (
ClauseAdapter,
join_condition, _shallow_annotate, visit_binary_product,
_deep_deannotate, find_tables
)
from ..sql import operators, expression, visitors
from .interfaces import MANYTOMANY, MANYTOONE, ONETOMANY
def remote(expr):
"""Annotate a portion of a primaryjoin expression
with a 'remote' annotation.
See the section :ref:`relationship_custom_foreign` for a
description of use.
.. versionadded:: 0.8
.. seealso::
:ref:`relationship_custom_foreign`
:func:`.foreign`
"""
return _annotate_columns(expression._clause_element_as_expr(expr),
{"remote": True})
def foreign(expr):
"""Annotate a portion of a primaryjoin expression
with a 'foreign' annotation.
See the section :ref:`relationship_custom_foreign` for a
description of use.
.. versionadded:: 0.8
.. seealso::
:ref:`relationship_custom_foreign`
:func:`.remote`
"""
return _annotate_columns(expression._clause_element_as_expr(expr),
{"foreign": True})
def _annotate_columns(element, annotations):
def clone(elem):
if isinstance(elem, expression.ColumnClause):
elem = elem._annotate(annotations.copy())
elem._copy_internals(clone=clone)
return elem
if element is not None:
element = clone(element)
return element
class JoinCondition(object):
def __init__(self,
parent_selectable,
child_selectable,
parent_local_selectable,
child_local_selectable,
primaryjoin=None,
secondary=None,
secondaryjoin=None,
parent_equivalents=None,
child_equivalents=None,
consider_as_foreign_keys=None,
local_remote_pairs=None,
remote_side=None,
self_referential=False,
prop=None,
support_sync=True,
can_be_synced_fn=lambda *c: True
):
self.parent_selectable = parent_selectable
self.parent_local_selectable = parent_local_selectable
self.child_selectable = child_selectable
self.child_local_selectable = child_local_selectable
self.parent_equivalents = parent_equivalents
self.child_equivalents = child_equivalents
self.primaryjoin = primaryjoin
self.secondaryjoin = secondaryjoin
self.secondary = secondary
self.consider_as_foreign_keys = consider_as_foreign_keys
self._local_remote_pairs = local_remote_pairs
self._remote_side = remote_side
self.prop = prop
self.self_referential = self_referential
self.support_sync = support_sync
self.can_be_synced_fn = can_be_synced_fn
self._determine_joins()
self._annotate_fks()
self._annotate_remote()
self._annotate_local()
self._setup_pairs()
self._check_foreign_cols(self.primaryjoin, True)
if self.secondaryjoin is not None:
self._check_foreign_cols(self.secondaryjoin, False)
self._determine_direction()
self._check_remote_side()
self._log_joins()
def _log_joins(self):
if self.prop is None:
return
log = self.prop.logger
log.info('%s setup primary join %s', self.prop,
self.primaryjoin)
log.info('%s setup secondary join %s', self.prop,
self.secondaryjoin)
log.info('%s synchronize pairs [%s]', self.prop,
','.join('(%s => %s)' % (l, r) for (l, r) in
self.synchronize_pairs))
log.info('%s secondary synchronize pairs [%s]', self.prop,
','.join('(%s => %s)' % (l, r) for (l, r) in
self.secondary_synchronize_pairs or []))
log.info('%s local/remote pairs [%s]', self.prop,
','.join('(%s / %s)' % (l, r) for (l, r) in
self.local_remote_pairs))
log.info('%s remote columns [%s]', self.prop,
','.join('%s' % col for col in self.remote_columns)
)
log.info('%s local columns [%s]', self.prop,
','.join('%s' % col for col in self.local_columns)
)
log.info('%s relationship direction %s', self.prop,
self.direction)
def _determine_joins(self):
"""Determine the 'primaryjoin' and 'secondaryjoin' attributes,
if not passed to the constructor already.
This is based on analysis of the foreign key relationships
between the parent and target mapped selectables.
"""
if self.secondaryjoin is not None and self.secondary is None:
raise sa_exc.ArgumentError(
"Property %s specified with secondary "
"join condition but "
"no secondary argument" % self.prop)
# find a join between the given mapper's mapped table and
# the given table. will try the mapper's local table first
# for more specificity, then if not found will try the more
# general mapped table, which in the case of inheritance is
# a join.
try:
consider_as_foreign_keys = self.consider_as_foreign_keys or None
if self.secondary is not None:
if self.secondaryjoin is None:
self.secondaryjoin = \
join_condition(
self.child_selectable,
self.secondary,
a_subset=self.child_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys
)
if self.primaryjoin is None:
self.primaryjoin = \
join_condition(
self.parent_selectable,
self.secondary,
a_subset=self.parent_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys
)
else:
if self.primaryjoin is None:
self.primaryjoin = \
join_condition(
self.parent_selectable,
self.child_selectable,
a_subset=self.parent_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys
)
except sa_exc.NoForeignKeysError:
if self.secondary is not None:
raise sa_exc.NoForeignKeysError("Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are no foreign keys "
"linking these tables via secondary table '%s'. "
"Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or "
"specify 'primaryjoin' and 'secondaryjoin' "
"expressions."
% (self.prop, self.secondary))
else:
raise sa_exc.NoForeignKeysError("Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are no foreign keys "
"linking these tables. "
"Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or "
"specify a 'primaryjoin' expression."
% self.prop)
except sa_exc.AmbiguousForeignKeysError:
if self.secondary is not None:
raise sa_exc.AmbiguousForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are multiple foreign key "
"paths linking the tables via secondary table '%s'. "
"Specify the 'foreign_keys' "
"argument, providing a list of those columns which "
"should be counted as containing a foreign key "
"reference from the secondary table to each of the "
"parent and child tables."
% (self.prop, self.secondary))
else:
raise sa_exc.AmbiguousForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are multiple foreign key "
"paths linking the tables. Specify the "
"'foreign_keys' argument, providing a list of those "
"columns which should be counted as containing a "
"foreign key reference to the parent table."
% self.prop)
@property
def primaryjoin_minus_local(self):
return _deep_deannotate(self.primaryjoin, values=("local", "remote"))
@property
def secondaryjoin_minus_local(self):
return _deep_deannotate(self.secondaryjoin, values=("local", "remote"))
@util.memoized_property
def primaryjoin_reverse_remote(self):
"""Return the primaryjoin condition suitable for the
"reverse" direction.
If the primaryjoin was delivered here with pre-existing
"remote" annotations, the local/remote annotations
are reversed. Otherwise, the local/remote annotations
are removed.
"""
if self._has_remote_annotations:
def replace(element):
if "remote" in element._annotations:
v = element._annotations.copy()
del v['remote']
v['local'] = True
return element._with_annotations(v)
elif "local" in element._annotations:
v = element._annotations.copy()
del v['local']
v['remote'] = True
return element._with_annotations(v)
return visitors.replacement_traverse(
self.primaryjoin, {}, replace)
else:
if self._has_foreign_annotations:
# TODO: coverage
return _deep_deannotate(self.primaryjoin,
values=("local", "remote"))
else:
return _deep_deannotate(self.primaryjoin)
def _has_annotation(self, clause, annotation):
for col in visitors.iterate(clause, {}):
if annotation in col._annotations:
return True
else:
return False
@util.memoized_property
def _has_foreign_annotations(self):
return self._has_annotation(self.primaryjoin, "foreign")
@util.memoized_property
def _has_remote_annotations(self):
return self._has_annotation(self.primaryjoin, "remote")
def _annotate_fks(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'foreign' annotations marking columns
considered as foreign.
"""
if self._has_foreign_annotations:
return
if self.consider_as_foreign_keys:
self._annotate_from_fk_list()
else:
self._annotate_present_fks()
def _annotate_from_fk_list(self):
def check_fk(col):
if col in self.consider_as_foreign_keys:
return col._annotate({"foreign": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin,
{},
check_fk
)
if self.secondaryjoin is not None:
self.secondaryjoin = visitors.replacement_traverse(
self.secondaryjoin,
{},
check_fk
)
def _annotate_present_fks(self):
if self.secondary is not None:
secondarycols = util.column_set(self.secondary.c)
else:
secondarycols = set()
def is_foreign(a, b):
if isinstance(a, schema.Column) and \
isinstance(b, schema.Column):
if a.references(b):
return a
elif b.references(a):
return b
if secondarycols:
if a in secondarycols and b not in secondarycols:
return a
elif b in secondarycols and a not in secondarycols:
return b
def visit_binary(binary):
if not isinstance(binary.left, sql.ColumnElement) or \
not isinstance(binary.right, sql.ColumnElement):
return
if "foreign" not in binary.left._annotations and \
"foreign" not in binary.right._annotations:
col = is_foreign(binary.left, binary.right)
if col is not None:
if col.compare(binary.left):
binary.left = binary.left._annotate(
{"foreign": True})
elif col.compare(binary.right):
binary.right = binary.right._annotate(
{"foreign": True})
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin,
{},
{"binary": visit_binary}
)
if self.secondaryjoin is not None:
self.secondaryjoin = visitors.cloned_traverse(
self.secondaryjoin,
{},
{"binary": visit_binary}
)
def _refers_to_parent_table(self):
"""Return True if the join condition contains column
comparisons where both columns are in both tables.
"""
pt = self.parent_selectable
mt = self.child_selectable
result = [False]
def visit_binary(binary):
c, f = binary.left, binary.right
if (
isinstance(c, expression.ColumnClause) and \
isinstance(f, expression.ColumnClause) and \
pt.is_derived_from(c.table) and \
pt.is_derived_from(f.table) and \
mt.is_derived_from(c.table) and \
mt.is_derived_from(f.table)
):
result[0] = True
visitors.traverse(
self.primaryjoin,
{},
{"binary": visit_binary}
)
return result[0]
def _tables_overlap(self):
"""Return True if parent/child tables have some overlap."""
return bool(
set(find_tables(self.parent_selectable)).intersection(
find_tables(self.child_selectable)
)
)
def _annotate_remote(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'remote' annotations marking columns
considered as part of the 'remote' side.
"""
if self._has_remote_annotations:
return
if self.secondary is not None:
self._annotate_remote_secondary()
elif self._local_remote_pairs or self._remote_side:
self._annotate_remote_from_args()
elif self._refers_to_parent_table():
self._annotate_selfref(lambda col: "foreign" in col._annotations)
elif self._tables_overlap():
self._annotate_remote_with_overlap()
else:
self._annotate_remote_distinct_selectables()
def _annotate_remote_secondary(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when 'secondary' is present.
"""
def repl(element):
if self.secondary.c.contains_column(element):
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl)
self.secondaryjoin = visitors.replacement_traverse(
self.secondaryjoin, {}, repl)
def _annotate_selfref(self, fn):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the relationship is detected as self-referential.
"""
def visit_binary(binary):
equated = binary.left.compare(binary.right)
if isinstance(binary.left, expression.ColumnClause) and \
isinstance(binary.right, expression.ColumnClause):
# assume one to many - FKs are "remote"
if fn(binary.left):
binary.left = binary.left._annotate({"remote": True})
if fn(binary.right) and not equated:
binary.right = binary.right._annotate(
{"remote": True})
else:
self._warn_non_column_elements()
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {},
{"binary": visit_binary})
def _annotate_remote_from_args(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the 'remote_side' or '_local_remote_pairs'
arguments are used.
"""
if self._local_remote_pairs:
if self._remote_side:
raise sa_exc.ArgumentError(
"remote_side argument is redundant "
"against more detailed _local_remote_side "
"argument.")
remote_side = [r for (l, r) in self._local_remote_pairs]
else:
remote_side = self._remote_side
if self._refers_to_parent_table():
self._annotate_selfref(lambda col: col in remote_side)
else:
def repl(element):
if element in remote_side:
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl)
def _annotate_remote_with_overlap(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the parent/child tables have some set of
tables in common, though it is not a fully self-referential
relationship.
"""
def visit_binary(binary):
binary.left, binary.right = proc_left_right(binary.left,
binary.right)
binary.right, binary.left = proc_left_right(binary.right,
binary.left)
def proc_left_right(left, right):
if isinstance(left, expression.ColumnClause) and \
isinstance(right, expression.ColumnClause):
if self.child_selectable.c.contains_column(right) and \
self.parent_selectable.c.contains_column(left):
right = right._annotate({"remote": True})
else:
self._warn_non_column_elements()
return left, right
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {},
{"binary": visit_binary})
def _annotate_remote_distinct_selectables(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the parent/child tables are entirely
separate.
"""
def repl(element):
if self.child_selectable.c.contains_column(element) and \
(
not self.parent_local_selectable.c.\
contains_column(element)
or self.child_local_selectable.c.\
contains_column(element)):
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl)
def _warn_non_column_elements(self):
util.warn(
"Non-simple column elements in primary "
"join condition for property %s - consider using "
"remote() annotations to mark the remote side."
% self.prop
)
def _annotate_local(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'local' annotations.
This annotates all column elements found
simultaneously in the parent table
and the join condition that don't have a
'remote' annotation set up from
_annotate_remote() or user-defined.
"""
if self._has_annotation(self.primaryjoin, "local"):
return
if self._local_remote_pairs:
local_side = util.column_set([l for (l, r)
in self._local_remote_pairs])
else:
local_side = util.column_set(self.parent_selectable.c)
def locals_(elem):
if "remote" not in elem._annotations and \
elem in local_side:
return elem._annotate({"local": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, locals_
)
def _check_remote_side(self):
if not self.local_remote_pairs:
raise sa_exc.ArgumentError('Relationship %s could '
'not determine any unambiguous local/remote column '
'pairs based on join condition and remote_side '
'arguments. '
'Consider using the remote() annotation to '
'accurately mark those elements of the join '
'condition that are on the remote side of '
'the relationship.'
% (self.prop, ))
def _check_foreign_cols(self, join_condition, primary):
"""Check the foreign key columns collected and emit error
messages."""
can_sync = False
foreign_cols = self._gather_columns_with_annotation(
join_condition, "foreign")
has_foreign = bool(foreign_cols)
if primary:
can_sync = bool(self.synchronize_pairs)
else:
can_sync = bool(self.secondary_synchronize_pairs)
if self.support_sync and can_sync or \
(not self.support_sync and has_foreign):
return
# from here below is just determining the best error message
# to report. Check for a join condition using any operator
# (not just ==), perhaps they need to turn on "viewonly=True".
if self.support_sync and has_foreign and not can_sync:
err = "Could not locate any simple equality expressions "\
"involving locally mapped foreign key columns for "\
"%s join condition "\
"'%s' on relationship %s." % (
primary and 'primary' or 'secondary',
join_condition,
self.prop
)
err += \
" Ensure that referencing columns are associated "\
"with a ForeignKey or ForeignKeyConstraint, or are "\
"annotated in the join condition with the foreign() "\
"annotation. To allow comparison operators other than "\
"'==', the relationship can be marked as viewonly=True."
raise sa_exc.ArgumentError(err)
else:
err = "Could not locate any relevant foreign key columns "\
"for %s join condition '%s' on relationship %s." % (
primary and 'primary' or 'secondary',
join_condition,
self.prop
)
err += \
' Ensure that referencing columns are associated '\
'with a ForeignKey or ForeignKeyConstraint, or are '\
'annotated in the join condition with the foreign() '\
'annotation.'
raise sa_exc.ArgumentError(err)
def _determine_direction(self):
"""Determine if this relationship is one to many, many to one,
many to many.
"""
if self.secondaryjoin is not None:
self.direction = MANYTOMANY
else:
parentcols = util.column_set(self.parent_selectable.c)
targetcols = util.column_set(self.child_selectable.c)
# fk collection which suggests ONETOMANY.
onetomany_fk = targetcols.intersection(
self.foreign_key_columns)
# fk collection which suggests MANYTOONE.
manytoone_fk = parentcols.intersection(
self.foreign_key_columns)
if onetomany_fk and manytoone_fk:
# fks on both sides. test for overlap of local/remote
# with foreign key
self_equated = self.remote_columns.intersection(
self.local_columns
)
onetomany_local = self.remote_columns.\
intersection(self.foreign_key_columns).\
difference(self_equated)
manytoone_local = self.local_columns.\
intersection(self.foreign_key_columns).\
difference(self_equated)
if onetomany_local and not manytoone_local:
self.direction = ONETOMANY
elif manytoone_local and not onetomany_local:
self.direction = MANYTOONE
else:
raise sa_exc.ArgumentError(
"Can't determine relationship"
" direction for relationship '%s' - foreign "
"key columns within the join condition are present "
"in both the parent and the child's mapped tables. "
"Ensure that only those columns referring "
"to a parent column are marked as foreign, "
"either via the foreign() annotation or "
"via the foreign_keys argument." % self.prop)
elif onetomany_fk:
self.direction = ONETOMANY
elif manytoone_fk:
self.direction = MANYTOONE
else:
raise sa_exc.ArgumentError("Can't determine relationship "
"direction for relationship '%s' - foreign "
"key columns are present in neither the parent "
"nor the child's mapped tables" % self.prop)
def _deannotate_pairs(self, collection):
"""provide deannotation for the various lists of
pairs, so that using them in hashes doesn't incur
high-overhead __eq__() comparisons against
original columns mapped.
"""
return [(x._deannotate(), y._deannotate())
for x, y in collection]
def _setup_pairs(self):
sync_pairs = []
lrp = util.OrderedSet([])
secondary_sync_pairs = []
def go(joincond, collection):
def visit_binary(binary, left, right):
if "remote" in right._annotations and \
"remote" not in left._annotations and \
self.can_be_synced_fn(left):
lrp.add((left, right))
elif "remote" in left._annotations and \
"remote" not in right._annotations and \
self.can_be_synced_fn(right):
lrp.add((right, left))
if binary.operator is operators.eq and \
self.can_be_synced_fn(left, right):
if "foreign" in right._annotations:
collection.append((left, right))
elif "foreign" in left._annotations:
collection.append((right, left))
visit_binary_product(visit_binary, joincond)
for joincond, collection in [
(self.primaryjoin, sync_pairs),
(self.secondaryjoin, secondary_sync_pairs)
]:
if joincond is None:
continue
go(joincond, collection)
self.local_remote_pairs = self._deannotate_pairs(lrp)
self.synchronize_pairs = self._deannotate_pairs(sync_pairs)
self.secondary_synchronize_pairs = \
self._deannotate_pairs(secondary_sync_pairs)
@util.memoized_property
def remote_columns(self):
return self._gather_join_annotations("remote")
@util.memoized_property
def local_columns(self):
return self._gather_join_annotations("local")
@util.memoized_property
def foreign_key_columns(self):
return self._gather_join_annotations("foreign")
@util.memoized_property
def deannotated_primaryjoin(self):
return _deep_deannotate(self.primaryjoin)
@util.memoized_property
def deannotated_secondaryjoin(self):
if self.secondaryjoin is not None:
return _deep_deannotate(self.secondaryjoin)
else:
return None
def _gather_join_annotations(self, annotation):
s = set(
self._gather_columns_with_annotation(
self.primaryjoin, annotation)
)
if self.secondaryjoin is not None:
s.update(
self._gather_columns_with_annotation(
self.secondaryjoin, annotation)
)
return set([x._deannotate() for x in s])
def _gather_columns_with_annotation(self, clause, *annotation):
annotation = set(annotation)
return set([
col for col in visitors.iterate(clause, {})
if annotation.issubset(col._annotations)
])
def join_targets(self, source_selectable,
dest_selectable,
aliased,
single_crit=None):
"""Given a source and destination selectable, create a
join between them.
This takes into account aliasing the join clause
to reference the appropriate corresponding columns
in the target objects, as well as the extra child
criterion, equivalent column sets, etc.
"""
# place a barrier on the destination such that
# replacement traversals won't ever dig into it.
# its internal structure remains fixed
# regardless of context.
dest_selectable = _shallow_annotate(
dest_selectable,
{'no_replacement_traverse': True})
primaryjoin, secondaryjoin, secondary = self.primaryjoin, \
self.secondaryjoin, self.secondary
# adjust the join condition for single table inheritance,
# in the case that the join is to a subclass
# this is analogous to the
# "_adjust_for_single_table_inheritance()" method in Query.
if single_crit is not None:
if secondaryjoin is not None:
secondaryjoin = secondaryjoin & single_crit
else:
primaryjoin = primaryjoin & single_crit
if aliased:
if secondary is not None:
secondary = secondary.alias()
primary_aliasizer = ClauseAdapter(secondary)
secondary_aliasizer = \
ClauseAdapter(dest_selectable,
equivalents=self.child_equivalents).\
chain(primary_aliasizer)
if source_selectable is not None:
primary_aliasizer = \
ClauseAdapter(secondary).\
chain(ClauseAdapter(source_selectable,
equivalents=self.parent_equivalents))
secondaryjoin = \
secondary_aliasizer.traverse(secondaryjoin)
else:
primary_aliasizer = ClauseAdapter(dest_selectable,
exclude_fn=_ColInAnnotations("local"),
equivalents=self.child_equivalents)
if source_selectable is not None:
primary_aliasizer.chain(
ClauseAdapter(source_selectable,
exclude_fn=_ColInAnnotations("remote"),
equivalents=self.parent_equivalents))
secondary_aliasizer = None
primaryjoin = primary_aliasizer.traverse(primaryjoin)
target_adapter = secondary_aliasizer or primary_aliasizer
target_adapter.exclude_fn = None
else:
target_adapter = None
return primaryjoin, secondaryjoin, secondary, \
target_adapter, dest_selectable
def create_lazy_clause(self, reverse_direction=False):
binds = util.column_dict()
lookup = util.column_dict()
equated_columns = util.column_dict()
if reverse_direction and self.secondaryjoin is None:
for l, r in self.local_remote_pairs:
_list = lookup.setdefault(r, [])
_list.append((r, l))
equated_columns[l] = r
else:
for l, r in self.local_remote_pairs:
_list = lookup.setdefault(l, [])
_list.append((l, r))
equated_columns[r] = l
def col_to_bind(col):
if col in lookup:
for tobind, equated in lookup[col]:
if equated in binds:
return None
if col not in binds:
binds[col] = sql.bindparam(
None, None, type_=col.type, unique=True)
return binds[col]
return None
lazywhere = self.deannotated_primaryjoin
if self.deannotated_secondaryjoin is None or not reverse_direction:
lazywhere = visitors.replacement_traverse(
lazywhere, {}, col_to_bind)
if self.deannotated_secondaryjoin is not None:
secondaryjoin = self.deannotated_secondaryjoin
if reverse_direction:
secondaryjoin = visitors.replacement_traverse(
secondaryjoin, {}, col_to_bind)
lazywhere = sql.and_(lazywhere, secondaryjoin)
bind_to_col = dict((binds[col].key, col) for col in binds)
return lazywhere, bind_to_col, equated_columns
class _ColInAnnotations(object):
"""Seralizable equivalent to:
lambda c: "name" in c._annotations
"""
def __init__(self, name):
self.name = name
def __call__(self, c):
return self.name in c._annotations
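# Illustrative sketch (not part of the library): _ColInAnnotations simply tests
# membership of a key in a column's _annotations mapping, exactly like the
# lambda above, but as a module-level class it can also be pickled.
def _example_col_in_annotations():
    from sqlalchemy import Column, Integer, MetaData, Table

    t = Table('t', MetaData(), Column('id', Integer))
    annotated = t.c.id._annotate({'remote': True})

    check = _ColInAnnotations('remote')
    assert check(annotated) is True
    assert check(t.c.id) is False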
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import copy
import six
import warnings
from itertools import chain
from datetime import datetime
try:
from collections import OrderedDict
except ImportError:
from django.utils.datastructures import SortedDict as OrderedDict
from django.core.exceptions import ImproperlyConfigured, FieldDoesNotExist
from haystack import fields as haystack_fields
from haystack.query import EmptySearchQuerySet
from haystack.utils import Highlighter
from rest_framework import serializers
from rest_framework.fields import empty
from rest_framework.utils.field_mapping import ClassLookupDict, get_field_kwargs
from drf_haystack.fields import (
HaystackBooleanField, HaystackCharField, HaystackDateField, HaystackDateTimeField,
HaystackDecimalField, HaystackFloatField, HaystackIntegerField, HaystackMultiValueField,
FacetDictField, FacetListField
)
class Meta(type):
"""
Template for the HaystackSerializerMeta.Meta class.
"""
fields = tuple()
exclude = tuple()
search_fields = tuple()
index_classes = tuple()
serializers = tuple()
ignore_fields = tuple()
field_aliases = {}
field_options = {}
index_aliases = {}
def __new__(mcs, name, bases, attrs):
cls = super(Meta, mcs).__new__(mcs, str(name), bases, attrs)
if cls.fields and cls.exclude:
raise ImproperlyConfigured("%s cannot define both 'fields' and 'exclude'." % name)
return cls
def __setattr__(cls, key, value):
raise AttributeError("Meta class is immutable.")
def __delattr__(cls, key):
raise AttributeError("Meta class is immutable.")
class HaystackSerializerMeta(serializers.SerializerMetaclass):
"""
Metaclass for the HaystackSerializer that ensures that all declared subclasses implement a Meta class.
"""
def __new__(mcs, name, bases, attrs):
attrs.setdefault("_abstract", False)
cls = super(HaystackSerializerMeta, mcs).__new__(mcs, str(name), bases, attrs)
if getattr(cls, "Meta", None):
cls.Meta = Meta("Meta", (Meta,), dict(cls.Meta.__dict__))
elif not cls._abstract:
raise ImproperlyConfigured("%s must implement a Meta class or have the property _abstract" % name)
return cls
class HaystackSerializer(six.with_metaclass(HaystackSerializerMeta, serializers.Serializer)):
"""
A `HaystackSerializer` which populates its fields based on
which models are available in the SearchQuerySet.
"""
_abstract = True
_field_mapping = ClassLookupDict({
haystack_fields.BooleanField: HaystackBooleanField,
haystack_fields.CharField: HaystackCharField,
haystack_fields.DateField: HaystackDateField,
haystack_fields.DateTimeField: HaystackDateTimeField,
haystack_fields.DecimalField: HaystackDecimalField,
haystack_fields.EdgeNgramField: HaystackCharField,
haystack_fields.FacetBooleanField: HaystackBooleanField,
haystack_fields.FacetCharField: HaystackCharField,
haystack_fields.FacetDateField: HaystackDateField,
haystack_fields.FacetDateTimeField: HaystackDateTimeField,
haystack_fields.FacetDecimalField: HaystackDecimalField,
haystack_fields.FacetFloatField: HaystackFloatField,
haystack_fields.FacetIntegerField: HaystackIntegerField,
haystack_fields.FacetMultiValueField: HaystackMultiValueField,
haystack_fields.FloatField: HaystackFloatField,
haystack_fields.IntegerField: HaystackIntegerField,
haystack_fields.LocationField: HaystackCharField,
haystack_fields.MultiValueField: HaystackMultiValueField,
haystack_fields.NgramField: HaystackCharField,
})
def __init__(self, instance=None, data=empty, **kwargs):
super(HaystackSerializer, self).__init__(instance, data, **kwargs)
if not self.Meta.index_classes and not self.Meta.serializers:
raise ImproperlyConfigured("You must set either the 'index_classes' or 'serializers' "
"attribute on the serializer Meta class.")
if not self.instance:
self.instance = EmptySearchQuerySet()
@staticmethod
def _get_default_field_kwargs(model, field):
"""
Get the required attributes from the model field in order
to instantiate a REST Framework serializer field.
"""
kwargs = {}
try:
field_name = field.model_attr or field.index_fieldname
model_field = model._meta.get_field(field_name)
kwargs.update(get_field_kwargs(field_name, model_field))
# Remove stuff we don't care about!
delete_attrs = [
"allow_blank",
"choices",
"model_field",
"allow_unicode",
]
for attr in delete_attrs:
if attr in kwargs:
del kwargs[attr]
except FieldDoesNotExist:
pass
return kwargs
def _get_index_field(self, field_name):
"""
Returns the correct index field.
"""
return field_name
def _get_index_class_name(self, index_cls):
"""
Converts an index class to a name suitable for use as a field name prefix. A user
may optionally specify custom aliases via an 'index_aliases' attribute on the Meta class.
"""
cls_name = index_cls.__name__
aliases = self.Meta.index_aliases
return aliases.get(cls_name, cls_name.split('.')[-1])
def get_fields(self):
"""
Get the required fields for serializing the result.
"""
fields = self.Meta.fields
exclude = self.Meta.exclude
ignore_fields = self.Meta.ignore_fields
indices = self.Meta.index_classes
declared_fields = copy.deepcopy(self._declared_fields)
prefix_field_names = len(indices) > 1
field_mapping = OrderedDict()
# overlapping fields on multiple indices is supported by internally prefixing the field
# names with the index class to which they belong or, optionally, a user-provided alias
# for the index.
for index_cls in self.Meta.index_classes:
prefix = ""
if prefix_field_names:
prefix = "_%s__" % self._get_index_class_name(index_cls)
for field_name, field_type in six.iteritems(index_cls.fields):
orig_name = field_name
field_name = "%s%s" % (prefix, field_name)
# Don't use this field if it is in `ignore_fields`
if orig_name in ignore_fields or field_name in ignore_fields:
continue
# When fields to include are decided by `exclude`
if exclude:
if orig_name in exclude or field_name in exclude:
continue
# When fields to include are decided by `fields`
if fields:
if orig_name not in fields and field_name not in fields:
continue
# Look up the field attributes on the current index model,
# in order to correctly instantiate the serializer field.
model = index_cls().get_model()
kwargs = self._get_default_field_kwargs(model, field_type)
kwargs['prefix_field_names'] = prefix_field_names
field_mapping[field_name] = self._field_mapping[field_type](**kwargs)
# Add any explicitly declared fields. They *will* override any index fields
# in case of a naming collision.
if declared_fields:
for field_name in declared_fields:
field_mapping[field_name] = declared_fields[field_name]
return field_mapping
def to_representation(self, instance):
"""
If we have a serializer mapping, use that. Otherwise, use the standard serializer behavior.
Since we might be dealing with multiple indexes, some fields might
not be valid for all results. Do not render the fields which don't belong
to the search result.
"""
if self.Meta.serializers:
ret = self.multi_serializer_representation(instance)
else:
ret = super(HaystackSerializer, self).to_representation(instance)
prefix_field_names = len(getattr(self.Meta, "index_classes")) > 1
current_index = self._get_index_class_name(type(instance.searchindex))
for field in self.fields.keys():
orig_field = field
if prefix_field_names:
parts = field.split("__")
if len(parts) > 1:
index = parts[0][1:] # trim the preceding '_'
field = parts[1]
if index == current_index:
ret[field] = ret[orig_field]
del ret[orig_field]
elif field not in chain(instance.searchindex.fields.keys(), self._declared_fields.keys()):
del ret[orig_field]
# include the highlighted field in either case
if getattr(instance, "highlighted", None):
ret["highlighted"] = instance.highlighted[0]
return ret
def multi_serializer_representation(self, instance):
serializers = self.Meta.serializers
index = instance.searchindex
serializer_class = serializers.get(type(index), None)
if not serializer_class:
raise ImproperlyConfigured("Could not find serializer for %s in mapping" % index)
return serializer_class(context=self._context).to_representation(instance)
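# A minimal usage sketch (hypothetical 'PersonIndex' search index): a concrete
# serializer only needs a Meta declaring 'index_classes' (or 'serializers');
# get_fields() above then builds the serializer fields from the index fields.
def _example_haystack_serializer():
    from myapp.search_indexes import PersonIndex  # assumed to exist in your app

    class PersonSearchSerializer(HaystackSerializer):
        class Meta:
            index_classes = [PersonIndex]
            fields = ["firstname", "lastname", "autocomplete"]

    return PersonSearchSerializer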
class FacetFieldSerializer(serializers.Serializer):
"""
Responsible for serializing a faceted result.
"""
text = serializers.SerializerMethodField()
count = serializers.SerializerMethodField()
narrow_url = serializers.SerializerMethodField()
def __init__(self, *args, **kwargs):
self._parent_field = None
super(FacetFieldSerializer, self).__init__(*args, **kwargs)
@property
def parent_field(self):
return self._parent_field
@parent_field.setter
def parent_field(self, value):
self._parent_field = value
def get_paginate_by_param(self):
"""
Returns the ``paginate_by_param`` for the (root) view paginator class.
This is needed in order to remove the query parameter from faceted
narrow urls.
If using a custom pagination class, this class attribute needs to
be set manually.
"""
if hasattr(self.root, "paginate_by_param") and self.root.paginate_by_param:
return self.root.paginate_by_param
pagination_class = self.context["view"].pagination_class
if not pagination_class:
return None
# PageNumberPagination
if hasattr(pagination_class, "page_query_param"):
return pagination_class.page_query_param
# LimitOffsetPagination
elif hasattr(pagination_class, "offset_query_param"):
return pagination_class.offset_query_param
# CursorPagination
elif hasattr(pagination_class, "cursor_query_param"):
return pagination_class.cursor_query_param
else:
raise AttributeError(
"%(root_cls)s is missing a `paginate_by_param` attribute. "
"Define a %(root_cls)s.paginate_by_param or override "
"%(cls)s.get_paginate_by_param()." % {
"root_cls": self.root.__class__.__name__,
"cls": self.__class__.__name__
})
def get_text(self, instance):
"""
Haystack facets are returned as a two-tuple (value, count).
The text field should contain the faceted value.
"""
instance = instance[0]
if isinstance(instance, (six.text_type, six.string_types)):
return serializers.CharField(read_only=True).to_representation(instance)
elif isinstance(instance, datetime):
return serializers.DateTimeField(read_only=True).to_representation(instance)
return instance
def get_count(self, instance):
"""
Haystack facets are returned as a two-tuple (value, count).
The count field should contain the faceted count.
"""
instance = instance[1]
return serializers.IntegerField(read_only=True).to_representation(instance)
def get_narrow_url(self, instance):
"""
Return a link suitable for narrowing on the current item.
"""
text = instance[0]
request = self.context["request"]
query_params = request.GET.copy()
# Never keep the page query parameter in narrowing urls.
# It will raise a NotFound exception when trying to paginate a narrowed queryset.
page_query_param = self.get_paginate_by_param()
if page_query_param and page_query_param in query_params:
del query_params[page_query_param]
selected_facets = set(query_params.pop(self.root.facet_query_params_text, []))
selected_facets.add("%(field)s_exact:%(text)s" % {"field": self.parent_field, "text": text})
query_params.setlist(self.root.facet_query_params_text, sorted(selected_facets))
path = "%(path)s?%(query)s" % {"path": request.path_info, "query": query_params.urlencode()}
url = request.build_absolute_uri(path)
return serializers.Hyperlink(url, "narrow-url")
def to_representation(self, field, instance):
"""
Set the ``parent_field`` property equal to the current field on the serializer class,
so that each field can query it to see what kind of attribute it is processing.
"""
self.parent_field = field
return super(FacetFieldSerializer, self).to_representation(instance)
class HaystackFacetSerializer(six.with_metaclass(HaystackSerializerMeta, serializers.Serializer)):
"""
The ``HaystackFacetSerializer`` is used to serialize the ``facet_counts()``
dictionary results on a ``SearchQuerySet`` instance.
"""
_abstract = True
serialize_objects = False
paginate_by_param = None
facet_dict_field_class = FacetDictField
facet_list_field_class = FacetListField
facet_field_serializer_class = FacetFieldSerializer
def get_fields(self):
"""
This returns a dictionary containing the top-most fields,
``dates``, ``fields`` and ``queries``.
"""
field_mapping = OrderedDict()
for field, data in self.instance.items():
field_mapping.update(
{field: self.facet_dict_field_class(
child=self.facet_list_field_class(child=self.facet_field_serializer_class(data)), required=False)}
)
if self.serialize_objects is True:
field_mapping["objects"] = serializers.SerializerMethodField()
return field_mapping
def get_objects(self, instance):
"""
Return a list of objects matching the faceted result.
"""
view = self.context["view"]
queryset = self.context["objects"]
page = view.paginate_queryset(queryset)
if page is not None:
serializer = view.get_facet_objects_serializer(page, many=True)
return OrderedDict([
("count", self.get_count(queryset)),
("next", view.paginator.get_next_link()),
("previous", view.paginator.get_previous_link()),
("results", serializer.data)
])
serializer = view.get_serializer(queryset, many=True)
return serializer.data
def get_count(self, queryset):
"""
Determine an object count, supporting either querysets or regular lists.
"""
try:
return queryset.count()
except (AttributeError, TypeError):
return len(queryset)
@property
def facet_query_params_text(self):
return self.context["facet_query_params_text"]
class HaystackSerializerMixin(object):
"""
This mixin can be added to a serializer to use the actual object as the data source for serialization rather
than the data stored in the search index fields. This makes it easy to return data from search results in
the same format as elsewhere in your API and reuse your existing serializers.
"""
def to_representation(self, instance):
obj = instance.object
return super(HaystackSerializerMixin, self).to_representation(obj)
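# Sketch of combining the mixin with an existing DRF serializer (hypothetical
# 'PersonSerializer'): because to_representation() above delegates to
# instance.object, the regular model serializer renders the database object
# instead of the stored index fields.
def _example_serializer_mixin():
    from myapp.serializers import PersonSerializer  # assumed DRF ModelSerializer

    class PersonSearchSerializer(HaystackSerializerMixin, PersonSerializer):
        class Meta(PersonSerializer.Meta):
            search_fields = ["text"]

    return PersonSearchSerializer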
class HighlighterMixin(object):
"""
This mixin adds support for ``highlighting`` (the pure python, portable
version, not SearchQuerySet().highlight()). See Haystack docs
for more info.
"""
highlighter_class = Highlighter
highlighter_css_class = "highlighted"
highlighter_html_tag = "span"
highlighter_max_length = 200
highlighter_field = None
def get_highlighter(self):
if not self.highlighter_class:
raise ImproperlyConfigured(
"%(cls)s is missing a highlighter_class. Define %(cls)s.highlighter_class, "
"or override %(cls)s.get_highlighter()." %
{"cls": self.__class__.__name__}
)
return self.highlighter_class
@staticmethod
def get_document_field(instance):
"""
Returns which field the search index has marked as its
`document=True` field.
"""
for name, field in instance.searchindex.fields.items():
if field.document is True:
return name
def get_terms(self, data):
"""
Returns the terms to be highlighted
"""
terms = " ".join(six.itervalues(self.context["request"].GET))
return terms
def to_representation(self, instance):
ret = super(HighlighterMixin, self).to_representation(instance)
terms = self.get_terms(ret)
if terms:
highlighter = self.get_highlighter()(terms, **{
"html_tag": self.highlighter_html_tag,
"css_class": self.highlighter_css_class,
"max_length": self.highlighter_max_length
})
document_field = self.get_document_field(instance)
if highlighter and document_field:
# Handle case where this data is None, but highlight expects it to be a string
data_to_highlight = getattr(instance, self.highlighter_field or document_field) or ''
ret["highlighted"] = highlighter.highlight(data_to_highlight)
return ret
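# Sketch of enabling highlighting on a search serializer (hypothetical
# 'PersonIndex'): the mixin joins the request's query parameters into the terms,
# runs Haystack's pure-python Highlighter over the document field and adds the
# result under the "highlighted" key.
def _example_highlighter_serializer():
    from myapp.search_indexes import PersonIndex  # assumed to exist in your app

    class HighlightedPersonSerializer(HighlighterMixin, HaystackSerializer):
        highlighter_css_class = "my-highlighter-class"
        highlighter_html_tag = "em"

        class Meta:
            index_classes = [PersonIndex]
            fields = ["firstname", "lastname", "text"]

    return HighlightedPersonSerializer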
|
|
from sqlalchemy import Table, ForeignKey, Column, Boolean, Integer, Float, String, TIMESTAMP, LargeBinary, sql
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship
#Module used to map the tables from Genologics's Postgres instance
Base = declarative_base()
#Junction tables
artifact_sample_map = Table('artifact_sample_map', Base.metadata,
Column('artifactid', Integer, ForeignKey('artifact.artifactid')),
Column('processid', Integer, ForeignKey('sample.processid')))
"""Junction table between artifact and sample"""
artifact_ancestor_map = Table('artifact_ancestor_map', Base.metadata,
Column('artifactid', Integer, ForeignKey('artifact.artifactid')),
Column('ancestorartifactid', Integer, ForeignKey('artifact.artifactid')))
"""Junction table between artifact and artifact (as an ancestor)"""
artifact_label_map = Table('artifact_label_map', Base.metadata,
Column('artifactid', Integer, ForeignKey('artifact.artifactid')),
Column('labelid', Integer, ForeignKey('reagentlabel.labelid')))
"""Junction table between artifact and reagentlabel)"""
#Standard tables
#udf view has to be before project
class EntityUdfView(Base):
"""Table used to access project and container udfs
:arg INTEGER attachtoid: the ID of the entity to attach the row to.
:arg INTEGER attachtoclassid: the ID of the class of the entity to attach the row to.
:arg STRING udtname: the name of the User Defined Type.
:arg STRING udfname: the name of the User Defined Field.
:arg STRING udftype: the type of the User Defined Field.
:arg STRING udfvalue: the value of the User Defined Field.
:arg STRING udfunitlabel: the unit of the User Defined Field, if present.
All of these are mapped as primary keys.
"""
__tablename__ = 'entity_udf_view'
attachtoid = Column(Integer, primary_key=True)
attachtoclassid = Column(Integer, primary_key=True)
udtname = Column(String, primary_key=True)
udfname = Column(String, primary_key=True)
udftype = Column(String, primary_key=True)
udfvalue = Column(String, primary_key=True)
udfunitlabel = Column(String, primary_key=True)
def __repr__(self):
return "<EntityUdf(id={}, class={}, key={}, value={})>".format(self.attachtoid, self.attachtoclassid, self.udfname, self.udfvalue)
class Project(Base):
"""Table storing project objects
:arg INTEGER projectid: the _internal_ project ID. **Primary key.**
:arg STRING name: the project name.
:arg TIMESTAMP opendate: the opening date of the project as a timestamp.
:arg TIMESTAMP closedate: the closing date of the project as a timestamp.
:arg TIMESTAMP invoicedate: the invoicing date of the project as a timestamp.
:arg STRING luid: the external project id.
:arg STRING maximumsampleid: the id of the last sample; usually the number of samples minus one, as it is 0-indexed.
:arg INTEGER ownerid: researcherID of the project owner.
:arg INTEGER datastoreid: probably used to map the udfs
:arg INTEGER isglobal: *unknown*
:arg TIMESTAMP createddate: the creation date of the project as a timestamp.
:arg TIMESTAMP lastmodifieddate: the last modification date of the project as a timestamp.
:arg INTEGER lastmodifiedby: the id of the last modifier of the project.
:arg INTEGER researcherid: the id of the researcher associated to the project.
:arg INTEGER priority: *unknown*
The following attributes are *not* found in the table, but are available through mapping
:arg UDFS udfs: list of project udf rows for the given projectid
:arg RESEARCHER researcher: direct researcher mapping
"""
__tablename__ = 'project'
projectid = Column(Integer, primary_key=True)
name = Column(String)
opendate = Column(TIMESTAMP)
closedate = Column(TIMESTAMP)
invoicedate = Column(TIMESTAMP)
luid = Column(String)
maximumsampleid = Column(String)
ownerid = Column(Integer)
datastoreid = Column(Integer)
isglobal = Column(Boolean)
createddate = Column(TIMESTAMP)
lastmodifieddate = Column(TIMESTAMP)
lastmodifiedby = Column(Integer)
researcherid = Column(Integer, ForeignKey('researcher.researcherid'))
priority = Column(Integer)
#this is the reason why udfview was declared before project
#the entityid of the Projects is 83
udfs = relationship("EntityUdfView", foreign_keys=projectid, remote_side=EntityUdfView.attachtoid, uselist=True,
primaryjoin="and_(Project.projectid==EntityUdfView.attachtoid, EntityUdfView.attachtoclassid==83)")
researcher = relationship("Researcher", uselist=False)
@hybrid_property
def udf_dict(self):
udf_dict={}
for udfrow in self.udfs:
if udfrow.udfvalue:
if udfrow.udftype == "Numeric":
udf_dict[udfrow.udfname]=float(udfrow.udfvalue)
elif udfrow.udftype == "Boolean":
udf_dict[udfrow.udfname]=(udfrow.udfvalue=="True")
else:
udf_dict[udfrow.udfname]=udfrow.udfvalue
return udf_dict
def __repr__(self):
return "<Project(id={}, name={})>".format(self.projectid, self.name)
class SampleUdfView(Base):
"""Table used to access project and container udfs
:arg INTEGER sampleid: the ID of the sample to attach the row to.
:arg STRING udtname: the name of the User Defined Type.
:arg STRING udfname: the name of the User Defined Field.
:arg STRING udftype: the type of the User Defined Field.
:arg STRING udfvalue: the value of the User Defined Field.
:arg STRING udfunitlabel: the unit of the User Defined Field, if present.
All of these are mapped as primary keys.
"""
__tablename__ = 'sample_udf_view'
sampleid = Column(Integer, ForeignKey('sample.sampleid'), primary_key=True)
udtname = Column(String, primary_key=True)
udfname = Column(String, primary_key=True)
udftype = Column(String, primary_key=True)
udfvalue = Column(String, primary_key=True)
udfunitlabel = Column(String, primary_key=True)
def __repr__(self):
return "<SampleUdf(id={}, key={}, value={})>".format(self.sampleid, self.udfname, self.udfvalue)
class Sample(Base):
"""
Table mapping the samples
:arg INTEGER processid: The ID of the process that spawned the sample. Primary key.
:arg INTEGER sampleid: Internal sample ID.
:arg STRING name: the sample name.
:arg TIMESTAMP datereceived: timestamp of the sample import.
:arg TIMESTAMP datecompleted: timestamp of the project closure / sample completion.
:arg INTEGER maximumanalyteid: *unknown*
:arg INTEGER uniqueid: *unknown*. Not unique.
:arg INTEGER bisourceid: *unknown*.
:arg INTEGER projectid: project ID associated with the sample.
:arg INTEGER controltypeid: *unknown*.
The following attributes are *not* found in the table, but are available through mapping
:arg Project project: project object associated to the sample through the projectid foreign key.
"""
__tablename__ = 'sample'
processid = Column(Integer, ForeignKey('process.processid'), primary_key=True)
sampleid = Column(Integer)
name = Column(String)
datereceived = Column(TIMESTAMP)
datecompleted = Column(TIMESTAMP)
maximumanalyteid = Column(Integer)
uniqueid = Column(Integer)
bisourceid = Column(Integer)
projectid = Column(Integer, ForeignKey('project.projectid'))
controltypeid = Column(Integer)
project = relationship(Project, backref='samples')
udfs = relationship('SampleUdfView')
submitter = relationship("Researcher",
secondary="join(Process, Principals, Process.techid==Principals.principalid)",
primaryjoin="Sample.processid==Process.processid",
secondaryjoin="Researcher.researcherid==Principals.researcherid",
uselist=False)
artifact = relationship('Artifact',
secondary=artifact_sample_map,
secondaryjoin="and_(artifact_sample_map.c.artifactid == Artifact.artifactid, Artifact.isoriginal==True)",
uselist=False
)
@hybrid_property
def udf_dict(self):
udf_dict={}
for udfrow in self.udfs:
if udfrow.udfvalue:
if udfrow.udftype == "Numeric":
udf_dict[udfrow.udfname]=float(udfrow.udfvalue)
elif udfrow.udftype == "Boolean":
udf_dict[udfrow.udfname]=(udfrow.udfvalue=="True")
else:
udf_dict[udfrow.udfname]=udfrow.udfvalue
return udf_dict
def __repr__(self):
return "<Sample(id={}, name={})>".format(self.sampleid, self.name)
class ProcessType(Base):
"""Table mapping the Process Types
:arg INTEGER typeid: The Process Type ID. Primary Key
:arg STRING displayname: The name of the process type as shown everywhere.
:arg STRING typename: The name of the _category_ of the process type.
:arg BOOLEAN isenabled: Probably related to the tickbox in the Operations interface
:arg STRING contextcode: The short code (usually 3 letters) that represents the type
:arg BOOLEAN isvisible: *unknown*
:arg INTEGER style: *unknown*
:arg BOOLEAN showinexplorer: *unknown*
:arg BOOLEAN showinbuttonbar: *unknown*
:arg BOOLEAN openpostprocess: *unknown*
:arg STRING iconconstant: *unknown*
:arg STRING outputcontextcode: *unknown*. Apparently, a two-letter code.
:arg BOOLEAN useprotocol: *unknown*
:arg INTEGER ownerid: Researcher ID of the owner of the type. Should correlate to the Researcher table.
:arg INTEGER datastoreid: likely related to the udf storage
:arg BOOLEAN isglobal: *unknown*
:arg TIMESTAMP createddate: creation date
:arg TIMESTAMP lastmodifieddate: timestamp of the last modification
:arg INTEGER lastmodifiedby: ID of the last modifier
:arg STRING behaviourname: *unknown*
:arg STRING pmetadata: html string likely containing display data. The actual column name is metadata, but that causes namespace conflicts.
:arg BOOLEAN canedit: is that type editable
:arg STRING modulename: Java module tied to this type
:arg STRING expertname: Java class tied to this type
"""
__tablename__ = 'processtype'
typeid = Column(Integer, primary_key=True)
displayname = Column(String)
typename= Column(String)
isenabled = Column(Boolean)
contextcode = Column(String)
isvisible = Column(Boolean)
style = Column(Integer)
showinexplorer = Column(Boolean)
showinbuttonbar = Column(Boolean)
openpostprocess = Column(Boolean)
iconconstant = Column(String)
outputcontextcode = Column(String)
useprotocol = Column(Boolean)
ownerid = Column(Integer)
datastoreid = Column(Integer)
isglobal = Column(Boolean)
createddate = Column(TIMESTAMP)
lastmodifieddate = Column(TIMESTAMP)
lastmodifiedby = Column(Integer)
behaviourname = Column(String)
pmetadata = Column('metadata', String)
canedit = Column(Boolean)
modulename = Column(String)
expertname = Column(String)
def __repr__(self):
return "<ProcessType(id={}, name={})>".format(self.typeid, self.typename)
class Process(Base):
"""Table mapping process objects
:arg INTEGER processid: the (short) process ID. **Primary key.**
:arg TIMESTAMP daterun: date where the process was closed
:arg STRING luid: the (long) process id
:arg BOOLEAN isprotocol: *unknown*
:arg STRING protocolnameused: *unknown*
:arg BOOLEAN programstarted: probably stores EPP status
:arg INTEGER datastoreid: id of the associated datastore
:arg BOOLEAN isglobal: *unknown*
:arg INTEGER ownerid: researcher id of the process creator
:arg TIMESTAMP createddate: date of creation of the process
:arg TIMESTAMP lastmodifieddate: date of last modification
:arg INTEGER lastmodifiedby: researcher id of the last modifier
:arg INTEGER installationid: *unknown*
:arg INTEGER techid: *unknown*
:arg INTEGER typeid: id of the process type associated
:arg INTEGER stringparameterid: parameterid from processparameter. Contains information about EPPs.
:arg INTEGER fileparameterid: *unknown* often empty
:arg INTEGER protocolstepid: id of the associated protocol step
:arg STRING workstatus: status of the process. values : COMPLETE, RECORD_DETAILS, STARTED, UNDER_REVIEW, MOVE_SAMPLES_ON
:arg INTEGER reagentcategoryid: id of the associated reagent category
:arg INTEGER signedbyid: *unknown*
:arg TIMESTAMP signeddate: *unknown*
:arg BOOLEAN nextstepslocked: *unknown*
The following attributes are *not* found in the table, but are available through mapping
:arg ProcessType type: ProcessType row associated with the Process row.
:arg ProcessUdfView udfs: ProcessUdfView row associated with the Process row.
"""
__tablename__ = 'process'
processid = Column(Integer, primary_key=True)
daterun = Column(TIMESTAMP)
luid = Column(String)
isprotocol = Column(Boolean)
protocolnameused = Column(String)
programstarted = Column(Boolean)
datastoreid = Column(Integer)
isglobal = Column(Boolean)
ownerid = Column(Integer)
createddate = Column(TIMESTAMP)
lastmodifieddate = Column(TIMESTAMP)
lastmodifiedby = Column(Integer)
installationid = Column(Integer)
techid = Column(Integer, ForeignKey('principals.principalid'))
typeid = Column(Integer, ForeignKey('processtype.typeid'))
stringparameterid = Column(Integer)
fileparameterid = Column(Integer)
protocolstepid = Column(Integer)
workstatus = Column(String)
reagentcategoryid = Column(Integer)
signedbyid = Column(Integer)
signeddate = Column(TIMESTAMP)
nextstepslocked = Column(Boolean)
type = relationship("ProcessType", backref='processes')
udfs = relationship("ProcessUdfView")
technician = relationship("Principals")
def __repr__(self):
return "<Process(id={}, type={})>".format(self.processid, self.typeid)
@hybrid_property
def udf_dict(self):
udf_dict={}
for udfrow in self.udfs:
if udfrow.udfvalue:
if udfrow.udftype == "Numeric":
udf_dict[udfrow.udfname]=float(udfrow.udfvalue)
elif udfrow.udftype == "Boolean":
udf_dict[udfrow.udfname]=(udfrow.udfvalue=="True")
else:
udf_dict[udfrow.udfname]=udfrow.udfvalue
return udf_dict
class Artifact(Base):
"""Table mapping artifact objects
:arg INTEGER artifactid: the (short) artifact ID. **Primary key.**
:arg STRING name: the artifact given name
:arg STRING luid: the (long) artifact id
:arg FLOAT concentration: *unknown*
:arg FLOAT origvolume: *unknown*
:arg FLOAT origconcentration: *unknown*
:arg INTEGER datastoreid: id of the associated datastore
:arg BOOLEAN isworking: API working flag
:arg BOOLEAN isoriginal: *unknown*
:arg BOOLEAN isglobal: *unknown*
:arg BOOLEAN isgenealogyartifact: *unknown*
:arg INTEGER ownerid: researcher id of the artifact creator
:arg TIMESTAMP createddate: date of creation of the artifact
:arg TIMESTAMP lastmodifieddate: date of last modification
:arg INTEGER lastmodifiedby: researcher id of the last modifier
:arg INTEGER artifacttypeid: *unknown*
:arg INTEGER processoutputtypeid: *unknown*
:arg INTEGER currentstateid: *unknown*
:arg INTEGER originalstateid: *unknown*
:arg INTEGER compoundartifactid: *unknown*
:arg INTEGER outputindex: *unknown*
The following attributes are *not* found in the table, but are available through mapping
:arg Artifact ancestors: Artifact rows associated with this row through artifact_ancestor_map.
:arg ContainerPlacement containerplacement: ContainerPlacement row associated the Artifact row.
:arg STRING qc_flag: API string of the latest qc_flag of the artifact.
:arg ReagentLabel reagentlabels: reagentlabel rows associated with the Artifact row.
:arg Sample samples: Sample rows associated with the Artifact row.
:arg ArtifactState states: ArtifactState rows associated with the Artifact row.
:arg ArtifactUdfView udfs: ArtifactUdfView row associated the Artifact row.
:arg dict udf_dict: A dictionary of udfs with correct types (Strings, Floats and Booleans).
:arg list routes: a list of routing actions taken on the given artifact
"""
__tablename__ = 'artifact'
artifactid = Column(Integer, primary_key=True)
name = Column(String)
luid = Column(String)
volume = Column(Float)
concentration = Column(Float)
origvolume = Column(Float)
origconcentration = Column(Float)
datastoreid = Column(Integer)
isworking = Column(Boolean)
isoriginal = Column(Boolean)
isglobal = Column(Boolean)
isgenealogyartifact=Column(Boolean)
ownerid = Column(Integer)
createddate = Column(TIMESTAMP)
lastmodifieddate = Column(TIMESTAMP)
lastmodifiedby = Column(Integer)
artifacttypeid = Column(Integer)
processoutputtypeid=Column(Integer)
currentstateid = Column(Integer)
originalstateid = Column(Integer)
compoundartifactid= Column(Integer)
outputindex = Column(Integer)
samples = relationship("Sample", secondary = artifact_sample_map, backref="artifacts")
ancestors = relationship("Artifact", secondary = artifact_ancestor_map,
primaryjoin=artifactid==artifact_ancestor_map.c.artifactid,
secondaryjoin=artifactid==artifact_ancestor_map.c.ancestorartifactid)
udfs = relationship("ArtifactUdfView")
states = relationship("ArtifactState", backref='artifact')
containerplacement = relationship('ContainerPlacement', uselist=False, backref='artifact')
routes = relationship("RoutingAction", backref='artifact')
@hybrid_property
def udf_dict(self):
udf_dict={}
for udfrow in self.udfs:
if udfrow.udfvalue:
if udfrow.udftype == "Numeric":
udf_dict[udfrow.udfname]=float(udfrow.udfvalue)
elif udfrow.udftype == "Boolean":
udf_dict[udfrow.udfname]=(udfrow.udfvalue=="True")
else:
udf_dict[udfrow.udfname]=udfrow.udfvalue
return udf_dict
@hybrid_property
def qc_flag(self):
latest_state=sorted(self.states, key=lambda x:x.lastmodifieddate)[-1]
if latest_state.qcflag==0:
return 'UNKNOWN'
elif latest_state.qcflag==1:
return 'PASSED'
elif latest_state.qcflag==2:
return 'FAILED'
else:
return 'ERROR'
def __repr__(self):
return "<Artifact(id={}, name={})>".format(self.artifactid, self.name)
class ArtifactUdfView(Base):
"""
View mapping udfs with artifacts through the datastores.
:arg INTEGER artifactid: the (short) artifact id
:arg STRING udtname: name of the user defined type
:arg STRING udfname: name of the user defined field
:arg STRING udftype: type of the user defined field
:arg STRING udfvalue: value of the user defined field
:arg STRING udfunitlabel: unit of the user defined field
"""
__tablename__ = 'artifact_udf_view'
artifactid = Column(Integer, ForeignKey('artifact.artifactid') , primary_key=True)
udtname = Column(String, primary_key=True)
udfname = Column(String, primary_key=True)
udftype = Column(String, primary_key=True)
udfvalue = Column(String, primary_key=True)
udfunitlabel = Column(String, primary_key=True)
def __repr__(self):
return "<ArtifactUdf(id={}, key={}, value={})>".format(self.artifactid, self.udfname, self.udfvalue)
class ProcessUdfView(Base):
"""
View mapping udfs with processes through the datastores.
:arg INTEGER processid: the (short) process id
:arg INTEGER typeid: the process type id
:arg STRING udtname: name of the user defined type
:arg STRING udfname: name of the user defined field
:arg STRING udftype: type of the user defined field
:arg STRING udfvalue: value of the user defined field
:arg STRING udfunitlabel: unit of the user defined field
"""
__tablename__ = 'process_udf_view'
processid = Column(Integer, ForeignKey('process.processid') , primary_key=True)
typeid = Column(Integer, ForeignKey('processtype.typeid'), primary_key=True)
udtname = Column(String, primary_key=True)
udfname = Column(String, primary_key=True)
udftype = Column(String, primary_key=True)
udfvalue = Column(String, primary_key=True)
udfunitlabel = Column(String, primary_key=True)
def __repr__(self):
return "<ProcessUdf(id={}, key={}, value={})>".format(self.processid, self.udfname, self.udfvalue)
class ContainerPlacement(Base):
"""
Table mapping sample placement in the containers
:arg INTEGER placementid: internal placement ID. Primary key.
:arg INTEGER containerid: the associated container id
:arg INTEGER wellxposition: the horizontal position in the container of the sample
:arg INTEGER wellyposition: the vertical position in the container of the sample
:arg TIMESTAMP dateplaced: timestamp of the placement creation
:arg INTEGER ownerid: researcherid of the user who made the placement
:arg INTEGER datastoreid: id of the associated datastore
:arg BOOLEAN isglobal: *unknown*
:arg TIMESTAMP createddate: timestamp of the placement creation
:arg TIMESTAMP lastmodifieddate: timestamp of the last modification
:arg INTEGER lastmodifiedby: researcherid of the last modifier
:arg INTEGER reagentid: Reagent ID used in that placement
:arg INTEGER processartifactid: artifact id of the artifact involved in that placement
The following attributes are *not* found in the table, but are available through mapping
:arg Container container: Container row associated with the ContainerPlacement row.
:arg STRING api_string: string reporting the position in the same fashion as the API does.
"""
__tablename__ = 'containerplacement'
placementid = Column(Integer, primary_key=True)
containerid = Column(Integer, ForeignKey('container.containerid'), primary_key=True)
wellxposition = Column(Integer)
wellyposition = Column(Integer)
dateplaced = Column(TIMESTAMP)
ownerid = Column(Integer)
datastoreid = Column(Integer)
isglobal = Column(Boolean)
createddate = Column(TIMESTAMP)
lastmodifieddate = Column(TIMESTAMP)
lastmodifiedby = Column(Integer)
reagentid = Column(Integer)
processartifactid = Column(Integer, ForeignKey('artifact.artifactid'))
container=relationship('Container', uselist=False)
def get_x_position(self):
"""Get the X position of the placement according to Container type"""
ctype=self.container.type
start=0
if ctype.isxalpha:
start=65
start+=ctype.xindexstartsat
value=start+self.wellxposition
if ctype.isxalpha:
return chr(value)
return value
def get_y_position(self):
"""Get the Y position of the placement according to Container type"""
ctype=self.container.type
start=0
if ctype.isyalpha:
start=65
start+=ctype.yindexstartsat
value=start+self.wellyposition
if ctype.isyalpha:
return chr(value)
return value
@hybrid_property
def api_string(self):
return "{0}:{1}".format(self.get_y_position(), self.get_x_position())
def __repr__(self):
return "<ContainerPlacement(id={}, pos={}:{}, cont={}, art={})>".format(self.placementid, self.wellxposition, self.wellyposition, self.containerid, self.processartifactid)
class Container(Base):
"""Table mapping containers
:arg INTEGER containerid: The (short) container id. Primary Key.
:arg STRING subtype: The container type
:arg STRING luid: The (long) container id
:arg BOOLEAN isvisible: *unknown*
:arg STRING name: The container name
:arg INTEGER ownerid: Researcher ID of the container creator
:arg INTEGER datastoreid: id of the associated datastore
:arg BOOLEAN isglobal: *unknown*
:arg TIMESTAMP createddate: The date of creation
:arg TIMESTAMP lastmodifieddate: The date of last modification
:arg INTEGER lastmodifiedby: researcherid of the last modifier
:arg INTEGER stateid: placeholders for empty, populated, depleted, discarded
:arg INTEGER typeid: container type id from containertype (not mapped)
:arg STRING lotnumber: *unknown*
:arg TIMESTAMP expirydate: *unknown*
The following attributes are *not* found in the table, but are available through mapping
:arg EntityUdfView udfs: EntityUdfView row associated with the Container row.
:arg ContainerType type: ContainerType row associated with the Container row.
"""
__tablename__ = 'container'
containerid = Column(Integer, primary_key=True)
subtype = Column(String)
luid = Column(String)
isvisible = Column(Boolean)
name = Column(String)
ownerid = Column(Integer)
datastoreid = Column(Integer)
isglobal = Column(Boolean)
createddate = Column(TIMESTAMP)
lastmodifieddate = Column(TIMESTAMP)
lastmodifiedby = Column(Integer)
stateid = Column(Integer)
typeid = Column(Integer, ForeignKey('containertype.typeid'))
lotnumber = Column(String)
expirydate = Column(TIMESTAMP)
#the entity id of Containers is 27
udfs = relationship("EntityUdfView", foreign_keys=containerid, remote_side=EntityUdfView.attachtoid, uselist=True,
primaryjoin="and_(Container.containerid==EntityUdfView.attachtoid, EntityUdfView.attachtoclassid==27)")
type=relationship("ContainerType", uselist=False)
def __repr__(self):
return "<Container(id={}, name={})>".format(self.containerid, self.name)
class ContainerType(Base):
"""Table mapping containertype
:arg INTEGER typeid: internal container type id
:arg STRING name: container type name
:arg INTEGER sequencenumber: *unknown*
:arg BOOLEAN isvisible: *unknown*
:arg INTEGER numxpositions: number of valid x positions in the container
:arg BOOLEAN isxalpha: true if the x axis is coded by letter
:arg INTEGER numypositions: number of valid y positions in the container
:arg BOOLEAN isyalpha: true if the y axis is coded by letter
:arg INTEGER xindexstartsat: first value of the x axis
:arg INTEGER yindexstartsat: first value of the y axis
:arg INTEGER iconsetconstant: *unknown*
:arg INTEGER ownerid: researcherid of the owner of the container type
:arg INTEGER datastoreid: id of the associated datastore
:arg BOOLEAN isglobal: *unknown*
:arg TIMESTAMP createddate: date of creation of the type
:arg TIMESTAMP lastmodifieddate: date of last modification
:arg INTEGER lastmodifiedby: researcher id of the last modifier
:arg STRING subtype: *unknown*
:arg STRING vendoruniqueid: *unknown*
:arg BOOLEAN istube: true if the container is a tube
"""
__tablename__ = 'containertype'
typeid = Column(Integer, primary_key=True)
name = Column(String)
sequencenumber = Column(Integer)
isvisible = Column(Boolean)
numxpositions = Column(Integer)
isxalpha = Column(Boolean)
numypositions = Column(Integer)
isyalpha = Column(Boolean)
xindexstartsat = Column(Integer)
yindexstartsat = Column(Integer)
iconsetconstant = Column(Integer)
ownerid = Column(Integer)
datastoreid = Column(Integer)
isglobal = Column(Boolean)
createddate = Column(TIMESTAMP)
lastmodifieddate = Column(TIMESTAMP)
lastmodifiedby = Column(Integer)
subtype = Column(String)
vendoruniqueid = Column(String)
istube = Column(Boolean)
def __repr__(self):
return "<ContainerType(id={}, name={})>".format(self.typeid, self.name)
class ReagentLabel(Base):
"""Table mapping reagent labels
:arg INTEGER labelid: The reagent label id. Primary Key.
:arg STRING name: The reagent label name
:arg INTEGER ownerid: Researcher ID of the reagent label creator
:arg INTEGER datastoreid: id of the associated datastore
:arg BOOLEAN isglobal: *unknown*
:arg TIMESTAMP createddate: The date of creation
:arg TIMESTAMP lastmodifieddate: The date of last modification
:arg INTEGER lastmodifiedby: researcherid of the last modifier
The following attributes are *not* found in the table, but are available through mapping
:arg Artifact artifacts: list of artifacts linked through the artifact_label junction table.
"""
__tablename__ = 'reagentlabel'
labelid= Column(Integer, primary_key=True)
name = Column(String)
ownerid = Column(Integer)
datastoreid = Column(Integer)
isglobal = Column(Boolean)
createddate = Column(TIMESTAMP)
lastmodifieddate = Column(TIMESTAMP)
lastmodifiedby = Column(Integer)
artifacts = relationship("Artifact", secondary = artifact_label_map,
backref='reagentlabels')
def __repr__(self):
return "<ReagentLabel(id={}, name={})>".format(self.labelid, self.name)
class Analyte(Base):
""" Table mapping Analytes
:arg INTEGER artifactid: artifact id of the analyte. Primary key
:arg INTEGER analyteid: internal analyte id
:arg BOOLEAN iscalibrant: *unknown*
:arg INTEGER sequencenumber: *unknown*
:arg BOOLEAN isvisible: *unknown*
The following attributes are *not* found in the table, but are available through mapping
:arg Artifact artifact: artifact row corresponding to the analyte row.
"""
__tablename__ = 'analyte'
artifactid = Column(Integer, ForeignKey('artifact.artifactid'), primary_key=True)
analyteid = Column(Integer)
iscalibrant = Column(Boolean)
sequencenumber = Column(Integer)
isvisible = Column(Boolean)
artifact=relationship("Artifact", uselist=False)
def __repr__(self):
return "<Analyte(id={})>".format(self.artifactid)
class ResultFile(Base):
""" Table mapping ResultFiles
:arg INTEGER artifactid: artifact id of the ResultFile. Primary key
:arg INTEGER fileid: internal file id
:arg STRING type: *unknown*
:arg INTEGER parsestatus: *unknown*
:arg INTEGER status: *unknown*
:arg STRING commandid: *unknown*
:arg INTEGER glsfileid: id of the corresponding row in glsfile
The following attributes are *not* found in the table, but are available through mapping
:arg Artifact artifact: artifact row corresponding to the ResultFile row.
:arg GlsFile glsfile: glsfile row corresponding to the ResultFile row.
"""
__tablename__ = 'resultfile'
artifactid = Column(Integer, ForeignKey('artifact.artifactid'), primary_key=True)
fileid = Column(Integer)
type = Column(String)
parsestatus = Column(Integer)
status = Column(Integer)
commandid = Column(String)
glsfileid = Column(Integer)
artifact=relationship("Artifact", uselist=False)
def __repr__(self):
return "<ResultFile(id={})>".format(self.artifactid)
class GlsFile(Base):
""" Table mapping Glsfiles
:arg INTEGER fileid: internal file id of corresponding ResultFile. Primary key.
:arg STRING server: ftp location
:arg STRING contenturi: URI to the file
:arg STRING luid: long file id
:arg STRING originallocation: original path of the file on the uploader's computer.
:arg BOOLEAN ispublished: *unknown*
:arg INTEGER ownerid: Researcher ID of the file creator
:arg INTEGER datastoreid: id of the associated datastore
:arg BOOLEAN isglobal: *unknown*
:arg TIMESTAMP createddate: The date of creation
:arg TIMESTAMP lastmodifieddate: The date of last modification
:arg INTEGER lastmodifiedby: researcherid of the last modifier
:arg INTEGER attachtoid: *unknown*
:arg INTEGER attachtoclassid: *unknown*
The following attributes are *not* found in the table, but are available through mapping
:arg ResultFile file: ResultFile row corresponding to the GlsFile row (the reverse 'glsfile' backref is set on ResultFile).
"""
__tablename__ = 'glsfile'
fileid = Column(Integer, ForeignKey('resultfile.glsfileid'), primary_key=True)
server = Column(String)
contenturi = Column(String)
luid = Column(String)
originallocation = Column(String)
ispublished = Column(Boolean)
ownerid = Column(Integer)
datastoreid = Column(Integer)
isglobal = Column(Boolean)
createddate = Column(TIMESTAMP)
lastmodifieddate = Column(TIMESTAMP)
lastmodifiedby = Column(Integer)
attachtoid = Column(Integer)
attachtoclassid = Column(Integer)
file=relationship("ResultFile",uselist=False, backref="glsfile")
def __repr__(self):
return "<GlsFile(id={})>".format(self.fileid)
class Researcher(Base):
""" Table mapping Researchers
:arg INTEGER researcherid: internal researcher id. Primary key.
:arg INTEGER roleid: internal role id
:arg STRING firstname: First name of the researcher
:arg STRING lastname: Last name of the researcher
:arg STRING title: researcher's title, if any
:arg STRING initials: researcher's initials
:arg INTEGER ownerid: id of the row creator
:arg INTEGER datastoreid: id of the associated datastore
:arg BOOLEAN isglobal: *unknown*
:arg TIMESTAMP createddate: The date of creation
:arg TIMESTAMP lastmodifieddate: The date of last modification
:arg INTEGER lastmodifiedby: researcherid of the last modifier
:arg STRING phone: researcher's phone number
:arg STRING email: researcher's email address
:arg STRING fax: researcher's fax number
:arg INTEGER addressid: id of the associated Address row. (Not mapped)
:arg INTEGER labid: id of the associated Lab row.
:arg INTEGER supervisorid: researcher id of the researcher's supervisor
:arg BOOLEAN isapproved: has been validated as a user
:arg STRING requestedsupervisorfirstname: *unknown*
:arg STRING requestedsupervisorlastname: *unknown*
:arg STRING requestedusername: *unknown*
:arg STRING requestedpassword: *unknown*
:arg STRING requestedlabname: *unknown*
:arg LARGEBINARY avatar: base64 encoding of the avatar image
:arg STRING avatarcontenttype: mime type of the avatar image
"""
__tablename__ = 'researcher'
researcherid = Column(Integer, primary_key=True)
roleid = Column(Integer)
firstname = Column(String)
lastname = Column(String)
title = Column(String)
initials = Column(String)
ownerid = Column(Integer)
datastoreid = Column(Integer)
isglobal = Column(Boolean)
createddate = Column(TIMESTAMP)
lastmodifieddate = Column(TIMESTAMP)
lastmodifiedby = Column(Integer)
phone = Column(String)
email = Column(String)
fax = Column(String)
addressid = Column(Integer)
labid = Column(Integer, ForeignKey('lab.labid'))
supervisorid = Column(Integer)
isapproved = Column(Boolean)
requestedsupervisorfirstname = Column(String)
requestedsupervisorlastname = Column(String)
requestedusername = Column(String)
requestedpassword = Column(String)
requestedlabname = Column(String)
avatar = Column(LargeBinary)
avatarcontenttype = Column(String)
lab=relationship("Lab",uselist=False)
def __repr__(self):
return "<Researcher(id={}, name={} {}, initials={})>".format(self.researcherid, self.firstname, self.lastname, self.initials)
class EscalationEvent(Base):
""" Table mapping Escalation events
:arg INTEGER eventid: escalation event internal id. Primary Key.
:arg INTEGER processid: process ID where the escalation took place
:arg INTEGER originatorid: researcher id of the user requesting a review
:arg INTEGER reviewerid: researcher id of the user having to perform the review
:arg TIMESTAMP escalationdate: timestamp of the review request
:arg TIMESTAMP reviewdate: timestamp of the review completion
:arg STRING escalationcomment: comment of the review request
:arg STRING reviewcomment: comment of the review completion
:arg INTEGER datastoreid: id of the associated datastore
:arg BOOLEAN isglobal: *unknown*
:arg INTEGER ownerid: Researcher ID of the container creator
:arg TIMESTAMP createddate: The date of creation
:arg TIMESTAMP lastmodifieddate: The date of last modification
:arg INTEGER lastmodifiedby: researcherid of the last modifier
"""
__tablename__ = 'escalationevent'
eventid = Column(Integer, primary_key=True)
processid = Column(Integer, ForeignKey('process.processid'))
originatorid = Column(Integer)
reviewerid = Column(Integer)
escalationdate = Column(TIMESTAMP)
reviewdate = Column(TIMESTAMP)
escalationcomment = Column(String)
reviewcomment = Column(String)
datastoreid = Column(Integer)
isglobal = Column(Boolean)
ownerid = Column(Integer)
createddate = Column(TIMESTAMP)
lastmodifieddate = Column(TIMESTAMP)
lastmodifiedby = Column(Integer)
process=relationship("Process", uselist=False)
def __repr__(self):
return "<EscalationEvent(id={}, process={})>".format(self.eventid, self.processid)
class EscalatedSample(Base):
""" Table mapping the escalated samples
:arg INTEGER escalatedsampleid: the escalated sample internal id. Primary key.
:arg INTEGER escalationeventid: the associated escalation event id
:arg INTEGER artifactid: the associated artifact id.
:arg INTEGER ownerid: Researcher ID of the container creator
:arg INTEGER datastoreid: id of the associated datastore
:arg BOOLEAN isglobal: *unknown*
:arg TIMESTAMP createddate: The date of creation
:arg TIMESTAMP lastmodifieddate: The date of last modification
:arg INTEGER lastmodifiedby: researcherid of the last modifier
"""
__tablename__ = 'escalatedsample'
escalatedsampleid = Column(Integer, primary_key=True)
escalationeventid = Column(Integer, ForeignKey('escalationevent.eventid'))
artifactid = Column(Integer, ForeignKey('artifact.artifactid'))
ownerid = Column(Integer)
datastoreid = Column(Integer)
isglobal = Column(Boolean)
createddate = Column(TIMESTAMP)
lastmodifieddate = Column(TIMESTAMP)
lastmodifiedby = Column(Integer)
event=relationship("EscalationEvent", backref="escalatedsamples")
def __repr__(self):
return "<EscalatedSample(id={}, artifact={})>".format(self.escalatedsampleid, self.artifactid)
class ProcessIOTracker(Base):
"""Table mapping the input/outputs of processes
:arg INTEGER trackerid: internal tracker id. Primary key
:arg FLOAT inputvolume: *unknown*
:arg FLOAT inputconcentration: *unknown*
:arg INTEGER inputstatepreid: *unknown*
:arg INTEGER inputstatepostid: *unknown*
:arg INTEGER ownerid: Researcher ID of the container creator
:arg INTEGER datastoreid: id of the associated datastore
:arg BOOLEAN isglobal: *unknown*
:arg TIMESTAMP createddate: The date of creation
:arg TIMESTAMP lastmodifieddate: The date of last modification
:arg INTEGER lastmodifiedby: researcherid of the last modifier
:arg INTEGER inputartifactid: id of the associated input artifact
:arg INTEGER processid: id of the associated process
The following attributes are *not* found in the table, but are available through mapping
:arg Artifact artifact: Artifact row corresponding to the input artifact of the tracker.
"""
__tablename__ = 'processiotracker'
trackerid = Column(Integer, primary_key=True)
inputvolume = Column(Float)
inputconcentration = Column(Float)
inputstatepreid = Column(Integer)
inputstatepostid = Column(Integer)
ownerid = Column(Integer)
datastoreid = Column(Integer)
isglobal = Column(Boolean)
createddate = Column(TIMESTAMP)
lastmodifieddate = Column(TIMESTAMP)
lastmodifiedby = Column(Integer)
inputartifactid = Column(Integer, ForeignKey('artifact.artifactid'))
processid = Column(Integer, ForeignKey('process.processid'))
def __repr__(self):
return "<ProcessIOTracker(id={}, processid={}, inputartifactid={})>".format(self.trackerid, self.processid, self.inputartifactid)
class ArtifactState(Base):
"""Table mapping artifac states and QC
:arg INTEGER stateid: the internal state id. Primary key.
:arg INTEGER qcflag: 0: UNKNOWN, 1: PASSED, 2: FAILED
:arg INTEGER ownerid: Researcher ID of the container creator
:arg INTEGER datastoreid: id of the associated datastore
:arg BOOLEAN isglobal: *unknown*
:arg TIMESTAMP createddate: The date of creation
:arg TIMESTAMP lastmodifieddate: The date of last modification
:arg INTEGER lastmodifiedby: researcherid of the last modifier
:arg INTEGER artifactid: id of the associated artifact
"""
__tablename__ = 'artifactstate'
stateid = Column(Integer, primary_key=True)
qcflag = Column(Integer)
ownerid = Column(Integer)
datastoreid = Column(Integer)
isglobal = Column(Boolean)
createddate = Column(TIMESTAMP)
lastmodifieddate = Column(TIMESTAMP)
lastmodifiedby = Column(Integer)
artifactid = Column(Integer, ForeignKey('artifact.artifactid'))
def __repr__(self):
return "<ArtifactState(id={}, artifactid={})>".format(self.stateid, self.artifactid)
class OutputMapping(Base):
"""Table mapping the process outputs
:arg INTEGER mappingid: the internal mapping id
:arg FLOAT outputvolume: *unknown*
:arg FLOAT outputconcentration: *unknown*
:arg INTEGER ownerid: Researcher ID of the container creator
:arg INTEGER datastoreid: id of the associated datastore
:arg BOOLEAN isglobal: *unknown*
:arg TIMESTAMP createddate: The date of creation
:arg TIMESTAMP lastmodifieddate: The date of last modification
:arg INTEGER lastmodifiedby: researcherid of the last modifier
:arg INTEGER trackerid: trackerid of the associated processiotracker
:arg INTEGER outputartifactid: artifactid of the associated artifact
"""
__tablename__ = 'outputmapping'
mappingid = Column(Integer, primary_key=True)
outputvolume = Column(Float)
outputconcentration = Column(Float)
ownerid = Column(Integer)
datastoreid = Column(Integer)
isglobal = Column(Boolean)
createddate = Column(TIMESTAMP)
lastmodifieddate = Column(TIMESTAMP)
lastmodifiedby = Column(Integer)
trackerid = Column(Integer, ForeignKey('processiotracker.trackerid'))
outputartifactid = Column(Integer, ForeignKey('artifact.artifactid'))
tracker=relationship('ProcessIOTracker', backref='output')
def __repr__(self):
return "<OutputMapping(mappingid={}, trackerid={}, outputartifactid={})>".format(self.mappingid, self.trackerid, self.outputartifactid)
class Principals(Base):
"""Table mapping user information
:arg INTEGER principalid: internal principal id, primary key
:arg STRING username: username associated with that row
:arg STRING password: hashed password
:arg BOOLEAN isvisible: *unknown*
:arg BOOLEAN isloggedin: flag checking if the user is currently logged in to the system
:arg INTEGER datastoreid: id of the associated datastore
:arg INTEGER ownerid: id of the creator of that row
:arg BOOLEAN isglobal: *unknown*
:arg TIMESTAMP createddate: row creation date
:arg TIMESTAMP lastmodifieddate: row last modification date
:arg INTEGER lastmodifiedby: researcherid of the last modifier
:arg STRING ldapdn: *unknown*
:arg STRING ldapuuid: *unknown*
:arg BOOLEAN accountlocked : *unknown*
:arg INTEGER researcherid: id of the associated researcher row
:arg BOOLEAN locked: *unknown*
"""
__tablename__ = 'principals'
principalid = Column(Integer, primary_key=True)
username = Column(String)
password = Column(String)
isvisible = Column(Boolean)
isloggedin = Column(Boolean)
datastoreid = Column(Integer)
ownerid = Column(Integer)
isglobal = Column(Boolean)
createddate = Column(TIMESTAMP)
lastmodifieddate = Column(TIMESTAMP)
lastmodifiedby = Column(Integer)
ldapdn = Column(String)
ldapuuid = Column(String)
accountlocked = Column(Boolean)
researcherid = Column(Integer, ForeignKey('researcher.researcherid'))
locked = Column(Boolean)
researcher=relationship("Researcher")
def __repr__(self):
return "<Principals(principalid={}, username={}, researcherid={})>".format(self.principalid, self.username, self.researcherid)
class Lab(Base):
"""Table mapping Lab entities
:arg INTEGER labid: internal lab id. Primary key.
:arg STRING name: Lab name
:arg STRING website: URL to the lab's website
:arg INTEGER ownerid: id of the creator of that row
:arg INTEGER datastoreid: id of the associated datastore
:arg BOOLEAN isglobal: *unknown*
:arg TIMESTAMP createddate: row creation date
:arg TIMESTAMP lastmodifieddate: row last modification date
:arg INTEGER lastmodifiedby: researcherid of the last modifier
:arg INTEGER billingaddressid: ID of the associated billing address
:arg INTEGER shippingaddressid: ID of the associated shipping address
"""
__tablename__ = "lab"
labid = Column(Integer, primary_key=True)
name = Column(String)
website = Column(String)
ownerid = Column(Integer)
datastoreid = Column(Integer)
isglobal = Column(Boolean)
createddate = Column(TIMESTAMP)
lastmodifieddate = Column(TIMESTAMP)
lastmodifiedby = Column(Integer)
billingaddressid = Column(Integer)
shippingaddressid = Column(Integer)
#the entity id of Lab is 17
udfs = relationship("EntityUdfView", foreign_keys=labid, remote_side=EntityUdfView.attachtoid, uselist=True,
primaryjoin="and_(Lab.labid==EntityUdfView.attachtoid, EntityUdfView.attachtoclassid==17)")
@hybrid_property
def udf_dict(self):
udf_dict={}
for udfrow in self.udfs:
if udfrow.udfvalue:
if udfrow.udftype == "Numeric":
udf_dict[udfrow.udfname]=float(udfrow.udfvalue)
elif udfrow.udftype == "Boolean":
udf_dict[udfrow.udfname]=(udfrow.udfvalue=="True")
else:
udf_dict[udfrow.udfname]=udfrow.udfvalue
return udf_dict
def __repr__(self):
return "<Lab(labid={}, name={})>".format(self.labid, self.name)
class ReagentType(Base):
"""Table mapping the reagenttype table
:arg INTEGER reagenttypeid: internal reagent type id
:arg STRING name: name of the reagent type
:arg STRING meta_data: *unknown*
:arg STRING specialtype: *unknown*
:arg INTEGER ownerid: principal ID of the owner
:arg INTEGER datastoreid: *unknown*
:arg BOOLEAN isglobal: *unknown*
:arg TIMESTAMP createddate: date of creation
:arg TIMESTAMP lastmodifieddate: date of last modification
:arg INTEGER lastmodifiedby: principal id of the last modifier
:arg BOOLEAN isvisible: *unknown*
:arg INTEGER reagentcategoryid: the associated reagentcategory id
"""
__tablename__ = 'reagenttype'
reagenttypeid = Column(Integer, primary_key=True)
name = Column(String)
meta_data = Column('metadata', String)
specialtype = Column(String)
ownerid = Column(Integer)
datastoreid = Column(Integer)
isglobal = Column(Boolean)
createddate = Column(TIMESTAMP)
lastmodifieddate = Column(TIMESTAMP)
lastmodifiedby = Column(Integer)
isvisible = Column(Boolean)
reagentcategoryid = Column(Integer)
def __repr__(self):
return "<ReagentType(reagenttypeid={}, name={})>".format(self.reagenttypeid, self.name)
class RoutingAction(Base):
"""Table mapping the routingaction table, which governs the routing of artifacts.
:arg INTEGER routingactionid: internal routing action id
:arg STRING actiontype: name of the action. valid names : COMPLETE, ADVANCE, ESCALATE, REWORK, REMOVE, REPEAT, NO_ACTION
:arg INTEGER actionstepid: stepid from ProtocolStep
:arg INTEGER processid: processid of the process that the artifact is routed to
:arg INTEGER artifactid: artifactid of the artifact being routed
:arg INTEGER reworkedprocessid: if action is REWORK, processid of the step samples are being reworked to
:arg INTEGER reworkedartifactid: if action is REWORK, artifactid of the artifact being reworked
:arg INTEGER ownerid: principal ID of the owner
:arg INTEGER datastoreid: *unknown*
:arg BOOLEAN isglobal: *unknown*
:arg TIMESTAMP createddate: date of creation
:arg TIMESTAMP lastmodifieddate: date of last modification
:arg INTEGER lastmodifiedby: principal id of the last modifier
"""
__tablename__ = 'routingaction'
routingactionid = Column(Integer, primary_key=True)
actiontype = Column(String)
processid = Column(Integer)
artifactid = Column(Integer, ForeignKey('artifact.artifactid'))
reworkedprocessid = Column(Integer)
reworkedartifactid = Column(Integer)
ownerid = Column(Integer)
datastoreid = Column(Integer)
isglobal = Column(Boolean)
createddate = Column(TIMESTAMP)
lastmodifieddate = Column(TIMESTAMP)
lastmodifiedby = Column(Integer)
def __repr__(self):
return "<RoutingAction(routingactionid={}, actiontype={})>".format(self.routingactionid, self.actiontype)
|
|
import os, sys;
import h5py, user_config, cppext;
from numpy import *;
from share_fun import val_def;
from functions import generate_Umatrix;
def init_solver(parms, np):
solver_type = parms['SOLVER_TYPE'];
print '%s solver is used...'%solver_type;
input_args = {
'solver_path' : parms.get('SOLVER_EXE_PATH', ''),
'mpirun_path' : parms.get('SOLVER_MPIRUN_PATH', user_config.mpirun),
'np' : np
}
if solver_type == 'CTHYB_Matrix':
input_args['parm2xml'] = val_def(parms, 'PARMS2XML', user_config.parm2xml);
input_args['solver_path'] = user_config.solver_matrix;
solver = HybridizationMatrixSolver(input_args);
elif solver_type == 'CTHYB_Segment':
input_args['solver_path'] = user_config.solver_segment;
solver = HybridizationSegmentSolver(input_args);
elif solver_type == 'TRIQS':
input_args['solver_path'] = user_config.solver_triqs;
solver = TRIQSSolver(input_args);
elif solver_type == 'TRIQSOld':
input_args['solver_path'] = user_config.solver_triqs_old;
solver = TRIQSSolverOld(input_args);
else: raise ValueError('Solver %s unknown'%solver_type);
return solver;
class HybridizationMatrixSolver:
def __init__(self, input_args):
self.args = input_args;
def prepare(self, prefix, in_data):
# prepare hybtau file for CTQMC
print 'Prepare running solver for ' + prefix;
self.prefix = prefix;
parms = in_data['parms'];
hyb_tau = in_data['hybtau'];
FLAVORS = int(parms['FLAVORS']);
for f in range(FLAVORS): hyb_tau[:, f] = -hyb_tau[::-1, f];
hyb_tau = c_[linspace(0, float(parms['BETA']), int(parms['N_TAU']) + 1), hyb_tau];
savetxt(prefix+'.hybtau', hyb_tau);
if FLAVORS/2 == 3: Lattice = '"t2g system"';
if FLAVORS/2 == 2: Lattice = '"eg system"';
if FLAVORS/2 == 1: Lattice = '"site"';
# prepare parms file for CTQMC
green_only = 1; self.list_obs = None;
if int(parms['MEASURE']) > 0:
green_only = 0
self.list_obs = parms['OBSERVABLES'].split(',')
QMC_parms = {
'LATTICE_LIBRARY' : user_config.LatticeLibrary,
'LATTICE' : Lattice,
'MODEL_LIBRARY' : user_config.ModelLibrary,
'MODEL' : user_config.Model,
'L' : FLAVORS/2,
'SITES' : FLAVORS/2,
'GREEN_ONLY' : green_only,
'SEED' : random.random_integers(10000),
'SWEEPS' : val_def(parms, 'SWEEPS', 500000),
'THERMALIZATION' : val_def(parms, 'THERMALIZATION', 300),
'N' : parms['N_TAU'],
'N_ORDER' : val_def(parms, 'N_ORDER', 50),
'N_MEAS' : val_def(parms, 'N_MEAS', 200),
'N_SHIFT' : val_def(parms, 'N_SHIFT', 0),
'N_SWAP' : val_def(parms, 'N_SWAP', 0),
'BETA' : parms['BETA'],
'U' : parms['U'],
"U'" : float(parms['U']) - 2*float(parms['J']),
'J' : parms['J'],
'SPINS' : 2,
'CONSERVED_QUANTUMNUMBERS': '"Nup, Ndown"',
'F' : prefix + '.hybtau'
};
for f in range(FLAVORS/2):
QMC_parms['MUUP'+str(f)] = in_data['MU'][2*f];
QMC_parms['MUDOWN'+str(f)] = in_data['MU'][2*f+1];
solver_parms_file = open(prefix + '.parms', 'w');
for k, v in QMC_parms.iteritems():
solver_parms_file.write(k + ' = ' + str(v) + ';\n');
solver_parms_file.write('{}');
solver_parms_file.close();
def run(self):
cmd = '%s %s.parms %s 1>&2'%(self.args['parm2xml'], self.prefix,self.prefix);
print cmd; os.system(cmd);
cmd = '%s -n %d %s %s.in.xml'%(self.args['mpirun_path'], self.args['np'], self.args['solver_path'], self.prefix);
print cmd; return os.system(cmd);
def collect(self):
print 'Collect data from ' + self.prefix;
measure = 0;
collect_error = False;
if self.list_obs is not None:
print 'also collect data for observables ', self.list_obs;
if 'error' in self.list_obs:
collect_error = True;
self.list_obs.pop(self.list_obs.index('error'));
measure = 1;
Gtau, Gerr, obs = cppext.get_raw_data(self.prefix, measure, self.list_obs);
if collect_error: obs.update({'GreenError' : mean(Gerr, 0) });
return Gtau, obs;
class HybridizationSegmentSolver:
def __init__(self, input_args):
self.args = input_args;
def prepare(self, prefix, in_data):
# prepare hybtau file for CTQMC
print 'Prepare running solver for ' + prefix;
self.prefix = prefix;
self.list_obs = None;
self.parms = in_data['parms'];
self.MEASURE_freq = int(val_def(in_data['parms'], 'MEASURE_freq', 1));
parms = in_data['parms'];
FLAVORS = int(parms['FLAVORS']);
# prepare parms file for CTQMC
QMC_parms = {
'SEED' : random.random_integers(10000),
'SWEEPS' : int(val_def(parms, 'SWEEPS', 500000)),
'THERMALIZATION' : int(val_def(parms, 'THERMALIZATION', 300)),
'N_TAU' : int(parms['N_TAU']),
'N_HISTOGRAM_ORDERS' : int(val_def(parms, 'N_ORDER', 50)),
'N_MEAS' : int(val_def(parms, 'N_MEAS', 100)),
'N_CYCLES' : int(val_def(parms, 'N_CYCLES', 30)),
'BETA' : float(parms['BETA']),
'U_MATRIX' : self.prefix+'.Umatrix',
'MU_VECTOR' : self.prefix+'.MUvector',
'BASENAME' : prefix,
'DELTA' : prefix + '.hybtau',
'N_ORBITALS' : FLAVORS,
'MEASURE_freq' : self.MEASURE_freq,
'N_MATSUBARA' : int(parms['N_CUTOFF']),
'MAX_TIME' : val_def(parms, 'MAX_TIME', 80000),
};
self.Norder = QMC_parms['N_HISTOGRAM_ORDERS'];
solver_parms_file = open(prefix + '.parms', 'w');
for k, v in QMC_parms.iteritems(): solver_parms_file.write(k + ' = ' + str(v) + ';\n');
# Umatrix: either Slater-Kanamori form or using Slater integrals
Umatrix = generate_Umatrix(float(parms['U']), float(parms['J']),
FLAVORS/2, val_def(parms, 'INTERACTION_TYPE', 'SlaterKanamori'));
hyb_tau = in_data['hybtau'];
hyb_tau = c_[linspace(0, float(parms['BETA']), int(parms['N_TAU']) + 1), hyb_tau];
savetxt(prefix+'.hybtau', hyb_tau);
savetxt(self.prefix+'.Umatrix', Umatrix);
savetxt(self.prefix+'.MUvector', in_data['MU']);
def run(self):
FLAVORS = int(self.parms['FLAVORS']);
cmd = '%s -n %d %s %s.parms 1>&2'%(self.args['mpirun_path'], self.args['np'], self.args['solver_path'], self.prefix);
print cmd;
retval = os.system(cmd);
gh5 = h5py.File('%s.out.h5'%self.prefix, 'r');
sign = float(gh5['/simulation/results/Sign/mean/value'][...]);
if sign < 0.99: print >> sys.stderr, 'sign = %.4f: Run QMC again for %s!'%(sign, self.prefix); retval = 1;
for i in range(FLAVORS):
norder = float(gh5['/simulation/results/order_%d/mean/value'%i][...]);
if norder > self.Norder:
print >> sys.stderr, "mean Norder %.2f of flavor %d > Norder = %d"%(norder, i, self.Norder);
retval = 1;
gh5.close(); del gh5;
return retval;
def collect(self):
print 'Collect data from ' + self.prefix;
FLAVORS = int(self.parms['FLAVORS']);
obs = None;
gh5 = h5py.File('%s.out.h5'%self.prefix, 'r');
Gtau = array([gh5['/G_tau/%d/mean/value'%f][:] for f in range(FLAVORS)]).T;
Serr = None;
if self.MEASURE_freq:
Giwn = array([gh5['/G_omega/%d/mean/value'%f][:, 0] + 1j*gh5['/G_omega/%d/mean/value'%f][:, 1] for f in range(FLAVORS)]).T;
Siwn = array([gh5['/S_omega/%d/mean/value'%f][:, 0] + 1j*gh5['/S_omega/%d/mean/value'%f][:, 1] for f in range(FLAVORS)]).T;
if int(self.parms['MEASURE']) > 0:
if 'error' in self.parms['OBSERVABLES']:
Serr = zeros((len(Siwn), FLAVORS));
for f in range(FLAVORS):
Fval = gh5['simulation/results/fw_re_%d/mean/value'%f][:] + 1j*gh5['simulation/results/fw_im_%d/mean/value'%f][:];
Ferr = gh5['simulation/results/fw_re_%d/mean/error'%f][:] + 1j*gh5['simulation/results/fw_im_%d/mean/error'%f][:];
Gval = gh5['simulation/results/gw_re_%d/mean/value'%f][:] + 1j*gh5['simulation/results/gw_im_%d/mean/value'%f][:];
Gerr = gh5['simulation/results/gw_re_%d/mean/error'%f][:] + 1j*gh5['simulation/results/gw_im_%d/mean/error'%f][:];
Serr[:, f] = abs(Fval/Gval) * sqrt(abs(Ferr/Fval)**2 + abs(Gerr/Gval)**2);
nn = array([]);
nf = -Gtau[-1, :];
for i in range(FLAVORS):
for j in range(i+1):
if i == j: tmp = nf[i];
else: tmp = float(gh5['/simulation/results/nn_%d_%d/mean/value'%(i,j)][...]);
nn = r_[nn, tmp];
gh5.close();
obs = { 'nn' : nn };
if Serr is not None: obs.update({'SelfEnergyError': Serr});
if self.MEASURE_freq: return Gtau, obs, Giwn, Siwn;
else: return Gtau, obs;
class TRIQSSolverOld:
def __init__(self, input_args):
self.args = input_args;
def prepare(self, prefix, in_data):
print 'Prepare running solver for ' + prefix;
self.prefix = prefix;
parms = in_data['parms'];
BETA = float(parms['BETA']);
NCOR = int(parms['FLAVORS']) / 2;
self.beta = BETA;
self.Ntau = int(parms['N_TAU']) + 1;
self.Ncor = NCOR;
self.measure = int(parms['MEASURE'])
hyb_mat = in_data['hybmat'];
hyb_tail = in_data['hybtail'];
wn = (2*arange(size(hyb_mat, 0))+1)*pi/BETA;
savetxt(prefix+'.hybmat.real', c_[wn, hyb_mat.real]);
savetxt(prefix+'.hybmat.imag', c_[wn, hyb_mat.imag]);
savetxt(prefix+'.hybmat.tail', hyb_tail);
savetxt(prefix+'.MUvector', in_data['MU']);
Umatrix = generate_Umatrix(float(parms['U']), float(parms['J']),
NCOR, val_def(parms, 'INTERACTION_TYPE', 'SlaterKanamori'));
savetxt(prefix+'.Umatrix', Umatrix);
# prepare parms file for CTQMC
QMC_parms = {
'SWEEPS_EACH_NODE' : int(val_def(parms, 'SWEEPS', 500000))/self.args['np'],
'THERMALIZATION' : val_def(parms, 'THERMALIZATION', 50000),
'N_MEAS' : val_def(parms, 'N_MEAS', 100),
'BETA' : parms['BETA'],
'U_MATRIX' : prefix+'.Umatrix',
'MU_VECTOR' : prefix + '.MUvector',
'HYB_MAT' : prefix + '.hybmat',
'NCOR' : NCOR,
'HDF5_OUTPUT' : prefix + '.solution.h5',
'N_LEGENDRE' : val_def(parms, 'TRIQS_N_LEGENDRE', 50),
'ACCUMULATION' : val_def(parms, 'TRIQS_ACCUMULATION', 'legendre'),
'SPINFLIP' : val_def(parms, 'TRIQS_SPINFLIP', 1),
'MEASURE' : self.measure,
};
solver_parms_file = open(prefix + '.parms', 'w');
for k, v in QMC_parms.iteritems(): solver_parms_file.write(k + ' = ' + str(v) + ';\n');
def run(self):
cmd = '%s -n %d %s %s.parms 1>&2'%(self.args['mpirun_path'], self.args['np'], self.args['solver_path'], self.prefix);
print cmd;
retval = os.system(cmd);
return retval;
def collect(self):
print 'Collect data from ' + self.prefix;
R = h5py.File(self.prefix+'.solution.h5', 'r');
BETA = self.beta;
SPINS = 2; spins = ('up', 'dn');
NCOR = self.Ncor;
G = []; S = []; nf = []; Gl = [];
is_legendre = True if 'G_Legendre' in R else False;
for f in range(NCOR):
for sp in spins:
G.append(R['G/%s%d/data'%(sp,f)][:, 0, 0, 0] + 1j*R['G/%s%d/data'%(sp,f)][:, 0, 0, 1]);
if is_legendre: Gl.append(R['G_Legendre/%s%d/data'%(sp,f)][:, 0, 0]);
S.append(R['Sigma/%s%d/data'%(sp,f)][:, 0, 0, 0] + 1j*R['Sigma/%s%d/data'%(sp,f)][:, 0, 0, 1]);
nf.append(float(R['Observables/N_%s%d'%(sp,f)][...]));
Giwn = array(G).T;
Siwn = array(S).T;
nf = array(nf);
nn = array([]);
for i in range(SPINS*NCOR):
for j in range(i+1):
if i == j: tmp = nf[i];
else: tmp = float(R['Observables/nn_%d_%d'%(i,j)][...]);
nn = r_[nn, tmp];
obs = { 'nn' : nn };
Gtau = zeros((self.Ntau, SPINS*NCOR), dtype = float);
for f in range(SPINS*NCOR):
Gtau[:, f] = cppext.IFT_mat2tau(Giwn[:, f].copy(), self.Ntau, BETA, 1.0, 0.0);
Gtau[-1, :] = -nf;
Gtau[0, :] = -(1-nf);
order_min = int(R['Sigma/up0/singularity/omin'][...]);
order_max = min(R['Sigma/up0/singularity/mask'][:]);
Stail = zeros((order_max-order_min+1, NCOR*SPINS), dtype = complex);
for f in range(NCOR):
for s, sp in enumerate(spins):
tmp = R['Sigma/%s%d/singularity/data'%(sp,f)][:order_max-order_min+1, 0, 0];
Stail[:, 2*f+s] = tmp[:,0] + 1j*tmp[:,1];
Stail = r_[order_min*ones((1, NCOR*SPINS)), Stail, order_max*ones((1, NCOR*SPINS))];
if is_legendre:
GLegendre = array(Gl).T;
obs = { 'SelfEnergyTail' : Stail, 'GLegendre' : GLegendre , 'nn' : nn };
else: obs = { 'SelfEnergyTail' : Stail, 'nn' : nn };
if self.measure > 0:
if 'TimeCorrelators' in R:
opr_list = eval(str(R['TimeCorrelators/indices'][...]))
for opr_name in opr_list:
obs[opr_name] = R['TimeCorrelators/%s/data'%opr_name][:]
R.close()
return Gtau, obs, Giwn, Siwn;
class TRIQSSolver(object):
def __init__(self, input_args):
self.args = input_args;
def prepare(self, prefix, in_data):
print 'Prepare input for the impurity solver'
self.mysys = in_data['parms'];
self.prefix = prefix
self.measure = int(self.mysys['MEASURE'])
mysys = self.mysys
beta = float(mysys['BETA'])
hyb_mat = in_data['hybmat']
hyb_tail = in_data['hybtail']
assert(int(mysys['N_MAX_FREQ']) == len(hyb_mat))
wn = (2*arange(int(mysys['N_MAX_FREQ']))+1)*pi/beta
savetxt('%s.hybmat.real'%prefix, c_[wn, hyb_mat.real]);
savetxt('%s.hybmat.imag'%prefix, c_[wn, hyb_mat.imag]);
savetxt('%s.hybmat.tail'%prefix, hyb_tail);
savetxt('%s.mu_eff'%prefix, in_data['MU']);
# prepare parms file for TRIQS solver
triqs_parms = {
'n_cycles' : int(mysys['SWEEPS'])/self.args['np'],
'length_cycle' : mysys.get('N_MEAS', 100),
'n_warmup_cycles' : mysys.get('THERMALIZATION', 10000),
'max_time' : mysys.get('MAX_TIME', -1),
'partition_method' : mysys.get('TRIQS_PARTITION_METHOD',
'autopartition'),
'U' : mysys['U'],
'J' : mysys['J'],
'INTERACTION' : mysys.get('INTERACTION', 'Kanamori'),
'HYB_MAT' : '%s.hybmat'%prefix,
'MU_VECTOR' : '%s.mu_eff'%prefix,
'BETA' : mysys['BETA'],
'NFLAVORS' : int(mysys['FLAVORS'])/2,
'NSPINS' : 2,
'N_TAU' : max(10001, 2*len(wn)),
'N_MAX_FREQ' : len(wn),
'HDF5_OUTPUT' : '%s.triqs.out.h5'%prefix,
'PREFIX' : prefix,
'MEASURE' : self.measure,
}
solver_parms_file = open('%s.parms'%prefix, 'w')
for k, v in triqs_parms.iteritems():
solver_parms_file.write(k + ' = ' + str(v) + ';\n')
def run(self):
print 'Running the solver %s'%('and measure static observables'
if self.measure else '')
cmd = '%s -n %d %s %s.parms 1>&2'%(self.args['mpirun_path'],
self.args['np'],
self.args['solver_path'],
self.prefix)
print cmd
retval = os.system(cmd)
return retval
def collect(self):
print 'Collect data from ' + self.prefix;
mysys = self.mysys
h5tmp = self.prefix+'.triqs.out.h5'
arch = h5py.File(h5tmp, 'r')
spin_names = ('up', 'dn')
nfreqs = int(mysys['N_MAX_FREQ'])
nflavors = int(mysys['FLAVORS']) / 2
nspins = 2
norbs = nflavors*nspins
ntau = int(mysys['N_TAU'])
beta = float(mysys['BETA'])
Giwn = zeros((nfreqs, norbs), dtype=complex)
Siwn = zeros((nfreqs, norbs), dtype=complex)
Gtau = zeros((ntau+1, norbs), dtype=float)
nf = zeros(norbs)
wn = (2*arange(nfreqs)+1)*pi/beta
for i in range(nflavors):
for s in range(nspins):
f = nspins*i+s
d = arch['Giwn/%s_%d/data'%(spin_names[s], i)][:nfreqs, 0, 0]
Giwn[:, f] = d[:, 0] + 1j*d[:, 1]
d = arch['Siwn/%s_%d/data'%(spin_names[s], i)][:nfreqs, 0, 0]
Siwn[:, f] = d[:, 0] + 1j*d[:, 1]
Gtau[:, f] = cppext.IFT_mat2tau(Giwn[:, f].copy(), ntau+1,
beta, 1.0, 0.0)
nf[f] = self._get_density_from_gmat(Giwn[:, f], [0, 1, 0])
#nf[f] = arch['Occupancy/%s_%d'%(spin_names[s], i)][...]
obs = {'sign' : arch['average_sign'][...]}
if 'Observables' in arch:
for k, v in arch['Observables'].iteritems(): obs[k] = v
return Gtau, obs, Giwn, Siwn
def _get_density_from_gmat(self, giwn, tail):
beta = float(self.mysys['BETA'])
# nfreqs = 1.5*int(self.mysys['N_CUTOFF'])
nfreqs = int(self.mysys['N_MAX_FREQ'])
wn = (2*arange(nfreqs)+1)*pi/beta
C = tail
density = 2./beta*real(sum(giwn[:nfreqs]) \
+ C[2]*sum(1./wn**2)) + 0.5*C[1] - beta*C[2]/4.
return density
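# Hedged driver sketch (illustration only): every solver class above follows the same
# prepare/run/collect protocol, so a calling DMFT loop can be written generically. The
# 'parms' and 'in_data' dictionaries are assumed to be prepared by the caller, and the
# shape of the returned tuple depends on the solver and its MEASURE settings.
def _run_impurity_solver_sketch(parms, np, prefix, in_data):
    solver = init_solver(parms, np)       # choose CTHYB_Matrix / CTHYB_Segment / TRIQS
    solver.prepare(prefix, in_data)       # write <prefix>.parms plus hybridization files
    if solver.run() != 0:                 # shells out through mpirun; nonzero means failure
        raise RuntimeError('impurity solver failed for %s' % prefix)
    return solver.collect()               # Gtau, obs (and Giwn, Siwn when measured)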
|
|
# remote.py
# Copyright (C) 2008, 2009 Michael Trier ([email protected]) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
# Module implementing a remote object allowing easy access to git remotes
from exc import GitCommandError
from ConfigParser import NoOptionError
from config import SectionConstraint
from git.util import (
LazyMixin,
Iterable,
IterableList
)
from git.db.interface import TransportDB
from refs import RemoteReference
import os
__all__ = ['Remote']
class PushInfo(object):
"""Wrapper for basic PushInfo to provide the previous interface which includes
resolved objects instead of plain shas
old_commit # object for the corresponding old_commit_sha"""
class FetchInfo(object):
"""Wrapper to restore the previous interface, resolving objects and wrapping
references"""
class Remote(LazyMixin, Iterable):
"""Provides easy read and write access to a git remote.
Everything not part of this interface is considered an option for the current
remote, allowing constructs like remote.pushurl to query the pushurl.
NOTE: When querying configuration, the configuration accessor will be cached
to speed up subsequent accesses."""
__slots__ = ( "repo", "name", "_config_reader" )
_id_attribute_ = "name"
def __init__(self, repo, name):
"""Initialize a remote instance
:param repo: The repository we are a remote of
:param name: the name of the remote, i.e. 'origin'"""
if not hasattr(repo, 'git'):
# note: at some point we could just create a git command instance ourselves
# but lets just be lazy for now
raise AssertionError("Require repository to provide a git command instance currently")
#END assert git cmd
if not isinstance(repo, TransportDB):
raise AssertionError("Require TransportDB interface implementation")
#END verify interface
self.repo = repo
self.name = name
if os.name == 'nt':
# some oddity: on windows, python 2.5, it for some reason does not realize
# that it has the config_writer property, but instead calls __getattr__
# which will not yield the expected results. 'pinging' the members
# with a dir call creates the config_writer property that we require
# ... bugs like these make me wonder whether python really wants to be used
# for production. It doesn't happen on linux though.
dir(self)
# END windows special handling
def __getattr__(self, attr):
"""Allows to call this instance like
remote.special( *args, **kwargs) to call git-remote special self.name"""
if attr == "_config_reader":
return super(Remote, self).__getattr__(attr)
# sometimes, probably due to a bug in python itself, we are being called
# even though a slot of the same name exists
try:
return self._config_reader.get(attr)
except NoOptionError:
return super(Remote, self).__getattr__(attr)
# END handle exception
def _config_section_name(self):
return 'remote "%s"' % self.name
def _set_cache_(self, attr):
if attr == "_config_reader":
self._config_reader = SectionConstraint(self.repo.config_reader(), self._config_section_name())
else:
super(Remote, self)._set_cache_(attr)
def __str__(self):
return self.name
def __repr__(self):
return '<git.%s "%s">' % (self.__class__.__name__, self.name)
def __eq__(self, other):
return self.name == other.name
def __ne__(self, other):
return not ( self == other )
def __hash__(self):
return hash(self.name)
@classmethod
def iter_items(cls, repo):
""":return: Iterator yielding Remote objects of the given repository"""
for section in repo.config_reader("repository").sections():
if not section.startswith('remote'):
continue
lbound = section.find('"')
rbound = section.rfind('"')
if lbound == -1 or rbound == -1:
raise ValueError("Remote-Section has invalid format: %r" % section)
yield Remote(repo, section[lbound+1:rbound])
# END for each configuration section
@property
def refs(self):
"""
:return:
IterableList of RemoteReference objects. It is prefixed, allowing
you to omit the remote path portion, i.e.::
remote.refs.master # yields RemoteReference('/refs/remotes/origin/master')"""
out_refs = IterableList(RemoteReference._id_attribute_, "%s/" % self.name)
out_refs.extend(RemoteReference.list_items(self.repo, remote=self.name))
assert out_refs, "Remote %s did not have any references" % self.name
return out_refs
@property
def stale_refs(self):
"""
:return:
IterableList RemoteReference objects that do not have a corresponding
head in the remote reference anymore as they have been deleted on the
remote side, but are still available locally.
The IterableList is prefixed, hence the 'origin' must be omitted. See
'refs' property for an example."""
out_refs = IterableList(RemoteReference._id_attribute_, "%s/" % self.name)
for line in self.repo.git.remote("prune", "--dry-run", self).splitlines()[2:]:
# expecting
# * [would prune] origin/new_branch
token = " * [would prune] "
if not line.startswith(token):
raise ValueError("Could not parse git-remote prune result: %r" % line)
fqhn = "%s/%s" % (RemoteReference._common_path_default,line.replace(token, ""))
out_refs.append(RemoteReference(self.repo, fqhn))
# END for each line
return out_refs
@classmethod
def create(cls, repo, name, url, **kwargs):
"""Create a new remote to the given repository
:param repo: Repository instance that is to receive the new remote
:param name: Desired name of the remote
:param url: URL which corresponds to the remote's name
:param kwargs:
Additional arguments to be passed to the git-remote add command
:return: New Remote instance
:raise GitCommandError: in case an origin with that name already exists"""
repo.git.remote( "add", name, url, **kwargs )
return cls(repo, name)
# add is an alias
add = create
@classmethod
def remove(cls, repo, name ):
"""Remove the remote with the given name"""
repo.git.remote("rm", name)
# alias
rm = remove
def rename(self, new_name):
"""Rename self to the given new_name
:return: self """
if self.name == new_name:
return self
self.repo.git.remote("rename", self.name, new_name)
self.name = new_name
try:
del(self._config_reader) # it contains cached values, section names are different now
except AttributeError:
pass
#END handle exception
return self
def update(self, **kwargs):
"""Fetch all changes for this remote, including new branches which will
be forced in (in case your local remote branch is no longer part of the new remote
branch's ancestry).
:param kwargs:
Additional arguments passed to git-remote update
:return: self """
self.repo.git.remote("update", self.name)
return self
def fetch(self, refspec=None, progress=None, **kwargs):
"""Fetch the latest changes for this remote
:param refspec:
A "refspec" is used by fetch and push to describe the mapping
between remote ref and local ref. They are combined with a colon in
the format <src>:<dst>, preceded by an optional plus sign, +.
For example: git fetch $URL refs/heads/master:refs/heads/origin means
"grab the master branch head from the $URL and store it as my origin
branch head". And git push $URL refs/heads/master:refs/heads/to-upstream
means "publish my master branch head as to-upstream branch at $URL".
See also git-push(1).
Taken from the git manual
:param progress: See 'push' method
:param kwargs: Additional arguments to be passed to git-fetch
:return:
IterableList(FetchInfo, ...) list of FetchInfo instances providing detailed
information about the fetch results
:note:
As fetch does not provide progress information to non-ttys, we cannot make
it available here unfortunately as in the 'push' method."""
return self.repo.fetch(self.name, refspec, progress, **kwargs)
def pull(self, refspec=None, progress=None, **kwargs):
"""Pull changes from the given branch, being the same as a fetch followed
by a merge of branch with your local branch.
:param refspec: see 'fetch' method
:param progress: see 'push' method
:param kwargs: Additional arguments to be passed to git-pull
:return: Please see 'fetch' method """
return self.repo.pull(self.name, refspec, progress, **kwargs)
def push(self, refspec=None, progress=None, **kwargs):
"""Push changes from source branch in refspec to target branch in refspec.
:param refspec: see 'fetch' method
:param progress:
Instance of type RemoteProgress allowing the caller to receive
progress information until the method returns.
If None, progress information will be discarded
:param kwargs: Additional arguments to be passed to git-push
:return:
IterableList(PushInfo, ...) iterable list of PushInfo instances, each
one informing about an individual head which had been updated on the remote
side.
If the push contains rejected heads, these will have the PushInfo.ERROR bit set
in their flags.
If the operation fails completely, the length of the returned IterableList will
be null."""
return self.repo.push(self.name, refspec, progress, **kwargs)
@property
def config_reader(self):
"""
:return:
GitConfigParser compatible object able to read options for only our remote.
Hence you may simply type config.get("pushurl") to obtain the information"""
return self._config_reader
@property
def config_writer(self):
"""
:return: GitConfigParser compatible object able to write options for this remote.
:note:
You can only own one writer at a time - delete it to release the
configuration file and make it useable by others.
To assure consistent results, you should only query options through the
writer. Once you are done writing, you are free to use the config reader
once again."""
writer = self.repo.config_writer()
# clear our cache to assure we re-read the possibly changed configuration
try:
del(self._config_reader)
except AttributeError:
pass
#END handle exception
return SectionConstraint(writer, self._config_section_name())
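# Hedged usage sketch (illustration only): 'repo' is assumed to satisfy the git-command and
# TransportDB checks performed in Remote.__init__. Option lookups such as '.url' go through
# Remote.__getattr__ and the cached, section-constrained config reader.
def _example_list_remotes(repo):
    """Collect (name, url) pairs for every remote configured on the repository."""
    return [(remote.name, remote.url) for remote in Remote.iter_items(repo)]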
|
|
"""
A simple JSON REST request abstraction layer that is used by the
``dropbox.client`` and ``dropbox.session`` modules. You shouldn't need to use this.
"""
import io
import pkg_resources
import socket
import ssl
import sys
import urllib.request, urllib.parse, urllib.error
try:
import json
except ImportError:
import simplejson as json
try:
import urllib3
except ImportError:
raise ImportError('Dropbox python client requires urllib3.')
SDK_VERSION = "2.2.0"
TRUSTED_CERT_FILE = pkg_resources.resource_filename(__name__, 'trusted-certs.crt')
class RESTResponse(io.IOBase):
"""
Responses to requests can come in the form of ``RESTResponse``. These are
thin wrappers around the socket file descriptor.
:meth:`read()` and :meth:`close()` are implemented.
It is important to call :meth:`close()` to return the connection
back to the connection pool to be reused. If a connection
is not closed by the caller it may leak memory. The object makes a
best-effort attempt upon destruction to call :meth:`close()`,
but it's still best to explicitly call :meth:`close()`.
"""
def __init__(self, resp):
# arg: A urllib3.HTTPResponse object
self.urllib3_response = resp
self.status = resp.status
self.version = resp.version
self.reason = resp.reason
self.strict = resp.strict
self.is_closed = False
def __del__(self):
# Attempt to close when ref-count goes to zero.
self.close()
def __exit__(self, typ, value, traceback):
# Allow this to be used in "with" blocks.
self.close()
# -----------------
# Important methods
# -----------------
def read(self, amt=None):
"""
Read data off the underlying socket.
Parameters
amt
Amount of data to read. Defaults to ``None``, indicating to read
everything.
Returns
Data off the socket. If ``amt`` is not ``None``, at most ``amt`` bytes are returned.
An empty string when the socket has no data.
Raises
``ValueError``
If the ``RESTResponse`` has already been closed.
"""
if self.is_closed:
raise ValueError('Response already closed')
return self.urllib3_response.read(amt)
BLOCKSIZE = 4 * 1024 * 1024 # 4MB at a time just because
def close(self):
"""Closes the underlying socket."""
# Double closing is harmless
if self.is_closed:
return
# Read any remaining crap off the socket before releasing the
# connection. Buffer it just in case it's huge
while self.read(RESTResponse.BLOCKSIZE):
pass
# Mark as closed and release the connection (exactly once)
self.is_closed = True
self.urllib3_response.release_conn()
@property
def closed(self):
return self.is_closed
# ---------------------------------
# Backwards compat for HTTPResponse
# ---------------------------------
def getheaders(self):
"""Returns a dictionary of the response headers."""
return self.urllib3_response.getheaders()
def getheader(self, name, default=None):
"""Returns a given response header."""
return self.urllib3_response.getheader(name, default)
# Some compat functions showed up recently in urllib3
try:
urllib3.HTTPResponse.flush
urllib3.HTTPResponse.fileno
def fileno(self):
return self.urllib3_response.fileno()
def flush(self):
return self.urllib3_response.flush()
except AttributeError:
pass
def create_connection(address):
host, port = address
err = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
sock.connect(sa)
return sock
except socket.error as e:
err = e
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise socket.error("getaddrinfo returns an empty list")
def json_loadb(data):
if sys.version_info >= (3,):
data = data.decode('utf8')
return json.loads(data)
class RESTClientObject(object):
def __init__(self, max_reusable_connections=8, mock_urlopen=None):
"""
Parameters
max_reusable_connections
max connections to keep alive in the pool
mock_urlopen
an optional alternate urlopen function for testing
This class uses ``urllib3`` to maintain a pool of connections. We attempt
to grab an existing idle connection from the pool, otherwise we spin
up a new connection. Once a connection is closed, it is reinserted
into the pool (unless the pool is full).
SSL settings:
- Certificates validated using Dropbox-approved trusted root certs
- TLS v1.0 (newer TLS versions are not supported by urllib3)
- Default ciphersuites. Choosing ciphersuites is not supported by urllib3
- Hostname verification is provided by urllib3
"""
self.mock_urlopen = mock_urlopen
self.pool_manager = urllib3.PoolManager(
num_pools=4, # only a handful of hosts. api.dropbox.com, api-content.dropbox.com
maxsize=max_reusable_connections,
block=False,
timeout=60.0, # long enough so datastores await doesn't get interrupted
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=TRUSTED_CERT_FILE,
ssl_version=ssl.PROTOCOL_TLSv1,
)
def request(self, method, url, post_params=None, body=None, headers=None, raw_response=False):
"""Performs a REST request. See :meth:`RESTClient.request()` for detailed description."""
post_params = post_params or {}
headers = headers or {}
headers['User-Agent'] = 'OfficialDropboxPythonSDK/' + SDK_VERSION
if post_params:
if body:
raise ValueError("body parameter cannot be used with post_params parameter")
body = params_to_urlencoded(post_params)
headers["Content-type"] = "application/x-www-form-urlencoded"
# Handle StringIO instances, because urllib3 doesn't.
if hasattr(body, 'getvalue'):
body = str(body.getvalue())
headers["Content-Length"] = len(body)
# Reject any headers containing newlines; the error from the server isn't pretty.
for key, value in list(headers.items()):
if isinstance(value, str) and '\n' in value:
raise ValueError("headers should not contain newlines (%s: %s)" %
(key, value))
try:
# Grab a connection from the pool to make the request.
# We return it to the pool when caller close() the response
urlopen = self.mock_urlopen if self.mock_urlopen else self.pool_manager.urlopen
r = urlopen(
method=method,
url=url,
body=body,
headers=headers,
preload_content=False
)
r = RESTResponse(r) # wrap up the urllib3 response before proceeding
except socket.error as e:
raise RESTSocketError(url, e)
except urllib3.exceptions.SSLError as e:
raise RESTSocketError(url, "SSL certificate error: %s" % e)
if r.status not in (200, 206):
raise ErrorResponse(r, r.read())
return self.process_response(r, raw_response)
def process_response(self, r, raw_response):
if raw_response:
return r
else:
s = r.read()
try:
resp = json_loadb(s)
except ValueError:
raise ErrorResponse(r, s)
r.close()
return resp
def GET(self, url, headers=None, raw_response=False):
assert type(raw_response) == bool
return self.request("GET", url, headers=headers, raw_response=raw_response)
def POST(self, url, params=None, headers=None, raw_response=False):
assert type(raw_response) == bool
if params is None:
params = {}
return self.request("POST", url,
post_params=params, headers=headers, raw_response=raw_response)
def PUT(self, url, body, headers=None, raw_response=False):
assert type(raw_response) == bool
return self.request("PUT", url, body=body, headers=headers, raw_response=raw_response)
class RESTClient(object):
"""
A class with all static methods to perform JSON REST requests that is used internally
by the Dropbox Client API. It provides just enough gear to make requests
and get responses as JSON data (when applicable). All requests happen over SSL.
"""
IMPL = RESTClientObject()
@classmethod
def request(cls, *n, **kw):
"""Perform a REST request and parse the response.
Parameters
method
An HTTP method (e.g. ``'GET'`` or ``'POST'``).
url
The URL to make a request to.
post_params
A dictionary of parameters to put in the body of the request.
This option may not be used if the body parameter is given.
body
The body of the request. Typically, this value will be a string.
It may also be a file-like object. The body
parameter may not be used with the post_params parameter.
headers
A dictionary of headers to send with the request.
raw_response
Whether to return a :class:`RESTResponse` object. Default ``False``.
It's best enabled for requests that return large amounts of data that you
would want to ``.read()`` incrementally rather than loading into memory. Also
use this for calls where you need to read metadata like status or headers,
or if the body is not JSON.
Returns
The JSON-decoded data from the server, unless ``raw_response`` is
set, in which case a :class:`RESTResponse` object is returned instead.
Raises
:class:`ErrorResponse`
The returned HTTP status is not 200, or the body was
not parsed from JSON successfully.
:class:`RESTSocketError`
A ``socket.error`` was raised while contacting Dropbox.
"""
return cls.IMPL.request(*n, **kw)
@classmethod
def GET(cls, *n, **kw):
"""Perform a GET request using :meth:`RESTClient.request()`."""
return cls.IMPL.GET(*n, **kw)
@classmethod
def POST(cls, *n, **kw):
"""Perform a POST request using :meth:`RESTClient.request()`."""
return cls.IMPL.POST(*n, **kw)
@classmethod
def PUT(cls, *n, **kw):
"""Perform a PUT request using :meth:`RESTClient.request()`."""
return cls.IMPL.PUT(*n, **kw)
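# --- Illustrative usage sketch (not part of the SDK) ----------------------
# The helper below shows how RESTClient and ErrorResponse are typically used
# together: issue a GET, consume the JSON-decoded result, and inspect the
# error fields on failure. The URL is a hypothetical placeholder.
def _example_rest_get(url="https://api.dropbox.com/1/account/info"):
    """Minimal sketch of calling RESTClient.GET() and handling errors."""
    try:
        # raw_response=False (the default) returns JSON-decoded data.
        return RESTClient.GET(url)
    except ErrorResponse as e:
        # Developer-facing and (optional) user-facing messages, plus status.
        return {'status': e.status, 'error': e.error_msg,
                'user_error': e.user_error_msg}
    except RESTSocketError as e:
        # Network/SSL-level failure while contacting the host.
        return {'status': None, 'error': str(e), 'user_error': None}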
class RESTSocketError(socket.error):
"""A light wrapper for ``socket.error`` that adds some more information."""
def __init__(self, host, e):
msg = "Error connecting to \"%s\": %s" % (host, str(e))
socket.error.__init__(self, msg)
# Dummy class for docstrings, see doco.py.
class _ErrorResponse__doc__(Exception):
"""Exception raised when :class:`DropboxClient` exeriences a problem.
For example, this is raised when the server returns an unexpected
non-200 HTTP response.
"""
_status__doc__ = "HTTP response status (an int)."
_reason__doc__ = "HTTP response reason (a string)."
_headers__doc__ = "HTTP response headers (a list of (header, value) tuples)."
_body__doc__ = "HTTP response body (string or JSON dict)."
_error_msg__doc__ = "Error message for developer (optional)."
_user_error_msg__doc__ = "Error message for end user (optional)."
class ErrorResponse(Exception):
"""
Raised by :meth:`RESTClient.request()` for requests that:
- Return a non-200 HTTP response, or
- Have a non-JSON response body, or
- Have a malformed/missing header in the response.
Most errors that Dropbox returns will have an error field that is unpacked and
placed on the ErrorResponse exception. In some situations, a user_error field
will also come back. Messages under user_error are worth showing to an end-user
of your app, while other errors are likely only useful for you as the developer.
"""
def __init__(self, http_resp, body):
"""
Parameters
http_resp
The :class:`RESTResponse` which errored
body
Body of the :class:`RESTResponse`.
The reason we can't simply call ``http_resp.read()`` to
get the body is that ``read()`` is not idempotent:
since it can't be called more than once,
we have to pass the string body in separately.
"""
self.status = http_resp.status
self.reason = http_resp.reason
self.body = body
self.headers = http_resp.getheaders()
http_resp.close() # won't need this connection anymore
try:
self.body = json_loadb(self.body)
self.error_msg = self.body.get('error')
self.user_error_msg = self.body.get('user_error')
except ValueError:
self.error_msg = None
self.user_error_msg = None
def __str__(self):
if self.user_error_msg and self.user_error_msg != self.error_msg:
# one is translated and the other is English
msg = "%r (%r)" % (self.user_error_msg, self.error_msg)
elif self.error_msg:
msg = repr(self.error_msg)
elif not self.body:
msg = repr(self.reason)
else:
msg = "Error parsing response body or headers: " +\
"Body - %.100r Headers - %r" % (self.body, self.headers)
return "[%d] %s" % (self.status, msg)
def params_to_urlencoded(params):
"""
Returns an application/x-www-form-urlencoded 'str' representing the key/value pairs in 'params'.
Keys and values are str()'d before calling urllib.urlencode, with the exception of unicode
objects which are utf8-encoded.
"""
def encode(o):
if isinstance(o, str):
return o.encode('utf8')
else:
return str(o)
utf8_params = {encode(k): encode(v) for k, v in params.items()}
return urllib.parse.urlencode(utf8_params)
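# A quick illustration (hypothetical values, not part of the SDK) of
# params_to_urlencoded: the keys and values below are made up; the helper
# simply produces the form-encoded body sent by request() for POSTs.
def _example_form_body():
    """Sketch: build an x-www-form-urlencoded body like the one used in request()."""
    return params_to_urlencoded({'path': '/Photos', 'locale': 'en'})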
|
|
r"""Functions for exclusive $B\to V\ell\nu$ decays."""
from math import sqrt, pi, cos, sin
import flavio
from flavio.physics.bdecays.common import lambda_K, meson_quark, meson_ff
from flavio.physics.bdecays.wilsoncoefficients import wctot_dict
from flavio.physics import ckm
from flavio.classes import AuxiliaryQuantity
from flavio.config import config
from flavio.physics.running import running
from flavio.physics.bdecays import angular
from flavio.physics.bdecays.wilsoncoefficients import get_wceff_fccc
from flavio.classes import Observable, Prediction
def get_ff(q2, par, B, V):
"""Return the form factors"""
ff_name = meson_ff[(B,V)] + ' form factor'
return AuxiliaryQuantity[ff_name].prediction(par_dict=par, wc_obj=None, q2=q2)
def prefactor(q2, par, B, V, lep):
"""Return the prefactor including constants and CKM elements"""
GF = par['GF']
scale = config['renormalization scale']['bvll']
ml = par['m_'+lep]
mB = par['m_'+B]
mV = par['m_'+V]
tauB = par['tau_'+B]
laB = lambda_K(mB**2, mV**2, q2)
laGa = lambda_K(q2, ml**2, 0.)
qi_qj = meson_quark[(B, V)]
if qi_qj == 'bu':
Vij = ckm.get_ckm(par)[0,2] # V_{ub} for b->u transitions
if qi_qj == 'bc':
Vij = ckm.get_ckm(par)[1,2] # V_{cb} for b->c transitions
if q2 <= ml**2:
return 0
return 4*GF/sqrt(2)*Vij
def get_angularcoeff(q2, wc_obj, par, B, V, lep):
Jlist = [_get_angularcoeff(q2, wc_obj, par, B, V, lep, nu)
for nu in ['e', 'mu', 'tau']]
J = {}
J['1s'] = sum([JJ['1s'] for JJ in Jlist])
J['1c'] = sum([JJ['1c'] for JJ in Jlist])
J['2s'] = sum([JJ['2s'] for JJ in Jlist])
J['2c'] = sum([JJ['2c'] for JJ in Jlist])
J['6s'] = sum([JJ['6s'] for JJ in Jlist])
J['6c'] = sum([JJ['6c'] for JJ in Jlist])
J[3] = sum([JJ[3] for JJ in Jlist])
J[4] = sum([JJ[4] for JJ in Jlist])
J[5] = sum([JJ[5] for JJ in Jlist])
J[7] = sum([JJ[7] for JJ in Jlist])
J[8] = sum([JJ[8] for JJ in Jlist])
J[9] = sum([JJ[9] for JJ in Jlist])
return J
def _get_angularcoeff(q2, wc_obj, par, B, V, lep, nu):
scale = config['renormalization scale']['bvll']
mb = running.get_mb(par, scale)
wc = get_wceff_fccc(wc_obj, par, meson_quark[(B,V)], lep, nu, mb, scale, nf=5)
if lep != nu and all(C == 0 for C in wc.values()):
# if all WCs vanish, so does the AC!
return {k: 0 for k in
['1s', '1c', '2s', '2c', '6s', '6c', 3, 4, 5, 7, 8, 9]}
ml = par['m_'+lep]
mB = par['m_'+B]
mV = par['m_'+V]
qi_qj = meson_quark[(B, V)]
if qi_qj == 'bu':
mlight = 0. # neglecting the up quark mass
if qi_qj == 'bc':
mlight = running.get_mc(par, scale) # this is needed for scalar contributions
N = prefactor(q2, par, B, V, lep)
ff = get_ff(q2, par, B, V)
h = angular.helicity_amps_v(q2, mB, mV, mb, mlight, ml, 0, ff, wc, N)
J = angular.angularcoeffs_general_v(h, q2, mB, mV, mb, mlight, ml, 0)
return J
def dGdq2(J):
r"""$q^2$-differential branching ratio in terms of angular coefficients."""
return 3/4. * (2 * J['1s'] + J['1c']) - 1/4. * (2 * J['2s'] + J['2c'])
def dGdq2_L(J):
r"""$q^2$-differential branching ratio to longitudinally polarized
vector meson in terms of angular coefficients."""
return 3/4. * J['1c'] - 1/4. * J['2c']
def dGdq2_T(J):
r"""$q^2$-differential branching ratio to transversely polarized
vector meson in terms of angular coefficients."""
return 3/2. * J['1s'] - 1/2. * J['2s']
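# Note: dGdq2(J) == dGdq2_L(J) + dGdq2_T(J), i.e. the longitudinal and
# transverse rates above sum to the total differential rate.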
# For the angle-differential and binned distributions, the main idea is this:
# while the q2-integration has to be done numerically, the angle integration is
# trivial to do analytically as the angular dependence is given in terms of
# trigonometric functions. So the differential distributions are given as
# dictionaries with (q2-dependent) coefficients of these angular functions.
# Integration (i.e. binning) in angles then merely amounts to replacing the
# angular functions by their respective integrals.
def dG_dq2_dcosthl(J):
r"""$\cos\theta_\ell$-differential branching ratio in terms of angular
coefficients, as dictionary of coefficients of trigonometric functions
of $\theta_\ell$."""
return {'1': 3/8. * (J['1c'] + 2*J['1s']),
'c': 3/8. * (J['6c'] + 2*J['6s']),
'c2': 3/8. * (J['2c'] + 2*J['2s']) }
def dG_dq2_dcosthV(J):
r"""$\cos\theta_V$-differential branching ratio in terms of angular
coefficients, as dictionary of coefficients of trigonometric functions
of $\theta_V$."""
return {'c^2': -3/8. * (-3*J['1c'] + J['2c']),
's^2': -3/8. * (-3*J['1s'] + J['2s']) }
def dG_dq2_dphi(J):
r"""$\phi$-differential branching ratio in terms of angular
coefficients, as dictionary of coefficients of trigonometric functions
of $\phi$."""
return {'1': 1/(8*pi) * (3*J['1c'] + 6*J['1s'] - J['2c'] - 2*J['2s']),
'c2': 1/(2*pi) * J[3],
's2': 1/(2*pi) * J[9] }
def _cos_angle_diff(costh):
r"""Trigonometric functions for differential distributions in terms of
$\cos\theta_{\ell,V}$"""
return {'1': 1, 'c': costh, 'c2': 2*costh**2-1, 'c^2': costh**2,
's^2': 1 - costh**2, 's2': 2*costh*sqrt(1-costh**2)}
def _cos_angle_int(costh):
r"""Integrated trigonometric functions for binned distributions in terms of
$\cos\theta_{\ell,V}$"""
return {'1': costh, 'c': costh**2/2., 'c2': 2*costh**3/3.-costh,
'c^2': costh**3/3., 's^2': costh - costh**3/3.,
's2': -2/3.*(1-costh**2)**(3/2.)}
def _angle_diff(phi):
r"""Trigonometric functions for differential distributions in terms of
$\phi$"""
return {'1': 1, 'c2': cos(2*phi), 's2': sin(2*phi)}
def _angle_int(phi):
r"""Integrated trigonometric functions for binned distributions in terms of
$\phi$"""
return {'1': phi, 'c2': sin(2*phi)/2., 's2': -cos(2*phi)/2.}
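# --- Illustrative cross-check (not part of flavio) ------------------------
# As explained in the comment block above dG_dq2_dcosthl, binning in an angle
# just replaces each trigonometric function by its primitive evaluated at the
# bin edges: _cos_angle_int is an antiderivative of _cos_angle_diff, key by key.
# The sketch below compares the analytic bin to a crude midpoint Riemann sum
# for a coefficient dictionary like the one returned by dG_dq2_dcosthl(J).
def _example_binned_costhl(coeffs, clmin, clmax, nsteps=1000):
    """Return (analytic, numerical) estimates of the cos(theta_l) bin;
    the two should agree up to the discretization error of the Riemann sum."""
    ang_min = _cos_angle_int(clmin)
    ang_max = _cos_angle_int(clmax)
    analytic = sum(y * (ang_max[a] - ang_min[a]) for a, y in coeffs.items())
    dc = (clmax - clmin) / nsteps
    numerical = sum(
        sum(y * _cos_angle_diff(clmin + (i + 0.5) * dc)[a]
            for a, y in coeffs.items()) * dc
        for i in range(nsteps))
    return analytic, numerical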
def obs_q2int(fct, wc_obj, par, B, V, lep):
"""q2-integrated observable"""
mB = par['m_'+B]
mV = par['m_'+V]
ml = par['m_'+lep]
q2max = (mB-mV)**2
q2min = ml**2
def integrand(q2):
return fct(q2)
return flavio.math.integrate.nintegrate(integrand, q2min, q2max)
def kinem_allowed(q2, par, B, V, lep):
"""True if q2 is in the kinematically allowed region"""
ml = par['m_'+lep]
mB = par['m_'+B]
mV = par['m_'+V]
if q2 < ml**2 or q2 > (mB-mV)**2:
return False
else:
return True
def FL_diff(q2, wc_obj, par, B, V, lep):
if not kinem_allowed(q2, par, B, V, lep):
return 0
J = get_angularcoeff(q2, wc_obj, par, B, V, lep)
return dGdq2_L(J) / dGdq2(J)
def FL_binned(q2min, q2max, wc_obj, par, B, V, lep):
num = flavio.math.integrate.nintegrate(lambda q2: dBRdq2(q2, wc_obj, par, B, V, lep, A='L'), q2min, q2max)
if num == 0:
return 0
denom = flavio.math.integrate.nintegrate(lambda q2: dBRdq2(q2, wc_obj, par, B, V, lep, A=None), q2min, q2max)
return num / denom
def Itot_norm(fct_J, wc_obj, par, B, V, lep):
def fct(q2):
J = get_angularcoeff(q2, wc_obj, par, B, V, lep)
return fct_J(J)
num = obs_q2int(fct, wc_obj, par, B, V, lep)
def fct_den(q2):
J = get_angularcoeff(q2, wc_obj, par, B, V, lep)
return dGdq2(J)
den = obs_q2int(fct_den, wc_obj, par, B, V, lep)
return num / den
def dBR_dq2_dcosthl_binned(q2, clmin, clmax, wc_obj, par, B, V, lep):
if not kinem_allowed(q2, par, B, V, lep):
return 0
tauB = par['tau_'+B]
J = get_angularcoeff(q2, wc_obj, par, B, V, lep)
dG = dG_dq2_dcosthl(J)
ang_min = _cos_angle_int(clmin)
ang_max = _cos_angle_int(clmax)
return BRfac(V) * tauB * sum(
[y * (ang_max[a] - ang_min[a]) for a, y in dG.items()])
def BR_binned_costhl(clmin, clmax, wc_obj, par, B, V, lep):
def fct(q2):
return dBR_dq2_dcosthl_binned(q2, clmin, clmax, wc_obj, par, B, V, lep)
return obs_q2int(fct, wc_obj, par, B, V, lep)
def dBR_dq2_dcosthl(q2, cl, wc_obj, par, B, V, lep):
if not kinem_allowed(q2, par, B, V, lep):
return 0
tauB = par['tau_'+B]
J = get_angularcoeff(q2, wc_obj, par, B, V, lep)
dG = dG_dq2_dcosthl(J)
ang = _cos_angle_diff(cl)
return BRfac(V) * tauB * sum(
[y * ang[a] for a, y in dG.items()])
def dBR_dcosthl(cl, wc_obj, par, B, V, lep):
def fct(q2):
return dBR_dq2_dcosthl(q2, cl, wc_obj, par, B, V, lep)
return obs_q2int(fct, wc_obj, par, B, V, lep)
def dBR_dq2_dcosthV_binned(q2, cVmin, cVmax, wc_obj, par, B, V, lep):
if not kinem_allowed(q2, par, B, V, lep):
return 0
tauB = par['tau_'+B]
J = get_angularcoeff(q2, wc_obj, par, B, V, lep)
dG = dG_dq2_dcosthV(J)
ang_min = _cos_angle_int(cVmin)
ang_max = _cos_angle_int(cVmax)
return BRfac(V) * tauB * sum(
[y * (ang_max[a] - ang_min[a]) for a, y in dG.items()])
def BR_binned_costhV(cVmin, cVmax, wc_obj, par, B, V, lep):
def fct(q2):
return dBR_dq2_dcosthV_binned(q2, cVmin, cVmax, wc_obj, par, B, V, lep)
return obs_q2int(fct, wc_obj, par, B, V, lep)
def dBR_dq2_dcosthV(q2, cV, wc_obj, par, B, V, lep):
if not kinem_allowed(q2, par, B, V, lep):
return 0
tauB = par['tau_'+B]
J = get_angularcoeff(q2, wc_obj, par, B, V, lep)
dG = dG_dq2_dcosthV(J)
ang = _cos_angle_diff(cV)
return BRfac(V) * tauB * sum(
[y * ang[a] for a, y in dG.items()])
def dBR_dcosthV(cV, wc_obj, par, B, V, lep):
def fct(q2):
return dBR_dq2_dcosthV(q2, cV, wc_obj, par, B, V, lep)
return obs_q2int(fct, wc_obj, par, B, V, lep)
def dBR_dq2_dphi_binned(q2, phimin, phimax, wc_obj, par, B, V, lep):
if not kinem_allowed(q2, par, B, V, lep):
return 0
tauB = par['tau_'+B]
J = get_angularcoeff(q2, wc_obj, par, B, V, lep)
dG = dG_dq2_dphi(J)
ang_min = _angle_int(phimin)
ang_max = _angle_int(phimax)
return BRfac(V) * tauB * sum(
[y * (ang_max[a] - ang_min[a]) for a, y in dG.items()])
def BR_binned_phi(phimin, phimax, wc_obj, par, B, V, lep):
def fct(q2):
return dBR_dq2_dphi_binned(q2, phimin, phimax, wc_obj, par, B, V, lep)
return obs_q2int(fct, wc_obj, par, B, V, lep)
def dBR_dq2_dphi(q2, phi, wc_obj, par, B, V, lep):
if not kinem_allowed(q2, par, B, V, lep):
return 0
tauB = par['tau_'+B]
J = get_angularcoeff(q2, wc_obj, par, B, V, lep)
dG = dG_dq2_dphi(J)
ang = _angle_diff(phi)
return BRfac(V) * tauB * sum(
[y * ang[a] for a, y in dG.items()])
def dBR_dphi(phi, wc_obj, par, B, V, lep):
def fct(q2):
return dBR_dq2_dphi(q2, phi, wc_obj, par, B, V, lep)
return obs_q2int(fct, wc_obj, par, B, V, lep)
def BRfac(V):
if V == 'rho0' or V == 'omega':
# factor of 1/2 for neutral rho due to rho = (uubar-ddbar)/sqrt(2)
# and also for omega = (uubar+ddbar)/sqrt(2)
return 1/2.
else:
return 1
def dBRdq2_lep(q2, wc_obj, par, B, V, lep, A):
if not kinem_allowed(q2, par, B, V, lep):
return 0
tauB = par['tau_'+B]
J = get_angularcoeff(q2, wc_obj, par, B, V, lep)
if A is None:
return tauB * dGdq2(J) * BRfac(V)
elif A == 'L':
return tauB * dGdq2_L(J) * BRfac(V)
elif A == 'T':
return tauB * dGdq2_T(J) * BRfac(V)
def dBRdq2(q2, wc_obj, par, B, V, lep, A):
if lep == 'l':
# average of e and mu!
return (dBRdq2_lep(q2, wc_obj, par, B, V, 'e', A) + dBRdq2_lep(q2, wc_obj, par, B, V, 'mu', A))/2
else:
return dBRdq2_lep(q2, wc_obj, par, B, V, lep, A)
def dBRdq2_function(B, V, lep, A):
return lambda wc_obj, par, q2: dBRdq2(q2, wc_obj, par, B, V, lep, A)
def BR_binned(q2min, q2max, wc_obj, par, B, V, lep, A):
def integrand(q2):
return dBRdq2(q2, wc_obj, par, B, V, lep, A)
return flavio.math.integrate.nintegrate(integrand, q2min, q2max)
def BR_binned_function(B, V, lep, A):
return lambda wc_obj, par, q2min, q2max: BR_binned(q2min, q2max, wc_obj, par, B, V, lep, A)
def BR_binned_tot_function(B, V, lep, A):
def f(wc_obj, par, q2min, q2max):
num = BR_binned(q2min, q2max, wc_obj, par, B, V, lep, A)
if num == 0:
return 0
den = BR_tot(wc_obj, par, B, V, lep, A)
return num / den
return f
def FL_function(B, V, lep):
return lambda wc_obj, par, q2: FL_diff(q2, wc_obj, par, B, V, lep)
def FL_binned_function(B, V, lep):
return lambda wc_obj, par, q2min, q2max: FL_binned(q2min, q2max, wc_obj, par, B, V, lep)
def FL_tot_function(B, V, lep):
def f(wc_obj, par):
mB = par['m_'+B]
mV = par['m_'+V]
ml = par['m_'+lep]
q2max = (mB-mV)**2
q2min = ml**2
return FL_binned(q2min, q2max, wc_obj, par, B, V, lep)
return f
def FLt_tot_function(B, V, lep):
def f(wc_obj, par):
def fct_J(J):
return -J['2c']
return Itot_norm(fct_J, wc_obj, par, B, V, lep)
return f
def AFB_tot_function(B, V, lep):
def f(wc_obj, par):
def fct_J(J):
return 3 / 8 * (2 * J['6s'] + J['6c'])
return Itot_norm(fct_J, wc_obj, par, B, V, lep)
return f
def I3_tot_function(B, V, lep):
def f(wc_obj, par):
def fct_J(J):
return J[3]
return Itot_norm(fct_J, wc_obj, par, B, V, lep)
return f
def BR_binned_costhl_function(B, V, lep):
if lep == 'l':
return lambda wc_obj, par, clmin, clmax: (
BR_binned_costhl(clmin, clmax, wc_obj, par, B, V, 'e')
+ BR_binned_costhl(clmin, clmax, wc_obj, par, B, V, 'mu'))/2.
return lambda wc_obj, par, clmin, clmax: BR_binned_costhl(clmin, clmax, wc_obj, par, B, V, lep)
def BR_binned_costhV_function(B, V, lep):
if lep == 'l':
return lambda wc_obj, par, cVmin, cVmax: (
BR_binned_costhV(cVmin, cVmax, wc_obj, par, B, V, 'e')
+ BR_binned_costhV(cVmin, cVmax, wc_obj, par, B, V, 'mu'))/2.
return lambda wc_obj, par, cVmin, cVmax: BR_binned_costhV(cVmin, cVmax, wc_obj, par, B, V, lep)
def BR_binned_phi_function(B, V, lep):
if lep == 'l':
return lambda wc_obj, par, phimin, phimax: (
BR_binned_phi(phimin, phimax, wc_obj, par, B, V, 'e')
+ BR_binned_phi(phimin, phimax, wc_obj, par, B, V, 'mu'))/2.
return lambda wc_obj, par, phimin, phimax: BR_binned_phi(phimin, phimax, wc_obj, par, B, V, lep)
def dBR_dcosthl_function(B, V, lep):
if lep == 'l':
return lambda wc_obj, par, cl: (
dBR_dcosthl(cl, wc_obj, par, B, V, 'e')
+ dBR_dcosthl(cl, wc_obj, par, B, V, 'mu'))/2.
return lambda wc_obj, par, cl: dBR_dcosthl(cl, wc_obj, par, B, V, lep)
def dBR_dcosthV_function(B, V, lep):
if lep == 'l':
return lambda wc_obj, par, cV: (
dBR_dcosthV(cV, wc_obj, par, B, V, 'e')
+ dBR_dcosthV(cV, wc_obj, par, B, V, 'mu'))/2.
return lambda wc_obj, par, cV: dBR_dcosthV(cV, wc_obj, par, B, V, lep)
def dBR_dphi_function(B, V, lep):
if lep == 'l':
return lambda wc_obj, par, phi: (
dBR_dphi(phi, wc_obj, par, B, V, 'e')
+ dBR_dphi(phi, wc_obj, par, B, V, 'mu'))/2.
return lambda wc_obj, par, phi: dBR_dphi(phi, wc_obj, par, B, V, lep)
def _BR_tot(wc_obj, par, B, V, lep, A):
mB = par['m_'+B]
mV = par['m_'+V]
ml = par['m_'+lep]
q2max = (mB-mV)**2
q2min = ml**2
return BR_binned(q2min, q2max, wc_obj, par, B, V, lep, A)
def BR_tot(wc_obj, par, B, V, lep, A):
if lep == 'l':
# average of e and mu!
return (_BR_tot(wc_obj, par, B, V, 'e', A)+_BR_tot(wc_obj, par, B, V, 'mu', A))/2.
else:
return _BR_tot(wc_obj, par, B, V, lep, A)
def BR_tot_function(B, V, lep, A):
return lambda wc_obj, par: BR_tot(wc_obj, par, B, V, lep, A)
def BR_binned_leptonflavour(q2min, q2max, wc_obj, par, B, V, lnum, lden, A):
num = BR_binned(q2min, q2max, wc_obj, par, B, V, lnum, A)
if num == 0:
return 0
den = BR_binned(q2min, q2max, wc_obj, par, B, V, lden, A)
return num/den
def BR_tot_leptonflavour(wc_obj, par, B, V, lnum, lden, A):
num = BR_tot(wc_obj, par, B, V, lnum, A)
if num == 0:
return 0
den = BR_tot(wc_obj, par, B, V, lden, A)
return num/den
def BR_tot_leptonflavour_function(B, V, lnum, lden, A):
return lambda wc_obj, par: BR_tot_leptonflavour(wc_obj, par, B, V, lnum, lden, A)
def BR_binned_leptonflavour_function(B, V, lnum, lden, A):
return lambda wc_obj, par, q2min, q2max: BR_binned_leptonflavour(q2min, q2max, wc_obj, par, B, V, lnum, lden, A)
# Observable and Prediction instances
_tex = {'e': 'e', 'mu': r'\mu', 'tau': r'\tau', 'l': r'\ell'}
_A = {'dBR/dq2': None, 'BR': None, '<BR>': None,
'dBR_L/dq2': 'L', 'BR_L': 'L', '<BR_L>': 'L',
'dBR_T/dq2': 'T', 'BR_T': 'T', '<BR_T>': 'T',
}
_func = {'dBR/dq2': dBRdq2_function, 'BR': BR_tot_function, '<BR>': BR_binned_function,
'dBR_L/dq2': dBRdq2_function, 'BR_L': BR_tot_function, '<BR_L>': BR_binned_function,
'dBR_T/dq2': dBRdq2_function, 'BR_T': BR_tot_function, '<BR_T>': BR_binned_function,
'<BR>/<cl>': BR_binned_costhl_function,
'<BR>/<cV>': BR_binned_costhV_function,
'<BR>/<phi>': BR_binned_phi_function,
'dBR/dcl': dBR_dcosthl_function,
'dBR/dcV': dBR_dcosthV_function,
'dBR/dphi': dBR_dphi_function,
'FL': FL_function,
'<FL>': FL_binned_function,
'FLtot': FL_tot_function,
'FLttot': FLt_tot_function,
'AFBtot': AFB_tot_function,
'I3tot': I3_tot_function,
}
_desc = {'dBR/dq2': r'$q^2$-differential', 'BR': 'Total', '<BR>': '$q^2$-binned',
'dBR_L/dq2': 'Differential longitudinal', 'BR_L': 'Total longitudinal', '<BR_L>': 'Binned longitudinal',
'dBR_T/dq2': 'Differential transverse', 'BR_T': 'Total transverse', '<BR_T>': 'Binned transverse',
'<BR>/<cl>': r'$\cos\theta_l$-binned',
'<BR>/<cV>': r'$\cos\theta_V$-binned',
'<BR>/<phi>': r'$\phi$-binned',
'dBR/dcl': r'$\cos\theta_l$-differential',
'dBR/dcV': r'$\cos\theta_V$-differential',
'dBR/dphi': r'$\phi$-differential',
'FL': r'Differential longitudinal polarization fraction',
'<FL>': r'Binned longitudinal polarization fraction',
'FLtot': r'Total longitudinal polarization fraction',
'FLttot': r'Total longitudinal polarization fraction',
'AFBtot': r'Total forward-backward asymmetry',
'I3tot': r'$q^2$-integrated angular coefficient $I_3$',
}
_tex_br = {'dBR/dq2': r'\frac{d\text{BR}}{dq^2}', 'BR': r'\text{BR}', '<BR>': r'\langle\text{BR}\rangle',
'dBR_L/dq2': r'\frac{d\text{BR}_L}{dq^2}', 'BR_L': r'\text{BR}_L', '<BR_L>': r'\langle\text{BR}_L\rangle',
'dBR_T/dq2': r'\frac{d\text{BR}_T}{dq^2}', 'BR_T': r'\text{BR}_T', '<BR_T>': r'\langle\text{BR}_T\rangle',
'<BR>/<cl>': r'\langle\text{BR}\rangle/\Delta\cos\theta_l',
'<BR>/<cV>': r'\langle\text{BR}\rangle/\Delta\cos\theta_V',
'<BR>/<phi>': r'\langle\text{BR}\rangle/\Delta\phi',
'dBR/dcl': r'\frac{d\text{BR}}{d\cos\theta_l}',
'dBR/dcV': r'\frac{d\text{BR}}{d\cos\theta_V}',
'dBR/dphi': r'\frac{d\text{BR}}{d\phi}',
'FL': r'F_L',
'<FL>': r'\langle F_L\rangle',
'FLtot': r'F_L',
'FLttot': r'\widetilde{F}_L',
'AFBtot': r'A_\text{FB}',
'I3tot': r'I_3',
}
_args = {'dBR/dq2': ['q2'], 'BR': None, '<BR>': ['q2min', 'q2max'],
'dBR_L/dq2': ['q2'], 'BR_L': None, '<BR_L>': ['q2min', 'q2max'],
'dBR_T/dq2': ['q2'], 'BR_T': None, '<BR_T>': ['q2min', 'q2max'],
'<BR>/<cl>': ['clmin', 'clmax'],
'<BR>/<cV>': ['cVmin', 'cVmax'],
'<BR>/<phi>': ['phimin', 'phimax'],
'dBR/dcl': ['cl'],
'dBR/dcV': ['cV'],
'dBR/dphi': ['phi'],
'FL': ['q2'],
'<FL>': ['q2min', 'q2max'],
'FLtot': None,
'FLttot': None,
'AFBtot': None,
'I3tot': None,
}
_hadr = {
'B0->D*': {'tex': r"B^0\to D^{\ast -}", 'B': 'B0', 'V': 'D*+', },
'B+->D*': {'tex': r"B^+\to D^{\ast 0}", 'B': 'B+', 'V': 'D*0', },
'B0->rho': {'tex': r"B^0\to \rho^-", 'B': 'B0', 'V': 'rho+', },
'B+->rho': {'tex': r"B^+\to \rho^0", 'B': 'B+', 'V': 'rho0', },
'B+->omega': {'tex': r"B^+\to \omega ", 'B': 'B+', 'V': 'omega', },
'Bs->K*': {'tex': r"B_s\to K^{* -} ", 'B': 'Bs', 'V': 'K*+', },
}
# for LF ratios we don't distinguish B+ and B0 (but take B0 because we have to choose something)
_hadr_l = {
'B->D*': {'tex': r"B\to D^{\ast}", 'B': 'B0', 'V': 'D*+', 'decays': ['B0->D*', 'B+->D*'],},
'B->rho': {'tex': r"B\to \rho", 'B': 'B0', 'V': 'rho+', 'decays': ['B0->rho', 'B+->rho'],},
'B+->omega': {'tex': r"B^+\to \omega ", 'B': 'B+', 'V': 'omega', 'decays': ['B+->omega'],},
'Bs->K*': {'tex': r"B_s\to K^{* -} ", 'B': 'Bs', 'V': 'K*+', 'decays': ['Bs->K*'],},
}
_process_taxonomy = r'Process :: $b$ hadron decays :: Semi-leptonic tree-level decays :: $B\to V\ell\nu$ :: $'
for l in ['e', 'mu', 'tau', 'l']:
for br in ['dBR/dq2', 'BR', '<BR>',
'dBR_L/dq2', 'BR_L', '<BR_L>',
'dBR_T/dq2', 'BR_T', '<BR_T>',
'<BR>/<cl>', '<BR>/<cV>', '<BR>/<phi>',
'dBR/dcl', 'dBR/dcV', 'dBR/dphi',
'<FL>', 'FL', 'FLtot', 'FLttot', 'AFBtot', 'I3tot']:
for M in _hadr.keys():
_process_tex = _hadr[M]['tex']+_tex[l]+r"^+\nu_"+_tex[l]
_obs_name = br + "("+M+l+"nu)"
_obs = Observable(_obs_name)
_obs.set_description(_desc[br] + r" branching ratio of $" + _process_tex + "$")
_obs.tex = r'$' + _tex_br[br] + r"(" +_process_tex + ")$"
_obs.arguments = _args[br]
_obs.add_taxonomy(_process_taxonomy + _process_tex + r'$')
if br in _A:
# for dBR/dq2, need to distinguish between total, L, and T
Prediction(_obs_name, _func[br](_hadr[M]['B'], _hadr[M]['V'], l, A=_A[br]))
else:
# for the other observables, no A argument is needed
Prediction(_obs_name, _func[br](_hadr[M]['B'], _hadr[M]['V'], l))
# Lepton flavour ratios
for l in [('mu','e'), ('tau','mu'), ('tau', 'l')]:
for M in _hadr_l.keys():
# binned ratio of BRs
_obs_name = "<R"+l[0]+l[1]+">("+M+"lnu)"
_obs = Observable(name=_obs_name, arguments=['q2min', 'q2max'])
_obs.set_description(r"Ratio of partial branching ratios of $" + _hadr_l[M]['tex'] +_tex[l[0]]+r"^+ \nu_"+_tex[l[0]]+r"$" + " and " + r"$" + _hadr_l[M]['tex'] +_tex[l[1]]+r"^+ \nu_"+_tex[l[1]]+r"$")
_obs.tex = r"$\langle R_{" + _tex[l[0]] + ' ' + _tex[l[1]] + r"} \rangle(" + _hadr_l[M]['tex'] + r"\ell^+\nu)$"
for li in l:
for N in _hadr_l[M]['decays']:
# add taxonomy for both processes (e.g. B->Venu and B->Vmunu) and for charged and neutral
_obs.add_taxonomy(_process_taxonomy + _hadr[N]['tex'] + _tex[li]+r"^+\nu_"+_tex[li]+r"$")
Prediction(_obs_name, BR_binned_leptonflavour_function(_hadr_l[M]['B'], _hadr_l[M]['V'], l[0], l[1], A=None))
# ratio of total BRs
_obs_name = "R"+l[0]+l[1]+"("+M+"lnu)"
_obs = Observable(name=_obs_name)
_obs.set_description(r"Ratio of total branching ratios of $" + _hadr_l[M]['tex'] +_tex[l[0]]+r"^+ \nu_"+_tex[l[0]]+r"$" + " and " + r"$" + _hadr_l[M]['tex'] +_tex[l[1]]+r"^+ \nu_"+_tex[l[1]]+r"$")
_obs.tex = r"$R_{" + _tex[l[0]] + ' ' + _tex[l[1]] + r"}(" + _hadr_l[M]['tex'] + r"\ell^+\nu)$"
for li in l:
for N in _hadr_l[M]['decays']:
# add taxonomy for both processes (e.g. B->Venu and B->Vmunu) and for charged and neutral
_obs.add_taxonomy(_process_taxonomy + _hadr[N]['tex'] +_tex[li]+r"^+\nu_"+_tex[li]+r"$")
Prediction(_obs_name, BR_tot_leptonflavour_function(_hadr_l[M]['B'], _hadr_l[M]['V'], l[0], l[1], A=None))
# B->D*taunu normalized binned BR
_obs_name = "<BR>/BR(B->D*taunu)"
_obs = Observable(name=_obs_name, arguments=['q2min', 'q2max'])
_obs.set_description(r"Relative partial branching ratio of $B\to D^\ast\tau^+\nu$")
_obs.tex = r"$\frac{\langle \text{BR} \rangle}{\text{BR}}(B\to D^\ast\tau^+\nu)$"
for M in ['B+->D*', 'B0->D*']:
_process_tex = _hadr[M]['tex'] + r"\tau^+\nu"
_obs.add_taxonomy(_process_taxonomy + _process_tex + r"$")
Prediction(_obs_name, BR_binned_tot_function('B0', 'D*+', 'tau', A=None))
|
|
from swift.common.internal_client import InternalClient
from swift.common.exceptions import DiskFileXattrNotSupported, DiskFileNoSpace
from swift.common.exceptions import DiskFileNotExist
from swift.obj.diskfile import get_data_dir as df_data_dir, _get_filename
from swift.common.request_helpers import get_name_and_placement
from swift.common.utils import storage_directory, hash_path, cache_from_env
from swift.common.wsgi import make_subrequest
import xattr
import logging
import pickle
import errno
import os
PICKLE_PROTOCOL = 2
SYSMETA_OBJ_HEADER = 'X-Object-Sysmeta-Vertigo-'
VERTIGO_MC_HEADER_OBJ = SYSMETA_OBJ_HEADER + 'Microcontroller'
SYSMETA_CONTAINER_HEADER = 'X-Container-Sysmeta-Vertigo-'
VERTIGO_MC_HEADER_CONTAINER = SYSMETA_CONTAINER_HEADER + 'Microcontroller'
SWIFT_METADATA_KEY = 'user.swift.metadata'
LOCAL_PROXY = '/etc/swift/storlet-proxy-server.conf'
DEFAULT_MD_STRING = {'onget': None,
'onput': None,
'ondelete': None,
'ontimer': None}
def read_metadata(fd, md_key=None):
"""
Helper function to read the pickled metadata from an object file.
:param fd: file descriptor or filename to load the metadata from
:param md_key: metadata key to be read from object file
:returns: dictionary of metadata
"""
meta_key = SWIFT_METADATA_KEY
metadata = ''
key = 0
try:
while True:
metadata += xattr.getxattr(fd, '%s%s' % (meta_key,
(key or '')))
key += 1
except (IOError, OSError) as e:
if metadata == '':
return False
for err in 'ENOTSUP', 'EOPNOTSUPP':
if hasattr(errno, err) and e.errno == getattr(errno, err):
msg = "Filesystem at %s does not support xattr" % \
_get_filename(fd)
logging.exception(msg)
raise DiskFileXattrNotSupported(e)
if e.errno == errno.ENOENT:
raise DiskFileNotExist()
return pickle.loads(metadata)
def write_metadata(fd, metadata, xattr_size=65536, md_key=None):
"""
Helper function to write pickled metadata for an object file.
:param fd: file descriptor or filename to write the metadata
:param md_key: metadata key to be written to the object file
:param metadata: metadata to write
"""
meta_key = SWIFT_METADATA_KEY
metastr = pickle.dumps(metadata, PICKLE_PROTOCOL)
key = 0
while metastr:
try:
xattr.setxattr(fd, '%s%s' % (meta_key, key or ''),
metastr[:xattr_size])
metastr = metastr[xattr_size:]
key += 1
except IOError as e:
for err in 'ENOTSUP', 'EOPNOTSUPP':
if hasattr(errno, err) and e.errno == getattr(errno, err):
msg = "Filesystem at %s does not support xattr" % \
_get_filename(fd)
logging.exception(msg)
raise DiskFileXattrNotSupported(e)
if e.errno in (errno.ENOSPC, errno.EDQUOT):
msg = "No space left on device for %s" % _get_filename(fd)
logging.exception(msg)
raise DiskFileNoSpace()
raise
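# --- Illustrative round-trip (not part of the Vertigo middleware) ----------
# The two helpers above pickle a metadata dict into the 'user.swift.metadata'
# extended attribute (split into 64 KiB chunks) and read it back by
# concatenating the chunks. The sketch below shows the round-trip on a
# throw-away file; the path is a made-up example, the filesystem must support
# xattrs, and the Python 2 environment this module targets is assumed.
def _example_metadata_roundtrip(path='/tmp/vertigo-example.data'):
    """Write a small metadata dict to `path` via xattrs and read it back."""
    with open(path, 'w') as f:
        f.write('payload')
    metadata = {VERTIGO_MC_HEADER_OBJ: DEFAULT_MD_STRING}
    write_metadata(path, metadata)
    return read_metadata(path)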
def get_object_metadata(data_file):
"""
Retrieves the swift metadata of a specified data file
:param data_file: full path of the data file
:returns: dictionary with all swift metadata
"""
fd = open_data_file(data_file)
metadata = read_metadata(fd, SWIFT_METADATA_KEY)
close_data_file(fd)
return metadata
def get_container_metadata(vertigo, container):
new_env = dict(vertigo.request.environ)
auth_token = vertigo.request.headers.get('X-Auth-Token')
sub_req = make_subrequest(new_env, 'HEAD', container,
headers={'X-Auth-Token': auth_token},
swift_source='Vertigo')
response = sub_req.get_response(vertigo.app)
return response.headers
def set_object_metadata(data_file, metadata):
"""
Sets the swift metadata to the specified data_file
:param data_file: full path of the data file
"""
fd = open_data_file(data_file)
write_metadata(fd, metadata, md_key=SWIFT_METADATA_KEY)
close_data_file(fd)
def set_container_metadata(vertigo, metadata):
"""
Sets the swift metadata to the container
:param metadata: metadata dictionary
"""
memcache = cache_from_env(vertigo.request.environ)
dest_path = os.path.join('/', vertigo.api_version, vertigo.account, vertigo.container)
for key in metadata.keys():
if not key.startswith(SYSMETA_CONTAINER_HEADER):
del metadata[key]
# We store the Vertigo metadata in the memcached server (only 10 minutes)
memcache.set("vertigo_"+dest_path, metadata, time=600)
new_env = dict(vertigo.request.environ)
auth_token = vertigo.request.headers.get('X-Auth-Token')
metadata.update({'X-Auth-Token': auth_token})
sub_req = make_subrequest(new_env, 'POST', dest_path,
headers=metadata,
swift_source='Vertigo')
sub_req.get_response(vertigo.app)
def make_swift_request(op, account, container=None, obj=None):
"""
Makes a swift request via a local proxy (expensive operation)
:param op: operation (PUT, GET, DELETE, HEAD)
:param account: swift account
:param container: swift container
:param obj: swift object
:returns: swift.common.swob.Response instance
"""
iclient = InternalClient(LOCAL_PROXY, 'SA', 1)
path = iclient.make_path(account, container, obj)
resp = iclient.make_request(op, path, {'PATH_INFO': path}, [200])
return resp
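# A hedged usage sketch (account/container/object names are made up): issue a
# HEAD through the local proxy and inspect the swob Response that comes back.
def _example_head_through_proxy():
    """Sketch: HEAD an object via make_swift_request() and return its status."""
    resp = make_swift_request('HEAD', 'AUTH_test', 'mycontainer', 'myobject')
    return resp.status_int, dict(resp.headers)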
def verify_access(vertigo, path):
"""
Verifies access to the specified object in swift
:param vertigo: swift_vertigo.vertigo_handler.VertigoProxyHandler instance
:param path: swift path of the object to check
:returns: the response to a HEAD subrequest for the object (contains its headers if it exists)
"""
vertigo.logger.debug('Vertigo - Verify access to %s' % path)
new_env = dict(vertigo.request.environ)
if 'HTTP_TRANSFER_ENCODING' in new_env.keys():
del new_env['HTTP_TRANSFER_ENCODING']
for key in DEFAULT_MD_STRING.keys():
env_key = 'HTTP_X_VERTIGO_' + key.upper()
if env_key in new_env.keys():
del new_env[env_key]
auth_token = vertigo.request.headers.get('X-Auth-Token')
sub_req = make_subrequest(
new_env, 'HEAD', path,
headers={'X-Auth-Token': auth_token},
swift_source='Vertigo')
return sub_req.get_response(vertigo.app)
def create_link(vertigo, link_path, dest_path, heads):
"""
Creates a link to a real object
:param vertigo: swift_vertigo.vertigo_handler.VertigoProxyHandler instance
:param link_path: swift path of the link
:param dest_path: swift path of the object to link
:param heads: original object headers
"""
vertigo.logger.debug('Vertigo - Creating link from %s to %s' % (link_path,
dest_path))
new_env = dict(vertigo.request.environ)
if 'HTTP_TRANSFER_ENCODING' in new_env.keys():
del new_env['HTTP_TRANSFER_ENCODING']
if 'HTTP_X_COPY_FROM' in new_env.keys():
del new_env['HTTP_X_COPY_FROM']
auth_token = vertigo.request.headers.get('X-Auth-Token')
link_path = os.path.join('/', vertigo.api_version,
vertigo.account, link_path)
sub_req = make_subrequest(
new_env, 'PUT', link_path,
headers={'X-Auth-Token': auth_token,
'Content-Length': 0,
'Content-Type': 'vertigo/link',
'Original-Content-Length': heads["Content-Length"],
'X-Object-Sysmeta-Vertigo-Link-to': dest_path},
swift_source='Vertigo')
resp = sub_req.get_response(vertigo.app)
return resp
def get_data_dir(vertigo):
"""
Gets the data directory full path
:param vertigo: swift_vertigo.vertigo_handler.VertigoObjectHandler instance
:returns: the data directory path
"""
devices = vertigo.conf.get('devices')
device, partition, account, container, obj, policy = \
get_name_and_placement(vertigo.request, 5, 5, True)
name_hash = hash_path(account, container, obj)
device_path = os.path.join(devices, device)
storage_dir = storage_directory(df_data_dir(policy), partition, name_hash)
data_dir = os.path.join(device_path, storage_dir)
return data_dir
def get_data_file(vertigo):
"""
Gets the data file full path
:param vertigo: swift_vertigo.vertigo_handler.VertigoObjectHandler instance
:returns: the data file path
"""
data_dir = get_data_dir(vertigo)
files = os.listdir(data_dir)
for swift_file in files:
if swift_file.endswith(".data"):
return os.path.join(data_dir, swift_file)
def open_data_file(data_file):
"""
Open a data file
:param data_file: full path of the data file
:returns: a file descriptor of the open data file
"""
fd = os.open(data_file, os.O_RDONLY)
return fd
def close_data_file(fd):
"""
Close a file descriptor
:param fd: file descriptor
"""
os.close(fd)
def set_microcontroller_container(vertigo, trigger, mc):
"""
Sets a microcontroller on the specified container in the main request,
and stores the metadata file
:param vertigo: swift_vertigo.vertigo_handler.VertigoObjectHandler instance
:param trigger: trigger name
:param mc: microcontroller name
:raises HTTPInternalServerError: If it fails
"""
container = os.path.join('/', vertigo.api_version, vertigo.account, vertigo.container)
# 1st: set microcontroller name to list
metadata = get_container_metadata(vertigo, container)
try:
mc_dict = get_microcontroller_dict_container(metadata)
except:
raise ValueError('Vertigo - ERROR: There was an error getting trigger'
' dictionary from the container.\n')
if not mc_dict:
mc_dict = DEFAULT_MD_STRING
if not mc_dict[trigger]:
mc_dict[trigger] = list()
if mc not in mc_dict[trigger]:
mc_dict[trigger].append(mc)
# 2nd: Get microcontroller specific metadata
specific_md = vertigo.request.body.rstrip()
# 3rd: Assign all metadata to the container
try:
metadata[VERTIGO_MC_HEADER_CONTAINER] = mc_dict
sysmeta_key = (SYSMETA_CONTAINER_HEADER + trigger + '-' + mc).title()
if specific_md:
metadata[sysmeta_key] = specific_md
else:
if sysmeta_key in metadata:
del metadata[sysmeta_key]
set_container_metadata(vertigo, metadata)
except:
raise ValueError('Vertigo - ERROR: There was an error setting trigger'
' dictionary to the container.\n')
def delete_microcontroller_container(vertigo, trigger, mc):
"""
Deletes a microcontroller from the specified container in the main request
:param vertigo: swift_vertigo.vertigo_handler.VertigoObjectHandler instance
:param trigger: trigger name
:param mc: microcontroller name
:raises HTTPInternalServerError: If it fails
"""
vertigo.logger.debug('Vertigo - Go to delete "' + mc +
'" microcontroller from "' + trigger + '" trigger')
container = os.path.join('/', vertigo.api_version, vertigo.account, vertigo.container)
metadata = get_container_metadata(vertigo, container)
try:
mc_dict = get_microcontroller_dict_container(metadata)
except:
raise ValueError('Vertigo - ERROR: There was an error getting trigger'
' metadata from the container.\n')
try:
if trigger == "vertigo" and mc == "all":
for key in list(metadata.keys()):  # copy the keys; we delete entries while iterating
if key.startswith(SYSMETA_CONTAINER_HEADER):
del metadata[key]
else:
if metadata[VERTIGO_MC_HEADER_CONTAINER]:
if isinstance(metadata[VERTIGO_MC_HEADER_CONTAINER], dict):
mc_dict = metadata[VERTIGO_MC_HEADER_CONTAINER]
else:
mc_dict = eval(metadata[VERTIGO_MC_HEADER_CONTAINER])
if mc == 'all':
mc_list = mc_dict[trigger]
mc_dict[trigger] = None
for mc_k in mc_list:
sysmeta_key = (SYSMETA_CONTAINER_HEADER + trigger + '-' + mc_k).title()
if sysmeta_key in metadata:
metadata[sysmeta_key] = ''
elif mc in mc_dict[trigger]:
mc_dict[trigger].remove(mc)
sysmeta_key = (SYSMETA_CONTAINER_HEADER + trigger + '-' + mc).title()
if sysmeta_key in metadata:
metadata[sysmeta_key] = ''
else:
raise
metadata[VERTIGO_MC_HEADER_CONTAINER] = mc_dict
metadata = clean_microcontroller_dict_container(metadata)
else:
raise
set_container_metadata(vertigo, metadata)
except:
pass
# raise ValueError('Vertigo - Error: Microcontroller "' + mc + '" not'
# ' assigned to the "' + trigger + '" trigger.\n')
def set_microcontroller_object(vertigo, trigger, mc):
"""
Sets a microcontroller on the specified object in the main request,
and stores the metadata file
:param vertigo: swift_vertigo.vertigo_handler.VertigoObjectHandler instance
:param trigger: trigger name
:param mc: microcontroller name
:raises HTTPInternalServerError: If it fails
"""
# 1st: set microcontroller name to list
try:
mc_dict = get_microcontroller_dict_object(vertigo)
except:
raise ValueError('Vertigo - ERROR: There was an error getting trigger'
' dictionary from the object.\n')
if not mc_dict:
mc_dict = DEFAULT_MD_STRING
if not mc_dict[trigger]:
mc_dict[trigger] = list()
if mc not in mc_dict[trigger]:
mc_dict[trigger].append(mc)
# 2nd: Set microcontroller specific metadata
specific_md = vertigo.request.body.rstrip()
# 3rd: Assign all metadata to the object
try:
data_file = get_data_file(vertigo)
metadata = get_object_metadata(data_file)
metadata[VERTIGO_MC_HEADER_OBJ] = mc_dict
sysmeta_key = (SYSMETA_OBJ_HEADER + trigger + '-' + mc).title()
if specific_md:
metadata[sysmeta_key] = specific_md
else:
if sysmeta_key in metadata:
del metadata[sysmeta_key]
set_object_metadata(data_file, metadata)
except:
raise ValueError('Vertigo - ERROR: There was an error setting trigger'
' dictionary from the object.\n')
def delete_microcontroller_object(vertigo, trigger, mc):
"""
Deletes a microcontroller from the specified object in the main request
:param vertigo: swift_vertigo.vertigo_handler.VertigoObjectHandler instance
:param trigger: trigger name
:param mc: microcontroller name
:raises HTTPInternalServerError: If it fails
"""
vertigo.logger.debug('Vertigo - Go to delete "' + mc +
'" microcontroller from "' + trigger + '" trigger')
try:
data_file = get_data_file(vertigo)
metadata = get_object_metadata(data_file)
except:
raise ValueError('Vertigo - ERROR: There was an error getting trigger'
' metadata from the object.\n')
try:
if trigger == "vertigo" and mc == "all":
for key in list(metadata.keys()):  # copy the keys; we delete entries while iterating
if key.startswith(SYSMETA_OBJ_HEADER):
del metadata[key]
else:
if metadata[VERTIGO_MC_HEADER_OBJ]:
if isinstance(metadata[VERTIGO_MC_HEADER_OBJ], dict):
mc_dict = metadata[VERTIGO_MC_HEADER_OBJ]
else:
mc_dict = eval(metadata[VERTIGO_MC_HEADER_OBJ])
if mc == 'all':
mc_list = mc_dict[trigger]
mc_dict[trigger] = None
for mc_k in mc_list:
sysmeta_key = (SYSMETA_OBJ_HEADER + trigger + '-' + mc_k).title()
if sysmeta_key in metadata:
del metadata[sysmeta_key]
elif mc in mc_dict[trigger]:
mc_dict[trigger].remove(mc)
sysmeta_key = (SYSMETA_OBJ_HEADER + trigger + '-' + mc).title()
if sysmeta_key in metadata:
del metadata[sysmeta_key]
else:
raise
metadata[VERTIGO_MC_HEADER_OBJ] = mc_dict
metadata = clean_microcontroller_dict_object(metadata)
else:
raise
set_object_metadata(data_file, metadata)
except:
raise ValueError('Vertigo - Error: Microcontroller "' + mc + '" not'
' assigned to the "' + trigger + '" trigger.\n')
data_dir = get_data_dir(vertigo)
vertigo.logger.debug('Vertigo - Object path: ' + data_dir)
def clean_microcontroller_dict_object(metadata):
"""
Auxiliary function that cleans the microcontroller dictionary, deleting
empty trigger lists and removing the whole dictionary when all of its
values are None.
:param metadata: object metadata dictionary
:returns: the cleaned metadata dictionary
"""
for trigger in metadata[VERTIGO_MC_HEADER_OBJ].keys():
if not metadata[VERTIGO_MC_HEADER_OBJ][trigger]:
metadata[VERTIGO_MC_HEADER_OBJ][trigger] = None
if all(value is None for value in metadata[VERTIGO_MC_HEADER_OBJ].values()):
del metadata[VERTIGO_MC_HEADER_OBJ]
return metadata
def clean_microcontroller_dict_container(metadata):
"""
Auxiliary function that cleans the microcontroller dictionary, deleting
empty trigger lists and clearing the container header when all of its
values are None.
:param metadata: container metadata dictionary
:returns: the cleaned metadata dictionary
"""
mc_dict = eval(metadata[VERTIGO_MC_HEADER_CONTAINER])
for trigger in mc_dict.keys():
if not mc_dict[trigger]:
mc_dict[trigger] = None
if all(value is None for value in mc_dict.values()):
metadata[VERTIGO_MC_HEADER_CONTAINER] = ''
return metadata
def get_microcontroller_dict_object(vertigo):
"""
Gets the microcontrollers associated with the requested object.
This method retrieves a dictionary with all triggers and all
microcontrollers associated with each trigger.
:param vertigo: swift_vertigo.vertigo_handler.VertigoObjectHandler instance
:returns: microcontroller dictionary
"""
data_file = get_data_file(vertigo)
metadata = get_object_metadata(data_file)
if VERTIGO_MC_HEADER_OBJ in metadata:
if isinstance(metadata[VERTIGO_MC_HEADER_OBJ], dict):
return metadata[VERTIGO_MC_HEADER_OBJ]
else:
return eval(metadata[VERTIGO_MC_HEADER_OBJ])
else:
return None
def get_microcontroller_dict_container(metadata):
"""
Gets the microcontrollers associated with the requested container.
This method retrieves a dictionary with all triggers and all
microcontrollers associated with each trigger.
:param metadata: container metadata dictionary
:returns: microcontroller dictionary
"""
if VERTIGO_MC_HEADER_CONTAINER in metadata:
if isinstance(metadata[VERTIGO_MC_HEADER_CONTAINER], dict):
return metadata[VERTIGO_MC_HEADER_CONTAINER]
else:
return eval(metadata[VERTIGO_MC_HEADER_CONTAINER])
else:
return None
def get_microcontroller_list_object(headers, method):
"""
Gets the microcontrollers associated with the requested object.
This method gets the microcontroller dictionary from the object headers
and filters it to return only the list of microcontroller names
associated with the type of request (put, get, delete).
:param headers: response headers of the object
:param method: current method
:returns: microcontroller list associated to the type of the request
"""
if headers[VERTIGO_MC_HEADER_OBJ]:
if isinstance(headers[VERTIGO_MC_HEADER_OBJ], dict):
microcontroller_dict = headers[VERTIGO_MC_HEADER_OBJ]
else:
microcontroller_dict = eval(headers[VERTIGO_MC_HEADER_OBJ])
mc_list = microcontroller_dict["on" + method]
else:
mc_list = None
return mc_list
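# Illustrative sketch only (not part of the original middleware). Assuming the
# trigger dictionary uses keys such as 'onget'/'onput'/'ondelete' and that the
# method is passed in lower case, filtering the object headers of a GET
# request would look like:
#
#     headers = {VERTIGO_MC_HEADER_OBJ: {'onget': ['compress-1.0.jar'],
#                                        'onput': [],
#                                        'ondelete': []}}
#     get_microcontroller_list_object(headers, 'get')  # -> ['compress-1.0.jar']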
|
|
import calendar
import urllib
from datetime import timedelta
from typing import Any, Dict
from unittest.mock import patch
import lxml.html
import orjson
from django.conf import settings
from django.http import HttpResponse
from django.utils.timezone import now as timezone_now
from corporate.models import Customer, CustomerPlan
from zerver.lib.actions import do_change_logo_source, do_create_user
from zerver.lib.events import add_realm_logo_fields
from zerver.lib.home import get_furthest_read_time
from zerver.lib.soft_deactivation import do_soft_deactivate_users
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import get_user_messages, queries_captured
from zerver.lib.users import compute_show_invites_and_add_streams
from zerver.models import (
DefaultStream,
Realm,
UserActivity,
UserProfile,
flush_per_request_caches,
get_realm,
get_stream,
get_system_bot,
get_user,
)
from zerver.views.home import compute_navbar_logo_url
from zerver.worker.queue_processors import UserActivityWorker
class HomeTest(ZulipTestCase):
def test_home(self) -> None:
# Keep this list sorted!!!
html_bits = [
'Compose your message here...',
'Exclude messages with topic',
'Keyboard shortcuts',
'Loading...',
'Manage streams',
'Narrow to topic',
'Next message',
'Search streams',
# Verify that the app styles get included
'app-stubentry.js',
'data-params',
]
# Keep this list sorted!!!
expected_keys = [
"alert_words",
"available_notification_sounds",
"avatar_source",
"avatar_url",
"avatar_url_medium",
"bot_types",
"can_create_streams",
"can_subscribe_other_users",
"color_scheme",
"cross_realm_bots",
"custom_profile_field_types",
"custom_profile_fields",
"debug_mode",
"default_language",
"default_language_name",
"delivery_email",
"demote_inactive_streams",
"dense_mode",
"desktop_icon_count_display",
"development_environment",
"email",
"emojiset",
"emojiset_choices",
"enable_desktop_notifications",
"enable_digest_emails",
"enable_login_emails",
"enable_offline_email_notifications",
"enable_offline_push_notifications",
"enable_online_push_notifications",
"enable_sounds",
"enable_stream_audible_notifications",
"enable_stream_desktop_notifications",
"enable_stream_email_notifications",
"enable_stream_push_notifications",
"enter_sends",
"first_in_realm",
"fluid_layout_width",
"full_name",
"furthest_read_time",
"has_mobile_devices",
"has_zoom_token",
"high_contrast_mode",
"hotspots",
"initial_servertime",
"insecure_desktop_app",
"is_admin",
"is_guest",
"is_owner",
"jitsi_server_url",
"language_list",
"language_list_dbl_col",
"last_event_id",
"left_side_userlist",
"login_page",
"max_avatar_file_size_mib",
"max_file_upload_size_mib",
"max_icon_file_size",
"max_logo_file_size",
"max_message_id",
"message_content_in_email_notifications",
"muted_topics",
"narrow",
"narrow_stream",
"needs_tutorial",
"never_subscribed",
"notification_sound",
"password_min_guesses",
"password_min_length",
"pm_content_in_desktop_notifications",
"poll_timeout",
"presence_enabled",
"presences",
"prompt_for_invites",
"queue_id",
"realm_add_emoji_by_admins_only",
"realm_allow_community_topic_editing",
"realm_allow_edit_history",
"realm_allow_message_deleting",
"realm_allow_message_editing",
"realm_authentication_methods",
"realm_available_video_chat_providers",
"realm_avatar_changes_disabled",
"realm_bot_creation_policy",
"realm_bot_domain",
"realm_bots",
"realm_community_topic_editing_limit_seconds",
"realm_create_stream_policy",
"realm_default_code_block_language",
"realm_default_external_accounts",
"realm_default_language",
"realm_default_stream_groups",
"realm_default_streams",
"realm_default_twenty_four_hour_time",
"realm_description",
"realm_digest_emails_enabled",
"realm_digest_weekday",
"realm_disallow_disposable_email_addresses",
"realm_domains",
"realm_email_address_visibility",
"realm_email_auth_enabled",
"realm_email_changes_disabled",
"realm_emails_restricted_to_domains",
"realm_embedded_bots",
"realm_emoji",
"realm_filters",
"realm_icon_source",
"realm_icon_url",
"realm_incoming_webhook_bots",
"realm_inline_image_preview",
"realm_inline_url_embed_preview",
"realm_invite_by_admins_only",
"realm_invite_required",
"realm_invite_to_stream_policy",
"realm_is_zephyr_mirror_realm",
"realm_logo_source",
"realm_logo_url",
"realm_mandatory_topics",
"realm_message_content_allowed_in_email_notifications",
"realm_message_content_delete_limit_seconds",
"realm_message_content_edit_limit_seconds",
"realm_message_retention_days",
"realm_name",
"realm_name_changes_disabled",
"realm_name_in_notifications",
"realm_night_logo_source",
"realm_night_logo_url",
"realm_non_active_users",
"realm_notifications_stream_id",
"realm_password_auth_enabled",
"realm_plan_type",
"realm_presence_disabled",
"realm_private_message_policy",
"realm_push_notifications_enabled",
"realm_send_welcome_emails",
"realm_signup_notifications_stream_id",
"realm_upload_quota",
"realm_uri",
"realm_user_group_edit_policy",
"realm_user_groups",
"realm_users",
"realm_video_chat_provider",
"realm_waiting_period_threshold",
"recent_private_conversations",
"root_domain_uri",
"save_stacktraces",
"search_pills_enabled",
"server_avatar_changes_disabled",
"server_generation",
"server_inline_image_preview",
"server_inline_url_embed_preview",
"server_name_changes_disabled",
"settings_send_digest_emails",
"starred_message_counts",
"starred_messages",
"stop_words",
"stream_description_max_length",
"stream_name_max_length",
"subscriptions",
"test_suite",
"timezone",
"translate_emoticons",
"translation_data",
"twenty_four_hour_time",
"two_fa_enabled",
"two_fa_enabled_user",
"unread_msgs",
"unsubscribed",
"upgrade_text_for_wide_organization_logo",
"user_id",
"user_status",
"warn_no_email",
"webpack_public_path",
"wildcard_mentions_notify",
"zulip_feature_level",
"zulip_plan_is_not_limited",
"zulip_version",
]
# Verify fails if logged-out
result = self.client_get('/')
self.assertEqual(result.status_code, 302)
self.login('hamlet')
# Create bot for realm_bots testing. Must be done before fetching home_page.
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
self.client_post("/json/bots", bot_info)
# Verify succeeds once logged-in
flush_per_request_caches()
with queries_captured() as queries:
with patch('zerver.lib.cache.cache_set') as cache_mock:
result = self._get_home_page(stream='Denmark')
self.assertEqual(set(result["Cache-Control"].split(", ")),
{"must-revalidate", "no-store", "no-cache"})
self.assert_length(queries, 42)
self.assert_length(cache_mock.call_args_list, 5)
html = result.content.decode('utf-8')
for html_bit in html_bits:
if html_bit not in html:
raise AssertionError(f'{html_bit} not in result')
page_params = self._get_page_params(result)
actual_keys = sorted([str(k) for k in page_params.keys()])
self.assertEqual(actual_keys, expected_keys)
# TODO: Inspect the page_params data further.
# print(orjson.dumps(page_params, option=orjson.OPT_INDENT_2).decode())
realm_bots_expected_keys = [
'api_key',
'avatar_url',
'bot_type',
'default_all_public_streams',
'default_events_register_stream',
'default_sending_stream',
'email',
'full_name',
'is_active',
'owner_id',
'services',
'user_id',
]
realm_bots_actual_keys = sorted([str(key) for key in page_params['realm_bots'][0].keys()])
self.assertEqual(realm_bots_actual_keys, realm_bots_expected_keys)
def test_home_under_2fa_without_otp_device(self) -> None:
with self.settings(TWO_FACTOR_AUTHENTICATION_ENABLED=True):
self.login('iago')
result = self._get_home_page()
# Should be successful because otp device is not configured.
self.assertEqual(result.status_code, 200)
def test_home_under_2fa_with_otp_device(self) -> None:
with self.settings(TWO_FACTOR_AUTHENTICATION_ENABLED=True):
user_profile = self.example_user('iago')
self.create_default_device(user_profile)
self.login_user(user_profile)
result = self._get_home_page()
# User should not log in because otp device is configured but
# 2fa login function was not called.
self.assertEqual(result.status_code, 302)
self.login_2fa(user_profile)
result = self._get_home_page()
# Should be successful after calling 2fa login function.
self.assertEqual(result.status_code, 200)
def test_num_queries_for_realm_admin(self) -> None:
# Verify number of queries for Realm admin isn't much higher than for normal users.
self.login('iago')
flush_per_request_caches()
with queries_captured() as queries:
with patch('zerver.lib.cache.cache_set') as cache_mock:
result = self._get_home_page()
self.assertEqual(result.status_code, 200)
self.assert_length(cache_mock.call_args_list, 6)
self.assert_length(queries, 39)
def test_num_queries_with_streams(self) -> None:
main_user = self.example_user('hamlet')
other_user = self.example_user('cordelia')
realm_id = main_user.realm_id
self.login_user(main_user)
# Try to make page-load do extra work for various subscribed
# streams.
for i in range(10):
stream_name = 'test_stream_' + str(i)
stream = self.make_stream(stream_name)
DefaultStream.objects.create(
realm_id=realm_id,
stream_id=stream.id,
)
for user in [main_user, other_user]:
self.subscribe(user, stream_name)
# Simulate hitting the page the first time to avoid some noise
# related to initial logins.
self._get_home_page()
# Then for the second page load, measure the number of queries.
flush_per_request_caches()
with queries_captured() as queries2:
result = self._get_home_page()
self.assert_length(queries2, 37)
# Do a sanity check that our new streams were in the payload.
html = result.content.decode('utf-8')
self.assertIn('test_stream_7', html)
def _get_home_page(self, **kwargs: Any) -> HttpResponse:
with \
patch('zerver.lib.events.request_event_queue', return_value=42), \
patch('zerver.lib.events.get_user_events', return_value=[]):
result = self.client_get('/', dict(**kwargs))
return result
def _get_page_params(self, result: HttpResponse) -> Dict[str, Any]:
doc = lxml.html.document_fromstring(result.content)
[div] = doc.xpath("//div[@id='page-params']")
page_params_json = div.get("data-params")
page_params = orjson.loads(page_params_json)
return page_params
def _sanity_check(self, result: HttpResponse) -> None:
'''
Use this for tests that are geared toward specific edge cases, but
which still want the home page to load properly.
'''
html = result.content.decode('utf-8')
if 'Compose your message' not in html:
raise AssertionError('Home page probably did not load.')
def test_terms_of_service(self) -> None:
user = self.example_user('hamlet')
self.login_user(user)
for user_tos_version in [None, '1.1', '2.0.3.4']:
user.tos_version = user_tos_version
user.save()
with \
self.settings(TERMS_OF_SERVICE='whatever'), \
self.settings(TOS_VERSION='99.99'):
result = self.client_get('/', dict(stream='Denmark'))
html = result.content.decode('utf-8')
self.assertIn('Accept the new Terms of Service', html)
def test_banned_desktop_app_versions(self) -> None:
user = self.example_user('hamlet')
self.login_user(user)
result = self.client_get('/',
HTTP_USER_AGENT="ZulipElectron/2.3.82")
html = result.content.decode('utf-8')
self.assertIn('You are using old version of the Zulip desktop', html)
def test_unsupported_browser(self) -> None:
user = self.example_user('hamlet')
self.login_user(user)
# currently we don't support IE, so some of IE's user agents are added.
unsupported_user_agents = [
"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2)",
"Mozilla/5.0 (Windows NT 10.0; Trident/7.0; rv:11.0) like Gecko",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)",
]
for user_agent in unsupported_user_agents:
result = self.client_get('/',
HTTP_USER_AGENT=user_agent)
html = result.content.decode('utf-8')
self.assertIn('Internet Explorer is not supported by Zulip.', html)
def test_terms_of_service_first_time_template(self) -> None:
user = self.example_user('hamlet')
self.login_user(user)
user.tos_version = None
user.save()
with \
self.settings(FIRST_TIME_TOS_TEMPLATE='hello.html'), \
self.settings(TOS_VERSION='99.99'):
result = self.client_post('/accounts/accept_terms/')
self.assertEqual(result.status_code, 200)
self.assert_in_response("I agree to the", result)
self.assert_in_response("Chat for distributed teams", result)
def test_accept_terms_of_service(self) -> None:
self.login('hamlet')
result = self.client_post('/accounts/accept_terms/')
self.assertEqual(result.status_code, 200)
self.assert_in_response("I agree to the", result)
result = self.client_post('/accounts/accept_terms/', {'terms': True})
self.assertEqual(result.status_code, 302)
self.assertEqual(result['Location'], '/')
def test_bad_narrow(self) -> None:
self.login('hamlet')
with patch('logging.warning') as mock:
result = self._get_home_page(stream='Invalid Stream')
mock.assert_called_once()
self.assertEqual(mock.call_args_list[0][0][0], "Invalid narrow requested, ignoring")
self._sanity_check(result)
def test_topic_narrow(self) -> None:
self.login('hamlet')
result = self._get_home_page(stream='Denmark', topic='lunch')
self._sanity_check(result)
html = result.content.decode('utf-8')
self.assertIn('lunch', html)
self.assertEqual(set(result["Cache-Control"].split(", ")),
{"must-revalidate", "no-store", "no-cache"})
def test_notifications_stream(self) -> None:
realm = get_realm('zulip')
realm.notifications_stream_id = get_stream('Denmark', realm).id
realm.save()
self.login('hamlet')
result = self._get_home_page()
page_params = self._get_page_params(result)
self.assertEqual(page_params['realm_notifications_stream_id'], get_stream('Denmark', realm).id)
def create_bot(self, owner: UserProfile, bot_email: str, bot_name: str) -> UserProfile:
user = do_create_user(
email=bot_email,
password='123',
realm=owner.realm,
full_name=bot_name,
bot_type=UserProfile.DEFAULT_BOT,
bot_owner=owner,
)
return user
def create_non_active_user(self, realm: Realm, email: str, name: str) -> UserProfile:
user = do_create_user(
email=email,
password='123',
realm=realm,
full_name=name,
)
# Doing a full-stack deactivation would be expensive here,
# and we really only need to flip the flag to get a valid
# test.
user.is_active = False
user.save()
return user
def test_signup_notifications_stream(self) -> None:
realm = get_realm('zulip')
realm.signup_notifications_stream = get_stream('Denmark', realm)
realm.save()
self.login('hamlet')
result = self._get_home_page()
page_params = self._get_page_params(result)
self.assertEqual(page_params['realm_signup_notifications_stream_id'], get_stream('Denmark', realm).id)
def test_people(self) -> None:
hamlet = self.example_user('hamlet')
realm = get_realm('zulip')
self.login_user(hamlet)
bots = {}
for i in range(3):
bots[i] = self.create_bot(
owner=hamlet,
bot_email=f'bot-{i}@zulip.com',
bot_name=f'Bot {i}',
)
for i in range(3):
defunct_user = self.create_non_active_user(
realm=realm,
email=f'defunct-{i}@zulip.com',
name=f'Defunct User {i}',
)
result = self._get_home_page()
page_params = self._get_page_params(result)
'''
We send three lists of users. The first two below are disjoint
lists of users, and the records we send for them have identical
structure.
The realm_bots bucket is somewhat redundant, since all bots will
be in one of the first two buckets. They do include fields, however,
that normal users don't care about, such as default_sending_stream.
'''
buckets = [
'realm_users',
'realm_non_active_users',
'realm_bots',
]
for field in buckets:
users = page_params[field]
self.assertTrue(len(users) >= 3, field)
for rec in users:
self.assertEqual(rec['user_id'],
get_user(rec['email'], realm).id)
if field == 'realm_bots':
self.assertNotIn('is_bot', rec)
self.assertIn('is_active', rec)
self.assertIn('owner_id', rec)
else:
self.assertIn('is_bot', rec)
self.assertNotIn('is_active', rec)
active_ids = {p['user_id'] for p in page_params['realm_users']}
non_active_ids = {p['user_id'] for p in page_params['realm_non_active_users']}
bot_ids = {p['user_id'] for p in page_params['realm_bots']}
self.assertIn(hamlet.id, active_ids)
self.assertIn(defunct_user.id, non_active_ids)
# Bots can show up in multiple buckets.
self.assertIn(bots[2].id, bot_ids)
self.assertIn(bots[2].id, active_ids)
# Make sure nobody got mis-bucketed.
self.assertNotIn(hamlet.id, non_active_ids)
self.assertNotIn(defunct_user.id, active_ids)
cross_bots = page_params['cross_realm_bots']
self.assertEqual(len(cross_bots), 3)
cross_bots.sort(key=lambda d: d['email'])
for cross_bot in cross_bots:
# These are either nondeterministic or boring
del cross_bot['timezone']
del cross_bot['avatar_url']
del cross_bot['date_joined']
notification_bot = self.notification_bot()
email_gateway_bot = get_system_bot(settings.EMAIL_GATEWAY_BOT)
welcome_bot = get_system_bot(settings.WELCOME_BOT)
by_email = lambda d: d['email']
self.assertEqual(sorted(cross_bots, key=by_email), sorted([
dict(
avatar_version=email_gateway_bot.avatar_version,
bot_owner_id=None,
bot_type=1,
email=email_gateway_bot.email,
user_id=email_gateway_bot.id,
full_name=email_gateway_bot.full_name,
is_active=True,
is_bot=True,
is_admin=False,
is_owner=False,
is_cross_realm_bot=True,
is_guest=False,
),
dict(
avatar_version=email_gateway_bot.avatar_version,
bot_owner_id=None,
bot_type=1,
email=notification_bot.email,
user_id=notification_bot.id,
full_name=notification_bot.full_name,
is_active=True,
is_bot=True,
is_admin=False,
is_owner=False,
is_cross_realm_bot=True,
is_guest=False,
),
dict(
avatar_version=email_gateway_bot.avatar_version,
bot_owner_id=None,
bot_type=1,
email=welcome_bot.email,
user_id=welcome_bot.id,
full_name=welcome_bot.full_name,
is_active=True,
is_bot=True,
is_admin=False,
is_owner=False,
is_cross_realm_bot=True,
is_guest=False,
),
], key=by_email))
def test_new_stream(self) -> None:
user_profile = self.example_user("hamlet")
stream_name = 'New stream'
self.subscribe(user_profile, stream_name)
self.login_user(user_profile)
result = self._get_home_page(stream=stream_name)
page_params = self._get_page_params(result)
self.assertEqual(page_params['narrow_stream'], stream_name)
self.assertEqual(page_params['narrow'], [dict(operator='stream', operand=stream_name)])
self.assertEqual(page_params['max_message_id'], -1)
def test_invites_by_admins_only(self) -> None:
user_profile = self.example_user('hamlet')
realm = user_profile.realm
realm.invite_by_admins_only = True
realm.save()
self.login_user(user_profile)
self.assertFalse(user_profile.is_realm_admin)
result = self._get_home_page()
html = result.content.decode('utf-8')
self.assertNotIn('Invite more users', html)
user_profile.role = UserProfile.ROLE_REALM_ADMINISTRATOR
user_profile.save()
result = self._get_home_page()
html = result.content.decode('utf-8')
self.assertIn('Invite more users', html)
def test_show_invites_for_guest_users(self) -> None:
user_profile = self.example_user('polonius')
realm = user_profile.realm
realm.invite_by_admins_only = False
realm.save()
self.login_user(user_profile)
self.assertFalse(user_profile.is_realm_admin)
self.assertFalse(get_realm('zulip').invite_by_admins_only)
result = self._get_home_page()
html = result.content.decode('utf-8')
self.assertNotIn('Invite more users', html)
def test_show_billing(self) -> None:
customer = Customer.objects.create(realm=get_realm("zulip"), stripe_customer_id="cus_id")
user = self.example_user('desdemona')
# realm owner, but no CustomerPlan -> no billing link
user.role = UserProfile.ROLE_REALM_OWNER
user.save(update_fields=["role"])
self.login_user(user)
result_html = self._get_home_page().content.decode('utf-8')
self.assertNotIn('Billing', result_html)
# realm owner, with inactive CustomerPlan -> show billing link
CustomerPlan.objects.create(customer=customer, billing_cycle_anchor=timezone_now(),
billing_schedule=CustomerPlan.ANNUAL, next_invoice_date=timezone_now(),
tier=CustomerPlan.STANDARD, status=CustomerPlan.ENDED)
result_html = self._get_home_page().content.decode('utf-8')
self.assertIn('Billing', result_html)
# realm admin, with CustomerPlan -> no billing link
user.role = UserProfile.ROLE_REALM_ADMINISTRATOR
user.save(update_fields=["role"])
result_html = self._get_home_page().content.decode('utf-8')
self.assertNotIn('Billing', result_html)
# billing admin, with CustomerPlan -> show billing link
user.role = UserProfile.ROLE_MEMBER
user.is_billing_admin = True
user.save(update_fields=['role', 'is_billing_admin'])
result_html = self._get_home_page().content.decode('utf-8')
self.assertIn('Billing', result_html)
# member, with CustomerPlan -> no billing link
user.is_billing_admin = False
user.save(update_fields=['is_billing_admin'])
result_html = self._get_home_page().content.decode('utf-8')
self.assertNotIn('Billing', result_html)
# guest, with CustomerPlan -> no billing link
user.role = UserProfile.ROLE_GUEST
user.save(update_fields=['role'])
result_html = self._get_home_page().content.decode('utf-8')
self.assertNotIn('Billing', result_html)
# billing admin, but no CustomerPlan -> no billing link
user.role = UserProfile.ROLE_MEMBER
user.is_billing_admin = True
user.save(update_fields=['role', 'is_billing_admin'])
CustomerPlan.objects.all().delete()
result_html = self._get_home_page().content.decode('utf-8')
self.assertNotIn('Billing', result_html)
# billing admin, with sponsorship pending -> show billing link
customer.sponsorship_pending = True
customer.save(update_fields=["sponsorship_pending"])
result_html = self._get_home_page().content.decode('utf-8')
self.assertIn('Billing', result_html)
# billing admin, no customer object -> make sure it doesn't crash
customer.delete()
result = self._get_home_page()
self.assertEqual(result.status_code, 200)
def test_show_plans(self) -> None:
realm = get_realm("zulip")
# Don't show plans to guest users
self.login('polonius')
realm.plan_type = Realm.LIMITED
realm.save(update_fields=["plan_type"])
result_html = self._get_home_page().content.decode('utf-8')
self.assertNotIn('Plans', result_html)
# Show plans link to all other users if plan_type is LIMITED
self.login('hamlet')
result_html = self._get_home_page().content.decode('utf-8')
self.assertIn('Plans', result_html)
# Show plans link to no one, including admins, if SELF_HOSTED or STANDARD
realm.plan_type = Realm.SELF_HOSTED
realm.save(update_fields=["plan_type"])
result_html = self._get_home_page().content.decode('utf-8')
self.assertNotIn('Plans', result_html)
realm.plan_type = Realm.STANDARD
realm.save(update_fields=["plan_type"])
result_html = self._get_home_page().content.decode('utf-8')
self.assertNotIn('Plans', result_html)
def test_desktop_home(self) -> None:
self.login('hamlet')
result = self.client_get("/desktop_home")
self.assertEqual(result.status_code, 301)
self.assertTrue(result["Location"].endswith("/desktop_home/"))
result = self.client_get("/desktop_home/")
self.assertEqual(result.status_code, 302)
path = urllib.parse.urlparse(result['Location']).path
self.assertEqual(path, "/")
def test_compute_navbar_logo_url(self) -> None:
user_profile = self.example_user("hamlet")
page_params = {"color_scheme": user_profile.COLOR_SCHEME_NIGHT}
add_realm_logo_fields(page_params, user_profile.realm)
self.assertEqual(compute_navbar_logo_url(page_params),
"/static/images/logo/zulip-org-logo.svg?version=0")
page_params = {"color_scheme": user_profile.COLOR_SCHEME_LIGHT}
add_realm_logo_fields(page_params, user_profile.realm)
self.assertEqual(compute_navbar_logo_url(page_params),
"/static/images/logo/zulip-org-logo.svg?version=0")
do_change_logo_source(user_profile.realm, Realm.LOGO_UPLOADED, night=False, acting_user=user_profile)
page_params = {"color_scheme": user_profile.COLOR_SCHEME_NIGHT}
add_realm_logo_fields(page_params, user_profile.realm)
self.assertEqual(compute_navbar_logo_url(page_params),
f"/user_avatars/{user_profile.realm_id}/realm/logo.png?version=2")
page_params = {"color_scheme": user_profile.COLOR_SCHEME_LIGHT}
add_realm_logo_fields(page_params, user_profile.realm)
self.assertEqual(compute_navbar_logo_url(page_params),
f"/user_avatars/{user_profile.realm_id}/realm/logo.png?version=2")
do_change_logo_source(user_profile.realm, Realm.LOGO_UPLOADED, night=True, acting_user=user_profile)
page_params = {"color_scheme": user_profile.COLOR_SCHEME_NIGHT}
add_realm_logo_fields(page_params, user_profile.realm)
self.assertEqual(compute_navbar_logo_url(page_params),
f"/user_avatars/{user_profile.realm_id}/realm/night_logo.png?version=2")
page_params = {"color_scheme": user_profile.COLOR_SCHEME_LIGHT}
add_realm_logo_fields(page_params, user_profile.realm)
self.assertEqual(compute_navbar_logo_url(page_params),
f"/user_avatars/{user_profile.realm_id}/realm/logo.png?version=2")
# This configuration isn't super supported in the UI and is a
# weird choice, but we have a test for it anyway.
do_change_logo_source(user_profile.realm, Realm.LOGO_DEFAULT, night=False, acting_user=user_profile)
page_params = {"color_scheme": user_profile.COLOR_SCHEME_NIGHT}
add_realm_logo_fields(page_params, user_profile.realm)
self.assertEqual(compute_navbar_logo_url(page_params),
f"/user_avatars/{user_profile.realm_id}/realm/night_logo.png?version=2")
page_params = {"color_scheme": user_profile.COLOR_SCHEME_LIGHT}
add_realm_logo_fields(page_params, user_profile.realm)
self.assertEqual(compute_navbar_logo_url(page_params),
"/static/images/logo/zulip-org-logo.svg?version=0")
def test_generate_204(self) -> None:
self.login('hamlet')
result = self.client_get("/api/v1/generate_204")
self.assertEqual(result.status_code, 204)
def test_furthest_read_time(self) -> None:
msg_id = self.send_test_message("hello!", sender_name="iago")
hamlet = self.example_user('hamlet')
self.login_user(hamlet)
self.client_post("/json/messages/flags",
{"messages": orjson.dumps([msg_id]).decode(),
"op": "add",
"flag": "read"})
# Manually process the UserActivity
now = timezone_now()
activity_time = calendar.timegm(now.timetuple())
user_activity_event = {'user_profile_id': hamlet.id,
'client': 'test-client',
'query': 'update_message_flags',
'time': activity_time}
yesterday = now - timedelta(days=1)
activity_time_2 = calendar.timegm(yesterday.timetuple())
user_activity_event_2 = {'user_profile_id': hamlet.id,
'client': 'test-client-2',
'query': 'update_message_flags',
'time': activity_time_2}
UserActivityWorker().consume_batch([user_activity_event, user_activity_event_2])
# verify furthest_read_time is last activity time, irrespective of client
furthest_read_time = get_furthest_read_time(hamlet)
self.assertGreaterEqual(furthest_read_time, activity_time)
# Check when user has no activity
UserActivity.objects.filter(user_profile=hamlet).delete()
furthest_read_time = get_furthest_read_time(hamlet)
self.assertIsNone(furthest_read_time)
# Check no user profile handling
furthest_read_time = get_furthest_read_time(None)
self.assertIsNotNone(furthest_read_time)
def test_subdomain_homepage(self) -> None:
self.login('hamlet')
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
with patch('zerver.views.home.get_subdomain', return_value=""):
result = self._get_home_page()
self.assertEqual(result.status_code, 200)
self.assert_in_response('Chat for distributed teams', result)
with patch('zerver.views.home.get_subdomain', return_value="subdomain"):
result = self._get_home_page()
self._sanity_check(result)
def send_test_message(self, content: str, sender_name: str='iago',
stream_name: str='Denmark', topic_name: str='foo') -> int:
sender = self.example_user(sender_name)
return self.send_stream_message(sender, stream_name,
content=content, topic_name=topic_name)
def soft_activate_and_get_unread_count(self, stream: str='Denmark', topic: str='foo') -> int:
stream_narrow = self._get_home_page(stream=stream, topic=topic)
page_params = self._get_page_params(stream_narrow)
return page_params['unread_msgs']['count']
def test_unread_count_user_soft_deactivation(self) -> None:
# In this test we make sure that if a soft-deactivated user had unread
# messages before deactivation, they remain that way after reactivation.
long_term_idle_user = self.example_user('hamlet')
self.login_user(long_term_idle_user)
message = 'Test Message 1'
self.send_test_message(message)
with queries_captured() as queries:
self.assertEqual(self.soft_activate_and_get_unread_count(), 1)
query_count = len(queries)
user_msg_list = get_user_messages(long_term_idle_user)
self.assertEqual(user_msg_list[-1].content, message)
self.logout()
with self.assertLogs(level='INFO') as info_log:
do_soft_deactivate_users([long_term_idle_user])
self.assertEqual(info_log.output, [
'INFO:root:Soft-deactivated batch of 1 users; 0 remain to process'
])
self.login_user(long_term_idle_user)
message = 'Test Message 2'
self.send_test_message(message)
idle_user_msg_list = get_user_messages(long_term_idle_user)
self.assertNotEqual(idle_user_msg_list[-1].content, message)
with queries_captured() as queries:
self.assertEqual(self.soft_activate_and_get_unread_count(), 2)
# Test here for the query count to be at least 5 greater than the previous count.
# This indirectly ensures that add_missing_messages() was called.
self.assertGreaterEqual(len(queries) - query_count, 5)
idle_user_msg_list = get_user_messages(long_term_idle_user)
self.assertEqual(idle_user_msg_list[-1].content, message)
def test_multiple_user_soft_deactivations(self) -> None:
long_term_idle_user = self.example_user('hamlet')
# We are sending this message to ensure that long_term_idle_user has
# at least one UserMessage row.
self.send_test_message('Testing', sender_name='hamlet')
with self.assertLogs(level='INFO') as info_log:
do_soft_deactivate_users([long_term_idle_user])
self.assertEqual(info_log.output, [
'INFO:root:Soft-deactivated batch of 1 users; 0 remain to process'
])
message = 'Test Message 1'
self.send_test_message(message)
self.login_user(long_term_idle_user)
with queries_captured() as queries:
self.assertEqual(self.soft_activate_and_get_unread_count(), 2)
query_count = len(queries)
long_term_idle_user.refresh_from_db()
self.assertFalse(long_term_idle_user.long_term_idle)
idle_user_msg_list = get_user_messages(long_term_idle_user)
self.assertEqual(idle_user_msg_list[-1].content, message)
message = 'Test Message 2'
self.send_test_message(message)
with queries_captured() as queries:
self.assertEqual(self.soft_activate_and_get_unread_count(), 3)
# Test here for the query count to be at least 5 less than the previous count.
# This ensures that add_missing_messages() isn't called repeatedly.
self.assertGreaterEqual(query_count - len(queries), 5)
idle_user_msg_list = get_user_messages(long_term_idle_user)
self.assertEqual(idle_user_msg_list[-1].content, message)
self.logout()
with self.assertLogs(level='INFO') as info_log:
do_soft_deactivate_users([long_term_idle_user])
self.assertEqual(info_log.output, [
'INFO:root:Soft-deactivated batch of 1 users; 0 remain to process'
])
message = 'Test Message 3'
self.send_test_message(message)
self.login_user(long_term_idle_user)
with queries_captured() as queries:
self.assertEqual(self.soft_activate_and_get_unread_count(), 4)
query_count = len(queries)
long_term_idle_user.refresh_from_db()
self.assertFalse(long_term_idle_user.long_term_idle)
idle_user_msg_list = get_user_messages(long_term_idle_user)
self.assertEqual(idle_user_msg_list[-1].content, message)
message = 'Test Message 4'
self.send_test_message(message)
with queries_captured() as queries:
self.assertEqual(self.soft_activate_and_get_unread_count(), 5)
self.assertGreaterEqual(query_count - len(queries), 5)
idle_user_msg_list = get_user_messages(long_term_idle_user)
self.assertEqual(idle_user_msg_list[-1].content, message)
self.logout()
def test_url_language(self) -> None:
user = self.example_user("hamlet")
user.default_language = 'es'
user.save()
self.login_user(user)
result = self._get_home_page()
self.assertEqual(result.status_code, 200)
with \
patch('zerver.lib.events.request_event_queue', return_value=42), \
patch('zerver.lib.events.get_user_events', return_value=[]):
result = self.client_get('/de/')
page_params = self._get_page_params(result)
self.assertEqual(page_params['default_language'], 'es')
# TODO: Verify that the actual language we're using in the
# translation data is German.
def test_translation_data(self) -> None:
user = self.example_user("hamlet")
user.default_language = 'es'
user.save()
self.login_user(user)
result = self._get_home_page()
self.assertEqual(result.status_code, 200)
page_params = self._get_page_params(result)
self.assertEqual(page_params['default_language'], 'es')
def test_compute_show_invites_and_add_streams_admin(self) -> None:
user = self.example_user("iago")
realm = user.realm
realm.invite_by_admins_only = True
realm.save()
show_invites, show_add_streams = compute_show_invites_and_add_streams(user)
self.assertEqual(show_invites, True)
self.assertEqual(show_add_streams, True)
def test_compute_show_invites_and_add_streams_require_admin(self) -> None:
user = self.example_user("hamlet")
realm = user.realm
realm.invite_by_admins_only = True
realm.save()
show_invites, show_add_streams = compute_show_invites_and_add_streams(user)
self.assertEqual(show_invites, False)
self.assertEqual(show_add_streams, True)
def test_compute_show_invites_and_add_streams_guest(self) -> None:
user = self.example_user("polonius")
show_invites, show_add_streams = compute_show_invites_and_add_streams(user)
self.assertEqual(show_invites, False)
self.assertEqual(show_add_streams, False)
def test_compute_show_invites_and_add_streams_unauthenticated(self) -> None:
show_invites, show_add_streams = compute_show_invites_and_add_streams(None)
self.assertEqual(show_invites, False)
self.assertEqual(show_add_streams, False)
|
|
'''
A pure python version of the hdr_histogram code
Ported from
https://github.com/HdrHistogram/HdrHistogram (Java)
https://github.com/HdrHistogram/HdrHistogram_c (C)
Written by Alec Hothan
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division, print_function
from builtins import range
from builtins import object
import math
import sys
from hdrh.iterators import AllValuesIterator
from hdrh.iterators import RecordedIterator
from hdrh.iterators import PercentileIterator
from hdrh.iterators import LinearIterator
from hdrh.iterators import LogIterator
from hdrh.codec import HdrHistogramEncoder
def get_bucket_count(value, subb_count, unit_mag):
smallest_untrackable_value = subb_count << unit_mag
buckets_needed = 1
while smallest_untrackable_value <= value:
if smallest_untrackable_value > sys.maxsize // 2:
return buckets_needed + 1
smallest_untrackable_value <<= 1
buckets_needed += 1
return buckets_needed
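# Worked example (illustrative, not part of the original module): for the
# histogram described in the class docstring below -- 3 significant digits and
# values up to 3,600,000,000 with unit_magnitude 0 -- sub_bucket_count is 2048,
# so the trackable range doubles 21 times before exceeding the maximum:
#
#     get_bucket_count(3600000000, 2048, 0)   # -> 22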
class HdrHistogram(object):
'''This class supports the recording and analyzing of sampled data value
counts across a configurable integer value range with configurable value
precision within the range. Value precision is expressed as the number of
significant digits in the value recording, and provides control over value
quantization behavior across the value range and the subsequent value
resolution at any given level.
For example, a Histogram could be configured to track the counts of
observed integer values between 0 and 3,600,000,000 while maintaining a
value precision of 3 significant digits across that range. Value
quantization within the range will thus be no larger than 1/1,000th
(or 0.1%) of any value. This example Histogram could be used to track and
analyze the counts of observed response times ranging between 1 microsecond
and 1 hour in magnitude, while maintaining a value resolution of 1
microsecond up to 1 millisecond, a resolution of 1 millisecond (or better)
up to one second, and a resolution of 1 second (or better) up to 1,000
seconds. At its maximum tracked value (1 hour), it would still maintain a
resolution of 3.6 seconds (or better).
'''
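# A minimal usage sketch (illustrative only, not part of the original module),
# matching the example above: track values from 1 microsecond to 1 hour with
# 3 significant digits:
#
#     h = HdrHistogram(1, 60 * 60 * 1000 * 1000, 3)
#     h.record_value(42)
#     h.get_value_at_percentile(99.9)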
def __init__(self,
lowest_trackable_value,
highest_trackable_value,
significant_figures,
word_size=8,
b64_wrap=True,
hdr_payload=None):
'''Create a new histogram with given arguments
Params:
lowest_trackable_value The lowest value that can be discerned
(distinguished from 0) by the histogram.
Must be a positive integer that is >= 1.
May be internally rounded down to nearest power of 2.
highest_trackable_value The highest value to be tracked by the
histogram. Must be a positive integer that is >=
(2 * lowest_trackable_value).
significant_figures The number of significant decimal digits to
which the histogram will maintain value resolution and
separation. Must be an integer between 1 and 5.
word_size size of counters in bytes; only 2, 4 and 8-byte counters
are supported (default is 8-byte or 64-bit counters)
b64_wrap specifies if the encoding of this histogram should use
base64 wrapping (only useful if you need to encode the histogram
to save somewhere or send it over the wire; base64
encoding is assumed by default)
hdr_payload only used for associating an existing payload created
from decoding an encoded histogram
Exceptions:
ValueError if the word_size value is unsupported
if significant_figures is invalid
'''
if significant_figures < 1 or significant_figures > 5:
raise ValueError('Invalid significant_figures')
self.lowest_trackable_value = lowest_trackable_value
self.highest_trackable_value = highest_trackable_value
self.significant_figures = significant_figures
self.unit_magnitude = int(math.floor(math.log(lowest_trackable_value) / math.log(2)))
largest_value_single_unit_res = 2 * math.pow(10, significant_figures)
subb_count_mag = int(math.ceil(math.log(largest_value_single_unit_res) / math.log(2)))
self.sub_bucket_half_count_magnitude = subb_count_mag - 1 if subb_count_mag > 1 else 0
self.sub_bucket_count = int(math.pow(2, self.sub_bucket_half_count_magnitude + 1))
self.sub_bucket_half_count = self.sub_bucket_count // 2
self.sub_bucket_mask = (self.sub_bucket_count - 1) << self.unit_magnitude
self.bucket_count = get_bucket_count(highest_trackable_value,
self.sub_bucket_count,
self.unit_magnitude)
self.min_value = sys.maxsize
self.max_value = 0
self.total_count = 0
self.counts_len = (self.bucket_count + 1) * (self.sub_bucket_count // 2)
self.word_size = word_size
if hdr_payload:
payload = hdr_payload.payload
self.int_to_double_conversion_ratio = payload.conversion_ratio_bits
results = hdr_payload.init_counts(self.counts_len)
if results['total']:
self.set_internal_tacking_values(results['min_nonzero_index'],
results['max_nonzero_index'],
results['total'])
else:
self.int_to_double_conversion_ratio = 1.0
# to encode this histogram into a compressed/base64 format ready
# to be exported
self.b64_wrap = b64_wrap
self.encoder = HdrHistogramEncoder(self, b64_wrap, hdr_payload)
# the counters reside directly in the payload object
# allocated by the encoder
# so that compression for wire transfer can be done without copy
self.counts = self.encoder.get_counts()
self.start_time_stamp_msec = 0
self.end_time_stamp_msec = 0
def _clz(self, value):
"""calculate the leading zeros, equivalent to C __builtin_clzll()
value in hex:
value = 1 clz = 63
value = 2 clz = 62
value = 4 clz = 61
value = 1000 clz = 51
value = 1000000 clz = 39
"""
return 63 - (len(bin(value)) - 3)
def _get_bucket_index(self, value):
# smallest power of 2 containing value
pow2ceiling = 64 - self._clz(int(value) | self.sub_bucket_mask)
return int(pow2ceiling - self.unit_magnitude -
(self.sub_bucket_half_count_magnitude + 1))
def _get_sub_bucket_index(self, value, bucket_index):
return int(value) >> (bucket_index + self.unit_magnitude)
def _counts_index(self, bucket_index, sub_bucket_index):
# Calculate the index for the first entry in the bucket:
# (The following is the equivalent of ((bucket_index + 1) * subBucketHalfCount) ):
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# Calculate the offset in the bucket:
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# ((sub_bucket_index - subBucketHalfCount) + bucketBaseIndex
return bucket_base_index + offset_in_bucket
def _counts_index_for(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
return self._counts_index(bucket_index, sub_bucket_index)
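# Worked index example (illustrative): with unit_magnitude 0 and
# sub_bucket_count 2048, a value of 1000 lands in bucket 0, sub-bucket 1000,
# so _counts_index_for(1000) == 1000 -- values below sub_bucket_count map
# directly to their own slot. Larger values map into the upper half of
# progressively coarser buckets.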
def record_value(self, value, count=1):
'''Record a new value into the histogram
Args:
value: the value to record (must be in the valid range)
count: incremental count (defaults to 1)
'''
if value < 0:
return False
counts_index = self._counts_index_for(value)
if (counts_index < 0) or (self.counts_len <= counts_index):
return False
self.counts[counts_index] += count
self.total_count += count
self.min_value = min(self.min_value, value)
self.max_value = max(self.max_value, value)
return True
def record_corrected_value(self, value, expected_interval, count=1):
'''Record a new value into the histogram and correct for
coordinated omission if needed
Args:
value: the value to record (must be in the valid range)
expected_interval: the expected interval between 2 value samples
count: incremental count (defaults to 1)
'''
while True:
if not self.record_value(value, count):
return False
if value <= expected_interval or expected_interval <= 0:
return True
value -= expected_interval
def get_count_at_index(self, index):
if index >= self.counts_len:
raise IndexError()
# some decoded (read-only) histograms may have truncated
# counts arrays; we return zero for any index that is past the end of the array
if index >= self.encoder.payload.counts_len:
return 0
return self.counts[index]
def get_count_at_sub_bucket(self, bucket_index, sub_bucket_index):
# Calculate the index for the first entry in the bucket:
# (The following is the equivalent of ((bucket_index + 1) * subBucketHalfCount) )
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# Calculate the offset in the bucket:
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The following is the equivalent of
# (sub_bucket_index - subBucketHalfCount) + bucketBaseIndex
counts_index = bucket_base_index + offset_in_bucket
return self.counts[counts_index]
def get_value_from_sub_bucket(self, bucket_index, sub_bucket_index):
return sub_bucket_index << (bucket_index + self.unit_magnitude)
def get_value_from_index(self, index):
bucket_index = (index >> self.sub_bucket_half_count_magnitude) - 1
sub_bucket_index = (index & (self.sub_bucket_half_count - 1)) + \
self.sub_bucket_half_count
if bucket_index < 0:
sub_bucket_index -= self.sub_bucket_half_count
bucket_index = 0
return self.get_value_from_sub_bucket(bucket_index, sub_bucket_index)
def get_lowest_equivalent_value(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,
sub_bucket_index)
return lowest_equivalent_value
def get_highest_equivalent_value(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index,
sub_bucket_index)
if sub_bucket_index >= self.sub_bucket_count:
bucket_index += 1
size_of_equivalent_value_range = 1 << (self.unit_magnitude + bucket_index)
next_non_equivalent_value = lowest_equivalent_value + size_of_equivalent_value_range
return next_non_equivalent_value - 1
def get_target_count_at_percentile(self, percentile):
requested_percentile = min(percentile, 100.0)
count_at_percentile = int(((requested_percentile * self.total_count / 100)) + 0.5)
return max(count_at_percentile, 1)
def get_value_at_percentile(self, percentile):
'''Get the value for a given percentile
Args:
percentile: a float in [0.0..100.0]
Returns:
the value for the given percentile
'''
count_at_percentile = self.get_target_count_at_percentile(percentile)
total = 0
for index in range(self.counts_len):
total += self.get_count_at_index(index)
if total >= count_at_percentile:
value_at_index = self.get_value_from_index(index)
if percentile:
return self.get_highest_equivalent_value(value_at_index)
return self.get_lowest_equivalent_value(value_at_index)
return 0
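# Note: get_value_at_percentile() does a single linear pass over the counts
# array, accumulating counts until it reaches the target returned by
# get_target_count_at_percentile(); get_percentile_to_value_dict() below
# amortizes that same pass across a whole list of percentiles.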
def get_percentile_to_value_dict(self, percentile_list):
'''A faster alternative to query values for a list of percentiles.
Args:
percentile_list: a list of percentiles in any order, dups will be ignored
each element in the list must be a float value in [0.0 .. 100.0]
Returns:
a dict of percentile values indexed by the percentile
'''
result = {}
total = 0
percentile_list_index = 0
count_at_percentile = 0
# remove dups and sort
percentile_list = list(set(percentile_list))
percentile_list.sort()
for index in range(self.counts_len):
total += self.get_count_at_index(index)
while True:
# recalculate target based on next requested percentile
if not count_at_percentile:
if percentile_list_index == len(percentile_list):
return result
percentile = percentile_list[percentile_list_index]
percentile_list_index += 1
if percentile > 100:
return result
count_at_percentile = self.get_target_count_at_percentile(percentile)
if total >= count_at_percentile:
value_at_index = self.get_value_from_index(index)
if percentile:
result[percentile] = self.get_highest_equivalent_value(value_at_index)
else:
result[percentile] = self.get_lowest_equivalent_value(value_at_index)
count_at_percentile = 0
else:
break
return result
def get_total_count(self):
return self.total_count
def get_count_at_value(self, value):
counts_index = self._counts_index_for(value)
return self.counts[counts_index]
def values_are_equivalent(self, val1, val2):
'''Check whether 2 values are equivalent (meaning they
are in the same bucket/range)
Returns:
true if the 2 values are equivalent
'''
return self.get_lowest_equivalent_value(val1) == self.get_lowest_equivalent_value(val2)
def get_max_value(self):
if 0 == self.max_value:
return 0
return self.get_highest_equivalent_value(self.max_value)
def get_min_value(self):
if 0 < self.counts[0] or self.total_count == 0:
return 0
if sys.maxsize == self.min_value:
return sys.maxsize
return self.get_lowest_equivalent_value(self.min_value)
def _hdr_size_of_equiv_value_range(self, value):
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
if sub_bucket_index >= self.sub_bucket_count:
bucket_index += 1
return 1 << (self.unit_magnitude + bucket_index)
def _hdr_median_equiv_value(self, value):
return self.get_lowest_equivalent_value(value) + \
(self._hdr_size_of_equiv_value_range(value) >> 1)
def get_mean_value(self):
if not self.total_count:
return 0.0
total = 0
itr = self.get_recorded_iterator()
for item in itr:
total += itr.count_at_this_value * self._hdr_median_equiv_value(item.value_iterated_to)
return float(total) / self.total_count
def get_stddev(self):
if not self.total_count:
return 0.0
mean = self.get_mean_value()
geometric_dev_total = 0.0
for item in self.get_recorded_iterator():
dev = (self._hdr_median_equiv_value(item.value_iterated_to) * 1.0) - mean
geometric_dev_total += (dev * dev) * item.count_added_in_this_iter_step
return math.sqrt(geometric_dev_total / self.total_count)
def reset(self):
'''Reset the histogram to a pristine state
'''
for index in range(self.counts_len):
self.counts[index] = 0
self.total_count = 0
self.min_value = sys.maxsize
self.max_value = 0
def __iter__(self):
'''Returns the recorded iterator if iter(self) is called
'''
return RecordedIterator(self)
def get_all_values_iterator(self):
return AllValuesIterator(self)
def get_recorded_iterator(self):
return RecordedIterator(self)
def get_percentile_iterator(self, ticks_per_half_distance):
return PercentileIterator(self, ticks_per_half_distance)
def get_linear_iterator(self, value_units_per_bucket):
return LinearIterator(self, value_units_per_bucket)
def get_log_iterator(self, value_units_first_bucket, log_base):
return LogIterator(self, value_units_first_bucket, log_base)
def encode(self):
'''Encode this histogram
Return:
a string containing the base64 encoded compressed histogram (V1 format)
'''
return self.encoder.encode()
def adjust_internal_tacking_values(self,
min_non_zero_index,
max_index,
total_added):
'''Called during decoding and add() to adjust the new min/max values and
total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none)
total_added total count that was added
'''
if max_index >= 0:
max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
self.max_value = max(self.max_value, max_value)
if min_non_zero_index >= 0:
min_value = self.get_value_from_index(min_non_zero_index)
self.min_value = min(self.min_value, min_value)
self.total_count += total_added
def set_internal_tacking_values(self,
min_non_zero_index,
max_index,
total_added):
'''Called during decoding to set the min/max values and the total count
Args:
min_non_zero_index min nonzero index of all added counts (-1 if none)
max_index max index of all added counts (-1 if none)
total_added total count to set
'''
if max_index >= 0:
self.max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
if min_non_zero_index >= 0:
self.min_value = self.get_value_from_index(min_non_zero_index)
self.total_count = total_added
def get_counts_array_index(self, value):
'''Return the index in the counts array for a given value
'''
if value < 0:
raise ValueError("Histogram recorded value cannot be negative.")
bucket_index = self._get_bucket_index(value)
sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
# Calculate the index for the first entry in the bucket; the shift below is
# equivalent to ((bucket_index + 1) * sub_bucket_half_count):
bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
# Calculate the offset in the bucket (can be negative for the first bucket):
offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
# The returned value is the equivalent of
# ((sub_bucket_index - sub_bucket_half_count) + bucket_base_index)
return bucket_base_index + offset_in_bucket
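# Editor's note -- worked example of the index math above, assuming
# sub_bucket_half_count_magnitude = 10 (i.e. sub_bucket_half_count = 1024):
# a value in bucket 0, sub-bucket 0 maps to (0 + 1) << 10 + (0 - 1024) = 0,
# and bucket 0, sub-bucket 1 maps to (0 + 1) << 10 + (1 - 1024) = 1.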
def get_start_time_stamp(self):
return self.start_time_stamp_msec
def set_start_time_stamp(self, time_stamp_msec):
'''Set the start time stamp value associated with this histogram to a given value.
Params:
time_stamp_msec the value to set the time stamp to,
[by convention] in msec since the epoch.
'''
self.start_time_stamp_msec = time_stamp_msec
def get_end_time_stamp(self):
return self.end_time_stamp_msec
def set_end_time_stamp(self, time_stamp_msec):
'''Set the end time stamp value associated with this histogram to a given value.
Params:
time_stamp_msec the value to set the time stamp to,
[by convention] in msec since the epoch.
'''
self.end_time_stamp_msec = time_stamp_msec
def add(self, other_hist):
highest_recordable_value = \
self.get_highest_equivalent_value(self.get_value_from_index(self.counts_len - 1))
if highest_recordable_value < other_hist.get_max_value():
raise IndexError("The other histogram includes values that do not fit %d < %d" %
(highest_recordable_value, other_hist.get_max_value()))
if (self.bucket_count == other_hist.bucket_count) and \
(self.sub_bucket_count == other_hist.sub_bucket_count) and \
(self.unit_magnitude == other_hist.unit_magnitude) and \
(self.word_size == other_hist.word_size):
# do an in-place addition of one array to another
self.encoder.add(other_hist.encoder)
self.total_count += other_hist.get_total_count()
self.max_value = max(self.max_value, other_hist.get_max_value())
self.min_value = min(self.get_min_value(), other_hist.get_min_value())
else:
# Arrays are not a direct match, so we can't just stream through and add them.
# Instead, go through the array and add each non-zero value found at its proper value:
for index in range(other_hist.counts_len):
other_count = other_hist.get_count_at_index(index)
if other_count > 0:
self.record_value(other_hist.get_value_from_index(index), other_count)
self.start_time_stamp_msec = \
min(self.start_time_stamp_msec, other_hist.start_time_stamp_msec)
self.end_time_stamp_msec = \
max(self.end_time_stamp_msec, other_hist.end_time_stamp_msec)
def decode_and_add(self, encoded_histogram):
'''Decode an encoded histogram and add it to this histogram
Args:
encoded_histogram (string) an encoded histogram
following the V1 format, such as one returned by the encode() method
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
zlib.error:
in case of zlib decompression error
'''
other_hist = HdrHistogram.decode(encoded_histogram, self.b64_wrap)
self.add(other_hist)
@staticmethod
def decode(encoded_histogram, b64_wrap=True):
'''Decode an encoded histogram and return a new histogram instance that
has been initialized with the decoded content
Return:
a new histogram instance representing the decoded content
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
zlib.error:
in case of zlib decompression error
'''
hdr_payload = HdrHistogramEncoder.decode(encoded_histogram, b64_wrap)
payload = hdr_payload.payload
histogram = HdrHistogram(payload.lowest_trackable_value,
payload.highest_trackable_value,
payload.significant_figures,
hdr_payload=hdr_payload)
return histogram
def get_word_size(self):
return self.word_size
def output_percentile_distribution(self,
out_file,
output_value_unit_scaling_ratio,
ticks_per_half_distance=5):
out_file.write('%12s %14s %10s %14s\n\n' %
('Value', 'Percentile', 'TotalCount', '1/(1-Percentile)'))
percentile_format = '%12.{}f %2.12f %10d %14.2f\n'.format(self.significant_figures)
last_line_percentile_format = '%12.{}f %2.12f %10d\n'.format(self.significant_figures)
for iter_value in self.get_percentile_iterator(ticks_per_half_distance):
value = iter_value.value_iterated_to / output_value_unit_scaling_ratio
percentile = iter_value.percentile_level_iterated_to / 100
total_count = iter_value.total_count_to_this_value
if iter_value.percentile_level_iterated_to != 100:
other = 1 / (1 - iter_value.percentile_level_iterated_to / 100)
out_file.write(percentile_format % (value, percentile, total_count, other))
else:
out_file.write(last_line_percentile_format % (value, percentile, total_count))
mean = self.get_mean_value() / output_value_unit_scaling_ratio
stddev = self.get_stddev()
out_file.write('#[Mean = %12.{0}f, StdDeviation = %12.{0}f]\n'.format(
self.significant_figures) % (mean, stddev))
max = self.get_max_value() / output_value_unit_scaling_ratio
total = self.get_total_count()
out_file.write('#[Max = %12.{0}f, TotalCount = %12.{0}f]\n'.format(
self.significant_figures) % (max, total))
out_file.write('#[Buckets = %12d, SubBuckets = %12d]\n' % (
self.bucket_count, self.sub_bucket_count))
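# Editor's sketch (not part of the original module): a minimal encode/decode
# round trip using only the methods defined above. The constructor arguments
# and recorded values are illustrative assumptions; the default base64
# wrapping is used on both sides.
def _example_encode_decode_round_trip():
    hist = HdrHistogram(1, 60 * 60 * 1000 * 1000, 2)
    for value in (150, 500, 1200, 90000):
        hist.record_value(value)
    encoded = hist.encode()                 # base64 compressed V1 payload
    clone = HdrHistogram.decode(encoded)    # new instance with the same counts
    assert clone.get_total_count() == hist.get_total_count()
    hist.decode_and_add(encoded)            # merge the payload back in
    return hist.get_total_count()           # now twice the original count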
|
|
"""
TODO
- [ ] Get Fission code working
- [ ] Display singles / doubles sinograms and backprojections
- [ ] Precalc all the detector probability stuff
- [ ] Use precalc if available
- [ ] Calculate changing p
- [ ] Include changing p calculations
"""
import numpy as np
import matplotlib.pyplot as plt
import tomo
import assemblies
def ANGLE(x): return x / np.pi * 180.
def RADIAN(x): return x / 180. * np.pi
def draw_rays(rays, draw_option='b-', arrow=False):
for ray in rays:
dx = (ray[2] - ray[0]) / 2.
dy = (ray[3] - ray[1]) / 2.
if dx != 0 or dy != 0:  # skip degenerate (zero-length) rays
plt.plot([ray[0], ray[2]], [ray[1], ray[3]], draw_option, lw=1)
if arrow:
plt.arrow(ray[0], ray[1], dx, dy,
fc='b', ec='b', head_width=0.5)
def draw_extent(extent, draw_option='b-'):
draw_extent_xs = [extent[0], extent[1], extent[1], extent[0], extent[0]]
draw_extent_ys = [extent[2], extent[2], extent[3], extent[3], extent[2]]
plt.plot(draw_extent_xs, draw_extent_ys, draw_option)
def plot_sinogram(data, type, geo_angles, other_coord):
plt.imshow(data, extent=[other_coord[-1], other_coord[0], geo_angles[-1], geo_angles[0]],
origin='lower', aspect='auto')
if type == 'fan':
plt.xlabel(r'Fan Angle $\phi$')
plt.ylabel(r'Geometry Angle $\theta$')
elif type == 'parallel':
plt.xlabel(r'Parallel Coord $r$')
plt.ylabel(r'Geometry Angle $\theta$')
def draw_detector(points, draw_option=''):
for i in range(points.shape[0]):
plt.plot(points[i, (0, 2)], points[i, (1, 3)], lw=2)
plt.scatter(points[:, (0, 2)], points[:, (1, 3)], color='k', s=8)
def plot_image(image, ex):
plt.imshow(image, extent=ex, origin='lower', aspect='auto')
def CGLS_reconstruction(n_steps, image_shape, sinogram, A, AT):
x = np.zeros(image_shape, dtype=np.double)
r = sinogram - A(x)
d = AT(r)
AT_r = AT(r)
AT_rprevnorm = np.linalg.norm(AT_r)
A_d = A(d)
for k in range(n_steps):
print(k)
alpha = (AT_rprevnorm ** 2) / (np.linalg.norm(A_d) ** 2)
x = x + alpha * d
r = r - alpha * A_d
AT_r = AT(r)
beta = (np.linalg.norm(AT_r) ** 2) / (AT_rprevnorm ** 2)
d = AT_r + beta * d
A_d = A(d)
AT_rprevnorm = np.linalg.norm(AT_r)
return x
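# Editor's note: the loop above is the standard CGLS recurrence for
# min_x ||A x - b||^2, i.e. conjugate gradients applied to the normal
# equations A^T A x = A^T b:
#   alpha_k = ||A^T r_k||^2 / ||A d_k||^2
#   x_{k+1} = x_k + alpha_k d_k,    r_{k+1} = r_k - alpha_k A d_k
#   beta_k  = ||A^T r_{k+1}||^2 / ||A^T r_k||^2
#   d_{k+1} = A^T r_{k+1} + beta_k d_k
# Here A is the forward projector and A^T the back projector.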
def test_ray_crop():
extent = np.array([-12, 12, -8, 8], dtype=np.double)
ray_extent = [extent[0] - 2, extent[1] + 2, extent[2] - 2, extent[3] + 2]
n_rays = 500
rays = np.random.rand(n_rays, 4)
rays[:, (0, 2)] *= (ray_extent[1] - ray_extent[0])
rays[:, (0, 2)] -= (ray_extent[1] - ray_extent[0]) / 2
rays[:, (1, 3)] *= (ray_extent[3] - ray_extent[2])
rays[:, (1, 3)] -= (ray_extent[3] - ray_extent[2]) / 2
plt.figure()
draw_extent(extent, 'r-')
crop_rays = tomo.ray_box_crop(rays, extent)
draw_rays(crop_rays, 'g-')
plt.axes().set_aspect('equal', 'datalim')
plt.show()
def test_ray_geometry(type='parallel', theta=0., other_coord_n=50):
extent = np.array([-12, 12, -8, 8], dtype=np.double)
if type == 'parallel':
length = 40
r = np.linspace(-20, 20, other_coord_n)
rays = tomo.parallel_ray(theta, r, length)
if type == 'fan':
radius = 40
phi = np.linspace(-np.pi / 8, np.pi / 8, other_coord_n)
rays = tomo.fan_ray(theta, phi, radius)
crop_rays = tomo.ray_box_crop(rays, extent)
plt.figure()
draw_extent(extent)
draw_rays(rays[7], arrow=True)
plt.axes().set_aspect('equal', 'datalim')
plt.figure()
draw_extent(extent)
draw_rays(crop_rays[7], arrow=True)
plt.axes().set_aspect('equal', 'datalim')
plt.show()
def test_detector_geometry(type='parallel', theta=0., n_detectors=10):
if type == 'parallel':
dr = 40.
l = 40.
detector_points = tomo.parallel_detector(n_detectors, theta, dr, l)
elif type == 'fan':
dphi = np.pi / 4
radius = 40.
detector_points = tomo.fan_detector(n_detectors, theta, dphi, radius)
plt.figure()
extent = np.array([-12, 12, -8, 8], dtype=np.double)
draw_extent(extent)
draw_detector(detector_points)
plt.axes().set_aspect('equal', 'datalim')
plt.show()
def test_bilinear():
from scipy import interpolate
image = np.zeros((7, 7), dtype=np.double)
extent = np.array([-3.5, 3.5, -3.5, 3.5])
image[5, 2] = 1
image[5, 4] = 1
image[3, 1] = 1
image[2, 2] = 1
image[2, 3] = 1
image[2, 4] = 1
image[3, 5] = 1
image[0, 1] = 1
plt.figure()
plt.imshow(image, extent=extent, origin='lower')
xs = np.linspace(-3, 3, 7)
ys = np.linspace(-3, 3, 7)
f = interpolate.interp2d(xs, ys, image, kind='linear')
plt.figure()
nx, ny = 500, 500
xs = np.linspace(extent[0], extent[1], nx)
ys = np.linspace(extent[2], extent[3], ny)
zs = f(xs, ys)
plt.imshow(zs, extent=extent, origin='lower')
plt.figure()
xs_, ys_ = np.meshgrid(xs, ys)
xs_ = xs_.flatten()
ys_ = ys_.flatten()
my_zs = tomo.bilinear_interpolate(xs_, ys_, image, extent)
print(my_zs)
plt.imshow(my_zs.reshape(
ys.shape[0], xs.shape[0]), extent=extent, origin='lower')
plt.show()
def test_forward_project(type='parallel', theta_n=100, other_coord_n=100):
theta = np.linspace(0., 2 * np.pi, theta_n, endpoint=False)
step_size = 0.01
mu_im, mu_f_im, p_im = assemblies.shielded_true_images()
plt.figure()
ex = list(mu_im.extent)
plt.imshow(mu_im.data, extent=ex, origin='lower', aspect='auto')
if type == 'parallel':
length = 40
r = np.linspace(-20, 20, other_coord_n)
rays = tomo.parallel_ray(theta, r, length)
if type == 'fan':
radius = 40
phi = np.linspace(-np.pi / 8, np.pi / 8, other_coord_n)
rays = tomo.fan_ray(theta, phi, radius)
sinogram = tomo.forward_project(rays, mu_im.data, mu_im.extent, step_size)
plt.figure()
if type == 'parallel':
plot_sinogram(sinogram, 'parallel', theta, r)
if type == 'fan':
plot_sinogram(sinogram, 'fan', theta, phi)
plt.show()
def test_back_project(type='parallel', theta=0., other_coord_n=100):
step_size = 0.01
mu_im, mu_f_im, p_im = assemblies.shielded_true_images()
mu_im.data[:] = 0
mu_im.data[20:50, 20:27] = 1
nx, ny = mu_im.data.shape[1], mu_im.data.shape[0]
extent = mu_im.extent
plt.figure()
plt.imshow(mu_im.data, extent=mu_im.extent, origin='lower', aspect='auto')
if type == 'parallel':
length = 40
r = np.linspace(-20, 20, other_coord_n)
rays = tomo.parallel_ray(theta, r, length)
projection = tomo.forward_project(
rays, mu_im.data, mu_im.extent, step_size)
back_projection = tomo.back_project_parallel(
theta, r, projection, nx, ny, extent)
elif type == 'fan':
radius = 40
phi = np.linspace(-np.pi / 8, np.pi / 8, other_coord_n)
rays = tomo.fan_ray(theta, phi, radius)
projection = tomo.forward_project(
rays, mu_im.data, mu_im.extent, step_size)
back_projection = tomo.back_project_fan(
theta, phi, radius, projection, nx, ny, extent)
plt.figure()
plot_image(back_projection, mu_im.extent)
plt.show()
def test_cgls(steps_n, type='parallel', theta_n=100, other_coord_n=100):
theta = np.linspace(0., 2 * np.pi, theta_n)
step_size = 0.01
mu_im, mu_f_im, p_im = assemblies.ut_logo()
# mu_im.data[:] = 0
# mu_im.data[20:50, 20:27] = 1
nx, ny = mu_im.data.shape[1], mu_im.data.shape[0]
extent = mu_im.extent
if type == 'parallel':
length = 40
r = np.linspace(-20, 20, other_coord_n)
rays = tomo.parallel_ray(theta, r, length)
def A(x): return tomo.forward_project(rays, x, mu_im.extent, step_size)
def AT(x): return tomo.back_project_parallel(
theta, r, x, nx, ny, extent)
if type == 'fan':
radius = 40
phi = np.linspace(-np.pi / 8, np.pi / 8, other_coord_n)
rays = tomo.fan_ray(theta, phi, radius)
def A(x): return tomo.forward_project(rays, x, mu_im.extent, step_size)
def AT(x): return tomo.back_project_fan(
theta, phi, radius, x, nx, ny, extent)
sinogram = A(mu_im.data)
backproject = AT(sinogram)
cgls = CGLS_reconstruction(steps_n, mu_im.data.shape, sinogram, A, AT)
plt.figure()
plot_image(cgls, ex=mu_im.extent)
plt.show()
def test_detector_probability(type='parallel', theta=0., n_detectors=20):
step_size = 0.01
mu_im, mu_f_im, p_im = assemblies.shielded_true_images()
if type == 'parallel':
dr = 40.
l = 40.
detector_points = tomo.parallel_detector(n_detectors, theta, dr, l)
if type == 'fan':
dphi = np.pi / 4
radius = 40.
detector_points = tomo.fan_detector(n_detectors, theta, dphi, radius)
detector_prob = tomo.detect_probability(
mu_im.data, mu_im.extent, detector_points, step_size)
plt.figure()
plot_image(detector_prob[0], mu_im.extent)
plt.figure()
plot_image(detector_prob[1], mu_im.extent)
plt.show()
def test_fission_forward_project(type='parallel', k=1, theta=0., other_coord_n=100, n_detectors=20,
save=True, load=False):
step_size = 0.04
mu_im, mu_f_im, p_im = assemblies.shielded_true_images()
nu_u235_induced = \
np.array([0.0237898, 0.1555525, 0.3216515, 0.3150433,
0.1444732, 0.0356013, 0.0034339, 0.0004546])
plt.figure()
plot_image(mu_im.data, mu_im.extent)
if type == 'parallel':
length = 40.
r = np.linspace(-20, 20, other_coord_n)
dr = 40.
l = 40.
rays = tomo.parallel_ray(theta, r, length)
detector_points = tomo.parallel_detector(n_detectors, theta, dr, l)
if type == 'fan':
dphi = np.pi / 4
radius = 40.
phi = np.linspace(- np.pi / 8., np.pi / 8., other_coord_n)
rays = tomo.fan_ray(theta, phi, radius)
detector_points = tomo.fan_detector(n_detectors, theta, dphi, radius)
if not save and load:
detector_prob = np.load('detector_prob.npy')
else:
detector_prob = tomo.detect_probability(
mu_im.data, mu_im.extent, detector_points, step_size)
if save:
np.save('detector_prob.npy', detector_prob)
fission_project = tomo.fission_forward_project(rays, k,
mu_im.data, mu_f_im.data, p_im.data, detector_prob,
mu_im.extent, nu_u235_induced, step_size)
if fission_project.ndim == 1:
plt.figure()
plt.plot(fission_project)
elif fission_project.ndim == 2:
plt.figure()
if type == 'parallel':
plot_sinogram(fission_project, 'parallel', theta, r)
if type == 'fan':
plot_sinogram(fission_project, 'fan', theta, phi)
plt.show()
if __name__ == '__main__':
# test_ray_crop()
# test_ray_geometry('parallel', theta=np.linspace(
# 0., 2 * np.pi, 50, endpoint=False))
# test_detector_geometry('parallel', RADIAN(301))
# test_bilinear()
# test_forward_project('parallel', theta_n=50)
# test_back_project('fan', theta=np.linspace(
# RADIAN(0), RADIAN(360), 100, endpoint=False))
# test_cgls(20, 'parallel')
test_fission_forward_project(
'fan', k=2, theta=np.linspace(0., 2 * np.pi, 20, endpoint=False),
save=True, load=False)
|
|
# django imports
from django.contrib.contenttypes.models import ContentType
# workflows imports
from permissions.models import ObjectPermission
from permissions.models import ObjectPermissionInheritanceBlock
from workflows.models import StateInheritanceBlock
from workflows.models import StateObjectRelation
from workflows.models import StatePermissionRelation
from workflows.models import Transition
from workflows.models import Workflow
from workflows.models import WorkflowModelRelation
from workflows.models import WorkflowObjectRelation
from workflows.models import WorkflowPermissionRelation
# permissions imports
import permissions.utils
def get_objects_for_workflow(workflow):
"""Returns all objects which have passed workflow.
**Parameters:**
workflow
The workflow for which the objects are returned. Can be a Workflow
instance or a string with the workflow name.
"""
if not isinstance(workflow, Workflow):
try:
workflow = Workflow.objects.get(name=workflow)
except Workflow.DoesNotExist:
return []
return workflow.get_objects()
def remove_workflow(ctype_or_obj):
"""Removes the workflow from the passed content type or object. After this
function has been called the content type or object has no workflow
anymore.
If ctype_or_obj is an object, the workflow is removed from the object, not
from the belonging content type.
If ctype_or_obj is a content type, the workflow is removed from the
content type, not from instances of the content type (if they have their
own workflow).
ctype_or_obj
The content type or the object from which the workflow should be
removed. Can be either a ContentType instance or any Django model
instance.
"""
if isinstance(ctype_or_obj, ContentType):
remove_workflow_from_model(ctype_or_obj)
else:
remove_workflow_from_object(ctype_or_obj)
def remove_workflow_from_model(ctype):
"""Removes the workflow from passed content type. After this function has
been called the content type has no workflow anymore (the instances might
have own ones).
ctype
The content type from which the passed workflow should be removed.
Must be a ContentType instance.
"""
# First delete all states, inheritance blocks and permissions from ctype's
# instances which have passed workflow.
workflow = get_workflow_for_model(ctype)
for obj in get_objects_for_workflow(workflow):
# Only take care of the given ctype.
obj_ctype = ContentType.objects.get_for_model(obj)
if ctype != obj_ctype:
continue
try:
ctype = ContentType.objects.get_for_model(obj)
sor = StateObjectRelation.objects.get(content_id=obj.id, content_type=ctype)
except StateObjectRelation.DoesNotExist:
pass
else:
sor.delete()
# Reset all permissions
permissions.utils.reset(obj)
try:
wmr = WorkflowModelRelation.objects.get(content_type=ctype)
except WorkflowModelRelation.DoesNotExist:
pass
else:
wmr.delete()
def remove_workflow_from_object(obj):
"""Removes the workflow from the passed object. After this function has
been called the object has no *own* workflow anymore (it might have one
via its content type).
obj
The object from which the workflow should be removed. Must be a
Django Model instance.
"""
try:
wor = WorkflowObjectRelation.objects.get(content_type=obj)
except WorkflowObjectRelation.DoesNotExist:
pass
else:
wor.delete()
# Reset all permissions
permissions.utils.reset(obj)
# Set initial of object's content types workflow (if there is one)
set_initial_state(obj)
def set_workflow(ctype_or_obj, workflow):
"""Sets the workflow for passed content type or object. See the specific
methods for more information.
**Parameters:**
workflow
The workflow which should be set to the object or model.
ctype_or_obj
The content type or the object to which the passed workflow should be
set. Can be either a ContentType instance or any Django model
instance.
"""
return workflow.set_to(ctype_or_obj)
def set_workflow_for_object(obj, workflow):
"""Sets the passed workflow to the passed object.
If the object already has the given workflow, nothing happens. Otherwise
the object gets the passed workflow and the state is set to the workflow's
initial state.
**Parameters:**
workflow
The workflow which should be set to the object. Can be a Workflow
instance or a string with the workflow name.
obj
The object which gets the passed workflow.
"""
if not isinstance(workflow, Workflow):
try:
workflow = Workflow.objects.get(name=workflow)
except Workflow.DoesNotExist:
return False
workflow.set_to_object(obj)
def set_workflow_for_model(ctype, workflow):
"""Sets the passed workflow to the passed content type. If the content
type already has an assigned workflow, it is overwritten.
The objects which had the old workflow must be updated explicitly.
**Parameters:**
workflow
The workflow which should be set to the passed content type. Must be a
Workflow instance.
ctype
The content type to which the passed workflow should be assigned. Must
be a ContentType instance.
"""
if not isinstance(workflow, Workflow):
try:
workflow = Workflow.objects.get(name=workflow)
except Workflow.DoesNotExist:
return False
workflow.set_to_model(ctype)
def get_workflow(obj):
"""Returns the workflow for the passed object. It takes it either from
the passed object or - if the object doesn't have a workflow - from the
passed object's ContentType.
**Parameters:**
obj
The object for which the workflow should be returned. Can be any
Django model instance.
"""
workflow = get_workflow_for_object(obj)
if workflow is not None:
return workflow
ctype = ContentType.objects.get_for_model(obj)
return get_workflow_for_model(ctype)
def get_workflow_for_object(obj):
"""Returns the workflow for the passed object.
**Parameters:**
obj
The object for which the workflow should be returned. Can be any
Django model instance.
"""
try:
ctype = ContentType.objects.get_for_model(obj)
wor = WorkflowObjectRelation.objects.get(content_id=obj.id, content_type=ctype)
except WorkflowObjectRelation.DoesNotExist:
return None
else:
return wor.workflow
def get_workflow_for_model(ctype):
"""Returns the workflow for the passed model.
**Parameters:**
ctype
The content type for which the workflow should be returned. Must be
a Django ContentType instance.
"""
try:
wor = WorkflowModelRelation.objects.get(content_type=ctype)
except WorkflowModelRelation.DoesNotExist:
return None
else:
return wor.workflow
def get_state(obj):
"""Returns the current workflow state for the passed object.
**Parameters:**
obj
The object for which the workflow state should be returned. Can be any
Django model instance.
"""
ctype = ContentType.objects.get_for_model(obj)
try:
sor = StateObjectRelation.objects.get(content_type=ctype, content_id=obj.id)
except StateObjectRelation.DoesNotExist:
return None
else:
return sor.state
def set_state(obj, state):
"""Sets the state for the passed object to the passed state and updates
the permissions for the object.
**Parameters:**
obj
The object for which the workflow state should be set. Can be any
Django model instance.
state
The state which should be set to the passed object.
"""
ctype = ContentType.objects.get_for_model(obj)
try:
sor = StateObjectRelation.objects.get(content_type=ctype, content_id=obj.id)
except StateObjectRelation.DoesNotExist:
sor = StateObjectRelation.objects.create(content=obj, state=state)
else:
sor.state = state
sor.save()
update_permissions(obj)
def set_initial_state(obj):
"""Sets the initial state to the passed object.
"""
wf = get_workflow(obj)
if wf is not None:
set_state(obj, wf.get_initial_state())
def get_allowed_transitions(obj, user):
"""Returns all allowed transitions for passed object and user. Takes the
current state of the object into account.
**Parameters:**
obj
The object for which the transitions should be returned.
user
The user for which the transitions are allowed.
"""
state = get_state(obj)
if state is None:
return []
return state.get_allowed_transitions(obj, user)
def do_transition(obj, transition, user):
"""Processes the passed transition to the passed object (if allowed).
"""
workflow = get_workflow(obj)
if not isinstance(transition, Transition):
try:
transition = Transition.objects.get(name=transition, workflow=workflow)
except Transition.DoesNotExist:
return False
transitions = get_allowed_transitions(obj, user)
if transition in transitions:
set_state(obj, transition.destination)
return True
else:
return False
def update_permissions(obj):
"""Updates the permissions of the passed object according to the object's
current workflow state.
"""
workflow = get_workflow(obj)
state = get_state(obj)
# Remove all permissions for the workflow
ct = ContentType.objects.get_for_model(obj)
ps = [wpr.permission for wpr in WorkflowPermissionRelation.objects.filter(workflow=workflow)]
ObjectPermission.objects.filter(content_type = ct, content_id=obj.id, permission__in=ps).delete()
# Grant permission for the state
for spr in StatePermissionRelation.objects.filter(state=state):
permissions.utils.grant_permission(obj, spr.role, spr.permission)
# Remove all inheritance blocks from the object
ObjectPermissionInheritanceBlock.objects.filter(
content_type = ct, content_id=obj.id, permission__in=ps).delete()
# Add inheritance blocks of this state to the object
for sib in StateInheritanceBlock.objects.filter(state=state):
permissions.utils.add_inheritance_block(obj, sib.permission)
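# Editor's sketch (not part of the original module): how the helpers above
# fit together. The "Review" workflow, the "publish" transition and the
# document/user objects are hypothetical names used only for illustration.
def _example_workflow_usage(document, user):
    # Attach a workflow to a single object; per set_workflow_for_object()
    # this also puts the object into the workflow's initial state.
    set_workflow_for_object(document, "Review")
    initial = get_state(document)
    # Transitions the user may perform from the current state.
    allowed = get_allowed_transitions(document, user)
    # do_transition() validates the transition and calls set_state(),
    # which in turn refreshes the object's permissions.
    do_transition(document, "publish", user)
    return initial, allowed, get_state(document)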
|
|
#!/usr/bin/env python
'''
By replacing the 1st digit of the 2-digit number *3, it turns out that six of
the nine possible values: 13, 23, 43, 53, 73, and 83, are all prime.
By replacing the 3rd and 4th digits of 56**3 with the same digit, this 5-digit
number is the first example having seven primes among the ten generated
numbers, yielding the family: 56003, 56113, 56333, 56443, 56663, 56773, and
56993. Consequently 56003, being the first member of this family, is the
smallest prime with this property.
Find the smallest prime which, by replacing part of the number (not necessarily
adjacent digits) with the same digit, is part of an eight prime value family.
Method:
We generate a prime, generalize it to every permutation possible, then
create the families and check them. Caching results as we go speeds this up,
because there are tons of duplicates. We can also remove patterns that we know
don't produce primes, regardless of the other numbers.
Example:
Start with prime 103.
Generalized patterns for 3-digit #:
(1) **n
(2) *n*
(3) n**
(4) *nn
(5) n*n
(6) nn*
Remove patterns 2, 3, and 6, because when the last digit is replaced with a
2, 5, or 0 the result definitely won't be prime. (Meaning the family can
have at most 7 primes.)
(1) **n
(4) *nn
(5) n*n
Fill in the generalized patterns with nnn=103 to create families:
(1) **n == **3 --> 113,223,333,443,553,663,773,883,993
(4) *nn == *03 --> 103,203,303,403,503,603,703,803,903
(5) n*n == 1*3 --> 103,113,123,133,143,153,163,173,183,193 (note that 0 can be substituted here)
Check each family for primes and count them:
(1) 113 == True(prime), 223 == True, 333 == False(not prime), etc...
5 of 9 are prime
(4) 2 of 9 are prime
(5) 5 of 10 are prime
Stop when a family of 8 is found.
Find the minimum number within that family.
'''
from math import sqrt, ceil
from itertools import permutations
import sys
import debug
PRIMESET = set() # Cache primes for faster retries
COMBO_RESULTS = {} # Cache results of family checking
PRIME_MASKS = {} # Bucketed by number of digits in prime
def prime_generator():
yield 2
yield 3
num = 3
while True:
num += 2
if is_prime(num):
yield num
# A prime tester that should be faster on retries of the same number
def is_prime(num):
global PRIMESET
if num in PRIMESET:
return True
for i in xrange(2, int(ceil(sqrt(num))) + 1):
if not num % i:
return False
PRIMESET.add(num)
return True
def generate_mask_for_num(num):
'''
Given nnn, generates 100, 010, 001, 110, 101, 011 for any size n.
'''
global PRIME_MASKS
size = len(str(num))
if size in PRIME_MASKS:
return PRIME_MASKS[size]
start = '0' * size
# replace the first i digits with 1, then permute
retval = set()
for i in xrange(1, size):
tmp = start.replace('0', '1', i)
for p in permutations(tmp, size):
retval.add(''.join(p))
PRIME_MASKS[size] = retval
return retval
def generate_combos_from_number(starting_num):
'''
ARGS
starting_num - A number
RETURNS
A set of strings covering every possible combo of
stars and digits. See examples
Example: Given 13, returns ('*3', '1*')
Given 103, returns ('10*', '1**', '*0*', '**3', '*03', '1*3')
'''
masks = generate_mask_for_num(starting_num)
retval = set()
for mask in masks:
applied_mask = "".join(
x if y != '1' else '*' for x, y in zip(str(starting_num), mask))
retval.add(applied_mask)
return retval
def generate_numbers_from_string_pattern(pattern):
''' Given a string of numbers and stars,
generates number patterns by replacing the stars with 0-9.
Example: Given 1*2, generates
102,112,122,132,142,152,162,172,182,192
Notes: Leading stars do NOT get 0.
*n --> 1n,2n,3n...9n -- but not 0n
'''
retval = set()
for i in xrange(10):
# Skip leading 0's
if pattern[0] == "*" and i == 0:
continue
temp = pattern.replace('*', str(i))
retval.add(int(temp))
return retval
def return_amount_of_primes_in_a_list(numbers):
return sum([is_prime(number) for number in numbers])
def return_true_if_at_least_x_primes_in_list(numbers, min_primes):
counter = 0
list_size = len(numbers)
# We can tolerate at most (list_size - min_primes) non-primes and still
# end up with min_primes primes in the list.
maxfails = list_size - min_primes
for num in numbers:
if not is_prime(num):
counter += 1
if counter > maxfails:
return False
return True
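# Editor's sanity check for the boundary case: among
# (13, 23, 33, 43, 53, 63, 73, 83, 93) exactly six values are prime, so
# asking for "at least 6" should succeed even though all tolerated
# failures occur:
#   assert return_true_if_at_least_x_primes_in_list(
#       [13, 23, 33, 43, 53, 63, 73, 83, 93], 6)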
def main():
for prime in prime_generator():
if len(str(prime)) < 2:
continue
combos = generate_combos_from_number(prime)
# Remove ones with stars in the last digit. They will not be primes
# when a 2 is substituted.
combos = filter(lambda x: x[-1] != "*", combos)
comboloopcount = 0
for combo in combos:
if combo in COMBO_RESULTS:
continue
comboloopcount += 1
numbers = generate_numbers_from_string_pattern(combo)
tf = return_true_if_at_least_x_primes_in_list(numbers, 7)
COMBO_RESULTS[combo] = tf
if tf:
total = return_amount_of_primes_in_a_list(numbers)
# print "Prime {}: Combo {}: {} of 10 are prime".format(prime,
# comboloopcount, total)
if total == 8:
print "GOT IT!"
print combo
print numbers
print min(numbers)
sys.exit()
# Todo: This would be better as actual unittests
def runtests():
expected = set(('*3', '1*'))
result = generate_combos_from_number(13)
assert expected == result, "{} != {}".format(expected, result)
expected = set(('8*', '*7'))
result = generate_combos_from_number(87)
assert expected == result, "{} != {}".format(expected, result)
expected = set(('1*', '*1'))
result = generate_combos_from_number(11)
assert expected == result, "{} != {}".format(expected, result)
expected = set(('10*', '1*3', '*03', '1**', '**3', '*0*'))
result = generate_combos_from_number(103)
assert expected == result, "{} != {}".format(expected, result)
expected = set((13, 23, 33, 43, 53, 63, 73, 83, 93))
result = generate_numbers_from_string_pattern('*3')
assert expected == result, "{} != {}".format(expected, result)
expected = set((10, 11, 12, 13, 14, 15, 16, 17, 18, 19))
result = generate_numbers_from_string_pattern('1*')
assert expected == result, "{} != {}".format(expected, result)
expected = set((103, 113, 123, 133, 143, 153, 163, 173, 183, 193))
result = generate_numbers_from_string_pattern('1*3')
assert expected == result, "{} != {}".format(expected, result)
expected = set((113, 223, 333, 443, 553, 663, 773, 883, 993))
result = generate_numbers_from_string_pattern('**3')
assert expected == result, "{} != {}".format(expected, result)
expected = set(('100', '010', '001', '110', '101', '011'))
result = generate_mask_for_num(123)
assert expected == result, "{} != {}".format(expected, result)
# Quick speed tests
print "Timing generate_combos_from_number"
debug.start()
for i in range(10000):
generate_combos_from_number(12345)
debug.finish()
print "Timing prime_generator"
debug.start()
gen = prime_generator()
p = 2
while p < 1000000:
p = next(gen)
debug.finish()
if __name__ == '__main__':
main()
|
|
# NEEDS FIXING
# -*- coding: utf-8 -*-
'''
fantastic Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,json
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['xmovies8.tv', 'xmovies8.ru']
self.base_link = 'https://xmovies8.es'
self.search_link = '/movies/search?s=%s'
def matchAlias(self, title, aliases):
try:
for alias in aliases:
if cleantitle.get(title) == cleantitle.get(alias['title']):
return True
except:
return False
def movie(self, imdb, title, localtitle, aliases, year):
try:
aliases.append({'country': 'us', 'title': title})
url = {'imdb': imdb, 'title': title, 'year': year, 'aliases': aliases}
url = urllib.urlencode(url)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
aliases.append({'country': 'us', 'title': tvshowtitle})
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'aliases': aliases}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url is None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def searchShow(self, title, season, year, aliases, headers):
try:
title = cleantitle.normalize(title)
t = cleantitle.get(title)
url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.query('%s S%02d' % (title.replace('\'', '-'), int(season)))))
sr = client.request(url, headers=headers, timeout='10')
if sr:
r = client.parseDOM(sr, 'h2', attrs={'class': 'tit'})
r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
r = [(i[0], re.findall('(.+?)\s+-\s+S(\d+)', i[1])) for i in r]
r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
r = [i[0] for i in r if t == cleantitle.get(i[1]) and int(season) == int(i[2])][0]
else:
url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.query('%s Season %01d' % (title.replace('\'', '-'), int(season)))))
sr = client.request(url, headers=headers, timeout='10')
if sr:
r = client.parseDOM(sr, 'h2', attrs={'class': 'tit'})
r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
r = [(i[0], re.findall('(.+?)\s+-\s+Season\s+(\d+)', i[1])) for i in r]
r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
r = [i[0] for i in r if t == cleantitle.get(i[1]) and int(season) == int(i[2])][0]
else:
url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.query('%s %01d' % (title.replace('\'', '-'), int(year)))))
sr = client.request(url, headers=headers, timeout='10')
if sr:
r = client.parseDOM(sr, 'h2', attrs={'class': 'tit'})
r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
url = re.findall('(?://.+?|)(/.+)', r)[0]
url = client.replaceHTMLCodes(url)
return url.encode('utf-8')
except:
return
def searchMovie(self, title, year, aliases, headers):
try:
title = cleantitle.normalize(title)
url = urlparse.urljoin(self.base_link, self.search_link % (cleantitle.geturl(title.replace('\'', '-'))))
r = client.request(url, timeout='10', headers=headers)
r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
try:
match = [i[0] for i in r if self.matchAlias(i[1], aliases) and year == i[2]][0]
except:
match = [i[0] for i in r if self.matchAlias(i[1], aliases)][0]
url = re.findall('(?://.+?|)(/.+)', match)[0]
url = client.replaceHTMLCodes(url)
return url.encode('utf-8')
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
aliases = eval(data['aliases'])
headers = {}
if 'tvshowtitle' in data:
episode = int(data['episode'])
url = self.searchShow(data['tvshowtitle'], data['season'], data['year'], aliases, headers)
else:
episode = 0
url = self.searchMovie(data['title'], data['year'], aliases, headers)
if url is None: return sources
url = urlparse.urljoin(self.base_link, url)
url = re.sub('/watching.html$', '', url.strip('/'))
url = url + '/watching.html'
p = client.request(url, headers=headers, timeout='10')
if episode > 0:
r = client.parseDOM(p, 'div', attrs={'class': 'ep_link.+?'})[0]
r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
r = [(i[0], re.findall('Episode\s+(\d+)', i[1])) for i in r]
r = [(i[0], i[1][0]) for i in r]
r = [i[0] for i in r if int(i[1]) == episode][0]
p = client.request(r, headers=headers, timeout='10')
referer = url
id = re.findall('load_player\(.+?(\d+)', p)[0]
r = urlparse.urljoin(self.base_link, '/ajax/movie/load_player_v3?id=%s' % id)
r = client.request(r, headers=headers, referer=referer, XHR=True, timeout='10')
url = json.loads(r)['value']
if (url.startswith('//')):
url = 'https:' + url
url = client.request(url, headers=headers, XHR=True, output='geturl', timeout='10')
if 'openload.io' in url or 'openload.co' in url or 'oload.tv' in url:
sources.append({'source': 'openload.co', 'quality': 'HD', 'language': 'en', 'url': url, 'direct': False,'debridonly': False})
raise Exception()
r = client.request(url, headers=headers, XHR=True, timeout='10')
try:
src = json.loads(r)['playlist'][0]['sources']
links = [i['file'] for i in src if 'file' in i]
for i in links:
try:
sources.append(
{'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en',
'url': i, 'direct': True, 'debridonly': False})
except:
pass
except:
pass
return sources
except:
return sources
def resolve(self, url):
try:
for i in range(3):
u = directstream.googlepass(url)
if u is not None: break
return u
except:
return
|
|
"""Tests for certbot._internal.hooks."""
import unittest
from typing import List
try:
import mock
except ImportError: # pragma: no cover
from unittest import mock
from certbot import errors
from certbot import util
from certbot.compat import filesystem
from certbot.compat import os
from certbot.tests import util as test_util
class ValidateHooksTest(unittest.TestCase):
"""Tests for certbot._internal.hooks.validate_hooks."""
@classmethod
def _call(cls, *args, **kwargs):
from certbot._internal.hooks import validate_hooks
return validate_hooks(*args, **kwargs)
@mock.patch("certbot._internal.hooks.validate_hook")
def test_it(self, mock_validate_hook):
config = mock.MagicMock()
self._call(config)
types = [call[0][1] for call in mock_validate_hook.call_args_list]
self.assertEqual({"pre", "post", "deploy",}, set(types[:-1]))
# This ensures error messages are about deploy hooks when appropriate
self.assertEqual("renew", types[-1])
class ValidateHookTest(test_util.TempDirTestCase):
"""Tests for certbot._internal.hooks.validate_hook."""
@classmethod
def _call(cls, *args, **kwargs):
from certbot._internal.hooks import validate_hook
return validate_hook(*args, **kwargs)
def test_hook_not_executable(self):
# prevent unnecessary modifications to PATH
with mock.patch("certbot._internal.hooks.plug_util.path_surgery"):
# We just mock out filesystem.is_executable since on Windows, it is difficult
# to get a fully working test around executable permissions. See
# certbot.tests.compat.filesystem::NotExecutableTest for more in-depth tests.
with mock.patch("certbot._internal.hooks.filesystem.is_executable", return_value=False):
self.assertRaises(errors.HookCommandNotFound, self._call, 'dummy', "foo")
@mock.patch("certbot._internal.hooks.util.exe_exists")
def test_not_found(self, mock_exe_exists):
mock_exe_exists.return_value = False
with mock.patch("certbot._internal.hooks.plug_util.path_surgery") as mock_ps:
self.assertRaises(errors.HookCommandNotFound, self._call, "foo", "bar")
self.assertTrue(mock_ps.called)
@mock.patch("certbot._internal.hooks._prog")
def test_unset(self, mock_prog):
self._call(None, "foo")
self.assertIs(mock_prog.called, False)
class HookTest(test_util.ConfigTestCase):
"""Common base class for hook tests."""
@classmethod
def _call(cls, *args, **kwargs): # pragma: no cover
"""Calls the method being tested with the given arguments."""
raise NotImplementedError
@classmethod
def _call_with_mock_execute(cls, *args, **kwargs):
"""Calls self._call after mocking out certbot.compat.misc.execute_command_status.
The mock execute object is returned rather than the return value
of self._call.
"""
with mock.patch("certbot.compat.misc.execute_command_status") as mock_execute:
mock_execute.return_value = (0, "", "")
cls._call(*args, **kwargs)
return mock_execute
class PreHookTest(HookTest):
"""Tests for certbot._internal.hooks.pre_hook."""
@classmethod
def _call(cls, *args, **kwargs):
from certbot._internal.hooks import pre_hook
return pre_hook(*args, **kwargs)
def setUp(self):
super().setUp()
self.config.pre_hook = "foo"
filesystem.makedirs(self.config.renewal_pre_hooks_dir)
self.dir_hook = os.path.join(self.config.renewal_pre_hooks_dir, "bar")
create_hook(self.dir_hook)
# Reset this value as it may have been modified by past tests
self._reset_pre_hook_already()
def tearDown(self):
# Reset this value so it's unmodified for future tests
self._reset_pre_hook_already()
super().tearDown()
def _reset_pre_hook_already(self):
from certbot._internal.hooks import executed_pre_hooks
executed_pre_hooks.clear()
def test_certonly(self):
self.config.verb = "certonly"
self._test_nonrenew_common()
def test_run(self):
self.config.verb = "run"
self._test_nonrenew_common()
def _test_nonrenew_common(self):
mock_execute = self._call_with_mock_execute(self.config)
mock_execute.assert_called_once_with("pre-hook", self.config.pre_hook, env=mock.ANY)
self._test_no_executions_common()
def test_no_hooks(self):
self.config.pre_hook = None
self.config.verb = "renew"
os.remove(self.dir_hook)
with mock.patch("certbot._internal.hooks.logger") as mock_logger:
mock_execute = self._call_with_mock_execute(self.config)
self.assertIs(mock_execute.called, False)
self.assertIs(mock_logger.info.called, False)
def test_renew_disabled_dir_hooks(self):
self.config.directory_hooks = False
mock_execute = self._call_with_mock_execute(self.config)
mock_execute.assert_called_once_with("pre-hook", self.config.pre_hook, env=mock.ANY)
self._test_no_executions_common()
def test_renew_no_overlap(self):
self.config.verb = "renew"
mock_execute = self._call_with_mock_execute(self.config)
mock_execute.assert_any_call("pre-hook", self.dir_hook, env=mock.ANY)
mock_execute.assert_called_with("pre-hook", self.config.pre_hook, env=mock.ANY)
self._test_no_executions_common()
def test_renew_with_overlap(self):
self.config.pre_hook = self.dir_hook
self.config.verb = "renew"
mock_execute = self._call_with_mock_execute(self.config)
mock_execute.assert_called_once_with("pre-hook", self.dir_hook, env=mock.ANY)
self._test_no_executions_common()
def _test_no_executions_common(self):
with mock.patch("certbot._internal.hooks.logger") as mock_logger:
mock_execute = self._call_with_mock_execute(self.config)
self.assertIs(mock_execute.called, False)
self.assertTrue(mock_logger.info.called)
class PostHookTest(HookTest):
"""Tests for certbot._internal.hooks.post_hook."""
@classmethod
def _call(cls, *args, **kwargs):
from certbot._internal.hooks import post_hook
return post_hook(*args, **kwargs)
def setUp(self):
super().setUp()
self.config.post_hook = "bar"
filesystem.makedirs(self.config.renewal_post_hooks_dir)
self.dir_hook = os.path.join(self.config.renewal_post_hooks_dir, "foo")
create_hook(self.dir_hook)
# Reset this value as it may have been modified by past tests
self._reset_post_hook_eventually()
def tearDown(self):
# Reset this value so it's unmodified for future tests
self._reset_post_hook_eventually()
super().tearDown()
def _reset_post_hook_eventually(self):
from certbot._internal.hooks import post_hooks
del post_hooks[:]
def test_certonly_and_run_with_hook(self):
for verb in ("certonly", "run",):
self.config.verb = verb
mock_execute = self._call_with_mock_execute(self.config)
mock_execute.assert_called_once_with("post-hook", self.config.post_hook, env=mock.ANY)
self.assertFalse(self._get_eventually())
def test_cert_only_and_run_without_hook(self):
self.config.post_hook = None
for verb in ("certonly", "run",):
self.config.verb = verb
self.assertFalse(self._call_with_mock_execute(self.config).called)
self.assertFalse(self._get_eventually())
def test_renew_disabled_dir_hooks(self):
self.config.directory_hooks = False
self._test_renew_common([self.config.post_hook])
def test_renew_no_config_hook(self):
self.config.post_hook = None
self._test_renew_common([self.dir_hook])
def test_renew_no_dir_hook(self):
os.remove(self.dir_hook)
self._test_renew_common([self.config.post_hook])
def test_renew_no_hooks(self):
self.config.post_hook = None
os.remove(self.dir_hook)
self._test_renew_common([])
def test_renew_no_overlap(self):
expected = [self.dir_hook, self.config.post_hook]
self._test_renew_common(expected)
self.config.post_hook = "baz"
expected.append(self.config.post_hook)
self._test_renew_common(expected)
def test_renew_with_overlap(self):
self.config.post_hook = self.dir_hook
self._test_renew_common([self.dir_hook])
def _test_renew_common(self, expected):
self.config.verb = "renew"
for _ in range(2):
self._call(self.config)
self.assertEqual(self._get_eventually(), expected)
def _get_eventually(self):
from certbot._internal.hooks import post_hooks
return post_hooks
class RunSavedPostHooksTest(HookTest):
"""Tests for certbot._internal.hooks.run_saved_post_hooks."""
@classmethod
def _call(cls, *args, **kwargs):
from certbot._internal.hooks import run_saved_post_hooks
return run_saved_post_hooks()
def _call_with_mock_execute_and_eventually(self, *args, **kwargs):
"""Call run_saved_post_hooks but mock out execute and eventually
certbot._internal.hooks.post_hooks is replaced with
self.eventually. The mock execute object is returned rather than
the return value of run_saved_post_hooks.
"""
eventually_path = "certbot._internal.hooks.post_hooks"
with mock.patch(eventually_path, new=self.eventually):
return self._call_with_mock_execute(*args, **kwargs)
def setUp(self):
super().setUp()
self.eventually: List[str] = []
def test_empty(self):
self.assertFalse(self._call_with_mock_execute_and_eventually().called)
def test_multiple(self):
self.eventually = ["foo", "bar", "baz", "qux"]
mock_execute = self._call_with_mock_execute_and_eventually()
calls = mock_execute.call_args_list
for actual_call, expected_arg in zip(calls, self.eventually):
self.assertEqual(actual_call[0][1], expected_arg)
def test_single(self):
self.eventually = ["foo"]
mock_execute = self._call_with_mock_execute_and_eventually()
mock_execute.assert_called_once_with("post-hook", self.eventually[0], env=mock.ANY)
class RenewalHookTest(HookTest):
"""Common base class for testing deploy/renew hooks."""
# Needed for https://github.com/PyCQA/pylint/issues/179
# pylint: disable=abstract-method
def _call_with_mock_execute(self, *args, **kwargs):
"""Calls self._call after mocking out certbot.compat.misc.execute_command_status.
The mock execute object is returned rather than the return value
of self._call. The mock execute object asserts that environment
variables were properly set.
"""
domains = kwargs["domains"] if "domains" in kwargs else args[1]
lineage = kwargs["lineage"] if "lineage" in kwargs else args[2]
def execute_side_effect(*unused_args, **unused_kwargs):
"""Assert environment variables are properly set.
:returns: two strings imitating no output from the hook
:rtype: `tuple` of `str`
"""
self.assertEqual(os.environ["RENEWED_DOMAINS"], " ".join(domains))
self.assertEqual(os.environ["RENEWED_LINEAGE"], lineage)
return (0, "", "")
with mock.patch("certbot.compat.misc.execute_command_status") as mock_execute:
mock_execute.side_effect = execute_side_effect
self._call(*args, **kwargs)
return mock_execute
def setUp(self):
super().setUp()
self.vars_to_clear = {
var for var in ("RENEWED_DOMAINS", "RENEWED_LINEAGE",)
if var not in os.environ
}
def tearDown(self):
for var in self.vars_to_clear:
os.environ.pop(var, None)
super().tearDown()
class DeployHookTest(RenewalHookTest):
"""Tests for certbot._internal.hooks.deploy_hook."""
@classmethod
def _call(cls, *args, **kwargs):
from certbot._internal.hooks import deploy_hook
return deploy_hook(*args, **kwargs)
@mock.patch("certbot._internal.hooks.logger")
def test_dry_run(self, mock_logger):
self.config.deploy_hook = "foo"
self.config.dry_run = True
mock_execute = self._call_with_mock_execute(
self.config, ["example.org"], "/foo/bar")
self.assertIs(mock_execute.called, False)
self.assertTrue(mock_logger.info.called)
@mock.patch("certbot._internal.hooks.logger")
def test_no_hook(self, mock_logger):
self.config.deploy_hook = None
mock_execute = self._call_with_mock_execute(
self.config, ["example.org"], "/foo/bar")
self.assertIs(mock_execute.called, False)
self.assertIs(mock_logger.info.called, False)
def test_success(self):
domains = ["example.org", "example.net"]
lineage = "/foo/bar"
self.config.deploy_hook = "foo"
mock_execute = self._call_with_mock_execute(
self.config, domains, lineage)
mock_execute.assert_called_once_with("deploy-hook", self.config.deploy_hook, env=mock.ANY)
class RenewHookTest(RenewalHookTest):
"""Tests for certbot._internal.hooks.renew_hook"""
@classmethod
def _call(cls, *args, **kwargs):
from certbot._internal.hooks import renew_hook
return renew_hook(*args, **kwargs)
def setUp(self):
super().setUp()
self.config.renew_hook = "foo"
filesystem.makedirs(self.config.renewal_deploy_hooks_dir)
self.dir_hook = os.path.join(self.config.renewal_deploy_hooks_dir,
"bar")
create_hook(self.dir_hook)
def test_disabled_dir_hooks(self):
self.config.directory_hooks = False
mock_execute = self._call_with_mock_execute(
self.config, ["example.org"], "/foo/bar")
mock_execute.assert_called_once_with("deploy-hook", self.config.renew_hook, env=mock.ANY)
@mock.patch("certbot._internal.hooks.logger")
def test_dry_run(self, mock_logger):
self.config.dry_run = True
mock_execute = self._call_with_mock_execute(
self.config, ["example.org"], "/foo/bar")
self.assertIs(mock_execute.called, False)
self.assertEqual(mock_logger.info.call_count, 2)
def test_no_hooks(self):
self.config.renew_hook = None
os.remove(self.dir_hook)
with mock.patch("certbot._internal.hooks.logger") as mock_logger:
mock_execute = self._call_with_mock_execute(
self.config, ["example.org"], "/foo/bar")
self.assertIs(mock_execute.called, False)
self.assertIs(mock_logger.info.called, False)
def test_overlap(self):
self.config.renew_hook = self.dir_hook
mock_execute = self._call_with_mock_execute(
self.config, ["example.net", "example.org"], "/foo/bar")
mock_execute.assert_called_once_with("deploy-hook", self.dir_hook, env=mock.ANY)
def test_no_overlap(self):
mock_execute = self._call_with_mock_execute(
self.config, ["example.org"], "/foo/bar")
mock_execute.assert_any_call("deploy-hook", self.dir_hook, env=mock.ANY)
mock_execute.assert_called_with("deploy-hook", self.config.renew_hook, env=mock.ANY)
class ListHooksTest(test_util.TempDirTestCase):
"""Tests for certbot._internal.hooks.list_hooks."""
@classmethod
def _call(cls, *args, **kwargs):
from certbot._internal.hooks import list_hooks
return list_hooks(*args, **kwargs)
def test_empty(self):
self.assertFalse(self._call(self.tempdir))
def test_multiple(self):
names = sorted(
os.path.join(self.tempdir, basename)
for basename in ("foo", "bar", "baz", "qux")
)
for name in names:
create_hook(name)
self.assertEqual(self._call(self.tempdir), names)
def test_single(self):
name = os.path.join(self.tempdir, "foo")
create_hook(name)
self.assertEqual(self._call(self.tempdir), [name])
def test_ignore_tilde(self):
name = os.path.join(self.tempdir, "foo~")
create_hook(name)
self.assertEqual(self._call(self.tempdir), [])
def create_hook(file_path):
"""Creates an executable file at the specified path.
:param str file_path: path to create the file at
"""
util.safe_open(file_path, mode="w", chmod=0o744).close()
if __name__ == '__main__':
unittest.main() # pragma: no cover
|
|
#!/usr/bin/env python3
# Copyright (c) 2019 The NPCcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Covers the scenario of two valid PoS blocks with same height
and same coinstake input.
'''
from copy import deepcopy
from io import BytesIO
import time
from test_framework.messages import CTransaction, CBlock
from test_framework.util import bytes_to_hex_str, hex_str_to_bytes, assert_equal
from fake_stake.base_test import NPCcoin_FakeStakeTest
class ZerocoinPublicSpendReorg(NPCcoin_FakeStakeTest):
def run_test(self):
self.description = "Covers the reorg with a zc public spend in vtx"
self.init_test()
DENOM_TO_USE = 10 # zc denomination
INITIAL_MINED_BLOCKS = 321 # First mined blocks (rewards collected to mint)
MORE_MINED_BLOCKS = 105 # More blocks mined before spending zerocoins
# 1) Start mining blocks
self.log.info("Mining %d blocks.." % INITIAL_MINED_BLOCKS)
self.node.generate(INITIAL_MINED_BLOCKS)
# 2) Mint 2 zerocoins
self.node.mintzerocoin(DENOM_TO_USE)
self.node.generate(1)
self.node.mintzerocoin(DENOM_TO_USE)
self.node.generate(1)
# 3) Mine additional blocks and collect the mints
self.log.info("Mining %d blocks.." % MORE_MINED_BLOCKS)
self.node.generate(MORE_MINED_BLOCKS)
self.log.info("Collecting mints...")
mints = self.node.listmintedzerocoins(True, False)
assert len(mints) == 2, "mints list has len %d (!= 2)" % len(mints)
# 4) Get unspent coins and chain tip
self.unspent = self.node.listunspent()
block_count = self.node.getblockcount()
pastBlockHash = self.node.getblockhash(block_count)
self.log.info("Block count: %d - Current best: %s..." % (self.node.getblockcount(), self.node.getbestblockhash()[:5]))
pastBlock = CBlock()
pastBlock.deserialize(BytesIO(hex_str_to_bytes(self.node.getblock(pastBlockHash, False))))
checkpoint = pastBlock.nAccumulatorCheckpoint
# 5) get the raw zerocoin spend txes
self.log.info("Getting the raw zerocoin public spends...")
public_spend_A = self.node.createrawzerocoinpublicspend(mints[0].get("serial hash"))
tx_A = CTransaction()
tx_A.deserialize(BytesIO(hex_str_to_bytes(public_spend_A)))
tx_A.rehash()
public_spend_B = self.node.createrawzerocoinpublicspend(mints[1].get("serial hash"))
tx_B = CTransaction()
tx_B.deserialize(BytesIO(hex_str_to_bytes(public_spend_B)))
tx_B.rehash()
# Spending same coins to different recipients to get different txids
my_addy = "yAVWM5urwaTyhiuFQHP2aP47rdZsLUG5PH"
public_spend_A2 = self.node.createrawzerocoinpublicspend(mints[0].get("serial hash"), my_addy)
tx_A2 = CTransaction()
tx_A2.deserialize(BytesIO(hex_str_to_bytes(public_spend_A2)))
tx_A2.rehash()
public_spend_B2 = self.node.createrawzerocoinpublicspend(mints[1].get("serial hash"), my_addy)
tx_B2 = CTransaction()
tx_B2.deserialize(BytesIO(hex_str_to_bytes(public_spend_B2)))
tx_B2.rehash()
self.log.info("tx_A id: %s" % str(tx_A.hash))
self.log.info("tx_B id: %s" % str(tx_B.hash))
self.log.info("tx_A2 id: %s" % str(tx_A2.hash))
self.log.info("tx_B2 id: %s" % str(tx_B2.hash))
self.test_nodes[0].handle_connect()
# 6) create block_A --> main chain
self.log.info("")
self.log.info("*** block_A ***")
self.log.info("Creating block_A [%d] with public spend tx_A in it." % (block_count + 1))
block_A = self.new_block(block_count, pastBlock, checkpoint, tx_A)
self.log.info("Hash of block_A: %s..." % block_A.hash[:5])
self.log.info("sending block_A...")
var = self.node.submitblock(bytes_to_hex_str(block_A.serialize()))
if var is not None:
self.log.info("result: %s" % str(var))
raise Exception("block_A not accepted")
time.sleep(2)
assert_equal(self.node.getblockcount(), block_count+1)
assert_equal(self.node.getbestblockhash(), block_A.hash)
self.log.info(" >> block_A connected <<")
self.log.info("Current chain: ... --> block_0 [%d] --> block_A [%d]\n" % (block_count, block_count+1))
# 7) create block_B --> forked chain
self.log.info("*** block_B ***")
self.log.info("Creating block_B [%d] with public spend tx_B in it." % (block_count + 1))
block_B = self.new_block(block_count, pastBlock, checkpoint, tx_B)
self.log.info("Hash of block_B: %s..." % block_B.hash[:5])
self.log.info("sending block_B...")
var = self.node.submitblock(bytes_to_hex_str(block_B.serialize()))
self.log.info("result of block_B submission: %s" % str(var))
time.sleep(2)
assert_equal(self.node.getblockcount(), block_count+1)
assert_equal(self.node.getbestblockhash(), block_A.hash)
# block_B is not added. Chain remains the same
self.log.info(" >> block_B not connected <<")
self.log.info("Current chain: ... --> block_0 [%d] --> block_A [%d]\n" % (block_count, block_count+1))
# 8) Create new block block_C on the forked chain (block_B)
block_count += 1
self.log.info("*** block_C ***")
self.log.info("Creating block_C [%d] on top of block_B triggering the reorg" % (block_count + 1))
block_C = self.new_block(block_count, block_B, checkpoint)
self.log.info("Hash of block_C: %s..." % block_C.hash[:5])
self.log.info("sending block_C...")
var = self.node.submitblock(bytes_to_hex_str(block_C.serialize()))
if var is not None:
self.log.info("result: %s" % str(var))
raise Exception("block_C not accepted")
time.sleep(2)
assert_equal(self.node.getblockcount(), block_count+1)
assert_equal(self.node.getbestblockhash(), block_C.hash)
self.log.info(" >> block_A disconnected / block_B and block_C connected <<")
self.log.info("Current chain: ... --> block_0 [%d] --> block_B [%d] --> block_C [%d]\n" % (
block_count - 1, block_count, block_count+1
))
# 9) Now create block_D, which tries to spend the same coin as tx_B again on the (new) main chain
# (this block will be rejected)
block_count += 1
self.log.info("*** block_D ***")
self.log.info("Creating block_D [%d] trying to double spend the coin of tx_B" % (block_count + 1))
block_D = self.new_block(block_count, block_C, checkpoint, tx_B2)
self.log.info("Hash of block_D: %s..." % block_D.hash[:5])
self.log.info("sending block_D...")
var = self.node.submitblock(bytes_to_hex_str(block_D.serialize()))
self.log.info("result of block_D submission: %s" % str(var))
time.sleep(2)
assert_equal(self.node.getblockcount(), block_count)
assert_equal(self.node.getbestblockhash(), block_C.hash)
# block_D is not added. Chain remains the same
self.log.info(" >> block_D rejected <<")
self.log.info("Current chain: ... --> block_0 [%d] --> block_B [%d] --> block_C [%d]\n" % (
block_count - 2, block_count - 1, block_count
))
# 10) Now create block_E, which spends tx_A again on the main chain
# (this block will be accepted and connected since tx_A was only spent in block_A, which is now disconnected)
self.log.info("*** block_E ***")
self.log.info("Creating block_E [%d] trying spend tx_A on main chain" % (block_count + 1))
block_E = self.new_block(block_count, block_C, checkpoint, tx_A)
self.log.info("Hash of block_E: %s..." % block_E.hash[:5])
self.log.info("sending block_E...")
var = self.node.submitblock(bytes_to_hex_str(block_E.serialize()))
if var is not None:
self.log.info("result: %s" % str(var))
raise Exception("block_E not accepted")
time.sleep(2)
assert_equal(self.node.getblockcount(), block_count+1)
assert_equal(self.node.getbestblockhash(), block_E.hash)
self.log.info(" >> block_E connected <<")
self.log.info("Current chain: ... --> block_0 [%d] --> block_B [%d] --> block_C [%d] --> block_E [%d]\n" % (
block_count - 2, block_count - 1, block_count, block_count+1
))
# 11) Now create block_F, which tries to double spend the coin in tx_A
# (this block will be rejected)
block_count += 1
self.log.info("*** block_F ***")
self.log.info("Creating block_F [%d] trying to double spend the coin in tx_A" % (block_count + 1))
block_F = self.new_block(block_count, block_E, checkpoint, tx_A2)
self.log.info("Hash of block_F: %s..." % block_F.hash[:5])
self.log.info("sending block_F...")
var = self.node.submitblock(bytes_to_hex_str(block_F.serialize()))
self.log.info("result of block_F submission: %s" % str(var))
time.sleep(2)
assert_equal(self.node.getblockcount(), block_count)
assert_equal(self.node.getbestblockhash(), block_E.hash)
self.log.info(" >> block_F rejected <<")
self.log.info("Current chain: ... --> block_0 [%d] --> block_B [%d] --> block_C [%d] --> block_E [%d]\n" % (
block_count - 3, block_count - 2, block_count - 1, block_count
))
self.log.info("All good.")
def new_block(self, block_count, prev_block, checkpoint, zcspend = None):
if prev_block.hash is None:
prev_block.rehash()
staking_utxo_list = [self.unspent.pop()]
pastBlockHash = prev_block.hash
stakingPrevOuts = self.get_prevouts(staking_utxo_list, block_count)
block = self.create_spam_block(pastBlockHash, stakingPrevOuts, block_count + 1)
if zcspend is not None:
block.vtx.append(zcspend)
block.hashMerkleRoot = block.calc_merkle_root()
block.nAccumulatorCheckpoint = checkpoint
block.rehash()
block.sign_block(self.block_sig_key)
return block
if __name__ == '__main__':
ZerocoinPublicSpendReorg().main()
|
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for z3py overload module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest as test
from absl.testing import parameterized
from pyctr.api import conversion
from pyctr.examples.z3py import z3py
from pyctr.transformers.virtualization import control_flow
from pyctr.transformers.virtualization import functions
from pyctr.transformers.virtualization import logical_ops
from pyctr.transformers.virtualization import variables
import z3
def prove(f):
s = z3.Solver()
s.add(z3.Not(f))
return s.check() == z3.unsat
def can_solve(f):
s = z3.Solver()
s.add(f)
return s.check() == z3.sat
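# Quick sketch of the two helpers above (illustrative only, not part of the
# original tests): `prove` shows validity by checking that the negation of a
# formula is unsatisfiable, while `can_solve` merely checks satisfiability.
#
#   p = z3.Bool('p')
#   prove(z3.Or(p, z3.Not(p)))   # True: the negation of a tautology is unsat
#   can_solve(z3.And(p, p))      # True: p = True is a model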
class Z3PyTest(parameterized.TestCase):
def convert(self, f):
return conversion.convert(f, z3py, [logical_ops])
@parameterized.named_parameters(
('TT', True, True),
('TF', True, False),
('Tq', True, z3.Bool('q')),
('FT', False, True),
('FF', False, False),
('Fq', False, z3.Bool('q')),
('pT', z3.Bool('p'), True),
('pF', z3.Bool('p'), False),
('pq', z3.Bool('p'), z3.Bool('q')),
)
def test_demorgan(self, p, q):
def demorgan(a, b):
return (a and b) == (not (not a or not b))
converted_demorgan = self.convert(demorgan)
self.assertTrue(prove(converted_demorgan(p, q)))
@parameterized.named_parameters(
('TTT', True, True, True),
('TTF', True, True, False),
('TTr', True, True, z3.Bool('r')),
('TFT', True, False, True),
('TFF', True, False, False),
('TFr', True, False, z3.Bool('r')),
('TqT', True, z3.Bool('q'), True),
('TqF', True, z3.Bool('q'), False),
('Tqr', True, z3.Bool('q'), z3.Bool('r')),
('FTT', False, True, True),
('FTF', False, True, False),
('FTr', False, True, z3.Bool('r')),
('FFT', False, False, True),
('FFF', False, False, False),
('FFr', False, False, z3.Bool('r')),
('FqT', False, z3.Bool('q'), True),
('FqF', False, z3.Bool('q'), False),
('Fqr', False, z3.Bool('q'), z3.Bool('r')),
('pTT', z3.Bool('p'), True, True),
('pTF', z3.Bool('p'), True, False),
('pTr', z3.Bool('p'), True, z3.Bool('r')),
('pFT', z3.Bool('p'), False, True),
('pFF', z3.Bool('p'), False, False),
('pFr', z3.Bool('p'), False, z3.Bool('r')),
('pqT', z3.Bool('p'), z3.Bool('q'), True),
('pqF', z3.Bool('p'), z3.Bool('q'), False),
('pqr', z3.Bool('p'), z3.Bool('q'), z3.Bool('r')),
)
def test_chains(self, p, q, r):
def test_fn(a, b, c):
return (not (a and b and c)) == (not a or (not b) or (not c))
converted_fn = self.convert(test_fn)
self.assertTrue(prove(converted_fn(p, q, r)))
@parameterized.named_parameters(
('TTT', True, True, True),
('TTF', True, True, False),
('TTr', True, True, z3.Bool('r')),
('TFT', True, False, True),
('TFF', True, False, False),
('TFr', True, False, z3.Bool('r')),
('TqT', True, z3.Bool('q'), True),
('TqF', True, z3.Bool('q'), False),
('Tqr', True, z3.Bool('q'), z3.Bool('r')),
('FTT', False, True, True),
('FTF', False, True, False),
('FTr', False, True, z3.Bool('r')),
('FFT', False, False, True),
('FFF', False, False, False),
('FFr', False, False, z3.Bool('r')),
('FqT', False, z3.Bool('q'), True),
('FqF', False, z3.Bool('q'), False),
('Fqr', False, z3.Bool('q'), z3.Bool('r')),
('pTT', z3.Bool('p'), True, True),
('pTF', z3.Bool('p'), True, False),
('pTr', z3.Bool('p'), True, z3.Bool('r')),
('pFT', z3.Bool('p'), False, True),
('pFF', z3.Bool('p'), False, False),
('pFr', z3.Bool('p'), False, z3.Bool('r')),
('pqT', z3.Bool('p'), z3.Bool('q'), True),
('pqF', z3.Bool('p'), z3.Bool('q'), False),
('pqr', z3.Bool('p'), z3.Bool('q'), z3.Bool('r')),
)
def test_nesting(self, p, q, r):
def test_fn(a, b, c):
return ((a and b) or (b and c)) == ((a or c) and b)
converted_fn = self.convert(test_fn)
self.assertTrue(prove(converted_fn(p, q, r)))
@parameterized.named_parameters(
('TTT', True, True, True),
('TTF', True, True, False),
('TTr', True, True, z3.Bool('r')),
('TFT', True, False, True),
('TFF', True, False, False),
('TFr', True, False, z3.Bool('r')),
('TqT', True, z3.Bool('q'), True),
('TqF', True, z3.Bool('q'), False),
('Tqr', True, z3.Bool('q'), z3.Bool('r')),
('FTT', False, True, True),
('FTF', False, True, False),
('FTr', False, True, z3.Bool('r')),
('FFT', False, False, True),
('FFF', False, False, False),
('FFr', False, False, z3.Bool('r')),
('FqT', False, z3.Bool('q'), True),
('FqF', False, z3.Bool('q'), False),
('Fqr', False, z3.Bool('q'), z3.Bool('r')),
('pTT', z3.Bool('p'), True, True),
('pTF', z3.Bool('p'), True, False),
('pTr', z3.Bool('p'), True, z3.Bool('r')),
('pFT', z3.Bool('p'), False, True),
('pFF', z3.Bool('p'), False, False),
('pFr', z3.Bool('p'), False, z3.Bool('r')),
('pqT', z3.Bool('p'), z3.Bool('q'), True),
('pqF', z3.Bool('p'), z3.Bool('q'), False),
('pqr', z3.Bool('p'), z3.Bool('q'), z3.Bool('r')),
)
def test_if(self, p, q, r):
def test_fn(a, b, c):
result = None
if a:
result = b
else:
result = c
return result
converted_fn = conversion.convert(test_fn, z3py, [variables, control_flow])
self.assertTrue(prove(z3.If(p, q, r) == converted_fn(p, q, r)))
@parameterized.named_parameters(
('TTT', True, True, True),
('TTF', True, True, False),
('TTr', True, True, z3.Bool('r')),
('TFT', True, False, True),
('TFF', True, False, False),
('TFr', True, False, z3.Bool('r')),
('TqT', True, z3.Bool('q'), True),
('TqF', True, z3.Bool('q'), False),
('Tqr', True, z3.Bool('q'), z3.Bool('r')),
('FTT', False, True, True),
('FTF', False, True, False),
('FTr', False, True, z3.Bool('r')),
('FFT', False, False, True),
('FFF', False, False, False),
('FFr', False, False, z3.Bool('r')),
('FqT', False, z3.Bool('q'), True),
('FqF', False, z3.Bool('q'), False),
('Fqr', False, z3.Bool('q'), z3.Bool('r')),
('pTT', z3.Bool('p'), True, True),
('pTF', z3.Bool('p'), True, False),
('pTr', z3.Bool('p'), True, z3.Bool('r')),
('pFT', z3.Bool('p'), False, True),
('pFF', z3.Bool('p'), False, False),
('pFr', z3.Bool('p'), False, z3.Bool('r')),
('pqT', z3.Bool('p'), z3.Bool('q'), True),
('pqF', z3.Bool('p'), z3.Bool('q'), False),
('pqr', z3.Bool('p'), z3.Bool('q'), z3.Bool('r')),
)
def test_if_tuple(self, p, q, r):
def test_fn(a, b, c):
result = None
test_result = None
if a:
test_result = c
result = b
else:
result = c
test_result = b
return result, test_result
converted_fn = conversion.convert(test_fn, z3py, [variables, control_flow])
a, b = converted_fn(p, q, r)
self.assertTrue(prove(z3.If(p, z3.And(q, r), z3.And(q, r)) == z3.And(a, b)))
def test_eight_queens(self):
# See https://ericpony.github.io/z3py-tutorial/guide-examples.htm
def test_fn(queens):
diagonals = []
for i in range(8):
for j in range(i):
result = None
if i == j:
result = True
else:
result = queens[i] - queens[j] != i - j and queens[i] - queens[j] != j - i
diagonals.append(result)
return diagonals
queens = [z3.Int('queens_%i' % (i + 1)) for i in range(8)]
ranks = [z3.And(1 <= queens[i], queens[i] <= 8) for i in range(8)]
files = [z3.Distinct(queens)]
converted_fn = conversion.convert(test_fn, z3py,
[logical_ops, variables, control_flow])
diagonals = converted_fn(queens)
self.assertTrue(can_solve(ranks + files + diagonals))
def test_eight_queens_optimized(self):
def test_fn():
queens = [z3.Int('queens_%i' % (i + 1)) for i in range(8)]
ranks = [1 <= queens[i] and queens[i] <= 8 for i in range(8)]
files = [z3.Distinct(queens)]
diagonals = []
for i in range(8):
for j in range(i):
if i != j:
diagonals.append(abs(queens[i] - queens[j]) != abs(i - j))
return ranks, files, diagonals
converted_fn = conversion.convert(test_fn, z3py, [logical_ops, functions])
ranks, files, diagonals = converted_fn()
self.assertTrue(can_solve(ranks + files + diagonals))
def test_sudoku(self):
def get_instance():
# sudoku instance, we use '0' for empty cells
return ((0, 0, 0, 0, 9, 4, 0, 3, 0),
(0, 0, 0, 5, 1, 0, 0, 0, 7),
(0, 8, 9, 0, 0, 0, 0, 4, 0),
(0, 0, 0, 0, 0, 0, 2, 0, 8),
(0, 6, 0, 2, 0, 1, 0, 5, 0),
(1, 0, 2, 0, 0, 0, 0, 0, 0),
(0, 7, 0, 0, 0, 0, 5, 2, 0),
(9, 0, 0, 0, 6, 5, 0, 0, 0),
(0, 4, 0, 9, 7, 0, 0, 0, 0))
def z3_sudoku():
# See https://ericpony.github.io/z3py-tutorial/guide-examples.htm
# 9x9 matrix of integer variables
x = [[z3.Int('x_%s_%s' % (i + 1, j + 1))
for j in range(9)]
for i in range(9)]
# each cell contains a value in {1, ..., 9}
cells_c = [
z3.And(1 <= x[i][j], x[i][j] <= 9) for i in range(9) for j in range(9)
]
# each row contains a digit at most once
rows_c = [z3.Distinct(x[i]) for i in range(9)]
# each column contains a digit at most once
cols_c = [z3.Distinct([x[i][j] for i in range(9)]) for j in range(9)]
# each 3x3 square contains a digit at most once
sq_c = [
z3.Distinct(
[x[3 * i0 + i][3 * j0 + j]
for i in range(3)
for j in range(3)])
for i0 in range(3)
for j0 in range(3)
]
sudoku_c = cells_c + rows_c + cols_c + sq_c
instance = get_instance()
instance_c = [
z3.If(instance[i][j] == 0, True, x[i][j] == instance[i][j])
for i in range(9)
for j in range(9)
]
return sudoku_c + instance_c
def naive_sudoku():
# 9x9 matrix of integer variables
x = [[z3.Int('x_%s_%s' % (i + 1, j + 1))
for j in range(9)]
for i in range(9)]
# each cell contains a value in {1, ..., 9}
cells_c = [
1 <= x[i][j] and x[i][j] <= 9 for i in range(9) for j in range(9)
]
# each row contains a digit at most once
rows_c = [z3.Distinct(x[i]) for i in range(9)]
# each column contains a digit at most once
cols_c = [z3.Distinct([x[i][j] for i in range(9)]) for j in range(9)]
# each 3x3 square contains a digit at most once
sq_c = [
z3.Distinct(
[x[3 * i0 + i][3 * j0 + j]
for i in range(3)
for j in range(3)])
for i0 in range(3)
for j0 in range(3)
]
sudoku_c = cells_c + rows_c + cols_c + sq_c
instance = get_instance()
instance_c = []
for i in range(9):
for j in range(9):
if instance[i][j] == 0:
instance_c.append(True)
else:
instance_c.append(x[i][j] == instance[i][j])
return sudoku_c + instance_c
def optimized_sudoku():
# 9x9 matrix of integer variables
x = [[z3.Int('x_%s_%s' % (i + 1, j + 1))
for j in range(9)]
for i in range(9)]
# each cell contains a value in {1, ..., 9}
cells_c = [
1 <= x[i][j] and x[i][j] <= 9 for i in range(9) for j in range(9)
]
# each row contains a digit at most once
rows_c = [z3.Distinct(x[i]) for i in range(9)]
# each column contains a digit at most once
cols_c = [z3.Distinct([x[i][j] for i in range(9)]) for j in range(9)]
# each 3x3 square contains a digit at most once
sq_c = [
z3.Distinct(
[x[3 * i0 + i][3 * j0 + j]
for i in range(3)
for j in range(3)])
for i0 in range(3)
for j0 in range(3)
]
sudoku_c = cells_c + rows_c + cols_c + sq_c
instance = get_instance()
instance_c = []
for i in range(9):
for j in range(9):
if instance[i][j] != 0:
instance_c.append(x[i][j] == instance[i][j])
return sudoku_c + instance_c
converted_naive = conversion.convert(naive_sudoku, z3py,
[logical_ops, functions])
converted_opt = conversion.convert(optimized_sudoku, z3py,
[logical_ops, functions])
self.assertEqual(can_solve(converted_naive()), can_solve(z3_sudoku()))
self.assertEqual(can_solve(converted_opt()), can_solve(z3_sudoku()))
if __name__ == '__main__':
test.main()
|
|
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import io
import six
import time
import unicodecsv
import tableschema
from apiclient.http import MediaIoBaseUpload
from .mapper import Mapper
# Module API
class Storage(tableschema.Storage):
# Public
def __init__(self, service, project, dataset, prefix=''):
"""https://github.com/frictionlessdata/tableschema-bigquery-py#storage
"""
# Set attributes
self.__service = service
self.__project = project
self.__dataset = dataset
self.__prefix = prefix
self.__buckets = None
self.__descriptors = {}
self.__fallbacks = {}
# Create mapper
self.__mapper = Mapper(prefix=prefix)
def __repr__(self):
"""https://github.com/frictionlessdata/tableschema-bigquery-py#storage
"""
# Template and format
template = 'Storage <{service}/{project}-{dataset}>'
text = template.format(
service=self.__service,
project=self.__project,
dataset=self.__dataset)
return text
@property
def buckets(self):
"""https://github.com/frictionlessdata/tableschema-bigquery-py#storage
"""
# No cached value
if self.__buckets is None:
# Get response
response = self.__service.tables().list(
projectId=self.__project,
datasetId=self.__dataset).execute()
# Extract buckets
self.__buckets = []
for table in response.get('tables', []):
table_name = table['tableReference']['tableId']
bucket = self.__mapper.restore_bucket(table_name)
if bucket is not None:
self.__buckets.append(bucket)
return self.__buckets
def create(self, bucket, descriptor, force=False):
"""https://github.com/frictionlessdata/tableschema-bigquery-py#storage
"""
# Make lists
buckets = bucket
if isinstance(bucket, six.string_types):
buckets = [bucket]
descriptors = descriptor
if isinstance(descriptor, dict):
descriptors = [descriptor]
# Iterate over buckets/descriptors
for bucket, descriptor in zip(buckets, descriptors):
# Existent bucket
if bucket in self.buckets:
if not force:
message = 'Bucket "%s" already exists' % bucket
raise tableschema.exceptions.StorageError(message)
self.delete(bucket)
# Prepare job body
tableschema.validate(descriptor)
table_name = self.__mapper.convert_bucket(bucket)
converted_descriptor, fallbacks = self.__mapper.convert_descriptor(descriptor)
body = {
'tableReference': {
'projectId': self.__project,
'datasetId': self.__dataset,
'tableId': table_name,
},
'schema': converted_descriptor,
}
# Make request
self.__service.tables().insert(
projectId=self.__project,
datasetId=self.__dataset,
body=body).execute()
# Add to descriptors/fallbacks
self.__descriptors[bucket] = descriptor
self.__fallbacks[bucket] = fallbacks
# Remove buckets cache
self.__buckets = None
def delete(self, bucket=None, ignore=False):
"""https://github.com/frictionlessdata/tableschema-bigquery-py#storage
"""
# Make lists
buckets = bucket
if isinstance(bucket, six.string_types):
buckets = [bucket]
elif bucket is None:
buckets = reversed(self.buckets)
# Iterate over buckets
for bucket in buckets:
# Non-existent bucket
if bucket not in self.buckets:
if not ignore:
message = 'Bucket "%s" doesn\'t exist.' % bucket
raise tableschema.exceptions.StorageError(message)
return
# Remove from descriptors
if bucket in self.__descriptors:
del self.__descriptors[bucket]
# Make delete request
table_name = self.__mapper.convert_bucket(bucket)
self.__service.tables().delete(
projectId=self.__project,
datasetId=self.__dataset,
tableId=table_name).execute()
# Remove tables cache
self.__buckets = None
def describe(self, bucket, descriptor=None):
"""https://github.com/frictionlessdata/tableschema-bigquery-py#storage
"""
# Set descriptor
if descriptor is not None:
self.__descriptors[bucket] = descriptor
# Get descriptor
else:
descriptor = self.__descriptors.get(bucket)
if descriptor is None:
table_name = self.__mapper.convert_bucket(bucket)
response = self.__service.tables().get(
projectId=self.__project,
datasetId=self.__dataset,
tableId=table_name).execute()
converted_descriptor = response['schema']
descriptor = self.__mapper.restore_descriptor(converted_descriptor)
return descriptor
def iter(self, bucket):
"""https://github.com/frictionlessdata/tableschema-bigquery-py#storage
"""
# Get schema/data
schema = tableschema.Schema(self.describe(bucket))
table_name = self.__mapper.convert_bucket(bucket)
response = self.__service.tabledata().list(
projectId=self.__project,
datasetId=self.__dataset,
tableId=table_name).execute()
# Collect rows
rows = []
for fields in response['rows']:
row = [field['v'] for field in fields['f']]
rows.append(row)
# Sort rows
# TODO: provide proper sorting solution
rows = sorted(rows, key=lambda row: row[0] if row[0] is not None else 'null')
# Emit rows
for row in rows:
row = self.__mapper.restore_row(row, schema=schema)
yield row
def read(self, bucket):
"""https://github.com/frictionlessdata/tableschema-bigquery-py#storage
"""
rows = list(self.iter(bucket))
return rows
def write(self, bucket, rows):
"""https://github.com/frictionlessdata/tableschema-bigquery-py#storage
"""
# Write buffer
BUFFER_SIZE = 10000
# Prepare schema, fallbacks
schema = tableschema.Schema(self.describe(bucket))
fallbacks = self.__fallbacks.get(bucket, [])
# Write data
rows_buffer = []
for row in rows:
row = self.__mapper.convert_row(row, schema=schema, fallbacks=fallbacks)
rows_buffer.append(row)
if len(rows_buffer) > BUFFER_SIZE:
self.__write_rows_buffer(bucket, rows_buffer)
rows_buffer = []
if len(rows_buffer) > 0:
self.__write_rows_buffer(bucket, rows_buffer)
# Private
def __write_rows_buffer(self, bucket, rows_buffer):
# Process data to byte stream csv
bytes = io.BufferedRandom(io.BytesIO())
writer = unicodecsv.writer(bytes, encoding='utf-8')
for row in rows_buffer:
writer.writerow(row)
bytes.seek(0)
# Prepare job body
table_name = self.__mapper.convert_bucket(bucket)
body = {
'configuration': {
'load': {
'destinationTable': {
'projectId': self.__project,
'datasetId': self.__dataset,
'tableId': table_name
},
'sourceFormat': 'CSV',
}
}
}
# Prepare job media body
mimetype = 'application/octet-stream'
media_body = MediaIoBaseUpload(bytes, mimetype=mimetype)
# Make request to Big Query
response = self.__service.jobs().insert(
projectId=self.__project,
body=body,
media_body=media_body).execute()
self.__wait_response(response)
def __wait_response(self, response):
# Get job instance
job = self.__service.jobs().get(
projectId=response['jobReference']['projectId'],
jobId=response['jobReference']['jobId'])
# Wait done
while True:
result = job.execute(num_retries=1)
if result['status']['state'] == 'DONE':
if result['status'].get('errors'):
errors = result['status']['errors']
message = '\n'.join(error['message'] for error in errors)
raise tableschema.exceptions.StorageError(message)
break
time.sleep(1)
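# A minimal usage sketch (illustrative only; assumes an authorized BigQuery API
# `service` object and an existing `project`/`dataset`; the bucket name and
# descriptor below are placeholders):
#
#   storage = Storage(service, project='my-project', dataset='my_dataset')
#   storage.create('articles', {'fields': [{'name': 'id', 'type': 'integer'}]})
#   storage.write('articles', [[1], [2]])
#   rows = storage.read('articles')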
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import re
import urllib
from itertools import groupby
from django.utils.translation import ugettext as _
from dashboard.facet_builder import _compute_range_facet
from desktop.lib.exceptions_renderable import PopupException
from desktop.conf import SERVER_USER
from desktop.lib.i18n import force_unicode
from desktop.lib.rest.http_client import HttpClient, RestException
from desktop.lib.rest import resource
from libsolr.conf import SSL_CERT_CA_VERIFY
LOG = logging.getLogger(__name__)
try:
from search.conf import EMPTY_QUERY, SECURITY_ENABLED, SOLR_URL
except ImportError, e:
LOG.warn('Solr Search is not enabled')
def utf_quoter(what):
return urllib.quote(unicode(what).encode('utf-8'), safe='~@#$&()*!+=;,.?/\'')
class SolrApi(object):
"""
http://wiki.apache.org/solr/CoreAdmin#CoreAdminHandler
"""
def __init__(self, solr_url=None, user=None, security_enabled=False, ssl_cert_ca_verify=SSL_CERT_CA_VERIFY.get()):
if solr_url is None:
solr_url = SOLR_URL.get()
self._url = solr_url
self._user = user
self._client = HttpClient(self._url, logger=LOG)
self.security_enabled = security_enabled or SECURITY_ENABLED.get()
if self.security_enabled:
self._client.set_kerberos_auth()
self._client.set_verify(ssl_cert_ca_verify)
self._root = resource.Resource(self._client)
# The Kerberos handshake requires two requests in order to authenticate,
# but if our first request is a PUT/POST, it might flat-out reject the
# first request if the body is too large. So, connect here in order to get
# a cookie so future PUT/POSTs will be pre-authenticated.
if self.security_enabled:
self._root.invoke('HEAD', '/')
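# Builds a Solr /select request for a dashboard collection: pagination, q and
# fq parameters, classic facet.* params, JSON facets for nested/function
# facets, field list (fl), highlighting and sorting.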
def query(self, collection, query):
solr_query = {}
solr_query['collection'] = collection['name']
if query.get('download'):
solr_query['rows'] = 1000
solr_query['start'] = 0
else:
solr_query['rows'] = int(collection['template']['rows'] or 10)
solr_query['start'] = int(query['start'])
solr_query['rows'] = min(solr_query['rows'], 1000)
solr_query['start'] = min(solr_query['start'], 10000)
params = self._get_params() + (
('q', self._get_q(query)),
('wt', 'json'),
('rows', solr_query['rows']),
('start', solr_query['start']),
)
if any(collection['facets']):
params += (
('facet', 'true'),
('facet.mincount', 0),
('facet.limit', 10),
)
json_facets = {}
timeFilter = self._get_range_borders(collection, query)
for facet in collection['facets']:
if facet['type'] == 'query':
params += (('facet.query', '%s' % facet['field']),)
elif facet['type'] == 'range' or facet['type'] == 'range-up':
keys = {
'id': '%(id)s' % facet,
'field': facet['field'],
'key': '%(field)s-%(id)s' % facet,
'start': facet['properties']['start'],
'end': facet['properties']['end'],
'gap': facet['properties']['gap'],
'mincount': int(facet['properties']['mincount'])
}
if timeFilter and timeFilter['time_field'] == facet['field'] and (facet['id'] not in timeFilter['time_filter_overrides'] or facet['widgetType'] != 'histogram-widget'):
keys.update(self._get_time_filter_query(timeFilter, facet))
params += (
('facet.range', '{!key=%(key)s ex=%(id)s f.%(field)s.facet.range.start=%(start)s f.%(field)s.facet.range.end=%(end)s f.%(field)s.facet.range.gap=%(gap)s f.%(field)s.facet.mincount=%(mincount)s}%(field)s' % keys),
)
elif facet['type'] == 'field':
keys = {
'id': '%(id)s' % facet,
'field': facet['field'],
'key': '%(field)s-%(id)s' % facet,
'limit': int(facet['properties'].get('limit', 10)) + (1 if facet['widgetType'] == 'facet-widget' else 0),
'mincount': int(facet['properties']['mincount'])
}
params += (
('facet.field', '{!key=%(key)s ex=%(id)s f.%(field)s.facet.limit=%(limit)s f.%(field)s.facet.mincount=%(mincount)s}%(field)s' % keys),
)
elif facet['type'] == 'nested':
_f = {
'field': facet['field'],
'limit': int(facet['properties'].get('limit', 10)) + (1 if facet['widgetType'] == 'text-facet-widget' else 0),
'mincount': int(facet['properties']['mincount']),
'sort': {'count': facet['properties']['sort']},
}
if facet['properties']['domain'].get('blockParent') or facet['properties']['domain'].get('blockChildren'):
_f['domain'] = {}
if facet['properties']['domain'].get('blockParent'):
_f['domain']['blockParent'] = ' OR '.join(facet['properties']['domain']['blockParent'])
if facet['properties']['domain'].get('blockChildren'):
_f['domain']['blockChildren'] = ' OR '.join(facet['properties']['domain']['blockChildren'])
if 'start' in facet['properties'] and not facet['properties'].get('type') == 'field':
_f.update({
'type': 'range',
'start': facet['properties']['start'],
'end': facet['properties']['end'],
'gap': facet['properties']['gap'],
})
if timeFilter and timeFilter['time_field'] == facet['field'] and (facet['id'] not in timeFilter['time_filter_overrides'] or facet['widgetType'] != 'bucket-widget'):
_f.update(self._get_time_filter_query(timeFilter, facet))
else:
_f.update({
'type': 'terms',
'field': facet['field'],
'excludeTags': facet['id'],
'offset': 0,
'numBuckets': True,
'allBuckets': True,
#'prefix': '' # Forbidden on numeric fields
})
if facet['properties']['canRange'] and not facet['properties']['isDate']:
del _f['mincount'] # Numeric fields do not support mincount
if facet['properties']['facets']:
self._n_facet_dimension(facet, _f, facet['properties']['facets'], 1)
if facet['widgetType'] == 'text-facet-widget':
_fname = _f['facet'].keys()[0]
_f['sort'] = {_fname: facet['properties']['sort']}
# domain = '-d2:NaN' # Solr 6.4
json_facets[facet['id']] = _f
elif facet['type'] == 'function':
json_facets[facet['id']] = self._get_aggregate_function(facet)
json_facets['processEmpty'] = True
elif facet['type'] == 'pivot':
if facet['properties']['facets'] or facet['widgetType'] == 'map-widget':
fields = facet['field']
fields_limits = []
for f in facet['properties']['facets']:
fields_limits.append('f.%s.facet.limit=%s' % (f['field'], f['limit']))
fields_limits.append('f.%s.facet.mincount=%s' % (f['field'], f['mincount']))
fields += ',' + f['field']
keys = {
'id': '%(id)s' % facet,
'key': '%(field)s-%(id)s' % facet,
'field': facet['field'],
'fields': fields,
'limit': int(facet['properties'].get('limit', 10)),
'mincount': int(facet['properties']['mincount']),
'fields_limits': ' '.join(fields_limits)
}
params += (
('facet.pivot', '{!key=%(key)s ex=%(id)s f.%(field)s.facet.limit=%(limit)s f.%(field)s.facet.mincount=%(mincount)s %(fields_limits)s}%(fields)s' % keys),
)
if json_facets:
params += (
('json.facet', json.dumps(json_facets)),
)
params += self._get_fq(collection, query)
from dashboard.models import Collection2
fl = urllib.unquote(utf_quoter(','.join(Collection2.get_field_list(collection))))
nested_fields = self._get_nested_fields(collection)
if nested_fields:
fl += urllib.unquote(utf_quoter(',[child parentFilter="%s"]' % ' OR '.join(nested_fields)))
params += (('fl', fl),)
params += (
('hl', 'true'),
('hl.fl', '*'),
('hl.snippets', 5),
('hl.fragsize', 1000),
)
if collection['template']['fieldsSelected']:
fields = []
for field in collection['template']['fieldsSelected']:
attribute_field = filter(lambda attribute: field == attribute['name'], collection['template']['fieldsAttributes'])
if attribute_field:
if attribute_field[0]['sort']['direction']:
fields.append('%s %s' % (field, attribute_field[0]['sort']['direction']))
if fields:
params += (
('sort', ','.join(fields)),
)
response = self._root.get('%(collection)s/select' % solr_query, params)
return self._get_json(response)
def _n_facet_dimension(self, widget, _f, facets, dim):
facet = facets[0]
f_name = 'dim_%02d:%s' % (dim, facet['field'])
if facet['aggregate']['function'] == 'count':
if 'facet' not in _f:
_f['facet'] = {f_name: {}}
else:
_f['facet'][f_name] = {}
_f = _f['facet']
_f[f_name] = {
'type': 'terms',
'field': '%(field)s' % facet,
'limit': int(facet.get('limit', 10)),
'mincount': int(facet['mincount']),
'numBuckets': True,
'allBuckets': True,
#'prefix': '' # Forbidden on numeric fields
}
if widget['widgetType'] == 'tree2-widget' and facets[-1]['aggregate']['function'] != 'count':
_f['subcount'] = self._get_aggregate_function(facets[-1])
if len(facets) > 1: # Get n+1 dimension
if facets[1]['aggregate']['function'] == 'count':
self._n_facet_dimension(widget, _f[f_name], facets[1:], dim + 1)
else:
self._n_facet_dimension(widget, _f[f_name], facets[1:], dim)
else:
agg_function = self._get_aggregate_function(facet)
_f['facet'] = {
'agg_%02d_00:%s' % (dim, agg_function): agg_function
}
for i, _f_agg in enumerate(facets[1:], 1):
if _f_agg['aggregate']['function'] != 'count':
agg_function = self._get_aggregate_function(_f_agg)
_f['facet']['agg_%02d_%02d:%s' % (dim, i, agg_function)] = agg_function
else:
self._n_facet_dimension(widget, _f, facets[i:], dim + 1) # Get n+1 dimension
break
def select(self, collection, query=None, rows=100, start=0):
if query is None:
query = EMPTY_QUERY.get()
params = self._get_params() + (
('q', query),
('wt', 'json'),
('rows', rows),
('start', start),
)
response = self._root.get('%s/select' % collection, params)
return self._get_json(response)
def suggest(self, collection, query):
try:
params = self._get_params() + (
('suggest', 'true'),
('suggest.build', 'true'),
('suggest.q', query['q']),
('wt', 'json'),
)
if query.get('dictionary'):
params += (
('suggest.dictionary', query['dictionary']),
)
response = self._root.get('%s/suggest' % collection, params)
return self._get_json(response)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def collections(self): # To drop, used in indexer v1
try:
params = self._get_params() + (
('detail', 'true'),
('path', '/clusterstate.json'),
)
response = self._root.get('zookeeper', params=params)
return json.loads(response['znode'].get('data', '{}'))
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def collections2(self):
try:
params = self._get_params() + (
('action', 'LIST'),
('wt', 'json'),
)
return self._root.get('admin/collections', params=params)['collections']
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def configs(self):
try:
params = self._get_params() + (
('action', 'LIST'),
('wt', 'json'),
)
return self._root.get('admin/configs', params=params)['configSets']
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def aliases(self):
try:
params = self._get_params() + ( # Waiting for SOLR-4968
('detail', 'true'),
('path', '/aliases.json'),
)
response = self._root.get('zookeeper', params=params)
return json.loads(response['znode'].get('data', '{}')).get('collection', {})
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def collection_or_core(self, hue_collection):
if hue_collection.is_core_only:
return self.core(hue_collection.name)
else:
return self.collection(hue_collection.name)
def collection(self, name):
try:
collections = self.collections()
return collections[name]
except Exception, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def create_collection2(self, name, config_name=None, shards=1, replication=1, **kwargs):
try:
params = self._get_params() + (
('action', 'CREATE'),
('name', name),
('numShards', shards),
('replicationFactor', replication),
('wt', 'json')
)
if config_name:
params += (
('collection.configName', config_name),
)
if kwargs:
params += tuple(((key, val) for key, val in kwargs.iteritems()))
response = self._root.post('admin/collections', params=params, contenttype='application/json')
return self._get_json(response)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def add_fields(self, name, fields):
try:
params = self._get_params() + (
('wt', 'json'),
)
data = {'add-field': fields}
response = self._root.post('%(collection)s/schema' % {'collection': name}, params=params, data=json.dumps(data), contenttype='application/json')
return self._get_json(response)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def create_core(self, name, instance_dir, shards=1, replication=1):
try:
params = self._get_params() + (
('action', 'CREATE'),
('name', name),
('instanceDir', instance_dir),
('wt', 'json'),
)
response = self._root.post('admin/cores', params=params, contenttype='application/json')
if response.get('responseHeader', {}).get('status', -1) == 0:
return True
else:
LOG.error("Could not create core. Check response:\n%s" % json.dumps(response, indent=2))
return False
except RestException, e:
if 'already exists' in e.message:
LOG.warn("Could not create collection.", exc_info=True)
return False
else:
raise PopupException(e, title=_('Error while accessing Solr'))
def create_alias(self, name, collections):
try:
params = self._get_params() + (
('action', 'CREATEALIAS'),
('name', name),
('collections', ','.join(collections)),
('wt', 'json'),
)
response = self._root.post('admin/collections', params=params, contenttype='application/json')
if response.get('responseHeader', {}).get('status', -1) != 0:
raise PopupException(_("Could not create or edit alias: %s") % response)
else:
return response
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def delete_alias(self, name):
try:
params = self._get_params() + (
('action', 'DELETEALIAS'),
('name', name),
('wt', 'json'),
)
response = self._root.post('admin/collections', params=params, contenttype='application/json')
if response.get('responseHeader', {}).get('status', -1) != 0:
msg = _("Could not delete alias. Check response:\n%s") % json.dumps(response, indent=2)
LOG.error(msg)
raise PopupException(msg)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def delete_collection(self, name):
response = {'status': -1, 'message': ''}
try:
params = self._get_params() + (
('action', 'DELETE'),
('name', name),
('wt', 'json')
)
data = self._root.post('admin/collections', params=params, contenttype='application/json')
if data['responseHeader']['status'] == 0:
response['status'] = 0
else:
response['message'] = "Could not remove collection: %s" % data
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
return response
def remove_core(self, name):
try:
params = self._get_params() + (
('action', 'UNLOAD'),
('name', name),
('deleteIndex', 'true'),
('wt', 'json')
)
response = self._root.post('admin/cores', params=params, contenttype='application/json')
if 'success' in response:
return True
else:
LOG.error("Could not remove core. Check response:\n%s" % json.dumps(response, indent=2))
return False
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def cores(self):
try:
params = self._get_params() + (
('wt', 'json'),
)
return self._root.get('admin/cores', params=params)['status']
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def core(self, core):
try:
params = self._get_params() + (
('wt', 'json'),
('core', core),
)
return self._root.get('admin/cores', params=params)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def get_schema(self, collection):
try:
params = self._get_params() + (
('wt', 'json'),
)
response = self._root.get('%(core)s/schema' % {'core': collection}, params=params)
return self._get_json(response)['schema']
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
# Deprecated
def schema(self, core):
try:
params = self._get_params() + (
('wt', 'json'),
('file', 'schema.xml'),
)
return self._root.get('%(core)s/admin/file' % {'core': core}, params=params)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def fields(self, core, dynamic=False):
try:
params = self._get_params() + (
('wt', 'json'),
('fl', '*'),
)
if not dynamic:
params += (('show', 'schema'),)
response = self._root.get('%(core)s/admin/luke' % {'core': core}, params=params)
return self._get_json(response)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def luke(self, core):
try:
params = self._get_params() + (
('wt', 'json'),
)
response = self._root.get('%(core)s/admin/luke' % {'core': core}, params=params)
return self._get_json(response)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def schema_fields(self, core):
try:
params = self._get_params() + (
('wt', 'json'),
)
response = self._root.get('%(core)s/schema/fields' % {'core': core}, params=params)
return self._get_json(response)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def stats(self, core, fields, query=None, facet=''):
try:
params = self._get_params() + (
('q', self._get_q(query) if query is not None else EMPTY_QUERY.get()),
('wt', 'json'),
('rows', 0),
('stats', 'true'),
)
if query is not None:
params += self._get_fq(None, query)
if facet:
params += (('stats.facet', facet),)
params += tuple([('stats.field', field) for field in fields])
response = self._root.get('%(core)s/select' % {'core': core}, params=params)
return self._get_json(response)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def terms(self, core, field, properties=None):
try:
params = self._get_params() + (
('wt', 'json'),
('rows', 0),
('terms.fl', field),
)
if properties:
for key, val in properties.iteritems():
params += ((key, val),)
response = self._root.get('%(core)s/terms' % {'core': core}, params=params)
return self._get_json(response)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def info_system(self):
try:
params = self._get_params() + (
('wt', 'json'),
)
response = self._root.get('admin/info/system', params=params)
return self._get_json(response)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def sql(self, collection, statement):
try:
if 'limit' not in statement.lower(): # rows is not supported
statement = statement + ' LIMIT 100'
params = self._get_params() + (
('wt', 'json'),
('rows', 0),
('stmt', statement),
('rows', 100),
('start', 0),
)
response = self._root.get('%(collection)s/sql' % {'collection': collection}, params=params)
return self._get_json(response)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def get(self, core, doc_id):
collection_name = core['name']
try:
params = self._get_params() + (
('id', doc_id),
('wt', 'json'),
)
response = self._root.get('%(core)s/get' % {'core': collection_name}, params=params)
return self._get_json(response)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def export(self, name, query, fl, sort, rows=100):
try:
params = self._get_params() + (
('q', query),
('fl', fl),
('sort', sort),
('rows', rows),
('wt', 'json'),
)
response = self._root.get('%(name)s/export' % {'name': name}, params=params)
return self._get_json(response)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def update(self, collection_or_core_name, data, content_type='csv', version=None, **kwargs):
if content_type == 'csv':
content_type = 'application/csv'
elif content_type == 'json':
content_type = 'application/json'
else:
LOG.error("Trying to update collection %s with content type %s. Allowed content types: csv/json" % (collection_or_core_name, content_type))
params = self._get_params() + (
('wt', 'json'),
('overwrite', 'true'),
('commit', 'true'),
)
if version is not None:
params += (
('_version_', version),
('versions', 'true')
)
if kwargs:
params += tuple(((key, val) for key, val in kwargs.iteritems()))
response = self._root.post('%s/update' % collection_or_core_name, contenttype=content_type, params=params, data=data)
return self._get_json(response)
# Deprecated
def create_collection(self, name, shards=1, replication=1):
try:
params = self._get_params() + (
('action', 'CREATE'),
('name', name),
('numShards', shards),
('replicationFactor', replication),
('collection.configName', name),
('wt', 'json')
)
response = self._root.post('admin/collections', params=params, contenttype='application/json')
if 'success' in response:
return True
else:
LOG.error("Could not create collection. Check response:\n%s" % json.dumps(response, indent=2))
return False
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
# Deprecated
def remove_collection(self, name):
try:
params = self._get_params() + (
('action', 'DELETE'),
('name', name),
('wt', 'json')
)
response = self._root.post('admin/collections', params=params, contenttype='application/json')
if 'success' in response:
return True
else:
LOG.error("Could not remove collection. Check response:\n%s" % json.dumps(response, indent=2))
return False
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def _get_params(self):
if self.security_enabled:
return (('doAs', self._user ),)
return (('user.name', SERVER_USER.get()), ('doAs', self._user),)
def _get_q(self, query):
q_template = '(%s)' if len(query['qs']) >= 2 else '%s'
return 'OR'.join([q_template % (q['q'] or EMPTY_QUERY.get()) for q in query['qs']]).encode('utf-8')
@classmethod
def _get_aggregate_function(cls, facet):
if 'properties' in facet:
f = facet['properties']['aggregate'] # Level 1 facet
else:
f = facet['aggregate']
if not f['ops']:
f['ops'] = [{'function': 'field', 'value': facet['field'], 'ops': []}]
return cls.__get_aggregate_function(f)
@classmethod
def __get_aggregate_function(cls, f):
if f['function'] == 'field':
return f['value']
else:
fields = []
for _f in f['ops']:
fields.append(cls.__get_aggregate_function(_f))
if f['function'] == 'median':
f['function'] = 'percentile'
fields.append('50')
elif f['function'] == 'percentile':
fields.extend(map(lambda a: str(a), [_p['value'] for _p in f['percentiles']]))
f['function'] = 'percentile'
return '%s(%s)' % (f['function'], ','.join(fields))
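# Illustrative sketch (hypothetical facet payload, not taken from the code
# above): a 'median' aggregate over a field is rewritten to Solr's percentile
# function, e.g.
#   {'function': 'median', 'ops': [{'function': 'field', 'value': 'price', 'ops': []}]}
# produces the expression 'percentile(price,50)'.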
def _get_range_borders(self, collection, query):
props = {}
time_field = collection['timeFilter'].get('field')
if time_field and (collection['timeFilter']['value'] != 'all' or collection['timeFilter']['type'] == 'fixed'):
# fqs overrides main time filter
fq_time_ids = [fq['id'] for fq in query['fqs'] if fq['field'] == time_field]
props['time_filter_overrides'] = fq_time_ids
props['time_field'] = time_field
if collection['timeFilter']['type'] == 'rolling':
props['field'] = collection['timeFilter']['field']
props['from'] = 'NOW-%s' % collection['timeFilter']['value']
props['to'] = 'NOW'
props['gap'] = GAPS.get(collection['timeFilter']['value'])
elif collection['timeFilter']['type'] == 'fixed':
props['field'] = collection['timeFilter']['field']
props['from'] = collection['timeFilter'].get('from', 'NOW-7DAYS')
props['to'] = collection['timeFilter'].get('to', 'NOW')
props['fixed'] = True
return props
def _get_time_filter_query(self, timeFilter, facet):
if 'fixed' in timeFilter:
props = {}
stat_facet = {'min': timeFilter['from'], 'max': timeFilter['to']}
_compute_range_facet(facet['widgetType'], stat_facet, props, stat_facet['min'], stat_facet['max'])
gap = props['gap']
unit = re.split('\d+', gap)[1]
return {
'start': '%(from)s/%(unit)s' % {'from': timeFilter['from'], 'unit': unit},
'end': '%(to)s/%(unit)s' % {'to': timeFilter['to'], 'unit': unit},
'gap': '%(gap)s' % props, # add a 'auto'
}
else:
gap = timeFilter['gap'][facet['widgetType']]
return {
'start': '%(from)s/%(unit)s' % {'from': timeFilter['from'], 'unit': gap['unit']},
'end': '%(to)s/%(unit)s' % {'to': timeFilter['to'], 'unit': gap['unit']},
'gap': '%(coeff)s%(unit)s/%(unit)s' % gap, # add a 'auto'
}
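# Example (derived from GAPS below): for a rolling 'NOW-7DAYS' filter on a
# facet-widget, gap is {'coeff': '+1', 'unit': 'DAYS'}, so this returns
#   {'start': 'NOW-7DAYS/DAYS', 'end': 'NOW/DAYS', 'gap': '+1DAYS/DAYS'}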
def _get_fq(self, collection, query):
params = ()
timeFilter = {}
if collection:
timeFilter = self._get_range_borders(collection, query)
if timeFilter and not timeFilter.get('time_filter_overrides'):
params += (('fq', urllib.unquote(utf_quoter('%(field)s:[%(from)s TO %(to)s]' % timeFilter))),)
# Merge facets queries on same fields
grouped_fqs = groupby(query['fqs'], lambda x: (x['type'], x['field']))
merged_fqs = []
for key, group in grouped_fqs:
field_fq = next(group)
for fq in group:
for f in fq['filter']:
field_fq['filter'].append(f)
merged_fqs.append(field_fq)
for fq in merged_fqs:
if fq['type'] == 'field':
fields = fq['field'] if type(fq['field']) == list else [fq['field']] # 2D facets support
for field in fields:
f = []
for _filter in fq['filter']:
values = _filter['value'] if type(_filter['value']) == list else [_filter['value']] # 2D facets support
if fields.index(field) < len(values): # Lowest common field denominator
value = values[fields.index(field)]
exclude = '-' if _filter['exclude'] else ''
if value is not None and ' ' in force_unicode(value):
value = force_unicode(value).replace('"', '\\"')
f.append('%s%s:"%s"' % (exclude, field, value))
else:
f.append('%s{!field f=%s}%s' % (exclude, field, value))
_params = '{!tag=%(id)s}' % fq + ' '.join(f)
params += (('fq', urllib.unquote(utf_quoter(_params))),)
elif fq['type'] == 'range':
params += (('fq', '{!tag=%(id)s}' % fq + ' '.join([urllib.unquote(
utf_quoter('%s%s:[%s TO %s}' % ('-' if field['exclude'] else '', fq['field'], f['from'], f['to']))) for field, f in zip(fq['filter'], fq['properties'])])),)
elif fq['type'] == 'range-up':
params += (('fq', '{!tag=%(id)s}' % fq + ' '.join([urllib.unquote(
utf_quoter('%s%s:[%s TO %s}' % ('-' if field['exclude'] else '', fq['field'], f['from'] if fq['is_up'] else '*', '*' if fq['is_up'] else f['from'])))
for field, f in zip(fq['filter'], fq['properties'])])),)
elif fq['type'] == 'map':
_keys = fq.copy()
_keys.update(fq['properties'])
params += (('fq', '{!tag=%(id)s}' % fq + urllib.unquote(
utf_quoter('%(lat)s:[%(lat_sw)s TO %(lat_ne)s} AND %(lon)s:[%(lon_sw)s TO %(lon_ne)s}' % _keys))),)
nested_fields = self._get_nested_fields(collection)
if nested_fields:
params += (('fq', urllib.unquote(utf_quoter(' OR '.join(nested_fields)))),)
return params
def _get_nested_fields(self, collection):
if collection and collection.get('nested') and collection['nested']['enabled']:
return [field['filter'] for field in self._flatten_schema(collection['nested']['schema']) if field['selected']]
else:
return []
def _flatten_schema(self, level):
fields = []
for field in level:
fields.append(field)
if field['values']:
fields.extend(self._flatten_schema(field['values']))
return fields
@classmethod
def _get_json(cls, response):
if type(response) != dict:
# Got 'plain/text' mimetype instead of 'application/json'
try:
response = json.loads(response)
except ValueError, e:
# Got some null bytes in the response
LOG.error('%s: %s' % (unicode(e), repr(response)))
response = json.loads(response.replace('\x00', ''))
return response
def uniquekey(self, collection):
try:
params = self._get_params() + (
('wt', 'json'),
)
response = self._root.get('%s/schema/uniquekey' % collection, params=params)
return self._get_json(response)['uniqueKey']
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
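# GAPS maps a rolling time-filter width to the facet gap used per widget type:
# chart-style widgets (histogram/timeline/bucket/bar) aim for roughly 100
# slots, facet widgets for roughly 10 (see the '5MINUTES' entry below).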
GAPS = {
'5MINUTES': {
'histogram-widget': {'coeff': '+3', 'unit': 'SECONDS'}, # ~100 slots
'timeline-widget': {'coeff': '+3', 'unit': 'SECONDS'}, # ~100 slots
'bucket-widget': {'coeff': '+3', 'unit': 'SECONDS'}, # ~100 slots
'bar-widget': {'coeff': '+3', 'unit': 'SECONDS'}, # ~100 slots
'facet-widget': {'coeff': '+1', 'unit': 'MINUTES'}, # ~10 slots
},
'30MINUTES': {
'histogram-widget': {'coeff': '+20', 'unit': 'SECONDS'},
'timeline-widget': {'coeff': '+20', 'unit': 'SECONDS'},
'bucket-widget': {'coeff': '+20', 'unit': 'SECONDS'},
'bar-widget': {'coeff': '+20', 'unit': 'SECONDS'},
'facet-widget': {'coeff': '+5', 'unit': 'MINUTES'},
},
'1HOURS': {
'histogram-widget': {'coeff': '+30', 'unit': 'SECONDS'},
'timeline-widget': {'coeff': '+30', 'unit': 'SECONDS'},
'bucket-widget': {'coeff': '+30', 'unit': 'SECONDS'},
'bar-widget': {'coeff': '+30', 'unit': 'SECONDS'},
'facet-widget': {'coeff': '+10', 'unit': 'MINUTES'},
},
'12HOURS': {
'histogram-widget': {'coeff': '+7', 'unit': 'MINUTES'},
'timeline-widget': {'coeff': '+7', 'unit': 'MINUTES'},
'bucket-widget': {'coeff': '+7', 'unit': 'MINUTES'},
'bar-widget': {'coeff': '+7', 'unit': 'MINUTES'},
'facet-widget': {'coeff': '+1', 'unit': 'HOURS'},
},
'1DAYS': {
'histogram-widget': {'coeff': '+15', 'unit': 'MINUTES'},
'timeline-widget': {'coeff': '+15', 'unit': 'MINUTES'},
'bucket-widget': {'coeff': '+15', 'unit': 'MINUTES'},
'bar-widget': {'coeff': '+15', 'unit': 'MINUTES'},
'facet-widget': {'coeff': '+3', 'unit': 'HOURS'},
},
'2DAYS': {
'histogram-widget': {'coeff': '+30', 'unit': 'MINUTES'},
'timeline-widget': {'coeff': '+30', 'unit': 'MINUTES'},
'bucket-widget': {'coeff': '+30', 'unit': 'MINUTES'},
'bar-widget': {'coeff': '+30', 'unit': 'MINUTES'},
'facet-widget': {'coeff': '+6', 'unit': 'HOURS'},
},
'7DAYS': {
'histogram-widget': {'coeff': '+3', 'unit': 'HOURS'},
'timeline-widget': {'coeff': '+3', 'unit': 'HOURS'},
'bucket-widget': {'coeff': '+3', 'unit': 'HOURS'},
'bar-widget': {'coeff': '+3', 'unit': 'HOURS'},
'facet-widget': {'coeff': '+1', 'unit': 'DAYS'},
},
'1MONTHS': {
'histogram-widget': {'coeff': '+12', 'unit': 'HOURS'},
'timeline-widget': {'coeff': '+12', 'unit': 'HOURS'},
'bucket-widget': {'coeff': '+12', 'unit': 'HOURS'},
'bar-widget': {'coeff': '+12', 'unit': 'HOURS'},
'facet-widget': {'coeff': '+5', 'unit': 'DAYS'},
},
'3MONTHS': {
'histogram-widget': {'coeff': '+1', 'unit': 'DAYS'},
'timeline-widget': {'coeff': '+1', 'unit': 'DAYS'},
'bucket-widget': {'coeff': '+1', 'unit': 'DAYS'},
'bar-widget': {'coeff': '+1', 'unit': 'DAYS'},
'facet-widget': {'coeff': '+30', 'unit': 'DAYS'},
},
'1YEARS': {
'histogram-widget': {'coeff': '+3', 'unit': 'DAYS'},
'timeline-widget': {'coeff': '+3', 'unit': 'DAYS'},
'bucket-widget': {'coeff': '+3', 'unit': 'DAYS'},
'bar-widget': {'coeff': '+3', 'unit': 'DAYS'},
'facet-widget': {'coeff': '+12', 'unit': 'MONTHS'},
},
'2YEARS': {
'histogram-widget': {'coeff': '+7', 'unit': 'DAYS'},
'timeline-widget': {'coeff': '+7', 'unit': 'DAYS'},
'bucket-widget': {'coeff': '+7', 'unit': 'DAYS'},
'bar-widget': {'coeff': '+7', 'unit': 'DAYS'},
'facet-widget': {'coeff': '+3', 'unit': 'MONTHS'},
},
'10YEARS': {
'histogram-widget': {'coeff': '+1', 'unit': 'MONTHS'},
'timeline-widget': {'coeff': '+1', 'unit': 'MONTHS'},
'bucket-widget': {'coeff': '+1', 'unit': 'MONTHS'},
'bar-widget': {'coeff': '+1', 'unit': 'MONTHS'},
'facet-widget': {'coeff': '+1', 'unit': 'YEARS'},
}
}
|
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Tests for coverage.data"""
import glob
import os
import os.path
import re
import sqlite3
import threading
from unittest import mock
import pytest
from coverage.data import CoverageData, combine_parallel_data
from coverage.data import add_data_to_hash, line_counts
from coverage.debug import DebugControlString
from coverage.exceptions import DataError, NoDataError
from coverage.files import PathAliases, canonical_filename
from tests.coveragetest import CoverageTest
from tests.helpers import assert_count_equal
LINES_1 = {
'a.py': {1, 2},
'b.py': {3},
}
SUMMARY_1 = {'a.py': 2, 'b.py': 1}
MEASURED_FILES_1 = ['a.py', 'b.py']
A_PY_LINES_1 = [1, 2]
B_PY_LINES_1 = [3]
LINES_2 = {
'a.py': {1, 5},
'c.py': {17},
}
SUMMARY_1_2 = {'a.py': 3, 'b.py': 1, 'c.py': 1}
MEASURED_FILES_1_2 = ['a.py', 'b.py', 'c.py']
ARCS_3 = {
'x.py': {(-1, 1), (1, 2), (2, 3), (3, -1)},
'y.py': {(-1, 17), (17, 23), (23, -1)},
}
X_PY_ARCS_3 = [(-1, 1), (1, 2), (2, 3), (3, -1)]
Y_PY_ARCS_3 = [(-1, 17), (17, 23), (23, -1)]
SUMMARY_3 = {'x.py': 3, 'y.py': 2}
MEASURED_FILES_3 = ['x.py', 'y.py']
X_PY_LINES_3 = [1, 2, 3]
Y_PY_LINES_3 = [17, 23]
ARCS_4 = {
'x.py': {(-1, 2), (2, 5), (5, -1)},
'z.py': {(-1, 1000), (1000, -1)},
}
SUMMARY_3_4 = {'x.py': 4, 'y.py': 2, 'z.py': 1}
MEASURED_FILES_3_4 = ['x.py', 'y.py', 'z.py']
def DebugCoverageData(*args, **kwargs):
"""Factory for CovergeData instances with debugging turned on.
This lets us exercise the debugging lines in sqldata.py. We don't make
any assertions about the debug output, but at least we can know that they
execute successfully, and they won't be marked as distracting missing
lines in our coverage reports.
"""
assert "debug" not in kwargs
debug = DebugControlString(options=["dataio", "dataop", "sql"])
return CoverageData(*args, debug=debug, **kwargs)
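# Usage note (added for illustration): the test classes below instantiate DebugCoverageData()
# anywhere a plain CoverageData() would do, e.g. covdata = DebugCoverageData(suffix='1'), so
# every operation they exercise also runs through the "dataio", "dataop", and "sql" debug paths.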
def assert_line_counts(covdata, counts, fullpath=False):
"""Check that the line_counts of `covdata` is `counts`."""
assert line_counts(covdata, fullpath) == counts
def assert_measured_files(covdata, measured):
"""Check that `covdata`'s measured files are `measured`."""
assert_count_equal(covdata.measured_files(), measured)
def assert_lines1_data(covdata):
"""Check that `covdata` has the data from LINES1."""
assert_line_counts(covdata, SUMMARY_1)
assert_measured_files(covdata, MEASURED_FILES_1)
assert_count_equal(covdata.lines("a.py"), A_PY_LINES_1)
assert not covdata.has_arcs()
def assert_arcs3_data(covdata):
"""Check that `covdata` has the data from ARCS3."""
assert_line_counts(covdata, SUMMARY_3)
assert_measured_files(covdata, MEASURED_FILES_3)
assert_count_equal(covdata.lines("x.py"), X_PY_LINES_3)
assert_count_equal(covdata.arcs("x.py"), X_PY_ARCS_3)
assert_count_equal(covdata.lines("y.py"), Y_PY_LINES_3)
assert_count_equal(covdata.arcs("y.py"), Y_PY_ARCS_3)
assert covdata.has_arcs()
def dicts_from_sets(file_data):
"""Convert a dict of sets into a dict of dicts.
Before 6.0, file data was a dict with None as the values. In 6.0, file
data is a set. SqlData all along only cared that it was an iterable.
This function helps us test that the old dict format still works.
"""
return {k: dict.fromkeys(v) for k, v in file_data.items()}
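# Illustrative example (not used by the tests): for LINES_1-shaped input,
# dicts_from_sets({'a.py': {1, 2}}) == {'a.py': {1: None, 2: None}},
# i.e. the pre-6.0 dict-of-dicts shape that add_lines() still accepts.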
class CoverageDataTest(CoverageTest):
"""Test cases for CoverageData."""
def test_empty_data_is_false(self):
covdata = DebugCoverageData()
assert not covdata
def test_line_data_is_true(self):
covdata = DebugCoverageData()
covdata.add_lines(LINES_1)
assert covdata
def test_arc_data_is_true(self):
covdata = DebugCoverageData()
covdata.add_arcs(ARCS_3)
assert covdata
def test_empty_line_data_is_false(self):
covdata = DebugCoverageData()
covdata.add_lines({})
assert not covdata
def test_empty_arc_data_is_false(self):
covdata = DebugCoverageData()
covdata.add_arcs({})
assert not covdata
@pytest.mark.parametrize("lines", [LINES_1, dicts_from_sets(LINES_1)])
def test_adding_lines(self, lines):
covdata = DebugCoverageData()
covdata.add_lines(lines)
assert_lines1_data(covdata)
@pytest.mark.parametrize("arcs", [ARCS_3, dicts_from_sets(ARCS_3)])
def test_adding_arcs(self, arcs):
covdata = DebugCoverageData()
covdata.add_arcs(arcs)
assert_arcs3_data(covdata)
def test_ok_to_add_lines_twice(self):
covdata = DebugCoverageData()
covdata.add_lines(LINES_1)
covdata.add_lines(LINES_2)
assert_line_counts(covdata, SUMMARY_1_2)
assert_measured_files(covdata, MEASURED_FILES_1_2)
def test_ok_to_add_arcs_twice(self):
covdata = DebugCoverageData()
covdata.add_arcs(ARCS_3)
covdata.add_arcs(ARCS_4)
assert_line_counts(covdata, SUMMARY_3_4)
assert_measured_files(covdata, MEASURED_FILES_3_4)
def test_cant_add_arcs_with_lines(self):
covdata = DebugCoverageData()
covdata.add_lines(LINES_1)
msg = "Can't add branch measurements to existing line data"
with pytest.raises(DataError, match=msg):
covdata.add_arcs(ARCS_3)
def test_cant_add_lines_with_arcs(self):
covdata = DebugCoverageData()
covdata.add_arcs(ARCS_3)
msg = "Can't add line measurements to existing branch data"
with pytest.raises(DataError, match=msg):
covdata.add_lines(LINES_1)
def test_touch_file_with_lines(self):
covdata = DebugCoverageData()
covdata.add_lines(LINES_1)
covdata.touch_file('zzz.py')
assert_measured_files(covdata, MEASURED_FILES_1 + ['zzz.py'])
def test_touch_file_with_arcs(self):
covdata = DebugCoverageData()
covdata.add_arcs(ARCS_3)
covdata.touch_file('zzz.py')
assert_measured_files(covdata, MEASURED_FILES_3 + ['zzz.py'])
def test_set_query_contexts(self):
covdata = DebugCoverageData()
covdata.set_context('test_a')
covdata.add_lines(LINES_1)
covdata.set_query_contexts(['te.*a'])
assert covdata.lines('a.py') == [1, 2]
covdata.set_query_contexts(['other'])
assert covdata.lines('a.py') == []
def test_no_lines_vs_unmeasured_file(self):
covdata = DebugCoverageData()
covdata.add_lines(LINES_1)
covdata.touch_file('zzz.py')
assert covdata.lines('zzz.py') == []
assert covdata.lines('no_such_file.py') is None
def test_lines_with_contexts(self):
covdata = DebugCoverageData()
covdata.set_context('test_a')
covdata.add_lines(LINES_1)
assert covdata.lines('a.py') == [1, 2]
covdata.set_query_contexts(['test'])
assert covdata.lines('a.py') == [1, 2]
covdata.set_query_contexts(['other'])
assert covdata.lines('a.py') == []
def test_contexts_by_lineno_with_lines(self):
covdata = DebugCoverageData()
covdata.set_context('test_a')
covdata.add_lines(LINES_1)
expected = {1: ['test_a'], 2: ['test_a']}
assert covdata.contexts_by_lineno('a.py') == expected
@pytest.mark.parametrize("lines", [LINES_1, dicts_from_sets(LINES_1)])
def test_no_duplicate_lines(self, lines):
covdata = DebugCoverageData()
covdata.set_context("context1")
covdata.add_lines(lines)
covdata.set_context("context2")
covdata.add_lines(lines)
assert covdata.lines('a.py') == A_PY_LINES_1
@pytest.mark.parametrize("arcs", [ARCS_3, dicts_from_sets(ARCS_3)])
def test_no_duplicate_arcs(self, arcs):
covdata = DebugCoverageData()
covdata.set_context("context1")
covdata.add_arcs(arcs)
covdata.set_context("context2")
covdata.add_arcs(arcs)
assert covdata.arcs('x.py') == X_PY_ARCS_3
def test_no_arcs_vs_unmeasured_file(self):
covdata = DebugCoverageData()
covdata.add_arcs(ARCS_3)
covdata.touch_file('zzz.py')
assert covdata.lines('zzz.py') == []
assert covdata.lines('no_such_file.py') is None
assert covdata.arcs('zzz.py') == []
assert covdata.arcs('no_such_file.py') is None
def test_arcs_with_contexts(self):
covdata = DebugCoverageData()
covdata.set_context('test_x')
covdata.add_arcs(ARCS_3)
assert covdata.arcs('x.py') == [(-1, 1), (1, 2), (2, 3), (3, -1)]
covdata.set_query_contexts(['test_.$'])
assert covdata.arcs('x.py') == [(-1, 1), (1, 2), (2, 3), (3, -1)]
covdata.set_query_contexts(['other'])
assert covdata.arcs('x.py') == []
def test_contexts_by_lineno_with_arcs(self):
covdata = DebugCoverageData()
covdata.set_context('test_x')
covdata.add_arcs(ARCS_3)
expected = {1: ['test_x'], 2: ['test_x'], 3: ['test_x']}
assert covdata.contexts_by_lineno('x.py') == expected
def test_contexts_by_lineno_with_unknown_file(self):
covdata = DebugCoverageData()
covdata.set_context('test_x')
covdata.add_arcs(ARCS_3)
assert covdata.contexts_by_lineno('xyz.py') == {}
def test_context_by_lineno_with_query_contexts_with_lines(self):
covdata = DebugCoverageData()
covdata.set_context("test_1")
covdata.add_lines(LINES_1)
covdata.set_context("test_2")
covdata.add_lines(LINES_2)
covdata.set_query_context("test_1")
assert covdata.contexts_by_lineno("a.py") == dict.fromkeys([1,2], ["test_1"])
def test_context_by_lineno_with_query_contexts_with_arcs(self):
covdata = DebugCoverageData()
covdata.set_context("test_1")
covdata.add_arcs(ARCS_3)
covdata.set_context("test_2")
covdata.add_arcs(ARCS_4)
covdata.set_query_context("test_1")
assert covdata.contexts_by_lineno("x.py") == dict.fromkeys([1,2,3], ["test_1"])
def test_file_tracer_name(self):
covdata = DebugCoverageData()
covdata.add_lines({
"p1.foo": [1, 2, 3],
"p2.html": [10, 11, 12],
"main.py": [20],
})
covdata.add_file_tracers({"p1.foo": "p1.plugin", "p2.html": "p2.plugin"})
assert covdata.file_tracer("p1.foo") == "p1.plugin"
assert covdata.file_tracer("p2.html") == "p2.plugin"
assert covdata.file_tracer("main.py") == ""
assert covdata.file_tracer("p3.not_here") is None
def test_ok_to_repeat_file_tracer(self):
covdata = DebugCoverageData()
covdata.add_lines({
"p1.foo": [1, 2, 3],
"p2.html": [10, 11, 12],
})
covdata.add_file_tracers({"p1.foo": "p1.plugin", "p2.html": "p2.plugin"})
covdata.add_file_tracers({"p1.foo": "p1.plugin"})
assert covdata.file_tracer("p1.foo") == "p1.plugin"
def test_ok_to_set_empty_file_tracer(self):
covdata = DebugCoverageData()
covdata.add_lines({
"p1.foo": [1, 2, 3],
"p2.html": [10, 11, 12],
"main.py": [20],
})
covdata.add_file_tracers({"p1.foo": "p1.plugin", "main.py": ""})
assert covdata.file_tracer("p1.foo") == "p1.plugin"
assert covdata.file_tracer("main.py") == ""
def test_cant_file_tracer_unmeasured_files(self):
covdata = DebugCoverageData()
msg = "Can't add file tracer data for unmeasured file 'p1.foo'"
with pytest.raises(DataError, match=msg):
covdata.add_file_tracers({"p1.foo": "p1.plugin"})
covdata.add_lines({"p2.html": [10, 11, 12]})
with pytest.raises(DataError, match=msg):
covdata.add_file_tracers({"p1.foo": "p1.plugin"})
def test_cant_change_file_tracer_name(self):
covdata = DebugCoverageData()
covdata.add_lines({"p1.foo": [1, 2, 3]})
covdata.add_file_tracers({"p1.foo": "p1.plugin"})
msg = "Conflicting file tracer name for 'p1.foo': 'p1.plugin' vs 'p1.plugin.foo'"
with pytest.raises(DataError, match=msg):
covdata.add_file_tracers({"p1.foo": "p1.plugin.foo"})
def test_update_lines(self):
covdata1 = DebugCoverageData(suffix='1')
covdata1.add_lines(LINES_1)
covdata2 = DebugCoverageData(suffix='2')
covdata2.add_lines(LINES_2)
covdata3 = DebugCoverageData(suffix='3')
covdata3.update(covdata1)
covdata3.update(covdata2)
assert_line_counts(covdata3, SUMMARY_1_2)
assert_measured_files(covdata3, MEASURED_FILES_1_2)
def test_update_arcs(self):
covdata1 = DebugCoverageData(suffix='1')
covdata1.add_arcs(ARCS_3)
covdata2 = DebugCoverageData(suffix='2')
covdata2.add_arcs(ARCS_4)
covdata3 = DebugCoverageData(suffix='3')
covdata3.update(covdata1)
covdata3.update(covdata2)
assert_line_counts(covdata3, SUMMARY_3_4)
assert_measured_files(covdata3, MEASURED_FILES_3_4)
def test_update_cant_mix_lines_and_arcs(self):
covdata1 = DebugCoverageData(suffix='1')
covdata1.add_lines(LINES_1)
covdata2 = DebugCoverageData(suffix='2')
covdata2.add_arcs(ARCS_3)
with pytest.raises(DataError, match="Can't combine arc data with line data"):
covdata1.update(covdata2)
with pytest.raises(DataError, match="Can't combine line data with arc data"):
covdata2.update(covdata1)
def test_update_file_tracers(self):
covdata1 = DebugCoverageData(suffix='1')
covdata1.add_lines({
"p1.html": [1, 2, 3, 4],
"p2.html": [5, 6, 7],
"main.py": [10, 11, 12],
})
covdata1.add_file_tracers({
"p1.html": "html.plugin",
"p2.html": "html.plugin2",
})
covdata2 = DebugCoverageData(suffix='2')
covdata2.add_lines({
"p1.html": [3, 4, 5, 6],
"p2.html": [7, 8, 9],
"p3.foo": [1000, 1001],
"main.py": [10, 11, 12],
})
covdata2.add_file_tracers({
"p1.html": "html.plugin",
"p2.html": "html.plugin2",
"p3.foo": "foo_plugin",
})
covdata3 = DebugCoverageData(suffix='3')
covdata3.update(covdata1)
covdata3.update(covdata2)
assert covdata3.file_tracer("p1.html") == "html.plugin"
assert covdata3.file_tracer("p2.html") == "html.plugin2"
assert covdata3.file_tracer("p3.foo") == "foo_plugin"
assert covdata3.file_tracer("main.py") == ""
def test_update_conflicting_file_tracers(self):
covdata1 = DebugCoverageData(suffix='1')
covdata1.add_lines({"p1.html": [1, 2, 3]})
covdata1.add_file_tracers({"p1.html": "html.plugin"})
covdata2 = DebugCoverageData(suffix='2')
covdata2.add_lines({"p1.html": [1, 2, 3]})
covdata2.add_file_tracers({"p1.html": "html.other_plugin"})
msg = "Conflicting file tracer name for 'p1.html': 'html.plugin' vs 'html.other_plugin'"
with pytest.raises(DataError, match=msg):
covdata1.update(covdata2)
msg = "Conflicting file tracer name for 'p1.html': 'html.other_plugin' vs 'html.plugin'"
with pytest.raises(DataError, match=msg):
covdata2.update(covdata1)
def test_update_file_tracer_vs_no_file_tracer(self):
covdata1 = DebugCoverageData(suffix="1")
covdata1.add_lines({"p1.html": [1, 2, 3]})
covdata1.add_file_tracers({"p1.html": "html.plugin"})
covdata2 = DebugCoverageData(suffix="2")
covdata2.add_lines({"p1.html": [1, 2, 3]})
msg = "Conflicting file tracer name for 'p1.html': 'html.plugin' vs ''"
with pytest.raises(DataError, match=msg):
covdata1.update(covdata2)
msg = "Conflicting file tracer name for 'p1.html': '' vs 'html.plugin'"
with pytest.raises(DataError, match=msg):
covdata2.update(covdata1)
def test_update_lines_empty(self):
covdata1 = DebugCoverageData(suffix='1')
covdata1.add_lines(LINES_1)
covdata2 = DebugCoverageData(suffix='2')
covdata1.update(covdata2)
assert_line_counts(covdata1, SUMMARY_1)
def test_update_arcs_empty(self):
covdata1 = DebugCoverageData(suffix='1')
covdata1.add_arcs(ARCS_3)
covdata2 = DebugCoverageData(suffix='2')
covdata1.update(covdata2)
assert_line_counts(covdata1, SUMMARY_3)
def test_asking_isnt_measuring(self):
# Asking about an unmeasured file shouldn't make it seem measured.
covdata = DebugCoverageData()
assert_measured_files(covdata, [])
assert covdata.arcs("missing.py") is None
assert_measured_files(covdata, [])
def test_add_to_hash_with_lines(self):
covdata = DebugCoverageData()
covdata.add_lines(LINES_1)
hasher = mock.Mock()
add_data_to_hash(covdata, "a.py", hasher)
assert hasher.method_calls == [
mock.call.update([1, 2]), # lines
mock.call.update(""), # file_tracer name
]
def test_add_to_hash_with_arcs(self):
covdata = DebugCoverageData()
covdata.add_arcs(ARCS_3)
covdata.add_file_tracers({"y.py": "hologram_plugin"})
hasher = mock.Mock()
add_data_to_hash(covdata, "y.py", hasher)
assert hasher.method_calls == [
mock.call.update([(-1, 17), (17, 23), (23, -1)]), # arcs
mock.call.update("hologram_plugin"), # file_tracer name
]
def test_add_to_lines_hash_with_missing_file(self):
# https://github.com/nedbat/coveragepy/issues/403
covdata = DebugCoverageData()
covdata.add_lines(LINES_1)
hasher = mock.Mock()
add_data_to_hash(covdata, "missing.py", hasher)
assert hasher.method_calls == [
mock.call.update([]),
mock.call.update(None),
]
def test_add_to_arcs_hash_with_missing_file(self):
# https://github.com/nedbat/coveragepy/issues/403
covdata = DebugCoverageData()
covdata.add_arcs(ARCS_3)
covdata.add_file_tracers({"y.py": "hologram_plugin"})
hasher = mock.Mock()
add_data_to_hash(covdata, "missing.py", hasher)
assert hasher.method_calls == [
mock.call.update([]),
mock.call.update(None),
]
def test_empty_lines_are_still_lines(self):
covdata = DebugCoverageData()
covdata.add_lines({})
covdata.touch_file("abc.py")
assert not covdata.has_arcs()
def test_empty_arcs_are_still_arcs(self):
covdata = DebugCoverageData()
covdata.add_arcs({})
covdata.touch_file("abc.py")
assert covdata.has_arcs()
def test_cant_touch_in_empty_data(self):
covdata = DebugCoverageData()
msg = "Can't touch files in an empty CoverageData"
with pytest.raises(DataError, match=msg):
covdata.touch_file("abc.py")
def test_read_and_write_are_opposites(self):
covdata1 = DebugCoverageData()
covdata1.add_arcs(ARCS_3)
covdata1.write()
covdata2 = DebugCoverageData()
covdata2.read()
assert_arcs3_data(covdata2)
def test_thread_stress(self):
covdata = DebugCoverageData()
exceptions = []
def thread_main():
"""Every thread will try to add the same data."""
try:
covdata.add_lines(LINES_1)
except Exception as ex: # pragma: only failure
exceptions.append(ex)
threads = [threading.Thread(target=thread_main) for _ in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
assert_lines1_data(covdata)
assert not exceptions
class CoverageDataInTempDirTest(CoverageTest):
"""Tests of CoverageData that need a temporary directory to make files."""
def test_read_write_lines(self):
covdata1 = DebugCoverageData("lines.dat")
covdata1.add_lines(LINES_1)
covdata1.write()
covdata2 = DebugCoverageData("lines.dat")
covdata2.read()
assert_lines1_data(covdata2)
def test_read_write_arcs(self):
covdata1 = DebugCoverageData("arcs.dat")
covdata1.add_arcs(ARCS_3)
covdata1.write()
covdata2 = DebugCoverageData("arcs.dat")
covdata2.read()
assert_arcs3_data(covdata2)
def test_read_errors(self):
self.make_file("xyzzy.dat", "xyzzy")
with pytest.raises(DataError, match=r"Couldn't .* '.*[/\\]xyzzy.dat': \S+"):
covdata = DebugCoverageData("xyzzy.dat")
covdata.read()
assert not covdata
def test_hard_read_error(self):
self.make_file("noperms.dat", "go away")
os.chmod("noperms.dat", 0)
with pytest.raises(DataError, match=r"Couldn't .* '.*[/\\]noperms.dat': \S+"):
covdata = DebugCoverageData("noperms.dat")
covdata.read()
@pytest.mark.parametrize("klass", [CoverageData, DebugCoverageData])
def test_error_when_closing(self, klass):
msg = r"Couldn't .* '.*[/\\]flaked.dat': \S+"
with pytest.raises(DataError, match=msg):
covdata = klass("flaked.dat")
covdata.add_lines(LINES_1)
# I don't know how to make a real error, so let's fake one.
sqldb = list(covdata._dbs.values())[0]
sqldb.close = lambda: 1/0
covdata.add_lines(LINES_1)
def test_wrong_schema_version(self):
with sqlite3.connect("wrong_schema.db") as con:
con.execute("create table coverage_schema (version integer)")
con.execute("insert into coverage_schema (version) values (99)")
msg = r"Couldn't .* '.*[/\\]wrong_schema.db': wrong schema: 99 instead of \d+"
with pytest.raises(DataError, match=msg):
covdata = DebugCoverageData("wrong_schema.db")
covdata.read()
assert not covdata
def test_wrong_schema_schema(self):
with sqlite3.connect("wrong_schema_schema.db") as con:
con.execute("create table coverage_schema (xyzzy integer)")
con.execute("insert into coverage_schema (xyzzy) values (99)")
msg = r"Data file .* doesn't seem to be a coverage data file: .* no such column"
with pytest.raises(DataError, match=msg):
covdata = DebugCoverageData("wrong_schema_schema.db")
covdata.read()
assert not covdata
class CoverageDataFilesTest(CoverageTest):
"""Tests of CoverageData file handling."""
def test_reading_missing(self):
self.assert_doesnt_exist(".coverage")
covdata = DebugCoverageData()
covdata.read()
assert_line_counts(covdata, {})
def test_writing_and_reading(self):
covdata1 = DebugCoverageData()
covdata1.add_lines(LINES_1)
covdata1.write()
covdata2 = DebugCoverageData()
covdata2.read()
assert_line_counts(covdata2, SUMMARY_1)
def test_debug_output_with_debug_option(self):
# With debug option dataio, we get debug output about reading and
# writing files.
debug = DebugControlString(options=["dataio"])
covdata1 = CoverageData(debug=debug)
covdata1.add_lines(LINES_1)
covdata1.write()
covdata2 = CoverageData(debug=debug)
covdata2.read()
assert_line_counts(covdata2, SUMMARY_1)
assert re.search(
r"^Erasing data file '.*\.coverage'\n" +
r"Opening data file '.*\.coverage'\n" +
r"Initing data file '.*\.coverage'\n" +
r"Opening data file '.*\.coverage'\n$",
debug.get_output()
)
def test_debug_output_without_debug_option(self):
# With a debug object, but not the dataio option, we don't get debug
# output.
debug = DebugControlString(options=[])
covdata1 = CoverageData(debug=debug)
covdata1.add_lines(LINES_1)
covdata1.write()
covdata2 = CoverageData(debug=debug)
covdata2.read()
assert_line_counts(covdata2, SUMMARY_1)
assert debug.get_output() == ""
def test_explicit_suffix(self):
self.assert_doesnt_exist(".coverage.SUFFIX")
covdata = DebugCoverageData(suffix='SUFFIX')
covdata.add_lines(LINES_1)
covdata.write()
self.assert_exists(".coverage.SUFFIX")
self.assert_doesnt_exist(".coverage")
def test_true_suffix(self):
self.assert_file_count(".coverage.*", 0)
# suffix=True will make a randomly named data file.
covdata1 = DebugCoverageData(suffix=True)
covdata1.add_lines(LINES_1)
covdata1.write()
self.assert_doesnt_exist(".coverage")
data_files1 = glob.glob(".coverage.*")
assert len(data_files1) == 1
# Another suffix=True will choose a different name.
covdata2 = DebugCoverageData(suffix=True)
covdata2.add_lines(LINES_1)
covdata2.write()
self.assert_doesnt_exist(".coverage")
data_files2 = glob.glob(".coverage.*")
assert len(data_files2) == 2
# In addition to being different, the suffixes have the pid in them.
assert all(str(os.getpid()) in fn for fn in data_files2)
def test_combining(self):
self.assert_file_count(".coverage.*", 0)
covdata1 = DebugCoverageData(suffix='1')
covdata1.add_lines(LINES_1)
covdata1.write()
self.assert_exists(".coverage.1")
self.assert_file_count(".coverage.*", 1)
covdata2 = DebugCoverageData(suffix='2')
covdata2.add_lines(LINES_2)
covdata2.write()
self.assert_exists(".coverage.2")
self.assert_file_count(".coverage.*", 2)
covdata3 = DebugCoverageData()
combine_parallel_data(covdata3)
assert_line_counts(covdata3, SUMMARY_1_2)
assert_measured_files(covdata3, MEASURED_FILES_1_2)
self.assert_file_count(".coverage.*", 0)
def test_erasing(self):
covdata1 = DebugCoverageData()
covdata1.add_lines(LINES_1)
covdata1.write()
covdata1.erase()
assert_line_counts(covdata1, {})
covdata2 = DebugCoverageData()
covdata2.read()
assert_line_counts(covdata2, {})
def test_erasing_parallel(self):
self.make_file("datafile.1")
self.make_file("datafile.2")
self.make_file(".coverage")
data = DebugCoverageData("datafile")
data.erase(parallel=True)
self.assert_file_count("datafile.*", 0)
self.assert_exists(".coverage")
def test_combining_with_aliases(self):
covdata1 = DebugCoverageData(suffix='1')
covdata1.add_lines({
'/home/ned/proj/src/a.py': {1, 2},
'/home/ned/proj/src/sub/b.py': {3},
'/home/ned/proj/src/template.html': {10},
})
covdata1.add_file_tracers({
'/home/ned/proj/src/template.html': 'html.plugin',
})
covdata1.write()
covdata2 = DebugCoverageData(suffix='2')
covdata2.add_lines({
r'c:\ned\test\a.py': {4, 5},
r'c:\ned\test\sub\b.py': {3, 6},
})
covdata2.write()
self.assert_file_count(".coverage.*", 2)
covdata3 = DebugCoverageData()
aliases = PathAliases()
aliases.add("/home/ned/proj/src/", "./")
aliases.add(r"c:\ned\test", "./")
combine_parallel_data(covdata3, aliases=aliases)
self.assert_file_count(".coverage.*", 0)
# covdata3 hasn't been written yet. Should this file exist or not?
#self.assert_exists(".coverage")
apy = canonical_filename('./a.py')
sub_bpy = canonical_filename('./sub/b.py')
template_html = canonical_filename('./template.html')
assert_line_counts(covdata3, {apy: 4, sub_bpy: 2, template_html: 1}, fullpath=True)
assert_measured_files(covdata3, [apy, sub_bpy, template_html])
assert covdata3.file_tracer(template_html) == 'html.plugin'
def test_combining_from_different_directories(self):
os.makedirs('cov1')
covdata1 = DebugCoverageData('cov1/.coverage.1')
covdata1.add_lines(LINES_1)
covdata1.write()
os.makedirs('cov2')
covdata2 = DebugCoverageData('cov2/.coverage.2')
covdata2.add_lines(LINES_2)
covdata2.write()
# This data won't be included.
covdata_xxx = DebugCoverageData('.coverage.xxx')
covdata_xxx.add_arcs(ARCS_3)
covdata_xxx.write()
covdata3 = DebugCoverageData()
combine_parallel_data(covdata3, data_paths=['cov1', 'cov2'])
assert_line_counts(covdata3, SUMMARY_1_2)
assert_measured_files(covdata3, MEASURED_FILES_1_2)
self.assert_doesnt_exist("cov1/.coverage.1")
self.assert_doesnt_exist("cov2/.coverage.2")
self.assert_exists(".coverage.xxx")
def test_combining_from_files(self):
os.makedirs('cov1')
covdata1 = DebugCoverageData('cov1/.coverage.1')
covdata1.add_lines(LINES_1)
covdata1.write()
os.makedirs('cov2')
covdata2 = DebugCoverageData('cov2/.coverage.2')
covdata2.add_lines(LINES_2)
covdata2.write()
# This data won't be included.
covdata_xxx = DebugCoverageData('.coverage.xxx')
covdata_xxx.add_arcs(ARCS_3)
covdata_xxx.write()
covdata_2xxx = DebugCoverageData('cov2/.coverage.xxx')
covdata_2xxx.add_arcs(ARCS_3)
covdata_2xxx.write()
covdata3 = DebugCoverageData()
combine_parallel_data(covdata3, data_paths=['cov1', 'cov2/.coverage.2'])
assert_line_counts(covdata3, SUMMARY_1_2)
assert_measured_files(covdata3, MEASURED_FILES_1_2)
self.assert_doesnt_exist("cov1/.coverage.1")
self.assert_doesnt_exist("cov2/.coverage.2")
self.assert_exists(".coverage.xxx")
self.assert_exists("cov2/.coverage.xxx")
def test_combining_from_nonexistent_directories(self):
covdata = DebugCoverageData()
msg = "Couldn't combine from non-existent path 'xyzzy'"
with pytest.raises(NoDataError, match=msg):
combine_parallel_data(covdata, data_paths=['xyzzy'])
def test_interleaved_erasing_bug716(self):
# pytest-cov could produce this scenario. #716
covdata1 = DebugCoverageData()
covdata2 = DebugCoverageData()
# this used to create the .coverage database file..
covdata2.set_context("")
# then this would erase it all..
covdata1.erase()
# then this would try to use tables that no longer exist.
# "no such table: meta"
covdata2.add_lines(LINES_1)
class DumpsLoadsTest(CoverageTest):
"""Tests of CoverageData.dumps and loads."""
run_in_temp_dir = False
@pytest.mark.parametrize("klass", [CoverageData, DebugCoverageData])
def test_serialization(self, klass):
covdata1 = klass(no_disk=True)
covdata1.add_lines(LINES_1)
covdata1.add_lines(LINES_2)
serial = covdata1.dumps()
covdata2 = klass(no_disk=True)
covdata2.loads(serial)
assert_line_counts(covdata2, SUMMARY_1_2)
assert_measured_files(covdata2, MEASURED_FILES_1_2)
def test_misfed_serialization(self):
covdata = CoverageData(no_disk=True)
bad_data = b'Hello, world!\x07 ' + b'z' * 100
msg = r"Unrecognized serialization: {} \(head of {} bytes\)".format(
re.escape(repr(bad_data[:40])),
len(bad_data),
)
with pytest.raises(DataError, match=msg):
covdata.loads(bad_data)
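# Sketch (for illustration only, not part of the original suite): a minimal dumps()/loads()
# round trip mirroring test_serialization above, using an in-memory CoverageData:
#
#     covdata1 = CoverageData(no_disk=True)
#     covdata1.add_lines(LINES_1)
#     blob = covdata1.dumps()
#     covdata2 = CoverageData(no_disk=True)
#     covdata2.loads(blob)
#     assert line_counts(covdata2) == SUMMARY_1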
|
|
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import uuidutils
from nova.compute import utils as compute_utils
from nova import db
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova import utils
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class InstanceGroup(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: String attributes updated to support unicode
# Version 1.2: Use list/dict helpers for policies, metadetails, members
# Version 1.3: Make uuid a non-None real string
# Version 1.4: Add add_members()
# Version 1.5: Add get_hosts()
# Version 1.6: Add get_by_name()
# Version 1.7: Deprecate metadetails
# Version 1.8: Add count_members_by_user()
# Version 1.9: Add get_by_instance_uuid()
VERSION = '1.9'
fields = {
'id': fields.IntegerField(),
'user_id': fields.StringField(nullable=True),
'project_id': fields.StringField(nullable=True),
'uuid': fields.UUIDField(),
'name': fields.StringField(nullable=True),
'policies': fields.ListOfStringsField(nullable=True),
'members': fields.ListOfStringsField(nullable=True),
}
def obj_make_compatible(self, primitive, target_version):
target_version = utils.convert_version_to_tuple(target_version)
if target_version < (1, 7):
# NOTE(danms): Before 1.7, we had an always-empty
# metadetails property
primitive['metadetails'] = {}
@staticmethod
def _from_db_object(context, instance_group, db_inst):
"""Method to help with migration to objects.
Converts a database entity to a formal object.
"""
# Most of the field names match right now, so be quick
for field in instance_group.fields:
if field == 'deleted':
instance_group.deleted = db_inst['deleted'] == db_inst['id']
else:
instance_group[field] = db_inst[field]
instance_group._context = context
instance_group.obj_reset_changes()
return instance_group
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid):
db_inst = db.instance_group_get(context, uuid)
return cls._from_db_object(context, cls(), db_inst)
@base.remotable_classmethod
def get_by_name(cls, context, name):
# TODO(russellb) We need to get the group by name here. There's no
# db.api method for this yet. Come back and optimize this by
# adding a new query by name. This is unnecessarily expensive if a
# tenant has lots of groups.
igs = objects.InstanceGroupList.get_by_project_id(context,
context.project_id)
for ig in igs:
if ig.name == name:
return ig
raise exception.InstanceGroupNotFound(group_uuid=name)
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
db_inst = db.instance_group_get_by_instance(context, instance_uuid)
return cls._from_db_object(context, cls(), db_inst)
@classmethod
def get_by_hint(cls, context, hint):
if uuidutils.is_uuid_like(hint):
return cls.get_by_uuid(context, hint)
else:
return cls.get_by_name(context, hint)
@base.remotable
def save(self):
"""Save updates to this instance group."""
updates = self.obj_get_changes()
if not updates:
return
payload = dict(updates)
payload['server_group_id'] = self.uuid
db.instance_group_update(self._context, self.uuid, updates)
db_inst = db.instance_group_get(self._context, self.uuid)
self._from_db_object(self._context, self, db_inst)
compute_utils.notify_about_server_group_update(self._context,
"update", payload)
@base.remotable
def refresh(self):
"""Refreshes the instance group."""
current = self.__class__.get_by_uuid(self._context, self.uuid)
for field in self.fields:
if self.obj_attr_is_set(field) and self[field] != current[field]:
self[field] = current[field]
self.obj_reset_changes()
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
updates = self.obj_get_changes()
payload = dict(updates)
updates.pop('id', None)
policies = updates.pop('policies', None)
members = updates.pop('members', None)
db_inst = db.instance_group_create(self._context, updates,
policies=policies,
members=members)
self._from_db_object(self._context, self, db_inst)
payload['server_group_id'] = self.uuid
compute_utils.notify_about_server_group_update(self._context,
"create", payload)
@base.remotable
def destroy(self):
payload = {'server_group_id': self.uuid}
db.instance_group_delete(self._context, self.uuid)
self.obj_reset_changes()
compute_utils.notify_about_server_group_update(self._context,
"delete", payload)
@base.remotable_classmethod
def add_members(cls, context, group_uuid, instance_uuids):
payload = {'server_group_id': group_uuid,
'instance_uuids': instance_uuids}
members = db.instance_group_members_add(context, group_uuid,
instance_uuids)
compute_utils.notify_about_server_group_update(context,
"addmember", payload)
return list(members)
@base.remotable
def get_hosts(self, exclude=None):
"""Get a list of hosts for non-deleted instances in the group
This method allows you to get a list of the hosts where instances in
this group are currently running. There's also an option to exclude
certain instance UUIDs from this calculation.
"""
filter_uuids = self.members
if exclude:
filter_uuids = set(filter_uuids) - set(exclude)
filters = {'uuid': filter_uuids, 'deleted': False}
instances = objects.InstanceList.get_by_filters(self._context,
filters=filters)
return list(set([instance.host for instance in instances
if instance.host]))
@base.remotable
def count_members_by_user(self, user_id):
"""Count the number of instances in a group belonging to a user."""
filter_uuids = self.members
filters = {'uuid': filter_uuids, 'user_id': user_id, 'deleted': False}
instances = objects.InstanceList.get_by_filters(self._context,
filters=filters)
return len(instances)
@base.NovaObjectRegistry.register
class InstanceGroupList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# InstanceGroup <= version 1.3
# Version 1.1: InstanceGroup <= version 1.4
# Version 1.2: InstanceGroup <= version 1.5
# Version 1.3: InstanceGroup <= version 1.6
# Version 1.4: InstanceGroup <= version 1.7
# Version 1.5: InstanceGroup <= version 1.8
# Version 1.6: InstanceGroup <= version 1.9
VERSION = '1.6'
fields = {
'objects': fields.ListOfObjectsField('InstanceGroup'),
}
# NOTE(danms): InstanceGroup was at 1.3 before we added this
obj_relationships = {
'objects': [('1.0', '1.3'), ('1.1', '1.4'), ('1.2', '1.5'),
('1.3', '1.6'), ('1.4', '1.7'), ('1.5', '1.8'),
('1.6', '1.9')],
}
@base.remotable_classmethod
def get_by_project_id(cls, context, project_id):
groups = db.instance_group_get_all_by_project_id(context, project_id)
return base.obj_make_list(context, cls(context), objects.InstanceGroup,
groups)
@base.remotable_classmethod
def get_all(cls, context):
groups = db.instance_group_get_all(context)
return base.obj_make_list(context, cls(context), objects.InstanceGroup,
groups)
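# Illustrative sketch (hypothetical variable names, not part of the original module): callers
# typically resolve a group and then ask where its members are running, e.g.
#
#     group = objects.InstanceGroup.get_by_hint(ctxt, hint)
#     hosts = group.get_hosts(exclude=[instance_uuid])
#
# get_by_hint() dispatches to get_by_uuid() or get_by_name() depending on whether the hint is
# UUID-like (see uuidutils.is_uuid_like above), and get_hosts() skips deleted instances.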
|
|
# BEGIN_COPYRIGHT
#
# Copyright 2009-2018 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
"""
Utilities for unit tests.
"""
import sys
import os
import random
import uuid
import tempfile
import imp
import unittest
import shutil
import warnings
import subprocess
import pydoop
import pydoop.utils.jvm as jvm
from pydoop.utils.py3compat import StringIO
JAVA_HOME = jvm.get_java_home()
JAVA = os.path.join(JAVA_HOME, "bin", "java")
JAVAC = os.path.join(JAVA_HOME, "bin", "javac")
_HADOOP_HOME = pydoop.hadoop_home()
_HADOOP_CONF_DIR = pydoop.hadoop_conf()
_RANDOM_DATA_SIZE = 32
_DEFAULT_HDFS_HOST = os.getenv("HOSTNAME", "localhost")
_DEFAULT_HDFS_PORT = 8020 if pydoop.is_cloudera() else 9000
_DEFAULT_BYTES_PER_CHECKSUM = 512
HDFS_HOST = os.getenv("HDFS_HOST", _DEFAULT_HDFS_HOST)
HDFS_PORT = os.getenv("HDFS_PORT", _DEFAULT_HDFS_PORT)
def _get_special_chr():
"""
This is used to check unicode support. On some systems, depending
on locale settings, we won't be able to use non-ASCII characters
when interacting with system calls. Since in such cases it
doesn't really make sense to run these tests we set UNI_CHR to a
regular ASCII character.
"""
# something outside the latin-1 range
the_chr = u'\N{CYRILLIC CAPITAL LETTER O WITH DIAERESIS}'
fd = None
fname = None
try:
fd, fname = tempfile.mkstemp(suffix=the_chr)
except UnicodeEncodeError:
msg = ("local file system doesn't support unicode characters"
"in filenames, falling back to ASCII-only")
warnings.warn(msg, UnicodeWarning)
the_chr = u's'
finally:
if fd:
os.close(fd)
os.remove(fname)
return the_chr
UNI_CHR = _get_special_chr()
try:
HDFS_PORT = int(HDFS_PORT)
except ValueError:
raise ValueError("Environment variable HDFS_PORT must be an int")
_FD_MAP = {
"stdout": sys.stdout.fileno(),
"stderr": sys.stderr.fileno(),
}
class FSTree(object):
"""
>>> t = FSTree('root')
>>> d1 = t.add('d1')
>>> f1 = t.add('f1', 0)
>>> d2 = d1.add('d2')
>>> f2 = d2.add('f2', 0)
    >>> for x in t.walk(): print(x.name, x.kind)
...
root 1
d1 1
d2 1
f2 0
f1 0
"""
def __init__(self, name, kind=1):
assert kind in (0, 1) # (file, dir)
self.name = name
self.kind = kind
if self.kind:
self.children = []
def add(self, name, kind=1):
t = FSTree(name, kind)
self.children.append(t)
return t
def walk(self):
yield self
if self.kind:
for c in self.children:
for t in c.walk():
yield t
def make_wd(fs, prefix="pydoop_test_"):
if fs.host:
wd = "%s%s" % (prefix, uuid.uuid4().hex)
fs.create_directory(wd)
return fs.get_path_info(wd)['name']
else:
return tempfile.mkdtemp(prefix=prefix)
def make_random_data(size=_RANDOM_DATA_SIZE, printable=True):
randint = random.randint
start, stop = (32, 126) if printable else (0, 255)
return bytes(bytearray([randint(start, stop) for _ in range(size)]))
def get_bytes_per_checksum():
params = pydoop.hadoop_params(_HADOOP_CONF_DIR, _HADOOP_HOME)
return int(params.get('io.bytes.per.checksum',
params.get('dfs.bytes-per-checksum',
_DEFAULT_BYTES_PER_CHECKSUM)))
def silent_call(func, *args, **kwargs):
with open(os.devnull, "w") as dev_null:
cache = {}
for s in "stdout", "stderr":
cache[s] = os.dup(_FD_MAP[s])
os.dup2(dev_null.fileno(), _FD_MAP[s])
try:
ret = func(*args, **kwargs)
finally:
for s in "stdout", "stderr":
os.dup2(cache[s], _FD_MAP[s])
return ret
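# Example (illustrative, hypothetical callable): suppress console noise from a call, including
# output written directly to the stdout/stderr file descriptors by native code:
#
#     result = silent_call(noisy_function, arg, key=value)
#
# The descriptors are dup()'d, redirected to /dev/null for the duration of the call, and then
# restored in the finally block above.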
def get_module(name, path=None):
fp, pathname, description = imp.find_module(name, path)
try:
module = imp.load_module(name, fp, pathname, description)
return module
finally:
fp.close()
def compile_java(java_file, classpath):
java_class_file = os.path.splitext(
os.path.realpath(java_file)
)[0] + '.class'
if (not os.path.exists(java_class_file) or
os.path.getmtime(java_file) > os.path.getmtime(java_class_file)):
cmd = [JAVAC, '-cp', classpath, java_file]
try:
subprocess.check_call(cmd, cwd=os.path.dirname(java_file))
except subprocess.CalledProcessError as e:
raise RuntimeError("Error compiling Java file %s\n%s" % (
java_file, e))
def run_java(jclass, classpath, args, wd):
try:
subprocess.check_call([JAVA, '-cp', classpath, jclass] + args, cwd=wd)
except subprocess.CalledProcessError as e:
raise RuntimeError("Error running Java class %s\n%s" % (
jclass, e))
def get_java_output_stream(jclass, classpath, args, wd):
output = subprocess.check_output(
[JAVA, '-cp', classpath, jclass] + args,
cwd=wd, stderr=open('/dev/null', 'w'))
return StringIO(output)
class WDTestCase(unittest.TestCase):
def setUp(self):
self.wd = tempfile.mkdtemp(prefix='pydoop_test_')
def tearDown(self):
shutil.rmtree(self.wd)
def _mkfn(self, basename):
return os.path.join(self.wd, basename)
def _mkf(self, basename, mode='w'):
return open(self._mkfn(basename), mode)
|
|
import argparse
import itertools
import logging
import os
import re
import simplejson as json
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB import NeighborSearch
from config import PDB_RESIDUE_TYPES_BY_RESIDUE
LEVEL_MAP = {
'A': 'atom',
'R': 'residue',
'C': 'chain'
}
# MATCHES PDB ACCESSIONS
# E.G. 1XKK
PDB_REGEX = r'([0-9]{1}[a-zA-Z]{3})'
# MATCHES PDB-BIOMOLID
# E.G. 1XKK-1
PDB_BIOMOL_REGEX = PDB_REGEX + r'-([0-9]+)'
# MATCHES RSCB STYLE ASSEMBLIES
# E.G. 1XKK.pdb1
PDB_RCSB_ASM_REGEX = PDB_REGEX + r'\.pdb([0-9]+)'
PDB_REGEX = re.compile(PDB_REGEX)
PDB_BIOMOL_REGEX = re.compile(PDB_BIOMOL_REGEX)
PDB_RCSB_ASM_REGEX = re.compile(PDB_RCSB_ASM_REGEX)
def cap(string, char=','):
'''Pad the beginning and end of string with a character.'''
return '{}{}{}'.format(char, string, char)
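# For example, cap('A') returns ',A,'; the padded strings are used as the entity keys in the
# interaction dictionaries built below.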
if __name__ == '__main__':
# ARGUMENT PARSING
parser = argparse.ArgumentParser(description='''
# Determine Interactions
Calculate all atom-atom, residue-residue, and chain-chain interactions
in PDB files.
This program assumes that the PDB file has already been cleaned and only takes
into account the first model.
''', formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'inputfile', type=str, help='Path to the PDB file to be analysed.')
parser.add_argument('-i', '--interacting', type=float,
default=5.0, help='Distance cutoff for interactions.')
parser.add_argument('-o', '--outputs', type=str,
default='cr', help='Granularity of output: string '
'including letters "c" for chain level, "r" for '
'residue level, "a" for atom level.')
parser.add_argument('-tf', '--type-filter', type=str,
default='*', help='Filter which types of residue are '
'included in atom and residue level calculations. '
'Will consider all interactions made between residues '
'of that/those type(s), and other entities, e.g., '
'filtering by \'dna\' would include DNA-protein '
'interactions. \n'
'Options are: * for all, or: peptide, peptide_like, '
                        'dna, rna, saccharide, non_polymer, water. Separate '
'multiple residue types with ')
parser.add_argument('-v', '--verbose',
action='store_true', help='Be chatty.')
args = parser.parse_args()
# SET ARGS TO CONSTANTS
INPUT_FILE = args.inputfile
INPUT_FILE_SPLITEXT = os.path.splitext(INPUT_FILE)[0]
INPUT_FILENAME = os.path.split(INPUT_FILE)[1]
INTERACTION_THRESHOLD = args.interacting
TYPE_FILTER = args.type_filter
OUTPUTS = args.outputs.upper()
# LOGGING
if args.verbose:
logging.basicConfig(
level=logging.INFO,
format='%(levelname)s//%(asctime)s.%(msecs).03d//%(message)s',
datefmt='%H:%M:%S')
else:
logging.basicConfig(
level=logging.WARN,
format='%(levelname)s//%(asctime)s.%(msecs).03d//%(message)s',
datefmt='%H:%M:%S')
logging.info('Program begin.')
# DETECT PDB ACCESSION FROM FILENAME IF POSSIBLE
PDB = ''
BIOMOL_ID = ''
pdb_match = re.search(PDB_REGEX, INPUT_FILENAME)
if pdb_match:
PDB = pdb_match.group(1)
pdb_biomol_match = re.search(PDB_BIOMOL_REGEX, INPUT_FILENAME)
if pdb_biomol_match:
PDB = pdb_biomol_match.group(1)
BIOMOL_ID = pdb_biomol_match.group(2)
pdb_rcsb_asm_match = re.search(PDB_RCSB_ASM_REGEX, INPUT_FILENAME)
if pdb_rcsb_asm_match:
PDB = pdb_rcsb_asm_match.group(1)
BIOMOL_ID = pdb_rcsb_asm_match.group(2)
# LOAD STRUCTURE
structure = PDBParser().get_structure('structure', INPUT_FILE)
structure_atoms = list(structure.get_atoms())
logging.info('Loaded PDB structure (BioPython).')
# CONSTRUCT KDTREE
neighborsearch = NeighborSearch(structure_atoms)
    logging.info('Constructed NeighborSearch.')
# GET INTERACTIONS
logging.info('Calculating interactions...')
for interaction_level in 'ARC':
if interaction_level in OUTPUTS:
logging.info('Calculating interactions for {}s...'.format(
LEVEL_MAP[interaction_level]))
pairs = neighborsearch.search_all(INTERACTION_THRESHOLD,
level=interaction_level)
logging.info('Search complete for {}s.'.format(
LEVEL_MAP[interaction_level]))
logging.info('Organising interactions for {}s...'.format(
LEVEL_MAP[interaction_level]))
interactions = {}
for entities in pairs:
entity1, entity2 = entities
# NO SELFIES
if (entity1 is entity2) or (entity1 == entity2):
continue
id1 = entity1.get_full_id()
id2 = entity2.get_full_id()
if interaction_level == 'A':
res1 = entity1.get_parent()
res2 = entity2.get_parent()
res1 = res1.resname.strip()
res2 = res2.resname.strip()
entity1 = cap(','.join(
[id1[2], str(id1[3][1]) + id1[3][2].strip() + '`' + res1, entity1.name]))
entity2 = cap(','.join(
[id2[2], str(id2[3][1]) + id2[3][2].strip() + '`' + res2, entity2.name]))
elif interaction_level == 'R':
entity1 = cap(','.join(
[id1[2], str(id1[3][1]) + id1[3][2].strip() + '`' + entity1.resname.strip()]))
entity2 = cap(','.join(
[id2[2], str(id2[3][1]) + id2[3][2].strip() + '`' + entity2.resname.strip()]))
elif interaction_level == 'C':
entity1 = cap(entity1.id)
entity2 = cap(entity2.id)
# ADD INTERACTING ENTITY TO LIST OF INTERACTORS
if entity1 not in interactions:
interactions[entity1] = []
if entity2 not in interactions:
interactions[entity2] = []
if entity2 not in interactions[entity1]:
interactions[entity1].append(entity2)
if entity1 not in interactions[entity2]:
interactions[entity2].append(entity1)
for entity in interactions:
interactions[entity] = sorted(interactions[entity])
logging.info('Organisation complete for {}s.'.format(
LEVEL_MAP[interaction_level]))
logging.info('Constructing JSON for {}s...'.format(
LEVEL_MAP[interaction_level]))
json_output = {
'input': INPUT_FILE,
'pdb': PDB,
'biomol_id': BIOMOL_ID,
'level': LEVEL_MAP[interaction_level],
'interactions': interactions
}
# TYPE RESIDUES IF POSSIBLE
if interaction_level in 'AR' and PDB_RESIDUE_TYPES_BY_RESIDUE:
logging.info('Typing residues for {} output...'.format(
LEVEL_MAP[interaction_level]))
json_output['residue_types'] = {}
for entity in json_output['interactions']:
resname = None
if interaction_level == 'A':
resname = entity.split(',')[-3].split('`')[1]
if interaction_level == 'R':
resname = entity.split(',')[-2].split('`')[1]
if resname:
restype = None
try:
restype = PDB_RESIDUE_TYPES_BY_RESIDUE[resname]
                        except KeyError:
logging.warn('Could not type residue: {}'.format(entity))
json_output['residue_types'][
entity] = restype
# TYPE FILTER
if TYPE_FILTER != '*' and not PDB_RESIDUE_TYPES_BY_RESIDUE:
logging.warn('Not applying type filtering, because PDB '
'residue typing data is not available. '
'See https://github.com/harryjubb/pdb_interactions#residue-typing for information.')
if TYPE_FILTER != '*' and interaction_level in 'AR' and PDB_RESIDUE_TYPES_BY_RESIDUE:
logging.info('Filtering interactions by residue type for {} output...'.format(
LEVEL_MAP[interaction_level]))
# REMOVE INTERACTIONS NOT IN TYPE FILTER
json_output['interactions'] = {
entity: interactors for entity, interactors in json_output['interactions'].iteritems() if
json_output['residue_types'][entity] in TYPE_FILTER
}
# REMOVE ANY ENTITIES NOT INTERACTING FROM THE RESIDUE TYPES DICTIONARY
remaining_interacting_entities = set(list(itertools.chain(
*([entity] + interactors for entity, interactors in json_output['interactions'].iteritems())
)))
json_output['residue_types'] = {
entity: etype for entity, etype in json_output['residue_types'].iteritems()
if entity in remaining_interacting_entities
}
# WRITE OUT JSON OUTPUT
logging.info('Writing JSON for {}s...'.format(
LEVEL_MAP[interaction_level]))
with open('.'.join([INPUT_FILE_SPLITEXT, LEVEL_MAP[interaction_level], 'interactions' if TYPE_FILTER == '*' else '_'.join(TYPE_FILTER.split()), 'json']), 'wb') as fo:
json.dump(json_output, fo)
logging.info('JSON output written for {}s.'.format(
LEVEL_MAP[interaction_level]))
# FINISH UP
logging.info('Program end.')
|
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
from catapult_base.dependency_manager import dependency_info
from catapult_base.dependency_manager import exceptions
class BaseConfig(object):
"""A basic config class for use with the DependencyManager.
Initiated with a json file in the following format:
{ "config_type": "BaseConfig",
"dependencies": {
"dep_name1": {
"cloud_storage_base_folder": "base_folder1",
"cloud_storage_bucket": "bucket1",
"file_info": {
"platform1": {
"cloud_storage_hash": "hash_for_platform1",
"download_path": "download_path111",
"cs_remote_path": "cs_path111",
"version_in_cs": "1.11.1.11."
"local_paths": ["local_path1110", "local_path1111"]
},
"platform2": {
"cloud_storage_hash": "hash_for_platform2",
"download_path": "download_path2",
"cs_remote_path": "cs_path2",
"local_paths": ["local_path20", "local_path21"]
},
...
}
},
"dependency_name_2": {
...
},
...
}
}
Required fields: "dependencies" and "config_type".
Note that config_type must be "BaseConfig"
Assumptions:
"cloud_storage_base_folder" is a top level folder in the given
"cloud_storage_bucket" where all of the dependency files are stored
at "dependency_name"_"cloud_storage_hash".
"download_path" and all paths in "local_paths" are relative to the
config file's location.
All or none of the following cloud storage related fields must be
included in each platform dictionary:
"cloud_storage_hash", "download_path", "cs_remote_path"
"version_in_cs" is an optional cloud storage field, but is dependent
on the above cloud storage related fields.
  Also note that platform names are often of the form os_architecture.
Ex: "win_AMD64"
More information on the fields can be found in dependencies_info.py
"""
def __init__(self, file_path, writable=False):
""" Initialize a BaseConfig for the DependencyManager.
Args:
writable: False: This config will be used to lookup information.
True: This config will be used to update information.
file_path: Path to a file containing a json dictionary in the expected
json format for this config class. Base format expected:
{ "config_type": config_type,
"dependencies": dependencies_dict }
config_type: must match the return value of GetConfigType.
dependencies: A dictionary with the information needed to
create dependency_info instances for the given
dependencies.
See dependency_info.py for more information.
"""
self._config_path = file_path
self._writable = writable
if not file_path:
raise ValueError('Must supply config file path.')
if not os.path.exists(file_path):
if not writable:
raise exceptions.EmptyConfigError(file_path)
self._config_data = {}
self.CreateEmptyConfig(file_path)
else:
with open(file_path, 'r') as f:
config_data = json.load(f)
if not config_data:
raise exceptions.EmptyConfigError(file_path)
config_type = config_data.pop('config_type', None)
if config_type != self.GetConfigType():
raise ValueError(
'Supplied config_type (%s) is not the expected type (%s) in file '
'%s' % (config_type, self.GetConfigType(), file_path))
self._config_data = config_data.get('dependencies', {})
def IterDependencyInfo(self):
""" Yields a DependencyInfo for each dependency/platform pair.
Raises:
ReadWriteError: If called when the config is writable.
ValueError: If any of the dependencies contain partial information for
downloading from cloud_storage. (See dependency_info.py)
"""
if self._writable:
raise exceptions.ReadWriteError(
'Trying to read dependency info from a writable config. File for '
'config: %s' % self._config_path)
for dep in self._config_data:
base_path = os.path.dirname(self._config_path)
dependency_dict = self._config_data.get(dep, {})
platforms_dict = dependency_dict.get('file_info')
cs_bucket = dependency_dict.get('cloud_storage_bucket', None)
cs_base_folder = dependency_dict.get('cloud_storage_base_folder', '')
for platform in platforms_dict:
platform_info = platforms_dict.get(platform)
local_paths = platform_info.get('local_paths', [])
if local_paths:
paths = []
for path in local_paths:
path = self._FormatPath(path)
paths.append(os.path.abspath(os.path.join(base_path, path)))
local_paths = paths
download_path = platform_info.get('download_path', None)
if download_path:
download_path = self._FormatPath(download_path)
download_path = os.path.abspath(
os.path.join(base_path, download_path))
cs_remote_path = None
cs_hash = platform_info.get('cloud_storage_hash', None)
if cs_hash:
cs_remote_file = '%s_%s' % (dep, cs_hash)
cs_remote_path = cs_remote_file if not cs_base_folder else (
'%s/%s' % (cs_base_folder, cs_remote_file))
version_in_cs = platform_info.get('version_in_cs', None)
if download_path or cs_remote_path or cs_hash or version_in_cs:
dep_info = dependency_info.DependencyInfo(
dep, platform, self._config_path, cs_bucket=cs_bucket,
cs_remote_path=cs_remote_path, download_path=download_path,
cs_hash=cs_hash, version_in_cs=version_in_cs,
local_paths=local_paths)
else:
dep_info = dependency_info.DependencyInfo(
dep, platform, self._config_path, local_paths=local_paths)
yield dep_info
@classmethod
def CreateEmptyConfig(cls, file_path):
"""Create an empty BaseConfig json dict and write it out to |file_path|.
Raises:
ValueError: If the path already exists.
"""
if os.path.exists(file_path):
raise ValueError('File already exists, and would be overwritten.')
json_dict = {'config_type': cls.GetConfigType(),
'dependencies': {}}
with open(file_path, 'w') as outfile:
json.dump(json_dict, outfile, indent=2, sort_keys=True)
return json_dict
@classmethod
def GetConfigType(cls):
return 'BaseConfig'
@property
def config_path(self):
return self._config_path
def UpdateCloudStorageDependency(
self, dependency, platform, dependency_path, version=None):
"""Update the cloud storage hash and the version for the given dependency.
"""
# TODO(aiolos): Only allow the config to be updated if writable is True to
# avoid data changing underneath the dependency manager.
raise NotImplementedError
def GetVersion(self, dependency, platform):
"""Return the Version information for the given dependency."""
    if not self._config_data.get(dependency):
      raise ValueError('Dependency %s is not in config.' % dependency)
    if not self._config_data[dependency].get(platform):
raise ValueError('Dependency %s has no information for platform %s in '
'this config.' % (dependency, platform))
return self._config_data[dependency][platform].get('version_in_cs')
@classmethod
def _FormatPath(cls, file_path):
"""Format |file_path| for the current file system.
We may be downloading files for another platform, so paths must be
downloadable on the current system.
"""
if not file_path:
return file_path
if os.path.sep != '\\':
return file_path.replace('\\', os.path.sep)
elif os.path.sep != '/':
return file_path.replace('/', os.path.sep)
return file_path
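# Illustrative sketch (hypothetical path, not part of the original module): reading dependency
# info from a config file that follows the documented format:
#
#     config = BaseConfig('path/to/dependencies.json')
#     for dep_info in config.IterDependencyInfo():
#         ...  # one dependency_info.DependencyInfo per dependency/platform pair
#
# With writable=True and a missing file, CreateEmptyConfig() writes out a stub
# {"config_type": "BaseConfig", "dependencies": {}} dictionary instead of raising.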
|
|
import datetime
from django.conf import settings
from django.core.exceptions import FieldError
from django.db.backends.utils import truncate_name
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import ExpressionNode
from django.db.models.query_utils import select_related_descend, QueryWrapper
from django.db.models.sql.constants import (CURSOR, SINGLE, MULTI, NO_RESULTS,
ORDER_DIR, GET_ITERATOR_CHUNK_SIZE, SelectInfo)
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.query import get_order_dir, Query
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseError
from django.utils import six
from django.utils.six.moves import zip
from django.utils import timezone
class SQLCompiler(object):
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {'*': '*'}
# When ordering a queryset with distinct on a column not part of the
# select set, the ordering column needs to be added to the select
# clause. This information is needed both in SQL construction and
# masking away the ordering selects from the returned row.
self.ordering_aliases = []
self.ordering_params = []
def pre_sql_setup(self):
"""
Does any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
# TODO: after the query has been executed, the altered state should be
# cleaned. We are not using a clone() of the query here.
"""
if not self.query.tables:
self.query.join((None, self.query.get_meta().db_table, None))
if (not self.query.select and self.query.default_cols and not
self.query.included_inherited_models):
self.query.setup_inherited_models()
if self.query.select_related and not self.query.related_select_cols:
self.fill_related_selections()
def __call__(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select or (
name in self.query.external_aliases and name not in self.query.table_map)):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
return self(name)
def compile(self, node):
vendor_impl = getattr(
node, 'as_' + self.connection.vendor, None)
if vendor_impl:
return vendor_impl(self, self.connection)
else:
return node.as_sql(self, self.connection)
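    # For example, an expression node that defines as_postgresql(self, compiler, connection)
    # is compiled with that method when connection.vendor == 'postgresql'; otherwise compile()
    # falls back to the node's generic as_sql(compiler, connection).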
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
self.pre_sql_setup()
# After executing the query, we must get rid of any joins the query
# setup created. So, take note of alias counts before the query ran.
# However we do not want to get rid of stuff done in pre_sql_setup(),
# as the pre_sql_setup will modify query state in a way that forbids
# another run of it.
self.refcounts_before = self.query.alias_refcount.copy()
out_cols, s_params = self.get_columns(with_col_aliases)
ordering, o_params, ordering_group_by = self.get_ordering()
distinct_fields = self.get_distinct()
# This must come after 'select', 'ordering' and 'distinct' -- see
# docstring of get_from_clause() for details.
from_, f_params = self.get_from_clause()
where, w_params = self.compile(self.query.where)
having, h_params = self.compile(self.query.having)
having_group_by = self.query.having.get_group_by_cols()
params = []
for val in six.itervalues(self.query.extra_select):
params.extend(val[1])
result = ['SELECT']
if self.query.distinct:
result.append(self.connection.ops.distinct_sql(distinct_fields))
result.append(', '.join(out_cols + self.ordering_aliases))
params.extend(s_params)
params.extend(self.ordering_params)
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping, gb_params = self.get_grouping(having_group_by, ordering_group_by)
if grouping:
if distinct_fields:
raise NotImplementedError(
"annotate() + distinct(fields) not implemented.")
if not ordering:
ordering = self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
params.extend(gb_params)
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if ordering:
result.append('ORDER BY %s' % ', '.join(ordering))
params.extend(o_params)
if with_limits:
if self.query.high_mark is not None:
result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
if self.query.low_mark:
if self.query.high_mark is None:
val = self.connection.ops.no_limit_value()
if val:
result.append('LIMIT %d' % val)
result.append('OFFSET %d' % self.query.low_mark)
if self.query.select_for_update and self.connection.features.has_select_for_update:
if self.connection.get_autocommit():
raise TransactionManagementError("select_for_update cannot be used outside of a transaction.")
# If we've been asked for a NOWAIT query but the backend does not support it,
# raise a DatabaseError otherwise we could get an unexpected deadlock.
nowait = self.query.select_for_update_nowait
if nowait and not self.connection.features.has_select_for_update_nowait:
raise DatabaseError('NOWAIT is not supported on this database backend.')
result.append(self.connection.ops.for_update_sql(nowait=nowait))
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(self.refcounts_before)
return ' '.join(result), tuple(params)
def as_nested_sql(self):
"""
Perform the same functionality as the as_sql() method, returning an
SQL string and parameters. However, the alias prefixes are bumped
beforehand (in a copy -- the current query isn't changed), and any
ordering is removed if the query is unsliced.
Used when nesting this query inside another.
"""
obj = self.query.clone()
if obj.low_mark == 0 and obj.high_mark is None and not self.query.distinct_fields:
# If there is no slicing in use, then we can safely drop all ordering
obj.clear_ordering(True)
return obj.get_compiler(connection=self.connection).as_sql()
def get_columns(self, with_aliases=False):
"""
Returns the list of columns to use in the select statement, as well as
        a list of any extra parameters that need to be included. If no columns
have been specified, returns all columns relating to fields in the
model.
If 'with_aliases' is true, any column names that are duplicated
(without the table names) are given unique aliases. This is needed in
some cases to avoid ambiguity with nested queries.
"""
qn = self
qn2 = self.connection.ops.quote_name
result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in six.iteritems(self.query.extra_select)]
params = []
aliases = set(self.query.extra_select.keys())
if with_aliases:
col_aliases = aliases.copy()
else:
col_aliases = set()
if self.query.select:
only_load = self.deferred_to_columns()
for col, _ in self.query.select:
if isinstance(col, (list, tuple)):
alias, column = col
table = self.query.alias_map[alias].table_name
if table in only_load and column not in only_load[table]:
continue
r = '%s.%s' % (qn(alias), qn(column))
if with_aliases:
if col[1] in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append('%s AS %s' % (r, qn2(col[1])))
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(r)
aliases.add(r)
col_aliases.add(col[1])
else:
col_sql, col_params = self.compile(col)
result.append(col_sql)
params.extend(col_params)
if hasattr(col, 'alias'):
aliases.add(col.alias)
col_aliases.add(col.alias)
elif self.query.default_cols:
cols, new_aliases = self.get_default_columns(with_aliases,
col_aliases)
result.extend(cols)
aliases.update(new_aliases)
max_name_length = self.connection.ops.max_name_length()
for alias, aggregate in self.query.aggregate_select.items():
agg_sql, agg_params = self.compile(aggregate)
if alias is None:
result.append(agg_sql)
else:
result.append('%s AS %s' % (agg_sql, qn(truncate_name(alias, max_name_length))))
params.extend(agg_params)
for (table, col), _ in self.query.related_select_cols:
r = '%s.%s' % (qn(table), qn(col))
if with_aliases and col in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append(r)
aliases.add(r)
col_aliases.add(col)
self._select_aliases = aliases
return result, params
def get_default_columns(self, with_aliases=False, col_aliases=None,
start_alias=None, opts=None, as_pairs=False, from_parent=None):
"""
Computes the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Returns a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, returns a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
"""
result = []
if opts is None:
opts = self.query.get_meta()
qn = self
qn2 = self.connection.ops.quote_name
aliases = set()
only_load = self.deferred_to_columns()
if not start_alias:
start_alias = self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field, model in opts.get_concrete_fields_with_model():
if from_parent and model is not None and issubclass(from_parent, model):
# Avoid loading data for already loaded parents.
continue
alias = self.query.join_parent_model(opts, model, start_alias,
seen_models)
column = field.column
for seen_model, seen_alias in seen_models.items():
if seen_model and seen_alias == alias:
ancestor_link = seen_model._meta.get_ancestor_link(model)
if ancestor_link:
column = ancestor_link.column
break
table = self.query.alias_map[alias].table_name
if table in only_load and column not in only_load[table]:
continue
if as_pairs:
result.append((alias, field))
aliases.add(alias)
continue
if with_aliases and column in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s.%s AS %s' % (qn(alias),
qn2(column), c_alias))
col_aliases.add(c_alias)
aliases.add(c_alias)
else:
r = '%s.%s' % (qn(alias), qn2(column))
result.append(r)
aliases.add(r)
if with_aliases:
col_aliases.add(column)
return result, aliases
def get_distinct(self):
"""
        Returns a quoted list of fields to use in the DISTINCT ON part of the query.
Note that this method can alter the tables in the query, and thus it
must be called before get_from_clause().
"""
qn = self
qn2 = self.connection.ops.quote_name
result = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _ = self._setup_joins(parts, opts, None)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
result.append("%s.%s" % (qn(alias), qn2(target.column)))
return result
def get_ordering(self):
"""
Returns a tuple containing a list representing the SQL elements in the
"order by" clause, and the list of SQL elements that need to be added
to the GROUP BY clause as a result of the ordering.
Also sets the ordering_aliases attribute on this instance to a list of
extra aliases needed in the select.
Determining the ordering SQL can change the tables we need to include,
so this should be run *before* get_from_clause().
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
else:
ordering = (self.query.order_by
or self.query.get_meta().ordering
or [])
qn = self
qn2 = self.connection.ops.quote_name
distinct = self.query.distinct
select_aliases = self._select_aliases
result = []
group_by = []
ordering_aliases = []
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
# It's possible, due to model inheritance, that normal usage might try
# to include the same field more than once in the ordering. We track
# the table/column pairs we use and discard any after the first use.
processed_pairs = set()
params = []
ordering_params = []
# For plain DISTINCT queries any ORDER BY clause must appear
# in SELECT clause.
# http://www.postgresql.org/message-id/[email protected]
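        # Assumed example of the rule above: for
        #   SELECT DISTINCT "a"."x" ... ORDER BY "a"."y"
        # the ordering column "a"."y" must also be selected; such columns are
        # collected in ordering_aliases and trimmed from the result rows later
        # (see order_modified_iter).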
must_append_to_select = distinct and not self.query.distinct_fields
for pos, field in enumerate(ordering):
if field == '?':
result.append(self.connection.ops.random_function_sql())
continue
if isinstance(field, int):
if field < 0:
order = desc
field = -field
else:
order = asc
result.append('%s %s' % (field, order))
group_by.append((str(field), []))
continue
col, order = get_order_dir(field, asc)
if col in self.query.aggregate_select:
result.append('%s %s' % (qn(col), order))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), col)
processed_pairs.add((table, col))
if not must_append_to_select or elt in select_aliases:
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
elif not self.query._extra or get_order_dir(field)[0] not in self.query._extra:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
for table, cols, order in self.find_ordering_name(field,
self.query.get_meta(), default_order=asc):
for col in cols:
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), qn2(col))
processed_pairs.add((table, col))
if must_append_to_select and elt not in select_aliases:
ordering_aliases.append(elt)
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
else:
elt = qn2(col)
if col not in self.query.extra_select:
if must_append_to_select:
sql = "(%s) AS %s" % (self.query.extra[col][0], elt)
ordering_aliases.append(sql)
ordering_params.extend(self.query.extra[col][1])
result.append('%s %s' % (elt, order))
else:
result.append("(%s) %s" % (self.query.extra[col][0], order))
params.extend(self.query.extra[col][1])
else:
result.append('%s %s' % (elt, order))
group_by.append(self.query.extra[col])
self.ordering_aliases = ordering_aliases
self.ordering_params = ordering_params
return result, params, group_by
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Returns the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
pieces = name.split(LOOKUP_SEP)
field, targets, alias, joins, path, opts = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model unless the attribute name
# of the field is specified.
if field.rel and path and opts.ordering and name != field.attname:
# Firstly, avoid infinite loops.
if not already_seen:
already_seen = set()
join_tuple = tuple(self.query.alias_map[j].table_name for j in joins)
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [(alias, [t.column for t in targets], order)]
def _setup_joins(self, pieces, opts, alias):
"""
A helper method for get_ordering and get_distinct.
        Note that get_ordering and get_distinct must produce the same target
        columns on the same input, as the prefixes of get_ordering and get_distinct
must match. Executing SQL where this is not true is an error.
"""
if not alias:
alias = self.query.get_initial_alias()
field, targets, opts, joins, path = self.query.setup_joins(
pieces, opts, alias)
alias = joins[-1]
return field, targets, alias, joins, path, opts
def get_from_clause(self):
"""
Returns a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Sub-classes, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables we need. This means the select columns,
ordering and distinct must be done first.
"""
result = []
qn = self
qn2 = self.connection.ops.quote_name
first = True
from_params = []
for alias in self.query.tables:
if not self.query.alias_refcount[alias]:
continue
try:
name, alias, join_type, lhs, join_cols, _, join_field = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
alias_str = '' if alias == name else (' %s' % alias)
if join_type and not first:
extra_cond = join_field.get_extra_restriction(
self.query.where_class, alias, lhs)
if extra_cond:
extra_sql, extra_params = self.compile(extra_cond)
extra_sql = 'AND (%s)' % extra_sql
from_params.extend(extra_params)
else:
extra_sql = ""
result.append('%s %s%s ON ('
% (join_type, qn(name), alias_str))
for index, (lhs_col, rhs_col) in enumerate(join_cols):
if index != 0:
result.append(' AND ')
result.append('%s.%s = %s.%s' %
(qn(lhs), qn2(lhs_col), qn(alias), qn2(rhs_col)))
result.append('%s)' % extra_sql)
else:
connector = '' if first else ', '
result.append('%s%s%s' % (connector, qn(name), alias_str))
first = False
for t in self.query.extra_tables:
alias, unused = self.query.table_alias(t)
            # Only add the alias if it's not already present (the table_alias()
            # call increments the refcount, so an alias refcount of one means
            # this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
connector = '' if first else ', '
result.append('%s%s' % (connector, qn(alias)))
first = False
return result, from_params
def get_grouping(self, having_group_by, ordering_group_by):
"""
Returns a tuple representing the SQL elements in the "group by" clause.
"""
qn = self
result, params = [], []
if self.query.group_by is not None:
select_cols = self.query.select + self.query.related_select_cols
# Just the column, not the fields.
select_cols = [s[0] for s in select_cols]
if (len(self.query.get_meta().concrete_fields) == len(self.query.select)
and self.connection.features.allows_group_by_pk):
self.query.group_by = [
(self.query.get_meta().db_table, self.query.get_meta().pk.column)
]
select_cols = []
seen = set()
cols = self.query.group_by + having_group_by + select_cols
for col in cols:
col_params = ()
if isinstance(col, (list, tuple)):
sql = '%s.%s' % (qn(col[0]), qn(col[1]))
elif hasattr(col, 'as_sql'):
                    sql, col_params = self.compile(col)
else:
sql = '(%s)' % str(col)
if sql not in seen:
result.append(sql)
params.extend(col_params)
seen.add(sql)
# Still, we need to add all stuff in ordering (except if the backend can
# group by just by PK).
if ordering_group_by and not self.connection.features.allows_group_by_pk:
for order, order_params in ordering_group_by:
# Even if we have seen the same SQL string, it might have
# different params, so, we add same SQL in "has params" case.
if order not in seen or order_params:
result.append(order)
params.extend(order_params)
seen.add(order)
# Unconditionally add the extra_select items.
for extra_select, extra_params in self.query.extra_select.values():
sql = '(%s)' % str(extra_select)
result.append(sql)
params.extend(extra_params)
return result, params
def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
requested=None, restricted=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
self.query.related_select_cols = []
only_load = self.query.get_loaded_field_names()
# Setup for the case when only particular related fields should be
# included in the related selection.
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
for f, model in opts.get_fields_with_model():
# The get_fields_with_model() returns None for fields that live
# in the field's local model. So, for those fields we want to use
# the f.model - that is the field's local model.
field_model = model or f.model
if not select_related_descend(f, restricted, requested,
only_load.get(field_model)):
continue
_, _, _, joins, _ = self.query.setup_joins(
[f.name], opts, root_alias)
alias = joins[-1]
columns, _ = self.get_default_columns(start_alias=alias,
opts=f.rel.to._meta, as_pairs=True)
self.query.related_select_cols.extend(
SelectInfo((col[0], col[1].column), col[1]) for col in columns)
if restricted:
next = requested.get(f.name, {})
else:
next = False
self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
next, restricted)
if restricted:
related_fields = [
(o.field, o.model)
for o in opts.get_all_related_objects()
if o.field.unique
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested,
only_load.get(model), reverse=True):
continue
_, _, _, joins, _ = self.query.setup_joins(
[f.related_query_name()], opts, root_alias)
alias = joins[-1]
from_parent = (opts.model if issubclass(model, opts.model)
else None)
columns, _ = self.get_default_columns(start_alias=alias,
opts=model._meta, as_pairs=True, from_parent=from_parent)
self.query.related_select_cols.extend(
SelectInfo((col[0], col[1].column), col[1]) for col in columns)
next = requested.get(f.related_query_name(), {})
self.fill_related_selections(model._meta, alias, cur_depth + 1,
next, restricted)
def deferred_to_columns(self):
"""
        Converts the self.deferred_loading data structure to a mapping of table
names to sets of column names which are to be loaded. Returns the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.deferred_to_columns_cb)
return columns
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
fields = None
has_aggregate_select = bool(self.query.aggregate_select)
for rows in self.execute_sql(MULTI):
for row in rows:
if has_aggregate_select:
loaded_fields = self.query.get_loaded_field_names().get(self.query.model, set()) or self.query.select
aggregate_start = len(self.query.extra_select) + len(loaded_fields)
aggregate_end = aggregate_start + len(self.query.aggregate_select)
if resolve_columns:
if fields is None:
# We only set this up here because
# related_select_cols isn't populated until
# execute_sql() has been called.
# We also include types of fields of related models that
# will be included via select_related() for the benefit
# of MySQL/MySQLdb when boolean fields are involved
# (#15040).
# This code duplicates the logic for the order of fields
# found in get_columns(). It would be nice to clean this up.
if self.query.select:
fields = [f.field for f in self.query.select]
elif self.query.default_cols:
fields = self.query.get_meta().concrete_fields
else:
fields = []
fields = fields + [f.field for f in self.query.related_select_cols]
# If the field was deferred, exclude it from being passed
# into `resolve_columns` because it wasn't selected.
only_load = self.deferred_to_columns()
if only_load:
fields = [f for f in fields if f.model._meta.db_table not in only_load or
f.column in only_load[f.model._meta.db_table]]
if has_aggregate_select:
# pad None in to fields for aggregates
fields = fields[:aggregate_start] + [
None for x in range(0, aggregate_end - aggregate_start)
] + fields[aggregate_start:]
row = self.resolve_columns(row, fields)
if has_aggregate_select:
row = tuple(row[:aggregate_start]) + tuple(
self.query.resolve_aggregate(value, aggregate, self.connection)
for (alias, aggregate), value
in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end])
) + tuple(row[aggregate_end:])
yield row
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
# This is always executed on a query clone, so we can modify self.query
self.query.add_extra({'a': 1}, None, None, None, None, None)
self.query.set_extra_mask(['a'])
return bool(self.execute_sql(SINGLE))
def execute_sql(self, result_type=MULTI):
"""
Run the query against the database and returns the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
if not result_type:
result_type = NO_RESULTS
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
cursor.close()
raise
if result_type == CURSOR:
# Caller didn't specify a result_type, so just give them back the
# cursor to process (and close).
return cursor
if result_type == SINGLE:
try:
if self.ordering_aliases:
return cursor.fetchone()[:-len(self.ordering_aliases)]
return cursor.fetchone()
finally:
# done with the cursor
cursor.close()
if result_type == NO_RESULTS:
cursor.close()
return
# The MULTI case.
if self.ordering_aliases:
result = order_modified_iter(cursor, len(self.ordering_aliases),
self.connection.features.empty_fetchmany_value)
else:
result = cursor_iter(cursor,
self.connection.features.empty_fetchmany_value)
if not self.connection.features.can_use_chunked_reads:
try:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further.
return list(result)
finally:
# done with the cursor
cursor.close()
return result
def as_subquery_condition(self, alias, columns, qn):
inner_qn = self
qn2 = self.connection.ops.quote_name
if len(columns) == 1:
sql, params = self.as_sql()
return '%s.%s IN (%s)' % (qn(alias), qn2(columns[0]), sql), params
for index, select_col in enumerate(self.query.select):
lhs = '%s.%s' % (inner_qn(select_col.col[0]), qn2(select_col.col[1]))
rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
self.query.where.add(
QueryWrapper('%s = %s' % (lhs, rhs), []), 'AND')
sql, params = self.as_sql()
return 'EXISTS (%s)' % sql, params
class SQLInsertCompiler(SQLCompiler):
def __init__(self, *args, **kwargs):
self.return_id = False
super(SQLInsertCompiler, self).__init__(*args, **kwargs)
def placeholder(self, field, val):
if field is None:
# A field value of None means the value is raw.
return val
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
return field.get_placeholder(val, self.connection)
else:
# Return the common case for the placeholder
return '%s'
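    # Rough shape of the statements built by as_sql() below (hypothetical
    # identifiers): a single-row insert renders roughly as
    #   INSERT INTO "app_entry" ("name", "rank") VALUES (%s, %s)
    # while the bulk path appends the backend's bulk_insert_sql() fragment and
    # flattens all value groups into a single parameter tuple.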
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
result = ['INSERT INTO %s' % qn(opts.db_table)]
has_fields = bool(self.query.fields)
fields = self.query.fields if has_fields else [opts.pk]
result.append('(%s)' % ', '.join(qn(f.column) for f in fields))
if has_fields:
params = values = [
[
f.get_db_prep_save(getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True), connection=self.connection)
for f in fields
]
for obj in self.query.objs
]
else:
values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
params = [[]]
fields = [None]
can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
not self.return_id and self.connection.features.has_bulk_insert)
if can_bulk:
placeholders = [["%s"] * len(fields)]
else:
placeholders = [
[self.placeholder(field, v) for field, v in zip(fields, val)]
for val in values
]
# Oracle Spatial needs to remove some values due to #10888
params = self.connection.ops.modify_insert_params(placeholders, params)
if self.return_id and self.connection.features.can_return_id_from_insert:
params = params[0]
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
result.append("VALUES (%s)" % ", ".join(placeholders[0]))
r_fmt, r_params = self.connection.ops.return_insert_id()
# Skip empty r_fmt to allow subclasses to customize behavior for
# 3rd party backends. Refs #19096.
if r_fmt:
result.append(r_fmt % col)
params += r_params
return [(" ".join(result), tuple(params))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
return [(" ".join(result), tuple(v for val in values for v in val))]
else:
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholders, params)
]
def execute_sql(self, return_id=False):
assert not (return_id and len(self.query.objs) != 1)
self.return_id = return_id
with self.connection.cursor() as cursor:
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not (return_id and cursor):
return
if self.connection.features.can_return_id_from_insert:
return self.connection.ops.fetch_returned_insert_id(cursor)
return self.connection.ops.last_insert_id(cursor,
self.query.get_meta().db_table, self.query.get_meta().pk.column)
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
assert len(self.query.tables) == 1, \
"Can only delete from one table at a time."
qn = self
result = ['DELETE FROM %s' % qn(self.query.tables[0])]
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return '', ()
table = self.query.tables[0]
qn = self
result = ['UPDATE %s' % qn(table)]
result.append('SET')
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'prepare_database_save'):
if field.rel or isinstance(val, ExpressionNode):
val = val.prepare_database_save(field)
else:
raise TypeError("Database is trying to update a relational field "
"of type %s with a value of type %s. Make sure "
"you are setting the correct relations" %
(field.__class__.__name__, val.__class__.__name__))
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self.connection)
else:
placeholder = '%s'
if hasattr(val, 'evaluate'):
val = SQLEvaluator(val, self.query, allow_joins=False)
name = field.column
if hasattr(val, 'as_sql'):
sql, params = self.compile(val)
values.append('%s = %s' % (qn(name), sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
if not values:
return '', ()
result.append(', '.join(values))
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Returns the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
try:
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty and aux_rows:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, we need to do some
munging of the "where" conditions to match the format required for
(portable) SQL updates. That is done here.
Further, if we are going to be running multiple updates, we pull out
the id values to update at this point so that they don't change as a
result of the progressive updates.
"""
self.query.select_related = False
self.query.clear_ordering(True)
super(SQLUpdateCompiler, self).pre_sql_setup()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
# We need to use a sub-select in the where clause to filter on things
# from other tables.
query = self.query.clone(klass=Query)
query._extra = {}
query.select = []
query.add_fields([query.get_meta().pk.name])
# Recheck the count - it is possible that fiddling with the select
# fields above removes tables from the query. Refs #18304.
count = query.count_active_tables()
if not self.query.related_updates and count == 1:
return
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
for alias in self.query.tables[1:]:
self.query.alias_refcount[alias] = 0
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self, qn=None):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
if qn is None:
qn = self
sql, params = [], []
for aggregate in self.query.aggregate_select.values():
agg_sql, agg_params = self.compile(aggregate)
sql.append(agg_sql)
params.extend(agg_params)
sql = ', '.join(sql)
params = tuple(params)
sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery)
params = params + self.query.sub_params
return sql, params
class SQLDateCompiler(SQLCompiler):
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
if resolve_columns:
from django.db.models.fields import DateField
fields = [DateField()]
else:
from django.db.backends.utils import typecast_date
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
date = row[offset]
if resolve_columns:
date = self.resolve_columns(row, fields)[offset]
elif needs_string_cast:
date = typecast_date(str(date))
if isinstance(date, datetime.datetime):
date = date.date()
yield date
class SQLDateTimeCompiler(SQLCompiler):
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
if resolve_columns:
from django.db.models.fields import DateTimeField
fields = [DateTimeField()]
else:
from django.db.backends.utils import typecast_timestamp
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
datetime = row[offset]
if resolve_columns:
datetime = self.resolve_columns(row, fields)[offset]
elif needs_string_cast:
datetime = typecast_timestamp(str(datetime))
# Datetimes are artificially returned in UTC on databases that
# don't support time zone. Restore the zone used in the query.
if settings.USE_TZ:
if datetime is None:
raise ValueError("Database returned an invalid value "
"in QuerySet.datetimes(). Are time zone "
"definitions for your database and pytz installed?")
datetime = datetime.replace(tzinfo=None)
datetime = timezone.make_aware(datetime, self.query.tzinfo)
yield datetime
def cursor_iter(cursor, sentinel):
"""
Yields blocks of rows from a cursor and ensures the cursor is closed when
done.
"""
try:
for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
sentinel):
yield rows
finally:
cursor.close()
def order_modified_iter(cursor, trim, sentinel):
"""
Yields blocks of rows from a cursor. We use this iterator in the special
case when extra output columns have been added to support ordering
requirements. We must trim those extra columns before anything else can use
the results, since they're only needed to make the SQL valid.
"""
try:
for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
sentinel):
yield [r[:-trim] for r in rows]
finally:
cursor.close()
|
|
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
notification.py: Parser for notification events received from Controller
"""
import os
import re
import xmltodict
from pysdn.common.utils import dbg_print
yang_namespace_to_prefix_map = {
'urn:opendaylight:inventory': 'inv',
    'urn:opendaylight:netconf-node-inventory': 'netinv',
'urn:opendaylight:flow:inventory': 'flownode',
'urn:opendaylight:flow:statistics': 'fstat',
'urn:opendaylight:flow:table:statistics': 'flowstat',
'urn:opendaylight:port:statistics': 'portstat',
'urn:TBD:params:xml:ns:yang:network-topology': 'nt',
'urn:opendaylight:model:topology:inventory': 'nt1',
'urn:opendaylight:host-tracker': 'host-track',
}
def yang_nsname_to_prefix(nsname):
if nsname in yang_namespace_to_prefix_map:
return yang_namespace_to_prefix_map[nsname]
else:
return nsname
def yang_prefix_to_nsname(prefix):
    for k, v in yang_namespace_to_prefix_map.items():
if v == prefix:
return k
return prefix
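# Illustrative usage of the two helpers above (values taken from the mapping):
#   yang_nsname_to_prefix('urn:opendaylight:inventory')   # -> 'inv'
#   yang_prefix_to_nsname('inv')                          # -> 'urn:opendaylight:inventory'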
class NetworkTopologyChangeNotification():
""" Parser for notification messages generated by the Controller
when it detects changes in the network topology data tree.
"""
def __init__(self, event):
self.added_switches = []
self.removed_switches = []
self.added_hosts = []
self.removed_hosts = []
self.added_links = []
self.removed_links = []
d = xmltodict.parse(event)
try:
p1 = 'notification'
notification = d[p1]
p2 = 'eventTime'
self.timestamp = notification[p2]
self.events = []
p3 = 'data-changed-notification'
p4 = 'data-change-event'
events = notification[p3][p4]
if isinstance(events, list):
for item in events:
tc_evt = TopoChangeEvent(item)
self.events.append(tc_evt)
elif isinstance(events, dict):
tc_evt = TopoChangeEvent(events)
self.events.append(tc_evt)
else:
msg = ("DEBUG: events=%s, unexpected data format '%s'" %
(events, type(events)))
dbg_print(msg)
for event in self.events:
if event.created():
if event.is_switch():
self.added_switches.append(event.get_node_id())
elif event.is_host():
self.added_hosts.append(event.get_node_id())
elif event.is_link():
self.added_links.append(event.get_link_id())
elif event.deleted():
if event.is_switch():
self.removed_switches.append(event.get_node_id())
elif event.is_host():
self.removed_hosts.append(event.get_node_id())
elif event.is_link():
self.removed_links.append(event.get_link_id())
except(Exception):
msg = "DEBUG: failed to process event '%s'" % event
dbg_print(msg)
def get_time(self):
return self.timestamp
def switches_added(self):
return self.added_switches
def switches_removed(self):
return self.removed_switches
def hosts_added(self):
return self.added_hosts
def hosts_removed(self):
return self.removed_hosts
def links_added(self):
return self.added_links
def links_removed(self):
return self.removed_links
def print_events(self):
for event in self.events:
            print "\n".strip()
            event.do_print()
            print "\n".strip()
class TopoChangeEvent():
""" Parser for the data change event located in the network topology
change notification message received from the Controller.
Helper subclass for the 'NetworkTopologyChangeNotification' class.
"""
def __init__(self, event):
p = 'path'
if isinstance(event, dict):
for k, v in event.items():
if k == p:
self.path_info = PathInfo(v)
else:
setattr(self, k, v)
else:
msg = ("DEBUG: event=%s, unexpected data format '%s'" %
(event, type(event)))
dbg_print(msg)
def created(self):
res = False
p = 'operation'
if hasattr(self, p):
attr = getattr(self, p)
res = (attr == 'created')
return res
def deleted(self):
res = False
p = 'operation'
if hasattr(self, p):
attr = getattr(self, p)
res = (attr == 'deleted')
return res
def updated(self):
res = False
p = 'operation'
if hasattr(self, p):
attr = getattr(self, p)
res = (attr == 'updated')
return res
def get_path(self):
path = None
p = 'path_info'
if hasattr(self, p):
path = str(self.path_info.path)
return path
def is_node(self):
res = False
p = 'path_info'
if hasattr(self, p):
path = self.path_info.path
basename = os.path.basename(path)
if basename:
p1 = '.*node-id$'
r = re.search(p1, basename)
if r is not None:
res = True
return res
def is_switch(self):
res = False
if self.is_node():
node_id = self.get_node_id()
if node_id and node_id.startswith('openflow'):
res = True
return res
def is_host(self):
res = False
if self.is_node():
node_id = self.get_node_id()
if node_id and node_id.startswith('host'):
res = True
return res
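    # Illustrative input/output for get_node_id() below (the path is an assumed
    # example; only the node-id=... bracket matters to the parser):
    #   .../node[node-id='openflow:1']/... -> 'openflow:1'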
def get_node_id(self):
node_id = None
p = 'path_info'
if hasattr(self, p):
path = self.path_info.path
chunks = repr(path).split(']')
if chunks:
p = 'node-id='
for s in chunks:
idx = s.find(p)
if(idx >= 0):
node_id = s[idx + len(p):].translate(None, "[]'\"")
break
return node_id
def is_link(self):
res = False
p = 'path_info'
if hasattr(self, p):
path = self.path_info.path
basename = os.path.basename(path)
if basename:
p1 = '.*link-id$'
r = re.search(p1, basename)
if r is not None:
res = True
return res
def get_link_id(self):
link_id = None
p = 'path_info'
if hasattr(self, p):
path = self.path_info.path
chunks = repr(path).split(']')
if chunks:
p = 'link-id='
for s in chunks:
idx = s.find(p)
if(idx >= 0):
link_id = s[idx + len(p):].translate(None, "[]'\"")
break
return link_id
def do_print(self):
w = 65
print ("%s" % '<' * w)
print " operation: %s" % self.operation
self.path_info.do_print()
print ("%s" % '>' * w)
class InventoryChangeNotification():
""" Parser for notification messages generated by the Controller
when it detects changes in its internal inventory data store.
"""
def __init__(self, event):
self.added_nodes = []
self.removed_nodes = []
self.added_flows = []
self.removed_flows = []
d = xmltodict.parse(event)
try:
p1 = 'notification'
notification = d[p1]
p2 = 'eventTime'
self.timestamp = notification[p2]
self.events = []
p3 = 'data-changed-notification'
p4 = 'data-change-event'
events = notification[p3][p4]
if isinstance(events, list):
for item in events:
evt = InventoryChangeEvent(item)
self.events.append(evt)
elif isinstance(events, dict):
evt = InventoryChangeEvent(events)
self.events.append(evt)
else:
msg = ("DEBUG: events=%s, unexpected data format '%s'" %
(events, type(events)))
dbg_print(msg)
for event in self.events:
if event.created():
if event.is_node():
self.added_nodes.append(event.get_node_id())
elif event.is_flow_entry():
flow_info = FlowInfo(event)
self.added_flows.append(flow_info)
elif event.deleted():
if event.is_node():
self.removed_nodes.append(event.get_node_id())
elif event.is_flow_entry():
flow_info = FlowInfo(event)
self.removed_flows.append(flow_info)
except(Exception) as e:
print "Error, %s" % e
def get_time(self):
return self.timestamp
def nodes_added(self):
return self.added_nodes
def nodes_removed(self):
return self.removed_nodes
def flows_added(self):
return self.added_flows
def flows_removed(self):
return self.removed_flows
def print_events(self):
for event in self.events:
if event.created():
print "\n".strip()
event.do_print()
print "\n".strip()
class InventoryChangeEvent():
""" Parser for the data change event located in the inventory change
notification message received from the Controller.
Helper subclass for the 'InventoryChangeNotification' class.
"""
def __init__(self, event):
self.path_info = None
p = 'path'
if isinstance(event, dict):
for k, v in event.items():
if k == p:
self.path_info = PathInfo(v)
else:
setattr(self, k, v)
else:
msg = ("DEBUG: events=%s, unexpected data format '%s'" %
(event, type(event)))
dbg_print(msg)
def created(self):
res = False
p = 'operation'
if hasattr(self, p):
attr = getattr(self, p)
res = (attr == 'created')
return res
def deleted(self):
res = False
p = 'operation'
if hasattr(self, p):
attr = getattr(self, p)
res = (attr == 'deleted')
return res
def updated(self):
res = False
p = 'operation'
if hasattr(self, p):
attr = getattr(self, p)
res = (attr == 'updated')
return res
def get_path(self):
path = None
p = 'path_info'
if hasattr(self, p):
path = str(self.path_info.path)
return path
def is_node(self):
res = False
p = 'path_info'
if hasattr(self, p):
path = self.path_info.path
basename = os.path.basename(path)
if basename:
p1 = 'node\[.*:id=.*\]'
r = re.search(p1, basename)
if r is not None:
res = True
return res
def is_switch(self):
res = False
if self.is_node():
node_id = self.get_node_id()
if node_id and node_id.startswith('openflow'):
res = True
return res
def get_node_id(self):
node_id = None
p = 'path_info'
if hasattr(self, p):
path = self.path_info.path
chunks = str(path).split('[')
p = ':id='
for s in chunks:
idx = s.find(p)
if(idx >= 0):
node_id = s[idx + len(p):].translate(None, "[]'\"")
break
return node_id
def is_flow_entry(self):
res = False
p = 'path_info'
if hasattr(self, p):
path = self.path_info.path
basename = os.path.basename(path)
if basename:
p1 = 'flow\[.*:id=.*\]'
r = re.search(p1, basename)
if r is not None:
res = True
return res
def get_flow_entry_id(self):
flow_id = None
p = 'path_info'
if hasattr(self, p):
path = self.path_info.path
chunks = str(path).split('[')
p = ':id='
for s in chunks:
idx = s.find(p)
if(idx >= 0):
flow_id = s[idx + len(p):].translate(None, "[]'\"")
return flow_id
def do_print(self):
w = 65
print ("%s" % '<' * w)
print " operation: %s" % self.operation
self.path_info.do_print()
print ("%s" % '>' * w)
class FlowInfo():
def __init__(self, event):
self.node_id = None
self.table_id = None
self.flow_id = None
if isinstance(event, InventoryChangeEvent) and event.is_flow_entry():
path = event.get_path()
try:
chunks = path.split('/')
l = []
p = ':id='
for s in chunks:
idx = s.find(p)
if idx >= 0:
l.append(s[idx + len(p):].translate(None, "'[]"))
self.node_id = l[0]
self.table_id = l[1]
self.flow_id = l[2]
except(Exception):
msg = "DEBUG: unexpected string format: %s" % path
dbg_print(msg)
else:
msg = "wrong class usage"
dbg_print(msg)
def to_string(self):
s = ("{node='%s', table='%s', flowid='%s'}" %
(self.node_id, self.table_id, self.flow_id))
return s
class PathInfo():
""" Represents the path to the node in the Controller's internal
data tree where the change has been detected.
Helper subclass for the 'NetworkTopologyChangeNotification'
and 'InventoryChangeNotification' classes.
"""
def __init__(self, info):
self.namespaces = None
self.path = None
if isinstance(info, dict):
p1 = '#text'
p2 = '@xmlns'
try:
path = info[p1]
namespaces = []
for k, v in info.items():
if k.startswith(p2):
pfx = yang_nsname_to_prefix(v)
d = {'ns': v, 'pfx': pfx}
namespaces.append(d)
nickname = k.split(':')[-1]
path = path.replace(nickname, pfx)
self.namespaces = namespaces
self.path = path
except:
msg = "DEBUG: failed to process info '%s'" % info
dbg_print(msg)
elif isinstance(info, basestring):
self.path = info
else:
msg = "DEBUG: info=%s, " \
"unexpected data format '%s'" % (info, type(info))
dbg_print(msg)
def do_print(self):
for ns in self.namespaces:
print " namespace: %s (prefix: %s)" % (ns['ns'], ns['pfx'])
print " path: %s" % self.path
|
|
# $Author: norbert $
# $Date: 2014-08-03 20:34:12 +0200 (So, 03 Aug 2014) $
# $Revision: 252 $
import os
import sys
import string
import argparse
import shlex
import platform
import traceback
import subprocess
import email
import smtplib
import itertools
import logging
import tempfile
import locale
from email.mime.text import MIMEText
from datetime import datetime, date, time, timedelta
locale.setlocale(locale.LC_ALL, '') # use system's locale
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',level=logging.DEBUG)
class ProgError(Exception):
pass
class snapshot:
def __init__(self, args):
# Default snapshot arguments, if --argsfile is not used.
# --CreateDir: Create destination directory if it does not exist.
self.default_snapshot_args = ['--CreateDir', '--AutoBackupSize:512', '-L0', '-Gx' ]
# Those arguments can not be used in the snapshot arguments file because they are used by this script.
self.bad_snapshot_args = ['-W', '--LogFile', '-h' ]
# Date format used in backup file names. Should not be changed.
self.dateformat = '%Y%m%d-%H%M%S'
self.args = args
self.backup_file = None
self.backup_commandline = None
self.backup_nr = None
self.backup_type = None
self.machine = None
self.drive = None
self.failed = False
self.exception = None
self.logfilename = None
self.logtext = None
self.returncode = None
self.deletetime_all = None
self.deletetime_diff = None
self.deleted_files = []
def split_args(self, s):
lex = shlex.shlex(s, posix=True)
lex.whitespace_split = True
lex.escape = ''
lex.commenters = ''
return list(lex)
def read_snapshot_args(self, argsfile):
if not os.path.isfile(argsfile):
raise(ProgError('The snapshot arguments file \'{0}\' was not found!'.format(argsfile)))
with open(argsfile, 'r') as logfile:
argstr = logfile.read()
arglist = self.split_args(argstr)
for arg in arglist:
for bad_arg in self.bad_snapshot_args:
if arg.lower().startswith(bad_arg.lower()):
raise(ProgError('Argument "{0}" can not be used in snapshot arguments file "{1}", because this argument is used by this script itself!'.format(arg, argsfile)))
return arglist
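    # Backup file names are produced in dobackup() as
    #   <machine>_<drive>_b<backupnr>_<YYYYmmdd-HHMMSS>_<full|diff>.sna
    # e.g. (hypothetical) host1_c_b2_20140803-203412_diff.sna, which
    # dismantle() below splits back into its parts.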
def dismantle(self, file):
name = os.path.splitext(file)[0] # remove extension
parts = name.split('_')
if len(parts) != 5:
            raise(ProgError('{0}: invalid backup file name. It must be composed of five parts separated by \'_\'.'.format(file)))
nr = parts[2]
if not nr.startswith('b'):
raise(ProgError('{0}: invalid backup number. It must start with \'b\' followed by a number.'.format(file)))
nr = nr[1:]
if not nr.isdigit():
raise(ProgError('{0}: invalid backup number. It must start with \'b\' followed by a number.'.format(file)))
nr = int(nr)
type = parts[4]
if type != 'full' and type != 'diff':
raise(ProgError('{0}: invalid type \'{1}\'.'.format(file, type)))
        ds = parts[3]
        if len(ds) != 15:
            raise(ProgError('{0}: invalid date part \'{1}\'.'.format(file, ds)))
dd = datetime.strptime(ds, self.dateformat)
return (file, nr, type, dd)
def makemachinefilter(self, machine, drive):
def findmachine(x) :
return x.startswith(machine + '_' + drive + '_')
return findmachine
def findhsh(self, x) : return x.endswith('.hsh')
def findsna(self, x) : return x.endswith('.sna')
def get_existing_backups(self):
# get all backup files which belong to this machine and drive
filesall = filter(self.findsna, os.listdir(self.args.backupdir))
files = filter(self.makemachinefilter(self.machine, self.drive), filesall)
# get parts of each file name
struct = map(self.dismantle, files)
if not struct:
struct = []
return struct
def delete_backupfiles(self, files):
logging.debug(files)
retval = []
for filename in files:
base = os.path.splitext(filename)[0].lower()
for f in sorted(os.listdir(self.args.backupdir)):
                f = f.lower()
                dpath = os.path.join(self.args.backupdir, f)
                # only consider regular files inside the backup directory
                if os.path.isfile(dpath):
                    [dbase, dext] = os.path.splitext(f)
if base == dbase:
# delete only *.hsh and *.sn* files
if dext.startswith('.sn') or dext == '.hsh':
logging.info('Deleting {0}'.format(dpath))
retval.append(dpath)
if not self.args.simulate:
os.remove(dpath)
return retval
def dobackup(self):
self.machine = platform.node().lower()
self.drive = self.args.drive.lower()
if self.drive.endswith(':'):
self.drive = self.drive[:-1]
# check if snapshot command exists
if not os.path.isfile(self.args.cmd):
raise(ProgError('The snapshot executable \'{0}\' was not found!'.format(self.args.cmd)))
if not os.access(self.args.cmd, os.X_OK):
raise(ProgError('The snapshot executable \'{0}\' is not executable!'.format(self.args.cmd)))
# check if backup dir is not a file and create it
if os.path.isfile(self.args.backupdir):
raise(ProgError('The backup directory \'{0}\' is not a directory !!!'.format(self.args.backupdir)))
# read snapshot args file
snapshot_args = self.default_snapshot_args
if self.args.argsfile:
snapshot_args = self.read_snapshot_args(self.args.argsfile)
self.args.backupdir = os.path.abspath(self.args.backupdir)
if not os.path.isdir(self.args.backupdir):
os.makedirs(self.args.backupdir)
struct = self.get_existing_backups()
# Sort by backup number and date. The last one in the list is the most recent backup.
struct = sorted(struct, key=lambda x: (x[1], x[3]))
# filter out all full backups
fullbackups = [ s for s in struct if s[2] == 'full' ]
# determine last full backup
lastfull = None
if len(fullbackups) > 0:
lastfull = fullbackups[-1]
hshfile = os.path.join(self.args.backupdir, lastfull[0][:-4] + '.hsh')
if not os.path.isfile(hshfile):
raise(ProgError('Hash file of last full backup {0} does not exist!'.format(hshfile)))
# determine number of differential backups since last full backup
count_diffs = 0
if lastfull:
diffbackups = [ s for s in struct if s[1] == lastfull[1] and s[2] == 'diff' ]
count_diffs = len(diffbackups)
# make this a differential backup if full backup exists and the number of differential
# backups is below --diffcount
self.backup_type = 'diff'
if not lastfull:
self.backup_type = 'full'
self.backup_nr = 1
else:
self.backup_nr = lastfull[1]
if count_diffs >= self.args.diffcount:
self.backup_type = 'full'
self.backup_nr = self.backup_nr + 1
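        # Illustrative sequence with --diffcount 2 (assumed example):
        #   b1_full, b1_diff, b1_diff, b2_full, b2_diff, ...
        # The backup number only increases when a new full backup is started.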
# create file name of backup
date = datetime.now()
self.backup_file = os.path.join(self.args.backupdir, '{0}_{1}_b{2}_{3}_{4}.sna'.format(self.machine, self.drive, self.backup_nr, date.strftime(self.dateformat), self.backup_type))
# create backup command line
backup_cmd = [self.args.cmd, self.drive + ':', self.backup_file, '-W'] + snapshot_args
# if diff backup add reference to hash file of full backup
if self.backup_type == 'diff':
logging.info('Performing differential backup based on hash file {0}.'.format(hshfile))
backup_cmd = backup_cmd + [ '-h' + hshfile ]
# exclude files
if self.args.exclude:
# merge exclude arguments into a single list
excludes = [el for elements in self.args.exclude for el in elements]
exstr = string.join(map(lambda s: '"{0}"'.format(s) if '@' in s else s, excludes), ',')
backup_cmd = backup_cmd + [ '--exclude:' + exstr ]
# log to temp logfile
with tempfile.NamedTemporaryFile(delete=False, suffix = ".log") as logfile:
self.logfilename = logfile.name
backup_cmd = backup_cmd + [ '--LogFile:' + self.logfilename ]
self.backup_commandline = string.join(backup_cmd)
logging.info("Executing: " + self.backup_commandline)
# do it
if (not self.args.simulate):
self.returncode = subprocess.call(backup_cmd)
else:
self.returncode = 0
with open(self.logfilename, 'r') as logfile:
self.logtext = logfile.read()
if self.returncode != 0:
raise(ProgError('Snapshot returned with errorcode {0}!'.format(self.returncode)))
def docleanup(self):
# clean up old backups
struct = self.get_existing_backups()
now = datetime.now()
delfiles = set()
        # delete differential backups older than x days
if not self.args.deletediff is None:
self.deletetime_diff = now - timedelta(days = self.args.deletediff)
logging.info('Deleting differential backups <= {0}'.format(self.deletetime_diff))
delfiles.update([ f for f in struct if f[2] == 'diff' and f[3] <= self.deletetime_diff ])
        # delete all backups older than x days
if not self.args.delete is None:
self.deletetime_all = now - timedelta(days = self.args.delete)
logging.info('Deleting all backups <= {0}'.format(self.deletetime_all))
delfiles.update([ f for f in struct if f[3] <= self.deletetime_all ])
# do not delete full backups which have diff backups that are kept.
keep_id = set([ f[1] for f in struct if not f in delfiles ])
delfiles.difference_update([ f for f in delfiles if f[1] in keep_id and f[2] == 'full' ])
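        # Hypothetical example of the rule above: if a b3_..._diff file is kept
        # because it is newer than --deletediff, the matching b3_..._full file
        # is kept as well, since the differential depends on it.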
delfiles = [ f[0] for f in delfiles ]
# actually delete the files
self.deleted_files = self.delete_backupfiles(delfiles)
def mail(self, body):
# mail the stuff
if self.args.mail_to:
msg = MIMEText(body, 'text')
            msg['Subject'] = '{0}Snapshot of {1} drive {2} {3}'.format('SIMULATED ' if self.args.simulate else '', self.machine, self.args.drive, 'FAILED' if self.failed else 'SUCCESSFUL')
msg['From'] = self.args.mail_from
msg['To'] = self.args.mail_to
msg['Date'] = email.utils.formatdate(localtime=True)
msg.add_header('X-SnapshotBackup', 'Yes')
# force base64 encoding
msg._headers = [h for h in msg._headers if h[0] != 'Content-Transfer-Encoding']
email.encoders.encode_base64(msg)
if (self.args.mail_ssl):
server = smtplib.SMTP_SSL(self.args.mail_smtp)
else:
server = smtplib.SMTP(self.args.mail_smtp)
if self.args.mail_debug:
server.set_debuglevel(1)
# log in, if credentials are given
if self.args.mail_user != None or self.args.mail_password != None:
server.login(self.args.mail_user, self.args.mail_password)
try:
server.sendmail(self.args.mail_from, self.args.mail_to, msg.as_string())
finally:
server.quit()
def execute(self):
# We catch any exceptions during backup and cleanup and
# add that to the mail.
try:
# execute snapshot backup
self.dobackup()
# perform cleanup
self.docleanup()
except (KeyboardInterrupt, SystemExit):
raise
except Exception, ex:
logging.exception(ex.message)
self.exception = traceback.format_exc()
self.failed = True
if self.logfilename:
os.remove(self.logfilename)
# gather information into readable form
body = ''
if self.args.simulate:
body = body + 'Simulation: YES\n'
if self.machine:
body = body + 'Machine: {0}\n'.format(self.machine)
if self.drive:
body = body + 'Drive: {0}\n'.format(self.drive)
if self.backup_type:
body = body + 'Backup Type: {0}\n'.format(self.backup_type)
if self.backup_nr:
body = body + 'Backup Number: {0}\n'.format(self.backup_nr)
if self.backup_file:
body = body + 'Backup File: {0}\n'.format(self.backup_file)
if self.backup_commandline:
body = body + 'Backup Command: {0}\n'.format(self.backup_commandline)
if not self.returncode is None:
body = body + 'Return Value: {0}\n'.format(self.returncode)
if self.exception:
body = body + '\nException:\n{0}\n'.format(self.exception)
if self.logtext:
body = body + '\nOutput of snapshot:{0}\n'.format(self.logtext)
if self.deletetime_all or self.deletetime_diff:
body = body + '\nCLEANUP:\n'
if self.deletetime_all:
body = body + 'Deleted all backups <= {0}\n'.format(self.deletetime_all.strftime("%x %X"))
if self.deletetime_diff:
body = body + 'Deleted differential backups <= {0}\n'.format(self.deletetime_diff.strftime("%x %X"))
body = body + 'Deleted files:\n'
for f in self.deleted_files:
body = body + ' {0}\n'.format(f)
logging.info('\n' + body)
self.mail(body)
logging.info("Finished!")
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# paths
parser.add_argument('backupdir', help='directory containing the backup files')
parser.add_argument('drive', help='drive to back up. e.g. C:')
# general options
parser.add_argument('--cmd', default='snapshot.exe', help='Path for snapshot binary snapshot64.exe or snapshot.exe.')
parser.add_argument('--diffcount', type=int, default=0, metavar='X', help='Create X differential backups after every full backup. 0 = only full backups.')
parser.add_argument('--exclude', nargs='*', action='append', help='Excludes given file(s) or folder(s)')
parser.add_argument('--argsfile', '-af', metavar='ARGS_FILE', help='Additional command line arguments for snapshot are read from this file. If not specified, the following arguments are used by default: --CreateDir --AutoBackupSize:512 -L0 -Gx -W.')
parser.add_argument('--simulate', action='store_true', help='Does not call snapshot nor deletes any files. All messages are printed and mail is sent.')
parser.add_argument('--delete', '-d', type=int, metavar='DAYS', help='Delete all backups which are older than DAYS days. Full backups are not deleted if any differential backups that depend on them are being kept.')
parser.add_argument('--deletediff', '-dd', type=int, metavar='DAYS', help='Delete differential backups which are older than DAYS days.')
# mail options
mailgroup = parser.add_argument_group('mail options')
mailgroup.add_argument('--mail-to', help='Mail address for status mail.')
mailgroup.add_argument('--mail-from', help='Sender mail address for mail. Required if --mail-to is specified.')
mailgroup.add_argument('--mail-smtp', help='SMTP server for mailing. Required if --mail-to is specified.')
mailgroup.add_argument('--mail-ssl', help='Use SSL (port 465) for sending mail.', action='store_true')
mailgroup.add_argument('--mail-user', help='User for mailing if authentication is needed.')
mailgroup.add_argument('--mail-password', help='Password for mailing if authentication is needed.')
mailgroup.add_argument('--mail-debug', help='Outputs messages for debugging mail issues.', action='store_true')
args = parser.parse_args()
if args.mail_to:
argerr = []
if not args.mail_from:
argerr = argerr + ['The argument --mail-from is missing.']
if not args.mail_smtp:
argerr = argerr + ['The argument --mail-smtp is missing.']
if len(argerr) > 0:
parser.print_help()
sys.stderr.write('\n{0}\n'.format(string.join(argerr, '\n')))
os._exit(1)
snapshot = snapshot(args)
snapshot.execute()
|
|
from lxml import etree
import json
import os
import tempfile
from compare import ensure, expect
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.files.uploadedfile import TemporaryUploadedFile
from django.test import TestCase
from django.test.client import Client
from nose.plugins.skip import SkipTest
from tardis.tardis_portal.models import User, UserProfile, \
Experiment, ExperimentACL, Dataset, Dataset_File
from tardis.tardis_portal.staging import write_uploaded_file_to_dataset
from wand.image import Image
"""
Tests for IIIF API.
http://library.stanford.edu/iiif/image-api/
"""
def _create_datafile():
user = User.objects.create_user('testuser', '[email protected]', 'pwd')
user.save()
UserProfile(user=user).save()
full_access = Experiment.PUBLIC_ACCESS_FULL
experiment = Experiment.objects.create(title="IIIF Test",
created_by=user,
public_access=full_access)
experiment.save()
ExperimentACL(experiment=experiment,
pluginId='django_user',
entityId=str(user.id),
isOwner=True,
canRead=True,
canWrite=True,
canDelete=True,
aclOwnershipType=ExperimentACL.OWNER_OWNED).save()
dataset = Dataset()
dataset.save()
dataset.experiments.add(experiment)
dataset.save()
# Create new Datafile
tempfile = TemporaryUploadedFile('iiif_stored_file', None, None, None)
with Image(filename='magick:rose') as img:
img.format = 'tiff'
img.save(file=tempfile.file)
tempfile.file.flush()
datafile = Dataset_File(dataset=dataset)
datafile.size = os.path.getsize(tempfile.file.name)
#os.remove(tempfilename)
datafile.filename = 'iiif_named_file'
datafile.url = write_uploaded_file_to_dataset(dataset, tempfile)
datafile.verify(allowEmptyChecksums=True)
datafile.save()
return datafile
def _check_compliance_level(response):
"""
Currently complies with the Level 1 API, so the tests should assert no more than that.
"""
import re
ensure(re.search(r'\<http:\/\/library.stanford.edu\/iiif\/image-api\/'+\
r'compliance.html#level[01]\>;rel="compliesTo"',
response['Link']) != None,
True,
"Compliance header missing")
class Level0TestCase(TestCase):
""" As per: http://library.stanford.edu/iiif/image-api/compliance.html """
def setUp(self):
self.datafile = _create_datafile()
self.width = 70
self.height = 46
def testCanGetInfoAsXML(self):
client = Client()
kwargs = {'datafile_id': self.datafile.id,
'format': 'xml' }
response = client.get(reverse('tardis.tardis_portal.iiif.download_info',
kwargs=kwargs))
expect(response.status_code).to_equal(200)
# Check the response content is good
nsmap = { 'i': 'http://library.stanford.edu/iiif/image-api/ns/' }
xml = etree.fromstring(response.content)
identifier = xml.xpath('/i:info/i:identifier', namespaces=nsmap)[0]
expect(int(identifier.text)).to_equal(self.datafile.id)
height = xml.xpath('/i:info/i:height', namespaces=nsmap)[0]
expect(int(height.text)).to_equal(self.height)
width = xml.xpath('/i:info/i:width', namespaces=nsmap)[0]
expect(int(width.text)).to_equal(self.width)
# Check compliance level
_check_compliance_level(response)
def testCanGetInfoAsJSON(self):
client = Client()
kwargs = {'datafile_id': self.datafile.id,
'format': 'json' }
response = client.get(reverse('tardis.tardis_portal.iiif.download_info',
kwargs=kwargs))
expect(response.status_code).to_equal(200)
# Check the response content is good
data = json.loads(response.content)
expect(data['identifier']).to_equal(self.datafile.id)
expect(data['height']).to_equal(self.height)
expect(data['width']).to_equal(self.width)
# Check compliance level
_check_compliance_level(response)
def testCanGetOriginalImage(self):
client = Client()
kwargs = {'datafile_id': self.datafile.id,
'region': 'full',
'size': 'full',
'rotation': '0',
'quality': 'native' }
response = client.get(reverse('tardis.tardis_portal.iiif.download_image',
kwargs=kwargs))
expect(response.status_code).to_equal(200)
with Image(blob=response.content) as img:
expect(img.format).to_equal('TIFF')
expect(img.width).to_equal(self.width)
expect(img.height).to_equal(self.height)
# Check compliance level
_check_compliance_level(response)
class Level1TestCase(TestCase):
""" As per: http://library.stanford.edu/iiif/image-api/compliance.html """
def setUp(self):
self.datafile = _create_datafile()
self.width = 70
self.height = 46
def testCanGetJpegFormat(self):
client = Client()
kwargs = {'datafile_id': self.datafile.id,
'region': 'full',
'size': 'full',
'rotation': '0',
'quality': 'native',
'format': 'jpg' }
response = client.get(reverse('tardis.tardis_portal.iiif.download_image',
kwargs=kwargs))
expect(response.status_code).to_equal(200)
with Image(blob=response.content) as img:
expect(img.format).to_equal('JPEG')
expect(img.width).to_equal(self.width)
expect(img.height).to_equal(self.height)
# Check compliance level
_check_compliance_level(response)
def testHandleRegions(self):
client = Client()
# Inside box
kwargs = {'datafile_id': self.datafile.id,
'region': '15,10,25,20',
'size': 'full',
'rotation': '0',
'quality': 'native',
'format': 'jpg' }
response = client.get(reverse('tardis.tardis_portal.iiif.download_image',
kwargs=kwargs))
expect(response.status_code).to_equal(200)
with Image(blob=response.content) as img:
expect(img.width).to_equal(25)
expect(img.height).to_equal(20)
# Partly outside box
kwargs = {'datafile_id': self.datafile.id,
'region': '60,41,20,20',
'size': 'full',
'rotation': '0',
'quality': 'native',
'format': 'jpg' }
response = client.get(reverse('tardis.tardis_portal.iiif.download_image',
kwargs=kwargs))
expect(response.status_code).to_equal(200)
with Image(blob=response.content) as img:
expect(img.width).to_equal(10)
expect(img.height).to_equal(5)
# Check compliance level
_check_compliance_level(response)
def testHandleSizing(self):
client = Client()
def get_with_size(sizearg):
kwargs = {'datafile_id': self.datafile.id,
'region': 'full',
'size': sizearg,
'rotation': '0',
'quality': 'native',
'format': 'jpg' }
response = client.get(reverse('tardis.tardis_portal.iiif.download_image',
kwargs=kwargs))
expect(response.status_code).to_equal(200)
return response
permutations = [
# Width (aspect ratio preserved)
{'arg': '50,', 'width': 50, 'height': 33},
# Height (aspect ratio preserved)
{'arg': ',30', 'width': 46, 'height': 30},
# Percent size (aspect ratio preserved)
{'arg': 'pct:50', 'width': 35, 'height': 23},
]
for data in permutations:
response = get_with_size(data['arg'])
with Image(blob=response.content) as img:
expect(img.width).to_equal(data['width'])
expect(img.height).to_equal(data['height'])
def testHandleRotation(self):
client = Client()
def get_with_rotation(rotation):
kwargs = {'datafile_id': self.datafile.id,
'region': 'full',
'size': 'full',
'rotation': rotation,
'quality': 'native',
'format': 'jpg' }
response = client.get(reverse('tardis.tardis_portal.iiif.download_image',
kwargs=kwargs))
expect(response.status_code).to_equal(200)
return response
rotations = [ get_with_rotation(i) for i in [0,90,180,270] ]
for response in rotations[::2]:
with Image(blob=response.content) as img:
expect(img.width).to_equal(self.width)
expect(img.height).to_equal(self.height)
for response in rotations[1::2]:
with Image(blob=response.content) as img:
expect(img.width).to_equal(self.height)
expect(img.height).to_equal(self.width)
class Level2TestCase(TestCase):
""" As per: http://library.stanford.edu/iiif/image-api/compliance.html """
def setUp(self):
self.datafile = _create_datafile()
self.width = 70
self.height = 46
def testCanGetRequiredFormats(self):
client = Client()
for ext, format in [('jpg', 'JPEG'), ('png', 'PNG'), ('jp2', 'JP2')]:
kwargs = {'datafile_id': self.datafile.id,
'region': 'full',
'size': 'full',
'rotation': '0',
'quality': 'native',
'format': ext }
response = client.get(reverse('tardis.tardis_portal.iiif.'+
'download_image',
kwargs=kwargs))
expect(response.status_code).to_equal(200)
with Image(blob=response.content) as img:
expect(img.format).to_equal(format)
expect(img.width).to_equal(self.width)
expect(img.height).to_equal(self.height)
# Check compliance level
_check_compliance_level(response)
def testHandleSizing(self):
client = Client()
def get_with_size(sizearg):
kwargs = {'datafile_id': self.datafile.id,
'region': 'full',
'size': sizearg,
'rotation': '0',
'quality': 'native',
'format': 'jpg' }
response = client.get(reverse('tardis.tardis_portal.iiif.download_image',
kwargs=kwargs))
expect(response.status_code).to_equal(200)
return response
permutations = [
# Exact dimensions *without* aspect ratio preserved
{'arg': '16,16', 'width': 16, 'height': 16},
# Maximum dimensions (aspect ratio preserved)
{'arg': '!16,16', 'width': 16, 'height': 11},
{'arg': '!90,11', 'width': 17, 'height': 11},
{'arg': '!16,10', 'width': 15, 'height': 10},
]
for data in permutations:
response = get_with_size(data['arg'])
with Image(blob=response.content) as img:
expect(img.width).to_equal(data['width'])
expect(img.height).to_equal(data['height'])
def testCanGetRequiredQualities(self):
client = Client()
data = [('native', 3019), ('color', 3019), ('grey', 205), ('bitonal', 2)]
# Not currently implemented
raise SkipTest
class ExtraTestCases(TestCase):
""" As per: http://library.stanford.edu/iiif/image-api/compliance.html """
def setUp(self):
self.datafile = _create_datafile()
self.width = 70
self.height = 46
def testInfoHasEtags(self):
client = Client()
for format_ in ('json', 'xml'):
kwargs = {'datafile_id': self.datafile.id,
'format': format_ }
url = reverse('tardis.tardis_portal.iiif.download_info',
kwargs=kwargs)
response = client.get(url)
expect(response.status_code).to_equal(200)
# Check etag exists
ensure('Etag' in response, True, "Info should have an etag")
def testImageHasEtags(self):
client = Client()
kwargs = {'datafile_id': self.datafile.id,
'region': 'full',
'size': 'full',
'rotation': '0',
'quality': 'native' }
url = reverse('tardis.tardis_portal.iiif.download_image', kwargs=kwargs)
response = client.get(url)
expect(response.status_code).to_equal(200)
# Check etag exists
ensure('Etag' in response, True, "Image should have an etag")
def testImageCacheControl(self):
client = Client()
kwargs = {'datafile_id': self.datafile.id,
'region': 'full',
'size': 'full',
'rotation': '0',
'quality': 'native' }
url = reverse('tardis.tardis_portal.iiif.download_image', kwargs=kwargs)
response = client.get(url)
expect(response.status_code).to_equal(200)
# Check etag exists
ensure('Cache-Control' in response, True,
"Image should have a Cache-Control header")
ensure('max-age' in response['Cache-Control'], True,
"Image Cache-Control header should include a max-age")
# By default the image is public, so
ensure('public' in response['Cache-Control'], True,
"Image should be publicly cacheable")
is_logged_in = client.login(username='testuser', password='pwd')
expect(is_logged_in).to_be_truthy()
experiment = self.datafile.dataset.get_first_experiment()
experiment.public_access = Experiment.PUBLIC_ACCESS_NONE
experiment.save()
url = reverse('tardis.tardis_portal.iiif.download_image', kwargs=kwargs)
response = client.get(url)
expect(response.status_code).to_equal(200)
# Check etag exists
ensure('Cache-Control' in response, True,
"Image should have a Cache-Control header")
ensure('max-age' in response['Cache-Control'], True,
"Image Cache-Control header should include a max-age")
# By default the image is now private, so
ensure('private' in response['Cache-Control'], True,
"Image should be privately cacheable")
|
|
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Apple SUS shared functions."""
import datetime
import logging
import re
import xml
from xml.dom import minidom
from google.appengine.api import taskqueue
from google.appengine.ext import deferred
from simian import settings
from simian.mac import common
from simian.mac import models
from simian.mac.common import gae_util
from simian.mac.models import constants
from simian.mac.munki import plist
OS_VERSIONS = frozenset(['10.7', '10.8', '10.9', '10.10', '10.11', '10.12'])
CATALOG_REGENERATION_LOCK_NAME = 'applesus_catalog_regeneration_%s'
MON, TUE, WED, THU, FRI, SAT, SUN = range(0, 7)
class Error(Exception):
"""Base error."""
class DocumentFormatError(Error):
"""Error in document format."""
class DistFileDocument(object):
"""Class to hold a Apple SUS distfile document."""
def __init__(self):
"""Initializer."""
self.Reset()
def Reset(self):
"""Reset variables."""
self.description = None
self.restart_required = None
self.server_comment = None
self.softwareupdate_name = None
self.title = None
self.version = None
self._installer_script = {}
def _ParseInstallerScriptString(self, istr):
"""Parse an installer script string and return its key/value pairs.
The installer script string appears generally as
"KEY" = "VALUE"
and can contain multiple lines. Apparently the quoting chars can be
double or single quotes, and the alternative quote char is allowed as
a literal inside the other.
Standard javascript-style comments are permitted.
Poorly formed lines will disrupt the parser and incomplete/no values
will be returned.
For example:
// This comment is OK
"KEY" = "VALUE";
"KEY2" = "VALUE2";
// Here's another comment later on.
"KEY3" = 'VALUE3
VALUE3MORE "THIS IS VALID"
';
Or, consider:
"KEY" = ; # this will break the parser
"NOTFOUNDKEY" = "NEVER GET HERE";
Args:
istr: str, see above format example above.
Returns:
dict
"""
installer_script = {}
kv_split = re.compile(
(r'(?:^//[^\n]*$)|'
'(?:^"(\w+)"\s*=\s*([\"\'])([^\\2]*?)\\2;$)'),
re.MULTILINE | re.DOTALL)
for i in re.finditer(kv_split, istr):
if i.group(1):
installer_script[i.group(1)] = i.group(3)
return installer_script
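# For example (hypothetical snippet), a strings block such as
#   '"SU_TITLE" = "Safari";\n"SU_VERS" = "9.1";'
# parses to {'SU_TITLE': 'Safari', 'SU_VERS': '9.1'}.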
def LoadDocument(self, distfile_xml):
"""Load an entire distfile XML document and parse it.
Args:
distfile_xml: str, xml document
Raises:
DocumentFormatError: the XML document is malformed.
"""
try:
p = minidom.parseString(distfile_xml)
except xml.parsers.expat.ExpatError, e:
raise DocumentFormatError(str(e))
try:
l = p.getElementsByTagName('localization')[0]
s = p.getElementsByTagName('strings')[0]
cdata = []
for cn in s.childNodes:
cdata.append(cn.nodeValue)
cdata = ''.join(cdata)
except IndexError:
raise DocumentFormatError
# TODO(user): instead of regex, parse XML.
self.restart_required = re.search(
r'onConclusion=("|\')RequireRestart("|\')', distfile_xml) is not None
swupd_name_match = re.search(
r'suDisabledGroupID=("|\')([\w\s\.-]*)("|\')', distfile_xml)
if swupd_name_match:
self.softwareupdate_name = swupd_name_match.group(2)
self._installer_script = self._ParseInstallerScriptString(cdata)
self.description = self._installer_script.get('SU_DESCRIPTION')
self.server_comment = self._installer_script.get('SU_SERVERCOMMENT')
self.title = self._installer_script.get('SU_TITLE')
self.version = (self._installer_script.get('SU_VERS') or
self._installer_script.get('SU_VERSION'))
def GenerateAppleSUSCatalogs(track=None, tracks=None, delay=0):
"""Generates Apple SUS catalogs for a given track, set of tracks, or all.
Note: this generates tracks for all os_versions on the given track/tracks.
Args:
track: string track to generate catalog for. OR,
tracks: list of string tracks.
delay: int. if > 0, defer generating the catalogs by this many seconds.
"""
if track and tracks:
raise ValueError('only one of track and tracks is allowed')
elif not tracks and not track:
tracks = common.TRACKS
elif track:
tracks = [track]
for track in tracks:
for os_version in OS_VERSIONS:
if delay:
now_str = datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S')
deferred_name = 'gen-applesus-catalog-%s-%s-%s' % (
os_version, track, now_str)
deferred_name = re.sub(r'[^\w-]', '', deferred_name)
try:
deferred.defer(
GenerateAppleSUSCatalog, os_version, track,
_countdown=delay, _name=deferred_name)
except taskqueue.TaskAlreadyExistsError:
logging.info('Skipping duplicate Apple SUS Catalog generation task.')
else:
GenerateAppleSUSCatalog(os_version, track)
if delay:
now_str = datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S')
deferred_name = 'gen-sus-metadata-catalog-%s' % now_str
deferred_name = re.sub(r'[^\w-]', '', deferred_name)
try:
deferred.defer(
GenerateAppleSUSMetadataCatalog, _name=deferred_name)
except taskqueue.TaskAlreadyExistsError:
logging.info('Skipping duplicate Apple SUS Catalog generation task.')
else:
GenerateAppleSUSMetadataCatalog()
def GenerateAppleSUSCatalog(os_version, track, _datetime=datetime.datetime):
"""Generates an Apple SUS catalog for a given os_version and track.
This function loads the untouched/raw Apple SUS catalog, removes any
products/updates that are not approved for the given track, then saves
a new catalog (plist/xml) to Datastore for client consumption.
Args:
os_version: str OS version to generate the catalog for.
track: str track name to generate the catalog for.
_datetime: datetime module; only used for stub during testing.
Returns:
tuple, new models.AppleSUSCatalog object and plist.ApplePlist object. Or,
if there is no "untouched" catalog for the os_version, then (None, None) is
returned.
"""
logging.info('Generating catalog: %s_%s', os_version, track)
# clear any locks on this track, potentially set by admin product changes.
gae_util.ReleaseLock(CATALOG_REGENERATION_LOCK_NAME % track)
catalog_key = '%s_untouched' % os_version
untouched_catalog_obj = models.AppleSUSCatalog.get_by_key_name(catalog_key)
if not untouched_catalog_obj:
logging.warning('Apple Update catalog does not exist: %s', catalog_key)
return None, None
untouched_catalog_plist = plist.ApplePlist(untouched_catalog_obj.plist)
untouched_catalog_plist.Parse()
approved_product_ids = set()
products_query = models.AppleSUSProduct.AllActive().filter('tracks =', track)
for product in products_query:
approved_product_ids.add(product.product_id)
product_ids = untouched_catalog_plist.get('Products', {}).keys()
new_plist = untouched_catalog_plist
for product_id in product_ids:
if product_id not in approved_product_ids:
del new_plist['Products'][product_id]
catalog_plist_xml = new_plist.GetXml()
# Save the catalog using a time-specific key for rollback purposes.
now = _datetime.utcnow()
now_str = now.strftime('%Y-%m-%d-%H-%M-%S')
backup = models.AppleSUSCatalog(
key_name='backup_%s_%s_%s' % (os_version, track, now_str))
backup.plist = catalog_plist_xml
backup.put()
# Overwrite the catalog being served for this os_version/track pair.
c = models.AppleSUSCatalog(key_name='%s_%s' % (os_version, track))
c.plist = catalog_plist_xml
c.put()
return c, new_plist
def GenerateAppleSUSMetadataCatalog():
"""Generates the Apple SUS metadata catalog.
Returns:
The Catalog instance created.
"""
logging.info('Generating catalog: apple_update_metadata')
products = {}
# Currently, items need to exist in this catalog if they're unattended or
# have a force_install_after_date date set.
unattended = models.AppleSUSProduct.AllActive().filter('unattended =', True)
force_install_after_date = models.AppleSUSProduct.AllActive().filter(
'force_install_after_date !=', None)
for p in unattended:
products[p.product_id] = p
for p in force_install_after_date:
products[p.product_id] = p
catalog_plist_xml_fragments = [
p.plist.GetXmlContent() for p in products.values()]
catalog_plist_xml = constants.CATALOG_PLIST_XML % (
'\n'.join(catalog_plist_xml_fragments))
# Overwrite the catalog being served for this os_version/track pair.
c = models.Catalog(key_name='apple_update_metadata')
c.plist = catalog_plist_xml
c.put()
models.Catalog.DeleteMemcacheWrap(
'apple_update_metadata', prop_name='plist_xml')
return c
def GetAutoPromoteDate(track, applesus_product):
"""Returns a date of when a given update will auto-promote.
Args:
track: str track to get the auto-promote datetime for.
applesus_product: models.AppleSUSProduct object.
Returns:
datetime.date of when the Apple SUS update will be auto-promoted to track,
or None if the product will never be auto-promoted due to manual_override or
the product not being in the unstable track.
Raises:
ValueError: an invalid track was specified; only testing/stable supported.
"""
if not settings.APPLE_AUTO_PROMOTE_ENABLED:
return None
if applesus_product.manual_override:
return None
elif common.UNSTABLE not in applesus_product.tracks:
return None
if track == common.TESTING:
days = settings.APPLE_UNSTABLE_GRACE_PERIOD_DAYS
elif track == common.STABLE:
days = settings.APPLE_TESTING_GRACE_PERIOD_DAYS
else:
raise ValueError('Invalid track was specified: %s' % track)
auto_promote_offset = datetime.timedelta(days=days)
previous_track_date = applesus_product.mtime.date()
if track == common.TESTING:
auto_promote_date = previous_track_date + auto_promote_offset
if auto_promote_date.weekday() >= SAT: # Sat or Sun.
auto_promote_date = _GetNextWeekdayDate(
weekday=MON, min_date=auto_promote_date)
return auto_promote_date
# If we're looking for a stable auto-promotion date but the item is not yet in
# testing, then we need to first figure out when it will go to testing and set
# the previous_track_date to that.
if common.TESTING not in applesus_product.tracks:
previous_track_date = GetAutoPromoteDate('testing', applesus_product)
# Promotion to stable should only happen on Wednesdays and only after the grace period.
min_auto_promote_date = previous_track_date + auto_promote_offset
return _GetNextWeekdayDate(
weekday=settings.APPLE_AUTO_PROMOTE_STABLE_WEEKDAY,
min_date=min_auto_promote_date)
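# Worked example (hypothetical settings): with APPLE_UNSTABLE_GRACE_PERIOD_DAYS = 4
# and a product that entered the unstable track on Monday 2016-03-07, the testing
# auto-promote date is Friday 2016-03-11. Had the grace period ended on a Saturday
# or Sunday, it would be pushed to the following Monday by _GetNextWeekdayDate() below.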
def _GetNextWeekdayDate(weekday, min_date=None):
"""Returns the date of the current or next weekday on or after min_date.
Args:
weekday: int weekday number, where Monday is 0 and Sunday is 6.
min_date: datetime.date object of the minimum date to find the weekday on
or after. default of None uses today as the minimum date.
Returns:
datetime.date object of the current or next desired weekday.
"""
if min_date is None:
min_date = datetime.datetime.utcnow().date()
next_date = min_date
if min_date.weekday() > weekday:
next_date += datetime.timedelta(7 - min_date.weekday() + weekday)
else:
next_date += datetime.timedelta(weekday - min_date.weekday())
return next_date
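# Illustrative sketch (not part of the original module): a couple of worked
# examples of the weekday arithmetic above, using hypothetical dates.
def _example_next_weekday_date():
  # 2016-03-07 was a Monday (weekday 0); the next Wednesday (weekday 2) on or
  # after it is two days later.
  assert _GetNextWeekdayDate(WED, datetime.date(2016, 3, 7)) == datetime.date(2016, 3, 9)
  # If min_date is already past the requested weekday, the result rolls over
  # into the following week: the next Monday on or after Wednesday 2016-03-09.
  assert _GetNextWeekdayDate(MON, datetime.date(2016, 3, 9)) == datetime.date(2016, 3, 14)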
|
|
# -*- coding: utf-8 -*-
"""
Base definitions for models of programs.
** under construction **
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import collections
from grako.util import asjson, asjsons, Mapping, builtins
from grako.exceptions import SemanticError
from grako.ast import AST
EOLCOL = 50
class Node(object):
""" Base class for model nodes
"""
inline = True
def __init__(self, ctx=None, ast=None, parseinfo=None, **kwargs):
super(Node, self).__init__()
self._ctx = ctx
self._ast = ast
if isinstance(ast, AST):
parseinfo = ast.parseinfo if not parseinfo else parseinfo
self._parseinfo = parseinfo
attributes = ast or {}
# assume that kwargs contains node attributes of interest
if isinstance(ast, Mapping):
attributes.update({k: v for k, v in kwargs.items() if v is not None})
self._parent = None
self._adopt_children(attributes)
self.__postinit__(attributes)
def __postinit__(self, ast):
if isinstance(ast, Mapping):
for name, value in ast.items():
while hasattr(self, name):
name = name + '_'
setattr(self, name, value)
@property
def ast(self):
return self._ast
@property
def parent(self):
return self._parent
@property
def line(self):
info = self.line_info
if info:
return info.line
@property
def col(self):
info = self.line_info
if info:
return info.col
@property
def ctx(self):
return self._ctx
@property
def context(self):
return self._ctx
@property
def parseinfo(self):
return self._parseinfo
@property
def line_info(self):
if self.parseinfo:
return self.parseinfo.buffer.line_info(self.parseinfo.pos)
@property
def text(self):
if self.parseinfo:
text = self.parseinfo.buffer.text
return text[self.parseinfo.pos:self.parseinfo.endpos]
@property
def comments(self):
if self.parseinfo:
return self.parseinfo.buffer.comments(self.parseinfo.pos)
return [], []
def children(self):
childset = set()
def cn(child):
if isinstance(child, Node):
childset.add(child)
elif isinstance(child, Mapping):
for c in child.values():
cn(c)
elif isinstance(child, list):
for c in child:
cn(c)
for k, c in vars(self).items():
if not k.startswith('_'):
cn(c)
return list(childset)
def asjson(self):
return asjson(self)
def _adopt_children(self, ast, parent=None):
childset = set()
def adopt(node):
if isinstance(node, Node) and node not in childset:
if isinstance(parent, Node):
node._parent = parent
childset.add(node)
elif isinstance(node, Mapping):
for c in node.values():
self._adopt_children(c, parent=node)
elif isinstance(node, list):
for c in node:
self._adopt_children(c, parent=node)
adopt(ast)
def _pubdict(self):
return {
k: v
for k, v in vars(self).items()
if not k.startswith('_')
}
def __json__(self):
result = collections.OrderedDict(
__class__=self.__class__.__name__,
)
result.update(self._pubdict())
return asjson(result)
def __str__(self):
return asjsons(self)
class NodeWalker(object):
def _find_walker(self, node, prefix='walk_'):
classes = [node.__class__]
while classes:
cls = classes.pop()
name = prefix + cls.__name__
walker = getattr(self, name, None)
if callable(walker):
return walker
for b in cls.__bases__:
if b not in classes:
classes.append(b)
return getattr(self, 'walk_default', None)
def walk(self, node, *args, **kwargs):
walker = self._find_walker(node)
if callable(walker):
return walker(node, *args, **kwargs)
class DepthFirstWalker(NodeWalker):
def walk(self, node, *args, **kwargs):
tv = super(DepthFirstWalker, self).walk
if isinstance(node, Node):
children = [self.walk(c, *args, **kwargs) for c in node.children()]
return tv(node, children, *args, **kwargs)
elif isinstance(node, collections.Iterable):
return [tv(e, [], *args, **kwargs) for e in node]
else:
return tv(node, [], *args, **kwargs)
class ModelBuilderSemantics(object):
""" Intended as a semantic action for parsing, a ModelBuilderSemantics creates
nodes using the class name given as first parameter to a grammar
rule, and synthesizes the class/type if it's not known.
"""
def __init__(self, context=None, baseType=Node, types=None):
self.ctx = context
self.baseType = baseType
self.constructors = dict()
for t in types or ():
self._register_constructor(t)
def _register_constructor(self, constructor):
self.constructors[constructor.__name__] = constructor
def _get_constructor(self, typename):
typename = str(typename)
if typename in self.constructors:
return self.constructors[typename]
constructor = builtins
for name in typename.split('.'):
try:
context = vars(constructor)
except Exception as e:
raise SemanticError(
'Could not find constructor for %s (%s): %s'
% (typename, type(constructor).__name__, str(e))
)
if name in context:
constructor = context[name]
else:
constructor = None
break
if constructor:
return constructor
# synthesize a new type
constructor = type(typename, (self.baseType,), {})
self._register_constructor(constructor)
return constructor
def _default(self, ast, *args, **kwargs):
if not args:
return ast
name = args[0]
constructor = self._get_constructor(name)
try:
if type(constructor) is type and issubclass(constructor, Node):
return constructor(*args[1:], ast=ast, ctx=self.ctx, **kwargs)
else:
return constructor(ast, *args[1:], **kwargs)
except Exception as e:
raise SemanticError(
'Could not call constructor for %s: %s'
% (name, str(e))
)
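# Illustrative usage sketch (not part of the original module); the rule/type
# name 'Number' and the attribute 'value' below are hypothetical.
# ModelBuilderSemantics synthesizes a Node subclass for an unknown type name,
# the AST keys become node attributes, and a DepthFirstWalker dispatches on
# the node's class hierarchy (walk_Number is not defined, so walk_Node wins).
def _example_model_builder():
    semantics = ModelBuilderSemantics()
    Number = semantics._get_constructor('Number')  # synthesized Node subclass
    node = Number(ast={'value': 42})               # node.value == 42

    class ValueWalker(DepthFirstWalker):
        def walk_Node(self, node, children):
            return getattr(node, 'value', None)

    return ValueWalker().walk(node)  # -> 42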
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Plotting terminal based histograms
"""
from __future__ import print_function
from __future__ import division
import os
import sys
import math
import optparse
from os.path import dirname
from .utils.helpers import *
from .utils.commandhelp import hist
def calc_bins(n, min_val, max_val, h=None, binwidth=None):
"""
Calculate number of bins for the histogram
"""
if not h:
h = max(10, math.log(n + 1, 2))
if binwidth == 0:
binwidth = 0.1
if binwidth is None:
binwidth = (max_val - min_val) / h
for b in drange(min_val, max_val, step=binwidth, include_stop=True):
if b.is_integer():
yield int(b)
else:
yield b
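# Worked example (assuming drange() from .utils.helpers includes the stop value
# when include_stop=True): calc_bins(100, 0, 10) uses h = max(10, log2(101)) = 10,
# so binwidth = (10 - 0) / 10 = 1.0 and the generator yields the integer bin
# edges 0, 1, 2, ..., 10.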
def read_numbers(numbers):
"""
Read the input data in the most optimal way
"""
if isiterable(numbers):
for number in numbers:
yield float(str(number).strip())
else:
with open(numbers) as fh:
for number in fh:
yield float(number.strip())
def run_demo():
"""
Run a demonstration
"""
module_dir = dirname(dirname(os.path.realpath(__file__)))
demo_file = os.path.join(module_dir, 'examples/data/exp.txt')
if not os.path.isfile(demo_file):
sys.stderr.write("demo input file not found!\n")
sys.stderr.write("run the downloaddata.sh script in the example first\n")
sys.exit(1)
# plotting a histogram
print("plotting a basic histogram")
print("plot_hist('%s')" % demo_file)
print("hist -f %s" % demo_file)
print("cat %s | hist" % demo_file)
plot_hist(demo_file)
print("*" * 80)
# with colours
print("histogram with colours")
print("plot_hist('%s', colour='blue')" % demo_file)
print("hist -f %s -c blue" % demo_file)
plot_hist(demo_file, colour='blue')
print("*" * 80)
# changing the shape of the point
print("changing the shape of the bars")
print("plot_hist('%s', pch='.')" % demo_file)
print("hist -f %s -p ." % demo_file)
plot_hist(demo_file, pch='.')
print("*" * 80)
# changing the size of the plot
print("changing the size of the plot")
print("plot_hist('%s', height=35.0, bincount=40)" % demo_file)
print("hist -f %s -s 35.0 -b 40" % demo_file)
plot_hist(demo_file, height=35.0, bincount=40)
def plot_hist(f, height=20.0, bincount=None, binwidth=None, pch="o", colour="default", title="", xlab=None, showSummary=False, regular=False):
"""
Make a histogram
Arguments:
f -- a filename, file handle, or iterable of numbers to plot
height -- the height of the histogram in # of lines
bincount -- number of bins in the histogram
binwidth -- width of bins in the histogram
pch -- shape of the bars in the plot
colour -- colour of the bars in the terminal
title -- title at the top of the plot
xlab -- boolean value for whether or not to display x-axis labels
showSummary -- boolean value for whether or not to display a summary
regular -- boolean value for whether or not to start y-labels at 0
"""
if pch is None:
pch = "o"
if isinstance(f, str):
with open(f) as fh:
f = fh.readlines()
min_val, max_val = None, None
n, mean, sd = 0.0, 0.0, 0.0
for number in read_numbers(f):
n += 1
if min_val is None or number < min_val:
min_val = number
if max_val is None or number > max_val:
max_val = number
mean += number
mean /= n
for number in read_numbers(f):
sd += (mean - number)**2
sd /= (n - 1)
sd **= 0.5
bins = list(calc_bins(n, min_val, max_val, bincount, binwidth))
hist = dict((i, 0) for i in range(len(bins)))
for number in read_numbers(f):
for i, b in enumerate(bins):
if number <= b:
hist[i] += 1
break
if number == max_val and max_val > bins[len(bins) - 1]:
hist[len(hist) - 1] += 1
min_y, max_y = min(hist.values()), max(hist.values())
start = max(min_y, 1)
stop = max_y + 1
if regular:
start = 1
if height is None:
height = stop - start
if height > 20:
height = 20
ys = list(drange(start, stop, float(stop - start) / height))
ys.reverse()
nlen = max(len(str(min_y)), len(str(max_y))) + 1
if title:
print(box_text(title, max(len(hist) * 2, len(title)), nlen))
print()
used_labs = set()
for y in ys:
ylab = str(int(y))
if ylab in used_labs:
ylab = ""
else:
used_labs.add(ylab)
ylab = " " * (nlen - len(ylab)) + ylab + "|"
print(ylab, end=' ')
for i in range(len(hist)):
if int(y) <= hist[i]:
printcolour(pch, True, colour)
else:
printcolour(" ", True, colour)
print('')
xs = hist.keys()
print(" " * (nlen + 1) + "-" * len(xs))
if xlab:
labels = abbreviate([str(b) for b in bins])
xlen = len(labels[0])
for i in range(0, xlen):
printcolour(" " * (nlen + 1), True, colour)
for x in range(0, len(hist)):
num = labels[x]
if x % 2 != 0:
pass
elif i < len(num):
print(num[i], end=' ')
else:
print(" ", end=' ')
print('')
center = max(map(len, map(str, [n, min_val, mean, max_val])))
center += 15
if showSummary:
print()
print("-" * (2 + center))
print("|" + "Summary".center(center) + "|")
print("-" * (2 + center))
summary = "|" + ("observations: %d" % n).center(center) + "|\n"
summary += "|" + ("min value: %f" % min_val).center(center) + "|\n"
summary += "|" + ("mean : %f" % mean).center(center) + "|\n"
summary += "|" + ("std dev : %f" % sd).center(center) + "|\n"
summary += "|" + ("max value: %f" % max_val).center(center) + "|\n"
summary += "-" * (2 + center)
print(summary)
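# Illustrative usage sketch (not part of the original module): calling
# plot_hist() directly from Python with a hypothetical in-memory sample
# instead of a file. Lists can be iterated repeatedly, which the three
# read_numbers() passes above rely on.
def _example_plot_hist():
    samples = [1, 2, 2, 3, 3, 3, 4, 4, 5]
    plot_hist(samples, height=10.0, pch='*', colour='default',
              title='demo', xlab=True, showSummary=True)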
def main():
parser = optparse.OptionParser(usage=hist['usage'])
parser.add_option(
'-f', '--file', help='a file containing a column of numbers', default=None, dest='f')
parser.add_option('-t', '--title', help='title for the chart', default="", dest='t')
parser.add_option(
'-b', '--bins', help='number of bins in the histogram', type='int', default=None, dest='b')
parser.add_option('-w', '--binwidth', help='width of bins in the histogram',
type='float', default=None, dest='binwidth')
parser.add_option('-s', '--height', help='height of the histogram (in lines)',
type='int', default=None, dest='h')
parser.add_option('-p', '--pch', help='shape of each bar', default='o', dest='p')
parser.add_option('-x', '--xlab', help='label bins on x-axis',
default=None, action="store_true", dest='x')
parser.add_option('-c', '--colour', help='colour of the plot (%s)' %
colour_help, default='default', dest='colour')
parser.add_option('-d', '--demo', help='run demos', action='store_true', dest='demo')
parser.add_option('-n', '--nosummary', help='hide summary',
action='store_false', dest='showSummary', default=True)
parser.add_option('-r', '--regular',
help='use regular y-scale (0 - maximum y value), instead of truncated y-scale (minimum y-value - maximum y-value)',
default=False, action="store_true", dest='regular')
opts, args = parser.parse_args()
if opts.f is None:
if len(args) > 0:
opts.f = args[0]
elif opts.demo is None or opts.demo is False:
opts.f = sys.stdin.readlines()
if opts.demo:
run_demo()
elif opts.f:
plot_hist(opts.f, opts.h, opts.b, opts.binwidth, opts.p, opts.colour,
opts.t, opts.x, opts.showSummary, opts.regular)
else:
print("nothing to plot!")
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import re
import urllib
from itertools import groupby
from django.utils.translation import ugettext as _
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import force_unicode
from desktop.lib.rest.http_client import HttpClient, RestException
from desktop.lib.rest import resource
from search.conf import EMPTY_QUERY, SECURITY_ENABLED
from search.api import _compute_range_facet
from libsolr.conf import SSL_CERT_CA_VERIFY
LOG = logging.getLogger(__name__)
DEFAULT_USER = 'hue'
def utf_quoter(what):
return urllib.quote(unicode(what).encode('utf-8'), safe='~@#$&()*!+=;,.?/\'')
class SolrApi(object):
"""
http://wiki.apache.org/solr/CoreAdmin#CoreAdminHandler
"""
def __init__(self, solr_url, user, security_enabled=SECURITY_ENABLED.get(), ssl_cert_ca_verify=SSL_CERT_CA_VERIFY.get()):
self._url = solr_url
self._user = user
self._client = HttpClient(self._url, logger=LOG)
self.security_enabled = security_enabled
if self.security_enabled:
self._client.set_kerberos_auth()
self._client.set_verify(ssl_cert_ca_verify)
self._root = resource.Resource(self._client)
# The Kerberos handshake requires two requests in order to authenticate,
# but if our first request is a PUT/POST, it might flat-out reject the
# first request if the body is too large. So, connect here in order to get
# a cookie so future PUT/POSTs will be pre-authenticated.
if self.security_enabled:
self._root.invoke('HEAD', '/')
def _get_params(self):
if self.security_enabled:
return (('doAs', self._user ),)
return (('user.name', DEFAULT_USER), ('doAs', self._user),)
def _get_q(self, query):
q_template = '(%s)' if len(query['qs']) >= 2 else '%s'
return 'OR'.join([q_template % (q['q'] or EMPTY_QUERY.get()) for q in query['qs']]).encode('utf-8')
def _get_aggregate_function(self, facet):
props = {
'field': facet['field'],
'aggregate': facet['properties']['aggregate'] if 'properties' in facet else facet['aggregate']
}
if props['aggregate'] == 'median':
return 'percentile(%(field)s,50)' % props
else:
return '%(aggregate)s(%(field)s)' % props
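# For example, a facet with field 'price' (hypothetical) and aggregate 'median'
# is rendered as percentile(price,50), while aggregate 'avg' becomes avg(price).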
def _get_range_borders(self, collection, query):
props = {}
GAPS = {
'5MINUTES': {
'histogram-widget': {'coeff': '+3', 'unit': 'SECONDS'}, # ~100 slots
'bucket-widget': {'coeff': '+3', 'unit': 'SECONDS'}, # ~100 slots
'bar-widget': {'coeff': '+3', 'unit': 'SECONDS'}, # ~100 slots
'facet-widget': {'coeff': '+1', 'unit': 'MINUTES'}, # ~10 slots
},
'30MINUTES': {
'histogram-widget': {'coeff': '+20', 'unit': 'SECONDS'},
'bucket-widget': {'coeff': '+20', 'unit': 'SECONDS'},
'bar-widget': {'coeff': '+20', 'unit': 'SECONDS'},
'facet-widget': {'coeff': '+5', 'unit': 'MINUTES'},
},
'1HOURS': {
'histogram-widget': {'coeff': '+30', 'unit': 'SECONDS'},
'bucket-widget': {'coeff': '+30', 'unit': 'SECONDS'},
'bar-widget': {'coeff': '+30', 'unit': 'SECONDS'},
'facet-widget': {'coeff': '+10', 'unit': 'MINUTES'},
},
'12HOURS': {
'histogram-widget': {'coeff': '+7', 'unit': 'MINUTES'},
'bucket-widget': {'coeff': '+7', 'unit': 'MINUTES'},
'bar-widget': {'coeff': '+7', 'unit': 'MINUTES'},
'facet-widget': {'coeff': '+1', 'unit': 'HOURS'},
},
'1DAYS': {
'histogram-widget': {'coeff': '+15', 'unit': 'MINUTES'},
'bucket-widget': {'coeff': '+15', 'unit': 'MINUTES'},
'bar-widget': {'coeff': '+15', 'unit': 'MINUTES'},
'facet-widget': {'coeff': '+3', 'unit': 'HOURS'},
},
'2DAYS': {
'histogram-widget': {'coeff': '+30', 'unit': 'MINUTES'},
'bucket-widget': {'coeff': '+30', 'unit': 'MINUTES'},
'bar-widget': {'coeff': '+30', 'unit': 'MINUTES'},
'facet-widget': {'coeff': '+6', 'unit': 'HOURS'},
},
'7DAYS': {
'histogram-widget': {'coeff': '+3', 'unit': 'HOURS'},
'bucket-widget': {'coeff': '+3', 'unit': 'HOURS'},
'bar-widget': {'coeff': '+3', 'unit': 'HOURS'},
'facet-widget': {'coeff': '+1', 'unit': 'DAYS'},
},
'1MONTHS': {
'histogram-widget': {'coeff': '+12', 'unit': 'HOURS'},
'bucket-widget': {'coeff': '+12', 'unit': 'HOURS'},
'bar-widget': {'coeff': '+12', 'unit': 'HOURS'},
'facet-widget': {'coeff': '+5', 'unit': 'DAYS'},
},
'3MONTHS': {
'histogram-widget': {'coeff': '+1', 'unit': 'DAYS'},
'bucket-widget': {'coeff': '+1', 'unit': 'DAYS'},
'bar-widget': {'coeff': '+1', 'unit': 'DAYS'},
'facet-widget': {'coeff': '+30', 'unit': 'DAYS'},
},
'1YEARS': {
'histogram-widget': {'coeff': '+3', 'unit': 'DAYS'},
'bucket-widget': {'coeff': '+3', 'unit': 'DAYS'},
'bar-widget': {'coeff': '+3', 'unit': 'DAYS'},
'facet-widget': {'coeff': '+12', 'unit': 'MONTHS'},
},
'2YEARS': {
'histogram-widget': {'coeff': '+7', 'unit': 'DAYS'},
'bucket-widget': {'coeff': '+7', 'unit': 'DAYS'},
'bar-widget': {'coeff': '+7', 'unit': 'DAYS'},
'facet-widget': {'coeff': '+3', 'unit': 'MONTHS'},
},
'10YEARS': {
'histogram-widget': {'coeff': '+1', 'unit': 'MONTHS'},
'bucket-widget': {'coeff': '+1', 'unit': 'MONTHS'},
'bar-widget': {'coeff': '+1', 'unit': 'MONTHS'},
'facet-widget': {'coeff': '+1', 'unit': 'YEARS'},
}
}
time_field = collection['timeFilter'].get('field')
if time_field and (collection['timeFilter']['value'] != 'all' or collection['timeFilter']['type'] == 'fixed'):
# fqs overrides main time filter
fq_time_ids = [fq['id'] for fq in query['fqs'] if fq['field'] == time_field]
props['time_filter_overrides'] = fq_time_ids
props['time_field'] = time_field
if collection['timeFilter']['type'] == 'rolling':
props['field'] = collection['timeFilter']['field']
props['from'] = 'NOW-%s' % collection['timeFilter']['value']
props['to'] = 'NOW'
props['gap'] = GAPS.get(collection['timeFilter']['value'])
elif collection['timeFilter']['type'] == 'fixed':
props['field'] = collection['timeFilter']['field']
props['from'] = collection['timeFilter']['from']
props['to'] = collection['timeFilter']['to']
props['fixed'] = True
return props
def _get_time_filter_query(self, timeFilter, facet):
if 'fixed' in timeFilter:
props = {}
stat_facet = {'min': timeFilter['from'], 'max': timeFilter['to']}
_compute_range_facet(facet['widgetType'], stat_facet, props, stat_facet['min'], stat_facet['max'])
gap = props['gap']
unit = re.split('\d+', gap)[1]
return {
'start': '%(from)s/%(unit)s' % {'from': timeFilter['from'], 'unit': unit},
'end': '%(to)s/%(unit)s' % {'to': timeFilter['to'], 'unit': unit},
'gap': '%(gap)s' % props, # add a 'auto'
}
else:
gap = timeFilter['gap'][facet['widgetType']]
return {
'start': '%(from)s/%(unit)s' % {'from': timeFilter['from'], 'unit': gap['unit']},
'end': '%(to)s/%(unit)s' % {'to': timeFilter['to'], 'unit': gap['unit']},
'gap': '%(coeff)s%(unit)s/%(unit)s' % gap, # add a 'auto'
}
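# For example, a rolling '7DAYS' time filter rendered for a 'facet-widget'
# (coeff '+1', unit 'DAYS' in GAPS above) yields
#   {'start': 'NOW-7DAYS/DAYS', 'end': 'NOW/DAYS', 'gap': '+1DAYS/DAYS'}.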
def _get_fq(self, collection, query):
params = ()
timeFilter = {}
if collection:
timeFilter = self._get_range_borders(collection, query)
if timeFilter and not timeFilter.get('time_filter_overrides'):
params += (('fq', urllib.unquote(utf_quoter('%(field)s:[%(from)s TO %(to)s]' % timeFilter))),)
# Merge facets queries on same fields
grouped_fqs = groupby(query['fqs'], lambda x: (x['type'], x['field']))
merged_fqs = []
for key, group in grouped_fqs:
field_fq = next(group)
for fq in group:
for f in fq['filter']:
field_fq['filter'].append(f)
merged_fqs.append(field_fq)
for fq in merged_fqs:
if fq['type'] == 'field':
fields = fq['field'] if type(fq['field']) == list else [fq['field']] # 2D facets support
for field in fields:
f = []
for _filter in fq['filter']:
values = _filter['value'] if type(_filter['value']) == list else [_filter['value']] # 2D facets support
if fields.index(field) < len(values): # Lowest common field denominator
value = values[fields.index(field)]
exclude = '-' if _filter['exclude'] else ''
if value is not None and ' ' in force_unicode(value):
value = force_unicode(value).replace('"', '\\"')
f.append('%s%s:"%s"' % (exclude, field, value))
else:
f.append('%s{!field f=%s}%s' % (exclude, field, value))
_params = '{!tag=%(id)s}' % fq + ' '.join(f)
params += (('fq', urllib.unquote(utf_quoter(_params))),)
elif fq['type'] == 'range':
params += (('fq', '{!tag=%(id)s}' % fq + ' '.join([urllib.unquote(
utf_quoter('%s%s:[%s TO %s}' % ('-' if field['exclude'] else '', fq['field'], f['from'], f['to']))) for field, f in zip(fq['filter'], fq['properties'])])),)
elif fq['type'] == 'range-up':
params += (('fq', '{!tag=%(id)s}' % fq + ' '.join([urllib.unquote(
utf_quoter('%s%s:[%s TO %s}' % ('-' if field['exclude'] else '', fq['field'], f['from'] if fq['is_up'] else '*', '*' if fq['is_up'] else f['from'])))
for field, f in zip(fq['filter'], fq['properties'])])),)
elif fq['type'] == 'map':
_keys = fq.copy()
_keys.update(fq['properties'])
params += (('fq', '{!tag=%(id)s}' % fq + urllib.unquote(
utf_quoter('%(lat)s:[%(lat_sw)s TO %(lat_ne)s} AND %(lon)s:[%(lon_sw)s TO %(lon_ne)s}' % _keys))),)
return params
def query(self, collection, query):
solr_query = {}
solr_query['collection'] = collection['name']
if query.get('download'):
solr_query['rows'] = 1000
solr_query['start'] = 0
else:
solr_query['rows'] = int(collection['template']['rows'] or 10)
solr_query['start'] = int(query['start'])
solr_query['rows'] = min(solr_query['rows'], 1000)
solr_query['start'] = min(solr_query['start'], 10000)
params = self._get_params() + (
('q', self._get_q(query)),
('wt', 'json'),
('rows', solr_query['rows']),
('start', solr_query['start']),
)
if any(collection['facets']):
params += (
('facet', 'true'),
('facet.mincount', 0),
('facet.limit', 10),
)
json_facets = {}
timeFilter = self._get_range_borders(collection, query)
for facet in collection['facets']:
if facet['type'] == 'query':
params += (('facet.query', '%s' % facet['field']),)
elif facet['type'] == 'range' or facet['type'] == 'range-up':
keys = {
'id': '%(id)s' % facet,
'field': facet['field'],
'key': '%(field)s-%(id)s' % facet,
'start': facet['properties']['start'],
'end': facet['properties']['end'],
'gap': facet['properties']['gap'],
'mincount': int(facet['properties']['mincount'])
}
if timeFilter and timeFilter['time_field'] == facet['field'] and (facet['id'] not in timeFilter['time_filter_overrides'] or facet['widgetType'] != 'histogram-widget'):
keys.update(self._get_time_filter_query(timeFilter, facet))
params += (
('facet.range', '{!key=%(key)s ex=%(id)s f.%(field)s.facet.range.start=%(start)s f.%(field)s.facet.range.end=%(end)s f.%(field)s.facet.range.gap=%(gap)s f.%(field)s.facet.mincount=%(mincount)s}%(field)s' % keys),
)
elif facet['type'] == 'field':
keys = {
'id': '%(id)s' % facet,
'field': facet['field'],
'key': '%(field)s-%(id)s' % facet,
'limit': int(facet['properties'].get('limit', 10)) + (1 if facet['widgetType'] == 'facet-widget' else 0),
'mincount': int(facet['properties']['mincount'])
}
params += (
('facet.field', '{!key=%(key)s ex=%(id)s f.%(field)s.facet.limit=%(limit)s f.%(field)s.facet.mincount=%(mincount)s}%(field)s' % keys),
)
elif facet['type'] == 'nested':
_f = {
'field': facet['field'],
'limit': int(facet['properties'].get('limit', 10)) + (1 if facet['widgetType'] == 'facet-widget' else 0),
'mincount': int(facet['properties']['mincount'])
}
if 'start' in facet['properties']:
_f.update({
'type': 'range',
'start': facet['properties']['start'],
'end': facet['properties']['end'],
'gap': facet['properties']['gap'],
})
if timeFilter and timeFilter['time_field'] == facet['field'] and (facet['id'] not in timeFilter['time_filter_overrides'] or facet['widgetType'] != 'bucket-widget'):
_f.update(self._get_time_filter_query(timeFilter, facet))
else:
_f.update({
'type': 'terms',
'field': facet['field'],
'excludeTags': facet['id']
})
if facet['properties']['facets']:
if facet['properties']['facets'][0]['aggregate'] == 'count':
_f['facet'] = {
'd2': {
'type': 'terms',
'field': '%(field)s' % facet['properties']['facets'][0],
'limit': int(facet['properties']['facets'][0].get('limit', 10)),
'mincount': int(facet['properties']['facets'][0]['mincount'])
}
}
if len(facet['properties']['facets']) > 1: # Get 3rd dimension calculation
_f['facet']['d2']['facet'] = {
'd2': self._get_aggregate_function(facet['properties']['facets'][1])
}
else:
_f['facet'] = {
'd2': self._get_aggregate_function(facet['properties']['facets'][0])
}
json_facets[facet['id']] = _f
elif facet['type'] == 'function':
json_facets[facet['id']] = self._get_aggregate_function(facet)
json_facets['processEmpty'] = True
elif facet['type'] == 'pivot':
if facet['properties']['facets'] or facet['widgetType'] == 'map-widget':
fields = facet['field']
fields_limits = []
for f in facet['properties']['facets']:
fields_limits.append('f.%s.facet.limit=%s' % (f['field'], f['limit']))
fields_limits.append('f.%s.facet.mincount=%s' % (f['field'], f['mincount']))
fields += ',' + f['field']
keys = {
'id': '%(id)s' % facet,
'key': '%(field)s-%(id)s' % facet,
'field': facet['field'],
'fields': fields,
'limit': int(facet['properties'].get('limit', 10)),
'mincount': int(facet['properties']['mincount']),
'fields_limits': ' '.join(fields_limits)
}
params += (
('facet.pivot', '{!key=%(key)s ex=%(id)s f.%(field)s.facet.limit=%(limit)s f.%(field)s.facet.mincount=%(mincount)s %(fields_limits)s}%(fields)s' % keys),
)
if json_facets:
params += (
('json.facet', json.dumps(json_facets)),
)
params += self._get_fq(collection, query)
if collection['template']['fieldsSelected'] and collection['template']['isGridLayout']:
fields = set(collection['template']['fieldsSelected'] + ([collection['idField']] if collection['idField'] else []))
# Add field if needed
if collection['template']['leafletmap'].get('latitudeField'):
fields.add(collection['template']['leafletmap']['latitudeField'])
if collection['template']['leafletmap'].get('longitudeField'):
fields.add(collection['template']['leafletmap']['longitudeField'])
if collection['template']['leafletmap'].get('labelField'):
fields.add(collection['template']['leafletmap']['labelField'])
params += (('fl', urllib.unquote(utf_quoter(','.join(list(fields))))),)
else:
params += (('fl', '*'),)
params += (
('hl', 'true'),
('hl.fl', '*'),
('hl.snippets', 5),
('hl.fragsize', 1000),
)
if collection['template']['fieldsSelected']:
fields = []
for field in collection['template']['fieldsSelected']:
attribute_field = filter(lambda attribute: field == attribute['name'], collection['template']['fieldsAttributes'])
if attribute_field:
if attribute_field[0]['sort']['direction']:
fields.append('%s %s' % (field, attribute_field[0]['sort']['direction']))
if fields:
params += (
('sort', ','.join(fields)),
)
response = self._root.get('%(collection)s/select' % solr_query, params)
return self._get_json(response)
def suggest(self, collection, query):
try:
params = self._get_params() + (
('suggest', 'true'),
('suggest.build', 'true'),
('suggest.q', query['q']),
('wt', 'json'),
)
if query.get('dictionary'):
params += (
('suggest.dictionary', query['dictionary']),
)
response = self._root.get('%s/suggest' % collection, params)
return self._get_json(response)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def collections(self): # To drop, used in indexer v1
try:
params = self._get_params() + (
('detail', 'true'),
('path', '/clusterstate.json'),
)
response = self._root.get('zookeeper', params=params)
return json.loads(response['znode'].get('data', '{}'))
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def collections2(self):
try:
params = self._get_params() + (
('action', 'LIST'),
('wt', 'json'),
)
return self._root.get('admin/collections', params=params)['collections']
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def configs(self):
try:
params = self._get_params() + (
('action', 'LIST'),
('wt', 'json'),
)
return self._root.get('admin/configs', params=params)['configSets']
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def aliases(self):
try:
params = self._get_params() + ( # Waiting for SOLR-4968
('detail', 'true'),
('path', '/aliases.json'),
)
response = self._root.get('zookeeper', params=params)
return json.loads(response['znode'].get('data', '{}')).get('collection', {})
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def collection_or_core(self, hue_collection):
if hue_collection.is_core_only:
return self.core(hue_collection.name)
else:
return self.collection(hue_collection.name)
def collection(self, name):
try:
collections = self.collections()
return collections[name]
except Exception, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def create_collection(self, name, shards=1, replication=1):
try:
params = self._get_params() + (
('action', 'CREATE'),
('name', name),
('numShards', shards),
('replicationFactor', replication),
('collection.configName', name),
('wt', 'json')
)
response = self._root.post('admin/collections', params=params, contenttype='application/json')
if 'success' in response:
return True
else:
LOG.error("Could not create collection. Check response:\n%s" % json.dumps(response, indent=2))
return False
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def create_core(self, name, instance_dir, shards=1, replication=1):
try:
params = self._get_params() + (
('action', 'CREATE'),
('name', name),
('instanceDir', instance_dir),
('wt', 'json'),
)
response = self._root.post('admin/cores', params=params, contenttype='application/json')
if response.get('responseHeader', {}).get('status', -1) == 0:
return True
else:
LOG.error("Could not create core. Check response:\n%s" % json.dumps(response, indent=2))
return False
except RestException, e:
if 'already exists' in e.message:
LOG.warn("Could not create collection.", exc_info=True)
return False
else:
raise PopupException(e, title=_('Error while accessing Solr'))
def create_or_modify_alias(self, name, collections):
try:
params = self._get_params() + (
('action', 'CREATEALIAS'),
('name', name),
('collections', ','.join(collections)),
('wt', 'json'),
)
response = self._root.post('admin/collections', params=params, contenttype='application/json')
if response.get('responseHeader', {}).get('status', -1) != 0:
msg = _("Could not create or edit alias. Check response:\n%s") % json.dumps(response, indent=2)
LOG.error(msg)
raise PopupException(msg)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def delete_alias(self, name):
try:
params = self._get_params() + (
('action', 'DELETEALIAS'),
('name', name),
('wt', 'json'),
)
response = self._root.post('admin/collections', params=params, contenttype='application/json')
if response.get('responseHeader', {}).get('status', -1) != 0:
msg = _("Could not delete alias. Check response:\n%s") % json.dumps(response, indent=2)
LOG.error(msg)
raise PopupException(msg)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def remove_collection(self, name, replication=1):
try:
params = self._get_params() + (
('action', 'DELETE'),
('name', name),
('replicationFactor', replication),
('wt', 'json')
)
response = self._root.post('admin/collections', params=params, contenttype='application/json')
if 'success' in response:
return True
else:
LOG.error("Could not remove collection. Check response:\n%s" % json.dumps(response, indent=2))
return False
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def remove_core(self, name):
try:
params = self._get_params() + (
('action', 'UNLOAD'),
('name', name),
('deleteIndex', 'true'),
('wt', 'json')
)
response = self._root.post('admin/cores', params=params, contenttype='application/json')
if 'success' in response:
return True
else:
LOG.error("Could not remove core. Check response:\n%s" % json.dumps(response, indent=2))
return False
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def add_fields(self, collection, fields):
try:
params = self._get_params()
return self._root.post('%s/schema/fields' % collection, params=params, data=json.dumps(fields), contenttype='application/json')
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def cores(self):
try:
params = self._get_params() + (
('wt', 'json'),
)
return self._root.get('admin/cores', params=params)['status']
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def core(self, core):
try:
params = self._get_params() + (
('wt', 'json'),
('core', core),
)
return self._root.get('admin/cores', params=params)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def schema(self, core):
try:
params = self._get_params() + (
('wt', 'json'),
('file', 'schema.xml'),
)
return self._root.get('%(core)s/admin/file' % {'core': core}, params=params)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def fields(self, core, dynamic=False):
try:
params = self._get_params() + (
('wt', 'json'),
('fl', '*'),
)
if not dynamic:
params += (('show', 'schema'),)
response = self._root.get('%(core)s/admin/luke' % {'core': core}, params=params)
return self._get_json(response)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def luke(self, core):
try:
params = self._get_params() + (
('wt', 'json'),
)
response = self._root.get('%(core)s/admin/luke' % {'core': core}, params=params)
return self._get_json(response)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def schema_fields(self, core):
try:
params = self._get_params() + (
('wt', 'json'),
)
response = self._root.get('%(core)s/schema/fields' % {'core': core}, params=params)
return self._get_json(response)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def stats(self, core, fields, query=None, facet=''):
try:
params = self._get_params() + (
('q', self._get_q(query) if query is not None else EMPTY_QUERY.get()),
('wt', 'json'),
('rows', 0),
('stats', 'true'),
)
if query is not None:
params += self._get_fq(None, query)
if facet:
params += (('stats.facet', facet),)
params += tuple([('stats.field', field) for field in fields])
response = self._root.get('%(core)s/select' % {'core': core}, params=params)
return self._get_json(response)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def terms(self, core, field, properties=None):
try:
params = self._get_params() + (
('wt', 'json'),
('rows', 0),
('terms.fl', field),
)
if properties:
for key, val in properties.iteritems():
params += ((key, val),)
response = self._root.get('%(core)s/terms' % {'core': core}, params=params)
return self._get_json(response)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def sql(self, collection, statement):
try:
if 'limit' not in statement.lower(): # rows is not supported
statement = statement + ' LIMIT 100'
params = self._get_params() + (
('wt', 'json'),
('rows', 0),
('stmt', statement),
('rows', 100),
('start', 0),
)
response = self._root.get('%(collection)s/sql' % {'collection': collection}, params=params)
return self._get_json(response)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def get(self, core, doc_id):
try:
params = self._get_params() + (
('id', doc_id),
('wt', 'json'),
)
response = self._root.get('%(core)s/get' % {'core': core}, params=params)
return self._get_json(response)
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
@classmethod
def _get_json(cls, response):
if type(response) != dict:
# Got 'text/plain' mimetype instead of 'application/json'
try:
response = json.loads(response)
except ValueError, e:
# Got some null bytes in the response
LOG.error('%s: %s' % (unicode(e), repr(response)))
response = json.loads(response.replace('\x00', ''))
return response
def uniquekey(self, collection):
try:
params = self._get_params() + (
('wt', 'json'),
)
response = self._root.get('%s/schema/uniquekey' % collection, params=params)
return self._get_json(response)['uniqueKey']
except RestException, e:
raise PopupException(e, title=_('Error while accessing Solr'))
def update(self, collection_or_core_name, data, content_type='csv', version=None):
if content_type == 'csv':
content_type = 'application/csv'
elif content_type == 'json':
content_type = 'application/json'
else:
LOG.error("Trying to update collection %s with content type %s. Allowed content types: csv/json" % (collection_or_core_name, content_type))
params = self._get_params() + (
('wt', 'json'),
('overwrite', 'true'),
)
if version is not None:
params += (
('_version_', version),
('versions', 'true')
)
response = self._root.post('%s/update' % collection_or_core_name, contenttype=content_type, params=params, data=data)
return self._get_json(response)
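# The admin handlers above (create_core, create_or_modify_alias, delete_alias)
# all decide success the same way: a parsed Solr JSON response is OK when
# responseHeader.status == 0. The standalone helper below is a sketch of that
# check for illustration only; its name is made up and nothing in this module
# calls it.
def _solr_response_ok(response):
  """Return True when a parsed Solr JSON response reports status 0."""
  return response.get('responseHeader', {}).get('status', -1) == 0
# Example: _solr_response_ok({'responseHeader': {'status': 0}}) -> True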
|
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from metal.utils import set_seed
class Encoder(nn.Module):
"""The Encoder implements the encode() method, which maps a batch of data to
encoded output of dimension [batch_size, max_seq_len, encoded_size]
The first argument must be the encoded size of the Encoder output.
Args:
encoded_size: (int) Output feature dimension of the Encoder
"""
def __init__(self, encoded_size, verbose=True):
super().__init__()
self.encoded_size = encoded_size
def encode(self, X):
"""
Args:
X: (torch.LongTensor) of shape [batch_size, max_seq_length,
encoded_size], with all-0s vectors as padding.
"""
assert X.shape[-1] == self.encoded_size
return X.float()
class EmbeddingsEncoder(Encoder):
def __init__(
self,
encoded_size,
vocab_size=None,
embeddings=None,
freeze=False,
verbose=True,
seed=None,
**kwargs,
):
"""
Args:
encoded_size: (int) Output feature dimension of the Encoder, and
input feature dimension of the LSTM
vocab_size: The size of the vocabulary of the embeddings
If embeddings=None, this helps to set the size of the randomly
initialized embeddings
If embeddings != None, this is used to double check that the
provided embeddings have the intended size
embeddings: An optional embedding Tensor
freeze: If False, allow the embeddings to be updated
"""
super().__init__(encoded_size)
self.verbose = verbose
# Load provided embeddings or randomly initialize new ones
if embeddings is None:
# Note: Need to set seed here for deterministic init
if seed is not None:
set_seed(seed)
self.embeddings = nn.Embedding(vocab_size, encoded_size)
if self.verbose:
print(f"Using randomly initialized embeddings.")
else:
self.embeddings = self._load_pretrained(embeddings)
if self.verbose:
print(f"Using pretrained embeddings.")
# Freeze or not
self.embeddings.weight.requires_grad = not freeze
if self.verbose:
print(
f"Embeddings shape = ({self.embeddings.num_embeddings}, "
f"{self.embeddings.embedding_dim})"
)
print(f"The embeddings are {'' if freeze else 'NOT '}FROZEN")
def _load_pretrained(self, pretrained):
if not pretrained.dim() == 2:
msg = (
f"Provided embeddings have shape {pretrained.shape}. "
"Expected a 2-dimensional tensor."
)
raise ValueError(msg)
rows, cols = pretrained.shape
embedding = nn.Embedding(num_embeddings=rows, embedding_dim=cols)
embedding.weight.data.copy_(pretrained)
return embedding
def encode(self, X):
"""
Args:
X: (torch.LongTensor) of shape [batch_size, max_seq_length],
containing the indices of the embeddings to look up for each item in
the batch, or 0 for padding.
"""
return self.embeddings(X.long())
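# Minimal usage sketch (not part of the original module; the function name and
# toy tensors are made up) showing how EmbeddingsEncoder maps a padded batch of
# token indices to embedded vectors of dimension encoded_size.
def _embeddings_encoder_demo():
    enc = EmbeddingsEncoder(encoded_size=8, vocab_size=100, seed=123, verbose=False)
    X = torch.LongTensor([[5, 17, 2, 0, 0],   # 0s are padding
                          [9, 1, 0, 0, 0]])
    return enc.encode(X)  # FloatTensor of shape [2, 5, 8]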
class CNNEncoder(nn.Module):
def encode(self, X):
"""
Args:
X: (torch.LongTensor) of shape [batch_size, max_seq_length,
encoded_size], with all-0s vectors as padding.
"""
raise NotImplementedError()
class LSTMModule(nn.Module):
"""An LSTM-based input module"""
def __init__(
self,
encoded_size,
hidden_size,
lstm_reduction="max",
bidirectional=True,
verbose=True,
seed=None,
lstm_num_layers=1,
encoder_class=Encoder,
encoder_kwargs={},
**kwargs,
):
"""
Args:
encoded_size: (int) Feature dimension of the Encoder output that is
fed into the LSTM
hidden_size: (int) The size of the hidden layer in the LSTM
lstm_reduction: One of ['mean', 'max', 'last', 'attention']
denoting what to return as the output of the LSTMLayer
bidirectional: If True, use a bidirectional LSTM
encoder_class: An Encoder subclass whose encode() method maps from
input sequences to [batch_size, max_seq_len, feature_dim]
encoder_kwargs: Extra keyword arguments passed to the encoder_class constructor
"""
super().__init__()
self.output_dim = hidden_size * 2 if bidirectional else hidden_size
self.verbose = verbose
if seed is not None:
set_seed(seed)
# Initialize Encoder
# Note: constructing the Encoder here is helpful for e.g. Tuner, since
# all model params are then initialized here
encoder_kwargs["verbose"] = self.verbose
self.encoder = encoder_class(encoded_size, **encoder_kwargs)
self.lstm_reduction = lstm_reduction
if self.verbose:
print(f"Using lstm_reduction = '{lstm_reduction}'")
# Create lstm core
# NOTE: We only pass explicitly-named kwargs here; can always add more!
self.lstm = nn.LSTM(
self.encoder.encoded_size,
hidden_size,
num_layers=lstm_num_layers,
batch_first=True,
bidirectional=bidirectional,
)
if lstm_reduction == "attention":
att_size = hidden_size * (self.lstm.bidirectional + 1)
att_param = nn.Parameter(torch.FloatTensor(att_size, 1))
nn.init.xavier_normal_(att_param)
self.attention_param = att_param
def _attention(self, output):
# output is of shape (seq_length, hidden_size)
score = torch.matmul(output, self.attention_param).squeeze()
score = F.softmax(score, dim=0).view(output.size(0), 1)
scored_output = output * score
condensed_output = torch.sum(scored_output, dim=0)
return condensed_output
def reset_parameters(self):
# Note: Classifier.reset() calls reset_parameters() recursively on all
# children, so this method need not reset children modules such as
# nn.lstm or nn.Embedding
pass
def _reduce_output(self, outputs, seq_lengths):
"""Reduces the output of an LSTM step
Args:
outputs: (torch.FloatTensor) the hidden state outputs from the
lstm, with shape [batch_size, max_seq_length, hidden_size]
"""
batch_size = outputs.shape[0]
reduced = []
# Necessary to iterate over batch because of different sequence lengths
for i in range(batch_size):
if self.lstm_reduction == "mean":
# Average over all non-padding outputs
# Use dim=0 because first dimension disappears after indexing
reduced.append(outputs[i, : seq_lengths[i], :].mean(dim=0))
elif self.lstm_reduction == "max":
# Max-pool over all non-padding outputs
# Use dim=0 because first dimension disappears after indexing
reduced.append(outputs[i, : seq_lengths[i], :].max(dim=0)[0])
elif self.lstm_reduction == "last":
# Take the last output of the sequence (before padding starts)
# NOTE: maybe better to take first and last?
reduced.append(outputs[i, seq_lengths[i] - 1, :])
elif self.lstm_reduction == "attention":
reduced.append(self._attention(outputs[i, : seq_lengths[i], :]))
else:
msg = (
f"Did not recognize lstm kwarg 'lstm_reduction' == "
f"{self.lstm_reduction}"
)
raise ValueError(msg)
return torch.stack(reduced, dim=0)
def forward(self, X):
"""Applies one step of an lstm (plus reduction) to the input X, which
is handled by self.encoder"""
# Identify the length of each sequence: scan from the right for the first
# position that is not all-zero padding.
batch_size, max_seq = X.shape[0], X.shape[1]
seq_lengths = torch.zeros(batch_size, dtype=torch.long)
for i in range(batch_size):
for j in range(max_seq - 1, -1, -1):
if not torch.all(X[i, j] == 0):
seq_lengths[i] = j + 1
break
# Sort by length because pack_padded_sequence requires it
# Save original order to restore before returning
seq_lengths, perm_idx = seq_lengths.sort(0, descending=True)
X = X[perm_idx]
inv_perm_idx = torch.tensor(
[i for i, _ in sorted(enumerate(perm_idx), key=lambda idx: idx[1])],
dtype=torch.long,
)
# Encode and pack input sequence
X_packed = rnn_utils.pack_padded_sequence(
self.encoder.encode(X), seq_lengths, batch_first=True
)
# Run LSTM
outputs, (h_t, c_t) = self.lstm(X_packed)
# Unpack and reduce outputs
outputs_unpacked, _ = rnn_utils.pad_packed_sequence(outputs, batch_first=True)
reduced = self._reduce_output(outputs_unpacked, seq_lengths)
return reduced[inv_perm_idx, :]
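# Minimal end-to-end sketch (not part of the original module; the function name
# and toy tensors are made up) wiring an EmbeddingsEncoder into LSTMModule so a
# padded batch of token indices is reduced to one vector per sequence.
def _lstm_module_demo():
    module = LSTMModule(
        encoded_size=8,
        hidden_size=16,
        lstm_reduction="max",
        bidirectional=True,
        verbose=False,
        seed=123,
        encoder_class=EmbeddingsEncoder,
        encoder_kwargs={"vocab_size": 100, "seed": 123},
    )
    X = torch.LongTensor([[5, 17, 2, 0, 0],   # 0s are padding
                          [9, 1, 0, 0, 0]])
    return module(X)  # shape [2, 32]: hidden_size * 2 because bidirectional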
|
|
##
# Copyright (c) 2005-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from pycalendar.datetime import DateTime
from pycalendar.duration import Duration
from pycalendar.period import Period
from pycalendar.timezone import Timezone
from twext.python.log import Logger
from twisted.internet.defer import inlineCallbacks, returnValue
from twistedcaldav import caldavxml
from twistedcaldav.caldavxml import TimeRange
from twistedcaldav.config import config
from twistedcaldav.dateops import compareDateTime, normalizeToUTC, \
parseSQLTimestampToPyCalendar, clipPeriod, timeRangesOverlap, \
normalizePeriodList
from twistedcaldav.ical import Component, Property, iCalendarProductID
from twistedcaldav.instance import InstanceList
from twistedcaldav.memcacher import Memcacher
from txdav.caldav.datastore.query.filter import Filter
from txdav.caldav.icalendarstore import QueryMaxResources
from txdav.common.icommondatastore import IndexedSearchException, \
InternalDataStoreError
import uuid
log = Logger()
fbtype_mapper = {"BUSY": 0, "BUSY-TENTATIVE": 1, "BUSY-UNAVAILABLE": 2}
fbtype_index_mapper = {'B': 0, 'T': 1, 'U': 2}
fbcacher = Memcacher("FBCache", pickle=True)
class FBCacheEntry(object):
CACHE_DAYS_FLOATING_ADJUST = 1
def __init__(self, key, token, timerange, fbresults):
self.key = key
self.token = token
self.timerange = timerange
self.fbresults = fbresults
@classmethod
@inlineCallbacks
def getCacheEntry(cls, calresource, useruid, timerange):
key = str(calresource.id()) + "/" + useruid
token = (yield calresource.syncToken())
entry = (yield fbcacher.get(key))
if entry:
# Offset one day at either end to account for floating
cached_start = entry.timerange.start + Duration(days=FBCacheEntry.CACHE_DAYS_FLOATING_ADJUST)
cached_end = entry.timerange.end - Duration(days=FBCacheEntry.CACHE_DAYS_FLOATING_ADJUST)
# Verify that the requested time range lies within the cache time range
if compareDateTime(timerange.end, cached_end) <= 0 and compareDateTime(timerange.start, cached_start) >= 0:
# Verify that cached entry is still valid
if token == entry.token:
returnValue(entry.fbresults)
returnValue(None)
@classmethod
@inlineCallbacks
def makeCacheEntry(cls, calresource, useruid, timerange, fbresults):
key = str(calresource.id()) + "/" + useruid
token = (yield calresource.syncToken())
entry = cls(key, token, timerange, fbresults)
yield fbcacher.set(key, entry)
def generateFreeBusyInfo(
calresource,
fbinfo,
timerange,
matchtotal,
excludeuid=None,
organizer=None,
organizerPrincipal=None,
same_calendar_user=False,
servertoserver=False,
event_details=None,
logItems=None,
accountingItems=None,
):
"""
Get freebusy information for a calendar. Different behavior for internal vs external calendars.
See L{_internalGenerateFreeBusyInfo} for argument description.
"""
# TODO: this method really should be moved to L{CalendarObject} so the internal/external pieces
# can be split across L{CalendarObject} and L{CalendarObjectExternal}
if calresource.external():
return _externalGenerateFreeBusyInfo(
calresource,
fbinfo,
timerange,
matchtotal,
excludeuid,
organizer,
organizerPrincipal,
same_calendar_user,
servertoserver,
event_details,
logItems,
accountingItems,
)
else:
return _internalGenerateFreeBusyInfo(
calresource,
fbinfo,
timerange,
matchtotal,
excludeuid,
organizer,
organizerPrincipal,
same_calendar_user,
servertoserver,
event_details,
logItems,
accountingItems,
)
@inlineCallbacks
def _externalGenerateFreeBusyInfo(
calresource,
fbinfo,
timerange,
matchtotal,
excludeuid=None,
organizer=None,
organizerPrincipal=None,
same_calendar_user=False,
servertoserver=False,
event_details=None,
logItems=None,
accountingItems=None,
):
"""
Generate a freebusy response for an external (cross-pod) calendar by making a cross-pod call. This will bypass
any type of smart caching on this pod in favor of using caching on the pod hosting the actual calendar data.
See L{_internalGenerateFreeBusyInfo} for argument description.
"""
fbresults, matchtotal = yield calresource._txn.store().conduit.send_freebusy(calresource, timerange, matchtotal, excludeuid, organizer, organizerPrincipal, same_calendar_user, servertoserver, event_details)
for i in range(3):
fbinfo[i].extend([Period.parseText(p) for p in fbresults[i]])
returnValue(matchtotal)
@inlineCallbacks
def _internalGenerateFreeBusyInfo(
calresource,
fbinfo,
timerange,
matchtotal,
excludeuid=None,
organizer=None,
organizerPrincipal=None,
same_calendar_user=False,
servertoserver=False,
event_details=None,
logItems=None,
accountingItems=None,
):
"""
Run a free busy report on the specified calendar collection
accumulating the free busy info for later processing.
@param calresource: the L{Calendar} for a calendar collection.
@param fbinfo: the array of busy periods to update.
@param timerange: the L{TimeRange} for the query.
@param matchtotal: the running total for the number of matches.
@param excludeuid: a C{str} containing a UID value to exclude any
components with that UID from contributing to free-busy.
@param organizer: a C{str} containing the value of the ORGANIZER property
in the VFREEBUSY request. This is used in conjunction with the UID
value to process exclusions.
@param same_calendar_user: a C{bool} indicating whether the calendar user
requesting the free-busy information is the same as the calendar user
being targeted.
@param servertoserver: a C{bool} indicating whether we are doing a local or
remote lookup request.
@param event_details: a C{list} into which to store extended VEVENT details if not C{None}
@param logItems: a C{dict} to store logging info to
@param accountingItems: a C{dict} to store accounting info to
"""
# First check the privilege on this collection
# TODO: for server-to-server we bypass this right now as we have no way to authorize external users.
# TODO: actually we bypass it altogether by assuming anyone can check anyone else's freebusy
# May need organizer principal
organizer_record = (yield calresource.directoryService().recordWithCalendarUserAddress(organizer)) if organizer else None
organizer_uid = organizer_record.uid if organizer_record else ""
# Free busy is per-user
attendee_uid = calresource.viewerHome().uid()
attendee_record = yield calresource.directoryService().recordWithUID(attendee_uid.decode("utf-8"))
# Get the timezone property from the collection.
tz = calresource.getTimezone()
# Look for possible extended free busy information
rich_options = {
"organizer": False,
"delegate": False,
"resource": False,
}
do_event_details = False
if event_details is not None and organizer_record is not None and attendee_record is not None:
# Get the principal of the authorized user which may be different from the organizer if a delegate of
# the organizer is making the request
authz_uid = organizer_uid
authz_record = organizer_record
if calresource._txn._authz_uid is not None and calresource._txn._authz_uid != organizer_uid:
authz_uid = calresource._txn._authz_uid
authz_record = yield calresource.directoryService().recordWithUID(authz_uid.decode("utf-8"))
# Check if attendee is also the organizer or the delegate doing the request
if attendee_uid in (organizer_uid, authz_uid):
do_event_details = True
rich_options["organizer"] = True
# Check if authorized user is a delegate of attendee
proxy = (yield authz_record.isProxyFor(attendee_record))
if config.Scheduling.Options.DelegeteRichFreeBusy and proxy:
do_event_details = True
rich_options["delegate"] = True
# Check if attendee is room or resource
if config.Scheduling.Options.RoomResourceRichFreeBusy and attendee_record.getCUType() in ("RESOURCE", "ROOM",):
do_event_details = True
rich_options["resource"] = True
# Try cache
resources = (yield FBCacheEntry.getCacheEntry(calresource, attendee_uid, timerange)) if config.EnableFreeBusyCache else None
if resources is None:
if accountingItems is not None:
accountingItems["fb-uncached"] = accountingItems.get("fb-uncached", 0) + 1
caching = False
if config.EnableFreeBusyCache:
# Log extended item
if logItems is not None:
logItems["fb-uncached"] = logItems.get("fb-uncached", 0) + 1
# We want to cache a large range of time based on the current date
cache_start = normalizeToUTC(DateTime.getToday() + Duration(days=0 - config.FreeBusyCacheDaysBack))
cache_end = normalizeToUTC(DateTime.getToday() + Duration(days=config.FreeBusyCacheDaysForward))
# If the requested time range would fit in our allowed cache range, trigger the cache creation
if compareDateTime(timerange.start, cache_start) >= 0 and compareDateTime(timerange.end, cache_end) <= 0:
cache_timerange = TimeRange(start=cache_start.getText(), end=cache_end.getText())
caching = True
#
# What we do is a fake calendar-query for VEVENT/VFREEBUSYs in the specified time-range.
# We then take those results and merge them into one VFREEBUSY component
# with appropriate FREEBUSY properties, and return that single item as iCal data.
#
# Create fake filter element to match time-range
filter = caldavxml.Filter(
caldavxml.ComponentFilter(
caldavxml.ComponentFilter(
cache_timerange if caching else timerange,
name=("VEVENT", "VFREEBUSY", "VAVAILABILITY"),
),
name="VCALENDAR",
)
)
filter = Filter(filter)
tzinfo = filter.settimezone(tz)
if accountingItems is not None:
tr = cache_timerange if caching else timerange
accountingItems["fb-query-timerange"] = (str(tr.start), str(tr.end),)
try:
resources = yield calresource.search(filter, useruid=attendee_uid, fbtype=True)
if caching:
yield FBCacheEntry.makeCacheEntry(calresource, attendee_uid, cache_timerange, resources)
except IndexedSearchException:
raise InternalDataStoreError("Invalid indexedSearch query")
else:
if accountingItems is not None:
accountingItems["fb-cached"] = accountingItems.get("fb-cached", 0) + 1
# Log extended item
if logItems is not None:
logItems["fb-cached"] = logItems.get("fb-cached", 0) + 1
# Determine appropriate timezone (UTC is the default)
tzinfo = tz.gettimezone() if tz is not None else Timezone(utc=True)
# We care about separate instances for VEVENTs only
aggregated_resources = {}
for name, uid, type, test_organizer, float, start, end, fbtype, transp in resources:
if transp == 'T' and fbtype != '?':
fbtype = 'F'
aggregated_resources.setdefault((name, uid, type, test_organizer,), []).append((float, start, end, fbtype,))
if accountingItems is not None:
accountingItems["fb-resources"] = {}
for k, v in aggregated_resources.items():
name, uid, type, test_organizer = k
accountingItems["fb-resources"][uid] = []
for float, start, end, fbtype in v:
fbstart = parseSQLTimestampToPyCalendar(start)
if float == 'Y':
fbstart.setTimezone(tzinfo)
else:
fbstart.setTimezone(Timezone(utc=True))
fbend = parseSQLTimestampToPyCalendar(end)
if float == 'Y':
fbend.setTimezone(tzinfo)
else:
fbend.setTimezone(Timezone(utc=True))
accountingItems["fb-resources"][uid].append((
float,
str(fbstart),
str(fbend),
fbtype,
))
for key in aggregated_resources.iterkeys():
name, uid, type, test_organizer = key
# Short-cut - if an fbtype exists we can use that
if type == "VEVENT" and aggregated_resources[key][0][3] != '?':
matchedResource = False
# Look at each instance
for float, start, end, fbtype in aggregated_resources[key]:
# Ignore free time or unknown
if fbtype in ('F', '?'):
continue
# Ignore ones of this UID
if excludeuid:
# See if we have a UID match
if (excludeuid == uid):
test_record = (yield calresource.directoryService().recordWithCalendarUserAddress(test_organizer)) if test_organizer else None
test_uid = test_record.uid if test_record else ""
# Check that ORGANIZER's match (security requirement)
if (organizer is None) or (organizer_uid == test_uid):
continue
# Check for no ORGANIZER and check by same calendar user
elif (test_uid == "") and same_calendar_user:
continue
# Apply a timezone to any floating times
fbstart = parseSQLTimestampToPyCalendar(start)
if float == 'Y':
fbstart.setTimezone(tzinfo)
else:
fbstart.setTimezone(Timezone(utc=True))
fbend = parseSQLTimestampToPyCalendar(end)
if float == 'Y':
fbend.setTimezone(tzinfo)
else:
fbend.setTimezone(Timezone(utc=True))
# Clip instance to time range
clipped = clipPeriod(Period(fbstart, duration=fbend - fbstart), Period(timerange.start, timerange.end))
# Double check for overlap
if clipped:
matchedResource = True
fbinfo[fbtype_index_mapper.get(fbtype, 0)].append(clipped)
if matchedResource:
# Check size of results is within limit
matchtotal += 1
if matchtotal > config.MaxQueryWithDataResults:
raise QueryMaxResources(config.MaxQueryWithDataResults, matchtotal)
# Add extended details
if do_event_details:
child = (yield calresource.calendarObjectWithName(name))
# Only add fully public events
if not child.accessMode or child.accessMode == Component.ACCESS_PUBLIC:
calendar = (yield child.componentForUser())
_addEventDetails(calendar, event_details, rich_options, timerange, tzinfo)
else:
child = (yield calresource.calendarObjectWithName(name))
calendar = (yield child.componentForUser())
# The calendar may come back as None if the resource is being changed, or was deleted
# between our initial index query and getting here. For now we will ignore this error, but in
# the longer term we need to implement some form of locking, perhaps.
if calendar is None:
log.error("Calendar %s is missing from calendar collection %r" % (name, calresource))
continue
# Ignore ones of this UID
if excludeuid:
# See if we have a UID match
if (excludeuid == uid):
test_organizer = calendar.getOrganizer()
test_record = (yield calresource.principalForCalendarUserAddress(test_organizer)) if test_organizer else None
test_uid = test_record.principalUID() if test_record else ""
# Check that ORGANIZER's match (security requirement)
if (organizer is None) or (organizer_uid == test_uid):
continue
# Check for no ORGANIZER and check by same calendar user
elif (test_organizer is None) and same_calendar_user:
continue
if accountingItems is not None:
accountingItems.setdefault("fb-filter-match", []).append(uid)
if filter.match(calendar, None):
if accountingItems is not None:
accountingItems.setdefault("fb-filter-matched", []).append(uid)
# Check size of results is within limit
matchtotal += 1
if matchtotal > config.MaxQueryWithDataResults:
raise QueryMaxResources(config.MaxQueryWithDataResults, matchtotal)
if calendar.mainType() == "VEVENT":
processEventFreeBusy(calendar, fbinfo, timerange, tzinfo)
elif calendar.mainType() == "VFREEBUSY":
processFreeBusyFreeBusy(calendar, fbinfo, timerange)
elif calendar.mainType() == "VAVAILABILITY":
processAvailabilityFreeBusy(calendar, fbinfo, timerange)
else:
assert "Free-busy query returned unwanted component: %s in %r", (name, calresource,)
# Add extended details
if calendar.mainType() == "VEVENT" and do_event_details:
child = (yield calresource.calendarObjectWithName(name))
# Only add fully public events
if not child.accessMode or child.accessMode == Component.ACCESS_PUBLIC:
calendar = (yield child.componentForUser())
_addEventDetails(calendar, event_details, rich_options, timerange, tzinfo)
returnValue(matchtotal)
def _addEventDetails(calendar, event_details, rich_options, timerange, tzinfo):
"""
Expand events within the specified time range and limit the set of properties to those allowed for
delegate extended free busy.
@param calendar: the calendar object to expand
@type calendar: L{Component}
@param event_details: list to append VEVENT components to
@type event_details: C{list}
@param timerange: the time-range in which to expand
@type timerange: L{TimeRange}
@param tzinfo: timezone for floating time calculations
@type tzinfo: L{Timezone}
"""
# First expand the component
expanded = calendar.expand(timerange.start, timerange.end, timezone=tzinfo)
keep_props = (
"UID",
"RECURRENCE-ID",
"DTSTAMP",
"DTSTART",
"DTEND",
"DURATION",
)
if rich_options["organizer"] or rich_options["delegate"]:
keep_props += ("SUMMARY",)
if rich_options["organizer"] or rich_options["resource"]:
keep_props += ("ORGANIZER",)
# Remove all but essential properties
expanded.filterProperties(keep=keep_props)
# Need to remove all child components of VEVENT
for subcomponent in expanded.subcomponents():
if subcomponent.name() == "VEVENT":
for sub in tuple(subcomponent.subcomponents()):
subcomponent.removeComponent(sub)
event_details.extend([subcomponent for subcomponent in expanded.subcomponents() if subcomponent.name() == "VEVENT"])
def processEventFreeBusy(calendar, fbinfo, timerange, tzinfo):
"""
Extract free busy data from a VEVENT component.
@param calendar: the L{Component} that is the VCALENDAR containing the VEVENT's.
@param fbinfo: the tuple used to store the three types of fb data.
@param timerange: the time range to restrict free busy data to.
@param tzinfo: the L{Timezone} for the timezone to use for floating/all-day events.
"""
# Expand out the set of instances for the event with in the required range
instances = calendar.expandTimeRanges(timerange.end, lowerLimit=timerange.start, ignoreInvalidInstances=True)
# Can only do timed events
for key in instances:
instance = instances[key]
if instance.start.isDateOnly():
return
break
else:
return
for key in instances:
instance = instances[key]
# Apply a timezone to any floating times
fbstart = instance.start
if fbstart.floating():
fbstart.setTimezone(tzinfo)
fbend = instance.end
if fbend.floating():
fbend.setTimezone(tzinfo)
# Check TRANSP property of underlying component
if instance.component.hasProperty("TRANSP"):
# If it's TRANSPARENT we always ignore it
if instance.component.propertyValue("TRANSP") == "TRANSPARENT":
continue
# Determine status
if instance.component.hasProperty("STATUS"):
status = instance.component.propertyValue("STATUS")
else:
status = "CONFIRMED"
# Ignore cancelled
if status == "CANCELLED":
continue
# Clip period for this instance - use duration for period end if that
# is what original component used
if instance.component.hasProperty("DURATION"):
period = Period(fbstart, duration=fbend - fbstart)
else:
period = Period(fbstart, fbend)
clipped = clipPeriod(period, Period(timerange.start, timerange.end))
# Double check for overlap
if clipped:
if status == "TENTATIVE":
fbinfo[1].append(clipped)
else:
fbinfo[0].append(clipped)
def processFreeBusyFreeBusy(calendar, fbinfo, timerange):
"""
Extract FREEBUSY data from a VFREEBUSY component.
@param calendar: the L{Component} that is the VCALENDAR containing the VFREEBUSY's.
@param fbinfo: the tuple used to store the three types of fb data.
@param timerange: the time range to restrict free busy data to.
"""
for vfb in [x for x in calendar.subcomponents() if x.name() == "VFREEBUSY"]:
# First check any start/end in the actual component
start = vfb.getStartDateUTC()
end = vfb.getEndDateUTC()
if start and end:
if not timeRangesOverlap(start, end, timerange.start, timerange.end):
continue
# Now look at each FREEBUSY property
for fb in vfb.properties("FREEBUSY"):
# Check the type
fbtype = fb.parameterValue("FBTYPE", default="BUSY")
if fbtype == "FREE":
continue
# Look at each period in the property
assert isinstance(fb.value(), list), "FREEBUSY property does not contain a list of values: %r" % (fb,)
for period in fb.value():
# Clip period for this instance
clipped = clipPeriod(period.getValue(), Period(timerange.start, timerange.end))
if clipped:
fbinfo[fbtype_mapper.get(fbtype, 0)].append(clipped)
def processAvailabilityFreeBusy(calendar, fbinfo, timerange):
"""
Extract free-busy data from a VAVAILABILITY component.
@param calendar: the L{Component} that is the VCALENDAR containing the VAVAILABILITY's.
@param fbinfo: the tuple used to store the three types of fb data.
@param timerange: the time range to restrict free busy data to.
"""
for vav in [x for x in calendar.subcomponents() if x.name() == "VAVAILABILITY"]:
# Get overall start/end
start = vav.getStartDateUTC()
if start is None:
start = DateTime(1900, 1, 1, 0, 0, 0, tzid=Timezone(utc=True))
end = vav.getEndDateUTC()
if end is None:
end = DateTime(2100, 1, 1, 0, 0, 0, tzid=Timezone(utc=True))
period = Period(start, end)
overall = clipPeriod(period, Period(timerange.start, timerange.end))
if overall is None:
continue
# Now get periods for each instance of AVAILABLE sub-components
periods = processAvailablePeriods(vav, timerange)
# Now invert the periods and store in accumulator
busyperiods = []
last_end = timerange.start
for period in periods:
if last_end < period.getStart():
busyperiods.append(Period(last_end, period.getStart()))
last_end = period.getEnd()
if last_end < timerange.end:
busyperiods.append(Period(last_end, timerange.end))
# Add to actual results mapped by busy type
fbtype = vav.propertyValue("BUSYTYPE")
if fbtype is None:
fbtype = "BUSY-UNAVAILABLE"
fbinfo[fbtype_mapper.get(fbtype, 2)].extend(busyperiods)
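# The inversion above (AVAILABLE periods -> busy periods) can be hard to follow
# inline, so here is a standalone sketch of the same idea using plain
# (start, end) tuples instead of Period objects. It is illustrative only and is
# not called by anything in this module.
def _invert_available_periods(available, range_start, range_end):
    """Given sorted, non-overlapping (start, end) tuples of available time,
    return the complementary busy (start, end) tuples within the query range."""
    busy = []
    last_end = range_start
    for start, end in available:
        if last_end < start:
            busy.append((last_end, start))
        last_end = end
    if last_end < range_end:
        busy.append((last_end, range_end))
    return busy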
def processAvailablePeriods(calendar, timerange):
"""
Extract instance period data from an AVAILABLE component.
@param calendar: the L{Component} that is the VAVAILABILITY containing the AVAILABLE's.
@param timerange: the time range to restrict free busy data to.
"""
periods = []
# First we need to group all AVAILABLE sub-components by UID
uidmap = {}
for component in calendar.subcomponents():
if component.name() == "AVAILABLE":
uid = component.propertyValue("UID")
uidmap.setdefault(uid, []).append(component)
# Then we expand each uid set separately
for componentSet in uidmap.itervalues():
instances = InstanceList(ignoreInvalidInstances=True)
instances.expandTimeRanges(componentSet, timerange.end)
# Now convert instances into period list
for key in instances:
instance = instances[key]
# Ignore any with floating times (which should not happen as the spec requires UTC or local
# but we will try and be safe here).
start = instance.start
if start.floating():
continue
end = instance.end
if end.floating():
continue
# Clip period for this instance - use duration for period end if that
# is what original component used
if instance.component.hasProperty("DURATION"):
period = Period(start, duration=end - start)
else:
period = Period(start, end)
clipped = clipPeriod(period, Period(timerange.start, timerange.end))
if clipped:
periods.append(clipped)
normalizePeriodList(periods)
return periods
def buildFreeBusyResult(fbinfo, timerange, organizer=None, attendee=None, uid=None, method=None, event_details=None):
"""
Generate a VCALENDAR object containing a single VFREEBUSY that is the
aggregate of the free busy info passed in.
@param fbinfo: the array of busy periods to use.
@param timerange: the L{TimeRange} for the query.
@param organizer: the L{Property} for the Organizer of the free busy request, or None.
@param attendee: the L{Property} for the Attendee responding to the free busy request, or None.
@param uid: the UID value from the free busy request.
@param method: the METHOD property value to insert.
@param event_details: VEVENT components to add.
@return: the L{Component} containing the calendar data.
"""
# Merge overlapping time ranges in each fb info section
normalizePeriodList(fbinfo[0])
normalizePeriodList(fbinfo[1])
normalizePeriodList(fbinfo[2])
# Now build a new calendar object with the free busy info we have
fbcalendar = Component("VCALENDAR")
fbcalendar.addProperty(Property("VERSION", "2.0"))
fbcalendar.addProperty(Property("PRODID", iCalendarProductID))
if method:
fbcalendar.addProperty(Property("METHOD", method))
fb = Component("VFREEBUSY")
fbcalendar.addComponent(fb)
if organizer is not None:
fb.addProperty(organizer)
if attendee is not None:
fb.addProperty(attendee)
fb.addProperty(Property("DTSTART", timerange.start))
fb.addProperty(Property("DTEND", timerange.end))
fb.addProperty(Property("DTSTAMP", DateTime.getNowUTC()))
if len(fbinfo[0]) != 0:
fb.addProperty(Property("FREEBUSY", fbinfo[0], {"FBTYPE": "BUSY"}))
if len(fbinfo[1]) != 0:
fb.addProperty(Property("FREEBUSY", fbinfo[1], {"FBTYPE": "BUSY-TENTATIVE"}))
if len(fbinfo[2]) != 0:
fb.addProperty(Property("FREEBUSY", fbinfo[2], {"FBTYPE": "BUSY-UNAVAILABLE"}))
if uid is not None:
fb.addProperty(Property("UID", uid))
else:
uid = str(uuid.uuid4())
fb.addProperty(Property("UID", uid))
if event_details:
for vevent in event_details:
fbcalendar.addComponent(vevent)
return fbcalendar
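# Throughout this module fbinfo is a 3-slot accumulator of period lists indexed
# by busy type: 0 = BUSY, 1 = BUSY-TENTATIVE, 2 = BUSY-UNAVAILABLE (see
# fbtype_mapper / fbtype_index_mapper at the top of the file). The sketch below
# is illustrative only; the placeholder strings stand in for Period objects and
# nothing in this module calls it.
def _fbinfo_demo():
    fbinfo = ([], [], [])
    fbinfo[fbtype_mapper.get("BUSY-TENTATIVE", 0)].append("period-1")
    fbinfo[fbtype_index_mapper.get("U", 0)].append("period-2")
    return fbinfo  # -> ([], ["period-1"], ["period-2"])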
|
|
"""
Wrapper for the Evernote Python SDK
"""
from __future__ import print_function
from __future__ import division
import datetime
from time import sleep
import arrow
from itertools import islice
import logging
from evernote.api.client import EvernoteClient
from evernote.edam.type.ttypes import Note
from evernote.edam.notestore.ttypes import (NoteFilter,
NotesMetadataResultSpec
)
from evernote.edam.error.ttypes import (EDAMSystemException, EDAMErrorCode)
from pandas import DataFrame
from collections import defaultdict
from past.utils import old_div
from builtins import object
__all__ = ["client", "userStore", "user", "noteStore", "all_notebooks",
"notes_metadata", "sizes_of_notes", "all_tags", "tag",
"tag_counts_by_name", "tags_by_guid", "init",
"project_notes_and_tags", "projects_to_df",
"all_actions", "actions_to_df", "non_project_plus_tags",
'fix_wayward_plus_tags', 'action_note_tags',
'retire_project']
logger = logging.getLogger(__name__)
def init(auth_token, sandbox=False):
"""
enable auth_token to be set
"""
global _client, client, userStore, noteStore, user
global _notebooks, _notebook_name_dict, _notebook_guid_dict
global _tags, _tag_counts, _tags_by_name
global _tags_by_guid, _tag_counts_by_name
global _when_tags, _when_tags_guids
_client = EvernoteClient(token=auth_token, sandbox=sandbox)
client = RateLimitingEvernoteProxy(_client)
userStore = client.get_user_store()
noteStore = client.get_note_store()
user = userStore.getUser()
_notebooks = None
_notebook_name_dict = None
_notebook_guid_dict = None
_tags = None
_tag_counts = None
_tags_by_name = None
_tags_by_guid = None
_tag_counts_by_name = None
_when_tags = [t for t in all_tags() if t.parentGuid ==
tag(name=".When").guid]
_when_tags_guids = set([t.guid for t in _when_tags])
def evernote_wait_try_again(f):
"""
Wait until mandated wait and try again
http://dev.evernote.com/doc/articles/rate_limits.php
"""
def f2(*args, **kwargs):
try:
return f(*args, **kwargs)
except EDAMSystemException as e:
if e.errorCode == EDAMErrorCode.RATE_LIMIT_REACHED:
logger.info("rate limit: {0} s. wait".format(
e.rateLimitDuration))
sleep(e.rateLimitDuration)
logger("wait over")
return f(*args, **kwargs)
else:
print(e)
raise e
return f2
class RateLimitingEvernoteProxy(object):
# based on http://code.activestate.com/recipes/496741-object-proxying/
__slots__ = ["_obj"]
def __init__(self, obj):
object.__setattr__(self, "_obj", obj)
def __getattribute__(self, name):
return evernote_wait_try_again(
getattr(object.__getattribute__(self, "_obj"), name))
def all_notebooks(refresh=False):
# List all of the notebooks in the user's account
global _notebooks, _notebook_guid_dict, _notebook_name_dict
if _notebooks is None or refresh:
_notebooks = noteStore.listNotebooks()
_notebook_guid_dict = dict([(nb.guid, nb) for nb in _notebooks])
_notebook_name_dict = dict([(nb.name, nb) for nb in _notebooks])
return _notebooks
def notebook(name=None, guid=None, refresh=False):
global _notebooks, _notebook_guid_dict, _notebook_name_dict
if _notebooks is None or refresh:
all_notebooks(refresh)
if name is not None:
return _notebook_name_dict.get(name)
elif guid is not None:
return _notebook_guid_dict.get(guid)
else:
return None
def all_tags(refresh=False):
global _tags, _tag_counts, _tags_by_name, _tags_by_guid
global _tag_counts_by_name, _tag_counts
if _tags is None or refresh:
_tags = noteStore.listTags()
_tag_counts = noteStore.findNoteCounts(NoteFilter(), False)
_tags_by_name = dict([(tag.name, tag) for tag in _tags])
_tags_by_guid = dict([(tag.guid, tag) for tag in _tags])
_tag_counts_by_name = dict([(_tags_by_guid[guid].name, count) for (
guid, count) in list(_tag_counts.tagCounts.items())])
return _tags
def tag(name=None, guid=None, refresh=False):
if _tags is None or refresh:
all_tags(refresh)
# add count if available
if name is not None:
_tag = _tags_by_name.get(name)
if _tag is not None:
_tag.count = _tag_counts_by_name.get(name, 0)
return _tag
elif guid is not None:
_tag = _tags_by_guid.get(guid)
if _tag is not None:
_tag.count = _tag_counts_by_name.get(_tag.name, 0)
return _tag
else:
return None
def tag_counts_by_name(refresh=False):
if _tags is None or refresh:
all_tags(refresh)
return _tag_counts_by_name
def tags_by_guid(refresh=False):
if _tags is None or refresh:
all_tags(refresh)
return _tags_by_guid
def display_notebooks():
notebooks = all_notebooks()
for (i, notebook) in enumerate(notebooks):
print(i, notebook.name, notebook.guid)
def notebookcounts():
""" return a dict of notebook guid -> number of notes in notebook"""
# http://dev.evernote.com/documentation/reference/NoteStore.html#Fn_NoteStore_findNoteCounts
counts = noteStore.findNoteCounts(NoteFilter(), False)
return counts.notebookCounts
def notes_metadata(**input_kw):
""" """
# http://dev.evernote.com/documentation/reference/NoteStore.html#Fn_NoteStore_findNotesMetadata
# pull out offset and page_size value if supplied
offset = input_kw.pop("offset", 0)
page_size = input_kw.pop("page_size", 100)
# let's update any keywords that are updated
# http://dev.evernote.com/documentation/reference/NoteStore.html#Struct_NotesMetadataResultSpec
include_kw = {
'includeTitle': False,
'includeContentLength': False,
'includeCreated': False,
'includeUpdated': False,
'includeDeleted': False,
'includeUpdateSequenceNum': False,
'includeNotebookGuid': False,
'includeTagGuids': False,
'includeAttributes': False,
'includeLargestResourceMime': False,
'includeLargestResourceSize': False
}
include_kw.update([(k, input_kw[k])
for k in set(input_kw.keys()) & set(include_kw.keys())])
# keywords aimed at NoteFilter
# http://dev.evernote.com/documentation/reference/NoteStore.html#Struct_NoteFilter
filter_kw_list = ('order', 'ascending', 'words', 'notebookGuid',
'tagGuids', 'timeZone', 'inactive', 'emphasized')
filter_kw = dict([(k, input_kw[k])
for k in set(filter_kw_list) & set(input_kw.keys())])
# what possible parameters are aimed at NoteFilter
# order i32 optional
# ascending bool optional
# words string optional
# notebookGuid Types.Guid optional
# tagGuids list<Types.Guid> optional
# timeZone string optional
# inactive bool
# emphasized string
more_nm = True
while more_nm:
# grab a page of data
note_meta = (noteStore.
findNotesMetadata(
NoteFilter(**filter_kw),
offset, page_size,
NotesMetadataResultSpec(**include_kw)))
# yield each individually
for nm in note_meta.notes:
yield nm
# grab next page if there is more to grab
if len(note_meta.notes):
offset += len(note_meta.notes)
else:
more_nm = False
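# notes_metadata() is a generator that pages through findNotesMetadata behind
# the scenes, so callers simply iterate it (or islice it). A minimal usage
# sketch follows; it assumes init() has been called and that an ':INBOX'
# notebook exists (as create_note below also assumes), and the function name is
# made up.
def _notes_metadata_demo():
    return list(islice(
        notes_metadata(includeTitle=True,
                       includeUpdated=True,
                       notebookGuid=notebook(name=':INBOX').guid),
        10))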
def sizes_of_notes():
"""a generator for note sizes"""
return (nm.contentLength
for nm in notes_metadata(includeContentLength=True))
def notes(title=None):
return notes_metadata(includeTitle=True,
includeUpdated=True,
includeUpdateSequenceNum=True,
words='intitle:"{0}"'.format(title))
def get_note(guid,
withContent=False,
withResourcesData=False,
withResourcesRecognition=False,
withResourcesAlternateData=False):
# https://dev.evernote.com/doc/reference/NoteStore.html#Fn_NoteStore_getNote
return noteStore.getNote(guid, withContent, withResourcesData,
withResourcesRecognition,
withResourcesAlternateData)
def create_note(title, content, tagNames=None, notebookGuid=None):
# put the note into the :INBOX notebook by default
inbox_nb_guid = notebook(name=':INBOX').guid
note_template = u"""
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE en-note SYSTEM "http://xml.evernote.com/pub/enml2.dtd">
<en-note style="word-wrap: break-word; -webkit-nbsp-mode: space;
-webkit-line-break: after-white-space;">
{0}
</en-note>
""".strip()
note = Note()
note.title = title.encode('utf-8')
note.content = note_template.format(content).encode('utf-8')
if tagNames is None:
note.tagNames = []
else:
note.tagNames = tagNames
if notebookGuid is None:
note.notebookGuid = inbox_nb_guid
else:
note.notebookGuid = notebookGuid
note = noteStore.createNote(note)
return note
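# Minimal usage sketch for create_note (illustrative only; the title, body, and
# tag are made up and the function name is hypothetical). The body is wrapped
# in the ENML template above and the note lands in the :INBOX notebook because
# no notebookGuid is given; init() must have been called first.
def _create_note_demo():
    return create_note(u"Example note", u"<div>hello world</div>",
                       tagNames=["+example-project"])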
def note_link(guid):
"""
return for note with guid:
https://[service]/shard/[shardId]/nl/[userId]/[noteGuid]/
"""
global user
note = get_note(guid)
return ("https://{service}/shard/{shardId}/nl/{userId}/{noteGuid}/".format(
service="www.evernote.com",
shardId=user.shardId,
userId=user.id,
noteGuid=note.guid
))
def update_note(note, title=None, content=None, tagNames=None,
notebookGuid=None, updated=None):
"""
With the exception of the note's title and guid, fields that
are not being changed do not need to be set. If the content
is not being modified, note.content should be left unset. If
the list of resources is not being modified, note.resources
should be left unset.
"""
note_template = u"""
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE en-note SYSTEM "http://xml.evernote.com/pub/enml2.dtd">
<en-note style="word-wrap: break-word; -webkit-nbsp-mode: space;
-webkit-line-break: after-white-space;">
{0}
</en-note>
""".strip()
if title is not None:
note.title = title.encode('utf-8')
if content is not None:
note.content = note_template.format(content).encode('utf-8')
if tagNames is not None:
note.tagNames = tagNames
if notebookGuid is not None:
note.notebookGuid = notebookGuid
if updated is None:
note.updated = arrow.utcnow().timestamp * 1000
note = noteStore.updateNote(note)
return note
def set_notebook_for_note(note, notebook_name):
"""
Place the given note in the notebook of name notebook_name
"""
new_nb_guid = notebook(name=notebook_name).guid
if note.notebookGuid != new_nb_guid:
note.notebookGuid = new_nb_guid
noteStore.updateNote(note)
return note
def web_api_notes_from_selection():
from appscript import app
evnote = app('Evernote')
return [get_note(sel_note.note_link().split("/")[-3])
for sel_note in evnote.selection()]
def all_actions():
actions = list(islice(
notes_metadata(includeTitle=True,
includeUpdated=True,
includeCreated=True,
includeUpdateSequenceNum=True,
includeTagGuids=True,
notebookGuid=notebook(name='Action Pending').guid),
None))
return actions
def actions_to_df(actions):
def j_(items):
return ",".join(items)
actions_data = []
for note in actions:
tags = [tag(
guid=tagGuid).name
for tagGuid in note.tagGuids] if note.tagGuids is not None else []
plus_tags = [tag_ for tag_ in tags if tag_.startswith("+")]
context_tags = [tag_ for tag_ in tags if tag_.startswith("@")]
when_tags = [tag_ for tag_ in tags if tag_.startswith("#")]
other_tags = [tag_ for tag_ in tags if tag_[0] not in ['+', '@', '#']]
actions_data.append(dict([('title', note.title),
('guid', note.guid),
('created', datetime.datetime.fromtimestamp(
old_div(note.created, 1000.))),
('updated', datetime.datetime.fromtimestamp(
old_div(note.updated, 1000.))),
('plus', j_(plus_tags)),
('context', j_(context_tags)),
('when', j_(when_tags)),
('other', j_(other_tags))
])
)
actions_df = DataFrame(actions_data,
columns=['title', 'guid', 'created', 'updated',
'plus', 'context', 'when', 'other'])
return actions_df
def actions_for_project(tag_name,
includeTitle=True,
includeUpdated=True,
includeUpdateSequenceNum=True,
includeTagGuids=True):
notes = notes_metadata(includeTitle=includeTitle,
includeUpdated=includeUpdated,
includeUpdateSequenceNum=includeUpdateSequenceNum,
includeTagGuids=includeTagGuids,
tagGuids=[tag(tag_name).guid],
notebookGuid=notebook(name='Action Pending').guid)
return notes
def strip_when_tags(note):
"""
remove any tagGuids that are when tags from note
"""
guids_new_tag_set = set(note.tagGuids) - _when_tags_guids
note.tagGuids = list(guids_new_tag_set)
noteStore.updateNote(note)
return note
def strip_when_tags_move_to_ref_nb_for_selection():
from appscript import app
evnote = app('Evernote')
notes = web_api_notes_from_selection()
notes = [strip_when_tags(note) for note in notes]
# move note to the :REFERENCE Notebook
notes = [set_notebook_for_note(note, ":REFERENCE") for note in notes]
evnote.synchronize()
def projects_to_df(notes):
df = DataFrame([dict([('title', note.title),
('guid', note.guid),
('created', datetime.datetime.fromtimestamp(
old_div(note.created, 1000.))),
('updated', datetime.datetime.fromtimestamp(
old_div(note.updated, 1000.)))
]) for note in notes],
columns=['title', 'guid', 'created', 'updated'])
return df
def project_notes_and_tags():
"""
get all the notes in the :PROJECTS Notebook
"""
notes = list(islice(
notes_metadata(includeTitle=True,
includeUpdated=True,
includeCreated=True,
includeUpdateSequenceNum=True,
includeTagGuids=True,
notebookGuid=notebook(name=':PROJECTS').guid),
None))
# accumulate all the tags that begin with "+" associated with notes in
# :PROJECTS notebook
plus_tags_set = set()
for note in notes:
tags = ([tag(
guid=tagGuid).name for tagGuid in note.tagGuids]
if note.tagGuids is not None
else [])
plus_tags = [tag_ for tag_ in tags if tag_.startswith("+")]
plus_tags_set.update(plus_tags)
return (notes, plus_tags_set)
def project_tags_for_selected():
project_tags = set()
for note in web_api_notes_from_selection():
project_tags |= set(
[s for s in [tag(guid=g).name
for g in note.tagGuids] if s.startswith("+")])
return project_tags
def non_project_plus_tags():
all_plus_tags = set([tag_ for tag_ in [tag_.name for tag_ in all_tags(
refresh=False)] if tag_.startswith("+")])
projects_notes = list(islice(
notes_metadata(includeTitle=True,
includeUpdated=True,
includeUpdateSequenceNum=True,
notebookGuid=notebook(name=':PROJECTS').guid), None))
project_plus_tags = set()
for note in projects_notes:
tags = noteStore.getNoteTagNames(note.guid)
plus_tags = [tag_ for tag_ in tags if tag_.startswith("+")]
project_plus_tags.update(plus_tags)
return (all_plus_tags - project_plus_tags)
def generate_project_starter_notes():
projects_nb_guid = notebook(name=':PROJECTS').guid
notes = []
for tag_name in non_project_plus_tags():
proj_name = tag_name[1:]
note = create_note(proj_name, " ", tagNames=[tag_name],
notebookGuid=projects_nb_guid)
notes.append(note)
return notes
def fix_wayward_plus_tags():
active_projects_tag = tag(name=".Active Projects")
# inactive_projects_tag = tag(name=".Inactive Projects")
wayward_plus_tags = [tag_ for tag_ in all_tags(refresh=True)
if tag_.name.startswith("+") and
tag_.parentGuid != active_projects_tag.guid]
for tag_ in wayward_plus_tags:
tag_.parentGuid = active_projects_tag.guid
noteStore.updateTag(tag_)
return [tag_.name for tag_ in wayward_plus_tags]
def action_note_tags():
# when_tags = [tag_ for tag_ in all_tags(
# refresh=True)
# if tag_.parentGuid == tag(name=".When").guid]
# when_tags_guids = set([tag_.guid for tag_ in when_tags])
note_tags_dict = defaultdict(list)
action_notes = list(islice(
notes_metadata(includeTitle=True,
includeUpdated=True,
includeUpdateSequenceNum=True,
includeTagGuids=True,
notebookGuid=notebook(name='Action Pending').guid),
None))
# notes that have no .When tags whatsoever
# ideally -- each action has one and only one .When tag
for note in action_notes:
tag_guids = note.tagGuids
if tag_guids is None:
tag_guids = []
tag_names = [tag(guid=g).name for g in tag_guids]
for tag_name in tag_names:
note_tags_dict[tag_name].append(note)
if len(tag_guids) == 0:
note_tags_dict['__UNTAGGED__'].append(note)
return note_tags_dict
def retire_project(tag_name,
ignore_actions=False,
dry_run=False,
display_remaining_actions=True):
"""
Retire the project represented by tag_name
"""
tag_ = tag(name=tag_name)
# make sure tag_name starts with "+"
if not tag_name.startswith("+"):
return tag_
# if ignore_actions is False, check whether are still associated
# actions for the project.
# if there are actions, then don't retire project.
# Optionally display actions in Evernote
if not ignore_actions:
associated_actions = list(actions_for_project(tag_name))
if len(associated_actions):
if display_remaining_actions:
from appscript import app
evnote = app('Evernote')
evnote.open_collection_window(
with_query_string='''notebook:"Action Pending" tag:"{0}"'''
.format(tag_name))
return tag_name
# before just trying to turn the + to a -, check
# for existence of the new name.
# if the new name exists, we would delete the + tag and apply
# the - tag to the notes tied to the
# + tag
# let's take care of the simple case first
# do I have logic for finding all notes that have a given tag?
# tagging a set of notes with a given tag?
retired_tag_name = "-" + tag_name[1:]
if tag(retired_tag_name) is None:
tag_.name = retired_tag_name
else:
raise Exception("{0} already exists".format(retired_tag_name))
# change parent reference
tag_.parentGuid = tag('.Inactive Projects').guid
# move the project note (if it exists) from the project notebook
# to the retired project notebook
project_notes = notes_metadata(
includeTitle=True,
includeNotebookGuid=True,
tagGuids=[tag_.guid],
notebookGuid=notebook(name=':PROJECTS').guid)
# with NoteMetadata, how to make change to the corresponding note?
# make use of
# http://dev.evernote.com/doc/reference/NoteStore.html#Fn_NoteStore_updateNote
for note in project_notes:
note.notebookGuid = notebook(name=":PROJECTS--RETIRED").guid
noteStore.updateNote(note)
# deal with the associated actions for the project
# apply changes to tag
noteStore.updateTag(tag_)
return tag_
|
|
# -*- coding: utf-8 -*-
import pytest
import sys
from .as_status_codes import AerospikeStatus
from .index_helpers import ensure_dropped_index
from aerospike import exception as e
aerospike = pytest.importorskip("aerospike")
try:
import aerospike
except ImportError:
print("Please install aerospike python client.")
sys.exit(1)
class TestIndex(object):
@pytest.fixture(autouse=True)
def setup(self, request, as_connection):
for i in range(5):
key = ('test', u'demo', i)
rec = {
'name': 'name%s' % (str(i)),
'addr': 'name%s' % (str(i)),
'age': i,
'no': i
}
as_connection.put(key, rec)
def teardown():
ensure_dropped_index(self.as_connection, 'test', 'age_index')
ensure_dropped_index(self.as_connection, 'test', 'name_index')
for i in range(5):
key = ('test', u'demo', i)
rec = {
'name': 'name%s' % (str(i)),
'addr': 'name%s' % (str(i)),
'age': i,
'no': i
}
as_connection.remove(key)
request.addfinalizer(teardown)
def test_create_indexes_with_no_parameters(self):
"""
Invoke index_string_create() and index_integer_create() without any
mandatory parameters.
"""
with pytest.raises(TypeError) as typeError:
self.as_connection.index_string_create()
assert "argument 'ns' (pos 1)" in str(
typeError.value)
with pytest.raises(TypeError) as typeError:
self.as_connection.index_integer_create()
assert "argument 'ns' (pos 1)" in str(
typeError.value)
def test_create_integer_index_with_correct_parameters(self):
"""
Invoke createindex() with correct arguments
"""
policy = {}
retobj = self.as_connection.index_integer_create('test', 'demo', 'age',
'age_index', policy)
assert retobj == AerospikeStatus.AEROSPIKE_OK
self.as_connection.index_remove('test', 'age_index', policy)
ensure_dropped_index(self.as_connection, 'test', 'age_index')
def test_create_integer_index_with_set_name_too_long(self):
# Invoke createindex with a set name beyond the maximum
set_name = 'a' * 128
policy = {}
with pytest.raises(e.InvalidRequest) as err_info:
self.as_connection.index_integer_create(
'test', set_name, 'age', 'age_index', policy)
err_code = err_info.value.code
assert err_code is AerospikeStatus.AEROSPIKE_ERR_REQUEST_INVALID
def test_create_integer_index_with_incorrect_namespace(self):
"""
Invoke createindex() with non existent namespace
"""
policy = {}
with pytest.raises(e.InvalidRequest) as err_info:
self.as_connection.index_integer_create('fake_namespace', 'demo',
'age', 'age_index', policy)
err_code = err_info.value.code
assert err_code is AerospikeStatus.AEROSPIKE_ERR_REQUEST_INVALID
def test_create_integer_index_with_incorrect_set(self):
"""
Invoke createindex() with nonexistent set
It should succeed
"""
policy = {}
retobj = self.as_connection.index_integer_create(
'test', 'demo1', 'age', 'age_index', policy)
assert retobj == AerospikeStatus.AEROSPIKE_OK
self.as_connection.index_remove('test', 'age_index', policy)
ensure_dropped_index(self.as_connection, 'test', 'age_index')
def test_create_integer_index_with_incorrect_bin(self):
"""
Invoke createindex() with a nonexistent bin
"""
policy = {}
retobj = self.as_connection.index_integer_create(
'test', 'demo', 'fake_bin', 'age_index', policy)
assert retobj == AerospikeStatus.AEROSPIKE_OK
self.as_connection.index_remove('test', 'age_index', policy)
ensure_dropped_index(self.as_connection, 'test', 'age_index')
def test_create_integer_index_with_namespace_is_none(self):
"""
Invoke createindex() with namespace is None
"""
policy = {}
with pytest.raises(e.ParamError) as err_info:
self.as_connection.index_integer_create(None, 'demo',
'age', 'age_index', policy)
err_code = err_info.value.code
assert err_code is AerospikeStatus.AEROSPIKE_ERR_PARAM
    def test_create_integer_index_with_set_is_none(self):
# Invoke createindex() with set is None
policy = {}
retobj = self.as_connection.index_integer_create(
'test', None, 'age', 'age_index', policy)
assert retobj == AerospikeStatus.AEROSPIKE_OK
self.as_connection.index_remove('test', 'age_index', policy)
ensure_dropped_index(self.as_connection, 'test', 'age_index')
def test_create_integer_index_with_set_is_int(self):
# Invoke createindex() with set is int
policy = {}
with pytest.raises(e.ParamError) as err_info:
self.as_connection.index_integer_create('test', 1, 'age',
'age_index', policy)
err_code = err_info.value.code
assert err_code is AerospikeStatus.AEROSPIKE_ERR_PARAM
def test_create_integer_index_with_bin_is_none(self):
"""
Invoke createindex() with bin is None
"""
policy = {}
with pytest.raises(e.ParamError) as err_info:
self.as_connection.index_integer_create('test', 'demo',
None, 'age_index', policy)
err_code = err_info.value.code
assert err_code is AerospikeStatus.AEROSPIKE_ERR_PARAM
def test_create_integer_index_with_index_is_none(self):
"""
Invoke createindex() with index_name is None
"""
policy = {}
with pytest.raises(e.ParamError) as err_info:
self.as_connection.index_integer_create('test', 'demo',
'age', None, policy)
err_code = err_info.value.code
assert err_code is AerospikeStatus.AEROSPIKE_ERR_PARAM
def test_create_same_integer_index_multiple_times(self):
"""
Invoke createindex() with the same arguments
multiple times on the same bin
"""
policy = {}
retobj = self.as_connection.index_integer_create('test', 'demo', 'age',
'age_index', policy)
assert retobj == AerospikeStatus.AEROSPIKE_OK
with pytest.raises(e.IndexFoundError):
retobj = self.as_connection.index_integer_create(
'test', 'demo', 'age', 'age_index', policy)
self.as_connection.index_remove('test', 'age_index', policy)
ensure_dropped_index(self.as_connection, 'test', 'age_index')
def test_create_same_integer_index_multiple_times_different_bin(self):
"""
Invoke createindex() with the same index name,
multiple times on different bin names
"""
policy = {}
retobj = self.as_connection.index_integer_create(
'test', 'demo', 'age', 'age_index', policy)
assert retobj == AerospikeStatus.AEROSPIKE_OK
with pytest.raises(e.IndexFoundError):
retobj = self.as_connection.index_integer_create(
'test', 'demo', 'no', 'age_index', policy)
self.as_connection.index_remove('test', 'age_index', policy)
ensure_dropped_index(self.as_connection, 'test', 'age_index')
def test_create_different_integer_index_multiple_times_same_bin(self):
"""
Invoke createindex() with multiple times on same bin with different
name
"""
policy = {}
retobj = self.as_connection.index_integer_create(
'test', 'demo', 'age', 'age_index', policy)
assert retobj == AerospikeStatus.AEROSPIKE_OK
with pytest.raises(e.IndexFoundError):
retobj = self.as_connection.index_integer_create(
'test', 'demo', 'age', 'age_index1', policy)
self.as_connection.index_remove('test', 'age_index1', policy)
ensure_dropped_index(self.as_connection, 'test', 'age_index')
def test_create_integer_index_with_policy(self):
"""
Invoke createindex() with policy
"""
policy = {'timeout': 1000}
retobj = self.as_connection.index_integer_create('test', 'demo', 'age',
'age_index', policy)
ensure_dropped_index(self.as_connection, 'test', 'age_index')
assert retobj == AerospikeStatus.AEROSPIKE_OK
def test_create_string_index_positive(self):
"""
Invoke create string index() with correct arguments
"""
policy = {}
retobj = self.as_connection.index_string_create('test', 'demo', 'name',
'name_index', policy)
self.as_connection.index_remove('test', 'name_index', policy)
ensure_dropped_index(self.as_connection, 'test', 'name_index')
assert retobj == AerospikeStatus.AEROSPIKE_OK
def test_create_string_index_with_set_length_too_long(self):
# Invoke createindex() with correct arguments set length extra
set_name = 'a' * 100
policy = {}
with pytest.raises(e.InvalidRequest) as err_info:
self.as_connection.index_string_create(
'test', set_name, 'name', 'name_index', policy)
err_code = err_info.value.code
assert err_code is AerospikeStatus.AEROSPIKE_ERR_REQUEST_INVALID
def test_create_string_index_with_correct_parameters_ns_length_extra(self):
# Invoke createindex() with correct arguments ns length extra
ns_name = 'a' * 50
policy = {}
with pytest.raises(e.InvalidRequest) as err_info:
self.as_connection.index_string_create(
ns_name, 'demo', 'name', 'name_index', policy)
err_code = err_info.value.code
assert err_code is AerospikeStatus.AEROSPIKE_ERR_REQUEST_INVALID
def test_create_string_index_with_incorrect_namespace(self):
"""
Invoke create string index() with incorrect namespace
"""
policy = {}
with pytest.raises(e.InvalidRequest) as err_info:
self.as_connection.index_string_create(
'fake_namespace', 'demo', 'name', 'name_index', policy)
err_code = err_info.value.code
assert err_code is AerospikeStatus.AEROSPIKE_ERR_REQUEST_INVALID
def test_create_string_index_with_incorrect_set(self):
"""
Invoke create string index() with incorrect set
"""
policy = {}
retobj = self.as_connection.index_string_create(
'test', 'demo1', 'name', 'name_index', policy)
self.as_connection.index_remove('test', 'name_index', policy)
ensure_dropped_index(self.as_connection, 'test', 'name_index')
assert retobj == AerospikeStatus.AEROSPIKE_OK
def test_create_string_index_with_incorrect_bin(self):
"""
Invoke create string index() with incorrect bin
"""
policy = {}
retobj = self.as_connection.index_string_create(
'test', 'demo', 'name1', 'name_index', policy)
self.as_connection.index_remove('test', 'name_index', policy)
ensure_dropped_index(self.as_connection, 'test', 'name_index')
assert retobj == AerospikeStatus.AEROSPIKE_OK
def test_create_string_index_with_namespace_is_none(self):
"""
Invoke create string index() with namespace is None
"""
policy = {}
with pytest.raises(e.ParamError) as err_info:
self.as_connection.index_string_create(
None, 'demo', 'name', 'name_index', policy)
err_code = err_info.value.code
assert err_code is AerospikeStatus.AEROSPIKE_ERR_PARAM
def test_create_string_index_with_set_is_none(self):
# Invoke create string index() with set is None
policy = {}
retobj = self.as_connection.index_string_create(
'test', None, 'name', 'name_index', policy)
self.as_connection.index_remove('test', 'name_index', policy)
ensure_dropped_index(self.as_connection, 'test', 'name_index')
assert retobj == AerospikeStatus.AEROSPIKE_OK
def test_create_string_index_with_bin_is_none(self):
"""
Invoke create string index() with bin is None
"""
policy = {}
with pytest.raises(e.ParamError) as err_info:
self.as_connection.index_string_create(
'test', 'demo', None, 'name_index', policy)
err_code = err_info.value.code
assert err_code is AerospikeStatus.AEROSPIKE_ERR_PARAM
def test_create_string_index_with_index_is_none(self):
"""
Invoke create_string_index() with index name is None
"""
policy = {}
with pytest.raises(e.ParamError) as err_info:
self.as_connection.index_string_create(
'test', 'demo', 'name', None, policy)
err_code = err_info.value.code
assert err_code is AerospikeStatus.AEROSPIKE_ERR_PARAM
def test_create_same_string_index_multiple_times(self):
"""
Invoke create string index() with multiple times on same bin
"""
policy = {}
retobj = self.as_connection.index_string_create(
'test', 'demo', 'name', 'name_index', policy)
assert retobj == AerospikeStatus.AEROSPIKE_OK
with pytest.raises(e.IndexFoundError):
retobj = self.as_connection.index_string_create(
'test', 'demo', 'name', 'name_index', policy)
self.as_connection.index_remove('test', 'name_index', policy)
ensure_dropped_index(self.as_connection, 'test', 'name_index')
def test_create_same_string_index_multiple_times_different_bin(self):
"""
Invoke create string index() with multiple times on different bin
"""
policy = {}
retobj = self.as_connection.index_string_create('test', 'demo', 'name',
'name_index', policy)
assert retobj == AerospikeStatus.AEROSPIKE_OK
with pytest.raises(e.IndexFoundError):
retobj = self.as_connection.index_string_create(
'test', 'demo', 'addr', 'name_index', policy)
self.as_connection.index_remove('test', 'name_index', policy)
ensure_dropped_index(self.as_connection, 'test', 'name_index')
assert retobj == AerospikeStatus.AEROSPIKE_OK
def test_create_different_string_index_multiple_times_same_bin(self):
"""
Invoke create string index() with multiple times on same
bin with different name
"""
policy = {}
retobj = self.as_connection.index_string_create('test', 'demo', 'name',
'name_index', policy)
assert retobj == AerospikeStatus.AEROSPIKE_OK
with pytest.raises(e.IndexFoundError):
retobj = self.as_connection.index_string_create(
'test', 'demo', 'name', 'name_index1', policy)
self.as_connection.index_remove('test', 'name_index1', policy)
ensure_dropped_index(self.as_connection, 'test', 'name_index')
self.as_connection.index_remove('test', 'name_index', policy)
ensure_dropped_index(self.as_connection, 'test', 'name_index')
def test_create_string_index_with_policy(self):
"""
Invoke create string index() with policy
"""
policy = {'timeout': 1000}
retobj = self.as_connection.index_string_create('test', 'demo', 'name',
'name_index', policy)
assert retobj == AerospikeStatus.AEROSPIKE_OK
self.as_connection.index_remove('test', 'name_index', policy)
ensure_dropped_index(self.as_connection, 'test', 'name_index')
def test_drop_invalid_index(self):
"""
Invoke drop invalid index()
"""
policy = {}
with pytest.raises(e.IndexNotFound):
retobj = self.as_connection.index_remove('test', 'notarealindex',
policy)
def test_drop_valid_index(self):
"""
Invoke drop valid index()
"""
policy = {}
self.as_connection.index_integer_create('test', 'demo', 'age',
'age_index', policy)
retobj = self.as_connection.index_remove('test', 'age_index', policy)
ensure_dropped_index(self.as_connection, 'test', 'age_index')
assert retobj == AerospikeStatus.AEROSPIKE_OK
def test_drop_valid_index_policy(self):
"""
Invoke drop valid index() policy
"""
policy = {'timeout': 1000}
self.as_connection.index_integer_create('test', 'demo', 'age',
'age_index', policy)
retobj = self.as_connection.index_remove('test', 'age_index', policy)
ensure_dropped_index(self.as_connection, 'test', 'age_index')
assert retobj == AerospikeStatus.AEROSPIKE_OK
def test_createindex_with_long_index_name(self):
# Invoke createindex() with long index name
policy = {}
with pytest.raises(e.InvalidRequest):
retobj = self.as_connection.index_integer_create(
'test', 'demo', 'age', 'index' * 100, policy)
def test_create_string_index_unicode_positive(self):
"""
Invoke create string index() with correct arguments
"""
policy = {}
retobj = self.as_connection.index_string_create('test', u'demo',
u'name',
u'uni_name_index',
policy)
self.as_connection.index_remove('test', u'uni_name_index', policy)
ensure_dropped_index(self.as_connection, 'test', u'uni_name_index')
assert retobj == AerospikeStatus.AEROSPIKE_OK
def test_createindex_integer_unicode(self):
"""
Invoke createindex() with correct arguments
"""
policy = {}
retobj = self.as_connection.index_integer_create('test', u'demo',
u'age',
u'uni_age_index',
policy)
assert retobj == AerospikeStatus.AEROSPIKE_OK
self.as_connection.index_remove('test', u'uni_age_index', policy)
ensure_dropped_index(self.as_connection, 'test', u'uni_age_index')
def test_createindex_with_correct_parameters_without_connection(self):
# Invoke createindex() with correct arguments without connection
policy = {}
config = {'hosts': [('127.0.0.1', 3000)]}
client1 = aerospike.client(config)
with pytest.raises(e.ClusterError) as err_info:
client1.index_integer_create(
'test', 'demo', 'age', 'age_index', policy)
err_code = err_info.value.code
assert err_code is AerospikeStatus.AEROSPIKE_CLUSTER_ERROR
def test_index_remove_no_args(self):
with pytest.raises(TypeError):
self.as_connection.index_remove()
def test_index_remove_no_index(self):
with pytest.raises(TypeError):
self.as_connection.index_remove('test')
def test_index_remove_extra_args(self):
# pass 'ns', 'idx_name', 'policy', and an extra argument
with pytest.raises(TypeError):
self.as_connection.index_remove('test', 'demo', {}, 'index_name')
@pytest.mark.parametrize(
"ns, idx_name, policy",
(
('test', 'idx', 'policy'),
('test', 5, {}),
(5, 'idx', {}),
('test', None, {}),
(None, 'idx', {})
)
)
def test_index_remove_wrong_arg_types(self, ns, idx_name, policy):
with pytest.raises(e.ParamError):
self.as_connection.index_remove(ns, idx_name, policy)
|
|
#!/usr/bin/env python
"""
Botan install script
(C) 2014,2015,2017 Jack Lloyd
Botan is released under the Simplified BSD License (see license.txt)
"""
import errno
import json
import logging
import optparse # pylint: disable=deprecated-module
import os
import shutil
import sys
import subprocess
import traceback
def parse_command_line(args):
parser = optparse.OptionParser()
parser.add_option('--verbose', action='store_true', default=False,
help='Show debug messages')
parser.add_option('--quiet', action='store_true', default=False,
help='Show only warnings and errors')
build_group = optparse.OptionGroup(parser, 'Source options')
build_group.add_option('--build-dir', metavar='DIR', default='build',
help='Location of build output (default \'%default\')')
parser.add_option_group(build_group)
install_group = optparse.OptionGroup(parser, 'Installation options')
install_group.add_option('--prefix', default='/usr/local',
help='Set output directory (default %default)')
install_group.add_option('--bindir', default='bin', metavar='DIR',
help='Set binary subdir (default %default)')
install_group.add_option('--libdir', default='lib', metavar='DIR',
help='Set library subdir (default %default)')
install_group.add_option('--includedir', default='include', metavar='DIR',
help='Set include subdir (default %default)')
install_group.add_option('--docdir', default='share/doc', metavar='DIR',
help='Set documentation subdir (default %default)')
install_group.add_option('--pkgconfigdir', default='pkgconfig', metavar='DIR',
help='Set pkgconfig subdir (default %default)')
install_group.add_option('--umask', metavar='MASK', default='022',
help='Umask to set (default %default)')
parser.add_option_group(install_group)
(options, args) = parser.parse_args(args)
def log_level():
if options.verbose:
return logging.DEBUG
if options.quiet:
return logging.WARNING
return logging.INFO
logging.getLogger().setLevel(log_level())
return (options, args)
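# Illustrative invocation (the script file name and the staging path are assumed,
# and a finished build is expected in ./build):
#   DESTDIR=/tmp/stage python install.py --prefix=/usr/local --build-dir=build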
class PrependDestdirError(Exception):
pass
def is_subdir(path, subpath):
return os.path.relpath(path, start=subpath).startswith("..")
def prepend_destdir(path):
"""
Needed because os.path.join() discards the first path if the
second one is absolute, which is usually the case here. Still, we
want relative paths to work and leverage the os awareness of
os.path.join().
"""
destdir = os.environ.get('DESTDIR', "")
if destdir:
# DESTDIR is non-empty, but we only join absolute paths on UNIX-like file systems
if os.path.sep != "/":
raise PrependDestdirError("Only UNIX-like file systems using forward slash " \
"separator supported when DESTDIR is set.")
if not os.path.isabs(path):
raise PrependDestdirError("--prefix must be an absolute path when DESTDIR is set.")
path = os.path.normpath(path)
        # Remove / or \ prefixes if present to accommodate os.path.join()
path = path.lstrip(os.path.sep)
path = os.path.join(destdir, path)
if not is_subdir(destdir, path):
raise PrependDestdirError("path escapes DESTDIR (path='%s', destdir='%s')" % (path, destdir))
return path
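# Worked example for prepend_destdir() on a POSIX filesystem (values illustrative):
# with DESTDIR unset, prepend_destdir('/usr/local/lib') returns '/usr/local/lib';
# with DESTDIR=/tmp/stage it returns '/tmp/stage/usr/local/lib', while a relative
# path such as 'local/lib' raises PrependDestdirError.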
def makedirs(dirname, exist_ok=True):
try:
logging.debug('Creating directory %s' % (dirname))
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST or not exist_ok:
raise e
# Clear link and create new one
def force_symlink(target, linkname):
try:
os.unlink(linkname)
except OSError as e:
if e.errno != errno.ENOENT:
raise e
os.symlink(target, linkname)
def calculate_exec_mode(options):
out = 0o777
if 'umask' in os.__dict__:
umask = int(options.umask, 8)
logging.debug('Setting umask to %s' % oct(umask))
os.umask(int(options.umask, 8))
out &= (umask ^ 0o777)
return out
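# Worked example for calculate_exec_mode(): with the default --umask of 022,
# 0o777 & (0o022 ^ 0o777) == 0o755, so installed executables end up rwxr-xr-x
# (on platforms where os.umask is available).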
def main(args):
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
logging.basicConfig(stream=sys.stdout,
format='%(levelname) 7s: %(message)s')
(options, args) = parse_command_line(args)
exe_mode = calculate_exec_mode(options)
def copy_file(src, dst):
logging.debug('Copying %s to %s' % (src, dst))
shutil.copyfile(src, dst)
def copy_executable(src, dst):
copy_file(src, dst)
logging.debug('Make %s executable' % dst)
os.chmod(dst, exe_mode)
with open(os.path.join(options.build_dir, 'build_config.json')) as f:
cfg = json.load(f)
ver_major = int(cfg['version_major'])
ver_minor = int(cfg['version_minor'])
ver_patch = int(cfg['version_patch'])
target_os = cfg['os']
build_shared_lib = bool(cfg['build_shared_lib'])
build_static_lib = bool(cfg['build_static_lib'])
out_dir = cfg['out_dir']
bin_dir = os.path.join(options.prefix, options.bindir)
lib_dir = os.path.join(options.prefix, options.libdir)
target_include_dir = os.path.join(options.prefix,
options.includedir,
'botan-%d' % (ver_major),
'botan')
for d in [options.prefix, lib_dir, bin_dir, target_include_dir]:
makedirs(prepend_destdir(d))
build_include_dir = os.path.join(options.build_dir, 'include', 'botan')
for include in sorted(os.listdir(build_include_dir)):
if include == 'internal':
continue
copy_file(os.path.join(build_include_dir, include),
prepend_destdir(os.path.join(target_include_dir, include)))
build_external_include_dir = os.path.join(options.build_dir, 'include', 'external')
for include in sorted(os.listdir(build_external_include_dir)):
copy_file(os.path.join(build_external_include_dir, include),
prepend_destdir(os.path.join(target_include_dir, include)))
if build_static_lib or target_os == 'windows':
static_lib = cfg['static_lib_name']
copy_file(os.path.join(out_dir, static_lib),
prepend_destdir(os.path.join(lib_dir, os.path.basename(static_lib))))
if build_shared_lib:
if target_os == "windows":
libname = cfg['libname']
soname_base = libname + '.dll'
copy_executable(os.path.join(out_dir, soname_base),
prepend_destdir(os.path.join(lib_dir, soname_base)))
else:
soname_patch = cfg['soname_patch']
soname_abi = cfg['soname_abi']
soname_base = cfg['soname_base']
copy_executable(os.path.join(out_dir, soname_patch),
prepend_destdir(os.path.join(lib_dir, soname_patch)))
if target_os != "openbsd":
prev_cwd = os.getcwd()
try:
os.chdir(prepend_destdir(lib_dir))
force_symlink(soname_patch, soname_abi)
force_symlink(soname_patch, soname_base)
finally:
os.chdir(prev_cwd)
copy_executable(cfg['cli_exe'], prepend_destdir(os.path.join(bin_dir, cfg['cli_exe_name'])))
# On MacOS, if we are using shared libraries and we install, we should fix
# up the library name, otherwise the botan command won't work; ironically
# we only need to do this because we previously changed it from a setting
# that would be correct for installation to one that lets us run it from
# the build directory
if target_os == 'macos' and build_shared_lib:
soname_abi = cfg['soname_abi']
subprocess.check_call(['install_name_tool',
'-change',
os.path.join('@executable_path', soname_abi),
os.path.join(lib_dir, soname_abi),
os.path.join(bin_dir, cfg['cli_exe_name'])])
if 'botan_pkgconfig' in cfg:
pkgconfig_dir = os.path.join(options.prefix, options.libdir, options.pkgconfigdir)
makedirs(prepend_destdir(pkgconfig_dir))
copy_file(cfg['botan_pkgconfig'],
prepend_destdir(os.path.join(pkgconfig_dir, os.path.basename(cfg['botan_pkgconfig']))))
if 'ffi' in cfg['mod_list']:
for ver in cfg['python_version'].split(','):
py_lib_path = os.path.join(lib_dir, 'python%s' % (ver), 'site-packages')
logging.debug('Installing python module to %s' % (py_lib_path))
makedirs(prepend_destdir(py_lib_path))
py_dir = cfg['python_dir']
copy_file(os.path.join(py_dir, 'botan2.py'),
prepend_destdir(os.path.join(py_lib_path, 'botan2.py')))
if cfg['with_documentation']:
target_doc_dir = os.path.join(options.prefix, options.docdir,
'botan-%d.%d.%d' % (ver_major, ver_minor, ver_patch))
shutil.rmtree(prepend_destdir(target_doc_dir), True)
shutil.copytree(cfg['doc_output_dir'], prepend_destdir(target_doc_dir))
copy_file(os.path.join(cfg['base_dir'], 'license.txt'),
prepend_destdir(os.path.join(target_doc_dir, 'license.txt')))
copy_file(os.path.join(cfg['base_dir'], 'news.rst'),
prepend_destdir(os.path.join(target_doc_dir, 'news.txt')))
for f in [f for f in os.listdir(cfg['doc_dir']) if f.endswith('.txt')]:
copy_file(os.path.join(cfg['doc_dir'], f), prepend_destdir(os.path.join(target_doc_dir, f)))
if cfg['with_rst2man']:
man1_dir = prepend_destdir(os.path.join(options.prefix, os.path.join(cfg['mandir'], 'man1')))
makedirs(man1_dir)
copy_file(os.path.join(cfg['build_dir'], 'botan.1'),
os.path.join(man1_dir, 'botan.1'))
logging.info('Botan %s installation complete', cfg['version'])
return 0
if __name__ == '__main__':
try:
sys.exit(main(sys.argv))
except Exception as e: # pylint: disable=broad-except
logging.error('Failure: %s' % (e))
logging.info(traceback.format_exc())
sys.exit(1)
|
|
""" Python Character Mapping Codec generated from 'VENDORS/APPLE/ARABIC.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-arabic',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
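# Minimal registration sketch (illustrative): this module is not on the standard
# `encodings` search path, so a small custom search function is assumed.
#   import codecs
#   codecs.register(lambda name: getregentry() if name == 'mac-arabic' else None)
#   b'\xc7'.decode('mac-arabic')   # -> '\u0627', ARABIC LETTER ALEF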
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x0081: 0x00a0, # NO-BREAK SPACE, right-left
0x0082: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0083: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0084: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x0085: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x0086: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x0087: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x0088: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0089: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x008a: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x008b: 0x06ba, # ARABIC LETTER NOON GHUNNA
0x008c: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
0x008d: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x008e: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x008f: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x0090: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0091: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x0092: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x0093: 0x2026, # HORIZONTAL ELLIPSIS, right-left
0x0094: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x0095: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x0096: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x0097: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x0098: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
0x0099: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x009a: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x009b: 0x00f7, # DIVISION SIGN, right-left
0x009c: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x009d: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x009e: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x009f: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x00a0: 0x0020, # SPACE, right-left
0x00a1: 0x0021, # EXCLAMATION MARK, right-left
0x00a2: 0x0022, # QUOTATION MARK, right-left
0x00a3: 0x0023, # NUMBER SIGN, right-left
0x00a4: 0x0024, # DOLLAR SIGN, right-left
0x00a5: 0x066a, # ARABIC PERCENT SIGN
0x00a6: 0x0026, # AMPERSAND, right-left
0x00a7: 0x0027, # APOSTROPHE, right-left
0x00a8: 0x0028, # LEFT PARENTHESIS, right-left
0x00a9: 0x0029, # RIGHT PARENTHESIS, right-left
0x00aa: 0x002a, # ASTERISK, right-left
0x00ab: 0x002b, # PLUS SIGN, right-left
0x00ac: 0x060c, # ARABIC COMMA
0x00ad: 0x002d, # HYPHEN-MINUS, right-left
0x00ae: 0x002e, # FULL STOP, right-left
0x00af: 0x002f, # SOLIDUS, right-left
0x00b0: 0x0660, # ARABIC-INDIC DIGIT ZERO, right-left (need override)
0x00b1: 0x0661, # ARABIC-INDIC DIGIT ONE, right-left (need override)
0x00b2: 0x0662, # ARABIC-INDIC DIGIT TWO, right-left (need override)
0x00b3: 0x0663, # ARABIC-INDIC DIGIT THREE, right-left (need override)
0x00b4: 0x0664, # ARABIC-INDIC DIGIT FOUR, right-left (need override)
0x00b5: 0x0665, # ARABIC-INDIC DIGIT FIVE, right-left (need override)
0x00b6: 0x0666, # ARABIC-INDIC DIGIT SIX, right-left (need override)
0x00b7: 0x0667, # ARABIC-INDIC DIGIT SEVEN, right-left (need override)
0x00b8: 0x0668, # ARABIC-INDIC DIGIT EIGHT, right-left (need override)
0x00b9: 0x0669, # ARABIC-INDIC DIGIT NINE, right-left (need override)
0x00ba: 0x003a, # COLON, right-left
0x00bb: 0x061b, # ARABIC SEMICOLON
0x00bc: 0x003c, # LESS-THAN SIGN, right-left
0x00bd: 0x003d, # EQUALS SIGN, right-left
0x00be: 0x003e, # GREATER-THAN SIGN, right-left
0x00bf: 0x061f, # ARABIC QUESTION MARK
0x00c0: 0x274a, # EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left
0x00c1: 0x0621, # ARABIC LETTER HAMZA
0x00c2: 0x0622, # ARABIC LETTER ALEF WITH MADDA ABOVE
0x00c3: 0x0623, # ARABIC LETTER ALEF WITH HAMZA ABOVE
0x00c4: 0x0624, # ARABIC LETTER WAW WITH HAMZA ABOVE
0x00c5: 0x0625, # ARABIC LETTER ALEF WITH HAMZA BELOW
0x00c6: 0x0626, # ARABIC LETTER YEH WITH HAMZA ABOVE
0x00c7: 0x0627, # ARABIC LETTER ALEF
0x00c8: 0x0628, # ARABIC LETTER BEH
0x00c9: 0x0629, # ARABIC LETTER TEH MARBUTA
0x00ca: 0x062a, # ARABIC LETTER TEH
0x00cb: 0x062b, # ARABIC LETTER THEH
0x00cc: 0x062c, # ARABIC LETTER JEEM
0x00cd: 0x062d, # ARABIC LETTER HAH
0x00ce: 0x062e, # ARABIC LETTER KHAH
0x00cf: 0x062f, # ARABIC LETTER DAL
0x00d0: 0x0630, # ARABIC LETTER THAL
0x00d1: 0x0631, # ARABIC LETTER REH
0x00d2: 0x0632, # ARABIC LETTER ZAIN
0x00d3: 0x0633, # ARABIC LETTER SEEN
0x00d4: 0x0634, # ARABIC LETTER SHEEN
0x00d5: 0x0635, # ARABIC LETTER SAD
0x00d6: 0x0636, # ARABIC LETTER DAD
0x00d7: 0x0637, # ARABIC LETTER TAH
0x00d8: 0x0638, # ARABIC LETTER ZAH
0x00d9: 0x0639, # ARABIC LETTER AIN
0x00da: 0x063a, # ARABIC LETTER GHAIN
0x00db: 0x005b, # LEFT SQUARE BRACKET, right-left
0x00dc: 0x005c, # REVERSE SOLIDUS, right-left
0x00dd: 0x005d, # RIGHT SQUARE BRACKET, right-left
0x00de: 0x005e, # CIRCUMFLEX ACCENT, right-left
0x00df: 0x005f, # LOW LINE, right-left
0x00e0: 0x0640, # ARABIC TATWEEL
0x00e1: 0x0641, # ARABIC LETTER FEH
0x00e2: 0x0642, # ARABIC LETTER QAF
0x00e3: 0x0643, # ARABIC LETTER KAF
0x00e4: 0x0644, # ARABIC LETTER LAM
0x00e5: 0x0645, # ARABIC LETTER MEEM
0x00e6: 0x0646, # ARABIC LETTER NOON
0x00e7: 0x0647, # ARABIC LETTER HEH
0x00e8: 0x0648, # ARABIC LETTER WAW
0x00e9: 0x0649, # ARABIC LETTER ALEF MAKSURA
0x00ea: 0x064a, # ARABIC LETTER YEH
0x00eb: 0x064b, # ARABIC FATHATAN
0x00ec: 0x064c, # ARABIC DAMMATAN
0x00ed: 0x064d, # ARABIC KASRATAN
0x00ee: 0x064e, # ARABIC FATHA
0x00ef: 0x064f, # ARABIC DAMMA
0x00f0: 0x0650, # ARABIC KASRA
0x00f1: 0x0651, # ARABIC SHADDA
0x00f2: 0x0652, # ARABIC SUKUN
0x00f3: 0x067e, # ARABIC LETTER PEH
0x00f4: 0x0679, # ARABIC LETTER TTEH
0x00f5: 0x0686, # ARABIC LETTER TCHEH
0x00f6: 0x06d5, # ARABIC LETTER AE
0x00f7: 0x06a4, # ARABIC LETTER VEH
0x00f8: 0x06af, # ARABIC LETTER GAF
0x00f9: 0x0688, # ARABIC LETTER DDAL
0x00fa: 0x0691, # ARABIC LETTER RREH
0x00fb: 0x007b, # LEFT CURLY BRACKET, right-left
0x00fc: 0x007c, # VERTICAL LINE, right-left
0x00fd: 0x007d, # RIGHT CURLY BRACKET, right-left
0x00fe: 0x0698, # ARABIC LETTER JEH
0x00ff: 0x06d2, # ARABIC LETTER YEH BARREE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> CONTROL CHARACTER
'\x01' # 0x0001 -> CONTROL CHARACTER
'\x02' # 0x0002 -> CONTROL CHARACTER
'\x03' # 0x0003 -> CONTROL CHARACTER
'\x04' # 0x0004 -> CONTROL CHARACTER
'\x05' # 0x0005 -> CONTROL CHARACTER
'\x06' # 0x0006 -> CONTROL CHARACTER
'\x07' # 0x0007 -> CONTROL CHARACTER
'\x08' # 0x0008 -> CONTROL CHARACTER
'\t' # 0x0009 -> CONTROL CHARACTER
'\n' # 0x000a -> CONTROL CHARACTER
'\x0b' # 0x000b -> CONTROL CHARACTER
'\x0c' # 0x000c -> CONTROL CHARACTER
'\r' # 0x000d -> CONTROL CHARACTER
'\x0e' # 0x000e -> CONTROL CHARACTER
'\x0f' # 0x000f -> CONTROL CHARACTER
'\x10' # 0x0010 -> CONTROL CHARACTER
'\x11' # 0x0011 -> CONTROL CHARACTER
'\x12' # 0x0012 -> CONTROL CHARACTER
'\x13' # 0x0013 -> CONTROL CHARACTER
'\x14' # 0x0014 -> CONTROL CHARACTER
'\x15' # 0x0015 -> CONTROL CHARACTER
'\x16' # 0x0016 -> CONTROL CHARACTER
'\x17' # 0x0017 -> CONTROL CHARACTER
'\x18' # 0x0018 -> CONTROL CHARACTER
'\x19' # 0x0019 -> CONTROL CHARACTER
'\x1a' # 0x001a -> CONTROL CHARACTER
'\x1b' # 0x001b -> CONTROL CHARACTER
'\x1c' # 0x001c -> CONTROL CHARACTER
'\x1d' # 0x001d -> CONTROL CHARACTER
'\x1e' # 0x001e -> CONTROL CHARACTER
'\x1f' # 0x001f -> CONTROL CHARACTER
' ' # 0x0020 -> SPACE, left-right
'!' # 0x0021 -> EXCLAMATION MARK, left-right
'"' # 0x0022 -> QUOTATION MARK, left-right
'#' # 0x0023 -> NUMBER SIGN, left-right
'$' # 0x0024 -> DOLLAR SIGN, left-right
'%' # 0x0025 -> PERCENT SIGN, left-right
'&' # 0x0026 -> AMPERSAND, left-right
"'" # 0x0027 -> APOSTROPHE, left-right
'(' # 0x0028 -> LEFT PARENTHESIS, left-right
')' # 0x0029 -> RIGHT PARENTHESIS, left-right
'*' # 0x002a -> ASTERISK, left-right
'+' # 0x002b -> PLUS SIGN, left-right
',' # 0x002c -> COMMA, left-right; in Arabic-script context, displayed as 0x066C ARABIC THOUSANDS SEPARATOR
'-' # 0x002d -> HYPHEN-MINUS, left-right
'.' # 0x002e -> FULL STOP, left-right; in Arabic-script context, displayed as 0x066B ARABIC DECIMAL SEPARATOR
'/' # 0x002f -> SOLIDUS, left-right
'0' # 0x0030 -> DIGIT ZERO; in Arabic-script context, displayed as 0x0660 ARABIC-INDIC DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE; in Arabic-script context, displayed as 0x0661 ARABIC-INDIC DIGIT ONE
'2' # 0x0032 -> DIGIT TWO; in Arabic-script context, displayed as 0x0662 ARABIC-INDIC DIGIT TWO
'3' # 0x0033 -> DIGIT THREE; in Arabic-script context, displayed as 0x0663 ARABIC-INDIC DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR; in Arabic-script context, displayed as 0x0664 ARABIC-INDIC DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE; in Arabic-script context, displayed as 0x0665 ARABIC-INDIC DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX; in Arabic-script context, displayed as 0x0666 ARABIC-INDIC DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN; in Arabic-script context, displayed as 0x0667 ARABIC-INDIC DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT; in Arabic-script context, displayed as 0x0668 ARABIC-INDIC DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE; in Arabic-script context, displayed as 0x0669 ARABIC-INDIC DIGIT NINE
':' # 0x003a -> COLON, left-right
';' # 0x003b -> SEMICOLON, left-right
'<' # 0x003c -> LESS-THAN SIGN, left-right
'=' # 0x003d -> EQUALS SIGN, left-right
'>' # 0x003e -> GREATER-THAN SIGN, left-right
'?' # 0x003f -> QUESTION MARK, left-right
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET, left-right
'\\' # 0x005c -> REVERSE SOLIDUS, left-right
']' # 0x005d -> RIGHT SQUARE BRACKET, left-right
'^' # 0x005e -> CIRCUMFLEX ACCENT, left-right
'_' # 0x005f -> LOW LINE, left-right
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET, left-right
'|' # 0x007c -> VERTICAL LINE, left-right
'}' # 0x007d -> RIGHT CURLY BRACKET, left-right
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> CONTROL CHARACTER
'\xc4' # 0x0080 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xa0' # 0x0081 -> NO-BREAK SPACE, right-left
'\xc7' # 0x0082 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc9' # 0x0083 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xd1' # 0x0084 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd6' # 0x0085 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x0086 -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xe1' # 0x0087 -> LATIN SMALL LETTER A WITH ACUTE
'\xe0' # 0x0088 -> LATIN SMALL LETTER A WITH GRAVE
'\xe2' # 0x0089 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x008a -> LATIN SMALL LETTER A WITH DIAERESIS
'\u06ba' # 0x008b -> ARABIC LETTER NOON GHUNNA
'\xab' # 0x008c -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
'\xe7' # 0x008d -> LATIN SMALL LETTER C WITH CEDILLA
'\xe9' # 0x008e -> LATIN SMALL LETTER E WITH ACUTE
'\xe8' # 0x008f -> LATIN SMALL LETTER E WITH GRAVE
'\xea' # 0x0090 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x0091 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xed' # 0x0092 -> LATIN SMALL LETTER I WITH ACUTE
'\u2026' # 0x0093 -> HORIZONTAL ELLIPSIS, right-left
'\xee' # 0x0094 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0x0095 -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf1' # 0x0096 -> LATIN SMALL LETTER N WITH TILDE
'\xf3' # 0x0097 -> LATIN SMALL LETTER O WITH ACUTE
'\xbb' # 0x0098 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
'\xf4' # 0x0099 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x009a -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0x009b -> DIVISION SIGN, right-left
'\xfa' # 0x009c -> LATIN SMALL LETTER U WITH ACUTE
'\xf9' # 0x009d -> LATIN SMALL LETTER U WITH GRAVE
'\xfb' # 0x009e -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0x009f -> LATIN SMALL LETTER U WITH DIAERESIS
' ' # 0x00a0 -> SPACE, right-left
'!' # 0x00a1 -> EXCLAMATION MARK, right-left
'"' # 0x00a2 -> QUOTATION MARK, right-left
'#' # 0x00a3 -> NUMBER SIGN, right-left
'$' # 0x00a4 -> DOLLAR SIGN, right-left
'\u066a' # 0x00a5 -> ARABIC PERCENT SIGN
'&' # 0x00a6 -> AMPERSAND, right-left
"'" # 0x00a7 -> APOSTROPHE, right-left
'(' # 0x00a8 -> LEFT PARENTHESIS, right-left
')' # 0x00a9 -> RIGHT PARENTHESIS, right-left
'*' # 0x00aa -> ASTERISK, right-left
'+' # 0x00ab -> PLUS SIGN, right-left
'\u060c' # 0x00ac -> ARABIC COMMA
'-' # 0x00ad -> HYPHEN-MINUS, right-left
'.' # 0x00ae -> FULL STOP, right-left
'/' # 0x00af -> SOLIDUS, right-left
'\u0660' # 0x00b0 -> ARABIC-INDIC DIGIT ZERO, right-left (need override)
'\u0661' # 0x00b1 -> ARABIC-INDIC DIGIT ONE, right-left (need override)
'\u0662' # 0x00b2 -> ARABIC-INDIC DIGIT TWO, right-left (need override)
'\u0663' # 0x00b3 -> ARABIC-INDIC DIGIT THREE, right-left (need override)
'\u0664' # 0x00b4 -> ARABIC-INDIC DIGIT FOUR, right-left (need override)
'\u0665' # 0x00b5 -> ARABIC-INDIC DIGIT FIVE, right-left (need override)
'\u0666' # 0x00b6 -> ARABIC-INDIC DIGIT SIX, right-left (need override)
'\u0667' # 0x00b7 -> ARABIC-INDIC DIGIT SEVEN, right-left (need override)
'\u0668' # 0x00b8 -> ARABIC-INDIC DIGIT EIGHT, right-left (need override)
'\u0669' # 0x00b9 -> ARABIC-INDIC DIGIT NINE, right-left (need override)
':' # 0x00ba -> COLON, right-left
'\u061b' # 0x00bb -> ARABIC SEMICOLON
'<' # 0x00bc -> LESS-THAN SIGN, right-left
'=' # 0x00bd -> EQUALS SIGN, right-left
'>' # 0x00be -> GREATER-THAN SIGN, right-left
'\u061f' # 0x00bf -> ARABIC QUESTION MARK
'\u274a' # 0x00c0 -> EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left
'\u0621' # 0x00c1 -> ARABIC LETTER HAMZA
'\u0622' # 0x00c2 -> ARABIC LETTER ALEF WITH MADDA ABOVE
'\u0623' # 0x00c3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE
'\u0624' # 0x00c4 -> ARABIC LETTER WAW WITH HAMZA ABOVE
'\u0625' # 0x00c5 -> ARABIC LETTER ALEF WITH HAMZA BELOW
'\u0626' # 0x00c6 -> ARABIC LETTER YEH WITH HAMZA ABOVE
'\u0627' # 0x00c7 -> ARABIC LETTER ALEF
'\u0628' # 0x00c8 -> ARABIC LETTER BEH
'\u0629' # 0x00c9 -> ARABIC LETTER TEH MARBUTA
'\u062a' # 0x00ca -> ARABIC LETTER TEH
'\u062b' # 0x00cb -> ARABIC LETTER THEH
'\u062c' # 0x00cc -> ARABIC LETTER JEEM
'\u062d' # 0x00cd -> ARABIC LETTER HAH
'\u062e' # 0x00ce -> ARABIC LETTER KHAH
'\u062f' # 0x00cf -> ARABIC LETTER DAL
'\u0630' # 0x00d0 -> ARABIC LETTER THAL
'\u0631' # 0x00d1 -> ARABIC LETTER REH
'\u0632' # 0x00d2 -> ARABIC LETTER ZAIN
'\u0633' # 0x00d3 -> ARABIC LETTER SEEN
'\u0634' # 0x00d4 -> ARABIC LETTER SHEEN
'\u0635' # 0x00d5 -> ARABIC LETTER SAD
'\u0636' # 0x00d6 -> ARABIC LETTER DAD
'\u0637' # 0x00d7 -> ARABIC LETTER TAH
'\u0638' # 0x00d8 -> ARABIC LETTER ZAH
'\u0639' # 0x00d9 -> ARABIC LETTER AIN
'\u063a' # 0x00da -> ARABIC LETTER GHAIN
'[' # 0x00db -> LEFT SQUARE BRACKET, right-left
'\\' # 0x00dc -> REVERSE SOLIDUS, right-left
']' # 0x00dd -> RIGHT SQUARE BRACKET, right-left
'^' # 0x00de -> CIRCUMFLEX ACCENT, right-left
'_' # 0x00df -> LOW LINE, right-left
'\u0640' # 0x00e0 -> ARABIC TATWEEL
'\u0641' # 0x00e1 -> ARABIC LETTER FEH
'\u0642' # 0x00e2 -> ARABIC LETTER QAF
'\u0643' # 0x00e3 -> ARABIC LETTER KAF
'\u0644' # 0x00e4 -> ARABIC LETTER LAM
'\u0645' # 0x00e5 -> ARABIC LETTER MEEM
'\u0646' # 0x00e6 -> ARABIC LETTER NOON
'\u0647' # 0x00e7 -> ARABIC LETTER HEH
'\u0648' # 0x00e8 -> ARABIC LETTER WAW
'\u0649' # 0x00e9 -> ARABIC LETTER ALEF MAKSURA
'\u064a' # 0x00ea -> ARABIC LETTER YEH
'\u064b' # 0x00eb -> ARABIC FATHATAN
'\u064c' # 0x00ec -> ARABIC DAMMATAN
'\u064d' # 0x00ed -> ARABIC KASRATAN
'\u064e' # 0x00ee -> ARABIC FATHA
'\u064f' # 0x00ef -> ARABIC DAMMA
'\u0650' # 0x00f0 -> ARABIC KASRA
'\u0651' # 0x00f1 -> ARABIC SHADDA
'\u0652' # 0x00f2 -> ARABIC SUKUN
'\u067e' # 0x00f3 -> ARABIC LETTER PEH
'\u0679' # 0x00f4 -> ARABIC LETTER TTEH
'\u0686' # 0x00f5 -> ARABIC LETTER TCHEH
'\u06d5' # 0x00f6 -> ARABIC LETTER AE
'\u06a4' # 0x00f7 -> ARABIC LETTER VEH
'\u06af' # 0x00f8 -> ARABIC LETTER GAF
'\u0688' # 0x00f9 -> ARABIC LETTER DDAL
'\u0691' # 0x00fa -> ARABIC LETTER RREH
'{' # 0x00fb -> LEFT CURLY BRACKET, right-left
'|' # 0x00fc -> VERTICAL LINE, right-left
'}' # 0x00fd -> RIGHT CURLY BRACKET, right-left
'\u0698' # 0x00fe -> ARABIC LETTER JEH
'\u06d2' # 0x00ff -> ARABIC LETTER YEH BARREE
)
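# Spot check of the table above (illustrative): byte 0xC7 maps to U+0627 ARABIC
# LETTER ALEF, so codecs.charmap_decode(b'\xc7', 'strict', decoding_table)
# returns ('\u0627', 1).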
### Encoding Map
encoding_map = {
0x0000: 0x0000, # CONTROL CHARACTER
0x0001: 0x0001, # CONTROL CHARACTER
0x0002: 0x0002, # CONTROL CHARACTER
0x0003: 0x0003, # CONTROL CHARACTER
0x0004: 0x0004, # CONTROL CHARACTER
0x0005: 0x0005, # CONTROL CHARACTER
0x0006: 0x0006, # CONTROL CHARACTER
0x0007: 0x0007, # CONTROL CHARACTER
0x0008: 0x0008, # CONTROL CHARACTER
0x0009: 0x0009, # CONTROL CHARACTER
0x000a: 0x000a, # CONTROL CHARACTER
0x000b: 0x000b, # CONTROL CHARACTER
0x000c: 0x000c, # CONTROL CHARACTER
0x000d: 0x000d, # CONTROL CHARACTER
0x000e: 0x000e, # CONTROL CHARACTER
0x000f: 0x000f, # CONTROL CHARACTER
0x0010: 0x0010, # CONTROL CHARACTER
0x0011: 0x0011, # CONTROL CHARACTER
0x0012: 0x0012, # CONTROL CHARACTER
0x0013: 0x0013, # CONTROL CHARACTER
0x0014: 0x0014, # CONTROL CHARACTER
0x0015: 0x0015, # CONTROL CHARACTER
0x0016: 0x0016, # CONTROL CHARACTER
0x0017: 0x0017, # CONTROL CHARACTER
0x0018: 0x0018, # CONTROL CHARACTER
0x0019: 0x0019, # CONTROL CHARACTER
0x001a: 0x001a, # CONTROL CHARACTER
0x001b: 0x001b, # CONTROL CHARACTER
0x001c: 0x001c, # CONTROL CHARACTER
0x001d: 0x001d, # CONTROL CHARACTER
0x001e: 0x001e, # CONTROL CHARACTER
0x001f: 0x001f, # CONTROL CHARACTER
0x0020: 0x0020, # SPACE, left-right
0x0020: 0x00a0, # SPACE, right-left
0x0021: 0x0021, # EXCLAMATION MARK, left-right
0x0021: 0x00a1, # EXCLAMATION MARK, right-left
0x0022: 0x0022, # QUOTATION MARK, left-right
0x0022: 0x00a2, # QUOTATION MARK, right-left
0x0023: 0x0023, # NUMBER SIGN, left-right
0x0023: 0x00a3, # NUMBER SIGN, right-left
0x0024: 0x0024, # DOLLAR SIGN, left-right
0x0024: 0x00a4, # DOLLAR SIGN, right-left
0x0025: 0x0025, # PERCENT SIGN, left-right
0x0026: 0x0026, # AMPERSAND, left-right
0x0026: 0x00a6, # AMPERSAND, right-left
0x0027: 0x0027, # APOSTROPHE, left-right
0x0027: 0x00a7, # APOSTROPHE, right-left
0x0028: 0x0028, # LEFT PARENTHESIS, left-right
0x0028: 0x00a8, # LEFT PARENTHESIS, right-left
0x0029: 0x0029, # RIGHT PARENTHESIS, left-right
0x0029: 0x00a9, # RIGHT PARENTHESIS, right-left
0x002a: 0x002a, # ASTERISK, left-right
0x002a: 0x00aa, # ASTERISK, right-left
0x002b: 0x002b, # PLUS SIGN, left-right
0x002b: 0x00ab, # PLUS SIGN, right-left
0x002c: 0x002c, # COMMA, left-right; in Arabic-script context, displayed as 0x066C ARABIC THOUSANDS SEPARATOR
0x002d: 0x002d, # HYPHEN-MINUS, left-right
0x002d: 0x00ad, # HYPHEN-MINUS, right-left
0x002e: 0x002e, # FULL STOP, left-right; in Arabic-script context, displayed as 0x066B ARABIC DECIMAL SEPARATOR
0x002e: 0x00ae, # FULL STOP, right-left
0x002f: 0x002f, # SOLIDUS, left-right
0x002f: 0x00af, # SOLIDUS, right-left
0x0030: 0x0030, # DIGIT ZERO; in Arabic-script context, displayed as 0x0660 ARABIC-INDIC DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE; in Arabic-script context, displayed as 0x0661 ARABIC-INDIC DIGIT ONE
0x0032: 0x0032, # DIGIT TWO; in Arabic-script context, displayed as 0x0662 ARABIC-INDIC DIGIT TWO
0x0033: 0x0033, # DIGIT THREE; in Arabic-script context, displayed as 0x0663 ARABIC-INDIC DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR; in Arabic-script context, displayed as 0x0664 ARABIC-INDIC DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE; in Arabic-script context, displayed as 0x0665 ARABIC-INDIC DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX; in Arabic-script context, displayed as 0x0666 ARABIC-INDIC DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN; in Arabic-script context, displayed as 0x0667 ARABIC-INDIC DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT; in Arabic-script context, displayed as 0x0668 ARABIC-INDIC DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE; in Arabic-script context, displayed as 0x0669 ARABIC-INDIC DIGIT NINE
0x003a: 0x003a, # COLON, left-right
0x003a: 0x00ba, # COLON, right-left
0x003b: 0x003b, # SEMICOLON, left-right
0x003c: 0x003c, # LESS-THAN SIGN, left-right
0x003c: 0x00bc, # LESS-THAN SIGN, right-left
0x003d: 0x003d, # EQUALS SIGN, left-right
0x003d: 0x00bd, # EQUALS SIGN, right-left
0x003e: 0x003e, # GREATER-THAN SIGN, left-right
0x003e: 0x00be, # GREATER-THAN SIGN, right-left
0x003f: 0x003f, # QUESTION MARK, left-right
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET, left-right
0x005b: 0x00db, # LEFT SQUARE BRACKET, right-left
0x005c: 0x005c, # REVERSE SOLIDUS, left-right
0x005c: 0x00dc, # REVERSE SOLIDUS, right-left
0x005d: 0x005d, # RIGHT SQUARE BRACKET, left-right
0x005d: 0x00dd, # RIGHT SQUARE BRACKET, right-left
0x005e: 0x005e, # CIRCUMFLEX ACCENT, left-right
0x005e: 0x00de, # CIRCUMFLEX ACCENT, right-left
0x005f: 0x005f, # LOW LINE, left-right
0x005f: 0x00df, # LOW LINE, right-left
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET, left-right
0x007b: 0x00fb, # LEFT CURLY BRACKET, right-left
0x007c: 0x007c, # VERTICAL LINE, left-right
0x007c: 0x00fc, # VERTICAL LINE, right-left
0x007d: 0x007d, # RIGHT CURLY BRACKET, left-right
0x007d: 0x00fd, # RIGHT CURLY BRACKET, right-left
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # CONTROL CHARACTER
0x00a0: 0x0081, # NO-BREAK SPACE, right-left
0x00ab: 0x008c, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
0x00bb: 0x0098, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
0x00c4: 0x0080, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c7: 0x0082, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0083, # LATIN CAPITAL LETTER E WITH ACUTE
0x00d1: 0x0084, # LATIN CAPITAL LETTER N WITH TILDE
0x00d6: 0x0085, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00dc: 0x0086, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00e0: 0x0088, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x0087, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0089, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x008a, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e7: 0x008d, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008f, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x008e, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0090, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0091, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ed: 0x0092, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x0094, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x0095, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f1: 0x0096, # LATIN SMALL LETTER N WITH TILDE
0x00f3: 0x0097, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0099, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x009a, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x009b, # DIVISION SIGN, right-left
0x00f9: 0x009d, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x009c, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x009e, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x009f, # LATIN SMALL LETTER U WITH DIAERESIS
0x060c: 0x00ac, # ARABIC COMMA
0x061b: 0x00bb, # ARABIC SEMICOLON
0x061f: 0x00bf, # ARABIC QUESTION MARK
0x0621: 0x00c1, # ARABIC LETTER HAMZA
0x0622: 0x00c2, # ARABIC LETTER ALEF WITH MADDA ABOVE
0x0623: 0x00c3, # ARABIC LETTER ALEF WITH HAMZA ABOVE
0x0624: 0x00c4, # ARABIC LETTER WAW WITH HAMZA ABOVE
0x0625: 0x00c5, # ARABIC LETTER ALEF WITH HAMZA BELOW
0x0626: 0x00c6, # ARABIC LETTER YEH WITH HAMZA ABOVE
0x0627: 0x00c7, # ARABIC LETTER ALEF
0x0628: 0x00c8, # ARABIC LETTER BEH
0x0629: 0x00c9, # ARABIC LETTER TEH MARBUTA
0x062a: 0x00ca, # ARABIC LETTER TEH
0x062b: 0x00cb, # ARABIC LETTER THEH
0x062c: 0x00cc, # ARABIC LETTER JEEM
0x062d: 0x00cd, # ARABIC LETTER HAH
0x062e: 0x00ce, # ARABIC LETTER KHAH
0x062f: 0x00cf, # ARABIC LETTER DAL
0x0630: 0x00d0, # ARABIC LETTER THAL
0x0631: 0x00d1, # ARABIC LETTER REH
0x0632: 0x00d2, # ARABIC LETTER ZAIN
0x0633: 0x00d3, # ARABIC LETTER SEEN
0x0634: 0x00d4, # ARABIC LETTER SHEEN
0x0635: 0x00d5, # ARABIC LETTER SAD
0x0636: 0x00d6, # ARABIC LETTER DAD
0x0637: 0x00d7, # ARABIC LETTER TAH
0x0638: 0x00d8, # ARABIC LETTER ZAH
0x0639: 0x00d9, # ARABIC LETTER AIN
0x063a: 0x00da, # ARABIC LETTER GHAIN
0x0640: 0x00e0, # ARABIC TATWEEL
0x0641: 0x00e1, # ARABIC LETTER FEH
0x0642: 0x00e2, # ARABIC LETTER QAF
0x0643: 0x00e3, # ARABIC LETTER KAF
0x0644: 0x00e4, # ARABIC LETTER LAM
0x0645: 0x00e5, # ARABIC LETTER MEEM
0x0646: 0x00e6, # ARABIC LETTER NOON
0x0647: 0x00e7, # ARABIC LETTER HEH
0x0648: 0x00e8, # ARABIC LETTER WAW
0x0649: 0x00e9, # ARABIC LETTER ALEF MAKSURA
0x064a: 0x00ea, # ARABIC LETTER YEH
0x064b: 0x00eb, # ARABIC FATHATAN
0x064c: 0x00ec, # ARABIC DAMMATAN
0x064d: 0x00ed, # ARABIC KASRATAN
0x064e: 0x00ee, # ARABIC FATHA
0x064f: 0x00ef, # ARABIC DAMMA
0x0650: 0x00f0, # ARABIC KASRA
0x0651: 0x00f1, # ARABIC SHADDA
0x0652: 0x00f2, # ARABIC SUKUN
0x0660: 0x00b0, # ARABIC-INDIC DIGIT ZERO, right-left (need override)
0x0661: 0x00b1, # ARABIC-INDIC DIGIT ONE, right-left (need override)
0x0662: 0x00b2, # ARABIC-INDIC DIGIT TWO, right-left (need override)
0x0663: 0x00b3, # ARABIC-INDIC DIGIT THREE, right-left (need override)
0x0664: 0x00b4, # ARABIC-INDIC DIGIT FOUR, right-left (need override)
0x0665: 0x00b5, # ARABIC-INDIC DIGIT FIVE, right-left (need override)
0x0666: 0x00b6, # ARABIC-INDIC DIGIT SIX, right-left (need override)
0x0667: 0x00b7, # ARABIC-INDIC DIGIT SEVEN, right-left (need override)
0x0668: 0x00b8, # ARABIC-INDIC DIGIT EIGHT, right-left (need override)
0x0669: 0x00b9, # ARABIC-INDIC DIGIT NINE, right-left (need override)
0x066a: 0x00a5, # ARABIC PERCENT SIGN
0x0679: 0x00f4, # ARABIC LETTER TTEH
0x067e: 0x00f3, # ARABIC LETTER PEH
0x0686: 0x00f5, # ARABIC LETTER TCHEH
0x0688: 0x00f9, # ARABIC LETTER DDAL
0x0691: 0x00fa, # ARABIC LETTER RREH
0x0698: 0x00fe, # ARABIC LETTER JEH
0x06a4: 0x00f7, # ARABIC LETTER VEH
0x06af: 0x00f8, # ARABIC LETTER GAF
0x06ba: 0x008b, # ARABIC LETTER NOON GHUNNA
0x06d2: 0x00ff, # ARABIC LETTER YEH BARREE
0x06d5: 0x00f6, # ARABIC LETTER AE
0x2026: 0x0093, # HORIZONTAL ELLIPSIS, right-left
0x274a: 0x00c0, # EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left
}
|
|
""" Trains an agent with Deep Q Learning or Double DQN on Breakout. Uses OpenAI Gym.
"""
import sys
import os
sys.path.insert(0,os.path.expanduser('~/Library/Python/2.7/lib/python/site-packages/'))
import numpy as np
import cPickle as pickle
import gym
from optparse import OptionParser
import itertools
import random
import time
from collections import deque, namedtuple
import copy
from scipy.misc import imresize
from malpi.layers import *
from malpi.model import *
from malpi.optimizer import Optimizer
from malpi.experience import Experience2
try:
import config
except:
print "Failed to load config file config.py."
print "Try copying config_empty.py to config.py and re-running."
exit()
import ast
from sklearn.linear_model import BayesianRidge, LinearRegression
import sklearn.gaussian_process as gp
from scipy.stats import norm
from scipy.optimize import minimize
def expected_improvement(x, gaussian_process, evaluated_loss, greater_is_better=False, n_params=1):
""" expected_improvement
Expected improvement acquisition function.
Arguments:
----------
x: array-like, shape = [n_samples, n_hyperparams]
The point for which the expected improvement needs to be computed.
gaussian_process: GaussianProcessRegressor object.
Gaussian process trained on previously evaluated hyperparameters.
evaluated_loss: Numpy array.
            Numpy array that contains the values of the loss function for the previously
evaluated hyperparameters.
greater_is_better: Boolean.
Boolean flag that indicates whether the loss function is to be maximised or minimised.
n_params: int.
Dimension of the hyperparameter space.
"""
x_to_predict = x.reshape(-1, n_params)
mu, sigma = gaussian_process.predict(x_to_predict, return_std=True)
if greater_is_better:
loss_optimum = np.max(evaluated_loss)
else:
loss_optimum = np.min(evaluated_loss)
scaling_factor = (-1) ** (not greater_is_better)
# In case sigma equals zero
with np.errstate(divide='ignore'):
Z = scaling_factor * (mu - loss_optimum) / sigma
expected_improvement = scaling_factor * (mu - loss_optimum) * norm.cdf(Z) + sigma * norm.pdf(Z)
        expected_improvement[sigma == 0.0] = 0.0
return -1 * expected_improvement
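# Illustrative call of the acquisition function above. It assumes `model` is an
# already-fitted sklearn GaussianProcessRegressor and `yp` the losses observed so
# far; the function returns the negated EI, so flip the sign to inspect it:
#   ei = -expected_improvement(np.array([[0.5]]), model, yp,
#                              greater_is_better=True, n_params=1)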
def sample_next_hyperparameter(acquisition_func, gaussian_process, evaluated_loss, greater_is_better=False,
bounds=(0, 10), n_restarts=25):
""" sample_next_hyperparameter
Proposes the next hyperparameter to sample the loss function for.
Arguments:
----------
acquisition_func: function.
Acquisition function to optimise.
gaussian_process: GaussianProcessRegressor object.
Gaussian process trained on previously evaluated hyperparameters.
evaluated_loss: array-like, shape = [n_obs,]
            Numpy array that contains the values of the loss function for the previously
evaluated hyperparameters.
greater_is_better: Boolean.
Boolean flag that indicates whether the loss function is to be maximised or minimised.
        bounds: array-like, shape = [n_params, 2].
            Lower and upper bounds for each hyperparameter, passed to the L-BFGS optimiser.
n_restarts: integer.
Number of times to run the minimiser with different starting points.
"""
best_x = None
best_acquisition_value = 1
n_params = bounds.shape[0]
for starting_point in np.random.uniform(bounds[:, 0], bounds[:, 1], size=(n_restarts, n_params)):
res = minimize(fun=acquisition_func,
x0=starting_point.reshape(1, -1),
bounds=bounds,
method='L-BFGS-B',
args=(gaussian_process, evaluated_loss, greater_is_better, n_params))
if res.fun < best_acquisition_value:
best_acquisition_value = res.fun
best_x = res.x
return best_x
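# Illustrative call of the proposal helper above (bounds must be an ndarray of
# shape [n_params, 2]; `model` and `yp` are assumed to exist as in the sketch
# following expected_improvement):
#   next_x = sample_next_hyperparameter(expected_improvement, model, yp,
#                                       greater_is_better=True,
#                                       bounds=np.array([[-2.0, 2.0]]),
#                                       n_restarts=25)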
def bayesian_optimisation(n_iters, sample_loss, bounds, x0=None, n_pre_samples=5,
gp_params=None, random_search=False, alpha=1e-5, epsilon=1e-7):
""" bayesian_optimisation
Uses Gaussian Processes to optimise the loss function `sample_loss`.
Arguments:
----------
n_iters: integer.
Number of iterations to run the search algorithm.
sample_loss: function.
Function to be optimised.
bounds: array-like, shape = [n_params, 2].
Lower and upper bounds on the parameters of the function `sample_loss`.
x0: array-like, shape = [n_pre_samples, n_params].
Array of initial points to sample the loss function for. If None, randomly
samples from the loss function.
n_pre_samples: integer.
If x0 is None, samples `n_pre_samples` initial points from the loss function.
gp_params: dictionary.
Dictionary of parameters to pass on to the underlying Gaussian Process.
        random_search: integer.
            If non-zero, the acquisition function is optimised by scoring this many
            uniformly sampled candidate points instead of running L-BFGS-B.
alpha: double.
Variance of the error term of the GP.
epsilon: double.
Precision tolerance for floats.
"""
x_list = []
y_list = []
n_params = bounds.shape[0]
if x0 is None:
for params in np.random.uniform(bounds[:, 0], bounds[:, 1], (n_pre_samples, bounds.shape[0])):
x_list.append(params)
y_list.append(sample_loss(params))
else:
for params in x0:
x_list.append(params)
y_list.append(sample_loss(params))
xp = np.array(x_list)
yp = np.array(y_list)
# Create the GP
if gp_params is not None:
model = gp.GaussianProcessRegressor(**gp_params)
else:
kernel = gp.kernels.Matern()
model = gp.GaussianProcessRegressor(kernel=kernel,
alpha=alpha,
n_restarts_optimizer=10,
normalize_y=True)
for n in range(n_iters):
model.fit(xp, yp)
# Sample next hyperparameter
if random_search:
x_random = np.random.uniform(bounds[:, 0], bounds[:, 1], size=(random_search, n_params))
ei = -1 * expected_improvement(x_random, model, yp, greater_is_better=True, n_params=n_params)
next_sample = x_random[np.argmax(ei), :]
else:
next_sample = sample_next_hyperparameter(expected_improvement, model, yp, greater_is_better=True, bounds=bounds, n_restarts=100)
# Duplicates will break the GP. In case of a duplicate, we will randomly sample a next query point.
if np.any(np.abs(next_sample - xp) <= epsilon):
next_sample = np.random.uniform(bounds[:, 0], bounds[:, 1], bounds.shape[0])
# Sample loss for new set of parameters
cv_score = sample_loss(next_sample)
# Update lists
x_list.append(next_sample)
y_list.append(cv_score)
# Update xp and yp
xp = np.array(x_list)
yp = np.array(y_list)
return xp, yp
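# Illustrative usage sketch: bayesian_optimisation() driving a toy 1-D objective.
# The routine hard-codes greater_is_better=True when proposing points, so
# sample_loss is treated as a score to maximise; the quadratic below is invented
# purely to show the calling convention.
def _example_bayesian_optimisation():
    """Toy example only: maximise -(x - 0.3)**2 over [0, 1] for a few iterations."""
    toy_bounds = np.array([[0.0, 1.0]])
    toy_score = lambda params: float(-(params[0] - 0.3) ** 2)
    xp, yp = bayesian_optimisation(n_iters=5, sample_loss=toy_score,
                                   bounds=toy_bounds, n_pre_samples=3)
    return xp[np.argmax(yp)]                   # parameters of the best score so far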
# {'epsilon_decay': 0.99957392597900963, 'epsilon': 0.96126118058910504, 'learning_rate': 0.0048160891703121133, 'batch_size': 32, 'best_score': 164.90000000000001, 'episodes': 3000, 'clip_error': False, 'learning_rate_decay': 0.99992369857077323, 'lr_decay_on_best': 0.94999999999999996, 'update_rate': 20, 'reg': 0.0050000000000000001, 'gamma': 0.99}
def readParams():
hparams = []
y = []
with open('CartPole-v0_dqn_won.txt', 'r') as f:
for line in f:
resd = ast.literal_eval(line)
if isinstance(resd,dict):
best = 195.0
if 'best_score' in resd:
best = resd['best_score']
sample = [32, 10, 200, 0.99, resd['epsilon'], resd['epsilon_decay'],resd['learning_rate'],resd['learning_rate_decay'],resd['lr_decay_on_best'],resd['clip_error'], 0.005]
elif isinstance(resd,list):
sample = resd[0:11]
best = resd[11]
hparams.append(sample)
y.append(best)
#hparams = np.array(hparams)
#y = np.array(y)
return (hparams,y)
#clf = BayesianRidge(compute_score=True)
#clf.fit(hparams, y)
#ols = LinearRegression()
#ols.fit(X, y)
#np.seterr(all='raise')
np.seterr(under='ignore')
def stats(arr, msg=""):
mi = np.min(arr)
ma = np.max(arr)
av = np.mean(arr)
std = np.std(arr)
abs_arr = np.abs(arr)
mi_abs = np.min(abs_arr)
ma_abs = np.max(abs_arr)
print "%sMin/Max/Mean/Stdev abs(Min/Max): %g/%g/%g/%g %g/%g" % (msg,mi,ma,av,std,mi_abs,ma_abs)
def saveModel( model, options ):
filename = os.path.join( options.dir_model, options.model_name + ".pickle" )
with open(filename, 'wb') as f:
pickle.dump( model, f, pickle.HIGHEST_PROTOCOL)
def initializeModel( name, number_actions, input_dim=(4,84,84), verbose=False ):
output = "FC-%d" % (number_actions,)
# layers = ["conv-32", "maxpool", "conv-64", "maxpool", "conv-64", "FC-512", output]
# layer_params = [{'filter_size':3, 'stride':1 },
# {'pool_stride': 2, 'pool_width': 2, 'pool_height': 2},
# {'filter_size':3, 'stride':1 },
# {'pool_stride': 2, 'pool_width': 2, 'pool_height': 2},
# {'filter_size':3, 'stride':2 },
# {}, {'relu':False} ]
# From the DQN paper, mostly
# layers = ["conv-32", "conv-64", "conv-64", "FC-512", output]
# layer_params = [{'filter_size':8, 'stride':4, 'pad':4 },
# {'filter_size':4, 'stride':2, 'pad':2},
# {'filter_size':3, 'stride':1, 'pad':1},
# {}, {'relu':False} ]
layers = ["FC-200", output]
layer_params = [ {}, {'relu':False} ]
model = MalpiModel(layers, layer_params, input_dim=input_dim, reg=0.005, dtype=np.float32, verbose=verbose)
model.name = name
if verbose:
print
model.describe()
print
return model
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x)) # sigmoid "squashing" function to interval [0,1]
def prepro(I):
""" prepro 210x160x3 uint8 frame into (84x84) float
"""
rgb_weights = [0.2989, 0.5870, 0.1140]
I = I[35:195] # crop
I = imresize(I, (84,84), interp='nearest' )
I = np.sum( I * rgb_weights, axis=2) # Convert to grayscale, shape = (84,84)
return I.astype(np.float) / 255.0
#return I.astype(np.float)
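# Illustrative usage sketch: prepro() on a synthetic 210x160x3 frame, just to show
# the expected input and output shapes. A real observation from env.reset() or
# env.step() would be used in practice.
def _example_prepro():
    """Toy example only: preprocess one random frame; the result should be (84, 84)."""
    fake_frame = np.random.randint(0, 256, size=(210, 160, 3)).astype(np.uint8)
    return prepro(fake_frame).shape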
def discount_rewards(r, gamma, done, normalize=True):
""" take 1D float array of rewards and compute discounted reward.
if normalize is True: subtract mean and divide by std dev
"""
discounted_r = np.zeros_like(r)
running_add = 0
for t in reversed(xrange(0, r.size)):
if not done[t]: running_add = 0 # reset the sum, since this was a game boundary
running_add = running_add * gamma + r[t]
discounted_r[t] = running_add
if normalize:
# standardize the rewards to be unit normal (helps control the gradient estimator variance)
discounted_r -= np.mean(discounted_r)
discounted_r /= np.std(discounted_r)
return discounted_r
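# NOTE: the following definition shadows discount_rewards() above; it resets the
# running sum on any non-zero reward (a Pong-specific convention) instead of
# consulting a `done` array.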
def discount_rewards(r, gamma, normalize=True):
""" take 1D float array of rewards and compute discounted reward.
if normalize is True: subtract mean and divide by std dev
"""
discounted_r = np.zeros_like(r)
running_add = 0
for t in reversed(xrange(0, r.size)):
if r[t] != 0: running_add = 0 # reset the sum, since this was a game boundary (pong specific!)
running_add = running_add * gamma + r[t]
discounted_r[t] = running_add
if normalize:
# standardize the rewards to be unit normal (helps control the gradient estimator variance)
discounted_r -= np.mean(discounted_r)
discounted_r /= np.std(discounted_r)
return discounted_r
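# Illustrative worked example: the Pong-specific variant above on a toy 3-step
# episode, with normalisation off so the raw discounted sums are visible. With
# gamma = 0.5 and rewards [0, 0, 1] the result is [0.25, 0.5, 1.0].
def _example_discount_rewards():
    """Toy example only: discount a 3-step episode ending in a single reward."""
    toy_rewards = np.array([0.0, 0.0, 1.0])
    return discount_rewards(toy_rewards, 0.5, normalize=False)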
def make_epsilon_greedy_policy(estimator, epsilon, nA):
"""
Creates an epsilon-greedy policy based on a given Q-function approximator and epsilon.
Args:
estimator: An estimator that returns q values for a given state
epsilon: The probability to select a random action . float between 0 and 1.
nA: Number of actions in the environment.
Returns:
A function that takes the observation as an argument and returns
the probabilities for each action in the form of a numpy array of length nA.
"""
def policy_fn(observation):
A = np.ones(nA, dtype=float) * epsilon / nA
q_values,_ = estimator.forward(observation, mode="test")
best_action = np.argmax(q_values[0])
A[best_action] += (1.0 - epsilon)
return A
return policy_fn
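# Illustrative usage sketch: sampling an action from a policy built by
# make_epsilon_greedy_policy(). `estimator` is assumed to be any object whose
# forward(observation, mode="test") returns (q_values, cache), matching the
# MalpiModel interface used elsewhere in this file.
def _example_epsilon_greedy(estimator, observation, nA=2):
    """Toy example only: build a policy with epsilon=0.1 and draw one action."""
    policy = make_epsilon_greedy_policy(estimator, epsilon=0.1, nA=nA)
    action_probs = policy(observation)         # length-nA probability vector
    return np.random.choice(nA, p=action_probs)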
def choose_epsilon_greedy( estimator, observation, epsilon, nA ):
if np.random.random() < epsilon:
return np.random.randint(nA)
else:
q_values,_ = estimator.forward(observation.reshape(1,4), mode="test")
return np.argmax(q_values[0])
def check_weights( model ):
for k,w in model.params.iteritems():
smallest = np.min( np.abs(w) )
print "Smallest %s: %g" % (k,smallest)
mask_zeros = w != 0.0
mask = np.abs(w) < 1e-20
mask = np.logical_and(mask_zeros,mask)
if np.count_nonzero(mask) > 0:
print "Underflow in %s " % (k,)
def hyperparameterGenerator( oneRun = False ):
batch_size = 32 # backprop batch size
update_rate = 20 # every how many episodes to copy behavior model to target
gamma = 0.99 # discount factor for reward
epsilon = 0.5
epsilon_decay = 0.999
learning_rate = 0.01
learning_rate_decay = 0.999
lr_decay_on_best = 0.95
clip_error = True
reg = 0.005
hparams = { "reg": reg, "learning_rate": learning_rate, "learning_rate_decay":learning_rate_decay, "batch_size":batch_size, "update_rate":update_rate, "gamma":gamma, "epsilon":epsilon, "epsilon_decay":epsilon_decay,
"lr_decay_on_best":lr_decay_on_best, "clip_error":clip_error }
variations = np.array([0.9,1.0,1.1])
if oneRun:
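        # NOTE: this branch only pins reguls and lrs; epsilons, epsilon_decays,
        # lr_decays, decays_on_best and clip_errors are defined only in the else
        # branch, so oneRun=True would raise a NameError in the loops below.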
reguls = [3.37091767808e-05]
lrs = [0.0002006801544726]
else:
count = 4
reguls = np.array([0.005])
epsilons = np.random.uniform( 0.5, 1.0, count )
epsilon_decays = np.random.uniform( 0.999, 0.9999, count )
lrs = np.random.uniform( 0.0001, 0.03, count )
lr_decays = np.random.uniform( 0.999, 0.99999, count )
decays_on_best = np.array([lr_decay_on_best])
clip_errors = np.array([True,False])
# reguls = np.array([3.37091767808e-05]) * variations
# lrs = np.array([0.0002006801544726]) * variations
#reguls = 10 ** np.random.uniform(-5, -4, 2) #[0.0001, 0.001, 0.01]
#lrs = 10 ** np.random.uniform(-6, -3, 5) #[1e-4, 1e-3, 1e-2]
#reguls = np.append([3.37091767808e-05],reguls)
#lrs = np.append([0.000182436504066],lrs)
for reg in reguls:
for lr in lrs:
for decay in lr_decays:
for epsilon in epsilons:
for epsilon_decay in epsilon_decays:
for decay_on_best in decays_on_best:
for clip_error in clip_errors:
hparams["reg"] = reg
hparams["learning_rate"] = lr
hparams["learning_rate_decay"] = decay
hparams["epsilon"] = epsilon
hparams["epsilon_decay"] = epsilon_decay
hparams["lr_decay_on_best"] = decay_on_best
hparams["clip_error"] = clip_error
yield hparams
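# Illustrative usage sketch: hyperparameterGenerator() yields the same dict object
# on every iteration (it mutates `hparams` in place), so copy each setting if it
# needs to be kept. itertools is already imported at the top of this file.
def _example_hyperparameter_generator():
    """Toy example only: collect the first three randomly drawn settings."""
    return [dict(h) for h in itertools.islice(hyperparameterGenerator(), 3)]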
def test(tmodel, env, options):
reward_100 = 0
for i in range(100):
episode_reward = 0
state = env.reset()
done = False
steps = 0
while not done and (steps < 1000):
if options.render: env.render()
q_values,_ = tmodel.forward(state.reshape(1,4), mode="test")
action = np.argmax(q_values[0])
state, reward, done, info = env.step(action)
episode_reward += reward
steps += 1
reward_100 += episode_reward
return (reward_100 / 100.0)
def train(env, options):
alpha=1e-5
epsilon=1e-7
kernel = gp.kernels.Matern()
model = gp.GaussianProcessRegressor(kernel=kernel,
alpha=alpha,
n_restarts_optimizer=10,
normalize_y=True)
#x_list,y_list = readParams()
good_list = [ [48.76090415229852, 1.6927175081103532, 223.02341007066963, 0.5678010007909667, 0.5549954432648416, 0.9933886373603302, 0.04461187669276121, 0.9911454128640629, 0.9563065642076264, 0.8080555822008355, 0.0015395898545990808, 165.0, 5000.0],
[44.05077224914717, 4.581278696929539, 567.454951740726, 0.872342953950116, 0.18049877148475657, 0.990163709408304, 0.062370916712252866, 0.9944033072903318, 0.9963371399648688, 0.6395886294825446, 0.0010001848382758618, 117.75, 5000.0],
[36.3723209296144, 17.13540920146732, 649.2612028561178, 0.13224300863461783, 0.543166266140875, 0.9943310250757145, 0.08187568538373177, 0.9966047176258499, 0.996227517495977, 0.4472571272004753, 0.00832929196043553, 105.12, 5000.0],
[10.916692091321929, 1.7197588754360758, 859.5984930832783, 0.9928960793644326, 0.1274628002990129, 0.9905321890913572, 0.08505446936131436, 0.9954039819492306, 0.9393970414024277, 0.20165955117569845, 0.00393562696555546, 184.0, 5000.0],
[24.61024966623437, 2.3382317127384797, 125.6807628925593, 0.7513928228888437, 0.2758971455651426, 0.9928318138327047, 0.013857939559698086, 0.9927166247992542, 0.9609541841323385, 0.4939770517123132, 0.004033141328968626, 127.14, 5000.0],
[48.414647941793945, 29.60459215462402, 929.5659155100193, 0.22797686540871967, 0.29012857317101626, 0.9902589981938963, 0.048126323473176816, 0.999365668290878, 0.9537590730846931, 0.3837955994859634, 0.0046700024476340925, 131.60344827586206, 5000.0],
[11.625857336308801, 1.7992254729400174, 834.250910881173, 0.9904487770340547, 0.1441466452323528, 0.99, 0.08112103123697603, 0.9967248247150776, 0.9628560158758284, 0.64953096598099, 0.005206558865528496, 134.0, 5000.0],
[ 32, 20, 100, 0.99, 0.7, 0.9995, 0.01, 0.9999, 0.95,True, 0.0005, 195.0, 2000.0 ]
]
x_list = []
y_list = []
for param in good_list:
x_list.append( param[0:11] )
y_list.append( param[11] )
xp = np.array(x_list)
yp = np.array(y_list)
# batch_size update_rate update_freq gamma epsilon epsilon_decay learning_rate learning_rate_decay lr_decay_on_best clip_error behavior.reg
bounds = np.array( [ [10, 50], [1,50], [100,1000], [0.1,1.0], [0.1,1.0], [0.99,1.0], [0.0001,0.1], [0.99,1.0], [0.9,1.0],[0.0,1.0], [0.0005,0.01] ] )
do_bayes = False
do_uniform = False
do_normal = False
next_sample = np.array( [ 32, 20, 100, 0.99, 0.7, 0.9995, 0.01, 0.9999, 0.95,True, 0.0005 ] )
scores = []
for i in range(100):
if do_bayes:
model.fit(xp, yp)
next_sample = sample_next_hyperparameter(expected_improvement, model, yp, greater_is_better=True, bounds=bounds, n_restarts=100)
# Duplicates will break the GP. In case of a duplicate, we will randomly sample a next query point.
if np.any(np.abs(next_sample - xp) <= epsilon):
next_sample = np.random.uniform(bounds[:, 0], bounds[:, 1], bounds.shape[0])
#next_sample = [32, 20, 200, 0.99, 0.88, 0.99957, 0.0045, 0.9999, 0.95, True, 0.005]
# Sample loss for new set of parameters
cv_score = train_one(env, next_sample, options)
scores.append(cv_score)
print "Score %f for %s" % (cv_score, next_sample)
# Update lists
x_list.append(next_sample)
y_list.append(cv_score)
# Update xp and yp
xp = np.array(x_list)
yp = np.array(y_list)
else:
if do_uniform:
next_sample = []
for b in bounds:
next_sample.append( np.random.uniform( b[0], b[1] ) )
elif do_normal:
next_sample = []
stddev = [ 5.0, 0.1, 50, 0.01, 0.01, 0.01, 0.01, 0.01, 0.1, 0.5, 0.001 ]
stdi = 0
for b in good_list[3][0:11]:
next_sample.append( np.random.normal( b, stddev[stdi] ) )
stdi += 1
bt = bounds.T
next_sample = np.clip( next_sample, bt[0], bt[1] )
print next_sample
cv_score = train_one(env, next_sample, options)
scores.append(cv_score)
print "100 iterations: %f / %f" % (np.mean(scores), np.std(scores))
def train_one(env, hparams, options):
ksteps = options.k_steps # number of frames to skip before selecting a new action
num_actions = env.action_space.n
batch_size = int(hparams[0])
update_rate = int(hparams[1])
update_freq = int(hparams[2])
gamma = hparams[3]
epsilon = hparams[4]
epsilon_decay = hparams[5]
learning_rate = hparams[6]
learning_rate_decay = hparams[7]
lr_decay_on_best = hparams[8]
if hparams[9] < 0.5:
clip_error = False
else:
clip_error = True
target = initializeModel( options.model_name, num_actions, input_dim=(4,1) )
target.reg = hparams[10]
target.params["W1"] *= 0.1
behavior = copy.deepcopy(target)
optim = Optimizer( "rmsprop", behavior, learning_rate=learning_rate, decay_rate=0.99, upd_frequency=update_freq)
reward_sum = 0
reward_100 = deque(maxlen=100)
best_test = 15.0 # test(target, env, options)
steps = 0
episode_steps = 0
episode_number = 0
state = env.reset()
exp_history = Experience2( 2000, state.shape )
with open( os.path.join( options.game + ".txt" ), 'a+') as f:
f.write( "%s = %s\n" % ('Start',time.strftime("%Y-%m-%d %H:%M:%S")) )
f.write( "%s = %s\n" % ('Model Name',target.name) )
if options.initialize:
f.write( "Weights initialized\n" )
f.write( str(target.layers) + "\n" )
f.write( str(target.layer_params) + "\n" )
f.write( "%s = %d\n" % ('batch_size',batch_size) )
f.write( "%s = %d\n" % ('update_rate',update_rate) )
f.write( "%s = %f\n" % ('gamma',gamma) )
f.write( "%s = %f\n" % ('epsilon',epsilon) )
f.write( "%s = %f\n" % ('epsilon_decay',epsilon_decay) )
f.write( "%s = %d\n" % ('k-steps',ksteps) )
f.write( "%s = %f\n" % ('learning_rate',learning_rate) )
f.write( "%s = %f\n" % ('learning_rate_decay',learning_rate_decay) )
f.write( "%s = %f\n" % ('lr_decay_on_best',lr_decay_on_best) )
f.write( "%s = %s\n" % ('clip_error',str(clip_error)) )
f.write( "Optimizer %s\n" % (optim.optim_type,) )
f.write( " %s = %f\n" % ('learning rate',optim.learning_rate) )
f.write( " %s = %f\n" % ('decay rate',optim.decay_rate) )
f.write( " %s = %f\n" % ('epsilon',optim.epsilon) )
f.write( " %s = %f\n" % ('update frequency',optim.upd_frequency) )
f.write( "\n" )
while (options.max_episodes == 0) or (episode_number < options.max_episodes):
if options.render: env.render()
action = choose_epsilon_greedy( behavior, state, epsilon, num_actions )
#action = np.random.randint(num_actions)
# step the environment once, or ksteps times
reward = 0
done = False
for k in range(ksteps):
next_state, r, d, info = env.step(action)
reward += r
if d:
done = True
reward_sum += reward
steps += ksteps
episode_steps += ksteps
exp_history.save( state, action, reward, done, next_state )
state = next_state
if (exp_history.size() > (batch_size * 5)):
states, actions, rewards, batch_done, new_states, _ = exp_history.batch( batch_size )
actions = actions.astype(np.int)
target_values, _ = target.forward( new_states, mode='test' )
double_dqn = True
if double_dqn:
behavior_values, _ = behavior.forward( new_states, mode='test' )
best_actions = np.argmax(behavior_values,axis=1)
q_target = rewards + batch_done * gamma * target_values[np.arange(batch_size), best_actions]
else:
q_target = rewards + batch_done * gamma * np.max(target_values, axis=1)
action_values, cache = behavior.forward(states, mode='train', verbose=False)
q_error = np.zeros( action_values.shape )
#q_error[ np.arange(batch_size), actions ] = q_target - action_values[ np.arange(batch_size), actions ]
q_error[ np.arange(batch_size), actions ] = action_values[ np.arange(batch_size), actions ] - q_target
dx = q_error
dx /= batch_size
if clip_error:
np.clip( dx, -1.0, 1.0, dx )
q_error = np.sum( np.square( q_error ) )
# dx needs to have shape(batch_size,num_actions), e.g. (32,6)
_, grad = behavior.backward(cache, q_error, dx )
optim.update( grad, check_ratio=False )
if done: # an episode finished
episode_number += 1
reward_100.append(reward_sum)
if episode_number % update_rate == 0:
target = copy.deepcopy(behavior)
treward = np.mean(reward_100) # test(target, env, options)
print
print 'Ep %d' % ( episode_number, )
print 'Reward : %0.2f %0.2f' % ( reward_sum, np.mean(reward_100) )
print "Test reward : %0.2f vs %0.2f" % (treward, best_test)
print "Learning rate: %g" % (optim.learning_rate,)
print "Epsilon : %g" % (epsilon,)
if treward > best_test:
best_test = treward
if treward > 195.0:
print "Final Learning rate: %f" % (optim.learning_rate,)
print "WON! In %d episodes" % (episode_number,)
break
if optim.learning_rate > 0.00001:
optim.learning_rate *= lr_decay_on_best
if optim.learning_rate > 0.00001:
optim.learning_rate *= learning_rate_decay
if epsilon > 0.1:
epsilon *= epsilon_decay
reward_sum = 0
episode_steps = 0
steps = 0
state = env.reset()
with open( os.path.join( options.game + "_dqn_won.txt" ), 'a+') as f:
hparams = np.append( hparams, [best_test, episode_number] )
f.write( "%s\n" % (hparams.tolist(),) )
with open( os.path.join( options.game + ".txt" ), 'a+') as f:
f.write( "%s = %f\n" % ('Final epsilon', epsilon) )
f.write( "%s = %f\n" % ('Final learning rate', optim.learning_rate) )
f.write( "%s = %f\n" % ('Best test score', best_test) )
f.write( "%s = %d\n" % ('Episodes', episode_number) )
f.write( "\n\n" )
return best_test
def getOptions():
usage = "Usage: python pg-pong [options] <model name>"
parser = OptionParser( usage=usage )
parser.add_option("-i","--initialize", action="store_true", default=False, help="Initialize model, save to <model name>.pickle, then start training.");
parser.add_option("-d","--dir_model", default="", help="Directory for finding/initializing model files. Defaults to current directory.");
parser.add_option("-r","--render", action="store_true", default=False, help="Render gym environment while training. Will greatly reduce speed.");
parser.add_option("-s","--starting_ep", type="int", default=0, help="Starting episode number (for record keeping).");
parser.add_option("-k","--k_steps", type="int", default=1, help="How many game steps to take before the model chooses a new action.");
parser.add_option("-p","--play", action="store_true", default=False, help="Play only. No training and always choose the best action.");
parser.add_option("--test_only", action="store_true", default=False, help="Run tests, then exit.");
parser.add_option("--desc", action="store_true", default=False, help="Describe the model, then exit.");
parser.add_option("-g","--game", default="Breakout-v0", help="The game environment to use. Defaults to Breakout.");
parser.add_option("-m","--max_episodes", default="0", type="int", help="Maximum number of episodes to train.");
parser.add_option("--upload", action="store_true", default=False, help="Monitor the training run and upload to OpenAI.");
(options, args) = parser.parse_args()
options.model_name = "HyperParamSearch"
if options.desc or options.test_only:
if len(args) != 1:
print usage
exit()
if args[0].endswith('.pickle'):
args[0] = args[0][:-7]
options.model_name = args[0]
if options.k_steps != 1 and options.k_steps != 4:
print "Game step sizes other than 1 and 4 are not currently supported."
exit()
options.dir_model = os.path.expanduser(options.dir_model)
return (options, args)
if __name__ == "__main__":
options, _ = getOptions()
env = gym.envs.make(options.game)
if hasattr(env,'get_action_meanings'):
print env.get_action_meanings()
if options.desc or options.test_only:
if options.initialize:
filename = os.path.join( options.dir_model, options.model_name + ".pickle" )
if os.path.exists(filename):
print "Model already exists at " + filename
print "Delete the existing file or don't use the --initialize/-i flag."
exit()
nA = env.action_space.n
print "Initializing model with %d actions..." % (nA,)
model = initializeModel( options.model_name, nA, input_dim=(4,1) )
model.params["W1"] *= 0.1
model.describe()
model.env = options.game
saveModel( model, options )
else:
print "Reading model..."
with open( os.path.join( options.dir_model, options.model_name+'.pickle'), 'rb') as f:
model = pickle.load( f )
if not hasattr(model, 'env'):
print "Warning, model may not work with the current environment."
if options.desc:
model.describe()
exit()
if options.test_only:
if hasattr(model, 'env'):
if model.env != options.game:
print "Model was not initialized for the current environment: %s vs %s" % (model.env,options.game)
exit()
treward = test(model, env, options)
print "Gym reward: %f" % treward
exit()
if options.upload:
env = gym.wrappers.Monitor(env, "./" + options.game, force=True)
train(env, options)
env.close()
if options.upload:
if hasattr(config, 'openai_key'):
gym.upload('./' + options.game, api_key=config.openai_key)
else:
print "Unable to upload results. Missing 'openai_key' in config."
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.FIFOQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import re
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
class FIFOQueueTest(tf.test.TestCase):
def testConstructor(self):
with tf.Graph().as_default():
q = tf.FIFOQueue(10, tf.float32, name="Q")
self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
self.assertEquals(tf.string_ref, q.queue_ref.dtype)
self.assertProtoEquals("""
name:'Q' op:'FIFOQueue'
attr { key: 'component_types' value { list { type: DT_FLOAT } } }
attr { key: 'shapes' value { list {} } }
attr { key: 'capacity' value { i: 10 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testMultiQueueConstructor(self):
with tf.Graph().as_default():
q = tf.FIFOQueue(5, (tf.int32, tf.float32), shared_name="foo", name="Q")
self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
self.assertEquals(tf.string_ref, q.queue_ref.dtype)
self.assertProtoEquals("""
name:'Q' op:'FIFOQueue'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: 'foo' } }
""", q.queue_ref.op.node_def)
def testConstructorWithShapes(self):
with tf.Graph().as_default():
q = tf.FIFOQueue(5, (tf.int32, tf.float32),
shapes=(tf.TensorShape([1, 1, 2, 3]),
tf.TensorShape([5, 8])), name="Q")
self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
self.assertEquals(tf.string_ref, q.queue_ref.dtype)
self.assertProtoEquals("""
name:'Q' op:'FIFOQueue'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {
shape { dim { size: 1 }
dim { size: 1 }
dim { size: 2 }
dim { size: 3 } }
shape { dim { size: 5 }
dim { size: 8 } }
} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testEnqueue(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32)
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueWithShape(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32, shapes=(3, 2))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
self.assertEqual(1, q.size().eval())
def testEnqueueManyWithShape(self):
with self.test_session():
q = tf.FIFOQueue(10, [tf.int32, tf.int32],
shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertEqual(4, q.size().eval())
def testEnqueueDictWithoutNames(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32)
with self.assertRaisesRegexp(ValueError, "must have names"):
q.enqueue({"a": 12.0})
with self.assertRaisesRegexp(ValueError, "must have names"):
q.enqueue_many({"a": [12.0, 13.0]})
def testParallelEnqueue(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
sess.run(enqueue_op)
threads = [self.checkedThread(target=enqueue, args=(e,))
for e in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
vals = dequeued_t.eval()
self.assertEqual([elems[i]], vals)
def testEnqueueAndBlockingDequeue(self):
with self.test_session() as sess:
q = tf.FIFOQueue(3, tf.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
sess.run(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(sess.run(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
for elem, result in zip(elems, results):
self.assertEqual([elem], result)
def testMultiEnqueueAndDequeue(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, (tf.int32, tf.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
x_val, y_val = sess.run(dequeued_t)
x, y = elems[i]
self.assertEqual([x], x_val)
self.assertEqual([y], y_val)
def testQueueSizeEmpty(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32)
self.assertEqual([0], q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual(1, size.eval())
dequeued_t.op.run()
self.assertEqual(0, size.eval())
def testEnqueueMany(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
vals = dequeued_t.eval()
self.assertEqual([elems[i % 4]], vals)
def testEmptyEnqueueMany(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32)
empty_t = tf.constant([], dtype=tf.float32,
shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual([0], size_t.eval())
enqueue_op.run()
self.assertEqual([0], size_t.eval())
def testEmptyDequeueMany(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueManyWithNoShape(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError("specified shapes"):
q.dequeue_many(0).eval()
def testMultiEnqueueMany(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, (tf.float32, tf.int32))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = sess.run(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testDequeueMany(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32, ())
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], dequeued_t.eval())
self.assertAllEqual(elems[4:8], dequeued_t.eval())
def testMultiDequeueMany(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, (tf.float32, tf.int32),
shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
[11, 12], [13, 14], [15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = sess.run(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
def testHighDimension(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.int32, (4, 4, 4, 4))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testEnqueueWrongShape(self):
q = tf.FIFOQueue(10, (tf.int32, tf.int32), ((), (2)))
with self.assertRaises(ValueError):
q.enqueue(([1, 2], [2, 2]))
with self.assertRaises(ValueError):
q.enqueue_many((7, [[1, 2], [3, 4], [5, 6]]))
def testBatchSizeMismatch(self):
q = tf.FIFOQueue(10, (tf.int32, tf.int32, tf.int32), ((), (), ()))
with self.assertRaises(ValueError):
q.enqueue_many(([1, 2, 3], [1, 2], [1, 2, 3]))
with self.assertRaises(ValueError):
q.enqueue_many(([1, 2, 3], [1, 2], tf.placeholder(tf.int32)))
with self.assertRaises(ValueError):
q.enqueue_many((tf.placeholder(tf.int32), [1, 2], [1, 2, 3]))
def testEnqueueManyEmptyTypeConversion(self):
q = tf.FIFOQueue(10, (tf.int32, tf.float32), ((), ()))
enq = q.enqueue_many(([], []))
self.assertEqual(tf.int32, enq.inputs[1].dtype)
self.assertEqual(tf.float32, enq.inputs[2].dtype)
def testEnqueueWrongType(self):
q = tf.FIFOQueue(10, (tf.int32, tf.float32), ((), ()))
with self.assertRaises(ValueError):
q.enqueue((tf.placeholder(tf.int32), tf.placeholder(tf.int32)))
with self.assertRaises(ValueError):
q.enqueue_many((tf.placeholder(tf.int32), tf.placeholder(tf.int32)))
def testEnqueueWrongShapeAtRuntime(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, (tf.int32, tf.int32), ((2, 2), (3, 3)))
elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
elems_bad = tf.placeholder(tf.int32)
enqueue_op = q.enqueue((elems_ok, elems_bad))
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError, r"Expected \[3,3\], got \[3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
def testEnqueueDequeueManyWrongShape(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, (tf.int32, tf.int32), ((2, 2), (3, 3)))
elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32)
elems_bad = tf.placeholder(tf.int32)
enqueue_op = q.enqueue_many((elems_ok, elems_bad))
dequeued_t = q.dequeue_many(2)
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError,
"Shape mismatch in tuple component 1. "
r"Expected \[2,3,3\], got \[2,3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
dequeued_t.eval()
def testParallelEnqueueMany(self):
with self.test_session() as sess:
q = tf.FIFOQueue(1000, tf.float32, shapes=())
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
sess.run(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
with self.test_session() as sess:
q = tf.FIFOQueue(1000, tf.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelEnqueueAndDequeue(self):
with self.test_session() as sess:
q = tf.FIFOQueue(50, tf.float32, shapes=())
initial_elements = [10.0] * 49
q.enqueue_many((initial_elements,)).run()
enqueue_op = q.enqueue((20.0,))
dequeued_t = q.dequeue()
def enqueue():
for _ in xrange(100):
sess.run(enqueue_op)
def dequeue():
for _ in xrange(100):
self.assertTrue(sess.run(dequeued_t) in (10.0, 20.0))
enqueue_threads = [self.checkedThread(target=enqueue) for _ in range(10)]
dequeue_threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for enqueue_thread in enqueue_threads:
enqueue_thread.start()
for dequeue_thread in dequeue_threads:
dequeue_thread.start()
for enqueue_thread in enqueue_threads:
enqueue_thread.join()
for dequeue_thread in dequeue_threads:
dequeue_thread.join()
# Dequeue the initial count of elements to clean up.
cleanup_elems = q.dequeue_many(49).eval()
for elem in cleanup_elems:
self.assertTrue(elem in (10.0, 20.0))
def testMixtureOfEnqueueAndEnqueueMany(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, tf.int32, shapes=())
enqueue_placeholder = tf.placeholder(tf.int32, shape=())
enqueue_op = q.enqueue((enqueue_placeholder,))
enqueuemany_placeholder = tf.placeholder(
tf.int32, shape=(None,))
enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))
dequeued_t = q.dequeue()
close_op = q.close()
def dequeue():
for i in xrange(250):
self.assertEqual(i, sess.run(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
elements_enqueued = 0
while elements_enqueued < 250:
# With equal probability, run Enqueue or enqueue_many.
if random.random() > 0.5:
enqueue_op.run({enqueue_placeholder: elements_enqueued})
elements_enqueued += 1
else:
count = random.randint(0, min(20, 250 - elements_enqueued))
range_to_enqueue = np.arange(elements_enqueued,
elements_enqueued + count,
dtype=np.int32)
enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
elements_enqueued += count
close_op.run()
dequeue_thread.join()
self.assertEqual(0, q.size().eval())
def testMixtureOfDequeueAndDequeueMany(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, tf.int32, shapes=())
enqueue_op = q.enqueue_many((np.arange(250, dtype=np.int32),))
dequeued_t = q.dequeue()
count_placeholder = tf.placeholder(tf.int32, shape=())
dequeuemany_t = q.dequeue_many(count_placeholder)
def enqueue():
sess.run(enqueue_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
elements_dequeued = 0
while elements_dequeued < 250:
# With equal probability, run Dequeue or dequeue_many.
if random.random() > 0.5:
self.assertEqual(elements_dequeued, dequeued_t.eval())
elements_dequeued += 1
else:
count = random.randint(0, min(20, 250 - elements_dequeued))
expected_range = np.arange(elements_dequeued,
elements_dequeued + count,
dtype=np.int32)
self.assertAllEqual(
expected_range, dequeuemany_t.eval({count_placeholder: count}))
elements_dequeued += count
q.close().run()
enqueue_thread.join()
self.assertEqual(0, q.size().eval())
def testBlockingDequeueMany(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, tf.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
sess.run(enqueue_op)
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.test_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = tf.FIFOQueue(100, tf.int32, ())
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = tf.FIFOQueue(total_count, tf.int32, ())
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
# that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
dequeued_t.eval()
def testBlockingDequeueFromClosedQueue(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def dequeue():
for elem in elems:
self.assertEqual([elem], sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, tf.float32)
close_op = q.close()
dequeued_t = q.dequeue()
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyFromClosedQueue(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, tf.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems, sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self):
with self.test_session() as sess:
q = tf.FIFOQueue(4, tf.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue()
def enqueue():
sess.run(enqueue_op)
def dequeue():
self.assertAllEqual(elems[0:3], sess.run(dequeued_t))
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(dequeued_t)
self.assertEqual(elems[3], sess.run(cleanup_dequeue_t))
def close():
sess.run(close_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_thread = self.checkedThread(target=close)
close_thread.start()
enqueue_thread.join()
dequeue_thread.join()
close_thread.join()
def testClosedBlockingDequeueManyRestoresPartialBatch(self):
with self.test_session() as sess:
q = tf.FIFOQueue(4, (tf.float32, tf.float32), ((), ()))
elems_a = [1.0, 2.0, 3.0]
elems_b = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems_a, elems_b))
dequeued_a_t, dequeued_b_t = q.dequeue_many(4)
cleanup_dequeue_a_t, cleanup_dequeue_b_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def dequeue():
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run([dequeued_a_t, dequeued_b_t])
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
# Test that the elements in the partially-dequeued batch are
# restored in the correct order.
for elem_a, elem_b in zip(elems_a, elems_b):
val_a, val_b = sess.run([cleanup_dequeue_a_t, cleanup_dequeue_b_t])
self.assertEqual(elem_a, val_a)
self.assertEqual(elem_b, val_b)
self.assertEqual(0, q.size().eval())
def testBlockingDequeueManyFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, tf.float32, ())
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32)
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.AbortedError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.AbortedError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
with self.test_session() as sess:
q = tf.FIFOQueue(4, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
self.assertEqual([50.0], dequeued_t.eval())
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
with self.test_session() as sess:
q = tf.FIFOQueue(4, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
time.sleep(0.01)
self.assertEqual([50.0], dequeued_t.eval())
self.assertEqual([60.0], dequeued_t.eval())
def testBlockingEnqueueBeforeClose(self):
with self.test_session() as sess:
q = tf.FIFOQueue(4, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed once the dequeue op runs.
sess.run(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
sess.run(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, dequeued_t.eval())
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 40.0, 50.0]:
self.assertEqual(elem, dequeued_t.eval())
self.assertEqual(0, q.size().eval())
def testBlockingEnqueueManyBeforeClose(self):
with self.test_session() as sess:
q = tf.FIFOQueue(4, tf.float32)
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
sess.run(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, dequeued_t.eval())
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 50.0, 60.0]:
self.assertEqual(elem, dequeued_t.eval())
def testDoesNotLoseValue(self):
with self.test_session():
q = tf.FIFOQueue(1, tf.float32)
enqueue_op = q.enqueue((10.0,))
size_t = q.size()
enqueue_op.run()
for _ in range(500):
self.assertEqual(size_t.eval(), [1])
def testSharedQueueSameSession(self):
with self.test_session():
q1 = tf.FIFOQueue(
1, tf.float32, shared_name="shared_queue")
q1.enqueue((10.0,)).run()
q2 = tf.FIFOQueue(
1, tf.float32, shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q2.dequeue().eval(), [10.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q1.dequeue().eval(), [20.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
def testIncompatibleSharedQueueErrors(self):
with self.test_session():
q_a_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_a")
q_a_2 = tf.FIFOQueue(15, tf.float32, shared_name="q_a")
q_a_1.queue_ref.eval()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.eval()
q_b_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_b")
q_b_2 = tf.FIFOQueue(10, tf.int32, shared_name="q_b")
q_b_1.queue_ref.eval()
with self.assertRaisesOpError("component types"):
q_b_2.queue_ref.eval()
q_c_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_c")
q_c_2 = tf.FIFOQueue(
10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
q_c_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_c_2.queue_ref.eval()
q_d_1 = tf.FIFOQueue(
10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_2 = tf.FIFOQueue(10, tf.float32, shared_name="q_d")
q_d_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.eval()
q_e_1 = tf.FIFOQueue(
10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = tf.FIFOQueue(
10, tf.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
q_e_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.eval()
q_f_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_f")
q_f_2 = tf.FIFOQueue(
10, (tf.float32, tf.int32), shared_name="q_f")
q_f_1.queue_ref.eval()
with self.assertRaisesOpError("component types"):
q_f_2.queue_ref.eval()
def testSelectQueue(self):
with self.test_session():
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
qlist.append(tf.FIFOQueue(10, tf.float32))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = tf.FIFOQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.test_session():
q1 = tf.FIFOQueue(10, tf.float32)
q2 = tf.FIFOQueue(15, tf.float32)
enq_q = tf.FIFOQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("Index must be in the range"):
enq_q.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("Dequeue operation was cancelled"):
sess.run(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("Dequeue operation was cancelled"):
sess.run(dequeue_many_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("Enqueue operation was cancelled"):
sess.run(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("Enqueue operation was cancelled"):
sess.run(enqueue_many_op)
def testResetOfBlockingOperation(self):
with self.test_session() as sess:
q_empty = tf.FIFOQueue(5, tf.float32, ())
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
q_full = tf.FIFOQueue(5, tf.float32)
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(self._blockingDequeueMany, args=(sess,
dequeue_many_op)),
self.checkedThread(self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(self._blockingEnqueueMany, args=(sess,
enqueue_many_op))]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
def testBigEnqueueMany(self):
with self.test_session() as sess:
q = tf.FIFOQueue(5, tf.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
sess.run(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(sess.run(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(sess.run(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertAllEqual(elem, results)
def testBigDequeueMany(self):
with self.test_session() as sess:
q = tf.FIFOQueue(2, tf.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(sess.run(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
sess.run(enq)
# Enough enqueued to unblock the dequeue
thread.join()
self.assertAllEqual(elem, results)
def testDtypes(self):
with self.test_session() as sess:
dtypes = [tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8,
tf.int64, tf.bool, tf.complex64]
shape = (32, 4, 128)
q = tf.FIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))
input_tuple = []
for dtype in dtypes:
np_dtype = dtype.as_numpy_dtype
np_array = np.random.randint(-10, 10, shape)
if dtype == tf.bool:
np_array = np_array > 0
elif dtype == tf.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
input_tuple.append(np_array)
q.enqueue_many(input_tuple).run()
output_tuple_t = q.dequeue_many(32)
output_tuple = sess.run(output_tuple_t)
for (input_elem, output_elem) in zip(input_tuple, output_tuple):
self.assertAllEqual(input_elem, output_elem)
def testDeviceColocation(self):
with tf.device("/job:ps"):
q = tf.FIFOQueue(32, [tf.int32], name="q")
with tf.device("/job:worker/task:7"):
dequeued_t = q.dequeue()
self.assertDeviceEqual("/job:ps", dequeued_t.device)
self.assertEqual([b"loc:@q"], dequeued_t.op.colocation_groups())
class FIFOQueueDictTest(tf.test.TestCase):
def testConstructor(self):
with tf.Graph().as_default():
q = tf.FIFOQueue(5, (tf.int32, tf.float32), names=("i", "j"),
shared_name="foo", name="Q")
self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
self.assertEqual(tf.string_ref, q.queue_ref.dtype)
self.assertProtoEquals("""
name:'Q' op:'FIFOQueue'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: 'foo' } }
""", q.queue_ref.op.node_def)
self.assertEqual(["i", "j"], q.names)
def testConstructorWithShapes(self):
with tf.Graph().as_default():
q = tf.FIFOQueue(5, (tf.int32, tf.float32), names=("i", "f"),
shapes=(tf.TensorShape([1, 1, 2, 3]),
tf.TensorShape([5, 8])), name="Q")
self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
self.assertEqual(tf.string_ref, q.queue_ref.dtype)
self.assertProtoEquals("""
name:'Q' op:'FIFOQueue'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {
shape { dim { size: 1 }
dim { size: 1 }
dim { size: 2 }
dim { size: 3 } }
shape { dim { size: 5 }
dim { size: 8 } }
} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
self.assertEqual(["i", "f"], q.names)
def testEnqueueDequeueOneComponent(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, tf.float32, shapes=((),), names="f")
# Verify that enqueue() checks that when using names we must enqueue a
# dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op = q.enqueue(10.0)
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op = q.enqueue((10.0,))
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"x": 12})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"f": 10.0, "s": "aa"})
enqueue_op = q.enqueue({"f": 10.0})
enqueue_op2 = q.enqueue({"f": 20.0})
enqueue_op3 = q.enqueue({"f": 30.0})
# Verify that enqueue_many() checks that when using names we must enqueue
# a dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op4 = q.enqueue_many([40.0, 50.0])
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"x": 12})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0], "s": ["aa", "bb"]})
enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0]})
dequeue = q.dequeue()
dequeue_2 = q.dequeue_many(2)
sess.run(enqueue_op)
sess.run(enqueue_op2)
sess.run(enqueue_op3)
sess.run(enqueue_op4)
f = sess.run(dequeue["f"])
self.assertEqual(10.0, f)
f = sess.run(dequeue_2["f"])
self.assertEqual([20.0, 30.0], list(f))
f = sess.run(dequeue_2["f"])
self.assertEqual([40.0, 50.0], list(f))
def testEnqueueDequeueMultipleComponent(self):
with self.test_session() as sess:
q = tf.FIFOQueue(10, (tf.float32, tf.int32, tf.string),
shapes=((), (), ()), names=("f", "i", "s"))
# Verify that enqueue() checks that when using names we must enqueue a
# dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op = q.enqueue((10.0, 123, "aa"))
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"x": 10.0})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"i": 12, "s": "aa"})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"i": 123, "s": "aa", "f": 10.0, "x": 10.0})
enqueue_op = q.enqueue({"i": 123, "s": "aa", "f": 10.0})
enqueue_op2 = q.enqueue({"i": 124, "s": "bb", "f": 20.0})
enqueue_op3 = q.enqueue({"i": 125, "s": "cc", "f": 30.0})
# Verify that enqueue_many() checks that when using names we must enqueue
# a dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op4 = q.enqueue_many(([40.0, 50.0], [126, 127], ["dd", "ee"]))
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"x": [10.0, 20.0]})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"i": [12, 12], "s": ["aa", "bb"]})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0], "i": [126, 127],
"s": ["dd", "ee"], "x": [1, 2]})
enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0], "i": [126, 127],
"s": ["dd", "ee"]})
dequeue = q.dequeue()
dequeue_2 = q.dequeue_many(2)
sess.run(enqueue_op)
sess.run(enqueue_op2)
sess.run(enqueue_op3)
sess.run(enqueue_op4)
i, f, s = sess.run([dequeue["i"], dequeue["f"], dequeue["s"]])
self.assertEqual(123, i)
self.assertEqual(10.0, f)
self.assertEqual(tf.compat.as_bytes("aa"), s)
i, f, s = sess.run([dequeue_2["i"], dequeue_2["f"], dequeue_2["s"]])
self.assertEqual([124, 125], list(i))
self.assertEqual([20.0, 30.0], list(f))
self.assertEqual([tf.compat.as_bytes("bb"), tf.compat.as_bytes("cc")],
list(s))
i, f, s = sess.run([dequeue_2["i"], dequeue_2["f"], dequeue_2["s"]])
self.assertEqual([126, 127], list(i))
self.assertEqual([40.0, 50.0], list(f))
self.assertEqual([tf.compat.as_bytes("dd"), tf.compat.as_bytes("ee")],
list(s))
class FIFOQueueWithTimeoutTest(tf.test.TestCase):
def testDequeueWithTimeout(self):
with self.test_session(
config=tf.ConfigProto(operation_timeout_in_ms=20)) as sess:
q = tf.FIFOQueue(10, tf.float32)
dequeued_t = q.dequeue()
# Intentionally do not run any enqueue_ops so that dequeue will block
# until operation_timeout_in_ms.
with self.assertRaisesRegexp(tf.errors.DeadlineExceededError,
"Timed out waiting for notification"):
sess.run(dequeued_t)
if __name__ == "__main__":
tf.test.main()
|
|
#!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script translates invalid authors in the contributors list generated
# by generate-contributors.py. When the script encounters an author name that
# is considered invalid, it searches Github and JIRA in an attempt to find
# replacements. This tool runs in two modes:
#
# (1) Interactive mode: For each invalid author name, this script presents
# all candidate replacements to the user and awaits user response. In this
# mode, the user may also input a custom name. This is the default.
#
# (2) Non-interactive mode: For each invalid author name, this script replaces
# the name with the first valid candidate it can find. If there is none, it
# uses the original name. This can be enabled through the --non-interactive flag.
import os
import sys
from releaseutils import *
# You must set the following before use!
JIRA_API_BASE = os.environ.get("JIRA_API_BASE", "https://issues.apache.org/jira")
JIRA_USERNAME = os.environ.get("JIRA_USERNAME", None)
JIRA_PASSWORD = os.environ.get("JIRA_PASSWORD", None)
GITHUB_API_TOKEN = os.environ.get("GITHUB_API_TOKEN", None)
if not JIRA_USERNAME or not JIRA_PASSWORD:
sys.exit("Both JIRA_USERNAME and JIRA_PASSWORD must be set")
if not GITHUB_API_TOKEN:
sys.exit("GITHUB_API_TOKEN must be set")
# Write new contributors list to <old_file_name>.final
if not os.path.isfile(contributors_file_name):
print("Contributors file %s does not exist!" % contributors_file_name)
print("Have you run ./generate-contributors.py yet?")
sys.exit(1)
contributors_file = open(contributors_file_name, "r")
warnings = []
# In non-interactive mode, this script will choose the first replacement that is valid
INTERACTIVE_MODE = True
if len(sys.argv) > 1:
options = set(sys.argv[1:])
if "--non-interactive" in options:
INTERACTIVE_MODE = False
if INTERACTIVE_MODE:
print("Running in interactive mode. To disable this, provide the --non-interactive flag.")
# Setup Github and JIRA clients
jira_options = {"server": JIRA_API_BASE}
jira_client = JIRA(options=jira_options, basic_auth=(JIRA_USERNAME, JIRA_PASSWORD))
github_client = Github(GITHUB_API_TOKEN)
# Load known author translations that are cached locally
known_translations = {}
known_translations_file_name = "known_translations"
known_translations_file = open(known_translations_file_name, "r")
for line in known_translations_file:
if line.startswith("#"):
continue
[old_name, new_name] = line.strip("\n").split(" - ")
known_translations[old_name] = new_name
known_translations_file.close()
# Open again in case the user adds new mappings
known_translations_file = open(known_translations_file_name, "a")
# Generate candidates for the given author. This should only be called if the given author
# name does not represent a full name as this operation is somewhat expensive. Under the
# hood, it makes several calls to the Github and JIRA API servers to find the candidates.
#
# This returns a list of (candidate name, source) 2-tuples. E.g.
# [
# (NOT_FOUND, "No full name found for Github user andrewor14"),
# ("Andrew Or", "Full name of JIRA user andrewor14"),
# ("Andrew Orso", "Full name of SPARK-1444 assignee andrewor14"),
# ("Andrew Ordall", "Full name of SPARK-1663 assignee andrewor14"),
# (NOT_FOUND, "No assignee found for SPARK-1763")
# ]
NOT_FOUND = "Not found"
def generate_candidates(author, issues):
candidates = []
# First check for full name of Github user
github_name = get_github_name(author, github_client)
if github_name:
candidates.append((github_name, "Full name of Github user %s" % author))
else:
candidates.append((NOT_FOUND, "No full name found for Github user %s" % author))
# Then do the same for JIRA user
jira_name = get_jira_name(author, jira_client)
if jira_name:
candidates.append((jira_name, "Full name of JIRA user %s" % author))
else:
candidates.append((NOT_FOUND, "No full name found for JIRA user %s" % author))
# Then do the same for the assignee of each of the associated JIRAs
# Note that a given issue may not have an assignee, or the assignee may not have a full name
for issue in issues:
try:
jira_issue = jira_client.issue(issue)
except JIRAError as e:
# Do not exit just because an issue is not found!
if e.status_code == 404:
warnings.append("Issue %s not found!" % issue)
continue
raise e
jira_assignee = jira_issue.fields.assignee
if jira_assignee:
user_name = jira_assignee.name
display_name = jira_assignee.displayName
if display_name:
candidates.append(
(display_name, "Full name of %s assignee %s" % (issue, user_name)))
else:
candidates.append(
(NOT_FOUND, "No full name found for %s assignee %s" % (issue, user_name)))
else:
candidates.append((NOT_FOUND, "No assignee found for %s" % issue))
# Guard against special characters in candidate names
# Note that the candidate name may already be in unicode (JIRA returns this)
for i, (candidate, source) in enumerate(candidates):
if isinstance(candidate, bytes):
candidate = candidate.decode("utf-8")
candidate = unidecode.unidecode(candidate).strip()
candidates[i] = (candidate, source)
return candidates
# Translate each invalid author by searching for possible candidates from Github and JIRA
# In interactive mode, this script presents the user with a list of choices and has the user
# select from this list. Additionally, the user may also choose to enter a custom name.
# In non-interactive mode, this script picks the first valid author name from the candidates
# If no such name exists, the original name is used (without the JIRA numbers).
print("\n========================== Translating contributor list ==========================")
lines = contributors_file.readlines()
contributions = []
for i, line in enumerate(lines):
# It is possible that a line in the contributor file only has the github name, e.g. yhuai.
# So, we need a strip() to remove the newline.
temp_author = line.strip(" * ").split(" -- ")[0].strip()
print("Processing author %s (%d/%d)" % (temp_author, i + 1, len(lines)))
if not temp_author:
error_msg = " ERROR: Expected the following format \" * <author> -- <contributions>\"\n"
error_msg += " ERROR: Actual = %s" % line
print(error_msg)
warnings.append(error_msg)
contributions.append(line)
continue
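# temp_author has the form <author>/<ISSUE-1>/<ISSUE-2>/...; the first component is the
# author name and the remaining components are the associated JIRA issues.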
author = temp_author.split("/")[0]
# Use the local copy of known translations where possible
if author in known_translations:
line = line.replace(temp_author, known_translations[author])
elif not is_valid_author(author):
new_author = author
issues = temp_author.split("/")[1:]
candidates = generate_candidates(author, issues)
# Print out potential replacement candidates along with the sources, e.g.
# [X] No full name found for Github user andrewor14
# [X] No assignee found for SPARK-1763
# [0] Andrew Or - Full name of JIRA user andrewor14
# [1] Andrew Orso - Full name of SPARK-1444 assignee andrewor14
# [2] Andrew Ordall - Full name of SPARK-1663 assignee andrewor14
# [3] andrewor14 - Raw Github username
# [4] Custom
candidate_names = []
bad_prompts = [] # Prompts that can't actually be selected; print these first.
good_prompts = [] # Prompts that contain valid choices
for candidate, source in candidates:
if candidate == NOT_FOUND:
bad_prompts.append(" [X] %s" % source)
else:
index = len(candidate_names)
candidate_names.append(candidate)
good_prompts.append(" [%d] %s - %s" % (index, candidate, source))
raw_index = len(candidate_names)
custom_index = len(candidate_names) + 1
for p in bad_prompts:
print(p)
if bad_prompts:
print(" ---")
for p in good_prompts:
print(p)
# In interactive mode, additionally provide "custom" option and await user response
if INTERACTIVE_MODE:
print(" [%d] %s - Raw Github username" % (raw_index, author))
print(" [%d] Custom" % custom_index)
response = input(" Your choice: ")
last_index = custom_index
while not response.isdigit() or int(response) > last_index:
response = input(" Please enter an integer between 0 and %d: " % last_index)
response = int(response)
if response == custom_index:
new_author = input(" Please type a custom name for this author: ")
elif response != raw_index:
new_author = candidate_names[response]
# In non-interactive mode, just pick the first candidate
else:
valid_candidate_names = [name for name, _ in candidates
if is_valid_author(name) and name != NOT_FOUND]
if valid_candidate_names:
new_author = valid_candidate_names[0]
# Finally, capitalize the author and replace the original one with it
# If the final replacement is still invalid, log a warning
if is_valid_author(new_author):
new_author = capitalize_author(new_author)
else:
warnings.append(
"Unable to find a valid name %s for author %s" % (author, temp_author))
print(" * Replacing %s with %s" % (author, new_author))
# If we are in interactive mode, prompt the user whether we want to remember this new
# mapping
if INTERACTIVE_MODE and \
author not in known_translations and \
yesOrNoPrompt(
" Add mapping %s -> %s to known translations file?" % (author, new_author)):
known_translations_file.write("%s - %s\n" % (author, new_author))
known_translations_file.flush()
line = line.replace(temp_author, new_author)
contributions.append(line)
print("==================================================================================\n")
contributors_file.close()
known_translations_file.close()
# Sort the contributions before writing them to the new file.
# Additionally, check if there are any duplicate author rows.
# This could happen if the same user has both a valid full
# name (e.g. Andrew Or) and an invalid one (andrewor14).
# If so, warn the user about this at the end.
contributions.sort()
all_authors = set()
new_contributors_file_name = contributors_file_name + ".final"
new_contributors_file = open(new_contributors_file_name, "w")
for line in contributions:
author = line.strip(" * ").split(" -- ")[0]
if author in all_authors:
warnings.append("Detected duplicate author name %s. Please merge these manually." % author)
all_authors.add(author)
new_contributors_file.write(line)
new_contributors_file.close()
print("Translated contributors list successfully written to %s!" % new_contributors_file_name)
# Log any warnings encountered in the process
if warnings:
print("\n========== Warnings encountered while translating the contributor list ===========")
for w in warnings:
print(w)
print("Please manually correct these in the final contributors list at %s." %
new_contributors_file_name)
print("==================================================================================\n")
|
|
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from chaco.abstract_overlay import AbstractOverlay
from chaco.data_label import DataLabel
from chaco.tools.scatter_inspector import ScatterInspector
from chaco.tools.zoom_tool import ZoomTool
from kiva.fonttools import Font
from traits.api import Instance, List, Bool
from traitsui.api import View, Item, VGroup, HGroup, HSplit, InstanceEditor, spring
# ============= standard library imports ========================
import numpy as np
# ============= local library imports ==========================
from pychron.viewable import Viewable
from pychron.database.selectors.isotope_selector import IsotopeAnalysisSelector
from pychron.graph.stacked_graph import StackedGraph
from six.moves import range
from six.moves import zip
class LegendOverlay(AbstractOverlay):
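# Draws a simple legend box in the upper-left corner of the plot: one line per mass
# spectrometer in its color, followed by one line per analysis type rendered with the
# marker of that machine's series.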
def overlay(self, component, gc, *args, **kw):
gc.save_state()
gc.set_font(Font("Monaco"))
x = component.x
y2 = component.y2
gc.set_fill_color((1, 1, 1))
w = 100
h = 110
gc.rect(x + 5, y2 - 5 - h, w, h)
gc.draw_path()
machines = ["jan", "obama", "map"]
colors = [(1, 0.5, 0), (0, 0, 1), (1, 0, 0)]
xo = x + 5
yo = y2 - 10
texth = 12
for i, (mi, color) in enumerate(zip(machines, colors)):
gc.set_fill_color(color)
yi = yo - (texth) * (i + 1)
gc.set_text_position(xo + 20, yi)
gc.show_text(mi)
# markers = ['circle', 'square', 'diamond', 'triangle']
ats = ["blank", "air", "cocktail", "unknown", "background"]
gc.set_fill_color((0, 0, 0))
for i, si in enumerate(ats):
yy = yi - (texth) * (i + 1)
try:
plot = component.plots["jan {}".format(si)][0]
except KeyError:
continue
pcolor, plot.color = plot.color, "black"
mpcolor, plot.outline_color = plot.outline_color, "black"
plot._render_icon(gc, xo + 5, yy, 5, 5)
plot.color = pcolor
plot.outline_color = mpcolor
gc.set_text_position(xo + 20, yy)
gc.show_text(si)
gc.restore_state()
class SelectionView(Viewable):
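# Pairs an IsotopeAnalysisSelector table with a stacked scatter graph of recent analyses;
# hovering over a point selects the matching record in the table.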
table = Instance(IsotopeAnalysisSelector)
graph = Instance(StackedGraph)
data_labels_visible = Bool(True)
data_labels = List
def _data_labels_visible_changed(self):
for di in self.data_labels:
di.visible = self.data_labels_visible
self.graph.redraw()
def _graph_default(self):
g = StackedGraph(container_dict=dict(padding=5))
plot = g.new_plot(
# show_legend='ul',
padding=5
)
plot.overlays.append(
ZoomTool(
plot,
minimum_screen_delta=5,
enable_wheel=False,
drag_button="left",
tool_mode="range",
always_on_top=True,
axis="index",
)
)
# plot.on_trait_change(self._update, 'index_mapper.range.updated')
plot.overlays.append(LegendOverlay(plot))
g.set_axis_traits(axis="y", visible=False)
g.set_axis_traits(axis="x", visible=False)
g.set_grid_traits(grid="x", visible=False)
g.set_grid_traits(grid="y", visible=False)
return g
def build_graph(self):
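# Build one scatter series per (machine, analysis type) pair. The x values are
# timestamps offset by the earliest timestamp; the y values are running counts
# offset by the analysis-type index so the series stack vertically.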
skw = dict(type="scatter", marker_size=3)
# skw = dict(type='bar')
g = self.graph
xs = []
ys = []
ats = ["blank", "air", "cocktail", "unknown", "background"]
# ats += ['blank_air', 'blank_cocktail', 'blank_unknown']
machines = ["jan", "obama", "map"]
rids = []
def test(ri, at, mach):
if at == "blank":
attest = ri.analysis_type.startswith("blank")
else:
attest = ri.analysis_type == at
if attest:
return ri.mass_spectrometer == mach
for mach in machines:
for i, at in enumerate(ats):
dd = [
(ri.shortname, ri.timestamp)
for ri in self.table.records
if test(ri, at, mach)
]
if dd:
ni, xi = list(zip(*dd))
else:
xi = []
ni = []
xi = np.array(xi)
n = len(xi)
xs.append(xi)
ys.append(np.array(list(range(n))) + 1 + 3 * i)
rids.append(ni)
mm = [min(xj) for xj in xs if len(xj)]
if not mm:
return
xmi = min(mm)
mm = [max(yj) for yj in ys if len(yj)]
yma = max(mm)
xs = np.array([xk - xmi for xk in xs])
ys = np.array(ys)
mm = [max(xj) for xj in xs if len(xj)]
xma = max(mm)
colors = ["orange", "blue", "green"]
markers = ["circle", "square", "diamond", "triangle", "cross"]
def ffunc(s):
def func(new):
if new:
self._update_graph(s, xmi)
return func
def fffunc(s):
def func(new):
if new:
self._update(s, xmi)
return func
for i, (name, color) in enumerate(zip(machines, colors)):
xxj = xs[i * 5 : i * 5 + 5]
yyj = ys[i * 5 : i * 5 + 5]
nnj = rids[i * 5 : i * 5 + 5]
for at, xx, yy, nn, marker in zip(ats, xxj, yyj, nnj, markers):
s, _ = g.new_series(xx, yy, marker=marker, color=color, **skw)
g.set_series_label("{} {}".format(name, at))
# self.add_trait('scatter_{}_{}'.format(at, name), s)
tool = ScatterInspector(s, selection_mode="single")
s.tools.append(tool)
s.index.on_trait_change(ffunc(s), "metadata_changed")
for xi, yi, ni in zip(xx, yy, nn):
dl = DataLabel(
component=s,
font=Font("Monaco"),
data_point=(xi, yi),
label_position="top left",
bgcolor="white",
# label_format='%s',
label_text=ni,
show_label_coords=False,
arrow_visible=False,
marker_visible=False,
)
s.overlays.append(dl)
self.data_labels.append(dl)
# add range selection tool
# s.active_tool = RangeSelection(s, left_button_selects=True)
# s.overlays.append(RangeSelectionOverlay(component=s))
# s.index.on_trait_change(getattr(self, '_update_{}'.format(at)), 'metadata_changed')
g.set_x_limits(min_=0, max_=xma, pad="0.1")
g.set_y_limits(min_=0, max_=yma * 1.1)
# def _update(self, sc, nds):
# #rescale y limits
# plot = self.graph.plots[0]
# if self.data_labels:
# low = plot.index_mapper.range.low
# high = plot.index_mapper.range.high
# #get points in the range
# xs, ys = zip(*[di.data_point for di in self.data_labels])
# xs = np.array(xs)
# ys = np.array(ys)
#
# tags = np.invert(np.bitwise_or(xs < low, xs > high))
# nys = ys[tags]
# # dls = [di for di in self.data_labels if low <= di.data_point[0] <= high]
# if nys.shape[0]:
# nlow = min(nys)
# nhigh = max(nys)
#
# # nhigh = np.random.randint(100000)
# print 'looo', nlow, nhigh
# sc.value_mapper.range.low = nlow
# sc.value_mapper.range.high = nhigh
# # print plot.value_mapper.range.low_setting
# # print plot.value_mapper.range.high_setting
# # self.graph.set_y_limits(nlow, nhigh, pad='0.1')
# # plot.value_mapper.range.low = nlow
# # plot.value_mapper.range.high = nhigh
# self.graph.redraw()
def _update_graph(self, scatter, xmi):
# sel = scatter.index.metadata.get('selections')
hover = scatter.index.metadata.get("hover")
if hover:
xs = scatter.index.get_data()
ts = xs[hover] + xmi
result = next(
(ri for ri in self.table.records if abs(ri.timestamp - ts) < 1), None
)
self.table.selected = [result]
def traits_view(self):
tgrp = Item(
"table",
show_label=False,
style="custom",
width=0.3,
editor=InstanceEditor(view="panel_view"),
)
ggrp = VGroup(
HGroup(Item("data_labels_visible", label="Label"), spring),
Item("graph", show_label=False, style="custom", width=0.7),
)
v = View(
HSplit(tgrp, ggrp),
width=1000,
height=500,
title="Recent Analyses",
resizable=True,
)
return v
# ============= EOF =============================================
|
|
import unittest
import fsm
class Test(unittest.TestCase):
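# The set_/clr_/is_*_set/is_*_clr helpers below manage six boolean flags (A-F) that the
# tests wire into transitions and activities as guards and effects.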
def setUp(self):
self.clr_A()
self.clr_B()
self.clr_C()
self.clr_D()
self.clr_E()
self.clr_F()
def set_A(self, event=None):
self.A = True
def clr_A(self, event=None):
self.A = False
def is_A_set(self, event=None):
return self.A
def is_A_clr(self, event=None):
return not self.A
def set_B(self, event=None):
self.B = True
def clr_B(self, event=None):
self.B = False
def is_B_set(self, event=None):
return self.B
def is_B_clr(self, event=None):
return not self.B
def set_C(self, event=None):
self.C = True
def clr_C(self, event=None):
self.C = False
def is_C_set(self, event=None):
return self.C
def is_C_clr(self, event=None):
return not self.C
def set_D(self, event=None):
self.D = True
def clr_D(self, event=None):
self.D = False
def is_D_set(self, event=None):
return self.D
def is_D_clr(self, event=None):
return not self.D
def set_E(self, event=None):
self.E = True
def clr_E(self, event=None):
self.E = False
def is_E_set(self, event=None):
return self.E
def is_E_clr(self, event=None):
return not self.E
def set_F(self, event=None):
self.F = True
def clr_F(self, event=None):
self.F = False
def is_F_set(self, event=None):
return self.F
def is_F_clr(self, event=None):
return not self.F
def test001_Event(self, event=None):
assert(not fsm.Event.is_event_or_event_type(None))
instance = fsm.Event()
assert(fsm.Event.is_event_or_event_type(instance))
def test01_Transition(self):
event = fsm.Event()
state = fsm.State()
transition = fsm.Transition(state)
triggered, target = transition.stimulate(event)
assert(triggered)
assert(target == state)
def test02_TransitionWithEffect(self):
event = fsm.Event()
state = fsm.State()
trans_with_effect = fsm.TransitionWithEffect(target=state, effect=self.set_A)
assert(self.is_A_clr())
triggered, target = trans_with_effect.stimulate(event)
assert(triggered)
assert(target == state)
assert(self.is_A_set())
def test03_TransitionEffectWithGuard(self):
event = fsm.Event()
state = fsm.State()
transition = fsm.TransitionWithGuardAndEffect(guard=self.is_A_set,
target=state,
effect=self.set_B)
assert(self.is_A_clr() and self.is_B_clr())
triggered, target = transition.stimulate(event)
assert(not triggered)
assert(target == None)
assert(self.is_A_clr() and self.is_B_clr())
self.set_A()
assert(self.is_A_set() and self.is_B_clr())
triggered, target = transition.stimulate(event)
assert(triggered)
assert(target == state)
assert(self.is_A_set() and self.is_B_set())
def test04_Activity(self):
event = fsm.Event()
activity = fsm.Activity(action=self.set_A)
assert(self.is_A_clr())
triggered, = activity.stimulate(event)
assert(triggered)
assert(self.is_A_set())
def test05_ActivityWithGuard(self):
event = fsm.Event()
activity = fsm.ActivityWithGuard(guard=self.is_A_set,
action=self.set_B)
assert(self.is_A_clr())
assert(self.is_B_clr())
triggered, = activity.stimulate(event)
assert(not triggered)
assert(self.is_A_clr())
assert(self.is_B_clr())
self.set_A()
assert(self.is_A_set())
assert(self.is_B_clr())
triggered, = activity.stimulate(event)
assert(triggered)
assert(self.is_A_set())
assert(self.is_B_set())
def test06_TransitionList(self):
event = fsm.Event()
stateA = fsm.State()
stateB = fsm.State()
transA = fsm.TransitionWithGuardAndEffect(guard=self.is_A_set,
target=stateA,
effect=self.set_C)
transB = fsm.TransitionWithGuardAndEffect(guard=self.is_B_set,
target=stateB,
effect=self.set_D)
transitions = fsm.TransitionList()
transitions.add_transition(transA)
transitions.add_transition(transB)
assert(self.is_C_clr())
triggered, target = transitions.stimulate(event)
assert(not triggered)
assert(None == target)
assert(self.is_C_clr())
self.set_A()
assert(self.is_C_clr())
triggered, target = transitions.stimulate(event)
assert(triggered)
assert(stateA == target)
assert(self.is_C_set())
self.clr_A()
self.set_B()
assert(self.is_D_clr())
triggered, target = transitions.stimulate(event)
assert(triggered)
assert(stateB == target)
assert(self.is_D_set())
# If both guards are true, the first transition should get triggered.
self.clr_C()
self.clr_D()
self.set_A()
assert(self.is_C_clr())
assert(self.is_D_clr())
triggered, target = transitions.stimulate(event)
assert(triggered)
assert(stateA == target)
assert(self.is_C_set())
assert(self.is_D_clr())
def test07_ActivityList(self):
event = fsm.Event()
setBifAset = fsm.ActivityWithGuard(guard=self.is_A_set, action=self.set_B)
setDifCset = fsm.ActivityWithGuard(guard=self.is_C_set, action=self.set_D)
activities = fsm.ActivityList([setBifAset, setDifCset])
assert(self.is_A_clr() and self.is_B_clr())
assert(self.is_C_clr() and self.is_D_clr())
triggered, = activities.stimulate(event)
assert(not triggered)
assert(self.is_A_clr() and self.is_B_clr())
assert(self.is_C_clr() and self.is_D_clr())
self.set_A()
assert(self.is_A_set())
assert(self.is_B_clr() and self.is_C_clr() and self.is_D_clr())
triggered, = activities.stimulate(event)
assert(triggered)
assert(self.is_A_set() and self.is_B_set())
assert(self.is_C_clr() and self.is_D_clr())
self.clr_B()
self.set_C()
assert(self.is_A_set() and self.is_C_set())
assert(self.is_B_clr() and self.is_D_clr())
triggered, = activities.stimulate(event)
assert(triggered)
assert( self.is_A_set() and self.is_B_set()
and self.is_C_set() and self.is_D_set())
def test08_ActivitiesDict(self):
class Event1(fsm.Event): pass
class Event2(fsm.Event): pass
class Event3(fsm.Event): pass
ev0 = fsm.Event()
ev1 = Event1()
ev2 = Event2()
ev3 = Event3()
setA = fsm.Activity(action=self.set_A)
setB = fsm.Activity(action=self.set_B)
setDifCset = fsm.ActivityWithGuard(guard=self.is_C_set, action=self.set_D)
act_dict = fsm.EventDictOfActivities()
act_dict.add_activity(event=ev1, activity=setA)
act_dict.add_activity(event=ev2, activity=setB)
act_dict.add_activity(event=ev3, activity=setDifCset)
assert(self.is_A_clr())
assert(self.is_B_clr())
assert(self.is_C_clr())
assert(self.is_D_clr())
triggered, = act_dict.stimulate(ev0)
assert(not triggered)
assert(self.is_A_clr())
assert(self.is_B_clr())
assert(self.is_C_clr())
assert(self.is_D_clr())
triggered, = act_dict.stimulate(ev1)
assert(triggered)
assert(self.is_A_set())
assert(self.is_B_clr())
assert(self.is_C_clr())
assert(self.is_D_clr())
triggered, = act_dict.stimulate(ev2)
assert(triggered)
assert(self.is_A_set())
assert(self.is_B_set())
assert(self.is_C_clr())
assert(self.is_D_clr())
triggered, = act_dict.stimulate(ev3)
assert(not triggered)
assert(self.is_A_set())
assert(self.is_B_set())
assert(self.is_C_clr())
assert(self.is_D_clr())
self.set_C()
triggered, = act_dict.stimulate(ev3)
assert(triggered)
assert(self.is_A_set())
assert(self.is_B_set())
assert(self.is_C_set())
assert(self.is_D_set())
def test09_TransitionDict(self):
stateA = fsm.State()
stateB = fsm.State()
stateC = fsm.State()
stateD = fsm.State()
class Event1(fsm.Event): pass
class Event2(fsm.Event): pass
class Event3(fsm.Event): pass
class Event4(fsm.Event): pass
ev0 = fsm.Event()
ev1 = Event1()
ev2 = Event2()
ev3 = Event3()
ev4 = Event4()
to_A = fsm.Transition(stateA)
to_B_if_B_set = fsm.TransitionWithGuard(guard=self.is_B_set, target=stateB)
set_C_and_go_to_C = fsm.TransitionWithEffect(target=stateC, effect=self.set_C)
set_D_and_go_to_D_if_A_set = fsm.TransitionWithGuardAndEffect(
guard=self.is_A_set,
target=stateD,
effect=self.set_D)
trans_dict = fsm.EventDictOfTransitions()
trans_dict.add_transition(ev1, to_A)
trans_dict.add_transition(ev2, to_B_if_B_set)
trans_dict.add_transition(ev3, set_C_and_go_to_C)
trans_dict.add_transition(ev4, set_D_and_go_to_D_if_A_set)
assert( self.is_A_clr() and self.is_B_clr()
and self.is_C_clr() and self.is_D_clr())
triggered, target = trans_dict.stimulate(ev0)
assert(not triggered)
assert(None == target)
triggered, target = trans_dict.stimulate(ev1)
assert(triggered)
assert(stateA == target)
triggered, target = trans_dict.stimulate(ev2)
assert(not triggered)
assert(None == target)
self.set_B()
triggered, target = trans_dict.stimulate(ev2)
assert(triggered)
assert(stateB == target)
assert(self.is_C_clr())
triggered, target = trans_dict.stimulate(ev3)
assert(triggered)
assert(stateC == target)
assert(self.is_C_set())
triggered, target = trans_dict.stimulate(ev4)
assert(not triggered)
assert(None == target)
self.set_A()
assert(self.is_D_clr())
triggered, target = trans_dict.stimulate(ev4)
assert(triggered)
assert(stateD == target)
assert(self.is_D_set())
def test10_StimulusResponse(self):
res = fsm.StimulusResponse(False, False, None)
assert(not res.did_act())
assert(not res.was_transition_requested())
assert(None == res.get_target())
assert(not res.did_act_or_requested_transition())
res = fsm.StimulusResponse(True, False, None)
assert(res.did_act())
assert(not res.was_transition_requested())
assert(None == res.get_target())
assert(res.did_act_or_requested_transition())
res = fsm.StimulusResponse(False, True, None)
assert(not res.did_act())
assert(not res.was_transition_requested())
assert(None == res.get_target())
assert(not res.did_act_or_requested_transition())
state = fsm.State()
res = fsm.StimulusResponse(False, True, state)
assert(not res.did_act())
assert(res.was_transition_requested())
assert(state == res.get_target())
assert(res.did_act_or_requested_transition())
state = fsm.State()
res = fsm.StimulusResponse(True, True, state)
assert(res.did_act())
assert(res.was_transition_requested())
assert(state == res.get_target())
assert(res.did_act_or_requested_transition())
def test11_StateEnterAndExit(self):
state = fsm.State()
assert(not state.is_active())
activity, transition, target = state.enter()
assert(state.is_active())
assert(not activity)
assert(not transition)
assert(None == target)
activity, transition, target = state.exit()
assert(not state.is_active())
assert(not activity)
assert(not transition)
assert(None == target)
def test12_StateUnnamedTransition(self):
stateA = fsm.State()
stateB = fsm.State()
to_B = fsm.Transition(stateB)
stateA.add_unnamed_transition(to_B)
assert(not stateA.is_active())
activity, transition, target = stateA.enter()
assert(stateA.is_active())
assert(not activity)
assert(not transition)
assert(None == target)
def test13_StateEnterActivities(self):
set_A = fsm.Activity(self.set_A)
set_B_if_C_set = fsm.ActivityWithGuard(guard=self.is_C_set,
action=self.set_B)
state = fsm.State()
state.add_enter_activity(set_A)
state.add_enter_activity(set_B_if_C_set)
activity, transition, target = state.enter()
assert(state.is_active())
assert(activity)
assert(not transition)
assert(None == target)
assert(self.is_A_set())
assert(self.is_B_clr() and self.is_C_clr())
self.clr_A()
self.set_C()
assert(self.is_C_set())
assert(self.is_A_clr() and self.is_B_clr())
activity, transition, target = state.enter()
assert(state.is_active())
assert(activity)
assert(not transition)
assert(None == target)
assert(self.is_A_set() and self.is_B_set() and self.is_C_set())
def test14_StateExitActivities(self):
set_A = fsm.Activity(self.set_A)
set_B_if_C_set = fsm.ActivityWithGuard(guard=self.is_C_set,
action=self.set_B)
state = fsm.State()
state.add_exit_activity(set_A)
state.add_exit_activity(set_B_if_C_set)
activity, transition, target = state.exit()
assert(activity)
assert(not transition)
assert(None == target)
assert(self.is_A_set())
assert(self.is_B_clr() and self.is_C_clr())
self.clr_A()
self.set_C()
assert(self.is_C_set())
assert(self.is_A_clr() and self.is_B_clr())
activity, transition, target = state.exit()
assert(activity)
assert(not transition)
assert(None == target)
assert(self.is_A_set() and self.is_B_set() and self.is_C_set())
def test15_StateActivities(self):
event = fsm.Event()
set_A = fsm.Activity(self.set_A)
set_B_if_C_set = fsm.ActivityWithGuard(guard=self.is_C_set,
action=self.set_B)
state = fsm.State()
state.add_activity(event, set_A)
state.add_activity(event, set_B_if_C_set)
assert(self.is_A_clr() and self.is_B_clr() and self.is_C_clr())
activity, transition, target = state.stimulate(event)
assert(activity)
assert(not transition)
assert(None == target)
assert(self.is_A_set())
assert(self.is_B_clr() and self.is_C_clr())
self.clr_A()
self.set_C()
assert(self.is_A_clr() and self.is_B_clr() and self.is_C_set())
activity, transition, target = state.stimulate(event)
assert(activity)
assert(not transition)
assert(None == target)
assert(self.is_A_set() and self.is_B_set() and self.is_C_set())
def test15_StateTransitions(self):
state = fsm.State()
stateA = fsm.State()
stateB = fsm.State()
stateC = fsm.State()
stateD = fsm.State()
class Event1(fsm.Event): pass
class Event2(fsm.Event): pass
class Event3(fsm.Event): pass
class Event4(fsm.Event): pass
ev0 = fsm.Event()
ev1 = Event1()
ev2 = Event2()
ev3 = Event3()
ev4 = Event4()
to_A = fsm.Transition(stateA)
to_B_if_B_set = fsm.TransitionWithGuard(guard=self.is_B_set, target=stateB)
set_C_and_go_to_C = fsm.TransitionWithEffect(target=stateC, effect=self.set_C)
set_D_and_go_to_D_if_A_set = fsm.TransitionWithGuardAndEffect(
guard=self.is_A_set,
target=stateD,
effect=self.set_D)
state.add_transition(ev1, to_A)
state.add_transition(ev2, to_B_if_B_set)
state.add_transition(ev3, set_C_and_go_to_C)
state.add_transition(ev4, set_D_and_go_to_D_if_A_set)
assert( self.is_A_clr() and self.is_B_clr()
and self.is_C_clr() and self.is_D_clr())
activity, transition, target = state.stimulate(ev0)
assert(not activity)
assert(not transition)
assert(None == target)
assert( self.is_A_clr() and self.is_B_clr()
and self.is_C_clr() and self.is_D_clr())
activity, transition, target = state.stimulate(ev1)
assert(not activity)
assert(transition)
assert(stateA == target)
assert( self.is_A_clr() and self.is_B_clr()
and self.is_C_clr() and self.is_D_clr())
activity, transition, target = state.stimulate(ev2)
assert(not activity)
assert(not transition)
assert(None == target)
assert( self.is_A_clr() and self.is_B_clr()
and self.is_C_clr() and self.is_D_clr())
self.set_B()
activity, transition, target = state.stimulate(ev2)
assert(not activity)
assert(transition)
assert(stateB == target)
assert(self.is_A_clr() and self.is_C_clr() and self.is_D_clr())
assert(self.is_B_set())
activity, transition, target = state.stimulate(ev3)
assert(not activity)
assert(transition)
assert(stateC == target)
assert(self.is_A_clr() and self.is_D_clr())
assert(self.is_B_set() and self.is_C_set())
activity, transition, target = state.stimulate(ev4)
assert(not activity)
assert(not transition)
assert(None == target)
assert(self.is_A_clr() and self.is_D_clr())
assert(self.is_B_set() and self.is_C_set())
self.set_A()
activity, transition, target = state.stimulate(ev4)
assert(not activity)
assert(transition)
assert(stateD == target)
assert( self.is_A_set() and self.is_B_set()
and self.is_D_set() and self.is_C_set())
def test16_StateUnnamedTransition(self):
stateA = fsm.State()
self_trans = fsm.Transition(target=stateA)
stateA.add_unnamed_transition(self_trans)
assert(not stateA.is_active())
activity, transition, target = stateA.enter()
assert(stateA.is_active())
assert(not activity)
assert(not transition)
assert(None == target)
activity, transition, target = stateA.stimulate(fsm.State.UnnamedEvent)
assert(stateA.is_active())
assert(not activity)
assert(transition)
assert(stateA == target)
stateB = fsm.State()
self_trans_if_A_set = fsm.TransitionWithGuard(guard=self.is_A_set,
target=stateB)
stateB.add_unnamed_transition(self_trans_if_A_set)
assert(not stateB.is_active())
activity, transition, target = stateB.enter()
assert(stateB.is_active())
assert(not activity)
assert(not transition)
assert(None == target)
self.set_A()
assert(stateB.is_active())
activity, transition, target = stateB.stimulate(fsm.State.UnnamedEvent)
assert(stateB.is_active())
assert(not activity)
assert(transition)
assert(stateB == target)
def test17_FsmCtorXtor(self):
set_A = fsm.Activity(self.set_A)
set_B_if_C_set = fsm.ActivityWithGuard(guard=self.is_C_set, action=self.set_B)
set_D = fsm.Activity(self.set_D)
set_E_if_F_set = fsm.ActivityWithGuard(guard=self.is_F_set, action=self.set_E)
sm = fsm.FSM()
sm.add_start_activity(set_A)
sm.add_start_activity(set_B_if_C_set)
sm.add_stop_activity(set_D)
sm.add_stop_activity(set_E_if_F_set)
# Current state should be set to final after start() since the FSM does
# not have any internal state.
assert( self.is_A_clr() and self.is_B_clr() and self.is_C_clr()
and self.is_D_clr() and self.is_E_clr() and self.is_F_clr())
activity, transition, target = sm.start()
assert(not activity)
assert(not transition)
assert(None == target)
assert(sm.current == sm.final)
assert(self.is_A_set() and self.is_B_clr() and self.is_C_clr())
assert(self.is_D_set() and self.is_E_clr() and self.is_F_clr())
activity, transition, target = sm.stop()
assert(not activity)
assert(not transition)
assert(None == target)
assert(sm.current == sm.final)
assert(self.is_A_set() and self.is_B_clr() and self.is_C_clr())
assert(self.is_D_set() and self.is_E_clr() and self.is_F_clr())
def test18_FsmInit(self):
set_A = fsm.Activity(self.set_A)
set_B = fsm.Activity(self.set_B)
set_E = fsm.Activity(self.set_E)
set_F = fsm.Activity(self.set_F)
state = fsm.State()
sm = fsm.FSM([state])
sm.add_start_activity(set_A)
sm.add_stop_activity(set_B)
state.add_enter_activity(set_E)
state.add_exit_activity(set_F)
assert( self.is_A_clr() and self.is_B_clr()
and self.is_E_clr() and self.is_F_clr())
activity, transition, target = sm.start()
assert(not activity)
assert(not transition)
assert(None == target)
assert(sm.current == state)
assert(self.is_A_set() and self.is_B_clr())
assert(self.is_E_set() and self.is_F_clr())
activity, transition, target = sm.stop()
assert(not activity)
assert(not transition)
assert(None == target)
assert(sm.current == sm.final)
assert(self.is_A_set() and self.is_B_set())
assert(self.is_E_set() and self.is_F_set())
def test19_FsmTransition(self):
set_A = fsm.Activity(self.set_A)
set_B = fsm.Activity(self.set_B)
set_C = fsm.Activity(self.set_C)
set_D = fsm.Activity(self.set_D)
set_E = fsm.Activity(self.set_E)
set_F = fsm.Activity(self.set_F)
event = fsm.Event()
state1 = fsm.State()
state2 = fsm.State()
to_2 = fsm.Transition(state2)
state1.add_transition(event, to_2)
sm = fsm.FSM([state1, state2])
sm.add_start_activity(set_A)
sm.add_stop_activity(set_B)
state1.add_enter_activity(set_C)
state1.add_exit_activity(set_D)
state2.add_enter_activity(set_E)
state2.add_exit_activity(set_F)
assert( self.is_A_clr() and self.is_B_clr()
and self.is_C_clr() and self.is_D_clr()
and self.is_E_clr() and self.is_F_clr())
activity, transition, target = sm.start()
assert(not activity)
assert(not transition)
assert(None == target)
assert(sm.current == state1)
assert(self.is_A_set() and self.is_B_clr())
assert(self.is_C_set() and self.is_D_clr())
assert(self.is_E_clr() and self.is_F_clr())
activity, transition, target = sm.stimulate(event)
assert(not activity)
assert(not transition)
assert(None == target)
assert(sm.current == state2)
assert(self.is_A_set() and self.is_B_clr())
assert(self.is_C_set() and self.is_D_set())
assert(self.is_E_set() and self.is_F_clr())
activity, transition, target = sm.stop()
assert(not activity)
assert(not transition)
assert(None == target)
assert(sm.current == sm.final)
assert(self.is_A_set() and self.is_B_set())
assert(self.is_C_set() and self.is_D_set())
assert(self.is_E_set() and self.is_F_set())
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
|
from flask import render_template, request, redirect, abort, jsonify, url_for, session, Blueprint
from CTFd.utils import sha512, is_safe_url, authed, admins_only, is_admin, unix_time, unix_time_millis, get_config, set_config, sendmail, rmdir
from CTFd.models import db, Teams, Solves, Challenges, WrongKeys, Keys, Tags, Files, Tracking, Pages, Config
from itsdangerous import TimedSerializer, BadTimeSignature
from werkzeug.utils import secure_filename
from socket import inet_aton, inet_ntoa
from passlib.hash import bcrypt_sha256
from flask import current_app as app
import logging
import hashlib
import time
import re
import os
import json
admin = Blueprint('admin', __name__)
@admin.route('/admin', methods=['GET', 'POST'])
def admin_view():
if request.method == 'POST':
username = request.form.get('name')
password = request.form.get('password')
admin_user = Teams.query.filter_by(name=username, admin=True).first()
if admin_user and bcrypt_sha256.verify(password, admin_user.password):
try:
session.regenerate() # NO SESSION FIXATION FOR YOU
except:
pass # TODO: Some session objects don't implement regenerate :(
session['username'] = admin_user.name
session['id'] = admin_user.id
session['admin'] = True
session['nonce'] = sha512(os.urandom(10))
db.session.close()
return redirect('/admin/graphs')
if is_admin():
return redirect('/admin/graphs')
return render_template('admin/login.html')
@admin.route('/admin/graphs')
@admins_only
def admin_graphs():
return render_template('admin/graphs.html')
@admin.route('/admin/config', methods=['GET', 'POST'])
@admins_only
def admin_config():
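# On POST, persist the submitted CTF settings (name, start/end times, max tries,
# API key and the registration/visibility toggles) and redirect back.
# On GET, load the current values, seeding defaults for any missing keys,
# and render the config page.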
if request.method == "POST":
try:
start = int(request.form['start'])
end = int(request.form['end'])
except (ValueError, TypeError):
start = None
end = None
try:
view_challenges_unregistered = bool(request.form.get('view_challenges_unregistered', None))
prevent_registration = bool(request.form.get('prevent_registration', None))
prevent_name_change = bool(request.form.get('prevent_name_change', None))
view_after_ctf = bool(request.form.get('view_after_ctf', None))
except (ValueError, TypeError):
view_challenges_unregistered = None
prevent_registration = None
prevent_name_change = None
view_after_ctf = None
finally:
view_challenges_unregistered = set_config('view_challenges_unregistered', view_challenges_unregistered)
prevent_registration = set_config('prevent_registration', prevent_registration)
prevent_name_change = set_config('prevent_name_change', prevent_name_change)
view_after_ctf = set_config('view_after_ctf', view_after_ctf)
ctf_name = set_config("ctf_name", request.form.get('ctf_name', None))
mg_api_key = set_config("mg_api_key", request.form.get('mg_api_key', None))
max_tries = set_config("max_tries", request.form.get('max_tries', None))
db_start = Config.query.filter_by(key='start').first()
db_start.value = start
db_end = Config.query.filter_by(key='end').first()
db_end.value = end
db.session.add(db_start)
db.session.add(db_end)
db.session.commit()
return redirect('/admin/config')
ctf_name = get_config('ctf_name')
if not ctf_name:
set_config('ctf_name', None)
mg_api_key = get_config('mg_api_key')
if not mg_api_key:
set_config('mg_api_key', None)
max_tries = get_config('max_tries')
if not max_tries:
set_config('max_tries', 0)
max_tries = 0
view_after_ctf = get_config('view_after_ctf') == '1'
if not view_after_ctf:
set_config('view_after_ctf', 0)
view_after_ctf = 0
start = get_config('start')
if not start:
set_config('start', None)
end = get_config('end')
if not end:
set_config('end', None)
view_challenges_unregistered = get_config('view_challenges_unregistered') == '1'
if not view_challenges_unregistered:
set_config('view_challenges_unregistered', None)
prevent_registration = get_config('prevent_registration') == '1'
if not prevent_registration:
set_config('prevent_registration', None)
prevent_name_change = get_config('prevent_name_change') == '1'
if not prevent_name_change:
set_config('prevent_name_change', None)
db.session.commit()
db.session.close()
return render_template('admin/config.html', ctf_name=ctf_name, start=start, end=end,
max_tries=max_tries,
view_challenges_unregistered=view_challenges_unregistered,
prevent_registration=prevent_registration, mg_api_key=mg_api_key,
prevent_name_change=prevent_name_change,
view_after_ctf=view_after_ctf)
@admin.route('/admin/css', methods=['GET', 'POST'])
@admins_only
def admin_css():
if request.method == 'POST':
css = request.form['css']
css = set_config('css', css)
print(css)
return "1"
return "0"
@admin.route('/admin/pages', defaults={'route': None}, methods=['GET', 'POST'])
@admin.route('/admin/pages/<route>', methods=['GET', 'POST'])
@admins_only
def admin_pages(route):
if request.method == 'GET' and request.args.get('mode') == 'create':
return render_template('admin/editor.html')
if route and request.method == 'GET':
page = Pages.query.filter_by(route=route).first()
return render_template('admin/editor.html', page=page)
if route and request.method == 'POST':
page = Pages.query.filter_by(route=route).first()
errors = []
html = request.form['html']
route = request.form['route']
if not route:
errors.append('Missing URL route')
if errors:
page = Pages(html, "")
return render_template('/admin/editor.html', page=page)
if page:
page.route = route
page.html = html
db.session.commit()
return redirect('/admin/pages')
page = Pages(route, html)
db.session.add(page)
db.session.commit()
return redirect('/admin/pages')
pages = Pages.query.all()
return render_template('admin/pages.html', routes=pages, css=get_config('css'))
@admin.route('/admin/page/<pageroute>/delete', methods=['POST'])
@admins_only
def delete_page(pageroute):
page = Pages.query.filter_by(route=pageroute).first()
db.session.delete(page)
db.session.commit()
return '1'
@admin.route('/admin/chals', methods=['POST', 'GET'])
@admins_only
def admin_chals():
if request.method == 'POST':
chals = Challenges.query.add_columns('id', 'name', 'value', 'description', 'category').order_by(Challenges.value).all()
json = {'game':[]}
for x in chals:
json['game'].append({'id':x[1], 'name':x[2], 'value':x[3], 'description':x[4], 'category':x[5]})
db.session.close()
return jsonify(json)
else:
return render_template('admin/chals.html')
@admin.route('/admin/keys/<chalid>', methods=['POST', 'GET'])
@admins_only
def admin_keys(chalid):
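# GET returns the flags configured for a challenge as JSON; POST replaces them
# with the submitted keys[]/vals[] lists.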
if request.method == 'GET':
keys = Keys.query.filter_by(chal=chalid).all()
json = {'keys':[]}
for x in keys:
json['keys'].append({'id':x.id, 'key':x.flag, 'type':x.key_type})
return jsonify(json)
elif request.method == 'POST':
keys = Keys.query.filter_by(chal=chalid).all()
for x in keys:
db.session.delete(x)
newkeys = request.form.getlist('keys[]')
newvals = request.form.getlist('vals[]')
for flag, val in zip(newkeys, newvals):
key = Keys(chalid, flag, val)
db.session.add(key)
db.session.commit()
db.session.close()
return '1'
@admin.route('/admin/tags/<chalid>', methods=['GET', 'POST'])
@admins_only
def admin_tags(chalid):
if request.method == 'GET':
tags = Tags.query.filter_by(chal=chalid).all()
json = {'tags':[]}
for x in tags:
json['tags'].append({'id':x.id, 'chal':x.chal, 'tag':x.tag})
return jsonify(json)
elif request.method == 'POST':
newtags = request.form.getlist('tags[]')
for x in newtags:
tag = Tags(chalid, x)
db.session.add(tag)
db.session.commit()
db.session.close()
return '1'
@admin.route('/admin/tags/<tagid>/delete', methods=['POST'])
@admins_only
def admin_delete_tags(tagid):
if request.method == 'POST':
tag = Tags.query.filter_by(id=tagid).first_or_404()
db.session.delete(tag)
db.session.commit()
db.session.close()
return "1"
@admin.route('/admin/files/<chalid>', methods=['GET', 'POST'])
@admins_only
def admin_files(chalid):
if request.method == 'GET':
files = Files.query.filter_by(chal=chalid).all()
json = {'files':[]}
for x in files:
json['files'].append({'id':x.id, 'file':x.location})
return jsonify(json)
if request.method == 'POST':
if request.form['method'] == "delete":
f = Files.query.filter_by(id=request.form['file']).first_or_404()
if os.path.isfile(f.location):
os.unlink(f.location)
db.session.delete(f)
db.session.commit()
db.session.close()
return "1"
elif request.form['method'] == "upload":
files = request.files.getlist('files[]')
for f in files:
filename = secure_filename(f.filename)
if len(filename) <= 0:
continue
md5hash = hashlib.md5(os.urandom(64)).hexdigest()
# BUG NEEDS TO GO TO S3
base = os.path.dirname(os.path.dirname(__file__))
## mod_wsgi does some sad things with cwd so the upload directory needs to be shifted a bit
if not os.path.exists(os.path.join(base, app.config['UPLOAD_FOLDER'], md5hash)):
os.makedirs(os.path.join(base, app.config['UPLOAD_FOLDER'], md5hash))
f.save(os.path.join(base, app.config['UPLOAD_FOLDER'], md5hash, filename))
## This needs to be relative to CTFd so it doesn't need base.
db_f = Files(chalid, os.path.join(app.config['UPLOAD_FOLDER'], md5hash, filename))
db.session.add(db_f)
db.session.commit()
db.session.close()
return redirect('/admin/chals')
@admin.route('/admin/teams')
@admins_only
def admin_teams():
teams = Teams.query.all()
return render_template('admin/teams.html', teams=teams)
@admin.route('/admin/team/<teamid>', methods=['GET', 'POST'])
@admins_only
def admin_team(teamid):
user = Teams.query.filter_by(id=teamid).first()
solves = Solves.query.filter_by(teamid=teamid).all()
addrs = Tracking.query.filter_by(team=teamid).group_by(Tracking.ip).all()
score = user.score()
place = user.place()
if request.method == 'GET':
return render_template('admin/team.html', solves=solves, team=user, addrs=addrs, score=score, place=place)
elif request.method == 'POST':
admin_user = request.form.get('admin', "false")
admin_user = 1 if admin_user == "true" else 0
if admin_user:
user.admin = 1
db.session.commit()
return jsonify({'data': ['success']})
name = request.form.get('name', None)
password = request.form.get('password', None)
email = request.form.get('email', None)
website = request.form.get('website', None)
affiliation = request.form.get('affiliation', None)
country = request.form.get('country', None)
errors = []
name_used = Teams.query.filter(Teams.name == name).first()
if name_used and int(name_used.id) != int(teamid):
errors.append('That name is taken')
email_used = Teams.query.filter(Teams.email == email).first()
if email_used and int(email_used.id) != int(teamid):
errors.append('That email is taken')
if errors:
db.session.close()
return jsonify({'data':errors})
else:
user.name = name
user.email = email
if password:
user.password = bcrypt_sha256.encrypt(password)
user.website = website
user.affiliation = affiliation
user.country = country
db.session.commit()
db.session.close()
return jsonify({'data':['success']})
@admin.route('/admin/team/<teamid>/mail', methods=['POST'])
@admins_only
def email_user(teamid):
message = request.form.get('msg', None)
team = Teams.query.filter(Teams.id == teamid).first()
if message and team:
if sendmail(team.email, message):
return "1"
return "0"
@admin.route('/admin/team/<teamid>/ban', methods=['POST'])
@admins_only
def ban(teamid):
user = Teams.query.filter_by(id=teamid).first()
user.banned = 1
db.session.commit()
return redirect('/admin/scoreboard')
@admin.route('/admin/team/<teamid>/unban', methods=['POST'])
@admins_only
def unban(teamid):
user = Teams.query.filter_by(id=teamid).first()
user.banned = None
db.session.commit()
return redirect('/admin/scoreboard')
@admin.route('/admin/team/<teamid>/delete', methods=['POST'])
@admins_only
def delete_team(teamid):
user = Teams.query.filter_by(id=teamid).first()
db.session.delete(user)
db.session.commit()
return '1'
@admin.route('/admin/graphs/<graph_type>')
@admins_only
def admin_graph(graph_type):
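# Returns JSON used by the admin graphs page: challenge counts grouped by category,
# or solve counts keyed by challenge name.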
if graph_type == 'categories':
categories = db.session.query(Challenges.category, db.func.count(Challenges.category)).group_by(Challenges.category).all()
json = {'categories':[]}
for category, count in categories:
json['categories'].append({'category':category, 'count':count})
return jsonify(json)
elif graph_type == "solves":
solves = Solves.query.add_columns(db.func.count(Solves.chalid)).group_by(Solves.chalid).all()
json = {}
for chal, count in solves:
json[chal.chal.name] = count
return jsonify(json)
@admin.route('/admin/scoreboard')
@admins_only
def admin_scoreboard():
score = db.func.sum(Challenges.value).label('score')
quickest = db.func.max(Solves.date).label('quickest')
teams = db.session.query(Solves.teamid, Teams.name, Teams.banned, score).join(Teams).join(Challenges).group_by(Solves.teamid).order_by(score.desc(), quickest)
db.session.close()
return render_template('admin/scoreboard.html', teams=teams)
@admin.route('/admin/scores')
@admins_only
def admin_scores():
score = db.func.sum(Challenges.value).label('score')
quickest = db.func.max(Solves.date).label('quickest')
teams = db.session.query(Solves.teamid, Teams.name, score).join(Teams).join(Challenges).filter(Teams.banned == None).group_by(Solves.teamid).order_by(score.desc(), quickest)
db.session.close()
json = {'teams':[]}
for i, x in enumerate(teams):
json['teams'].append({'place':i+1, 'id':x.teamid, 'name':x.name,'score':int(x.score)})
return jsonify(json)
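# Example response shape for /admin/scores (illustrative values only, built
# from the dict assembled above):
# {"teams": [{"place": 1, "id": 4, "name": "team_a", "score": 1200}, ...]}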
@admin.route('/admin/solves/<teamid>', methods=['GET'])
@admins_only
def admin_solves(teamid="all"):
if teamid == "all":
solves = Solves.query.all()
else:
solves = Solves.query.filter_by(teamid=teamid).all()
db.session.close()
json = {'solves':[]}
for x in solves:
json['solves'].append({'id':x.id, 'chal':x.chal.name, 'chalid':x.chalid,'team':x.teamid, 'value': x.chal.value, 'category':x.chal.category, 'time':unix_time(x.date)})
return jsonify(json)
@admin.route('/admin/solves/<teamid>/<chalid>/delete', methods=['POST'])
@admins_only
def delete_solve(teamid, chalid):
solve = Solves.query.filter_by(teamid=teamid, chalid=chalid).first()
db.session.delete(solve)
db.session.commit()
return '1'
@admin.route('/admin/statistics', methods=['GET'])
@admins_only
def admin_stats():
db.session.commit()
teams_registered = db.session.query(db.func.count(Teams.id)).first()[0]
wrong_count = db.session.query(db.func.count(WrongKeys.id)).first()[0]
solve_count = db.session.query(db.func.count(Solves.id)).first()[0]
challenge_count = db.session.query(db.func.count(Challenges.id)).first()[0]
most_solved_chal = Solves.query.add_columns(db.func.count(Solves.chalid).label('solves')).group_by(Solves.chalid).order_by('solves DESC').first()
least_solved_chal = Challenges.query.add_columns(db.func.count(Solves.chalid).label('solves')).outerjoin(Solves).group_by(Challenges.id).order_by('solves ASC').first()
db.session.close()
return render_template('admin/statistics.html', team_count=teams_registered,
wrong_count=wrong_count,
solve_count=solve_count,
challenge_count=challenge_count,
most_solved=most_solved_chal,
least_solved=least_solved_chal
)
@admin.route('/admin/wrong_keys/<page>', methods=['GET'])
@admins_only
def admin_wrong_key(page='1'):
page = abs(int(page))
results_per_page = 50
page_start = results_per_page * ( page - 1 )
page_end = results_per_page * ( page - 1 ) + results_per_page
wrong_keys = WrongKeys.query.add_columns(WrongKeys.flag, WrongKeys.team, WrongKeys.date,\
Challenges.name.label('chal_name'), Teams.name.label('team_name')).\
join(Challenges).join(Teams).order_by('team_name ASC').slice(page_start, page_end).all()
wrong_count = db.session.query(db.func.count(WrongKeys.id)).first()[0]
pages = int(wrong_count / results_per_page) + (wrong_count % results_per_page > 0)
return render_template('admin/wrong_keys.html', wrong_keys=wrong_keys, pages=pages)
@admin.route('/admin/correct_keys/<page>', methods=['GET'])
@admins_only
def admin_correct_key(page='1'):
page = abs(int(page))
results_per_page = 50
page_start = results_per_page * (page - 1)
page_end = results_per_page * (page - 1) + results_per_page
solves = Solves.query.add_columns(Solves.chalid, Solves.teamid, Solves.date, Solves.flag, \
Challenges.name.label('chal_name'), Teams.name.label('team_name')).\
join(Challenges).join(Teams).order_by('team_name ASC').slice(page_start, page_end).all()
solve_count = db.session.query(db.func.count(Solves.id)).first()[0]
pages = int(solve_count / results_per_page) + (solve_count % results_per_page > 0)
return render_template('admin/correct_keys.html', solves=solves, pages=pages)
@admin.route('/admin/fails/<teamid>', methods=['GET'])
@admins_only
def admin_fails(teamid='all'):
if teamid == "all":
fails = WrongKeys.query.count()
solves = Solves.query.count()
db.session.close()
json = {'fails':str(fails), 'solves': str(solves)}
return jsonify(json)
else:
fails = WrongKeys.query.filter_by(team=teamid).count()
solves = Solves.query.filter_by(teamid=teamid).count()
db.session.close()
json = {'fails':str(fails), 'solves': str(solves)}
return jsonify(json)
@admin.route('/admin/chal/new', methods=['POST'])
@admins_only
def admin_create_chal():
files = request.files.getlist('files[]')
# Create challenge
chal = Challenges(request.form['name'], request.form['desc'], request.form['value'], request.form['category'])
db.session.add(chal)
db.session.commit()
# Add keys
key = Keys(chal.id, request.form['key'], request.form['key_type[0]'])
db.session.add(key)
db.session.commit()
for f in files:
filename = secure_filename(f.filename)
if not filename:
continue
md5hash = hashlib.md5(filename.encode('utf-8')).hexdigest()
if not os.path.exists(os.path.join(os.path.normpath(app.config['UPLOAD_FOLDER']), md5hash)):
os.makedirs(os.path.join(os.path.normpath(app.config['UPLOAD_FOLDER']), md5hash))
f.save(os.path.join(os.path.normpath(app.config['UPLOAD_FOLDER']), md5hash, filename))
db_f = Files(chal.id, os.path.join(os.path.normpath(app.config['UPLOAD_FOLDER']), md5hash, filename))
db.session.add(db_f)
db.session.commit()
db.session.close()
return redirect('/admin/chals')
@admin.route('/admin/chal/delete', methods=['POST'])
@admins_only
def admin_delete_chal():
challenge = Challenges.query.filter_by(id=request.form['id']).first()
if challenge:
WrongKeys.query.filter_by(chal=challenge.id).delete()
Solves.query.filter_by(chalid=challenge.id).delete()
Keys.query.filter_by(chal=challenge.id).delete()
files = Files.query.filter_by(chal=challenge.id).all()
Files.query.filter_by(chal=challenge.id).delete()
for file in files:
folder = os.path.dirname(file.location)
rmdir(folder)
Tags.query.filter_by(chal=challenge.id).delete()
Challenges.query.filter_by(id=challenge.id).delete()
db.session.commit()
db.session.close()
return '1'
@admin.route('/admin/chal/update', methods=['POST'])
@admins_only
def admin_update_chal():
challenge = Challenges.query.filter_by(id=request.form['id']).first()
challenge.name = request.form['name']
challenge.description = request.form['desc']
challenge.value = request.form['value']
challenge.category = request.form['category']
db.session.add(challenge)
db.session.commit()
db.session.close()
return redirect('/admin/chals')
|
|
"""Command line wrapper for samtools"""
# Last Checked with samtools [0.1.20 and 1.2]
# TODO samtools 1.x has additional options over 0.x which
# are missing from this wrapper
from __future__ import print_function
from Bio.Application import _Option, _Argument, _Switch
from Bio.Application import AbstractCommandline, _ArgumentList
from Bio.Application import _StaticArgument
class SamtoolsViewCommandline(AbstractCommandline):
"""Command line wrapper for samtools view.
Extract/print all or sub alignments in SAM or BAM format, equivalent to::
$ samtools view [-bchuHS] [-t in.refList] [-o output] [-f reqFlag]
[-F skipFlag] [-q minMapQ] [-l library] [-r readGroup]
[-R rgFile] <in.bam>|<in.sam> [region1 [...]]
See http://samtools.sourceforge.net/samtools.shtml for more details
Example
-------
>>> from Bio.Sequencing.Applications import SamtoolsViewCommandline
>>> input_file = "/path/to/sam_or_bam_file"
>>> samtools_view_cmd = SamtoolsViewCommandline(input_file=input_file)
>>> print(samtools_view_cmd)
samtools view /path/to/sam_or_bam_file
"""
def __init__(self, cmd="samtools", **kwargs):
self.program_name = cmd
self.parameters = [
_StaticArgument("view"),
_Switch(["-b", "b"], "Output in the BAM format"),
_Switch(["-c", "c"],
"""Instead of printing the alignments, only count them and
print the total number.
All filter options, such as '-f', '-F' and '-q',
are taken into account"""),
_Switch(["-h", "h"], "Include the header in the output"),
_Switch(["-u", "u"],
"""Output uncompressed BAM.
This option saves time spent on compression/decompression
and is thus preferred when the output is piped to
another samtools command"""),
_Switch(["-H", "H"], "Output the header only"),
_Switch(["-S", "S"],
"""Input is in SAM.
If @SQ header lines are absent,
the '-t' option is required."""),
_Option(["-t", "t"],
"""This file is TAB-delimited.
Each line must contain the reference name and the
length of the reference, one line for each
distinct reference; additional fields are ignored.
This file also defines the order of the reference
sequences in sorting.
If you run 'samtools faidx <ref.fa>',
the resultant index file <ref.fa>.fai can be used
as this <in.ref_list> file.""",
filename=True, equate=False,
checker_function=lambda x: isinstance(x, str)),
_Option(["-o", "o"], "Output file",
filename=True, equate=False,
checker_function=lambda x: isinstance(x, str)),
_Option(["-f", "f"],
"""Only output alignments with all bits in
INT present in the FLAG field""",
equate=False,
checker_function=lambda x: isinstance(x, int)),
_Option(["-F", "F"],
"Skip alignments with bits present in INT",
equate=False,
checker_function=lambda x: isinstance(x, int)),
_Option(["-q", "q"],
"Skip alignments with MAPQ smaller than INT",
equate=False,
checker_function=lambda x: isinstance(x, int)),
_Option(["-r", "r"],
"Only output reads in read group STR",
equate=False,
checker_function=lambda x: isinstance(x, str)),
_Option(["-R", "R"],
"Output reads in read groups listed in FILE",
filename=True, equate=False,
checker_function=lambda x: isinstance(x, str)),
_Option(["-l", "l"],
"Only output reads in library STR",
equate=False,
checker_function=lambda x: isinstance(x, str)),
_Switch(["-1", "fast_bam"],
"Use zlib compression level 1 to compress the output"),
_Argument(["input", "input_file"],
"Input File Name", filename=True, is_required=True),
_Argument(["region"], "Region", is_required=False),
]
AbstractCommandline.__init__(self, cmd, **kwargs)
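# Additional usage sketch (hypothetical path, not from the original docstring):
# count alignments with MAPQ >= 30 instead of printing them.
# view_cmd = SamtoolsViewCommandline(input_file="/path/to/aln.bam", c=True, q=30)
# print(view_cmd) would render roughly: samtools view -c -q 30 /path/to/aln.bam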
class SamtoolsMpileupCommandline(AbstractCommandline):
"""Command line wrapper for samtools mpileup.
Generate BCF or pileup for one or multiple BAM files, equivalent to::
$ samtools mpileup [-EBug] [-C capQcoef] [-r reg] [-f in.fa]
[-l list] [-M capMapQ] [-Q minBaseQ]
[-q minMapQ] in.bam [in2.bam [...]]
See http://samtools.sourceforge.net/samtools.shtml for more details
Example:
>>> from Bio.Sequencing.Applications import SamtoolsMpileupCommandline
>>> input = ["/path/to/sam_or_bam_file"]
>>> samtools_mpileup_cmd = SamtoolsMpileupCommandline(input_file=input)
>>> print(samtools_mpileup_cmd)
samtools mpileup /path/to/sam_or_bam_file
"""
def __init__(self, cmd="samtools", **kwargs):
self.program_name = cmd
self.parameters = [
_StaticArgument("mpileup"),
_Switch(["-E", "E"],
"""Extended BAQ computation.
This option helps sensitivity especially
for MNPs, but may hurt specificity a little bit"""),
_Switch(["-B", "B"],
"""Disable probabilistic realignment for the
computation of base alignment quality (BAQ).
BAQ is the Phred-scaled probability of a read base being
misaligned.
Applying this option greatly helps to reduce false SNPs
caused by misalignments"""),
_Switch(["-g", "g"],
"""Compute genotype likelihoods and output them in the
binary call format (BCF)"""),
_Switch(["-u", "u"],
"""Similar to -g except that the output is
uncompressed BCF, which is preferred for piping"""),
_Option(["-C", "C"],
"""Coefficient for downgrading mapping quality for
reads containing excessive mismatches.
Given a read with a phred-scaled probability q of
being generated from the mapped position,
the new mapping quality is about sqrt((INT-q)/INT)*INT.
A zero value disables this functionality;
if enabled, the recommended value for BWA is 50""",
equate=False,
checker_function=lambda x: isinstance(x, int)),
_Option(["-r", "r"],
"Only generate pileup in region STR",
equate=False,
checker_function=lambda x: isinstance(x, str)),
_Option(["-f", "f"],
"""The faidx-indexed reference file in the FASTA format.
The file can be optionally compressed by razip""",
filename=True, equate=False,
checker_function=lambda x: isinstance(x, str)),
_Option(["-l", "l"],
"""BED or position list file containing a list of regions
or sites where pileup or BCF should be generated""",
filename=True, equate=False,
checker_function=lambda x: isinstance(x, str)),
_Option(["-M", "M"],
"Cap Mapping Quality at M",
equate=False,
checker_function=lambda x: isinstance(x, int)),
_Option(["-q", "q"],
"Minimum mapping quality for an alignment to be used",
equate=False,
checker_function=lambda x: isinstance(x, int)),
_Option(["-Q", "Q"],
"Minimum base quality for a base to be considered",
equate=False,
checker_function=lambda x: isinstance(x, int)),
_Switch(["-6", "illumina_13"],
"Assume the quality is in the Illumina 1.3+ encoding"),
_Switch(["-A", "A"],
"Do not skip anomalous read pairs in variant calling."),
_Option(["-b", "b"],
"List of input BAM files, one file per line",
filename=True, equate=False,
checker_function=lambda x: isinstance(x, str)),
_Option(["-d", "d"],
"At a position, read maximally INT reads per input BAM",
equate=False,
checker_function=lambda x: isinstance(x, int)),
_Switch(["-D", "D"], "Output per-sample read depth"),
_Switch(["-S", "S"], """Output per-sample Phred-scaled
strand bias P-value"""),
_Option(["-e", "e"],
"""Phred-scaled gap extension sequencing error probability.
Reducing INT leads to longer indels""",
equate=False,
checker_function=lambda x: isinstance(x, int)),
_Option(["-h", "h"],
"""Coefficient for modeling homopolymer errors.
Given an l-long homopolymer run, the sequencing error
of an indel of size s is modeled as INT*s/l""",
equate=False,
checker_function=lambda x: isinstance(x, int)),
_Switch(["-I", "I"], "Do not perform INDEL calling"),
_Option(["-L", "L"],
"""Skip INDEL calling if the average per-sample
depth is above INT""",
equate=False,
checker_function=lambda x: isinstance(x, int)),
_Option(["-o", "o"],
"""Phred-scaled gap open sequencing error probability.
Reducing INT leads to more indel calls.""",
equate=False,
checker_function=lambda x: isinstance(x, int)),
_Option(["-p", "p"],
"""Comma delimited list of platforms (determined by @RG-PL)
from which indel candidates are obtained.
It is recommended to collect indel candidates from
sequencing technologies that have low indel error rate
such as ILLUMINA""",
equate=False,
checker_function=lambda x: isinstance(x, str)),
_ArgumentList(["input_file"],
"Input File for generating mpileup",
filename=True, is_required=True),
]
AbstractCommandline.__init__(self, cmd, **kwargs)
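# Additional usage sketch (hypothetical paths): emit uncompressed BCF for a
# single region, suitable for piping into a downstream caller.
# mpileup_cmd = SamtoolsMpileupCommandline(input_file=["/path/to/aln.bam"],
#                                          g=True, u=True, r="chr1:1000-2000")
# print(mpileup_cmd) would render roughly:
# samtools mpileup -g -u -r chr1:1000-2000 /path/to/aln.bam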
class SamtoolsReheaderCommandline(AbstractCommandline):
"""Command line wrapper for samtools reheader.
Replace the header in in.bam with the header
in in.header.sam, equivalent to::
$ samtools reheader <in.header.sam> <in.bam>
See http://samtools.sourceforge.net/samtools.shtml for more details
Example:
>>> from Bio.Sequencing.Applications import SamtoolsReheaderCommandline
>>> input_header = "/path/to/header_sam_file"
>>> input_bam = "/path/to/input_bam_file"
>>> samtools_reheader_cmd = SamtoolsReheaderCommandline(\
input_header=input_header,\
input_bam=input_bam)
>>> print(samtools_reheader_cmd)
samtools reheader /path/to/header_sam_file /path/to/input_bam_file
"""
def __init__(self, cmd="samtools", **kwargs):
self.program_name = cmd
self.parameters = [
_StaticArgument("reheader"),
_Argument(["input_header", "header_sam", "sam_file"],
"Sam file with header",
filename=True, is_required=True),
_Argument(["input_bam", "input_file", "bam_file"],
"BAM file for writing header to",
filename=True, is_required=True)
]
AbstractCommandline.__init__(self, cmd, **kwargs)
class SamtoolsCatCommandline(AbstractCommandline):
"""Command line wrapper for samtools cat.
Concatenate BAMs, equivalent to::
$ samtools cat [-h header.sam] [-o out.bam] <in1.bam> <in2.bam> [ ... ]
See http://samtools.sourceforge.net/samtools.shtml for more details
Example:
>>> from Bio.Sequencing.Applications import SamtoolsCatCommandline
>>> input_bam1 = "/path/to/input_bam1"
>>> input_bam2 = "/path/to/input_bam2"
>>> input_bams = [input_bam1, input_bam2]
>>> samtools_cat_cmd = SamtoolsCatCommandline(input_bam=input_bams)
>>> print(samtools_cat_cmd)
samtools cat /path/to/input_bam1 /path/to/input_bam2
"""
def __init__(self, cmd="samtools", **kwargs):
self.program_name = cmd
self.parameters = [
_StaticArgument("cat"),
_Option(["-h", "h"], "Header SAM file",
filename=True, equate=False,
checker_function=lambda x: isinstance(x, str)),
_Option(["-o", "o"], "Output SAM file",
filename=True, equate=False,
checker_function=lambda x: isinstance(x, str)),
_ArgumentList(["input", "input_bam", "bams"], "Input BAM files",
filename=True, is_required=True)
]
AbstractCommandline.__init__(self, cmd, **kwargs)
class SamtoolsSortCommandline(AbstractCommandline):
"""Command line wrapper for samtools sort.
Sort alignments by leftmost coordinates, equivalent to::
$ samtools sort [-no] [-m maxMem] <in.bam> <out.prefix>
See http://samtools.sourceforge.net/samtools.shtml for more details
Example
-------
>>> from Bio.Sequencing.Applications import SamtoolsSortCommandline
>>> input_bam = "/path/to/input_bam"
>>> out_prefix = "/path/to/out_prefix"
>>> samtools_sort_cmd = SamtoolsSortCommandline(\
input_bam=input_bam,\
out_prefix=out_prefix)
>>> print(samtools_sort_cmd)
samtools sort /path/to/input_bam /path/to/out_prefix
"""
def __init__(self, cmd="samtools", **kwargs):
self.program_name = cmd
self.parameters = [
_StaticArgument("sort"),
_Switch(["-o", "o"], """Output the final alignment
to the standard output"""),
_Switch(["-n", "n"], """Sort by read names rather
than by chromosomal coordinates"""),
_Option(["-m", "m"], "Approximately the maximum required memory",
equate=False,
checker_function=lambda x: isinstance(x, int)),
_Argument(["input_bam"], "Input BAM file",
filename=True, is_required=True),
_Argument(["out_prefix"], "Output prefix",
filename=True, is_required=True)
]
AbstractCommandline.__init__(self, cmd, **kwargs)
class SamtoolsMergeCommandline(AbstractCommandline):
"""Command line wrapper for samtools merge.
Merge multiple sorted alignments, equivalent to::
$ samtools merge [-nur1f] [-h inh.sam] [-R reg]
<out.bam> <in1.bam> <in2.bam> [...]
See http://samtools.sourceforge.net/samtools.shtml for more details
Example
-------
>>> from Bio.Sequencing.Applications import SamtoolsMergeCommandline
>>> out_bam = "/path/to/out_bam"
>>> in_bam = ["/path/to/input_bam1", "/path/to/input_bam2"]
>>> samtools_merge_cmd = SamtoolsMergeCommandline(\
out_bam=out_bam,\
input_bam=in_bam)
>>> print(samtools_merge_cmd)
samtools merge /path/to/out_bam /path/to/input_bam1 /path/to/input_bam2
"""
def __init__(self, cmd="samtools", **kwargs):
self.program_name = cmd
self.parameters = [
_StaticArgument("merge"),
_Switch(["-n", "n"],
"""The input alignments are sorted by read names
rather than by chromosomal coordinates"""),
_Switch(["-r", "r"], """Attach an RG tag to each alignment.
The tag value is inferred from file names"""),
_Switch(["-u", "u"], "Uncompressed BAM output"),
_Switch(["-1", "fast_bam"], """Use zlib compression level 1
to compress the output"""),
_Switch(["-f", "f"], """Force to overwrite the
output file if present"""),
_Option(["-h", "h"], """Use the lines of FILE as '@'
headers to be copied to out.bam""",
filename=True, equate=False,
checker_function=lambda x: isinstance(x, str)),
_Option(["-R", "R"],
"Merge files in the specified region indicated by STR",
equate=False,
checker_function=lambda x: isinstance(x, str)),
_Argument(["output_bam", "out_bam", "out", "output"],
"Output BAM file",
filename=True, is_required=True),
_ArgumentList(["input_bam", "in_bam", "input", "bam"],
"Input BAM",
filename=True, is_required=True),
]
AbstractCommandline.__init__(self, cmd, **kwargs)
class SamtoolsIndexCommandline(AbstractCommandline):
"""Command line wrapper for samtools index.
Index sorted alignment for fast random access, equivalent to::
$ samtools index <aln.bam>
See http://samtools.sourceforge.net/samtools.shtml for more details
Example:
>>> from Bio.Sequencing.Applications import SamtoolsIndexCommandline
>>> input = "/path/to/aln_bam"
>>> samtools_index_cmd = SamtoolsIndexCommandline(input_bam=input)
>>> print(samtools_index_cmd)
samtools index /path/to/aln_bam
"""
def __init__(self, cmd="samtools", **kwargs):
self.program_name = cmd
self.parameters = [
_StaticArgument("index"),
_Argument(["input", "in_bam", "input_bam"],
"BAM file to be indexed"),
]
AbstractCommandline.__init__(self, cmd, **kwargs)
class SamtoolsIdxstatsCommandline(AbstractCommandline):
"""Command line wrapper for samtools idxstats.
Retrieve and print stats in the index file, equivalent to::
$ samtools idxstats <aln.bam>
See http://samtools.sourceforge.net/samtools.shtml for more details
Example:
>>> from Bio.Sequencing.Applications import SamtoolsIdxstatsCommandline
>>> input = "/path/to/aln_bam"
>>> samtools_idxstats_cmd = SamtoolsIdxstatsCommandline(input_bam=input)
>>> print(samtools_idxstats_cmd)
samtools idxstats /path/to/aln_bam
"""
def __init__(self, cmd="samtools", **kwargs):
self.program_name = cmd
self.parameters = [
_StaticArgument("idxstats"),
_Argument(["input", "in_bam", "input_bam"],
"BAM file to be indexed")
]
AbstractCommandline.__init__(self, cmd, **kwargs)
class SamtoolsFaidxCommandline(AbstractCommandline):
"""Command line wrapper for samtools faidx.
Index the reference FASTA or retrieve subsequences from it, equivalent to::
$ samtools faidx <ref.fasta> [region1 [...]]
See http://samtools.sourceforge.net/samtools.shtml for more details
Example
-------
>>> from Bio.Sequencing.Applications import SamtoolsFaidxCommandline
>>> reference = "/path/to/reference.fasta"
>>> samtools_faidx_cmd = SamtoolsFaidxCommandline(reference=reference)
>>> print(samtools_faidx_cmd)
samtools faidx /path/to/reference.fasta
"""
def __init__(self, cmd="samtools", **kwargs):
self.program_name = cmd
self.parameters = [
_StaticArgument("faidx"),
_Argument(["reference", "reference_fasta", "ref"],
"Reference FASTA to be indexed",
filename=True, is_required=True)
]
AbstractCommandline.__init__(self, cmd, **kwargs)
class SamtoolsFixmateCommandline(AbstractCommandline):
"""Command line wrapper for samtools fixmate.
Fill in mate coordinates, ISIZE and mate related
flags from a name-sorted alignment, equivalent to::
$ samtools fixmate <in.nameSrt.bam> <out.bam>
See http://samtools.sourceforge.net/samtools.shtml for more details
Example:
>>> from Bio.Sequencing.Applications import SamtoolsFixmateCommandline
>>> in_bam = "/path/to/in.nameSrt.bam"
>>> out_bam = "/path/to/out.bam"
>>> samtools_fixmate_cmd = SamtoolsFixmateCommandline(\
input_bam=in_bam,\
out_bam=out_bam)
>>> print(samtools_fixmate_cmd)
samtools fixmate /path/to/in.nameSrt.bam /path/to/out.bam
"""
def __init__(self, cmd="samtools", **kwargs):
self.program_name = cmd
self.parameters = [
_StaticArgument("fixmate"),
_Argument(["in_bam", "sorted_bam", "input_bam",
"input", "input_file"],
"Name Sorted Alignment File ",
filename=True, is_required=True),
_Argument(["out_bam", "output_bam", "output", "output_file"],
"Output file",
filename=True, is_required=True)
]
AbstractCommandline.__init__(self, cmd, **kwargs)
class SamtoolsRmdupCommandline(AbstractCommandline):
"""Command line wrapper for samtools rmdup.
Remove potential PCR duplicates, equivalent to::
$ samtools rmdup [-sS] <input.srt.bam> <out.bam>
See http://samtools.sourceforge.net/samtools.shtml for more details
Example:
>>> from Bio.Sequencing.Applications import SamtoolsRmdupCommandline
>>> input_sorted_bam = "/path/to/input.srt.bam"
>>> out_bam = "/path/to/out.bam"
>>> samtools_rmdup_cmd = SamtoolsRmdupCommandline(\
input_bam=input_sorted_bam,\
out_bam=out_bam)
>>> print(samtools_rmdup_cmd)
samtools rmdup /path/to/input.srt.bam /path/to/out.bam
"""
def __init__(self, cmd="samtools", **kwargs):
self.program_name = cmd
self.parameters = [
_StaticArgument("rmdup"),
_Switch(["-s", "s"],
"""Remove duplicates for single-end reads.
By default, the command works for paired-end
reads only"""),
_Switch(["-S", "S"], """Treat paired-end reads
as single-end reads"""),
_Argument(["in_bam", "sorted_bam", "input_bam",
"input", "input_file"],
"Name Sorted Alignment File ",
filename=True, is_required=True),
_Argument(["out_bam", "output_bam", "output", "output_file"],
"Output file", filename=True, is_required=True)
]
AbstractCommandline.__init__(self, cmd, **kwargs)
class SamtoolsCalmdCommandline(AbstractCommandline):
"""Command line wrapper for samtools calmd.
Generate the MD tag, equivalent to::
$ samtools calmd [-EeubSr] [-C capQcoef] <aln.bam> <ref.fasta>
See http://samtools.sourceforge.net/samtools.shtml for more details
Example:
>>> from Bio.Sequencing.Applications import SamtoolsCalmdCommandline
>>> input_bam = "/path/to/aln.bam"
>>> reference_fasta = "/path/to/reference.fasta"
>>> samtools_calmd_cmd = SamtoolsCalmdCommandline(\
input_bam=input_bam,\
reference=reference_fasta)
>>> print(samtools_calmd_cmd)
samtools calmd /path/to/aln.bam /path/to/reference.fasta
"""
def __init__(self, cmd="samtools", **kwargs):
self.program_name = cmd
self.parameters = [
_StaticArgument("calmd"),
_Switch(["-E", "E"],
"""Extended BAQ calculation.
This option trades specificity for sensitivity,
though the effect is minor."""),
_Switch(["-e", "e"],
"""Convert the read base to = if it is
identical to the aligned reference base.
Indel caller does not support the = bases
at the moment."""),
_Switch(["-u", "u"], "Output uncompressed BAM"),
_Switch(["-b", "b"], "Output compressed BAM "),
_Switch(["-S", "S"], "The input is SAM with header lines "),
_Switch(["-r", "r"], """Compute the BQ tag (without -A)
or cap base quality by BAQ (with -A)."""),
_Switch(["-A", "A"],
"""When used jointly with -r this option overwrites
the original base quality"""),
_Option(["-C", "C"], """Coefficient to cap mapping quality
of poorly mapped reads.
See the pileup command for details.""",
equate=False,
checker_function=lambda x: isinstance(x, int)),
_Argument(["input", "input_file", "in_bam", "infile", "input_bam"],
"Input BAM", filename=True, is_required=True),
_Argument(["reference", "reference_fasta", "ref"],
"Reference FASTA to be indexed",
filename=True, is_required=True)
]
AbstractCommandline.__init__(self, cmd, **kwargs)
class SamtoolsTargetcutCommandline(AbstractCommandline):
"""Command line wrapper for samtools targetcut.
This command identifies target regions by examining the continuity
of read depth, computes haploid consensus sequences of targets
and outputs a SAM with each sequence corresponding to a target,
equivalent to::
$ samtools targetcut [-Q minBaseQ] [-i inPenalty] [-0 em0]
[-1 em1] [-2 em2] [-f ref] <in.bam>
See http://samtools.sourceforge.net/samtools.shtml for more details
Example:
>>> from Bio.Sequencing.Applications import SamtoolsTargetcutCommandline
>>> input_bam = "/path/to/aln.bam"
>>> samtools_targetcut_cmd = SamtoolsTargetcutCommandline(input_bam=input_bam)
>>> print(samtools_targetcut_cmd)
samtools targetcut /path/to/aln.bam
"""
def __init__(self, cmd="samtools", **kwargs):
self.program_name = cmd
self.parameters = [
_StaticArgument("targetcut"),
_Option(["-Q", "Q"], "Minimum Base Quality ",
equate=False,
checker_function=lambda x: isinstance(x, int)),
_Option(["-i", "i"], "Insertion Penalty",
equate=False,
checker_function=lambda x: isinstance(x, int)),
_Option(["-f", "f"], "Reference Filename",
filename=True, equate=False,
checker_function=lambda x: isinstance(x, str)),
_Option(["-0", "em0"], "em0", equate=False,
checker_function=lambda x: isinstance(x, str)),
_Option(["-1", "em1"], "em1", equate=False,
checker_function=lambda x: isinstance(x, str)),
_Option(["-2", "em2"], "em2", equate=False,
checker_function=lambda x: isinstance(x, str)),
_Argument(["input", "input_bam", "in_bam"],
"Input file",
filename=True, is_required=True)
]
AbstractCommandline.__init__(self, cmd, **kwargs)
class SamtoolsPhaseCommandline(AbstractCommandline):
"""Command line wrapper for samtools phase.
Call and phase heterozygous SNPs, equivalent to::
$ samtools phase [-AF] [-k len] [-b prefix]
[-q minLOD] [-Q minBaseQ] <in.bam>
See http://samtools.sourceforge.net/samtools.shtml for more details
Example:
>>> from Bio.Sequencing.Applications import SamtoolsPhaseCommandline
>>> input_bam = "/path/to/in.bam"
>>> samtools_phase_cmd = SamtoolsPhaseCommandline(input_bam=input_bam)
>>> print(samtools_phase_cmd)
samtools phase /path/to/in.bam
"""
def __init__(self, cmd="samtools", **kwargs):
self.program_name = cmd
self.parameters = [
_StaticArgument("phase"),
_Argument(["input", "input_bam", "in_bam"], "Input file",
filename=True, is_required=True),
_Switch(["-A", "A"], "Drop reads with ambiguous phase"),
_Option(["-b", "b"], "Prefix of BAM output",
filename=True, equate=False,
checker_function=lambda x: isinstance(x, str)),
_Switch(["-F", "F"], "Do not attempt to fix chimeric reads"),
_Option(["-k", "k"], "Maximum length for local phasing",
equate=False,
checker_function=lambda x: isinstance(x, int)),
_Option(["-q", "q"], """Minimum Phred-scaled LOD to
call a heterozygote""",
equate=False,
checker_function=lambda x: isinstance(x, int)),
_Option(["-Q", "Q"], """Minimum base quality to be
used in het calling""",
equate=False,
checker_function=lambda x: isinstance(x, int))
]
AbstractCommandline.__init__(self, cmd, **kwargs)
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
|
|
# coding: utf-8
# Copyright 2017 video++ Project, SJTU MediaLab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import json
import os
from flask import request, jsonify, abort, make_response
from vpp import log
from vpp import env
from vpp.config import CONF
from vpp.api.v1 import vpp_api as api
from vpp.api.v1 import auth
from vpp.db import models
from vpp.db.api import db_api
from vpp.job.context import JobContext, JobState
from vpp.queue import pending_queue
from vpp import exceptions as excepts
LOG = log.get_logger(__name__, CONF.server_log_file)
@api.route("/jobs", methods=['GET'])
def job_LIST():
"""API for getting the user's job list
user info should be provided in the request header
"""
user = auth.verify_user_or_abort(request.headers)
try:
jobs_db = db_api.list_jobs(user.username)
except excepts.VppDBEntryNotFound as e:
resp = {
"status": True,
"message": "no jobs",
"jobs": {}
}
return jsonify(resp), 200
except excepts.VppDBError as e:
abort(500, "db error")
jobs = []
for job_db in jobs_db:
job = {
"id": job_db.id,
"state": job_db.state,
"progress": "%d%%" % job_db.progress,
"owner": job_db.username,
"retried_times": job_db.retried_times,
"create_time": str(job_db.create_time),
"finish_time": str(job_db.finish_time),
"tasks": job_db.tasks,
"error_info": job_db.error_info,
}
jobs.append({"job": job})
resp = {
"status": True,
"message": "",
"jobs": jobs
}
return jsonify(resp), 200
@api.route("/jobs/<job_id>", methods=['GET'])
def job_GET(job_id):
"""API for getting a job specified by the id
user info should be provided in the request header
"""
user = auth.verify_user_or_abort(request.headers)
job = query_job_or_abort(user.username, job_id)
job_info = {}
if not job or job.deleted:
msg = "job not exist"
resp = {
"status": False,
"message": msg,
"job": job_info
}
return jsonify(resp), 404
msg = ""
job_info = {
"id": job.id,
"state": job.state,
"progress": "%d%%" % job.progress,
"owner": job.username,
"retried_times": job.retried_times,
"create_time": str(job.create_time),
"finish_time": str(job.finish_time),
"tasks": job.tasks,
"error_info": job.error_info,
}
resp = {
"status": True,
"message": msg,
"job": job_info
}
return jsonify(resp), 200
@api.route("/jobs", methods=['POST'])
def job_CREATE():
"""create a job
abort with flask.abort() on any exceptions
"""
user = auth.verify_user_or_abort(request.headers)
data = request.json
try:
job = data["job"]
source_file = job["source_file"]
job_type = job["type"]
params = job["params"]
except KeyError as e:
LOG.error("%s, body: %s" % (e, data))
abort(400, "field not provided: %s" % e)
except Exception as e:
LOG.error("%s, request body %s" % (e, data))
abort(500, "unkown error in handling the request body %s" % data)
return create_job(user.username, job_type, source_file, params)
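# Expected request body for job_CREATE (field names taken from the parsing
# above; the values shown are purely illustrative):
# {
#   "job": {
#     "source_file": "input.mp4",
#     "type": "filter",
#     "params": {"...": "..."}
#   }
# }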
@api.route("/jobs/<job_id>", methods=['DELETE'])
def job_DELETE(job_id):
"""API for deleting a job
user info should be provided in the request header
"""
user = auth.verify_user_or_abort(request.headers)
job = query_job_or_abort(user.username, job_id)
if not job or job.deleted:
msg = "job not found. username: %s, job_id: %s" % \
(user.username, job_id)
resp = {
"status": False,
"message": msg
}
return jsonify(resp), 400
try:
db_api.delete_job(job_id)
except Exception as e:
LOG.error("delete job %s failed: %s" % (job_id, e))
resp = {
"status": False,
"message": "delete job failed: internal server error",
}
return jsonify(resp), 500
else:
resp = {
"status": True,
"message": "job deleted: %s" % job_id,
}
return jsonify(resp), 202 # HTTP DELETED
def query_job_or_abort(username, job_id):
"""verify the user ownership over the job
abort on any exception with respective http code
:return: job record on success
"""
try:
job = db_api.get_job(job_id)
except Exception as e:
LOG.error("query task (%s: %s) faild: %s" % (username, job_id, e))
abort(500, "vpp db error")
if not job or job.username != username:
abort(400, "job not found")
return job
def create_job(username, job_type, source_file, job_params):
"""create a new job
:username: username
:source_file: source file name
:return: http response
"""
def get_abs_path_or_none(filename):
_file_dir = "/app/videoplusplus/data/user_files/"
_file = os.path.join(_file_dir, username, filename)
return _file if os.path.exists(_file) else None
local_file = get_abs_path_or_none(source_file)
if not local_file:
LOG.error("source file %s not exist" % local_file)
resp = {
"status": False,
"message": "create job failed, source file %s not exist" %
source_file,
"job": {}
}
return jsonify(resp), 400
job_id = str(uuid.uuid4())
job_state = JobState.PENDING
job_owner = username
job_filter_chain = ["start", "slicer", "uploader", "filter", "downloader",
"merger"]
jobcxt = JobContext(job_id=job_id, job_type=job_type, job_state=job_state,
job_owner=job_owner, src_file=local_file,
job_params=job_params, filter_chain=job_filter_chain)
try:
job_record = db_api.create_job(jobcxt)
jobinfo_record = db_api.create_jobinfo(jobcxt)
except Exception as e:
LOG.error("create job failed, db error: %s" % e)
return make_response("create job failed: db error", 500)
# insert into pending queue, will be processed by scheduler later
LOG.debug("--> ENQUE job %s into pending queue" % job_id)
pending_queue.append(jobcxt)
LOG.debug("create job successful, id: %s" % job_id)
response = {
"status": True,
"message": "create job successful",
"job": {
"id": jobcxt.job_id,
"owner": jobcxt.job_owner,
"create_time": str(job_record.create_time),
"status_url": "%s/api/jobs/%s" % (env.SERVER_URL, jobcxt.job_id),
}
}
return jsonify(response), 201 # HTTP CREATED
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Operations for TPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
# pylint: disable=wildcard-import,unused-import
from tensorflow.python.ops import gen_tpu_ops
from tensorflow.python.ops.gen_tpu_ops import *
# pylint: enable=wildcard-import,unused-import
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import tpu_function
from tensorflow.python.util.tf_export import tf_export
def _create_default_group_assignment():
num_shards = tpu_function.get_tpu_context().number_of_shards
if num_shards is None:
logging.warning(
"cross_replica_sum should be used within a tpu_shard_context, but "
"got unset number_of_shards. Assuming 1.")
num_shards = 1
group_assignment = [list(range(num_shards))]
return group_assignment
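# For example, with 8 shards the default assignment is a single group holding
# every replica: [[0, 1, 2, 3, 4, 5, 6, 7]].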
def all_to_all(x,
concat_dimension,
split_dimension,
split_count,
group_assignment=None,
name=None):
"""Exchange data across TPU replicas.
Args:
x: The local tensor.
concat_dimension: The dimension number to concatenate.
split_dimension: The dimension number to split.
split_count: The number of splits; this number must equal the sub-group
size (group_assignment.get_shape()[1]).
group_assignment: Optional 2d int32 lists with shape [num_groups,
num_replicas_per_group]. `group_assignment[i]` represents the replica
ids in the ith subgroup.
name: Optional op name.
Returns:
A `Tensor` which is concatenated by data from different replicas.
"""
if group_assignment is None:
group_assignment = _create_default_group_assignment()
return gen_tpu_ops.all_to_all(
x,
group_assignment,
concat_dimension=concat_dimension,
split_dimension=split_dimension,
split_count=split_count,
name=name)
@ops.RegisterGradient("AllToAll")
def _all_to_all_grad(op, grad):
# The gradient of an all-to-all is also an all-to-all, but with the
# split_dimension and concat_dimension swapped.
# The gradient with respect to group_assignment is None.
return [
gen_tpu_ops.all_to_all(
grad,
op.inputs[1],
concat_dimension=op.get_attr("split_dimension"),
split_dimension=op.get_attr("concat_dimension"),
split_count=op.get_attr("split_count")), None
]
@tf_export(v1=["tpu.cross_replica_sum"])
def cross_replica_sum(x, group_assignment=None, name=None):
"""Sum the input tensor across replicas according to group_assignment.
Args:
x: The local tensor to the sum.
group_assignment: Optional 2d int32 lists with shape [num_groups,
num_replicas_per_group]. `group_assignment[i]` represents the replica
ids in the ith subgroup.
name: Optional op name.
Returns:
A `Tensor` which is summed across replicas.
"""
if group_assignment is None:
group_assignment = _create_default_group_assignment()
return gen_tpu_ops.cross_replica_sum(x, group_assignment, name=name)
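# Usage sketch (assumes 8-way replication split into two independent groups):
# y = cross_replica_sum(x, group_assignment=[[0, 1, 2, 3], [4, 5, 6, 7]])
# Each replica then receives the sum of x taken over its own group only.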
def collective_permute(x, source_target_pairs, name=None):
"""Permute the input tensor across replicas given source_target_pairs.
For each source_target_pair <a, b>, we send replica a's input to replica b.
Each replica id must only appear once in the source column. Also it must
only appear once in the target column.
For the replica id not in the target column, this op returns a zero tensor
with the same shape and dtype of the input x.
For example, suppose there are 4 TPU instances: `[A, B, C, D]`. Passing
source_target_pairs=`[[0,1],[1,2],[2,3]]` gets the outputs:
`[0, A, B, C]`.
Args:
x: The local tensor to be permuted.
source_target_pairs: 2d int lists with shape [num_pairs, 2].
source_target_pairs[i][0] represents the source replica id and
source_target_pairs[i][1] represents the target replica id.
name: Optional op name.
Returns:
A `Tensor` which is permuted.
"""
return gen_tpu_ops.collective_permute(x, source_target_pairs, name=name)
@ops.RegisterGradient("CollectivePermute")
def _collective_permute_grad(op, grad):
# The gradient of a collective permute operation is also a collective
# permute, but with source/target pairs reversed. The gradient with respect
# to input argument `source_target_pairs` is `None`.
source_target_pairs = op.inputs[1][:, ::-1]
return [gen_tpu_ops.collective_permute(grad, source_target_pairs), None]
@ops.RegisterGradient("CrossReplicaSum")
def _cross_replica_sum_grad(op, grad):
# The gradient of a cross replica sum is also a cross-replica sum.
# The gradient with respect to group_assignment is None.
return [gen_tpu_ops.cross_replica_sum(grad, op.inputs[1]), None]
# This extra type checking exists to give a more helpful error message in
# the common case that uint8 and int64 values are infed. Remove when both
# types are supported.
_SUPPORTED_INFEED_DTYPES = set([
dtypes.bool, dtypes.int32, dtypes.int64, dtypes.bfloat16, dtypes.float32,
dtypes.complex64, dtypes.uint32
])
@ops.RegisterGradient("TPUEmbeddingActivations")
def _embedding_activations_grad(activations_op, grad_wrt_activations):
"""Saves the gradient of embedding activations ops in a graph collection."""
g = ops.get_default_graph()
table_id = activations_op.get_attr("table_id")
lookup_id = activations_op.get_attr("lookup_id")
table_gradients = g.get_collection_ref(
"tpu_embedding_gradients_table_%d" % table_id)
if not table_gradients:
raise RuntimeError(
"Gradients for TPUEmbedding have been generated in non-training mode."
"This is not expected. Consider putting your Optimizer.minimize code "
"behind the training mode condition check. For Estimator, you can "
"do \n\n"
" if mode == tf.estimator.ModeKeys.TRAIN:\n"
" train_op = opt.minimize(loss)\n"
"\n")
table_gradients[lookup_id] = array_ops.identity(grad_wrt_activations)
return [
# RegisterGradient requires that value be returned for all inputs. Since
# the first argument (tpu_gradient_variable_{table_name}) has shape [1],
# we will return zeros(shape=[1]). The actual gradient w.r.t. the
# embedding activations (grad_wrt_activations) has the same shape as the
# activations returned by embedding_activations.
array_ops.zeros(arg.shape, dtype=dtypes.float32)
for arg in activations_op.inputs
]
def infeed_dequeue(dtype, shape, name=None):
"""A placeholder op for a value that will be fed into the computation.
Args:
dtype: A `tf.DType`. The type of elements in the tensor.
shape: A `tf.TensorShape` or list of `ints`. The shape of the tensor.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `dtype`.
A tensor that will be provided using the infeed mechanism.
Raises:
TypeError: If 'dtype` is not a supported infeed type.
"""
if dtype not in _SUPPORTED_INFEED_DTYPES:
raise TypeError(
"Operation '{}' has type {} which is not a supported TPU infeed type. "
"Supported types are: {}".format(name, dtype,
list(_SUPPORTED_INFEED_DTYPES)))
return gen_tpu_ops.infeed_dequeue(dtype, shape, name=name)
# pylint: disable=redefined-outer-name
def infeed_dequeue_tuple(dtypes, shapes, name=None):
"""A placeholder op for values fed into the TPU simultaneously as a tuple.
Args:
dtypes: A list of `tf.DType`s that has length `>= 1`.
The element types of each element in `outputs`.
shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`).
The shapes of each tensor in `outputs`.
name: A name for the operation (optional).
Returns:
A list of `Tensor` objects of type `dtypes`.
A list of tensors that will be provided using the infeed mechanism.
Raises:
TypeError: If a type in 'dtypes` is not a supported infeed type.
"""
for dtype in dtypes:
if dtype not in _SUPPORTED_INFEED_DTYPES:
raise TypeError(
"{} is not a supported TPU infeed type. Supported types are: "
"{}".format(dtype, list(_SUPPORTED_INFEED_DTYPES)))
return gen_tpu_ops.infeed_dequeue_tuple(dtypes, shapes, name=name)
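# Usage sketch (shapes, dtypes and batch size are illustrative; this runs
# inside a TPU computation fed by a matching infeed enqueue on the host):
# images, labels = infeed_dequeue_tuple(
#     dtypes=[dtypes.float32, dtypes.int32],
#     shapes=[[128, 224, 224, 3], [128]])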
# pylint: enable=redefined-outer-name
# pylint: disable=protected-access
def send_tpu_embedding_gradients(inputs,
config,
learning_rates=None,
name=None):
"""A placeholder op for feeding per-sample gradients to the embedding layer.
Args:
inputs: A TensorList of gradients with which to update embedding tables.
This argument has the same length and shapes as the return value of
RecvTPUEmbeddingActivations, but contains gradients of the model's
loss with respect to the embedding activations. The embedding tables
are updated from these gradients via the optimizers specified in the
TPU embedding configuration given to tpu.initialize_system.
config: Serialized TPUEmbeddingConfiguration proto.
learning_rates: A TensorList of float32 scalars, one for each dynamic
learning rate tag: see the comments in
//third_party/tensorflow/core/protobuf/tpu/
optimization_parameters.proto.
Multiple tables can share the same dynamic learning rate tag as
specified in the configuration. If the learning rates for all tables
are constant, this list should be empty.
name: A name for the operation (optional).
Returns:
A SendTPUEmbeddingGradients operation.
"""
if learning_rates is None:
learning_rates = []
return gen_tpu_ops.send_tpu_embedding_gradients(
inputs=inputs, learning_rates=learning_rates, config=config, name=name)
send_tpu_embedding_gradients.__doc__ = (
gen_tpu_ops.send_tpu_embedding_gradients.__doc__)
# pylint: disable=protected-access
def enqueue_tpu_embedding_integer_batch(batch,
device_ordinal,
mode_override=None,
name=None):
"""A placeholder op for enqueueing embedding IDs to the TPU.
Args:
batch: A list of 1D tensors, one for each embedding table, containing the
indices into the tables.
device_ordinal: The TPU device to use. Should be >= 0 and less than the
number of TPU cores in the task on which the node is placed.
mode_override: A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified',
'inference', 'training', 'backward_pass_only'}. When set to
'unspecified', the mode set in TPUEmbeddingConfiguration is used,
otherwise mode_override is used (optional).
name: A name for the operation (optional).
Returns:
An EnqueueTPUEmbeddingIntegerBatch operation.
"""
if mode_override is None:
mode_override = "unspecified"
return gen_tpu_ops.enqueue_tpu_embedding_integer_batch(
batch=batch,
device_ordinal=device_ordinal,
mode_override=mode_override,
name=name)
enqueue_tpu_embedding_integer_batch.__doc__ = (
gen_tpu_ops.enqueue_tpu_embedding_integer_batch.__doc__)
# pylint: disable=protected-access
def enqueue_tpu_embedding_sparse_batch(sample_indices,
embedding_indices,
aggregation_weights,
device_ordinal,
combiners=None,
mode_override=None,
name=None):
"""A placeholder op for enqueueing embedding IDs to the TPU.
Args:
sample_indices: A list of rank 1 Tensors specifying the training example
and feature to which the corresponding embedding_indices and
aggregation_weights values belong. sample_indices[i] must equal b * nf +
f, where nf is the number of features from the corresponding table, f is
in [0, nf), and b is in [0, batch size). Both int32 and int64 are allowed,
and will be converted to int32 internally.
embedding_indices: A list of rank 1 Tensors, indices into the embedding
tables. Both int32 and int64 are allowed and will be converted to int32
internally.
aggregation_weights: A list of rank 1 Tensors containing per sample --
i.e. per (training example, feature) -- aggregation weights. Both float32
and float64 are allowed and will be converted to float32 internally.
device_ordinal: The TPU device to use. Should be >= 0 and less than the
number of TPU cores in the task on which the node is placed.
combiners: A list of string scalars, one for each embedding table that
specify how to normalize the embedding activations after weighted
summation. Supported combiners are 'mean', 'sum', or 'sqrtn'. It is
invalid to have the sum of the weights be 0 for 'mean' or the sum of the
squared weights be 0 for 'sqrtn'. If combiners isn't passed, the default
is to use 'sum' for all tables (optional).
mode_override: A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified',
'inference', 'training', 'backward_pass_only'}. When set to
'unspecified', the mode set in TPUEmbeddingConfiguration is used,
otherwise mode_override is used (optional).
name: A name for the operation (optional).
Returns:
An EnqueueTPUEmbeddingSparseBatch operation.
"""
if mode_override is None:
mode_override = "unspecified"
return gen_tpu_ops.enqueue_tpu_embedding_sparse_batch(
sample_indices=sample_indices,
embedding_indices=embedding_indices,
aggregation_weights=aggregation_weights,
device_ordinal=device_ordinal,
combiners=combiners,
mode_override=mode_override,
name=name)
enqueue_tpu_embedding_sparse_batch.__doc__ = (
gen_tpu_ops.enqueue_tpu_embedding_sparse_batch.__doc__)
# pylint: disable=protected-access
def enqueue_tpu_embedding_sparse_tensor_batch(sample_indices,
embedding_indices,
aggregation_weights,
table_ids,
device_ordinal,
max_sequence_lengths=None,
combiners=None,
mode_override=None,
name=None):
"""A placeholder op for enqueueing embedding IDs to the TPU.
Args:
sample_indices: A list of rank 2 Tensors specifying the training example
to which the corresponding embedding_indices and aggregation_weights
values belong. It corresponds to sp_ids.indices in
embedding_lookup_sparse(). If the size of its first dimension is 0, we
assume each embedding_indices belongs to a different sample. Both int32
and int64 are allowed and will be converted to int32 internally.
embedding_indices: A list of rank 1 Tensors, indices into the embedding
tables. It corresponds to sp_ids.values in embedding_lookup_sparse(). Both
int32 and int64 are allowed and will be converted to int32 internally.
aggregation_weights: A list of rank 1 Tensors containing per training
example aggregation weights. It corresponds to sp_weights.values in
embedding_lookup_sparse(). If the size of its first dimension is 0, we
assume all weights are 1. Both float32 and float64 are allowed and will
be converted to float32 internally.
table_ids: A list of integers specifying the identifier of the embedding
table (offset of TableDescriptor in the TPUEmbeddingConfiguration) to
lookup the corresponding input. The ith input is looked up using
table_ids[i]. The size of the table_ids list must be equal to that of
sample_indices, embedding_indices and aggregation_weights.
device_ordinal: The TPU device to use. Should be >= 0 and less than the
number of TPU cores in the task on which the node is placed.
max_sequence_lengths: A list of integers, the size of which is equal to
sample_indices. If equal to 0, the corresponding feature is considered to
be a non-sequence feature, If greater than 0, the corresponding feature is
a sequence feature with the given maximal length. If None, then we assume
a list of all zeroes.
combiners: A list of string scalars, one for each embedding table that
specify how to normalize the embedding activations after weighted
summation. Supported combiners are 'mean', 'sum', or 'sqrtn'. It is
invalid to have the sum of the weights be 0 for 'mean' or the sum of the
squared weights be 0 for 'sqrtn'. If combiners isn't passed, the default
is to use 'sum' for all tables (optional).
mode_override: A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified',
'inference', 'training', 'backward_pass_only'}. When set to
'unspecified', the mode set in TPUEmbeddingConfiguration is used,
otherwise mode_override is used (optional).
name: A name for the operation (optional).
Returns:
An EnqueueTPUEmbeddingSparseTensorBatch operation.
"""
if mode_override is None:
mode_override = "unspecified"
return gen_tpu_ops.enqueue_tpu_embedding_sparse_tensor_batch(
sample_indices=sample_indices,
embedding_indices=embedding_indices,
aggregation_weights=aggregation_weights,
table_ids=table_ids,
device_ordinal=device_ordinal,
max_sequence_lengths=max_sequence_lengths,
combiners=combiners,
mode_override=mode_override,
name=name)
enqueue_tpu_embedding_sparse_tensor_batch.__doc__ = (
gen_tpu_ops.enqueue_tpu_embedding_sparse_tensor_batch.__doc__)
|
|
# -*- coding: utf-8 -*-
"""ZeroMQ implementations of the Plaso queue interface."""
from __future__ import unicode_literals
import abc
import errno
import threading
import time
# The 'Queue' module was renamed to 'queue' in Python 3
try:
import Queue # pylint: disable=import-error
except ImportError:
import queue as Queue # pylint: disable=import-error
import zmq
from plaso.engine import logger
from plaso.engine import plaso_queue
from plaso.lib import errors
# pylint: disable=no-member
class ZeroMQQueue(plaso_queue.Queue):
"""Interface for a ZeroMQ backed queue.
Attributes:
name (str): name to identify the queue.
port (int): TCP port that the queue is connected or bound to. If the queue
is not yet bound or connected to a port, this value will be None.
timeout_seconds (int): number of seconds that calls to PopItem and PushItem
may block for, before returning queue.QueueEmpty.
"""
_SOCKET_ADDRESS = 'tcp://127.0.0.1'
_SOCKET_TYPE = None
_ZMQ_SOCKET_SEND_TIMEOUT_MILLISECONDS = 1500
_ZMQ_SOCKET_RECEIVE_TIMEOUT_MILLISECONDS = 1500
SOCKET_CONNECTION_BIND = 1
SOCKET_CONNECTION_CONNECT = 2
SOCKET_CONNECTION_TYPE = None
def __init__(
self, delay_open=True, linger_seconds=10, maximum_items=1000,
name='Unnamed', port=None, timeout_seconds=5):
"""Initializes a ZeroMQ backed queue.
Args:
delay_open (Optional[bool]): whether a ZeroMQ socket should be created
the first time the queue is pushed to or popped from, rather than at
queue object initialization. This is useful if a queue needs to be
passed to a child process from a parent process.
linger_seconds (Optional[int]): number of seconds that the underlying
ZeroMQ socket can remain open after the queue has been closed,
to allow queued items to be transferred to other ZeroMQ sockets.
maximum_items (Optional[int]): maximum number of items to queue on the
ZeroMQ socket. ZeroMQ refers to this value as "high water mark" or
"hwm". Note that this limit only applies at one "end" of the queue.
The default of 1000 is the ZeroMQ default value.
name (Optional[str]): Optional name to identify the queue.
port (Optional[int]): The TCP port to use for the queue. The default is
None, which indicates that the queue should choose a random port to
bind to.
timeout_seconds (Optional[int]): number of seconds that calls to PopItem
and PushItem may block for, before returning queue.QueueEmpty.
Raises:
ValueError: if the queue is configured to connect to an endpoint,
but no port is specified.
"""
if (self.SOCKET_CONNECTION_TYPE == self.SOCKET_CONNECTION_CONNECT
and not port):
raise ValueError('No port specified to connect to.')
super(ZeroMQQueue, self).__init__()
self._closed_event = None
self._high_water_mark = maximum_items
self._linger_seconds = linger_seconds
self._terminate_event = None
self._zmq_context = None
self._zmq_socket = None
self.name = name
self.port = port
self.timeout_seconds = timeout_seconds
if not delay_open:
self._CreateZMQSocket()
def _SendItem(self, zmq_socket, item, block=True):
"""Attempts to send an item to a ZeroMQ socket.
Args:
zmq_socket (zmq.Socket): used to the send the item.
item (object): sent on the queue. Will be pickled prior to sending.
block (Optional[bool]): whether the push should be performed in blocking
or non-blocking mode.
Returns:
bool: whether the item was sent successfully.
"""
try:
logger.debug('{0:s} sending item'.format(self.name))
if block:
zmq_socket.send_pyobj(item)
else:
zmq_socket.send_pyobj(item, zmq.DONTWAIT)
logger.debug('{0:s} sent item'.format(self.name))
return True
except zmq.error.Again:
logger.debug('{0:s} could not send an item'.format(self.name))
except zmq.error.ZMQError as exception:
if exception.errno == errno.EINTR:
logger.error(
'ZMQ syscall interrupted in {0:s}.'.format(
self.name))
return False
def _ReceiveItemOnActivity(self, zmq_socket):
"""Attempts to receive an item from a ZeroMQ socket.
Args:
zmq_socket (zmq.Socket): used to the receive the item.
Returns:
object: item from the socket.
Raises:
QueueEmpty: if no item could be received within the timeout.
zmq.error.ZMQError: if an error occurs in ZeroMQ
"""
events = zmq_socket.poll(
self._ZMQ_SOCKET_RECEIVE_TIMEOUT_MILLISECONDS)
if events:
try:
received_object = self._zmq_socket.recv_pyobj()
return received_object
except zmq.error.Again:
logger.error(
'{0:s}. Failed to receive item in time.'.format(
self.name))
raise
except zmq.error.ZMQError as exception:
if exception.errno == errno.EINTR:
logger.error(
'ZMQ syscall interrupted in {0:s}. Queue aborting.'.format(
self.name))
raise
raise errors.QueueEmpty
def _SetSocketTimeouts(self):
"""Sets the timeouts for socket send and receive."""
# Note that timeout must be an integer value. If timeout is a float
# it appears that zmq will not enforce the timeout.
timeout = int(self.timeout_seconds * 1000)
receive_timeout = min(
self._ZMQ_SOCKET_RECEIVE_TIMEOUT_MILLISECONDS, timeout)
send_timeout = min(self._ZMQ_SOCKET_SEND_TIMEOUT_MILLISECONDS, timeout)
self._zmq_socket.setsockopt(zmq.RCVTIMEO, receive_timeout)
self._zmq_socket.setsockopt(zmq.SNDTIMEO, send_timeout)
def _SetSocketHighWaterMark(self):
"""Sets the high water mark for the socket.
This number is the maximum number of items that will be queued in the socket
on this end of the queue.
"""
self._zmq_socket.hwm = self._high_water_mark
def _CreateZMQSocket(self):
"""Creates a ZeroMQ socket."""
logger.debug('Creating socket for {0:s}'.format(self.name))
if not self._zmq_context:
self._zmq_context = zmq.Context()
# The terminate and close threading events need to be created when the
    # socket is opened. Threading events are unpickleable objects and cannot
    # be passed in multiprocessing on Windows.
if not self._terminate_event:
self._terminate_event = threading.Event()
if not self._closed_event:
self._closed_event = threading.Event()
if self._zmq_socket:
logger.debug('Closing old socket for {0:s}'.format(self.name))
self._zmq_socket.close()
self._zmq_socket = None
self._zmq_socket = self._zmq_context.socket(self._SOCKET_TYPE)
self._SetSocketTimeouts()
self._SetSocketHighWaterMark()
if self.port:
address = '{0:s}:{1:d}'.format(self._SOCKET_ADDRESS, self.port)
if self.SOCKET_CONNECTION_TYPE == self.SOCKET_CONNECTION_CONNECT:
self._zmq_socket.connect(address)
logger.debug('{0:s} connected to {1:s}'.format(self.name, address))
else:
self._zmq_socket.bind(address)
logger.debug(
'{0:s} bound to specified port {1:s}'.format(self.name, address))
else:
self.port = self._zmq_socket.bind_to_random_port(self._SOCKET_ADDRESS)
logger.debug(
'{0:s} bound to random port {1:d}'.format(self.name, self.port))
def Open(self):
"""Opens this queue, causing the creation of a ZeroMQ socket.
Raises:
QueueAlreadyStarted: if the queue is already started, and a socket already
exists.
"""
if self._zmq_socket:
raise errors.QueueAlreadyStarted()
self._CreateZMQSocket()
def Close(self, abort=False):
"""Closes the queue.
Args:
abort (Optional[bool]): whether the Close is the result of an abort
condition. If True, queue contents may be lost.
Raises:
QueueAlreadyClosed: if the queue is not started, or has already been
closed.
RuntimeError: if closed or terminate event is missing.
"""
if not self._closed_event or not self._terminate_event:
raise RuntimeError('Missing closed or terminate event.')
if not abort and self._closed_event.is_set():
raise errors.QueueAlreadyClosed()
self._closed_event.set()
if abort:
if not self._closed_event.is_set():
logger.warning(
'{0:s} queue aborting. Contents may be lost.'.format(self.name))
self._linger_seconds = 0
      # We can't determine whether there might be an operation being performed
# on the socket in a separate method or thread, so we'll signal that any
# such operation should cease.
self._terminate_event.set()
else:
logger.debug(
'{0:s} queue closing, will linger for up to {1:d} seconds'.format(
self.name, self._linger_seconds))
def IsBound(self):
"""Checks if the queue is bound to a port."""
return (self.SOCKET_CONNECTION_TYPE == self.SOCKET_CONNECTION_BIND and
self.port is not None)
def IsConnected(self):
"""Checks if the queue is connected to a port."""
return (self.SOCKET_CONNECTION_TYPE == self.SOCKET_CONNECTION_CONNECT and
self.port is not None)
def IsEmpty(self):
"""Checks if the queue is empty.
ZeroMQ queues don't have a concept of "empty" - there could always be
messages on the queue that a producer or consumer is unaware of. Thus,
the queue is never empty, so we return False. Note that it is possible that
    a queue is unable to pop an item within the timeout, which will
cause PopItem to raise a QueueEmpty exception, but this is a different
condition.
Returns:
      bool: False, to indicate that the queue isn't empty.
"""
return False
@abc.abstractmethod
def PushItem(self, item, block=True):
"""Pushes an item on to the queue.
Args:
item (object): item to push on the queue.
block (Optional[bool]): whether the push should be performed in blocking
or non-blocking mode.
Raises:
QueueAlreadyClosed: if the queue is closed.
"""
# pylint: disable=redundant-returns-doc
@abc.abstractmethod
def PopItem(self):
"""Pops an item off the queue.
Returns:
object: item from the queue.
Raises:
QueueEmpty: if the queue is empty, and no item could be popped within the
queue timeout.
"""
class ZeroMQPullQueue(ZeroMQQueue):
"""Parent class for Plaso queues backed by ZeroMQ PULL sockets.
This class should not be instantiated directly, a subclass should be
instantiated instead.
Instances of this class or subclasses may only be used to pop items, not to
push.
"""
_SOCKET_TYPE = zmq.PULL
def PopItem(self):
"""Pops an item off the queue.
If no ZeroMQ socket has been created, one will be created the first
time this method is called.
Returns:
object: item from the queue.
Raises:
KeyboardInterrupt: if the process is sent a KeyboardInterrupt while
popping an item.
QueueEmpty: if the queue is empty, and no item could be popped within the
queue timeout.
RuntimeError: if closed or terminate event is missing.
zmq.error.ZMQError: if a ZeroMQ error occurs.
"""
if not self._zmq_socket:
self._CreateZMQSocket()
if not self._closed_event or not self._terminate_event:
raise RuntimeError('Missing closed or terminate event.')
logger.debug(
'Pop on {0:s} queue, port {1:d}'.format(self.name, self.port))
last_retry_timestamp = time.time() + self.timeout_seconds
while not self._closed_event.is_set() or not self._terminate_event.is_set():
try:
return self._ReceiveItemOnActivity(self._zmq_socket)
except errors.QueueEmpty:
if time.time() > last_retry_timestamp:
raise
except KeyboardInterrupt:
self.Close(abort=True)
raise
def PushItem(self, item, block=True):
"""Pushes an item on to the queue.
Provided for compatibility with the API, but doesn't actually work.
Args:
item (object): item to push on the queue.
block (Optional[bool]): whether the push should be performed in blocking
or non-blocking mode.
Raises:
      WrongQueueType: As Push is not supported by this queue.
"""
raise errors.WrongQueueType()
class ZeroMQPullConnectQueue(ZeroMQPullQueue):
"""A Plaso queue backed by a ZeroMQ PULL socket that connects to a port.
This queue may only be used to pop items, not to push.
"""
SOCKET_CONNECTION_TYPE = ZeroMQQueue.SOCKET_CONNECTION_CONNECT
class ZeroMQPushQueue(ZeroMQQueue):
"""Parent class for Plaso queues backed by ZeroMQ PUSH sockets.
This class should not be instantiated directly, a subclass should be
instantiated instead.
Instances of this class or subclasses may only be used to push items, not to
pop.
"""
_SOCKET_TYPE = zmq.PUSH
def PopItem(self):
"""Pops an item of the queue.
Provided for compatibility with the API, but doesn't actually work.
Raises:
      WrongQueueType: As Pop is not supported by this queue.
"""
raise errors.WrongQueueType()
def PushItem(self, item, block=True):
"""Push an item on to the queue.
If no ZeroMQ socket has been created, one will be created the first time
this method is called.
Args:
item (object): item to push on the queue.
block (Optional[bool]): whether the push should be performed in blocking
or non-blocking mode.
Raises:
KeyboardInterrupt: if the process is sent a KeyboardInterrupt while
pushing an item.
QueueFull: if it was not possible to push the item to the queue
within the timeout.
RuntimeError: if terminate event is missing.
zmq.error.ZMQError: if a ZeroMQ specific error occurs.
"""
if not self._zmq_socket:
self._CreateZMQSocket()
if not self._terminate_event:
raise RuntimeError('Missing terminate event.')
logger.debug(
'Push on {0:s} queue, port {1:d}'.format(self.name, self.port))
last_retry_timestamp = time.time() + self.timeout_seconds
while not self._terminate_event.is_set():
try:
send_successful = self._SendItem(self._zmq_socket, item, block)
if send_successful:
break
if time.time() > last_retry_timestamp:
logger.error('{0:s} unable to push item, raising.'.format(
self.name))
raise errors.QueueFull
except KeyboardInterrupt:
self.Close(abort=True)
raise
class ZeroMQPushBindQueue(ZeroMQPushQueue):
"""A Plaso queue backed by a ZeroMQ PUSH socket that binds to a port.
This queue may only be used to push items, not to pop.
"""
SOCKET_CONNECTION_TYPE = ZeroMQQueue.SOCKET_CONNECTION_BIND
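# Illustrative sketch: how the bind/connect pair above can be wired together.
# In plaso the two ends normally live in different processes (the docstrings
# above describe delay_open for exactly that case); here both ends run in one
# process so the steps stay sequential. The queue names and the item value are
# illustrative only.
def _ExamplePushPullPairing():
  """Sketch of pairing ZeroMQPushBindQueue with ZeroMQPullConnectQueue."""
  # Producer side: bind a PUSH socket immediately; a random port is chosen.
  push_queue = ZeroMQPushBindQueue(name='example_push', delay_open=False)
  # Consumer side: connect a PULL socket to the port the producer bound to.
  # Across processes this object would be created with delay_open=True and
  # handed to the child process before its socket exists.
  pull_queue = ZeroMQPullConnectQueue(
      name='example_pull', delay_open=False, port=push_queue.port)
  push_queue.PushItem('an item')
  item = pull_queue.PopItem()
  pull_queue.Close()
  push_queue.Close()
  return item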
class ZeroMQRequestQueue(ZeroMQQueue):
"""Parent class for Plaso queues backed by ZeroMQ REQ sockets.
This class should not be instantiated directly, a subclass should be
instantiated instead.
Instances of this class or subclasses may only be used to pop items, not to
push.
"""
_SOCKET_TYPE = zmq.REQ
def PopItem(self):
"""Pops an item off the queue.
If no ZeroMQ socket has been created, one will be created the first
time this method is called.
Returns:
object: item from the queue.
Raises:
KeyboardInterrupt: if the process is sent a KeyboardInterrupt while
popping an item.
QueueEmpty: if the queue is empty, and no item could be popped within the
queue timeout.
RuntimeError: if terminate event is missing.
zmq.error.ZMQError: if an error occurs in ZeroMQ.
"""
if not self._zmq_socket:
self._CreateZMQSocket()
if not self._terminate_event:
raise RuntimeError('Missing terminate event.')
logger.debug('Pop on {0:s} queue, port {1:d}'.format(
self.name, self.port))
last_retry_time = time.time() + self.timeout_seconds
while not self._terminate_event.is_set():
try:
self._zmq_socket.send_pyobj(None)
break
except zmq.error.Again:
# The existing socket is now out of sync, so we need to open a new one.
self._CreateZMQSocket()
if time.time() > last_retry_time:
logger.warning('{0:s} timeout requesting item'.format(self.name))
raise errors.QueueEmpty
continue
while not self._terminate_event.is_set():
try:
return self._ReceiveItemOnActivity(self._zmq_socket)
except errors.QueueEmpty:
continue
except KeyboardInterrupt:
self.Close(abort=True)
raise
def PushItem(self, item, block=True):
"""Pushes an item on to the queue.
Provided for compatibility with the API, but doesn't actually work.
Args:
item (object): item to push on the queue.
block (Optional[bool]): whether the push should be performed in blocking
or non-blocking mode.
Raises:
      WrongQueueType: As Push is not supported by this queue.
"""
    raise errors.WrongQueueType()
class ZeroMQRequestConnectQueue(ZeroMQRequestQueue):
"""A Plaso queue backed by a ZeroMQ REQ socket that connects to a port.
This queue may only be used to pop items, not to push.
"""
SOCKET_CONNECTION_TYPE = ZeroMQQueue.SOCKET_CONNECTION_CONNECT
class ZeroMQBufferedQueue(ZeroMQQueue):
"""Parent class for buffered Plaso queues.
Buffered queues use a regular Python queue to store items that are pushed or
popped from the queue without blocking on underlying ZeroMQ operations.
This class should not be instantiated directly, a subclass should be
instantiated instead.
"""
def __init__(
self, buffer_timeout_seconds=2, buffer_max_size=10000, delay_open=True,
linger_seconds=10, maximum_items=1000, name='Unnamed', port=None,
timeout_seconds=5):
"""Initializes a buffered, ZeroMQ backed queue.
Args:
buffer_max_size (Optional[int]): maximum number of items to store in
the buffer, before or after they are sent/received via ZeroMQ.
      buffer_timeout_seconds (Optional[int]): number of seconds to wait when
doing a put or get to/from the internal buffer.
delay_open (Optional[bool]): whether a ZeroMQ socket should be created
the first time the queue is pushed to or popped from, rather than at
queue object initialization. This is useful if a queue needs to be
passed to a child process from a parent process.
linger_seconds (Optional[int]): number of seconds that the underlying
ZeroMQ socket can remain open after the queue object has been closed,
to allow queued items to be transferred to other ZeroMQ sockets.
maximum_items (Optional[int]): maximum number of items to queue on the
ZeroMQ socket. ZeroMQ refers to this value as "high water mark" or
"hwm". Note that this limit only applies at one "end" of the queue.
The default of 1000 is the ZeroMQ default value.
name (Optional[str]): name to identify the queue.
port (Optional[int]): The TCP port to use for the queue. None indicates
that the queue should choose a random port to bind to.
timeout_seconds (Optional[int]): number of seconds that calls to PopItem
          and PushItem may block for, before raising errors.QueueEmpty or
          errors.QueueFull.
"""
self._buffer_timeout_seconds = buffer_timeout_seconds
self._queue = Queue.Queue(maxsize=buffer_max_size)
self._zmq_thread = None
# We need to set up the internal buffer queue before we call super, so that
# if the call to super opens the ZMQSocket, the backing thread will work.
super(ZeroMQBufferedQueue, self).__init__(
delay_open=delay_open, linger_seconds=linger_seconds,
maximum_items=maximum_items, name=name, port=port,
timeout_seconds=timeout_seconds)
def _CreateZMQSocket(self):
"""Creates a ZeroMQ socket as well as a regular queue and a thread."""
super(ZeroMQBufferedQueue, self)._CreateZMQSocket()
if not self._zmq_thread:
thread_name = '{0:s}_zmq_responder'.format(self.name)
self._zmq_thread = threading.Thread(
target=self._ZeroMQResponder, args=[self._queue], name=thread_name)
self._zmq_thread.start()
@abc.abstractmethod
def _ZeroMQResponder(self, source_queue):
"""Listens for requests and replies to clients.
Args:
      source_queue (Queue.Queue): queue to pull items from.
"""
def Close(self, abort=False):
"""Closes the queue.
Args:
abort (Optional[bool]): whether the Close is the result of an abort
condition. If True, queue contents may be lost.
Raises:
QueueAlreadyClosed: if the queue is not started, or has already been
closed.
RuntimeError: if closed or terminate event is missing.
"""
if not self._closed_event or not self._terminate_event:
raise RuntimeError('Missing closed or terminate event.')
if not abort and self._closed_event.is_set():
raise errors.QueueAlreadyClosed()
self._closed_event.set()
if abort:
if not self._closed_event.is_set():
logger.warning(
'{0:s} queue aborting. Contents may be lost.'.format(self.name))
      # We can't determine whether there might be an operation being performed
# on the socket in a separate method or thread, so we'll signal that any
# such operation should cease.
self._terminate_event.set()
self._linger_seconds = 0
if self._zmq_thread:
logger.debug('[{0:s}] Waiting for thread to exit.'.format(self.name))
self._zmq_thread.join(timeout=self.timeout_seconds)
if self._zmq_thread.is_alive():
logger.error((
'{0:s} ZMQ responder thread did not exit within timeout').format(
self.name))
else:
logger.debug(
'{0:s} queue closing, will linger for up to {1:d} seconds'.format(
self.name, self._linger_seconds))
def Empty(self):
"""Removes all items from the internal buffer."""
try:
while True:
self._queue.get(False)
except Queue.Empty:
pass
class ZeroMQBufferedReplyQueue(ZeroMQBufferedQueue):
"""Parent class for buffered Plaso queues backed by ZeroMQ REP sockets.
This class should not be instantiated directly, a subclass should be
instantiated instead.
Instances of this class or subclasses may only be used to push items, not to
pop.
"""
_ZMQ_SOCKET_RECEIVE_TIMEOUT_MILLISECONDS = 4000
_ZMQ_SOCKET_SEND_TIMEOUT_MILLISECONDS = 2000
_SOCKET_TYPE = zmq.REP
def _ZeroMQResponder(self, source_queue):
"""Listens for requests and replies to clients.
Args:
      source_queue (Queue.Queue): queue to pull items from.
Raises:
RuntimeError: if closed or terminate event is missing.
"""
if not self._closed_event or not self._terminate_event:
raise RuntimeError('Missing closed or terminate event.')
logger.debug('{0:s} responder thread started'.format(self.name))
item = None
while not self._terminate_event.is_set():
if not item:
try:
if self._closed_event.is_set():
item = source_queue.get_nowait()
else:
item = source_queue.get(True, self._buffer_timeout_seconds)
except Queue.Empty:
if self._closed_event.is_set():
break
continue
try:
# We need to receive a request before we can reply with the item.
self._ReceiveItemOnActivity(self._zmq_socket)
except errors.QueueEmpty:
if self._closed_event.is_set() and self._queue.empty():
break
continue
sent_successfully = self._SendItem(self._zmq_socket, item)
item = None
if not sent_successfully:
logger.error('Queue {0:s} unable to send item.'.format(self.name))
break
logger.info('Queue {0:s} responder exiting.'.format(self.name))
self._zmq_socket.close(self._linger_seconds)
def PopItem(self):
"""Pops an item of the queue.
Provided for compatibility with the API, but doesn't actually work.
Raises:
WrongQueueType: As Pop is not supported by this queue.
"""
raise errors.WrongQueueType()
def PushItem(self, item, block=True):
"""Push an item on to the queue.
If no ZeroMQ socket has been created, one will be created the first time
this method is called.
Args:
item (object): item to push on the queue.
block (Optional[bool]): whether the push should be performed in blocking
or non-blocking mode.
Raises:
QueueAlreadyClosed: if the queue is closed.
QueueFull: if the internal buffer was full and it was not possible to
push the item to the buffer within the timeout.
RuntimeError: if closed event is missing.
"""
if not self._closed_event:
raise RuntimeError('Missing closed event.')
if self._closed_event.is_set():
raise errors.QueueAlreadyClosed()
if not self._zmq_socket:
self._CreateZMQSocket()
try:
if block:
self._queue.put(item, timeout=self.timeout_seconds)
else:
self._queue.put(item, block=False)
except Queue.Full as exception:
raise errors.QueueFull(exception)
class ZeroMQBufferedReplyBindQueue(ZeroMQBufferedReplyQueue):
"""A Plaso queue backed by a ZeroMQ REP socket that binds to a port.
  This queue may only be used to push items, not to pop.
"""
SOCKET_CONNECTION_TYPE = ZeroMQQueue.SOCKET_CONNECTION_BIND
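# Illustrative sketch: how a buffered reply (REP) queue can be paired with a
# request (REQ) queue, per the class docstrings above. The server side pushes
# items into the internal buffer; a worker pops them by issuing requests. The
# queue names and the item value are illustrative only.
def _ExampleReplyRequestPairing():
  """Sketch of pairing ZeroMQBufferedReplyBindQueue with a request queue."""
  # Server side: bind a REP socket now so the port is known; a background
  # thread (_ZeroMQResponder) answers requests with buffered items.
  reply_queue = ZeroMQBufferedReplyBindQueue(
      name='example_reply', delay_open=False)
  reply_queue.PushItem('a task')
  # Worker side (typically a separate process): request an item from the
  # port the server bound to.
  request_queue = ZeroMQRequestConnectQueue(
      name='example_request', delay_open=True, port=reply_queue.port)
  item = request_queue.PopItem()
  request_queue.Close()
  reply_queue.Close()
  return item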
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'SpeakerConference'
db.create_table(u'p3_speakerconference', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('speaker', self.gf('django.db.models.fields.related.OneToOneField')(related_name='p3_speaker', unique=True, to=orm['conference.Speaker'])),
('first_time', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'p3', ['SpeakerConference'])
# Adding model 'TicketConference'
db.create_table(u'p3_ticketconference', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('ticket', self.gf('django.db.models.fields.related.OneToOneField')(related_name='p3_conference', unique=True, to=orm['conference.Ticket'])),
('shirt_size', self.gf('django.db.models.fields.CharField')(default='l', max_length=4)),
('python_experience', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('diet', self.gf('django.db.models.fields.CharField')(default='omnivorous', max_length=10)),
('tagline', self.gf('django.db.models.fields.CharField')(max_length=60, blank=True)),
('days', self.gf('django.db.models.fields.TextField')(blank=True)),
('badge_image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)),
('assigned_to', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
))
db.send_create_signal(u'p3', ['TicketConference'])
# Adding model 'TicketSIM'
db.create_table(u'p3_ticketsim', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('ticket', self.gf('django.db.models.fields.related.OneToOneField')(related_name='p3_conference_sim', unique=True, to=orm['conference.Ticket'])),
('document', self.gf('django.db.models.fields.files.FileField')(max_length=100, blank=True)),
('sim_type', self.gf('django.db.models.fields.CharField')(default='std', max_length=5)),
('plan_type', self.gf('django.db.models.fields.CharField')(default='std', max_length=3)),
('number', self.gf('django.db.models.fields.CharField')(max_length=20, blank=True)),
))
db.send_create_signal(u'p3', ['TicketSIM'])
# Adding model 'HotelRoom'
db.create_table(u'p3_hotelroom', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('conference', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['conference.Conference'])),
('room_type', self.gf('django.db.models.fields.CharField')(max_length=2)),
('quantity', self.gf('django.db.models.fields.PositiveIntegerField')()),
('amount', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'p3', ['HotelRoom'])
# Adding unique constraint on 'HotelRoom', fields ['conference', 'room_type']
db.create_unique(u'p3_hotelroom', ['conference_id', 'room_type'])
# Adding model 'TicketRoom'
db.create_table(u'p3_ticketroom', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('ticket', self.gf('django.db.models.fields.related.OneToOneField')(related_name='p3_conference_room', unique=True, to=orm['conference.Ticket'])),
('document', self.gf('django.db.models.fields.files.FileField')(max_length=100, blank=True)),
('room_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['p3.HotelRoom'])),
('ticket_type', self.gf('django.db.models.fields.CharField')(max_length=1)),
('checkin', self.gf('django.db.models.fields.DateField')(db_index=True)),
('checkout', self.gf('django.db.models.fields.DateField')(db_index=True)),
('unused', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'p3', ['TicketRoom'])
# Adding model 'Donation'
db.create_table(u'p3_donation', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['assopy.User'])),
('date', self.gf('django.db.models.fields.DateField')()),
('amount', self.gf('django.db.models.fields.DecimalField')(max_digits=6, decimal_places=2)),
('message', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal(u'p3', ['Donation'])
# Adding model 'Sprint'
db.create_table(u'p3_sprint', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['assopy.User'])),
('conference', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['conference.Conference'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=150)),
('abstract', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal(u'p3', ['Sprint'])
# Adding model 'SprintPresence'
db.create_table(u'p3_sprintpresence', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('sprint', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['p3.Sprint'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['assopy.User'])),
))
db.send_create_signal(u'p3', ['SprintPresence'])
# Adding model 'P3Profile'
db.create_table(u'p3_p3profile', (
('profile', self.gf('django.db.models.fields.related.OneToOneField')(related_name='p3_profile', unique=True, primary_key=True, to=orm['conference.AttendeeProfile'])),
('tagline', self.gf('django.db.models.fields.CharField')(max_length=60, blank=True)),
('twitter', self.gf('django.db.models.fields.CharField')(max_length=80, blank=True)),
('image_gravatar', self.gf('django.db.models.fields.BooleanField')(default=False)),
('image_url', self.gf('django.db.models.fields.URLField')(max_length=500)),
('country', self.gf('django.db.models.fields.CharField')(default='', max_length=2, db_index=True, blank=True)),
('spam_recruiting', self.gf('django.db.models.fields.BooleanField')(default=False)),
('spam_user_message', self.gf('django.db.models.fields.BooleanField')(default=False)),
('spam_sms', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'p3', ['P3Profile'])
def backwards(self, orm):
# Removing unique constraint on 'HotelRoom', fields ['conference', 'room_type']
db.delete_unique(u'p3_hotelroom', ['conference_id', 'room_type'])
# Deleting model 'SpeakerConference'
db.delete_table(u'p3_speakerconference')
# Deleting model 'TicketConference'
db.delete_table(u'p3_ticketconference')
# Deleting model 'TicketSIM'
db.delete_table(u'p3_ticketsim')
# Deleting model 'HotelRoom'
db.delete_table(u'p3_hotelroom')
# Deleting model 'TicketRoom'
db.delete_table(u'p3_ticketroom')
# Deleting model 'Donation'
db.delete_table(u'p3_donation')
# Deleting model 'Sprint'
db.delete_table(u'p3_sprint')
# Deleting model 'SprintPresence'
db.delete_table(u'p3_sprintpresence')
# Deleting model 'P3Profile'
db.delete_table(u'p3_p3profile')
models = {
u'assopy.country': {
'Meta': {'ordering': "['name']", 'object_name': 'Country'},
'iso': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'iso3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'numcode': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'printable_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'vat_company': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'vat_company_verify': ('django.db.models.fields.CharField', [], {'default': "'-'", 'max_length': '1'}),
'vat_person': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'assopy.user': {
'Meta': {'object_name': 'User'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'assopy_id': ('django.db.models.fields.CharField', [], {'max_length': '22', 'unique': 'True', 'null': 'True'}),
'card_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'cf_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['assopy.Country']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '36', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'assopy_user'", 'unique': 'True', 'to': u"orm['auth.User']"}),
'vat_number': ('django.db.models.fields.CharField', [], {'max_length': '22', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'conference.attendeeprofile': {
'Meta': {'object_name': 'AttendeeProfile'},
'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'company': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'company_homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'job_title': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'personal_homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '6'}),
'visibility': ('django.db.models.fields.CharField', [], {'default': "'x'", 'max_length': '1'})
},
u'conference.conference': {
'Meta': {'object_name': 'Conference'},
'cfp_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'cfp_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'primary_key': 'True'}),
'conference_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'conference_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'voting_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'voting_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'conference.conferencetag': {
'Meta': {'object_name': 'ConferenceTag'},
'category': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'conference.conferencetaggeditem': {
'Meta': {'object_name': 'ConferenceTaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'conference_conferencetaggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'conference_conferencetaggeditem_items'", 'to': u"orm['conference.ConferenceTag']"})
},
u'conference.fare': {
'Meta': {'unique_together': "(('conference', 'code'),)", 'object_name': 'Fare'},
'blob': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'conference': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'description': ('django.db.models.fields.TextField', [], {}),
'end_validity': ('django.db.models.fields.DateField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'payment_type': ('django.db.models.fields.CharField', [], {'default': "'p'", 'max_length': '1'}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '2'}),
'recipient_type': ('django.db.models.fields.CharField', [], {'default': "'p'", 'max_length': '1'}),
'start_validity': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'ticket_type': ('django.db.models.fields.CharField', [], {'default': "'conference'", 'max_length': '10', 'db_index': 'True'})
},
u'conference.multilingualcontent': {
'Meta': {'object_name': 'MultilingualContent'},
'body': ('django.db.models.fields.TextField', [], {}),
'content': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
u'conference.speaker': {
'Meta': {'object_name': 'Speaker'},
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
u'conference.ticket': {
'Meta': {'object_name': 'Ticket'},
'fare': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['conference.Fare']"}),
'frozen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'ticket_type': ('django.db.models.fields.CharField', [], {'default': "'standard'", 'max_length': '8'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'p3.donation': {
'Meta': {'object_name': 'Donation'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '2'}),
'date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['assopy.User']"})
},
u'p3.hotelroom': {
'Meta': {'unique_together': "(('conference', 'room_type'),)", 'object_name': 'HotelRoom'},
'amount': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'conference': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['conference.Conference']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {}),
'room_type': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'p3.p3profile': {
'Meta': {'object_name': 'P3Profile'},
'country': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2', 'db_index': 'True', 'blank': 'True'}),
'image_gravatar': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '500'}),
'profile': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'p3_profile'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['conference.AttendeeProfile']"}),
'spam_recruiting': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'spam_sms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'spam_user_message': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tagline': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'})
},
u'p3.speakerconference': {
'Meta': {'object_name': 'SpeakerConference'},
'first_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'speaker': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'p3_speaker'", 'unique': 'True', 'to': u"orm['conference.Speaker']"})
},
u'p3.sprint': {
'Meta': {'object_name': 'Sprint'},
'abstract': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'conference': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['conference.Conference']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['assopy.User']"})
},
u'p3.sprintpresence': {
'Meta': {'object_name': 'SprintPresence'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sprint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['p3.Sprint']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['assopy.User']"})
},
u'p3.ticketconference': {
'Meta': {'object_name': 'TicketConference'},
'assigned_to': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'badge_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'days': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'diet': ('django.db.models.fields.CharField', [], {'default': "'omnivorous'", 'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'python_experience': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'shirt_size': ('django.db.models.fields.CharField', [], {'default': "'l'", 'max_length': '4'}),
'tagline': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'ticket': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'p3_conference'", 'unique': 'True', 'to': u"orm['conference.Ticket']"})
},
u'p3.ticketroom': {
'Meta': {'object_name': 'TicketRoom'},
'checkin': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'checkout': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'document': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'room_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['p3.HotelRoom']"}),
'ticket': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'p3_conference_room'", 'unique': 'True', 'to': u"orm['conference.Ticket']"}),
'ticket_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'unused': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'p3.ticketsim': {
'Meta': {'object_name': 'TicketSIM'},
'document': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'plan_type': ('django.db.models.fields.CharField', [], {'default': "'std'", 'max_length': '3'}),
'sim_type': ('django.db.models.fields.CharField', [], {'default': "'std'", 'max_length': '5'}),
'ticket': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'p3_conference_sim'", 'unique': 'True', 'to': u"orm['conference.Ticket']"})
}
}
complete_apps = ['p3']
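# Note: this is a South schema migration; it is applied with
# './manage.py migrate p3' rather than imported directly (assuming South is
# installed and the 'p3' app is listed in INSTALLED_APPS).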
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test class for iRMC Management Driver
"""
import os
import xml.etree.ElementTree as ET
import mock
from ironic.common import boot_devices
from ironic.common import driver_factory
from ironic.common import exception
from ironic.conductor import task_manager
from ironic.drivers.modules import ipmitool
from ironic.drivers.modules.irmc import common as irmc_common
from ironic.drivers.modules.irmc import management as irmc_management
from ironic.drivers import utils as driver_utils
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
from ironic.tests.drivers import third_party_driver_mock_specs as mock_specs
from ironic.tests.objects import utils as obj_utils
INFO_DICT = db_utils.get_test_irmc_info()
class IRMCManagementTestCase(db_base.DbTestCase):
def setUp(self):
super(IRMCManagementTestCase, self).setUp()
driver_info = INFO_DICT
mgr_utils.mock_the_extension_manager(driver="fake_irmc")
self.driver = driver_factory.get_driver("fake_irmc")
self.node = obj_utils.create_test_node(self.context,
driver='fake_irmc',
driver_info=driver_info)
self.info = irmc_common.parse_driver_info(self.node)
def test_get_properties(self):
expected = irmc_common.COMMON_PROPERTIES
expected.update(ipmitool.COMMON_PROPERTIES)
expected.update(ipmitool.CONSOLE_PROPERTIES)
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertEqual(expected, task.driver.get_properties())
@mock.patch.object(irmc_common, 'parse_driver_info', autospec=True)
def test_validate(self, mock_drvinfo):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.management.validate(task)
mock_drvinfo.assert_called_once_with(task.node)
@mock.patch.object(irmc_common, 'parse_driver_info', autospec=True)
def test_validate_fail(self, mock_drvinfo):
side_effect = exception.InvalidParameterValue("Invalid Input")
mock_drvinfo.side_effect = side_effect
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.management.validate,
task)
def test_management_interface_get_supported_boot_devices(self):
with task_manager.acquire(self.context, self.node.uuid) as task:
expected = [boot_devices.PXE, boot_devices.DISK,
boot_devices.CDROM, boot_devices.BIOS,
boot_devices.SAFE]
self.assertEqual(sorted(expected), sorted(task.driver.management.
get_supported_boot_devices()))
@mock.patch.object(ipmitool.IPMIManagement, 'set_boot_device',
autospec=True)
def test_management_interface_set_boot_device_no_mode_ok(
self,
set_boot_device_mock):
"""no boot mode specified."""
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.management.set_boot_device(task, boot_devices.PXE)
set_boot_device_mock.assert_called_once_with(
task.driver.management, task,
boot_devices.PXE,
False)
@mock.patch.object(ipmitool.IPMIManagement, 'set_boot_device',
autospec=True)
def test_management_interface_set_boot_device_bios_ok(
self,
set_boot_device_mock):
"""bios mode specified."""
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_utils.add_node_capability(task, 'boot_mode', 'bios')
task.driver.management.set_boot_device(task, boot_devices.PXE)
set_boot_device_mock.assert_called_once_with(
task.driver.management, task,
boot_devices.PXE,
False)
@mock.patch.object(irmc_management.ipmitool, "send_raw", autospec=True)
def _test_management_interface_set_boot_device_uefi_ok(self, params,
expected_raw_code,
send_raw_mock):
send_raw_mock.return_value = [None, None]
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.properties['capabilities'] = ''
driver_utils.add_node_capability(task, 'boot_mode', 'uefi')
self.driver.management.set_boot_device(task, **params)
send_raw_mock.assert_has_calls([
mock.call(task, "0x00 0x08 0x03 0x08"),
mock.call(task, expected_raw_code)])
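    # The expected raw values asserted below follow the IPMI "Set System Boot
    # Options" boot-flags encoding: 0xa0 marks the flags valid for the next
    # boot only (with the EFI bit set), 0xe0 additionally marks them
    # persistent, and the following byte selects the device (0x04 PXE,
    # 0x08 disk, 0x0c safe mode, 0x14 CD-ROM, 0x18 BIOS setup).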
def test_management_interface_set_boot_device_uefi_ok_pxe(self):
params = {'device': boot_devices.PXE, 'persistent': False}
self._test_management_interface_set_boot_device_uefi_ok(
params,
"0x00 0x08 0x05 0xa0 0x04 0x00 0x00 0x00")
params['persistent'] = True
self._test_management_interface_set_boot_device_uefi_ok(
params,
"0x00 0x08 0x05 0xe0 0x04 0x00 0x00 0x00")
def test_management_interface_set_boot_device_uefi_ok_disk(self):
params = {'device': boot_devices.DISK, 'persistent': False}
self._test_management_interface_set_boot_device_uefi_ok(
params,
"0x00 0x08 0x05 0xa0 0x08 0x00 0x00 0x00")
params['persistent'] = True
self._test_management_interface_set_boot_device_uefi_ok(
params,
"0x00 0x08 0x05 0xe0 0x08 0x00 0x00 0x00")
def test_management_interface_set_boot_device_uefi_ok_cdrom(self):
params = {'device': boot_devices.CDROM, 'persistent': False}
self._test_management_interface_set_boot_device_uefi_ok(
params,
"0x00 0x08 0x05 0xa0 0x14 0x00 0x00 0x00")
params['persistent'] = True
self._test_management_interface_set_boot_device_uefi_ok(
params,
"0x00 0x08 0x05 0xe0 0x14 0x00 0x00 0x00")
def test_management_interface_set_boot_device_uefi_ok_bios(self):
params = {'device': boot_devices.BIOS, 'persistent': False}
self._test_management_interface_set_boot_device_uefi_ok(
params,
"0x00 0x08 0x05 0xa0 0x18 0x00 0x00 0x00")
params['persistent'] = True
self._test_management_interface_set_boot_device_uefi_ok(
params,
"0x00 0x08 0x05 0xe0 0x18 0x00 0x00 0x00")
def test_management_interface_set_boot_device_uefi_ok_safe(self):
params = {'device': boot_devices.SAFE, 'persistent': False}
self._test_management_interface_set_boot_device_uefi_ok(
params,
"0x00 0x08 0x05 0xa0 0x0c 0x00 0x00 0x00")
params['persistent'] = True
self._test_management_interface_set_boot_device_uefi_ok(
params,
"0x00 0x08 0x05 0xe0 0x0c 0x00 0x00 0x00")
@mock.patch.object(irmc_management.ipmitool, "send_raw", autospec=True)
def test_management_interface_set_boot_device_uefi_ng(self,
send_raw_mock):
"""uefi mode, next boot only, unknown device."""
send_raw_mock.return_value = [None, None]
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_utils.add_node_capability(task, 'boot_mode', 'uefi')
self.assertRaises(exception.InvalidParameterValue,
self.driver.management.set_boot_device,
task,
"unknown")
@mock.patch.object(irmc_management, 'scci',
spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC)
@mock.patch.object(irmc_common, 'get_irmc_report', autospec=True)
def test_management_interface_get_sensors_data_scci_ok(self,
mock_get_irmc_report,
mock_scci):
"""'irmc_sensor_method' = 'scci' specified and OK data."""
with open(os.path.join(os.path.dirname(__file__),
'fake_sensors_data_ok.xml'), "r") as report:
fake_txt = report.read()
fake_xml = ET.fromstring(fake_txt)
mock_get_irmc_report.return_value = fake_xml
mock_scci.get_sensor_data.return_value = fake_xml.find(
"./System/SensorDataRecords")
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.driver_info['irmc_sensor_method'] = 'scci'
sensor_dict = self.driver.management.get_sensors_data(task)
expected = {
'Fan (4)': {
'FAN1 SYS (29)': {
'Units': 'RPM',
'Sensor ID': 'FAN1 SYS (29)',
'Sensor Reading': '600 RPM'
},
'FAN2 SYS (29)': {
'Units': 'None',
'Sensor ID': 'FAN2 SYS (29)',
'Sensor Reading': 'None None'
}
},
'Temperature (1)': {
'Systemboard 1 (7)': {
'Units': 'degree C',
'Sensor ID': 'Systemboard 1 (7)',
'Sensor Reading': '80 degree C'
},
'Ambient (55)': {
'Units': 'degree C',
'Sensor ID': 'Ambient (55)',
'Sensor Reading': '42 degree C'
}
}
}
self.assertEqual(expected, sensor_dict)
@mock.patch.object(irmc_management, 'scci',
spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC)
@mock.patch.object(irmc_common, 'get_irmc_report', autospec=True)
def test_management_interface_get_sensors_data_scci_ng(self,
mock_get_irmc_report,
mock_scci):
"""'irmc_sensor_method' = 'scci' specified and NG data."""
with open(os.path.join(os.path.dirname(__file__),
'fake_sensors_data_ng.xml'), "r") as report:
fake_txt = report.read()
fake_xml = ET.fromstring(fake_txt)
mock_get_irmc_report.return_value = fake_xml
mock_scci.get_sensor_data.return_value = fake_xml.find(
"./System/SensorDataRecords")
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.driver_info['irmc_sensor_method'] = 'scci'
sensor_dict = self.driver.management.get_sensors_data(task)
self.assertEqual(len(sensor_dict), 0)
@mock.patch.object(ipmitool.IPMIManagement, 'get_sensors_data',
autospec=True)
def test_management_interface_get_sensors_data_ipmitool_ok(
self,
get_sensors_data_mock):
"""'irmc_sensor_method' = 'ipmitool' specified."""
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.driver_info['irmc_sensor_method'] = 'ipmitool'
task.driver.management.get_sensors_data(task)
get_sensors_data_mock.assert_called_once_with(
task.driver.management, task)
@mock.patch.object(irmc_common, 'get_irmc_report', autospec=True)
def test_management_interface_get_sensors_data_exception1(
self,
get_irmc_report_mock):
"""'FailedToGetSensorData Exception."""
get_irmc_report_mock.side_effect = Exception("Report Error")
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.driver_info['irmc_sensor_method'] = 'scci'
e = self.assertRaises(exception.FailedToGetSensorData,
self.driver.management.get_sensors_data,
task)
self.assertEqual("Failed to get sensor data for node 1be26c0b-" +
"03f2-4d2e-ae87-c02d7f33c123. Error: Report Error",
str(e))
|
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""MB - the Meta-Build wrapper around GYP and GN
MB is a wrapper script for GYP and GN that can be used to generate build files
for sets of canned configurations and analyze them.
"""
from __future__ import print_function
import argparse
import ast
import errno
import json
import os
import pipes
import pprint
import re
import shutil
import sys
import subprocess
import tempfile
def main(args):
mbw = MetaBuildWrapper()
mbw.ParseArgs(args)
return mbw.args.func()
class MetaBuildWrapper(object):
def __init__(self):
p = os.path
d = os.path.dirname
self.chromium_src_dir = p.normpath(d(d(d(p.abspath(__file__)))))
self.default_config = p.join(self.chromium_src_dir, 'tools', 'mb',
'mb_config.pyl')
self.executable = sys.executable
self.platform = sys.platform
self.sep = os.sep
self.args = argparse.Namespace()
self.configs = {}
self.masters = {}
self.mixins = {}
self.private_configs = []
self.common_dev_configs = []
self.unsupported_configs = []
def ParseArgs(self, argv):
def AddCommonOptions(subp):
subp.add_argument('-b', '--builder',
help='builder name to look up config from')
subp.add_argument('-m', '--master',
help='master name to look up config from')
subp.add_argument('-c', '--config',
help='configuration to analyze')
subp.add_argument('-f', '--config-file', metavar='PATH',
default=self.default_config,
help='path to config file '
'(default is //tools/mb/mb_config.pyl)')
subp.add_argument('-g', '--goma-dir', default=self.ExpandUser('~/goma'),
help='path to goma directory (default is %(default)s).')
subp.add_argument('-n', '--dryrun', action='store_true',
help='Do a dry run (i.e., do nothing, just print '
'the commands that will run)')
subp.add_argument('-v', '--verbose', action='store_true',
help='verbose logging')
parser = argparse.ArgumentParser(prog='mb')
subps = parser.add_subparsers()
subp = subps.add_parser('analyze',
help='analyze whether changes to a set of files '
'will cause a set of binaries to be rebuilt.')
AddCommonOptions(subp)
subp.add_argument('path', nargs=1,
help='path build was generated into.')
subp.add_argument('input_path', nargs=1,
help='path to a file containing the input arguments '
'as a JSON object.')
subp.add_argument('output_path', nargs=1,
help='path to a file containing the output arguments '
'as a JSON object.')
subp.set_defaults(func=self.CmdAnalyze)
subp = subps.add_parser('gen',
help='generate a new set of build files')
AddCommonOptions(subp)
subp.add_argument('--swarming-targets-file',
help='save runtime dependencies for targets listed '
'in file.')
subp.add_argument('path', nargs=1,
help='path to generate build into')
subp.set_defaults(func=self.CmdGen)
subp = subps.add_parser('isolate',
                            help='generate the .isolate files for a given '
'binary')
AddCommonOptions(subp)
subp.add_argument('path', nargs=1,
help='path build was generated into')
subp.add_argument('target', nargs=1,
help='ninja target to generate the isolate for')
subp.set_defaults(func=self.CmdIsolate)
subp = subps.add_parser('lookup',
help='look up the command for a given config or '
'builder')
AddCommonOptions(subp)
subp.set_defaults(func=self.CmdLookup)
subp = subps.add_parser('run',
help='build and run the isolated version of a '
'binary')
AddCommonOptions(subp)
subp.add_argument('-j', '--jobs', dest='jobs', type=int,
help='Number of jobs to pass to ninja')
subp.add_argument('--no-build', dest='build', default=True,
action='store_false',
help='Do not build, just isolate and run')
subp.add_argument('path', nargs=1,
help='path to generate build into')
subp.add_argument('target', nargs=1,
help='ninja target to build and run')
subp.set_defaults(func=self.CmdRun)
subp = subps.add_parser('validate',
help='validate the config file')
subp.add_argument('-f', '--config-file', metavar='PATH',
default=self.default_config,
help='path to config file '
'(default is //tools/mb/mb_config.pyl)')
subp.set_defaults(func=self.CmdValidate)
subp = subps.add_parser('help',
help='Get help on a subcommand.')
subp.add_argument(nargs='?', action='store', dest='subcommand',
help='The command to get help for.')
subp.set_defaults(func=self.CmdHelp)
self.args = parser.parse_args(argv)
def CmdAnalyze(self):
vals = self.Lookup()
if vals['type'] == 'gn':
return self.RunGNAnalyze(vals)
else:
return self.RunGYPAnalyze(vals)
def CmdGen(self):
vals = self.Lookup()
self.ClobberIfNeeded(vals)
if vals['type'] == 'gn':
return self.RunGNGen(vals)
else:
return self.RunGYPGen(vals)
def CmdHelp(self):
if self.args.subcommand:
self.ParseArgs([self.args.subcommand, '--help'])
else:
self.ParseArgs(['--help'])
def CmdIsolate(self):
vals = self.GetConfig()
if not vals:
return 1
if vals['type'] == 'gn':
return self.RunGNIsolate(vals)
else:
return self.Build('%s_run' % self.args.target[0])
def CmdLookup(self):
vals = self.Lookup()
if vals['type'] == 'gn':
cmd = self.GNCmd('gen', '_path_', vals['gn_args'])
env = None
else:
cmd, env = self.GYPCmd('_path_', vals)
self.PrintCmd(cmd, env)
return 0
def CmdRun(self):
vals = self.GetConfig()
if not vals:
return 1
build_dir = self.args.path[0]
target = self.args.target[0]
if vals['type'] == 'gn':
if self.args.build:
ret = self.Build(target)
if ret:
return ret
ret = self.RunGNIsolate(vals)
if ret:
return ret
else:
ret = self.Build('%s_run' % target)
if ret:
return ret
ret, _, _ = self.Run([
self.executable,
self.PathJoin('tools', 'swarming_client', 'isolate.py'),
'run',
'-s',
self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target))],
force_verbose=False, buffer_output=False)
return ret
def CmdValidate(self):
errs = []
# Read the file to make sure it parses.
self.ReadConfigFile()
# Figure out the whole list of configs and ensure that no config is
# listed in more than one category.
all_configs = {}
for config in self.common_dev_configs:
all_configs[config] = 'common_dev_configs'
for config in self.private_configs:
if config in all_configs:
errs.append('config "%s" listed in "private_configs" also '
'listed in "%s"' % (config, all_configs['config']))
else:
all_configs[config] = 'private_configs'
for config in self.unsupported_configs:
if config in all_configs:
errs.append('config "%s" listed in "unsupported_configs" also '
'listed in "%s"' % (config, all_configs['config']))
else:
all_configs[config] = 'unsupported_configs'
for master in self.masters:
for builder in self.masters[master]:
config = self.masters[master][builder]
if config in all_configs and all_configs[config] not in self.masters:
errs.append('Config "%s" used by a bot is also listed in "%s".' %
(config, all_configs[config]))
else:
all_configs[config] = master
# Check that every referenced config actually exists.
for config, loc in all_configs.items():
if not config in self.configs:
errs.append('Unknown config "%s" referenced from "%s".' %
(config, loc))
# Check that every actual config is actually referenced.
for config in self.configs:
if not config in all_configs:
errs.append('Unused config "%s".' % config)
# Figure out the whole list of mixins, and check that every mixin
# listed by a config or another mixin actually exists.
referenced_mixins = set()
for config, mixins in self.configs.items():
for mixin in mixins:
if not mixin in self.mixins:
errs.append('Unknown mixin "%s" referenced by config "%s".' %
(mixin, config))
referenced_mixins.add(mixin)
for mixin in self.mixins:
for sub_mixin in self.mixins[mixin].get('mixins', []):
if not sub_mixin in self.mixins:
errs.append('Unknown mixin "%s" referenced by mixin "%s".' %
(sub_mixin, mixin))
referenced_mixins.add(sub_mixin)
# Check that every mixin defined is actually referenced somewhere.
for mixin in self.mixins:
if not mixin in referenced_mixins:
errs.append('Unreferenced mixin "%s".' % mixin)
if errs:
raise MBErr(('mb config file %s has problems:' % self.args.config_file) +
'\n ' + '\n '.join(errs))
self.Print('mb config file %s looks ok.' % self.args.config_file)
return 0
def GetConfig(self):
build_dir = self.args.path[0]
vals = {}
if self.args.builder or self.args.master or self.args.config:
vals = self.Lookup()
if vals['type'] == 'gn':
# Re-run gn gen in order to ensure the config is consistent with the
# build dir.
self.RunGNGen(vals)
return vals
# TODO: We can only get the config for GN build dirs, not GYP build dirs.
# GN stores the args that were used in args.gn in the build dir,
# but GYP doesn't store them anywhere. We should consider modifying
# gyp_chromium to record the arguments it runs with in a similar
# manner.
mb_type_path = self.PathJoin(self.ToAbsPath(build_dir), 'mb_type')
if not self.Exists(mb_type_path):
gn_args_path = self.PathJoin(self.ToAbsPath(build_dir), 'args.gn')
if not self.Exists(gn_args_path):
self.Print('Must either specify a path to an existing GN build dir '
'or pass in a -m/-b pair or a -c flag to specify the '
'configuration')
return {}
else:
mb_type = 'gn'
else:
mb_type = self.ReadFile(mb_type_path).strip()
if mb_type == 'gn':
vals = self.GNValsFromDir(build_dir)
else:
vals = {}
vals['type'] = mb_type
return vals
def GNValsFromDir(self, build_dir):
args_contents = self.ReadFile(
self.PathJoin(self.ToAbsPath(build_dir), 'args.gn'))
gn_args = []
for l in args_contents.splitlines():
fields = l.split(' ')
name = fields[0]
val = ' '.join(fields[2:])
gn_args.append('%s=%s' % (name, val))
return {
'gn_args': ' '.join(gn_args),
'type': 'gn',
}
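# Illustrative sketch of the parsing above (the args.gn line is hypothetical):
# a line such as "is_debug = false" splits into ['is_debug', '=', 'false'],
# so name='is_debug', val='false', and the result is
# {'gn_args': 'is_debug=false', 'type': 'gn'}.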
def Lookup(self):
vals = self.ReadBotConfig()
if not vals:
self.ReadConfigFile()
config = self.ConfigFromArgs()
if not config in self.configs:
raise MBErr('Config "%s" not found in %s' %
(config, self.args.config_file))
vals = self.FlattenConfig(config)
# Do some basic sanity checking on the config so that we
# don't have to do this in every caller.
assert 'type' in vals, 'No meta-build type specified in the config'
assert vals['type'] in ('gn', 'gyp'), (
'Unknown meta-build type "%s"' % vals['gn_args'])
return vals
def ReadBotConfig(self):
if not self.args.master or not self.args.builder:
return {}
path = self.PathJoin(self.chromium_src_dir, 'ios', 'build', 'bots',
self.args.master, self.args.builder + '.json')
if not self.Exists(path):
return {}
contents = json.loads(self.ReadFile(path))
gyp_vals = contents.get('GYP_DEFINES', {})
if isinstance(gyp_vals, dict):
gyp_defines = ' '.join('%s=%s' % (k, v) for k, v in gyp_vals.items())
else:
gyp_defines = ' '.join(gyp_vals)
gn_args = ' '.join(contents.get('gn_args', []))
return {
'type': contents.get('mb_type', ''),
'gn_args': gn_args,
'gyp_defines': gyp_defines,
}
def ReadConfigFile(self):
if not self.Exists(self.args.config_file):
raise MBErr('config file not found at %s' % self.args.config_file)
try:
contents = ast.literal_eval(self.ReadFile(self.args.config_file))
except SyntaxError as e:
raise MBErr('Failed to parse config file "%s": %s' %
(self.args.config_file, e))
self.common_dev_configs = contents['common_dev_configs']
self.configs = contents['configs']
self.masters = contents['masters']
self.mixins = contents['mixins']
self.private_configs = contents['private_configs']
self.unsupported_configs = contents['unsupported_configs']
def ConfigFromArgs(self):
if self.args.config:
if self.args.master or self.args.builder:
raise MBErr('Cannot specify both -c/--config and -m/--master or '
'-b/--builder')
return self.args.config
if not self.args.master or not self.args.builder:
raise MBErr('Must specify either -c/--config or '
'(-m/--master and -b/--builder)')
if not self.args.master in self.masters:
raise MBErr('Master name "%s" not found in "%s"' %
(self.args.master, self.args.config_file))
if not self.args.builder in self.masters[self.args.master]:
raise MBErr('Builder name "%s" not found under masters[%s] in "%s"' %
(self.args.builder, self.args.master, self.args.config_file))
return self.masters[self.args.master][self.args.builder]
def FlattenConfig(self, config):
mixins = self.configs[config]
vals = {
'type': None,
'gn_args': [],
'gyp_defines': '',
'gyp_crosscompile': False,
}
visited = []
self.FlattenMixins(mixins, vals, visited)
return vals
def FlattenMixins(self, mixins, vals, visited):
for m in mixins:
if m not in self.mixins:
raise MBErr('Unknown mixin "%s"' % m)
# TODO: check for cycles in mixins.
visited.append(m)
mixin_vals = self.mixins[m]
if 'type' in mixin_vals:
vals['type'] = mixin_vals['type']
if 'gn_args' in mixin_vals:
if vals['gn_args']:
vals['gn_args'] += ' ' + mixin_vals['gn_args']
else:
vals['gn_args'] = mixin_vals['gn_args']
if 'gyp_crosscompile' in mixin_vals:
vals['gyp_crosscompile'] = mixin_vals['gyp_crosscompile']
if 'gyp_defines' in mixin_vals:
if vals['gyp_defines']:
vals['gyp_defines'] += ' ' + mixin_vals['gyp_defines']
else:
vals['gyp_defines'] = mixin_vals['gyp_defines']
if 'mixins' in mixin_vals:
self.FlattenMixins(mixin_vals['mixins'], vals, visited)
return vals
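# Illustrative sketch of how FlattenConfig()/FlattenMixins() combine values; the
# config and mixin names below are made up, not taken from mb_config.pyl:
#   configs = {'gn_debug_goma': ['gn', 'debug', 'goma']}
#   mixins  = {'gn':    {'type': 'gn'},
#              'debug': {'gn_args': 'is_debug=true'},
#              'goma':  {'gn_args': 'use_goma=true'}}
#   FlattenConfig('gn_debug_goma') ->
#       {'type': 'gn', 'gn_args': 'is_debug=true use_goma=true',
#        'gyp_defines': '', 'gyp_crosscompile': False}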
def ClobberIfNeeded(self, vals):
path = self.args.path[0]
build_dir = self.ToAbsPath(path)
mb_type_path = self.PathJoin(build_dir, 'mb_type')
needs_clobber = False
new_mb_type = vals['type']
if self.Exists(build_dir):
if self.Exists(mb_type_path):
old_mb_type = self.ReadFile(mb_type_path)
if old_mb_type != new_mb_type:
self.Print("Build type mismatch: was %s, will be %s, clobbering %s" %
(old_mb_type, new_mb_type, path))
needs_clobber = True
else:
# There is no 'mb_type' file in the build directory, so this probably
# means that the prior build(s) were not done through mb, and we
# have no idea if this was a GYP build or a GN build. Clobber it
# to be safe.
self.Print("%s/mb_type missing, clobbering to be safe" % path)
needs_clobber = True
if self.args.dryrun:
return
if needs_clobber:
self.RemoveDirectory(build_dir)
self.MaybeMakeDirectory(build_dir)
self.WriteFile(mb_type_path, new_mb_type)
def RunGNGen(self, vals):
build_dir = self.args.path[0]
cmd = self.GNCmd('gen', build_dir, vals['gn_args'], extra_args=['--check'])
swarming_targets = []
if getattr(self.args, 'swarming_targets_file', None):
# We need GN to generate the list of runtime dependencies for
# the compile targets listed (one per line) in the file so
# we can run them via swarming. We use ninja_to_gn.pyl to convert
# the compile targets to the matching GN labels.
contents = self.ReadFile(self.args.swarming_targets_file)
swarming_targets = contents.splitlines()
gn_isolate_map = ast.literal_eval(self.ReadFile(self.PathJoin(
self.chromium_src_dir, 'testing', 'buildbot', 'gn_isolate_map.pyl')))
gn_labels = []
for target in swarming_targets:
if not target in gn_isolate_map:
raise MBErr('test target "%s" not found in %s' %
(target, '//testing/buildbot/gn_isolate_map.pyl'))
gn_labels.append(gn_isolate_map[target]['label'])
gn_runtime_deps_path = self.ToAbsPath(build_dir, 'runtime_deps')
# Since GN hasn't run yet, the build directory may not even exist.
self.MaybeMakeDirectory(self.ToAbsPath(build_dir))
self.WriteFile(gn_runtime_deps_path, '\n'.join(gn_labels) + '\n')
cmd.append('--runtime-deps-list-file=%s' % gn_runtime_deps_path)
ret, _, _ = self.Run(cmd)
if ret:
# If `gn gen` failed, we should exit early rather than trying to
# generate isolates. Run() will have already logged any error output.
self.Print('GN gen failed: %d' % ret)
return ret
for target in swarming_targets:
if gn_isolate_map[target]['type'] == 'gpu_browser_test':
runtime_deps_target = 'browser_tests'
elif gn_isolate_map[target]['type'] == 'script':
# For script targets, the build target is usually a group,
# for which gn generates the runtime_deps next to the stamp file
# for the label, which lives under the obj/ directory.
label = gn_isolate_map[target]['label']
runtime_deps_target = 'obj/%s.stamp' % label.replace(':', '/')
else:
runtime_deps_target = target
if self.platform == 'win32':
deps_path = self.ToAbsPath(build_dir,
runtime_deps_target + '.exe.runtime_deps')
else:
deps_path = self.ToAbsPath(build_dir,
runtime_deps_target + '.runtime_deps')
if not self.Exists(deps_path):
raise MBErr('did not generate %s' % deps_path)
command, extra_files = self.GetIsolateCommand(target, vals,
gn_isolate_map)
runtime_deps = self.ReadFile(deps_path).splitlines()
self.WriteIsolateFiles(build_dir, command, target, runtime_deps,
extra_files)
return 0
def RunGNIsolate(self, vals):
gn_isolate_map = ast.literal_eval(self.ReadFile(self.PathJoin(
self.chromium_src_dir, 'testing', 'buildbot', 'gn_isolate_map.pyl')))
build_dir = self.args.path[0]
target = self.args.target[0]
command, extra_files = self.GetIsolateCommand(target, vals, gn_isolate_map)
label = gn_isolate_map[target]['label']
ret, out, _ = self.Call(['gn', 'desc', build_dir, label, 'runtime_deps'])
if ret:
return ret
runtime_deps = out.splitlines()
self.WriteIsolateFiles(build_dir, command, target, runtime_deps,
extra_files)
ret, _, _ = self.Run([
self.executable,
self.PathJoin('tools', 'swarming_client', 'isolate.py'),
'check',
'-i',
self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
'-s',
self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target))],
buffer_output=False)
return ret
def WriteIsolateFiles(self, build_dir, command, target, runtime_deps,
extra_files):
isolate_path = self.ToAbsPath(build_dir, target + '.isolate')
self.WriteFile(isolate_path,
pprint.pformat({
'variables': {
'command': command,
'files': sorted(runtime_deps + extra_files),
}
}) + '\n')
self.WriteJSON(
{
'args': [
'--isolated',
self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target)),
'--isolate',
self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
],
'dir': self.chromium_src_dir,
'version': 1,
},
isolate_path + 'd.gen.json',
)
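# Illustrative sketch of the files written above for a hypothetical target
# "base_unittests" in //out/Release:
#   out/Release/base_unittests.isolate           - pprint'ed dict with 'command'
#                                                  and 'files' under 'variables'
#   out/Release/base_unittests.isolated.gen.json - JSON with the '--isolated' and
#                                                  '--isolate' args, 'dir' and 'version'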
def GNCmd(self, subcommand, path, gn_args='', extra_args=None):
if self.platform == 'linux2':
subdir = 'linux64'
elif self.platform == 'darwin':
subdir = 'mac'
else:
subdir = 'win'
gn_path = self.PathJoin(self.chromium_src_dir, 'buildtools', subdir, 'gn')
cmd = [gn_path, subcommand, path]
gn_args = gn_args.replace("$(goma_dir)", self.args.goma_dir)
if gn_args:
cmd.append('--args=%s' % gn_args)
if extra_args:
cmd.extend(extra_args)
return cmd
def RunGYPGen(self, vals):
path = self.args.path[0]
output_dir = self.ParseGYPConfigPath(path)
cmd, env = self.GYPCmd(output_dir, vals)
ret, _, _ = self.Run(cmd, env=env)
return ret
def RunGYPAnalyze(self, vals):
output_dir = self.ParseGYPConfigPath(self.args.path[0])
if self.args.verbose:
inp = self.ReadInputJSON(['files', 'test_targets',
'additional_compile_targets'])
self.Print()
self.Print('analyze input:')
self.PrintJSON(inp)
self.Print()
cmd, env = self.GYPCmd(output_dir, vals)
cmd.extend(['-f', 'analyzer',
'-G', 'config_path=%s' % self.args.input_path[0],
'-G', 'analyzer_output_path=%s' % self.args.output_path[0]])
ret, _, _ = self.Run(cmd, env=env)
if not ret and self.args.verbose:
outp = json.loads(self.ReadFile(self.args.output_path[0]))
self.Print()
self.Print('analyze output:')
self.PrintJSON(outp)
self.Print()
return ret
def GetIsolateCommand(self, target, vals, gn_isolate_map):
# This needs to mirror the settings in //build/config/ui.gni:
# use_x11 = is_linux && !use_ozone.
# TODO(dpranke): Figure out how to keep this in sync better.
use_x11 = (self.platform == 'linux2' and
not 'target_os="android"' in vals['gn_args'] and
not 'use_ozone=true' in vals['gn_args'])
asan = 'is_asan=true' in vals['gn_args']
msan = 'is_msan=true' in vals['gn_args']
tsan = 'is_tsan=true' in vals['gn_args']
executable_suffix = '.exe' if self.platform == 'win32' else ''
test_type = gn_isolate_map[target]['type']
cmdline = []
extra_files = []
if use_x11 and test_type == 'windowed_test_launcher':
extra_files = [
'xdisplaycheck',
'../../testing/test_env.py',
'../../testing/xvfb.py',
]
cmdline = [
'../../testing/xvfb.py',
'.',
'./' + str(target),
'--brave-new-test-launcher',
'--test-launcher-bot-mode',
'--asan=%d' % asan,
'--msan=%d' % msan,
'--tsan=%d' % tsan,
]
elif test_type in ('windowed_test_launcher', 'console_test_launcher'):
extra_files = [
'../../testing/test_env.py'
]
cmdline = [
'../../testing/test_env.py',
'./' + str(target) + executable_suffix,
'--brave-new-test-launcher',
'--test-launcher-bot-mode',
'--asan=%d' % asan,
'--msan=%d' % msan,
'--tsan=%d' % tsan,
]
elif test_type == 'gpu_browser_test':
extra_files = [
'../../testing/test_env.py'
]
gtest_filter = gn_isolate_map[target]['gtest_filter']
cmdline = [
'../../testing/test_env.py',
'./browser_tests' + executable_suffix,
'--test-launcher-bot-mode',
'--enable-gpu',
'--test-launcher-jobs=1',
'--gtest_filter=%s' % gtest_filter,
]
elif test_type == 'script':
extra_files = [
'../../testing/test_env.py'
]
cmdline = [
'../../testing/test_env.py',
'../../' + self.ToSrcRelPath(gn_isolate_map[target]['script'])
] + gn_isolate_map[target].get('args', [])
elif test_type == 'raw':
extra_files = []
cmdline = [
'./' + str(target) + executable_suffix,
] + gn_isolate_map[target].get('args', [])
else:
self.WriteFailureAndRaise('No command line for %s found (test type %s).'
% (target, test_type), output_path=None)
return cmdline, extra_files
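# Illustrative sketch of the result above (target name and platform are hypothetical):
# a 'console_test_launcher' target "base_unittests" on Linux with no sanitizers yields
#   cmdline = ['../../testing/test_env.py', './base_unittests',
#              '--brave-new-test-launcher', '--test-launcher-bot-mode',
#              '--asan=0', '--msan=0', '--tsan=0']
#   extra_files = ['../../testing/test_env.py']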
def ToAbsPath(self, build_path, *comps):
return self.PathJoin(self.chromium_src_dir,
self.ToSrcRelPath(build_path),
*comps)
def ToSrcRelPath(self, path):
"""Returns a relative path from the top of the repo."""
# TODO: Support normal paths in addition to source-absolute paths.
assert(path.startswith('//'))
return path[2:].replace('/', self.sep)
def ParseGYPConfigPath(self, path):
rpath = self.ToSrcRelPath(path)
output_dir, _, _ = rpath.rpartition(self.sep)
return output_dir
def GYPCmd(self, output_dir, vals):
gyp_defines = vals['gyp_defines']
goma_dir = self.args.goma_dir
# GYP uses shlex.split() to split the gyp defines into separate arguments,
# so we can support backslashes and spaces in arguments by quoting
# them, even on Windows, where this normally wouldn't work.
if '\\' in goma_dir or ' ' in goma_dir:
goma_dir = "'%s'" % goma_dir
gyp_defines = gyp_defines.replace("$(goma_dir)", goma_dir)
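# Illustrative sketch of the quoting above (the define name and path are hypothetical):
# if goma_dir is "C:\goma dir", the quoted value "'C:\goma dir'" survives GYP's
# shlex.split() of GYP_DEFINES as a single define, e.g.
#   shlex.split("use_goma=1 gomadir='C:\goma dir'") -> ['use_goma=1', 'gomadir=C:\goma dir']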
cmd = [
self.executable,
self.PathJoin('build', 'gyp_chromium'),
'-G',
'output_dir=' + output_dir,
]
# Ensure that we have an environment that only contains
# the exact values of the GYP variables we need.
env = os.environ.copy()
if 'GYP_CHROMIUM_NO_ACTION' in env:
del env['GYP_CHROMIUM_NO_ACTION']
if 'GYP_CROSSCOMPILE' in env:
del env['GYP_CROSSCOMPILE']
env['GYP_DEFINES'] = gyp_defines
if vals['gyp_crosscompile']:
env['GYP_CROSSCOMPILE'] = '1'
return cmd, env
def RunGNAnalyze(self, vals):
# analyze runs before 'gn gen' now, so we need to run gn gen
# in order to ensure that we have a build directory.
ret = self.RunGNGen(vals)
if ret:
return ret
inp = self.ReadInputJSON(['files', 'test_targets',
'additional_compile_targets'])
if self.args.verbose:
self.Print()
self.Print('analyze input:')
self.PrintJSON(inp)
self.Print()
# TODO(crbug.com/555273) - currently GN treats targets and
# additional_compile_targets identically since we can't tell the
# difference between a target that is a group in GN and one that isn't.
# We should eventually fix this and treat the two types differently.
targets = (set(inp['test_targets']) |
set(inp['additional_compile_targets']))
output_path = self.args.output_path[0]
# Bail out early if a GN file was modified, since 'gn refs' won't know
# what to do about it. Also, bail out early if 'all' was asked for,
# since we can't deal with it yet.
if (any(f.endswith('.gn') or f.endswith('.gni') for f in inp['files']) or
'all' in targets):
self.WriteJSON({
'status': 'Found dependency (all)',
'compile_targets': sorted(targets),
'test_targets': sorted(targets & set(inp['test_targets'])),
}, output_path)
return 0
# This shouldn't normally happen, but could due to unusual race conditions,
# like a try job that gets scheduled before a patch lands but runs after
# the patch has landed.
if not inp['files']:
self.Print('Warning: No files modified in patch, bailing out early.')
self.WriteJSON({
'status': 'No dependency',
'compile_targets': [],
'test_targets': [],
}, output_path)
return 0
ret = 0
response_file = self.TempFile()
response_file.write('\n'.join(inp['files']) + '\n')
response_file.close()
matching_targets = set()
try:
cmd = self.GNCmd('refs', self.args.path[0]) + [
'@%s' % response_file.name, '--all', '--as=output']
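# Illustrative shape of the command built above (the response-file name is
# hypothetical):
#   <chromium_src>/buildtools/<subdir>/gn refs //out/Debug @/tmp/tmpXYZ --all --as=output
# The build-dir prefix is stripped from each line of output below and the remainder
# is matched against the requested targets.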
ret, out, _ = self.Run(cmd, force_verbose=False)
if ret and not 'The input matches no targets' in out:
self.WriteFailureAndRaise('gn refs returned %d: %s' % (ret, out),
output_path)
build_dir = self.ToSrcRelPath(self.args.path[0]) + self.sep
for output in out.splitlines():
build_output = output.replace(build_dir, '')
if build_output in targets:
matching_targets.add(build_output)
cmd = self.GNCmd('refs', self.args.path[0]) + [
'@%s' % response_file.name, '--all']
ret, out, _ = self.Run(cmd, force_verbose=False)
if ret and not 'The input matches no targets' in out:
self.WriteFailureAndRaise('gn refs returned %d: %s' % (ret, out),
output_path)
for label in out.splitlines():
build_target = label[2:]
# We want to accept 'chrome/android:chrome_public_apk' and
# just 'chrome_public_apk'. This may result in too many targets
# getting built, but we can adjust that later if need be.
for input_target in targets:
if (input_target == build_target or
build_target.endswith(':' + input_target)):
matching_targets.add(input_target)
finally:
self.RemoveFile(response_file.name)
if matching_targets:
self.WriteJSON({
'status': 'Found dependency',
'compile_targets': sorted(matching_targets),
'test_targets': sorted(matching_targets &
set(inp['test_targets'])),
}, output_path)
else:
self.WriteJSON({
'status': 'No dependency',
'compile_targets': [],
'test_targets': [],
}, output_path)
if self.args.verbose:
outp = json.loads(self.ReadFile(output_path))
self.Print()
self.Print('analyze output:')
self.PrintJSON(outp)
self.Print()
return 0
def ReadInputJSON(self, required_keys):
path = self.args.input_path[0]
output_path = self.args.output_path[0]
if not self.Exists(path):
self.WriteFailureAndRaise('"%s" does not exist' % path, output_path)
try:
inp = json.loads(self.ReadFile(path))
except Exception as e:
self.WriteFailureAndRaise('Failed to read JSON input from "%s": %s' %
(path, e), output_path)
for k in required_keys:
if not k in inp:
self.WriteFailureAndRaise('input file is missing a "%s" key' % k,
output_path)
return inp
def WriteFailureAndRaise(self, msg, output_path):
if output_path:
self.WriteJSON({'error': msg}, output_path, force_verbose=True)
raise MBErr(msg)
def WriteJSON(self, obj, path, force_verbose=False):
try:
self.WriteFile(path, json.dumps(obj, indent=2, sort_keys=True) + '\n',
force_verbose=force_verbose)
except Exception as e:
raise MBErr('Error %s writing to the output path "%s"' %
(e, path))
def PrintCmd(self, cmd, env):
if self.platform == 'win32':
env_prefix = 'set '
env_quoter = QuoteForSet
shell_quoter = QuoteForCmd
else:
env_prefix = ''
env_quoter = pipes.quote
shell_quoter = pipes.quote
def print_env(var):
if env and var in env:
self.Print('%s%s=%s' % (env_prefix, var, env_quoter(env[var])))
print_env('GYP_CROSSCOMPILE')
print_env('GYP_DEFINES')
if cmd[0] == self.executable:
cmd = ['python'] + cmd[1:]
self.Print(*[shell_quoter(arg) for arg in cmd])
def PrintJSON(self, obj):
self.Print(json.dumps(obj, indent=2, sort_keys=True))
def Print(self, *args, **kwargs):
# This function largely exists so it can be overridden for testing.
print(*args, **kwargs)
def Build(self, target):
build_dir = self.ToSrcRelPath(self.args.path[0])
ninja_cmd = ['ninja', '-C', build_dir]
if self.args.jobs:
ninja_cmd.extend(['-j', '%d' % self.args.jobs])
ninja_cmd.append(target)
ret, _, _ = self.Run(ninja_cmd, force_verbose=False, buffer_output=False)
return ret
def Run(self, cmd, env=None, force_verbose=True, buffer_output=True):
# This function largely exists so it can be overridden for testing.
if self.args.dryrun or self.args.verbose or force_verbose:
self.PrintCmd(cmd, env)
if self.args.dryrun:
return 0, '', ''
ret, out, err = self.Call(cmd, env=env, buffer_output=buffer_output)
if self.args.verbose or force_verbose:
if ret:
self.Print(' -> returned %d' % ret)
if out:
self.Print(out, end='')
if err:
self.Print(err, end='', file=sys.stderr)
return ret, out, err
def Call(self, cmd, env=None, buffer_output=True):
if buffer_output:
p = subprocess.Popen(cmd, shell=False, cwd=self.chromium_src_dir,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
else:
p = subprocess.Popen(cmd, shell=False, cwd=self.chromium_src_dir,
env=env)
p.wait()
out = err = ''
return p.returncode, out, err
def ExpandUser(self, path):
# This function largely exists so it can be overridden for testing.
return os.path.expanduser(path)
def Exists(self, path):
# This function largely exists so it can be overridden for testing.
return os.path.exists(path)
def MaybeMakeDirectory(self, path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def PathJoin(self, *comps):
# This function largely exists so it can be overridden for testing.
return os.path.join(*comps)
def ReadFile(self, path):
# This function largely exists so it can be overridden for testing.
with open(path) as fp:
return fp.read()
def RemoveFile(self, path):
# This function largely exists so it can be overridden for testing.
os.remove(path)
def RemoveDirectory(self, abs_path):
if self.platform == 'win32':
# In other places in chromium, we often have to retry this command
# because we're worried about other processes still holding on to
# file handles, but when MB is invoked, it will be early enough in the
# build that there should be no other processes to interfere. We
# can change this if need be.
self.Run(['cmd.exe', '/c', 'rmdir', '/q', '/s', abs_path])
else:
shutil.rmtree(abs_path, ignore_errors=True)
def TempFile(self, mode='w'):
# This function largely exists so it can be overridden for testing.
return tempfile.NamedTemporaryFile(mode=mode, delete=False)
def WriteFile(self, path, contents, force_verbose=False):
# This function largely exists so it can be overridden for testing.
if self.args.dryrun or self.args.verbose or force_verbose:
self.Print('\nWriting """\\\n%s""" to %s.\n' % (contents, path))
with open(path, 'w') as fp:
return fp.write(contents)
class MBErr(Exception):
pass
# See http://goo.gl/l5NPDW and http://goo.gl/4Diozm for the painful
# details of this next section, which handles escaping command lines
# so that they can be copied and pasted into a cmd window.
UNSAFE_FOR_SET = set('^<>&|')
UNSAFE_FOR_CMD = UNSAFE_FOR_SET.union(set('()%'))
ALL_META_CHARS = UNSAFE_FOR_CMD.union(set('"'))
def QuoteForSet(arg):
if any(a in UNSAFE_FOR_SET for a in arg):
arg = ''.join('^' + a if a in UNSAFE_FOR_SET else a for a in arg)
return arg
def QuoteForCmd(arg):
# First, escape the arg so that CommandLineToArgvW will parse it properly.
# From //tools/gyp/pylib/gyp/msvs_emulation.py:23.
if arg == '' or ' ' in arg or '"' in arg:
quote_re = re.compile(r'(\\*)"')
arg = '"%s"' % (quote_re.sub(lambda mo: 2 * mo.group(1) + '\\"', arg))
# Then check to see if the arg contains any metacharacters other than
# double quotes; if it does, quote everything (including the double
# quotes) for safety.
if any(a in UNSAFE_FOR_CMD for a in arg):
arg = ''.join('^' + a if a in ALL_META_CHARS else a for a in arg)
return arg
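# Illustrative examples of the helpers above (inputs are made up):
#   QuoteForSet('is_debug=1&use_goma=1') -> 'is_debug=1^&use_goma=1'
#   QuoteForCmd('out dir')               -> '"out dir"'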
if __name__ == '__main__':
try:
sys.exit(main(sys.argv[1:]))
except MBErr as e:
print(e)
sys.exit(1)
except KeyboardInterrupt:
print("interrupted, exiting", stream=sys.stderr)
sys.exit(130)
|
|
# Copyright 2008-2009 Owen Taylor
#
# This file is part of Reinteract and distributed under the terms
# of the BSD license. See the file COPYING in the Reinteract
# distribution for full details.
#
########################################################################
import os
import gobject
import gtk
import pango
from application import application
from format_escaped import format_escaped
from notebook import NotebookFile
from shell_buffer import ShellBuffer
from shell_view import ShellView
from save_file import SaveFileBuilder
class Editor(gobject.GObject):
def __init__(self, notebook):
gobject.GObject.__init__(self)
self.notebook = notebook
self._unsaved_index = application.allocate_unsaved_index()
#######################################################
# Utility
#######################################################
def _clear_unsaved(self):
if self._unsaved_index is not None:
application.free_unsaved_index(self._unsaved_index)
self._unsaved_index = None
def _update_filename(self, *args):
self.notify('filename')
self.notify('title')
def _update_modified(self, *args):
self.notify('modified')
self.notify('title')
def _update_state(self, *args):
self.notify('state')
def _update_file(self):
self.notify('file')
def __prompt_for_name(self, title, save_button_text, action, check_name=None):
builder = SaveFileBuilder(title, self._get_display_name(), save_button_text, check_name)
builder.dialog.set_transient_for(self.widget.get_toplevel())
if self._get_filename() is not None:
builder.name_entry.set_text(os.path.basename(self._get_filename()))
builder.prompt_for_name(self.notebook.folder, self._get_extension(), action)
builder.dialog.destroy()
#######################################################
# Implemented by subclasses
#######################################################
def _get_display_name(self):
raise NotImplementedError()
def _get_modified(self):
raise NotImplementedError()
def _get_state(self):
return NotebookFile.NONE
def _get_filename(self):
raise NotImplementedError()
def _get_file(self):
raise NotImplementedError()
def _get_extension(self):
raise NotImplementedError()
def _save(self, filename):
raise NotImplementedError()
#######################################################
# Public API
#######################################################
def close(self):
if self._unsaved_index is not None:
application.free_unsaved_index(self._unsaved_index)
self._unsaved_index = None
self.widget.destroy()
def confirm_discard(self, before_quit=False):
if not self.modified:
return True
if before_quit:
message_format = self.DISCARD_FORMAT_BEFORE_QUIT
continue_button_text = '_Quit without saving'
else:
message_format = self.DISCARD_FORMAT
continue_button_text = '_Discard'
if self._get_filename() is None:
save_button_text = gtk.STOCK_SAVE_AS
else:
save_button_text = gtk.STOCK_SAVE
message = format_escaped("<big><b>" + message_format + "</b></big>", self._get_display_name())
dialog = gtk.MessageDialog(parent=self.widget.get_toplevel(), buttons=gtk.BUTTONS_NONE,
type=gtk.MESSAGE_WARNING)
dialog.set_markup(message)
dialog.add_buttons(continue_button_text, gtk.RESPONSE_OK,
gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
save_button_text, 1)
dialog.set_default_response(1)
response = dialog.run()
dialog.destroy()
if response == gtk.RESPONSE_OK:
return True
elif response == 1:
self.save()
if self.modified:
return False
else:
return True
else:
return False
def load(self, filename, escape=False):
"""Load a file from disk into the editor. Can raise IOError if the
file cannot be read, or reunicode.ConversionError if the file contains
invalid characters. (reunicode.ConversionError will not be raised if
escape is True)
@param filename the file to load
@param escape if true, invalid byte and character sequences in the input
will be converted into \\x<nn> and \\u<nnnn> escape sequences.
"""
raise NotImplementedError()
def save(self, filename=None):
if filename is None:
filename = self._get_filename()
if filename is None:
def action(fullname):
self._save(fullname)
self._clear_unsaved()
self.notebook.refresh()
self.__prompt_for_name(title="Save As...", save_button_text="_Save", action=action)
else:
self._save(filename)
def rename(self):
if self._get_filename() is None:
self.save()
return
old_name = os.path.basename(self._get_filename())
title = "Rename '%s'" % old_name
def check_name(name):
return name != "" and name != old_name
def action(fullname):
old_filename = self._get_filename()
self._save(fullname)
self._clear_unsaved()
os.remove(old_filename)
self.notebook.refresh()
self.__prompt_for_name(title=title, save_button_text="_Rename", action=action, check_name=check_name)
@property
def needs_calculate(self):
return (self.state != NotebookFile.EXECUTE_SUCCESS and
self.state != NotebookFile.NONE and
self.state != NotebookFile.EXECUTING)
def calculate(self):
pass
def undo(self):
pass
def redo(self):
pass
@gobject.property
def filename(self):
return self._get_filename()
@gobject.property
def file(self):
return self._get_file()
@gobject.property
def modified(self):
return self._get_modified()
@gobject.property
def state(self):
return self._get_state()
@gobject.property
def title(self):
if self.modified:
return "*" + self._get_display_name()
else:
return self._get_display_name()
|
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "violin"
_path_str = "violin.marker"
_valid_props = {"color", "line", "opacity", "outliercolor", "size", "symbol"}
# color
# -----
@property
def color(self):
"""
Sets the marker color. It accepts either a specific color or an
array of numbers that are mapped to the colorscale relative to
the max and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.violin.marker.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
color
Sets the marker.line color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if
set.
outliercolor
Sets the border line color of the outlier
sample points. Defaults to marker.color
outlierwidth
Sets the border line width (in px) of the
outlier sample points.
width
Sets the width (in px) of the lines bounding
the marker points.
Returns
-------
plotly.graph_objs.violin.marker.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the marker opacity.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# outliercolor
# ------------
@property
def outliercolor(self):
"""
Sets the color of the outlier sample points.
The 'outliercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["outliercolor"]
@outliercolor.setter
def outliercolor(self, val):
self["outliercolor"] = val
# size
# ----
@property
def size(self):
"""
Sets the marker size (in px).
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# symbol
# ------
@property
def symbol(self):
"""
Sets the marker symbol type. Adding 100 is equivalent to
appending "-open" to a symbol name. Adding 200 is equivalent to
appending "-dot" to a symbol name. Adding 300 is equivalent to
appending "-open-dot" or "dot-open" to a symbol name.
The 'symbol' property is an enumeration that may be specified as:
- One of the following enumeration values:
[0, '0', 'circle', 100, '100', 'circle-open', 200, '200',
'circle-dot', 300, '300', 'circle-open-dot', 1, '1',
'square', 101, '101', 'square-open', 201, '201',
'square-dot', 301, '301', 'square-open-dot', 2, '2',
'diamond', 102, '102', 'diamond-open', 202, '202',
'diamond-dot', 302, '302', 'diamond-open-dot', 3, '3',
'cross', 103, '103', 'cross-open', 203, '203',
'cross-dot', 303, '303', 'cross-open-dot', 4, '4', 'x',
104, '104', 'x-open', 204, '204', 'x-dot', 304, '304',
'x-open-dot', 5, '5', 'triangle-up', 105, '105',
'triangle-up-open', 205, '205', 'triangle-up-dot', 305,
'305', 'triangle-up-open-dot', 6, '6', 'triangle-down',
106, '106', 'triangle-down-open', 206, '206',
'triangle-down-dot', 306, '306', 'triangle-down-open-dot',
7, '7', 'triangle-left', 107, '107', 'triangle-left-open',
207, '207', 'triangle-left-dot', 307, '307',
'triangle-left-open-dot', 8, '8', 'triangle-right', 108,
'108', 'triangle-right-open', 208, '208',
'triangle-right-dot', 308, '308',
'triangle-right-open-dot', 9, '9', 'triangle-ne', 109,
'109', 'triangle-ne-open', 209, '209', 'triangle-ne-dot',
309, '309', 'triangle-ne-open-dot', 10, '10',
'triangle-se', 110, '110', 'triangle-se-open', 210, '210',
'triangle-se-dot', 310, '310', 'triangle-se-open-dot', 11,
'11', 'triangle-sw', 111, '111', 'triangle-sw-open', 211,
'211', 'triangle-sw-dot', 311, '311',
'triangle-sw-open-dot', 12, '12', 'triangle-nw', 112,
'112', 'triangle-nw-open', 212, '212', 'triangle-nw-dot',
312, '312', 'triangle-nw-open-dot', 13, '13', 'pentagon',
113, '113', 'pentagon-open', 213, '213', 'pentagon-dot',
313, '313', 'pentagon-open-dot', 14, '14', 'hexagon', 114,
'114', 'hexagon-open', 214, '214', 'hexagon-dot', 314,
'314', 'hexagon-open-dot', 15, '15', 'hexagon2', 115,
'115', 'hexagon2-open', 215, '215', 'hexagon2-dot', 315,
'315', 'hexagon2-open-dot', 16, '16', 'octagon', 116,
'116', 'octagon-open', 216, '216', 'octagon-dot', 316,
'316', 'octagon-open-dot', 17, '17', 'star', 117, '117',
'star-open', 217, '217', 'star-dot', 317, '317',
'star-open-dot', 18, '18', 'hexagram', 118, '118',
'hexagram-open', 218, '218', 'hexagram-dot', 318, '318',
'hexagram-open-dot', 19, '19', 'star-triangle-up', 119,
'119', 'star-triangle-up-open', 219, '219',
'star-triangle-up-dot', 319, '319',
'star-triangle-up-open-dot', 20, '20',
'star-triangle-down', 120, '120',
'star-triangle-down-open', 220, '220',
'star-triangle-down-dot', 320, '320',
'star-triangle-down-open-dot', 21, '21', 'star-square',
121, '121', 'star-square-open', 221, '221',
'star-square-dot', 321, '321', 'star-square-open-dot', 22,
'22', 'star-diamond', 122, '122', 'star-diamond-open',
222, '222', 'star-diamond-dot', 322, '322',
'star-diamond-open-dot', 23, '23', 'diamond-tall', 123,
'123', 'diamond-tall-open', 223, '223',
'diamond-tall-dot', 323, '323', 'diamond-tall-open-dot',
24, '24', 'diamond-wide', 124, '124', 'diamond-wide-open',
224, '224', 'diamond-wide-dot', 324, '324',
'diamond-wide-open-dot', 25, '25', 'hourglass', 125,
'125', 'hourglass-open', 26, '26', 'bowtie', 126, '126',
'bowtie-open', 27, '27', 'circle-cross', 127, '127',
'circle-cross-open', 28, '28', 'circle-x', 128, '128',
'circle-x-open', 29, '29', 'square-cross', 129, '129',
'square-cross-open', 30, '30', 'square-x', 130, '130',
'square-x-open', 31, '31', 'diamond-cross', 131, '131',
'diamond-cross-open', 32, '32', 'diamond-x', 132, '132',
'diamond-x-open', 33, '33', 'cross-thin', 133, '133',
'cross-thin-open', 34, '34', 'x-thin', 134, '134',
'x-thin-open', 35, '35', 'asterisk', 135, '135',
'asterisk-open', 36, '36', 'hash', 136, '136',
'hash-open', 236, '236', 'hash-dot', 336, '336',
'hash-open-dot', 37, '37', 'y-up', 137, '137',
'y-up-open', 38, '38', 'y-down', 138, '138',
'y-down-open', 39, '39', 'y-left', 139, '139',
'y-left-open', 40, '40', 'y-right', 140, '140',
'y-right-open', 41, '41', 'line-ew', 141, '141',
'line-ew-open', 42, '42', 'line-ns', 142, '142',
'line-ns-open', 43, '43', 'line-ne', 143, '143',
'line-ne-open', 44, '44', 'line-nw', 144, '144',
'line-nw-open', 45, '45', 'arrow-up', 145, '145',
'arrow-up-open', 46, '46', 'arrow-down', 146, '146',
'arrow-down-open', 47, '47', 'arrow-left', 147, '147',
'arrow-left-open', 48, '48', 'arrow-right', 148, '148',
'arrow-right-open', 49, '49', 'arrow-bar-up', 149, '149',
'arrow-bar-up-open', 50, '50', 'arrow-bar-down', 150,
'150', 'arrow-bar-down-open', 51, '51', 'arrow-bar-left',
151, '151', 'arrow-bar-left-open', 52, '52',
'arrow-bar-right', 152, '152', 'arrow-bar-right-open']
Returns
-------
Any
"""
return self["symbol"]
@symbol.setter
def symbol(self, val):
self["symbol"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the marker color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
line
:class:`plotly.graph_objects.violin.marker.Line`
instance or dict with compatible properties
opacity
Sets the marker opacity.
outliercolor
Sets the color of the outlier sample points.
size
Sets the marker size (in px).
symbol
Sets the marker symbol type. Adding 100 is equivalent
to appending "-open" to a symbol name. Adding 200 is
equivalent to appending "-dot" to a symbol name. Adding
300 is equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
"""
def __init__(
self,
arg=None,
color=None,
line=None,
opacity=None,
outliercolor=None,
size=None,
symbol=None,
**kwargs
):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.violin.Marker`
color
Sets the marker color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
line
:class:`plotly.graph_objects.violin.marker.Line`
instance or dict with compatible properties
opacity
Sets the marker opacity.
outliercolor
Sets the color of the outlier sample points.
size
Sets the marker size (in px).
symbol
Sets the marker symbol type. Adding 100 is equivalent
to appending "-open" to a symbol name. Adding 200 is
equivalent to appending "-dot" to a symbol name. Adding
300 is equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
Returns
-------
Marker
"""
super(Marker, self).__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.violin.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.violin.Marker`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("line", None)
_v = line if line is not None else _v
if _v is not None:
self["line"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("outliercolor", None)
_v = outliercolor if outliercolor is not None else _v
if _v is not None:
self["outliercolor"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("symbol", None)
_v = symbol if symbol is not None else _v
if _v is not None:
self["symbol"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
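# Minimal usage sketch (illustrative only; assumes plotly is installed and the
# data values are made up):
#   import plotly.graph_objects as go
#   fig = go.Figure(go.Violin(y=[1, 2, 3, 4],
#                             marker=go.violin.Marker(size=6, opacity=0.7,
#                                                     symbol="diamond")))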
|
|
from test_map_base import TestMapBase
from unittest.mock import call, patch
from werkzeug.datastructures import OrderedMultiDict
from io import BytesIO
from mrt_file_server.utils.nbt_utils import load_compressed_nbt_file, get_nbt_map_value
import os
import pytest
class TestMapUpload(TestMapBase):
def setup(self):
TestMapBase.setup(self)
self.uploads_dir = self.app.config["MAP_UPLOADS_DIR"]
self.reset_map_uploads_dir()
def teardown(self):
TestMapBase.teardown(self)
self.reset_map_uploads_dir()
# Tests
def test_upload_page_should_show_valid_map_id_range(self):
response = self.client.get("/map/upload")
last_allowed_id_range = 1000
last_map_id = 2000
actual_html = response.data.decode('utf-8')
expected_lower_map_id_html = "<span id=\"lower_map_id\" style=\"color: green;\">{}</span>".format(last_map_id - last_allowed_id_range + 1)
expected_upper_map_id_html = "<span id=\"upper_map_id\" style=\"color: green;\">{}</span>".format(last_map_id)
assert expected_lower_map_id_html in actual_html
assert expected_upper_map_id_html in actual_html
@patch("mrt_file_server.utils.log_utils.log_adapter")
def test_upload_single_file_should_be_successful(self, mock_logger):
username = "Frumple"
filename = "map_1500.dat"
message_key = "MAP_UPLOAD_SUCCESS"
self.copy_test_data_file("existing_unlocked.dat", self.uploads_dir, filename)
original_file_content = self.load_test_data_file(filename)
data = OrderedMultiDict()
data.add("userName", username)
data.add("map", (BytesIO(original_file_content), filename))
response = self.perform_upload(data)
assert response.status_code == 200
assert response.mimetype == "text/html"
# Verify that the new map file was uploaded and locked
expected_nbt_file = self.load_test_data_nbt_file(filename)
uploaded_nbt_file = self.load_uploaded_nbt_file(filename)
self.verify_matching_nbt_values(expected_nbt_file, uploaded_nbt_file)
assert get_nbt_map_value(uploaded_nbt_file, "locked") == 1
self.verify_flash_message_by_key(message_key, response.data, filename)
mock_logger.info.assert_called_with(self.get_log_message(message_key), filename, username)
@patch("mrt_file_server.utils.log_utils.log_adapter")
def test_upload_multiple_files_should_be_successful(self, mock_logger):
username = "Frumple"
message_key = "MAP_UPLOAD_SUCCESS"
# Upload 7 files
filenames = [
"map_1500.dat",
"map_2000.dat",
"map_1501.dat",
"map_1502.dat",
"map_1001.dat",
"map_1503.dat",
"map_1504.dat"]
for filename in filenames:
self.copy_test_data_file("existing_unlocked.dat", self.uploads_dir, filename)
original_files = self.load_test_data_files(filenames)
data = OrderedMultiDict()
data.add("userName", username)
for filename in original_files:
data.add("map", (BytesIO(original_files[filename]), filename))
response = self.perform_upload(data)
assert response.status_code == 200
assert response.mimetype == "text/html"
logger_calls = []
for filename in original_files:
# Verify that the new map files were uploaded and locked
expected_nbt_file = self.load_test_data_nbt_file(filename)
uploaded_nbt_file = self.load_uploaded_nbt_file(filename)
self.verify_matching_nbt_values(expected_nbt_file, uploaded_nbt_file)
assert get_nbt_map_value(uploaded_nbt_file, "locked") == 1
self.verify_flash_message_by_key(message_key, response.data, filename)
logger_calls.append(call(self.get_log_message(message_key), filename, username))
mock_logger.info.assert_has_calls(logger_calls, any_order = True)
@patch("mrt_file_server.utils.log_utils.log_adapter")
@pytest.mark.parametrize("username, message_key", [
("", "MAP_UPLOAD_USERNAME_EMPTY"),
("Eris The Eagle", "MAP_UPLOAD_USERNAME_WHITESPACE")
])
def test_upload_with_invalid_username_should_fail(self, mock_logger, username, message_key):
filename = "map_1500.dat"
existing_filename = "existing_unlocked.dat"
self.copy_test_data_file(existing_filename, self.uploads_dir, filename)
existing_file_content = self.load_test_data_file(existing_filename)
upload_file_content = self.load_test_data_file(filename)
data = OrderedMultiDict()
data.add("userName", username)
data.add("map", (BytesIO(upload_file_content), filename))
response = self.perform_upload(data)
assert response.status_code == 200
assert response.mimetype == "text/html"
# Verify that the existing map file was NOT overwritten
self.verify_file_content(self.uploads_dir, filename, existing_file_content)
self.verify_flash_message_by_key(message_key, response.data)
if username:
mock_logger.warn.assert_called_with(self.get_log_message(message_key), username)
else:
mock_logger.warn.assert_called_with(self.get_log_message(message_key))
@patch("mrt_file_server.utils.log_utils.log_adapter")
def test_upload_with_no_files_should_fail(self, mock_logger):
username = "Frumple"
message_key = "MAP_UPLOAD_NO_FILES"
data = OrderedMultiDict()
data.add("userName", username)
response = self.perform_upload(data)
assert response.status_code == 200
assert response.mimetype == "text/html"
self.verify_flash_message_by_key(message_key, response.data)
mock_logger.warn.assert_called_with(self.get_log_message(message_key), username)
@patch("mrt_file_server.utils.log_utils.log_adapter")
def test_upload_with_too_many_files_should_fail(self, mock_logger):
username = "Frumple"
existing_filename = "existing_unlocked.dat"
message_key = "MAP_UPLOAD_TOO_MANY_FILES"
# Upload 11 files, over the limit of 10.
filenames = [
"map_1001.dat",
"map_1500.dat",
"map_1501.dat",
"map_1502.dat",
"map_1503.dat",
"map_1504.dat",
"map_1505.dat",
"map_1506.dat",
"map_1507.dat",
"map_1508.dat",
"map_2000.dat"]
for filename in filenames:
self.copy_test_data_file(existing_filename, self.uploads_dir, filename)
existing_file_content = self.load_test_data_file(existing_filename)
upload_files = self.load_test_data_files(filenames)
data = OrderedMultiDict()
data.add("userName", username)
for filename in upload_files:
data.add("map", (BytesIO(upload_files[filename]), filename))
response = self.perform_upload(data)
assert response.status_code == 200
assert response.mimetype == "text/html"
# Verify that none of the existing map files were overwritten
for filename in filenames:
self.verify_file_content(self.uploads_dir, filename, existing_file_content)
self.verify_flash_message_by_key(message_key, response.data)
mock_logger.warn.assert_called_with(self.get_log_message(message_key), username)
@patch("mrt_file_server.utils.log_utils.log_adapter")
@pytest.mark.parametrize("filename", [
("1510.dat"), # Does not start with map_
("map_.dat"), # No Map ID
("map_-1.dat"), # Negative Map ID
("map_1510a.dat"), # Invalid Map ID
("map_1510.png") # Wrong extension
])
def test_upload_with_invalid_filename_should_fail(self, mock_logger, filename):
username = "Frumple"
message_key = "MAP_UPLOAD_FILENAME_INVALID"
upload_file_content = self.load_test_data_file(filename)
data = OrderedMultiDict()
data.add("userName", username)
data.add("map", (BytesIO(upload_file_content), filename))
response = self.perform_upload(data)
assert response.status_code == 200
assert response.mimetype == "text/html"
# Verify that the new map file was NOT uploaded
uploaded_file_path = os.path.join(self.uploads_dir, filename)
assert not os.path.isfile(uploaded_file_path)
self.verify_flash_message_by_key(message_key, response.data, filename)
mock_logger.warn.assert_called_with(self.get_log_message(message_key), filename, username)
@patch("mrt_file_server.utils.log_utils.log_adapter")
@pytest.mark.parametrize("filename", [
("idcounts.dat"),
("raids.dat"),
("scoreboard.dat"),
("villages.dat")
])
def test_upload_with_invalid_filename_and_file_already_exists_should_fail(self, mock_logger, filename):
username = "Frumple"
message_key = "MAP_UPLOAD_FILENAME_INVALID"
self.copy_test_data_file(filename, self.uploads_dir)
existing_file_content = self.load_test_data_file(filename)
# Upload a valid map file, but rename it to the existing filename
upload_file_content = self.load_test_data_file("map_1500.dat")
data = OrderedMultiDict()
data.add("userName", username)
data.add("map", (BytesIO(upload_file_content), filename))
response = self.perform_upload(data)
assert response.status_code == 200
assert response.mimetype == "text/html"
# Verify that the existing map file was NOT overwritten
self.verify_file_content(self.uploads_dir, filename, existing_file_content)
self.verify_flash_message_by_key(message_key, response.data, filename)
mock_logger.warn.assert_called_with(self.get_log_message(message_key), filename, username)
@patch("mrt_file_server.utils.log_utils.log_adapter")
@pytest.mark.parametrize("filename, message_key", [
("map_1520.dat", "MAP_UPLOAD_FILE_TOO_LARGE"), # File size too large
("map_1000.dat", "MAP_UPLOAD_MAP_ID_OUT_OF_RANGE"), # Map ID too low
("map_2001.dat", "MAP_UPLOAD_MAP_ID_OUT_OF_RANGE"), # Map ID too high
("map_1530.dat", "MAP_UPLOAD_MAP_FORMAT_INVALID"), # idcounts.dat
("map_1531.dat", "MAP_UPLOAD_MAP_FORMAT_INVALID"), # Player .dat file
("map_1532.dat", "MAP_UPLOAD_MAP_FORMAT_INVALID"), # Empty .dat file
("map_1533.dat", "MAP_UPLOAD_MAP_FORMAT_INVALID"), # Without dimension tag
("map_1534.dat", "MAP_UPLOAD_MAP_FORMAT_INVALID"), # Without locked tag
("map_1535.dat", "MAP_UPLOAD_MAP_FORMAT_INVALID"), # Without colors tag
("map_1536.dat", "MAP_UPLOAD_MAP_FORMAT_INVALID"), # Without scale tag
("map_1537.dat", "MAP_UPLOAD_MAP_FORMAT_INVALID"), # Without trackingPosition tag
("map_1538.dat", "MAP_UPLOAD_MAP_FORMAT_INVALID"), # Without xCenter tag
("map_1539.dat", "MAP_UPLOAD_MAP_FORMAT_INVALID"), # Without zCenter tag
("map_1540.dat", "MAP_UPLOAD_MAP_FORMAT_INVALID") # PNG with extension renamed to .dat
])
def test_upload_with_invalid_file_should_fail(self, mock_logger, filename, message_key):
username = "Frumple"
existing_filename = "existing_unlocked.dat"
self.copy_test_data_file(existing_filename, self.uploads_dir, filename)
existing_file_content = self.load_test_data_file(existing_filename)
upload_file_content = self.load_test_data_file(filename)
data = OrderedMultiDict()
data.add("userName", username)
data.add("map", (BytesIO(upload_file_content), filename))
response = self.perform_upload(data)
assert response.status_code == 200
assert response.mimetype == "text/html"
# Verify that the existing map file was NOT overwritten
self.verify_file_content(self.uploads_dir, filename, existing_file_content)
self.verify_flash_message_by_key(message_key, response.data, filename)
mock_logger.warn.assert_called_with(self.get_log_message(message_key), filename, username)
@patch("mrt_file_server.utils.log_utils.log_adapter")
def test_upload_where_existing_file_is_already_locked_should_fail(self, mock_logger):
username = "Frumple"
filename = "map_1500.dat"
existing_filename = "existing_locked.dat"
message_key = "MAP_UPLOAD_EXISTING_MAP_LOCKED"
self.copy_test_data_file(existing_filename, self.uploads_dir, filename)
existing_file_content = self.load_test_data_file(existing_filename)
upload_file_content = self.load_test_data_file(filename)
data = OrderedMultiDict()
data.add("userName", username)
data.add("map", (BytesIO(upload_file_content), filename))
response = self.perform_upload(data)
assert response.status_code == 200
assert response.mimetype == "text/html"
# Verify that the existing map file was NOT overwritten
self.verify_file_content(self.uploads_dir, filename, existing_file_content)
self.verify_flash_message_by_key(message_key, response.data, filename)
mock_logger.warn.assert_called_with(self.get_log_message(message_key), filename, username)
@patch("mrt_file_server.utils.log_utils.log_adapter")
def test_upload_same_file_twice_should_fail(self, mock_logger):
username = "Frumple"
filename = "map_1500.dat"
message_key = "MAP_UPLOAD_EXISTING_MAP_LOCKED"
self.copy_test_data_file("existing_unlocked.dat", self.uploads_dir, filename)
original_file_content = self.load_test_data_file(filename)
first_data = OrderedMultiDict()
first_data.add("userName", username)
first_data.add("map", (BytesIO(original_file_content), filename))
first_response = self.perform_upload(first_data)
second_data = OrderedMultiDict()
second_data.add("userName", username)
second_data.add("map", (BytesIO(original_file_content), filename))
second_response = self.perform_upload(second_data)
assert second_response.status_code == 200
assert second_response.mimetype == "text/html"
# Verify that the new map file was uploaded and locked,
# but the "existing file locked" error message appears after second upload
expected_nbt_file = self.load_test_data_nbt_file(filename)
uploaded_nbt_file = self.load_uploaded_nbt_file(filename)
self.verify_matching_nbt_values(expected_nbt_file, uploaded_nbt_file)
assert get_nbt_map_value(uploaded_nbt_file, "locked") == 1
self.verify_flash_message_by_key(message_key, second_response.data, filename)
mock_logger.warn.assert_called_with(self.get_log_message(message_key), filename, username)
# Helper Functions
def perform_upload(self, data):
return self.client.post("/map/upload", content_type = "multipart/form-data", data = data)
def reset_map_uploads_dir(self):
self.remove_files(self.uploads_dir, "dat")
self.copy_test_data_file("idcounts.dat", self.uploads_dir)
def load_test_data_nbt_file(self, filename):
return load_compressed_nbt_file(os.path.join(self.TEST_DATA_DIR, filename))
def load_uploaded_nbt_file(self, filename):
return load_compressed_nbt_file(os.path.join(self.uploads_dir, filename))
def verify_matching_nbt_values(self, expected_nbt_file, actual_nbt_file):
assert get_nbt_map_value(actual_nbt_file, "scale") == get_nbt_map_value(expected_nbt_file, "scale")
assert get_nbt_map_value(actual_nbt_file, "dimension") == get_nbt_map_value(expected_nbt_file, "dimension")
assert get_nbt_map_value(actual_nbt_file, "trackingPosition") == get_nbt_map_value(expected_nbt_file, "trackingPosition")
assert get_nbt_map_value(actual_nbt_file, "unlimitedTracking") == get_nbt_map_value(expected_nbt_file, "unlimitedTracking")
assert get_nbt_map_value(actual_nbt_file, "xCenter") == get_nbt_map_value(expected_nbt_file, "xCenter")
assert get_nbt_map_value(actual_nbt_file, "zCenter") == get_nbt_map_value(expected_nbt_file, "zCenter")
assert get_nbt_map_value(actual_nbt_file, "banners") == get_nbt_map_value(expected_nbt_file, "banners")
assert get_nbt_map_value(actual_nbt_file, "frames") == get_nbt_map_value(expected_nbt_file, "frames")
assert get_nbt_map_value(actual_nbt_file, "colors") == get_nbt_map_value(expected_nbt_file, "colors")
|
|
# Copyright 2007 by Tiago Antao <[email protected]>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Utility functions for working with FDist (DEPRECATED)."""
from Bio.PopGen.GenePop import FileParser
import Bio.PopGen.FDist
# Quite a few more utility functions could be added (like remove population,
# add locus, etc.). The recommended strategy is to convert back and forth
# from/to GenePop and use the GenePop utilities.
def convert_genepop_to_fdist(gp_rec, report_pops=None):
"""Converts a GenePop record to a FDist one.
Parameters:
gp_rec - Genepop Record (either standard or big)
Returns:
FDist record.
"""
if hasattr(gp_rec, "populations"):
return _convert_genepop_to_fdist(gp_rec)
else:
return _convert_genepop_to_fdist_big(gp_rec, report_pops)
def _convert_genepop_to_fdist(gp_rec):
"""Converts a standard GenePop record to a FDist one.
Parameters:
gp_rec - Genepop Record (Standard)
Returns:
FDist record.
"""
fd_rec = Bio.PopGen.FDist.Record()
fd_rec.data_org = 0
fd_rec.num_loci = len(gp_rec.loci_list)
fd_rec.num_pops = len(gp_rec.populations)
for lc_i in range(len(gp_rec.loci_list)):
alleles = []
pop_data = []
for pop_i in range(len(gp_rec.populations)):
for indiv in gp_rec.populations[pop_i]:
for al in indiv[1][lc_i]:
if al is not None and al not in alleles:
alleles.append(al)
alleles.sort() # Dominance requires this
        # Second pass over the populations: count each allele per population.
for pop_i in range(len(gp_rec.populations)):
allele_counts = {}
for indiv in gp_rec.populations[pop_i]:
for al in indiv[1][lc_i]:
if al is not None:
count = allele_counts.get(al, 0)
allele_counts[al] = count + 1
allele_array = [] # We need the same order as in alleles
for allele in alleles:
allele_array.append(allele_counts.get(allele, 0))
pop_data.append(allele_array)
fd_rec.loci_data.append((len(alleles), pop_data))
return fd_rec
def _convert_genepop_to_fdist_big(gp_rec, report_pops=None):
"""Converts a big GenePop record to a FDist one.
Parameters:
gp_rec - Genepop Record (Big)
Returns:
FDist record.
"""
fd_rec = Bio.PopGen.FDist.Record()
fd_rec.data_org = 1
fd_rec.num_loci = len(gp_rec.loci_list)
num_loci = len(gp_rec.loci_list)
loci = []
for i in range(num_loci):
loci.append(set())
pops = []
work_rec = FileParser.read(gp_rec.fname)
lParser = work_rec.get_individual()
def init_pop():
my_pop = []
for i in range(num_loci):
my_pop.append({})
return my_pop
curr_pop = init_pop()
num_pops = 1
if report_pops:
report_pops(num_pops)
while lParser:
if lParser is not True:
for loci_pos in range(num_loci):
for al in lParser[1][loci_pos]:
if al is not None:
loci[loci_pos].add(al)
curr_pop[loci_pos][al] = curr_pop[loci_pos].get(al, 0) + 1
else:
pops.append(curr_pop)
num_pops += 1
if report_pops:
report_pops(num_pops)
curr_pop = init_pop()
lParser = work_rec.get_individual()
work_rec._handle.close() # TODO - Needs a proper fix
pops.append(curr_pop)
fd_rec.num_pops = num_pops
for loci_pos in range(num_loci):
alleles = sorted(loci[loci_pos])
loci_rec = [len(alleles), []]
for pop in pops:
pop_rec = []
for allele in alleles:
pop_rec.append(pop[loci_pos].get(allele, 0))
loci_rec[1].append(pop_rec)
fd_rec.loci_data.append(tuple(loci_rec))
return fd_rec
def _convert_genepop_to_fdist_big_old(gp_rec, report_loci=None):
"""Converts a big GenePop record to a FDist one.
Parameters:
gp_rec - Genepop Record (Big)
Returns:
FDist record.
"""
fd_rec = Bio.PopGen.FDist.Record()
def countPops(rec):
f2 = FileParser.read(rec.fname)
popCnt = 1
while f2.skip_population():
popCnt += 1
return popCnt
fd_rec.data_org = 0
fd_rec.num_loci = len(gp_rec.loci_list)
work_rec0 = FileParser.read(gp_rec.fname)
fd_rec.num_pops = countPops(work_rec0)
num_loci = len(gp_rec.loci_list)
for lc_i in range(num_loci):
if report_loci:
report_loci(lc_i, num_loci)
work_rec = FileParser.read(gp_rec.fname)
work_rec2 = FileParser.read(gp_rec.fname)
alleles = []
pop_data = []
lParser = work_rec.get_individual()
while lParser:
if lParser is not True:
for al in lParser[1][lc_i]:
if al is not None and al not in alleles:
alleles.append(al)
lParser = work_rec.get_individual()
        # Parse the file again (second pass): count each allele per population.
alleles.sort()
def process_pop(pop_data, alleles, allele_counts):
allele_array = [] # We need the same order as in alleles
for allele in alleles:
allele_array.append(allele_counts.get(allele, 0))
pop_data.append(allele_array)
lParser = work_rec2.get_individual()
allele_counts = {}
for allele in alleles:
allele_counts[allele] = 0
allele_counts[None] = 0
while lParser:
if lParser is True:
process_pop(pop_data, alleles, allele_counts)
allele_counts = {}
for allele in alleles:
allele_counts[allele] = 0
allele_counts[None] = 0
else:
for al in lParser[1][lc_i]:
allele_counts[al] += 1
lParser = work_rec2.get_individual()
process_pop(pop_data, alleles, allele_counts)
fd_rec.loci_data.append((len(alleles), pop_data))
return fd_rec
def approximate_fst(desired_fst, simulated_fst, parameter_fst,
max_run_fst=1, min_run_fst=0, limit=0.005):
"""Calculates the next Fst attempt in order to approximate a
desired Fst.
"""
if abs(simulated_fst - desired_fst) < limit:
return parameter_fst, max_run_fst, min_run_fst
if simulated_fst > desired_fst:
max_run_fst = parameter_fst
next_parameter_fst = (min_run_fst + parameter_fst) / 2
else:
min_run_fst = parameter_fst
next_parameter_fst = (max_run_fst + parameter_fst) / 2
return next_parameter_fst, max_run_fst, min_run_fst
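# Illustrative sketch (not part of the original module): approximate_fst is
# meant to drive a bisection-style loop around repeated simulations. The
# run_fdist_simulation helper below is a placeholder standing in for a real
# simulation run; only approximate_fst itself comes from this module.
if __name__ == "__main__":
    def run_fdist_simulation(parameter_fst):
        # Placeholder: pretend the simulated Fst comes out slightly below the
        # parameter value that was requested.
        return parameter_fst * 0.9
    desired_fst = 0.1
    parameter_fst = 0.1
    max_run_fst, min_run_fst = 1.0, 0.0
    for _ in range(20):
        simulated_fst = run_fdist_simulation(parameter_fst)
        if abs(simulated_fst - desired_fst) < 0.005:
            break
        parameter_fst, max_run_fst, min_run_fst = approximate_fst(
            desired_fst, simulated_fst, parameter_fst,
            max_run_fst, min_run_fst, limit=0.005)
    print("parameter Fst that approximates the desired Fst: %.4f" % parameter_fst)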
|
|
__author__ = 'chris'
import json
import os
from txrestapi.resource import APIResource
from txrestapi.methods import GET, POST, DELETE
from twisted.web import server
from twisted.web.resource import NoResource
from twisted.web import http
from twisted.internet import defer, reactor
from binascii import unhexlify
from constants import DATA_FOLDER
from twisted.protocols.basic import FileSender
from protos.countries import CountryCode
from protos import objects
from keyutils.keys import KeyChain
from dht.utils import digest
from market.profile import Profile
from market.contracts import Contract
from collections import OrderedDict
DEFAULT_RECORDS_COUNT = 20
DEFAULT_RECORDS_OFFSET = 0
class OpenBazaarAPI(APIResource):
"""
This RESTful API allows clients to pull relevant data from the
OpenBazaar daemon for use in a GUI or other application.
"""
def __init__(self, mserver, kserver, protocol):
self.mserver = mserver
self.kserver = kserver
self.protocol = protocol
self.db = mserver.db
self.keychain = KeyChain(self.db)
APIResource.__init__(self)
@GET('^/api/v1/get_image')
def get_image(self, request):
@defer.inlineCallbacks
def _showImage(resp=None):
@defer.inlineCallbacks
def _setContentDispositionAndSend(file_path, extension, content_type):
request.setHeader('content-disposition', 'filename="%s.%s"' % (file_path, extension))
request.setHeader('content-type', content_type)
f = open(file_path, "rb")
yield FileSender().beginFileTransfer(f, request)
f.close()
defer.returnValue(0)
if os.path.exists(image_path):
yield _setContentDispositionAndSend(image_path, ".jpg", "image/jpeg")
else:
request.setResponseCode(http.NOT_FOUND)
request.write("No such image '%s'" % request.path)
request.finish()
if "hash" in request.args:
if self.db.HashMap().get_file(unhexlify(request.args["hash"][0])) is not None:
image_path = self.db.HashMap().get_file(unhexlify(request.args["hash"][0]))
else:
image_path = DATA_FOLDER + "cache/" + request.args["hash"][0]
if not os.path.exists(image_path) and "guid" in request.args:
def get_node(node):
if node is not None:
self.mserver.get_image(node, unhexlify(request.args["hash"][0])).addCallback(_showImage)
else:
_showImage()
self.kserver.resolve(unhexlify(request.args["guid"][0])).addCallback(get_node)
else:
_showImage()
else:
request.write(NoResource().render(request))
request.finish()
return server.NOT_DONE_YET
@GET('^/api/v1/profile')
def get_profile(self, request):
def parse_profile(profile):
if profile is not None:
profile_json = {
"profile": {
"name": profile.name,
"location": str(CountryCode.Name(profile.location)),
"encryption_key": profile.encryption_key.public_key.encode("hex"),
"nsfw": profile.nsfw,
"vendor": profile.vendor,
"moderator": profile.moderator,
"handle": profile.handle,
"about": profile.about,
"website": profile.website,
"email": profile.email,
"primary_color": profile.primary_color,
"secondary_color": profile.secondary_color,
"background_color": profile.background_color,
"text_color": profile.text_color,
"pgp_key": profile.pgp_key.public_key,
"avatar_hash": profile.avatar_hash.encode("hex"),
"header_hash": profile.header_hash.encode("hex"),
"social_accounts": {}
}
}
if "guid" in request.args:
profile_json["profile"]["guid"] = request.args["guid"][0]
else:
profile_json["profile"]["guid"] = self.keychain.guid.encode("hex")
for account in profile.social:
profile_json["profile"]["social_accounts"][str(
objects.Profile.SocialAccount.SocialType.Name(account.type)).lower()] = {
"username": account.username,
"proof_url": account.proof_url
}
request.setHeader('content-type', "application/json")
request.write(json.dumps(profile_json, indent=4))
request.finish()
else:
request.write(json.dumps({}))
request.finish()
if "guid" in request.args:
def get_node(node):
if node is not None:
self.mserver.get_profile(node).addCallback(parse_profile)
else:
request.write(json.dumps({}))
request.finish()
self.kserver.resolve(unhexlify(request.args["guid"][0])).addCallback(get_node)
else:
parse_profile(Profile(self.db).get())
return server.NOT_DONE_YET
@GET('^/api/v1/get_listings')
def get_listings(self, request):
def parse_listings(listings):
if listings is not None:
response = {"listings": []}
for l in listings.listing:
listing_json = {
"title": l.title,
"contract_hash": l.contract_hash.encode("hex"),
"thumbnail_hash": l.thumbnail_hash.encode("hex"),
"category": l.category,
"price": l.price,
"currency_code": l.currency_code,
"nsfw": l.nsfw,
"origin": str(CountryCode.Name(l.origin)),
"ships_to": []
}
for country in l.ships_to:
listing_json["ships_to"].append(str(CountryCode.Name(country)))
response["listings"].append(listing_json)
request.setHeader('content-type', "application/json")
request.write(json.dumps(response, indent=4))
request.finish()
else:
request.write(json.dumps({}))
request.finish()
if "guid" in request.args:
def get_node(node):
if node is not None:
self.mserver.get_listings(node).addCallback(parse_listings)
else:
request.write(json.dumps({}))
request.finish()
self.kserver.resolve(unhexlify(request.args["guid"][0])).addCallback(get_node)
else:
ser = self.db.ListingsStore().get_proto()
if ser is not None:
l = objects.Listings()
l.ParseFromString(ser)
parse_listings(l)
else:
parse_listings(None)
return server.NOT_DONE_YET
@GET('^/api/v1/get_followers')
def get_followers(self, request):
def parse_followers(followers):
if followers is not None:
response = {"followers": []}
for f in followers.followers:
follower_json = {
"guid": f.guid.encode("hex"),
"handle": f.metadata.handle,
"name": f.metadata.name,
"avatar_hash": f.metadata.avatar_hash.encode("hex"),
"nsfw": f.metadata.nsfw
}
response["followers"].append(follower_json)
request.setHeader('content-type', "application/json")
request.write(json.dumps(response, indent=4))
request.finish()
else:
request.write(json.dumps({}))
request.finish()
if "guid" in request.args:
def get_node(node):
if node is not None:
self.mserver.get_followers(node).addCallback(parse_followers)
else:
request.write(json.dumps({}))
request.finish()
self.kserver.resolve(unhexlify(request.args["guid"][0])).addCallback(get_node)
else:
ser = self.db.FollowData().get_followers()
if ser is not None:
f = objects.Followers()
f.ParseFromString(ser)
parse_followers(f)
else:
parse_followers(None)
return server.NOT_DONE_YET
@GET('^/api/v1/get_following')
def get_following(self, request):
def parse_following(following):
if following is not None:
response = {"following": []}
for f in following.users:
user_json = {
"guid": f.guid.encode("hex"),
"handle": f.metadata.handle,
"name": f.metadata.name,
"avatar_hash": f.metadata.avatar_hash.encode("hex"),
"nsfw": f.metadata.nsfw
}
response["following"].append(user_json)
request.setHeader('content-type', "application/json")
request.write(json.dumps(response, indent=4))
request.finish()
else:
request.write(json.dumps({}))
request.finish()
if "guid" in request.args:
def get_node(node):
if node is not None:
self.mserver.get_following(node).addCallback(parse_following)
else:
request.write(json.dumps({}))
request.finish()
self.kserver.resolve(unhexlify(request.args["guid"][0])).addCallback(get_node)
else:
ser = self.db.FollowData().get_following()
if ser is not None:
f = objects.Following()
f.ParseFromString(ser)
parse_following(f)
else:
parse_following(None)
return server.NOT_DONE_YET
@POST('^/api/v1/follow')
def follow(self, request):
if "guid" in request.args:
def get_node(node):
if node is not None:
self.mserver.follow(node)
request.write(json.dumps({"success": True}))
request.finish()
else:
request.write(json.dumps({"success": False, "reason": "could not resolve guid"}, indent=4))
request.finish()
self.kserver.resolve(unhexlify(request.args["guid"][0])).addCallback(get_node)
return server.NOT_DONE_YET
@POST('^/api/v1/unfollow')
def unfollow(self, request):
if "guid" in request.args:
def get_node(node):
if node is not None:
self.mserver.unfollow(node)
request.write(json.dumps({"success": True}))
request.finish()
else:
request.write(json.dumps({"success": False, "reason": "could not resolve guid"}, indent=4))
request.finish()
self.kserver.resolve(unhexlify(request.args["guid"][0])).addCallback(get_node)
return server.NOT_DONE_YET
# pylint: disable=R0201
@POST('^/api/v1/profile')
def update_profile(self, request):
try:
p = Profile(self.db)
if not p.get().encryption_key \
and "name" not in request.args \
and "location" not in request.args:
request.write(json.dumps({"success": False, "reason": "name or location not included"}, indent=4))
request.finish()
return False
u = objects.Profile()
if "name" in request.args:
u.name = request.args["name"][0]
if "location" in request.args:
# This needs to be formatted. Either here or from the UI.
u.location = CountryCode.Value(request.args["location"][0].upper())
if "handle" in request.args:
u.handle = request.args["handle"][0]
if "about" in request.args:
u.about = request.args["about"][0]
if "short_description" in request.args:
u.short_description = request.args["short_description"][0]
if "nsfw" in request.args:
u.nsfw = True
if "vendor" in request.args:
u.vendor = True
if "moderator" in request.args:
u.moderator = True
if "website" in request.args:
u.website = request.args["website"][0]
if "email" in request.args:
u.email = request.args["email"][0]
if "primary_color" in request.args:
u.primary_color = int(request.args["primary_color"][0])
if "secondary_color" in request.args:
u.secondary_color = int(request.args["secondary_color"][0])
if "background_color" in request.args:
u.background_color = int(request.args["background_color"][0])
if "text_color" in request.args:
u.text_color = int(request.args["text_color"][0])
if "avatar" in request.args:
with open(DATA_FOLDER + "store/avatar", 'wb') as outfile:
outfile.write(request.args["avatar"][0])
avatar_hash = digest(request.args["avatar"][0])
self.db.HashMap().insert(avatar_hash, DATA_FOLDER + "store/avatar")
u.avatar_hash = avatar_hash
if "header" in request.args:
with open(DATA_FOLDER + "store/header", 'wb') as outfile:
outfile.write(request.args["header"][0])
header_hash = digest(request.args["header"][0])
self.db.HashMap().insert(header_hash, DATA_FOLDER + "store/header")
u.header_hash = header_hash
if "pgp_key" in request.args and "signature" in request.args:
p.add_pgp_key(request.args["pgp_key"][0], request.args["signature"][0],
self.keychain.guid.encode("hex"))
enc = u.PublicKey()
enc.public_key = self.keychain.encryption_pubkey
enc.signature = self.keychain.signing_key.sign(enc.public_key)[:64]
u.encryption_key.MergeFrom(enc)
p.update(u)
request.write(json.dumps({"success": True}))
request.finish()
return server.NOT_DONE_YET
except Exception, e:
request.write(json.dumps({"success": False, "reason": e.message}, indent=4))
request.finish()
return server.NOT_DONE_YET
@POST('^/api/v1/social_accounts')
def add_social_account(self, request):
try:
p = Profile(self.db)
if "account_type" in request.args and "username" in request.args and "proof" in request.args:
p.add_social_account(request.args["account_type"][0], request.args["username"][0],
request.args["proof"][0])
request.write(json.dumps({"success": True}))
request.finish()
return server.NOT_DONE_YET
except Exception, e:
request.write(json.dumps({"success": False, "reason": e.message}, indent=4))
request.finish()
return server.NOT_DONE_YET
@DELETE('^/api/v1/social_accounts')
def delete_social_account(self, request):
try:
p = Profile(self.db)
if "account_type" in request.args:
p.remove_social_account(request.args["account_type"][0])
request.write(json.dumps({"success": True}))
request.finish()
return server.NOT_DONE_YET
except Exception, e:
request.write(json.dumps({"success": False, "reason": e.message}, indent=4))
request.finish()
return server.NOT_DONE_YET
@GET('^/api/v1/contracts')
def get_contract(self, request):
def parse_contract(contract):
if contract is not None:
request.setHeader('content-type', "application/json")
request.write(json.dumps(contract, indent=4))
request.finish()
else:
request.write(json.dumps({}))
request.finish()
if "id" in request.args:
if "guid" in request.args:
def get_node(node):
if node is not None:
self.mserver.get_contract(node, unhexlify(request.args["id"][0]))\
.addCallback(parse_contract)
else:
request.write(json.dumps({}))
request.finish()
try:
with open(DATA_FOLDER + "cache/" + request.args["id"][0], "r") as filename:
contract = json.loads(filename.read(), object_pairs_hook=OrderedDict)
parse_contract(contract)
except Exception:
self.kserver.resolve(unhexlify(request.args["guid"][0])).addCallback(get_node)
else:
try:
with open(self.db.HashMap().get_file(unhexlify(request.args["id"][0])), "r") as filename:
contract = json.loads(filename.read(), object_pairs_hook=OrderedDict)
parse_contract(contract)
except Exception:
parse_contract(None)
else:
request.write(json.dumps({}))
request.finish()
return server.NOT_DONE_YET
@POST('^/api/v1/contracts')
def set_contract(self, request):
try:
if "options" in request.args:
options = {}
for option in request.args["options"]:
options[option] = request.args[option]
c = Contract(self.db)
c.create(
str(request.args["expiration_date"][0]),
request.args["metadata_category"][0],
request.args["title"][0],
request.args["description"][0],
request.args["currency_code"][0],
request.args["price"][0],
request.args["process_time"][0],
True if "nsfw" in request.args else False,
shipping_origin=request.args["shipping_origin"][0] if "shipping_origin" in request.args else None,
shipping_regions=request.args["ships_to"] if "ships_to" in request.args else None,
est_delivery_domestic=request.args["est_delivery_domestic"][0]
if "est_delivery_domestic" in request.args else None,
est_delivery_international=request.args["est_delivery_international"][0]
if "est_delivery_international" in request.args else None,
terms_conditions=request.args["terms_conditions"][0]
if request.args["terms_conditions"][0] is not "" else None,
returns=request.args["returns"][0] if request.args["returns"][0] is not "" else None,
shipping_currency_code=request.args["shipping_currency_code"][0],
shipping_domestic=request.args["shipping_domestic"][0],
shipping_international=request.args["shipping_international"][0],
keywords=request.args["keywords"] if "keywords" in request.args else None,
                category=request.args["category"][0] if request.args["category"][0] != "" else None,
                condition=request.args["condition"][0] if request.args["condition"][0] != "" else None,
                sku=request.args["sku"][0] if request.args["sku"][0] != "" else None,
images=request.args["images"],
free_shipping=True if "free_shipping" in request.args else False,
options=options if "options" in request.args else None,
moderators=request.args["moderators"] if "moderators" in request.args else None)
for keyword in request.args["keywords"]:
self.kserver.set(digest(keyword.lower()), c.get_contract_id(),
self.kserver.node.getProto().SerializeToString())
request.write(json.dumps({"success": True}))
request.finish()
return server.NOT_DONE_YET
except Exception, e:
request.write(json.dumps({"success": False, "reason": e.message}, indent=4))
request.finish()
return server.NOT_DONE_YET
@DELETE('^/api/v1/contracts')
def delete_contract(self, request):
try:
if "id" in request.args:
c = Contract(self.db, hash_value=unhexlify(request.args["id"][0]))
for keyword in c.contract["vendor_offer"]["listing"]["item"]["keywords"]:
self.kserver.delete(keyword.lower(), c.get_contract_id(),
self.keychain.signing_key.sign(c.get_contract_id())[:64])
c.delete()
request.write(json.dumps({"success": True}))
request.finish()
return server.NOT_DONE_YET
except Exception, e:
request.write(json.dumps({"success": False, "reason": e.message}, indent=4))
request.finish()
return server.NOT_DONE_YET
@GET('^/api/v1/shutdown')
def shutdown(self, request):
self.protocol.shutdown()
reactor.stop()
@POST('^/api/v1/make_moderator')
def make_moderator(self, request):
try:
self.mserver.make_moderator()
request.write(json.dumps({"success": True}))
request.finish()
return server.NOT_DONE_YET
except Exception, e:
request.write(json.dumps({"success": False, "reason": e.message}, indent=4))
request.finish()
return server.NOT_DONE_YET
@POST('^/api/v1/unmake_moderator')
def unmake_moderator(self, request):
try:
self.mserver.unmake_moderator()
request.write(json.dumps({"success": True}))
request.finish()
return server.NOT_DONE_YET
except Exception, e:
request.write(json.dumps({"success": False, "reason": e.message}, indent=4))
request.finish()
return server.NOT_DONE_YET
@POST('^/api/v1/purchase_contract')
def purchase_contract(self, request):
try:
def handle_response(resp, contract):
if resp:
contract.await_funding(self.protocol.ws, self.protocol.blockchain, resp)
request.write(json.dumps({"success": True, "payment_address": payment_address}, indent=4))
request.finish()
else:
request.write(json.dumps({"success": False, "reason": "seller rejected contract"}, indent=4))
request.finish()
options = None
if "options" in request.args:
options = {}
for option in request.args["options"]:
options[option] = request.args[option]
c = Contract(self.db, hash_value=unhexlify(request.args["id"][0]), testnet=self.protocol.testnet)
payment_address = c.\
add_purchase_info(request.args["quantity"][0],
request.args["ship_to"][0] if "ship_to" in request.args else None,
request.args["address"][0] if "address" in request.args else None,
request.args["city"][0] if "city" in request.args else None,
request.args["state"][0] if "state" in request.args else None,
request.args["postal_code"][0] if "postal_code" in request.args else None,
request.args["country"][0] if "country" in request.args else None,
request.args["moderator"][0] if "moderator" in request.args else None,
options)
def get_node(node):
if node is not None:
self.mserver.purchase(node, c).addCallback(handle_response, c)
else:
request.write("False")
request.finish()
seller_guid = unhexlify(c.contract["vendor_offer"]["listing"]["id"]["guid"])
self.kserver.resolve(seller_guid).addCallback(get_node)
return server.NOT_DONE_YET
except Exception, e:
request.write(json.dumps({"success": False, "reason": e.message}, indent=4))
request.finish()
return server.NOT_DONE_YET
@POST('^/api/v1/confirm_order')
def confirm_order(self, request):
try:
def respond(success):
if success:
request.write(json.dumps({"success": True}))
request.finish()
else:
request.write(json.dumps({"success": False, "reason": "Failed to send order confirmation"}))
request.finish()
file_path = DATA_FOLDER + "store/listings/in progress/" + request.args["id"][0] + ".json"
with open(file_path, 'r') as filename:
order = json.load(filename, object_pairs_hook=OrderedDict)
c = Contract(self.db, contract=order, testnet=self.protocol.testnet)
c.add_order_confirmation(request.args["payout_address"][0],
comments=request.args["comments"][0] if "comments" in request.args else None,
shipper=request.args["shipper"][0] if "shipper" in request.args else None,
tracking_number=request.args["tracking_number"][0]
if "tracking_number" in request.args else None,
est_delivery=request.args["est_delivery"][0]
if "est_delivery" in request.args else None,
url=request.args["url"][0] if "url" in request.args else None,
password=request.args["password"][0] if "password" in request.args else None)
guid = c.contract["buyer_order"]["order"]["id"]["guid"]
self.mserver.confirm_order(guid, c).addCallback(respond)
return server.NOT_DONE_YET
except Exception, e:
request.write(json.dumps({"success": False, "reason": e.message}, indent=4))
request.finish()
return server.NOT_DONE_YET
@POST('^/api/v1/upload_image')
def upload_image(self, request):
try:
ret = []
for image in request.args["image"]:
hash_value = digest(image).encode("hex")
with open(DATA_FOLDER + "store/media/" + hash_value, 'w') as outfile:
outfile.write(image)
self.db.HashMap().insert(digest(image), DATA_FOLDER + "store/media/" + hash_value)
ret.append(hash_value)
request.write(json.dumps({"success": True, "image_hashes": ret}, indent=4))
request.finish()
return server.NOT_DONE_YET
except Exception, e:
request.write(json.dumps({"success": False, "reason": e.message}, indent=4))
request.finish()
return server.NOT_DONE_YET
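# Illustrative client-side sketch (not part of the original module): querying
# a locally running daemon over HTTP with the Python 2 standard library. The
# host and port below are assumptions; substitute whatever address the REST
# API is actually bound to.
if __name__ == "__main__":
    import urllib2
    base_url = "http://127.0.0.1:18469"  # assumed daemon address
    response = urllib2.urlopen(base_url + "/api/v1/profile")
    print(json.loads(response.read()))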
|
|
# This file is part of beets.
# Copyright 2016, Malte Ried.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the `filefilter` plugin.
"""
import os
import shutil
import unittest
from test import _common
from test.helper import capture_log
from test.test_importer import ImportHelper
from beets import config
from mediafile import MediaFile
from beets.util import displayable_path, bytestring_path
from beetsplug.filefilter import FileFilterPlugin
class FileFilterPluginTest(unittest.TestCase, ImportHelper):
def setUp(self):
self.setup_beets()
self.__create_import_dir(2)
self._setup_import_session()
config['import']['pretend'] = True
def tearDown(self):
self.teardown_beets()
def __copy_file(self, dest_path, metadata):
# Copy files
resource_path = os.path.join(_common.RSRC, b'full.mp3')
shutil.copy(resource_path, dest_path)
medium = MediaFile(dest_path)
# Set metadata
for attr in metadata:
setattr(medium, attr, metadata[attr])
medium.save()
def __create_import_dir(self, count):
self.import_dir = os.path.join(self.temp_dir, b'testsrcdir')
if os.path.isdir(self.import_dir):
shutil.rmtree(self.import_dir)
self.artist_path = os.path.join(self.import_dir, b'artist')
self.album_path = os.path.join(self.artist_path, b'album')
self.misc_path = os.path.join(self.import_dir, b'misc')
os.makedirs(self.album_path)
os.makedirs(self.misc_path)
metadata = {
'artist': 'Tag Artist',
'album': 'Tag Album',
'albumartist': None,
'mb_trackid': None,
'mb_albumid': None,
'comp': None,
}
self.album_paths = []
for i in range(count):
metadata['track'] = i + 1
metadata['title'] = 'Tag Title Album %d' % (i + 1)
track_file = bytestring_path('%02d - track.mp3' % (i + 1))
dest_path = os.path.join(self.album_path, track_file)
self.__copy_file(dest_path, metadata)
self.album_paths.append(dest_path)
self.artist_paths = []
metadata['album'] = None
for i in range(count):
metadata['track'] = i + 10
metadata['title'] = 'Tag Title Artist %d' % (i + 1)
track_file = bytestring_path('track_%d.mp3' % (i + 1))
dest_path = os.path.join(self.artist_path, track_file)
self.__copy_file(dest_path, metadata)
self.artist_paths.append(dest_path)
self.misc_paths = []
for i in range(count):
metadata['artist'] = 'Artist %d' % (i + 42)
metadata['track'] = i + 5
metadata['title'] = 'Tag Title Misc %d' % (i + 1)
track_file = bytestring_path('track_%d.mp3' % (i + 1))
dest_path = os.path.join(self.misc_path, track_file)
self.__copy_file(dest_path, metadata)
self.misc_paths.append(dest_path)
def __run(self, expected_lines, singletons=False):
self.load_plugins('filefilter')
import_files = [self.import_dir]
self._setup_import_session(singletons=singletons)
self.importer.paths = import_files
with capture_log() as logs:
self.importer.run()
self.unload_plugins()
FileFilterPlugin.listeners = None
logs = [line for line in logs if not line.startswith('Sending event:')]
self.assertEqual(logs, expected_lines)
def test_import_default(self):
""" The default configuration should import everything.
"""
self.__run([
'Album: %s' % displayable_path(self.artist_path),
' %s' % displayable_path(self.artist_paths[0]),
' %s' % displayable_path(self.artist_paths[1]),
'Album: %s' % displayable_path(self.album_path),
' %s' % displayable_path(self.album_paths[0]),
' %s' % displayable_path(self.album_paths[1]),
'Album: %s' % displayable_path(self.misc_path),
' %s' % displayable_path(self.misc_paths[0]),
' %s' % displayable_path(self.misc_paths[1])
])
def test_import_nothing(self):
config['filefilter']['path'] = 'not_there'
self.__run(['No files imported from %s' % displayable_path(
self.import_dir)])
# Global options
def test_import_global(self):
config['filefilter']['path'] = '.*track_1.*\\.mp3'
self.__run([
'Album: %s' % displayable_path(self.artist_path),
' %s' % displayable_path(self.artist_paths[0]),
'Album: %s' % displayable_path(self.misc_path),
' %s' % displayable_path(self.misc_paths[0]),
])
self.__run([
'Singleton: %s' % displayable_path(self.artist_paths[0]),
'Singleton: %s' % displayable_path(self.misc_paths[0])
], singletons=True)
# Album options
def test_import_album(self):
config['filefilter']['album_path'] = '.*track_1.*\\.mp3'
self.__run([
'Album: %s' % displayable_path(self.artist_path),
' %s' % displayable_path(self.artist_paths[0]),
'Album: %s' % displayable_path(self.misc_path),
' %s' % displayable_path(self.misc_paths[0]),
])
self.__run([
'Singleton: %s' % displayable_path(self.artist_paths[0]),
'Singleton: %s' % displayable_path(self.artist_paths[1]),
'Singleton: %s' % displayable_path(self.album_paths[0]),
'Singleton: %s' % displayable_path(self.album_paths[1]),
'Singleton: %s' % displayable_path(self.misc_paths[0]),
'Singleton: %s' % displayable_path(self.misc_paths[1])
], singletons=True)
# Singleton options
def test_import_singleton(self):
config['filefilter']['singleton_path'] = '.*track_1.*\\.mp3'
self.__run([
'Singleton: %s' % displayable_path(self.artist_paths[0]),
'Singleton: %s' % displayable_path(self.misc_paths[0])
], singletons=True)
self.__run([
'Album: %s' % displayable_path(self.artist_path),
' %s' % displayable_path(self.artist_paths[0]),
' %s' % displayable_path(self.artist_paths[1]),
'Album: %s' % displayable_path(self.album_path),
' %s' % displayable_path(self.album_paths[0]),
' %s' % displayable_path(self.album_paths[1]),
'Album: %s' % displayable_path(self.misc_path),
' %s' % displayable_path(self.misc_paths[0]),
' %s' % displayable_path(self.misc_paths[1])
])
# Album and singleton options
def test_import_both(self):
config['filefilter']['album_path'] = '.*track_1.*\\.mp3'
config['filefilter']['singleton_path'] = '.*track_2.*\\.mp3'
self.__run([
'Album: %s' % displayable_path(self.artist_path),
' %s' % displayable_path(self.artist_paths[0]),
'Album: %s' % displayable_path(self.misc_path),
' %s' % displayable_path(self.misc_paths[0]),
])
self.__run([
'Singleton: %s' % displayable_path(self.artist_paths[1]),
'Singleton: %s' % displayable_path(self.misc_paths[1])
], singletons=True)
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
|
#!/usr/bin/env python
#
# GrovePi Python library
# v1.2.2
#
# This file provides the basic functions for using the GrovePi
#
# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this example? Ask on the forums here: http://forum.dexterindustries.com/c/grovepi
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
# Karan Nayan
# Initial Date: 13 Feb 2014
# Last Updated: 01 June 2015
# http://www.dexterindustries.com/
import sys
import time
import math
import struct
debug = 0
if sys.version_info<(3,0):
p_version=2
else:
p_version=3
if sys.platform == 'uwp':
import winrt_smbus as smbus
bus = smbus.SMBus(1)
else:
import smbus
import RPi.GPIO as GPIO
rev = GPIO.RPI_REVISION
if rev == 2 or rev == 3:
bus = smbus.SMBus(1)
else:
bus = smbus.SMBus(0)
# I2C Address of Arduino
address = 0x04
# Command Format
# digitalRead() command format header
dRead_cmd = [1]
# digitalWrite() command format header
dWrite_cmd = [2]
# analogRead() command format header
aRead_cmd = [3]
# analogWrite() command format header
aWrite_cmd = [4]
# pinMode() command format header
pMode_cmd = [5]
# Ultrasonic read
uRead_cmd = [7]
# Get firmware version
version_cmd = [8]
# Accelerometer (+/- 1.5g) read
acc_xyz_cmd = [20]
# RTC get time
rtc_getTime_cmd = [30]
# DHT Pro sensor temperature
dht_temp_cmd = [40]
# Grove LED Bar commands
# Initialise
ledBarInit_cmd = [50]
# Set orientation
ledBarOrient_cmd = [51]
# Set level
ledBarLevel_cmd = [52]
# Set single LED
ledBarSetOne_cmd = [53]
# Toggle single LED
ledBarToggleOne_cmd = [54]
# Set all LEDs
ledBarSet_cmd = [55]
# Get current state
ledBarGet_cmd = [56]
# Grove 4 Digit Display commands
# Initialise
fourDigitInit_cmd = [70]
# Set brightness, not visible until next cmd
fourDigitBrightness_cmd = [71]
# Set numeric value without leading zeros
fourDigitValue_cmd = [72]
# Set numeric value with leading zeros
fourDigitValueZeros_cmd = [73]
# Set individual digit
fourDigitIndividualDigit_cmd = [74]
# Set individual leds of a segment
fourDigitIndividualLeds_cmd = [75]
# Set left and right values with colon
fourDigitScore_cmd = [76]
# Analog read for n seconds
fourDigitAnalogRead_cmd = [77]
# Entire display on
fourDigitAllOn_cmd = [78]
# Entire display off
fourDigitAllOff_cmd = [79]
# Grove Chainable RGB LED commands
# Store color for later use
storeColor_cmd = [90]
# Initialise
chainableRgbLedInit_cmd = [91]
# Initialise and test with a simple color
chainableRgbLedTest_cmd = [92]
# Set one or more leds to the stored color by pattern
chainableRgbLedSetPattern_cmd = [93]
# set one or more leds to the stored color by modulo
chainableRgbLedSetModulo_cmd = [94]
# sets leds similar to a bar graph, reversible
chainableRgbLedSetLevel_cmd = [95]
# Read the button from IR sensor
ir_read_cmd=[21]
# Set pin for the IR receiver
ir_recv_pin_cmd=[22]
dus_sensor_read_cmd=[10]
dust_sensor_en_cmd=[14]
dust_sensor_dis_cmd=[15]
encoder_read_cmd=[11]
encoder_en_cmd=[16]
encoder_dis_cmd=[17]
flow_read_cmd=[12]
flow_disable_cmd=[13]
flow_en_cmd=[18]
# This allows us to be more specific about which commands contain unused bytes
unused = 0
# Function declarations of the various functions used for encoding and sending
# data from RPi to Arduino
# Write I2C block
def write_i2c_block(address, block):
try:
return bus.write_i2c_block_data(address, 1, block)
except IOError:
if debug:
print ("IOError")
return -1
# Read I2C byte
def read_i2c_byte(address):
try:
return bus.read_byte(address)
except IOError:
if debug:
print ("IOError")
return -1
# Read I2C block
def read_i2c_block(address):
try:
return bus.read_i2c_block_data(address, 1)
except IOError:
if debug:
print ("IOError")
return -1
# Arduino Digital Read
def digitalRead(pin):
write_i2c_block(address, dRead_cmd + [pin, unused, unused])
time.sleep(.1)
n = read_i2c_byte(address)
return n
# Arduino Digital Write
def digitalWrite(pin, value):
write_i2c_block(address, dWrite_cmd + [pin, value, unused])
return 1
# Setting Up Pin mode on Arduino
def pinMode(pin, mode):
if mode == "OUTPUT":
write_i2c_block(address, pMode_cmd + [pin, 1, unused])
elif mode == "INPUT":
write_i2c_block(address, pMode_cmd + [pin, 0, unused])
return 1
# Read analog value from Pin
def analogRead(pin):
bus.write_i2c_block_data(address, 1, aRead_cmd + [pin, unused, unused])
time.sleep(.1)
bus.read_byte(address)
number = bus.read_i2c_block_data(address, 1)
time.sleep(.1)
return number[1] * 256 + number[2]
# Write PWM
def analogWrite(pin, value):
write_i2c_block(address, aWrite_cmd + [pin, value, unused])
return 1
# Read temp in Celsius from Grove Temperature Sensor
def temp(pin, model = '1.0'):
# each of the sensor revisions use different thermistors, each with their own B value constant
if model == '1.2':
bValue = 4250 # sensor v1.2 uses thermistor ??? (assuming NCP18WF104F03RC until SeeedStudio clarifies)
elif model == '1.1':
bValue = 4250 # sensor v1.1 uses thermistor NCP18WF104F03RC
else:
bValue = 3975 # sensor v1.0 uses thermistor TTC3A103*39H
a = analogRead(pin)
resistance = (float)(1023 - a) * 10000 / a
t = (float)(1 / (math.log(resistance / 10000) / bValue + 1 / 298.15) - 273.15)
return t
# Read value from Grove Ultrasonic
def ultrasonicRead(pin):
write_i2c_block(address, uRead_cmd + [pin, unused, unused])
time.sleep(.2)
read_i2c_byte(address)
number = read_i2c_block(address)
return (number[1] * 256 + number[2])
# Read the firmware version
def version():
write_i2c_block(address, version_cmd + [unused, unused, unused])
time.sleep(.1)
read_i2c_byte(address)
number = read_i2c_block(address)
return "%s.%s.%s" % (number[1], number[2], number[3])
# Read Grove Accelerometer (+/- 1.5g) XYZ value
def acc_xyz():
write_i2c_block(address, acc_xyz_cmd + [unused, unused, unused])
time.sleep(.1)
read_i2c_byte(address)
number = read_i2c_block(address)
if number[1] > 32:
number[1] = - (number[1] - 224)
if number[2] > 32:
number[2] = - (number[2] - 224)
if number[3] > 32:
number[3] = - (number[3] - 224)
return (number[1], number[2], number[3])
# Read from Grove RTC
def rtc_getTime():
write_i2c_block(address, rtc_getTime_cmd + [unused, unused, unused])
time.sleep(.1)
read_i2c_byte(address)
number = read_i2c_block(address)
return number
# Read and return temperature and humidity from Grove DHT Pro
def dht(pin, module_type):
write_i2c_block(address, dht_temp_cmd + [pin, module_type, unused])
    # Delay necessary for proper reading from the DHT sensor
time.sleep(.6)
try:
read_i2c_byte(address)
number = read_i2c_block(address)
time.sleep(.1)
if number == -1:
return [-1,-1]
except (TypeError, IndexError):
return [-1,-1]
# data returned in IEEE format as a float in 4 bytes
if p_version==2:
h=''
for element in (number[1:5]):
h+=chr(element)
t_val=struct.unpack('f', h)
t = round(t_val[0], 2)
h = ''
for element in (number[5:9]):
h+=chr(element)
hum_val=struct.unpack('f',h)
hum = round(hum_val[0], 2)
else:
t_val=bytearray(number[1:5])
h_val=bytearray(number[5:9])
t=round(struct.unpack('f',t_val)[0],2)
hum=round(struct.unpack('f',h_val)[0],2)
return [t, hum]
# Grove LED Bar - initialise
# orientation: (0 = red to green, 1 = green to red)
def ledBar_init(pin, orientation):
write_i2c_block(address, ledBarInit_cmd + [pin, orientation, unused])
return 1
# Grove LED Bar - set orientation
# orientation: (0 = red to green, 1 = green to red)
def ledBar_orientation(pin, orientation):
write_i2c_block(address, ledBarOrient_cmd + [pin, orientation, unused])
return 1
# Grove LED Bar - set level
# level: (0-10)
def ledBar_setLevel(pin, level):
write_i2c_block(address, ledBarLevel_cmd + [pin, level, unused])
return 1
# Grove LED Bar - set single led
# led: which led (1-10)
# state: off or on (0-1)
def ledBar_setLed(pin, led, state):
write_i2c_block(address, ledBarSetOne_cmd + [pin, led, state])
return 1
# Grove LED Bar - toggle single led
# led: which led (1-10)
def ledBar_toggleLed(pin, led):
write_i2c_block(address, ledBarToggleOne_cmd + [pin, led, unused])
return 1
# Grove LED Bar - set all leds
# state: (0-1023) or (0x00-0x3FF) or (0b0000000000-0b1111111111) or (int('0000000000',2)-int('1111111111',2))
def ledBar_setBits(pin, state):
byte1 = state & 255
byte2 = state >> 8
write_i2c_block(address, ledBarSet_cmd + [pin, byte1, byte2])
return 1
# Grove LED Bar - get current state
# state: (0-1023) a bit for each of the 10 LEDs
def ledBar_getBits(pin):
write_i2c_block(address, ledBarGet_cmd + [pin, unused, unused])
time.sleep(.2)
read_i2c_byte(0x04)
block = read_i2c_block(0x04)
return block[1] ^ (block[2] << 8)
# Grove 4 Digit Display - initialise
def fourDigit_init(pin):
write_i2c_block(address, fourDigitInit_cmd + [pin, unused, unused])
return 1
# Grove 4 Digit Display - set numeric value with or without leading zeros
# value: (0-65535) or (0000-FFFF)
def fourDigit_number(pin, value, leading_zero):
# split the value into two bytes so we can render 0000-FFFF on the display
byte1 = value & 255
byte2 = value >> 8
# separate commands to overcome current 4 bytes per command limitation
if (leading_zero):
write_i2c_block(address, fourDigitValue_cmd + [pin, byte1, byte2])
else:
write_i2c_block(address, fourDigitValueZeros_cmd + [pin, byte1, byte2])
time.sleep(.05)
return 1
# Grove 4 Digit Display - set brightness
# brightness: (0-7)
def fourDigit_brightness(pin, brightness):
# not actually visible until next command is executed
write_i2c_block(address, fourDigitBrightness_cmd + [pin, brightness, unused])
time.sleep(.05)
return 1
# Grove 4 Digit Display - set individual segment (0-9,A-F)
# segment: (0-3)
# value: (0-15) or (0-F)
def fourDigit_digit(pin, segment, value):
write_i2c_block(address, fourDigitIndividualDigit_cmd + [pin, segment, value])
time.sleep(.05)
return 1
# Grove 4 Digit Display - set 7 individual leds of a segment
# segment: (0-3)
# leds: (0-255) or (0-0xFF) one bit per led, segment 2 is special, 8th bit is the colon
def fourDigit_segment(pin, segment, leds):
write_i2c_block(address, fourDigitIndividualLeds_cmd + [pin, segment, leds])
time.sleep(.05)
return 1
# Grove 4 Digit Display - set left and right values (0-99), with leading zeros and a colon
# left: (0-255) or (0-FF)
# right: (0-255) or (0-FF)
# colon will be lit
def fourDigit_score(pin, left, right):
write_i2c_block(address, fourDigitScore_cmd + [pin, left, right])
time.sleep(.05)
return 1
# Grove 4 Digit Display - display analogRead value for n seconds, 4 samples per second
# analog: analog pin to read
# duration: analog read for this many seconds
def fourDigit_monitor(pin, analog, duration):
write_i2c_block(address, fourDigitAnalogRead_cmd + [pin, analog, duration])
time.sleep(duration + .05)
return 1
# Grove 4 Digit Display - turn entire display on (88:88)
def fourDigit_on(pin):
write_i2c_block(address, fourDigitAllOn_cmd + [pin, unused, unused])
time.sleep(.05)
return 1
# Grove 4 Digit Display - turn entire display off
def fourDigit_off(pin):
write_i2c_block(address, fourDigitAllOff_cmd + [pin, unused, unused])
time.sleep(.05)
return 1
# Grove Chainable RGB LED - store a color for later use
# red: 0-255
# green: 0-255
# blue: 0-255
def storeColor(red, green, blue):
write_i2c_block(address, storeColor_cmd + [red, green, blue])
time.sleep(.05)
return 1
# Grove Chainable RGB LED - initialise
# numLeds: how many leds do you have in the chain
def chainableRgbLed_init(pin, numLeds):
write_i2c_block(address, chainableRgbLedInit_cmd + [pin, numLeds, unused])
time.sleep(.05)
return 1
# Grove Chainable RGB LED - initialise and test with a simple color
# numLeds: how many leds do you have in the chain
# testColor: (0-7) 3 bits in total - a bit for red, green and blue, eg. 0x04 == 0b100 (0bRGB) == rgb(255, 0, 0) == #FF0000 == red
# ie. 0 black, 1 blue, 2 green, 3 cyan, 4 red, 5 magenta, 6 yellow, 7 white
def chainableRgbLed_test(pin, numLeds, testColor):
write_i2c_block(address, chainableRgbLedTest_cmd + [pin, numLeds, testColor])
time.sleep(.05)
return 1
# Grove Chainable RGB LED - set one or more leds to the stored color by pattern
# pattern: (0-3) 0 = this led only, 1 all leds except this led, 2 this led and all leds inwards, 3 this led and all leds outwards
# whichLed: index of led you wish to set counting outwards from the GrovePi, 0 = led closest to the GrovePi
def chainableRgbLed_pattern(pin, pattern, whichLed):
write_i2c_block(address, chainableRgbLedSetPattern_cmd + [pin, pattern, whichLed])
time.sleep(.05)
return 1
# Grove Chainable RGB LED - set one or more leds to the stored color by modulo
# offset: index of led you wish to start at, 0 = led closest to the GrovePi, counting outwards
# divisor: when 1 (default) sets stored color on all leds >= offset, when 2 sets every 2nd led >= offset and so on
def chainableRgbLed_modulo(pin, offset, divisor):
write_i2c_block(address, chainableRgbLedSetModulo_cmd + [pin, offset, divisor])
time.sleep(.05)
return 1
# Grove Chainable RGB LED - sets leds similar to a bar graph, reversible
# level: (0-10) the number of leds you wish to set to the stored color
# reverse: (0-1) when 0, count outwards from the GrovePi (led 0 = closest to the GrovePi); otherwise count inwards
def chainableRgbLed_setLevel(pin, level, reverse):
write_i2c_block(address, chainableRgbLedSetLevel_cmd + [pin, level, reverse])
time.sleep(.05)
return 1
# Grove - Infrared Receiver- get the commands received from the Grove IR sensor
def ir_read_signal():
try:
write_i2c_block(address,ir_read_cmd+[unused,unused,unused])
time.sleep(.1)
data_back= bus.read_i2c_block_data(address, 1)[0:21]
if (data_back[1]!=255):
return data_back
return [-1]*21
except IOError:
return [-1]*21
# Grove - Infrared Receiver- set the pin on which the Grove IR sensor is connected
def ir_recv_pin(pin):
write_i2c_block(address,ir_recv_pin_cmd+[pin,unused,unused])
def dust_sensor_en():
write_i2c_block(address, dust_sensor_en_cmd + [unused, unused, unused])
time.sleep(.2)
def dust_sensor_dis():
write_i2c_block(address, dust_sensor_dis_cmd + [unused, unused, unused])
time.sleep(.2)
def dustSensorRead():
write_i2c_block(address, dus_sensor_read_cmd + [unused, unused, unused])
time.sleep(.2)
#read_i2c_byte(address)
#number = read_i2c_block(address)
#return (number[1] * 256 + number[2])
data_back= bus.read_i2c_block_data(address, 1)[0:4]
#print data_back[:4]
if data_back[0]!=255:
lowpulseoccupancy=(data_back[3]*256*256+data_back[2]*256+data_back[1])
#print [data_back[0],lowpulseoccupancy]
return [data_back[0],lowpulseoccupancy]
else:
return [-1,-1]
def encoder_en():
write_i2c_block(address, encoder_en_cmd + [unused, unused, unused])
time.sleep(.2)
def encoder_dis():
write_i2c_block(address, encoder_dis_cmd + [unused, unused, unused])
time.sleep(.2)
def encoderRead():
write_i2c_block(address, encoder_read_cmd + [unused, unused, unused])
time.sleep(.2)
data_back= bus.read_i2c_block_data(address, 1)[0:2]
#print data_back
if data_back[0]!=255:
return [data_back[0],data_back[1]]
else:
return [-1,-1]
def flowDisable():
write_i2c_block(address, flow_disable_cmd + [unused, unused, unused])
time.sleep(.2)
def flowEnable():
write_i2c_block(address, flow_en_cmd + [unused, unused, unused])
time.sleep(.2)
def flowRead():
write_i2c_block(address, flow_read_cmd + [unused, unused, unused])
time.sleep(.2)
data_back= bus.read_i2c_block_data(address, 1)[0:3]
#print data_back
if data_back[0]!=255:
return [data_back[0],data_back[2]*256+data_back[1]]
else:
return [-1,-1]
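# Minimal usage sketch (not part of the original library). The port numbers
# below are assumptions for illustration; change them to match your wiring.
if __name__ == "__main__":
    led_pin = 4      # assumed: Grove LED on digital port D4
    light_pin = 0    # assumed: Grove light sensor on analog port A0
    pinMode(led_pin, "OUTPUT")
    pinMode(light_pin, "INPUT")
    while True:
        print("light sensor reading: %d" % analogRead(light_pin))
        digitalWrite(led_pin, 1)  # LED on
        time.sleep(.5)
        digitalWrite(led_pin, 0)  # LED off
        time.sleep(.5)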
|
|
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import QSizePolicy
from plotcontrols import PlotControls
from matplotlib.backends.backend_qt5 import NavigationToolbar2QT
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import numpy as np
from numpy import nanmin, nanmax
from custom_colormap import get_colormap
from datahandler import data_handler_factory
from plothandler import plot_handler_factory
class MplLayout(QtWidgets.QWidget):
"""
This class is responsible for drawing the plots.
Parameters
----------
statusBar : QtWidgets.QStatusBar instance
statusBar of the parent FolderBrowser instance.
parent : QtWidgets.QMainWindow instance
The parent FolderBrowser instance.
The starting point for this class was the Matplotlib example file
embedding_in_qt5.py
from
https://matplotlib.org/examples/user_interfaces/embedding_in_qt5.html
"""
def __init__(self, statusBar=None, parent=None):
super().__init__()
self.statusBar = statusBar
self.parent = parent
self.init_fig_and_canvas()
self.cmap_names = ['Reds', 'Blues_r', 'dark symmetric',
'light symmetric', 'inferno', 'viridis', 'afmhot']
self.plot_2D_types = ('Auto', 'imshow', 'pcolormesh')
self.plotcontrols = PlotControls(self.cmap_names, self.plot_2D_types)
self.set_callback_functions()
self.init_navi_toolbar()
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.navi_toolbar)
layout.addWidget(self.canvas)
layout.addWidget(self.plotcontrols)
self.setLayout(layout)
self.none_str = '---'
self.sel_col_names = self.plotcontrols.get_sel_cols()
self.plot_data = [None] * 3
self.cbar = None
self.cmap_name = self.cmap_names[0]
self.cmap = plt.get_cmap(self.cmap_name)
self.lims = [None] * 3
self.aspect = 'auto'
self.update_is_scheduled = False
self.title = None
self.labels = [None] * 3
self.scilimits = (-3,3)
self.n_active_cols = None
self.plot_2D_type = None
def reset_and_plot(self, sweep):
self.sweep = sweep
raw_col_names = list(self.sweep.data.dtype.names)
pcol_names = self.sweep.pdata.get_names()
all_names = raw_col_names + pcol_names
col3_names = all_names + [self.none_str]
col_names = [all_names, all_names, col3_names]
self.plotcontrols.reset_col_boxes(col_names)
self.update_sel_cols()
def update_sel_cols(self, new_num=None):
col_names = self.plotcontrols.get_sel_cols()
new_col_names = [n for n in col_names if n != self.none_str]
# Try to make 1D plot if '---' is selected in the third comboBox.
self.plot_is_2D = len(new_col_names) == 3
self.data_is_1D = self.sweep.dimension == 1
plot_is_invalid = self.plot_is_2D and self.data_is_1D
if plot_is_invalid:
msg = "You can't do a 2D plot, since the data is only 1D."
self.statusBar.showMessage(msg, 2000)
self.plotcontrols.set_text_on_box(2, self.none_str)
self.update_sel_cols()
return
self.set_data_for_plot(new_col_names)
tmp = (self.plot_dim, self.data_h.n_data_arrs)
if tmp in ((1,2), (2,3)) and self.data_h.data_is_valid:
self.update_is_scheduled = True
self.set_labels()
self.update_lims()
self.update_plot()
else:
self.clear_axis(redraw=True)
def set_data_for_plot(self, new_col_names):
new_plot_data = [None] * len(new_col_names)
for i, col_name in enumerate(new_col_names):
sweep = self.sweep
raw_data_col_names = sweep.data.dtype.names
pdata_col_names = sweep.pdata.name_func_dict.keys()
if col_name in raw_data_col_names:
new_plot_data[i] = sweep.data[col_name]
elif col_name in pdata_col_names:
try:
new_plot_data[i] = sweep.pdata[col_name]
except Exception as error:
msg = 'Calculation of pseudocolumn failed'
self.statusBar.showMessage(msg, 2000)
new_data_h = data_handler_factory(*new_plot_data)
self.sel_col_names = new_col_names
self.n_active_cols = len(new_col_names)
ax = self.canvas.figure.axes[0]
plot_dim = self.n_active_cols - 1
self.plot_dim = plot_dim
self.plot_h = plot_handler_factory(ax, new_data_h, plot_dim=plot_dim)
self.data_h = new_data_h
def set_labels(self):
self.labels = [None] * self.n_active_cols
for i, _ in enumerate(self.labels):
col_name = self.sel_col_names[i]
self.labels[i] = self.sweep.get_label(col_name)
def update_lims(self):
"""
        user_lims are the limits set by the user in the lim_boxes.
        For both 1D and 2D plots the data extent is used wherever the user
        has not set a limit.
"""
ext = [None] * self.n_active_cols
user_lims = self.plotcontrols.get_lims()
self.lims = [None] * self.n_active_cols
for i, lim in enumerate(self.lims):
ext = self.data_h.get_extent_of_data_dim(i)
self.lims[i] = self.combine_lim_lists(user_lims[i], ext)
self.update_cmap()
if not self.update_is_scheduled:
self.update_plot()
def update_cmap(self, cmap_name=None):
"""
        cmap_name: string corresponding to a built-in matplotlib colormap,
        OR one of the 'symmetric' maps provided by custom_colormap.get_colormap.
"""
if not self.plot_is_2D:
return
if type(cmap_name) is int:
cmap_name = self.cmap_names[cmap_name]
if cmap_name is None:
cmap_name = self.cmap_name
self.cmap_name = cmap_name
self.cmap = get_colormap(cmap_name, self.lims[2])
if not self.update_is_scheduled:
self.update_plot()
def update_aspect(self):
self.aspect = self.plotcontrols.get_aspect()
if not self.update_is_scheduled:
self.update_plot()
def update_plot(self):
if self.plot_is_2D: self._update_2D_plot()
else: self._update_1D_plot()
self.update_is_scheduled = False
def _update_1D_plot(self):
self.clear_axis(redraw=False)
self.plot_h.plot()
self.common_plot_update()
def _update_2D_plot(self):
fig = self.canvas.figure
if self.plot_2D_type == 'imshow' and not self.data_h.imshow_eligible:
self.clear_axis(redraw=True)
return
self.clear_axis(redraw=False)
self.image = self.plot_h.plot(plot_type=self.plot_2D_type)
self.cbar = fig.colorbar(mappable=self.image)
self.cbar.formatter.set_powerlimits(self.scilimits)
self.image.set_cmap(self.cmap)
self.image.set_clim(self.lims[2])
self.cbar.set_label(self.labels[2])
self.cbar.draw_all()
self.common_plot_update()
def common_plot_update(self):
ax = self.canvas.figure.axes[0]
ax.ticklabel_format(style='sci', axis='both',
scilimits=self.scilimits, useOffset=False)
ax.autoscale_view(True, True, True)
ax.relim()
ax.set_xlabel(self.labels[0])
ax.set_ylabel(self.labels[1])
ax.set_xlim(self.lims[0])
ax.set_ylim(self.lims[1])
ax.set_title(self.title, fontsize=11)
ax.set_aspect(self.aspect)
self.custom_tight_layout()
self.canvas.draw()
def clear_axis(self, redraw=True):
try:
self.cbar.remove()
self.cbar = None
self.image = None
except AttributeError:
pass
for ax in self.canvas.figure.axes:
ax.cla()
ax.relim()
ax.autoscale()
if redraw:
self.custom_tight_layout()
self.canvas.draw()
def custom_tight_layout(self):
# Sometimes we'll get an error:
# ValueError: bottom cannot be >= top
# This is a confirmed bug when using tight_layout():
# https://github.com/matplotlib/matplotlib/issues/5456
try:
self.canvas.figure.tight_layout()
except ValueError:
            msg = ('Title is wider than figure. '
                   'This causes undesired behavior and is a known bug.')
self.statusBar.showMessage(msg, 2000)
def set_callback_functions(self):
pt = self.plotcontrols
for box in pt.col_boxes:
box.activated.connect(self.update_sel_cols)
for box in pt.lim_boxes:
box.editingFinished.connect(self.update_lims)
pt.cmap_sel.activated.connect(self.update_cmap)
pt.plot_2D_type_sel.activated.connect(self.set_plot_2D_type)
pt.aspect_box.editingFinished.connect(self.update_aspect)
def init_fig_and_canvas(self):
fig = Figure(facecolor='white')
fig.add_subplot(1, 1, 1)
self.canvas = FigureCanvasQTAgg(fig)
policy = QSizePolicy.Expanding
self.canvas.setSizePolicy(policy, policy)
def init_navi_toolbar(self):
self.navi_toolbar = NavigationToolbar2QT(self.canvas, self)
self.navi_toolbar.setStyleSheet('border: none')
self.navi_toolbar.setMaximumHeight(20)
def copy_fig_to_clipboard(self):
image = QtWidgets.QWidget.grab(self.canvas).toImage()
QtWidgets.QApplication.clipboard().setImage(image)
def set_plot_2D_type(self, index=None):
    # The index emitted by the Qt signal is ignored; read the current
    # selection directly from the control instead.
    new_type = self.plotcontrols.get_sel_2D_type()
assert new_type in self.plot_2D_types
if new_type == 'Auto':
new_type = None
self.plot_2D_type = new_type
if not self.update_is_scheduled:
self.update_plot()
def set_title(self, title):
self.title = title
@staticmethod
def combine_lim_lists(list1, list2):
if list1 is None or list2 is None:
return None
assert len(list1) == len(list2)
out_list = [None] * len(list1)
for i in range(len(list1)):
if list1[i] is None:
out_list[i] = list2[i]
else:
out_list[i] = list1[i]
return out_list
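# Illustrative note (added for exposition, not part of the original class):
# combine_lim_lists fills in missing user limits from the data extent, e.g.
#   combine_lim_lists([None, 5], [0, 10])  ->  [0, 5]
# so the user only has to type the bound(s) they actually want to override.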
|
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions used by target assigner."""
import tensorflow.compat.v1 as tf
from object_detection.utils import shape_utils
def image_shape_to_grids(height, width):
"""Computes xy-grids given the shape of the image.
Args:
height: The height of the image.
width: The width of the image.
Returns:
A tuple of two tensors:
y_grid: A float tensor with shape [height, width] representing the
y-coordinate of each pixel grid.
x_grid: A float tensor with shape [height, width] representing the
x-coordinate of each pixel grid.
"""
out_height = tf.cast(height, tf.float32)
out_width = tf.cast(width, tf.float32)
x_range = tf.range(out_width, dtype=tf.float32)
y_range = tf.range(out_height, dtype=tf.float32)
x_grid, y_grid = tf.meshgrid(x_range, y_range, indexing='xy')
return (y_grid, x_grid)
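# Illustrative sketch (added for exposition, not part of the original module):
# shows the grid layout produced by image_shape_to_grids for a tiny image.
# The helper name below is hypothetical and exists only for this example.
def _demo_image_shape_to_grids():
  y_grid, x_grid = image_shape_to_grids(height=2, width=3)
  # y_grid repeats the row index:    [[0., 0., 0.], [1., 1., 1.]]
  # x_grid repeats the column index: [[0., 1., 2.], [0., 1., 2.]]
  return y_grid, x_grid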
def coordinates_to_heatmap(y_grid,
x_grid,
y_coordinates,
x_coordinates,
sigma,
channel_onehot,
channel_weights=None):
"""Returns the heatmap targets from a set of point coordinates.
This function maps a set of point coordinates to the output heatmap image
applied using a Gaussian kernel. Note that this function can be used by both
object detection and keypoint estimation tasks. For object detection, the
"channel" refers to the object class. For keypoint estimation, the "channel"
refers to the number of keypoint types.
Args:
y_grid: A 2D tensor with shape [height, width] which contains the grid
y-coordinates given in the (output) image dimensions.
x_grid: A 2D tensor with shape [height, width] which contains the grid
x-coordinates given in the (output) image dimensions.
y_coordinates: A 1D tensor with shape [num_instances] representing the
y-coordinates of the instances in the output space coordinates.
x_coordinates: A 1D tensor with shape [num_instances] representing the
x-coordinates of the instances in the output space coordinates.
sigma: A 1D tensor with shape [num_instances] representing the standard
deviation of the Gaussian kernel to be applied to the point.
channel_onehot: A 2D tensor with shape [num_instances, num_channels]
representing the one-hot encoded channel labels for each point.
channel_weights: A 1D tensor with shape [num_instances] corresponding to the
weight of each instance.
Returns:
heatmap: A tensor of size [height, width, num_channels] representing the
heatmap. Output (height, width) match the dimensions of the input grids.
"""
num_instances, num_channels = (
shape_utils.combined_static_and_dynamic_shape(channel_onehot))
x_grid = tf.expand_dims(x_grid, 2)
y_grid = tf.expand_dims(y_grid, 2)
# The raw center coordinates in the output space.
x_diff = x_grid - tf.math.floor(x_coordinates)
y_diff = y_grid - tf.math.floor(y_coordinates)
squared_distance = x_diff**2 + y_diff**2
gaussian_map = tf.exp(-squared_distance / (2 * sigma * sigma))
reshaped_gaussian_map = tf.expand_dims(gaussian_map, axis=-1)
reshaped_channel_onehot = tf.reshape(channel_onehot,
(1, 1, num_instances, num_channels))
gaussian_per_box_per_class_map = (
reshaped_gaussian_map * reshaped_channel_onehot)
if channel_weights is not None:
reshaped_weights = tf.reshape(channel_weights, (1, 1, num_instances, 1))
gaussian_per_box_per_class_map *= reshaped_weights
# Take maximum along the "instance" dimension so that all per-instance
# heatmaps of the same class are merged together.
heatmap = tf.reduce_max(gaussian_per_box_per_class_map, axis=2)
# Maximum of an empty tensor is -inf, the following is to avoid that.
heatmap = tf.maximum(heatmap, 0)
return heatmap
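# Illustrative sketch (added for exposition, not part of the original module):
# a single instance centered at (y=2, x=3) with sigma=1 and one class channel
# produces a Gaussian bump whose peak value is 1.0 at pixel (2, 3). The helper
# name is hypothetical.
def _demo_coordinates_to_heatmap():
  y_grid, x_grid = image_shape_to_grids(height=8, width=8)
  heatmap = coordinates_to_heatmap(
      y_grid, x_grid,
      y_coordinates=tf.constant([2.0]),
      x_coordinates=tf.constant([3.0]),
      sigma=tf.constant([1.0]),
      channel_onehot=tf.constant([[1.0]]))
  return heatmap  # shape [8, 8, 1]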
def compute_floor_offsets_with_indices(y_source,
x_source,
y_target=None,
x_target=None):
"""Computes offsets from floored source(floored) to target coordinates.
This function computes the offsets from source coordinates ("floored" as if
they were put on the grids) to target coordinates. Note that the input
coordinates should be the "absolute" coordinates in terms of the output image
dimensions as opposed to the normalized coordinates (i.e. values in [0, 1]).
If the input y and x source have the second dimension (representing the
neighboring pixels), then the offsets are computed from each of the
neighboring pixels to their corresponding target (first dimension).
Args:
y_source: A tensor with shape [num_points] (or [num_points, num_neighbors])
representing the absolute y-coordinates (in the output image space) of the
source points.
x_source: A tensor with shape [num_points] (or [num_points, num_neighbors])
representing the absolute x-coordinates (in the output image space) of the
source points.
y_target: A tensor with shape [num_points] representing the absolute
y-coordinates (in the output image space) of the target points. If not
provided, then y_source is used as the targets.
x_target: A tensor with shape [num_points] representing the absolute
x-coordinates (in the output image space) of the target points. If not
provided, then x_source is used as the targets.
Returns:
A tuple of two tensors:
offsets: A tensor with shape [num_points, 2] (or
[num_points, num_neighbors, 2]) representing the offsets of each input
point.
indices: A tensor with shape [num_points, 2] (or
[num_points, num_neighbors, 2]) representing the indices of where the
offsets should be retrieved in the output image dimension space.
Raises:
ValueError: source and target shapes have unexpected values.
"""
y_source_floored = tf.floor(y_source)
x_source_floored = tf.floor(x_source)
source_shape = shape_utils.combined_static_and_dynamic_shape(y_source)
if y_target is None and x_target is None:
y_target = y_source
x_target = x_source
else:
target_shape = shape_utils.combined_static_and_dynamic_shape(y_target)
if len(source_shape) == 2 and len(target_shape) == 1:
_, num_neighbors = source_shape
y_target = tf.tile(
tf.expand_dims(y_target, -1), multiples=[1, num_neighbors])
x_target = tf.tile(
tf.expand_dims(x_target, -1), multiples=[1, num_neighbors])
elif source_shape != target_shape:
raise ValueError('Inconsistent source and target shape.')
y_offset = y_target - y_source_floored
x_offset = x_target - x_source_floored
y_source_indices = tf.cast(y_source_floored, tf.int32)
x_source_indices = tf.cast(x_source_floored, tf.int32)
indices = tf.stack([y_source_indices, x_source_indices], axis=-1)
offsets = tf.stack([y_offset, x_offset], axis=-1)
return offsets, indices
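# Illustrative sketch (added for exposition, not part of the original module):
# with no explicit targets the offsets are just the fractional parts of the
# source coordinates and the indices are their floors, e.g. a source point of
# (y=2.3, x=3.7) yields an offset of roughly (0.3, 0.7) at index (2, 3).
def _demo_compute_floor_offsets_with_indices():
  offsets, indices = compute_floor_offsets_with_indices(
      y_source=tf.constant([2.3]), x_source=tf.constant([3.7]))
  return offsets, indices  # shapes [1, 2] (float) and [1, 2] (int32)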
def get_valid_keypoint_mask_for_class(keypoint_coordinates,
class_id,
class_onehot,
class_weights=None,
keypoint_indices=None):
"""Mask keypoints by their class ids and indices.
For a given task, we may want to only consider a subset of instances or
keypoints. This function is used to provide the mask (in terms of weights) to
mark those elements which should be considered based on the classes of the
instances and optionally, their keypoint indices. Note that the NaN values
in the keypoints will also be masked out.
Args:
keypoint_coordinates: A float tensor with shape [num_instances,
num_keypoints, 2] which contains the coordinates of each keypoint.
class_id: An integer representing the target class id to be selected.
class_onehot: A 2D tensor of shape [num_instances, num_classes] representing
the onehot (or k-hot) encoding of the class for each instance.
class_weights: A 1D tensor of shape [num_instances] representing the weight of
each instance. If not provided, all instances are weighted equally.
keypoint_indices: A list of integers representing the keypoint indices used
to select the values on the keypoint dimension. If provided, the output
dimension will be [num_instances, len(keypoint_indices)]
Returns:
A tuple of tensors:
mask: A float tensor of shape [num_instances, K], where K is num_keypoints
or len(keypoint_indices) if provided. The tensor has values either 0 or
1 indicating whether an element in the input keypoints should be used.
keypoints_nan_to_zeros: Same as input keypoints with the NaN values
replaced by zeros and selected columns corresponding to the
keypoint_indices (if provided). The shape of this tensor will always be
the same as the output mask.
"""
num_keypoints = tf.shape(keypoint_coordinates)[1]
class_mask = class_onehot[:, class_id]
reshaped_class_mask = tf.tile(
tf.expand_dims(class_mask, axis=-1), multiples=[1, num_keypoints])
not_nan = tf.math.logical_not(tf.math.is_nan(keypoint_coordinates))
mask = reshaped_class_mask * tf.cast(not_nan[:, :, 0], dtype=tf.float32)
keypoints_nan_to_zeros = tf.where(not_nan, keypoint_coordinates,
tf.zeros_like(keypoint_coordinates))
if class_weights is not None:
reshaped_class_weight = tf.tile(
tf.expand_dims(class_weights, axis=-1), multiples=[1, num_keypoints])
mask = mask * reshaped_class_weight
if keypoint_indices is not None:
mask = tf.gather(mask, indices=keypoint_indices, axis=1)
keypoints_nan_to_zeros = tf.gather(
keypoints_nan_to_zeros, indices=keypoint_indices, axis=1)
return mask, keypoints_nan_to_zeros
def blackout_pixel_weights_by_box_regions(height, width, boxes, blackout):
"""Blackout the pixel weights in the target box regions.
This function is used to generate the pixel weight mask (usually in the output
image dimension). The mask is used to ignore some regions when computing the loss.
Args:
height: int, height of the (output) image.
width: int, width of the (output) image.
boxes: A float tensor with shape [num_instances, 4] indicating the
coordinates of the four corners of the boxes.
blackout: A boolean tensor with shape [num_instances] indicating whether to
blackout (zero-out) the weights within the box regions.
Returns:
A float tensor with shape [height, width] where all values within the
regions of the blackout boxes are 0.0 and 1.0 elsewhere.
"""
num_instances, _ = shape_utils.combined_static_and_dynamic_shape(boxes)
# If no annotation instance is provided, return all ones (instead of
# unexpected values) to avoid NaN loss value.
if num_instances == 0:
return tf.ones([height, width], dtype=tf.float32)
(y_grid, x_grid) = image_shape_to_grids(height, width)
y_grid = tf.expand_dims(y_grid, axis=0)
x_grid = tf.expand_dims(x_grid, axis=0)
y_min = tf.expand_dims(boxes[:, 0:1], axis=-1)
x_min = tf.expand_dims(boxes[:, 1:2], axis=-1)
y_max = tf.expand_dims(boxes[:, 2:3], axis=-1)
x_max = tf.expand_dims(boxes[:, 3:], axis=-1)
# Make the mask with all 1.0 in the box regions.
# Shape: [num_instances, height, width]
in_boxes = tf.cast(
tf.logical_and(
tf.logical_and(y_grid >= y_min, y_grid <= y_max),
tf.logical_and(x_grid >= x_min, x_grid <= x_max)),
dtype=tf.float32)
# Shape: [num_instances, height, width]
blackout = tf.tile(
tf.expand_dims(tf.expand_dims(blackout, axis=-1), axis=-1),
[1, height, width])
# Select only the boxes specified by blackout.
selected_in_boxes = tf.where(blackout, in_boxes, tf.zeros_like(in_boxes))
out_boxes = tf.reduce_max(selected_in_boxes, axis=0)
out_boxes = tf.ones_like(out_boxes) - out_boxes
return out_boxes
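# Illustrative sketch (added for exposition, not part of the original module):
# a single blacked-out box spanning rows/cols 1..2 zeroes those pixels of the
# 4x4 weight mask and leaves every other pixel at 1.0.
def _demo_blackout_pixel_weights_by_box_regions():
  boxes = tf.constant([[1.0, 1.0, 2.0, 2.0]])  # [y_min, x_min, y_max, x_max]
  blackout = tf.constant([True])
  return blackout_pixel_weights_by_box_regions(4, 4, boxes, blackout)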
def _get_yx_indices_offset_by_radius(radius):
"""Gets the y and x index offsets that are within the radius."""
y_offsets = []
x_offsets = []
for y_offset in range(-radius, radius + 1, 1):
for x_offset in range(-radius, radius + 1, 1):
if x_offset ** 2 + y_offset ** 2 <= radius ** 2:
y_offsets.append(y_offset)
x_offsets.append(x_offset)
return (tf.constant(y_offsets, dtype=tf.float32),
tf.constant(x_offsets, dtype=tf.float32))
def get_surrounding_grids(height, width, y_coordinates, x_coordinates, radius):
"""Gets the indices of the surrounding pixels of the input y, x coordinates.
This function returns the pixel indices corresponding to the (floor of the)
input coordinates and their surrounding pixels within the radius. If the
radius is set to 0, then only the pixels that correspond to the floor of the
coordinates will be returned. If the radius is larger than 0, then all of the
pixels within the radius of the "floor pixels" will also be returned. For
example, if the input coorindate is [2.1, 3.5] and radius is 1, then the five
pixel indices will be returned: [2, 3], [1, 3], [2, 2], [2, 4], [3, 3]. Also,
if the surrounding pixels are outside of valid image region, then the returned
pixel indices will be [0, 0] and its corresponding "valid" value will be
False.
Args:
height: int, the height of the output image.
width: int, the width of the output image.
y_coordinates: A tensor with shape [num_points] representing the absolute
y-coordinates (in the output image space) of the points.
x_coordinates: A tensor with shape [num_points] representing the absolute
x-coordinates (in the output image space) of the points.
radius: int, the radius of the neighboring pixels to be considered and
returned. If set to 0, then only the pixel indices corresponding to the
floor of the input coordinates will be returned.
Returns:
A tuple of three tensors:
y_indices: A [num_points, num_neighbors] float tensor representing the
pixel y indices corresponding to the input points within radius. The
"num_neighbors" is determined by the size of the radius.
x_indices: A [num_points, num_neighbors] float tensor representing the
pixel x indices corresponding to the input points within radius. The
"num_neighbors" is determined by the size of the radius.
valid: A [num_points, num_neighbors] boolean tensor representing whether
each returned index is in valid image region or not.
"""
# Floored y, x: [num_points, 1].
y_center = tf.expand_dims(tf.math.floor(y_coordinates), axis=-1)
x_center = tf.expand_dims(tf.math.floor(x_coordinates), axis=-1)
y_offsets, x_offsets = _get_yx_indices_offset_by_radius(radius)
# Indices offsets: [1, num_neighbors].
y_offsets = tf.expand_dims(y_offsets, axis=0)
x_offsets = tf.expand_dims(x_offsets, axis=0)
# Floor + offsets: [num_points, num_neighbors].
y_output = y_center + y_offsets
x_output = x_center + x_offsets
default_output = tf.zeros_like(y_output)
valid = tf.logical_and(
tf.logical_and(x_output >= 0, x_output < width),
tf.logical_and(y_output >= 0, y_output < height))
y_output = tf.where(valid, y_output, default_output)
x_output = tf.where(valid, x_output, default_output)
return (y_output, x_output, valid)
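# Illustrative sketch (added for exposition, not part of the original module):
# with radius=1 the point (2.1, 3.5) maps to its floor pixel (2, 3) plus the
# four in-radius neighbours (1, 3), (2, 2), (2, 4) and (3, 3), matching the
# example in the docstring above.
def _demo_get_surrounding_grids():
  y_idx, x_idx, valid = get_surrounding_grids(
      height=8, width=8,
      y_coordinates=tf.constant([2.1]),
      x_coordinates=tf.constant([3.5]),
      radius=1)
  return y_idx, x_idx, valid  # each tensor has shape [1, 5]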
|
|
# Copyright 2012-2013, Andrey Kislyuk and argcomplete contributors.
# Licensed under the Apache License. See https://github.com/kislyuk/argcomplete for more info.
from __future__ import print_function, unicode_literals, division, absolute_import
import os, sys, argparse, contextlib, subprocess, locale, re
import io
from . import my_shlex as shlex
USING_PYTHON2 = sys.version_info < (3, 0)
if USING_PYTHON2:
from cStringIO import StringIO
BytesIO = StringIO
builtin_str = str
bytes = str
str = unicode
basestring = basestring
builtin_int = int
int = long
open = io.open
else:
basestring = str
sys_encoding = locale.getpreferredencoding()
_DEBUG = '_ARC_DEBUG' in os.environ
debug_stream = sys.stderr
def debug(*args):
if _DEBUG:
print(file=debug_stream, *args)
BASH_FILE_COMPLETION_FALLBACK = 79
BASH_DIR_COMPLETION_FALLBACK = 80
safe_actions = (argparse._StoreAction,
argparse._StoreConstAction,
argparse._StoreTrueAction,
argparse._StoreFalseAction,
argparse._AppendAction,
argparse._AppendConstAction,
argparse._CountAction)
from . import completers
from .my_argparse import IntrospectiveArgumentParser, action_is_satisfied, action_is_open
@contextlib.contextmanager
def mute_stdout():
stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
yield
sys.stdout = stdout
@contextlib.contextmanager
def mute_stderr():
stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
yield
sys.stderr.close()
sys.stderr = stderr
class ArgcompleteException(Exception):
pass
def split_line(line, point):
lexer = shlex.shlex(line, posix=True, punctuation_chars=True)
words = []
def split_word(word):
# TODO: make this less ugly
point_in_word = len(word) + point - lexer.instream.tell()
if isinstance(lexer.state, basestring) and lexer.state in lexer.whitespace:
point_in_word += 1
if point_in_word > len(word):
debug("In trailing whitespace")
words.append(word)
word = ''
prefix, suffix = word[:point_in_word], word[point_in_word:]
prequote = ''
# posix
if lexer.state is not None and lexer.state in lexer.quotes:
prequote = lexer.state
# non-posix
#if len(prefix) > 0 and prefix[0] in lexer.quotes:
# prequote, prefix = prefix[0], prefix[1:]
first_colon_pos = lexer.first_colon_pos if ':' in word else None
return prequote, prefix, suffix, words, first_colon_pos
while True:
try:
word = lexer.get_token()
if word == lexer.eof:
# TODO: check if this is ever unsafe
# raise ArgcompleteException("Unexpected end of input")
return "", "", "", words, None
if lexer.instream.tell() >= point:
debug("word", word, "split, lexer state: '{s}'".format(s=lexer.state))
return split_word(word)
words.append(word)
except ValueError:
debug("word", lexer.token, "split (lexer stopped, state: '{s}')".format(s=lexer.state))
if lexer.instream.tell() >= point:
return split_word(lexer.token)
else:
raise ArgcompleteException("Unexpected internal state. Please report this bug at https://github.com/kislyuk/argcomplete/issues.")
def default_validator(completion, prefix):
return completion.startswith(prefix)
class CompletionFinder(object):
'''
Inherit from this class if you wish to override any of the stages below. Otherwise, use ``argcomplete.autocomplete()``
directly (it's a convenience instance of this class). It has the same signature as
:meth:`CompletionFinder.__call__()`.
'''
def __init__(self):
pass
def __call__(self, argument_parser, always_complete_options=True, exit_method=os._exit, output_stream=None,
exclude=None, validator=None):
'''
:param argument_parser: The argument parser to autocomplete on
:type argument_parser: :class:`argparse.ArgumentParser`
:param always_complete_options: Whether or not to autocomplete options even if an option string opening character (normally ``-``) has not been entered
:type always_complete_options: boolean
:param exit_method: Method used to stop the program after printing completions. Defaults to :meth:`os._exit`. If you want to perform a normal exit that calls exit handlers, use :meth:`sys.exit`.
:type exit_method: callable
:param exclude: List of strings representing options to be omitted from autocompletion
:type exclude: iterable
:param validator: Function to filter all completions through before returning (called with two string arguments, completion and prefix; return value is evaluated as a boolean)
:type validator: callable
.. note:: If you are not subclassing CompletionFinder to override its behaviors, use ``argcomplete.autocomplete()`` directly. It has the same signature as this method.
Produces tab completions for ``argument_parser``. See module docs for more info.
Argcomplete only executes actions if their class is known not to have side effects. Custom action classes can be
added to argcomplete.safe_actions, if their values are wanted in the ``parsed_args`` completer argument, or their
execution is otherwise desirable.
'''
if '_ARGCOMPLETE' not in os.environ:
# not an argument completion invocation
return
global debug_stream
try:
debug_stream = os.fdopen(9, 'w')
except:
debug_stream = sys.stderr
if output_stream is None:
try:
output_stream = os.fdopen(8, 'wb')
except:
debug("Unable to open fd 8 for writing, quitting")
exit_method(1)
if validator is None:
validator = default_validator
self.validator = validator
self.always_complete_options = always_complete_options
self.exclude = exclude
# print("", stream=debug_stream)
# for v in 'COMP_CWORD', 'COMP_LINE', 'COMP_POINT', 'COMP_TYPE', 'COMP_KEY', '_ARGCOMPLETE_COMP_WORDBREAKS', 'COMP_WORDS':
# print(v, os.environ[v], stream=debug_stream)
ifs = os.environ.get('_ARGCOMPLETE_IFS', '\013')
if len(ifs) != 1:
debug("Invalid value for IFS, quitting [{v}]".format(v=ifs))
exit_method(1)
comp_line = os.environ['COMP_LINE']
comp_point = int(os.environ['COMP_POINT'])
# Adjust comp_point for wide chars
if USING_PYTHON2:
comp_point = len(comp_line[:comp_point].decode(sys_encoding))
else:
comp_point = len(comp_line.encode(sys_encoding)[:comp_point].decode(sys_encoding))
if USING_PYTHON2:
comp_line = comp_line.decode(sys_encoding)
cword_prequote, cword_prefix, cword_suffix, comp_words, first_colon_pos = split_line(comp_line, comp_point)
if os.environ['_ARGCOMPLETE'] == "2": # Hook recognized the first word as the interpreter
comp_words.pop(0)
debug(u"\nLINE: '{l}'\nPREQUOTE: '{pq}'\nPREFIX: '{p}'".format(l=comp_line, pq=cword_prequote, p=cword_prefix), u"\nSUFFIX: '{s}'".format(s=cword_suffix), u"\nWORDS:", comp_words)
active_parsers = [argument_parser]
parsed_args = argparse.Namespace()
visited_actions = []
'''
Since argparse doesn't support much introspection, we monkey-patch it to replace the parse_known_args method and
all actions with hooks that tell us which action was last taken or about to be taken, and let us have the parser
figure out which subparsers need to be activated (then recursively monkey-patch those).
We save all active ArgumentParsers to extract all their possible option names later.
'''
def patchArgumentParser(parser):
parser.__class__ = IntrospectiveArgumentParser
for action in parser._actions:
# TODO: accomplish this with super
class IntrospectAction(action.__class__):
def __call__(self, parser, namespace, values, option_string=None):
debug('Action stub called on', self)
debug('\targs:', parser, namespace, values, option_string)
debug('\torig class:', self._orig_class)
debug('\torig callable:', self._orig_callable)
visited_actions.append(self)
if self._orig_class == argparse._SubParsersAction:
debug('orig class is a subparsers action: patching and running it')
active_subparser = self._name_parser_map[values[0]]
patchArgumentParser(active_subparser)
active_parsers.append(active_subparser)
self._orig_callable(parser, namespace, values, option_string=option_string)
elif self._orig_class in safe_actions:
self._orig_callable(parser, namespace, values, option_string=option_string)
if getattr(action, "_orig_class", None):
debug("Action", action, "already patched")
action._orig_class = action.__class__
action._orig_callable = action.__call__
action.__class__ = IntrospectAction
patchArgumentParser(argument_parser)
try:
debug("invoking parser with", comp_words[1:])
with mute_stderr():
a = argument_parser.parse_known_args(comp_words[1:], namespace=parsed_args)
debug("parsed args:", a)
except BaseException as e:
debug("\nexception", type(e), str(e), "while parsing args")
debug("Active parsers:", active_parsers)
debug("Visited actions:", visited_actions)
debug("Parse result namespace:", parsed_args)
completions = self.collect_completions(active_parsers, parsed_args, cword_prefix, debug)
completions = self.filter_completions(completions)
completions = self.quote_completions(completions, cword_prequote, first_colon_pos)
debug("\nReturning completions:", completions)
output_stream.write(ifs.join(completions).encode(sys_encoding))
output_stream.flush()
debug_stream.flush()
exit_method(0)
def collect_completions(self, active_parsers, parsed_args, cword_prefix, debug):
'''
Visits the active parsers and their actions, executes their completers or introspects them to collect their
option strings. Returns the resulting completions as a list of strings.
This method is exposed for overriding in subclasses; there is no need to use it directly.
'''
completions = []
for parser in active_parsers:
debug("Examining parser", parser)
for action in parser._actions:
debug("Examining action", action)
if isinstance(action, argparse._SubParsersAction):
subparser_activated = False
for subparser in action._name_parser_map.values():
if subparser in active_parsers:
subparser_activated = True
if subparser_activated:
# Parent parser completions are not valid in the subparser, so flush them
completions = []
else:
completions += [subcmd for subcmd in action.choices.keys() if subcmd.startswith(cword_prefix)]
elif self.always_complete_options or (len(cword_prefix) > 0 and cword_prefix[0] in parser.prefix_chars):
completions += [option for option in action.option_strings if option.startswith(cword_prefix)]
debug("Active actions (L={l}): {a}".format(l=len(parser.active_actions), a=parser.active_actions))
# Only run completers if current word does not start with - (is not an optional)
if len(cword_prefix) == 0 or cword_prefix[0] not in parser.prefix_chars:
for active_action in parser.active_actions:
if not active_action.option_strings: # action is a positional
if action_is_satisfied(active_action) and not action_is_open(active_action):
debug("Skipping", active_action)
continue
debug("Activating completion for", active_action, active_action._orig_class)
#completer = getattr(active_action, 'completer', DefaultCompleter())
completer = getattr(active_action, 'completer', None)
if completer is None and active_action.choices is not None:
if not isinstance(active_action, argparse._SubParsersAction):
completer = completers.ChoicesCompleter(active_action.choices)
if completer:
if len(active_action.option_strings) > 0: # only for optionals
if not action_is_satisfied(active_action):
# This means the current action will fail to parse if the word under the cursor is not given
# to it, so give it exclusive control over completions (flush previous completions)
debug("Resetting completions because", active_action, "is unsatisfied")
completions = []
if callable(completer):
completions += [c for c in completer(prefix=cword_prefix, action=active_action,
parsed_args=parsed_args)
if self.validator(c, cword_prefix)]
else:
debug("Completer is not callable, trying the readline completer protocol instead")
for i in range(9999):
next_completion = completer.complete(cword_prefix, i)
if next_completion is None:
break
if self.validator(next_completion, cword_prefix):
completions.append(next_completion)
debug("Completions:", completions)
elif not isinstance(active_action, argparse._SubParsersAction):
debug("Completer not available, falling back")
try:
# TODO: what happens if completions contain newlines? How do I make compgen use IFS?
bashcomp_cmd = ['bash', '-c', "compgen -A file -- '{p}'".format(p=cword_prefix)]
completions += subprocess.check_output(bashcomp_cmd).decode(sys_encoding).splitlines()
except subprocess.CalledProcessError:
pass
return completions
def filter_completions(self, completions):
'''
Ensures collected completions are Unicode text, de-duplicates them, and excludes those specified by ``exclude``.
Returns the filtered completions as an iterable.
This method is exposed for overriding in subclasses; there is no need to use it directly.
'''
# On Python 2, we have to make sure all completions are unicode objects before we continue and output them.
# Otherwise, because python disobeys the system locale encoding and uses ascii as the default encoding, it will try
# to implicitly decode string objects using ascii, and fail.
if USING_PYTHON2:
for i in range(len(completions)):
if isinstance(completions[i], bytes):
completions[i] = completions[i].decode(sys_encoding)
# De-duplicate completions and remove excluded ones
if self.exclude is None:
self.exclude = set()
seen = set(self.exclude)
return [c for c in completions if c not in seen and not seen.add(c)]
def quote_completions(self, completions, cword_prequote, first_colon_pos):
'''
If the word under the cursor started with a quote (as indicated by a nonempty ``cword_prequote``), escapes
occurrences of that quote character in the completions, and adds the quote to the beginning of each completion.
Otherwise, escapes all characters that bash splits words on (``COMP_WORDBREAKS``), and removes portions of
completions before the first colon.
If there is only one completion, and it doesn't end with a **continuation character** (``/``, ``:``, or ``=``),
adds a space after the completion.
This method is exposed for overriding in subclasses; there is no need to use it directly.
'''
comp_wordbreaks = os.environ.get('_ARGCOMPLETE_COMP_WORDBREAKS', os.environ.get('COMP_WORDBREAKS', " \t\"'@><=;|&(:."))
if USING_PYTHON2:
comp_wordbreaks = comp_wordbreaks.decode(sys_encoding)
punctuation_chars = u'();<>|&!`'
for char in punctuation_chars:
if char not in comp_wordbreaks:
comp_wordbreaks += char
# If the word under the cursor was quoted, escape the quote char and add the leading quote back in.
# Otherwise, escape all COMP_WORDBREAKS chars.
if cword_prequote == '':
# Bash mangles completions which contain colons.
# This workaround has the same effect as __ltrim_colon_completions in bash_completion.
if first_colon_pos:
completions = [c[first_colon_pos+1:] for c in completions]
for wordbreak_char in comp_wordbreaks:
completions = [c.replace(wordbreak_char, '\\'+wordbreak_char) for c in completions]
else:
if cword_prequote == '"':
for char in '`$!':
completions = [c.replace(char, '\\'+char) for c in completions]
completions = [cword_prequote+c.replace(cword_prequote, '\\'+cword_prequote) for c in completions]
# Note: similar functionality in bash is turned off by supplying the "-o nospace" option to complete.
# We can't use that functionality because bash is not smart enough to recognize continuation characters (/) for
# which no space should be added.
continuation_chars = '=/:'
if len(completions) == 1 and completions[0][-1] not in continuation_chars:
if cword_prequote == '' and not completions[0].endswith(' '):
completions[0] += ' '
return completions
autocomplete = CompletionFinder()
autocomplete.__doc__ = ''' Use this to access argcomplete. See :meth:`argcomplete.CompletionFinder.__call__()`. '''
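# Illustrative usage sketch (added for exposition, not part of the original
# module), following argcomplete's documented workflow. The script name and
# argument below are hypothetical; the bash hook is typically registered with
# `eval "$(register-python-argcomplete my-script)"`.
#
#     #!/usr/bin/env python
#     # PYTHON_ARGCOMPLETE_OK
#     import argcomplete, argparse
#     parser = argparse.ArgumentParser()
#     parser.add_argument('--env', choices=['dev', 'prod'])
#     argcomplete.autocomplete(parser)
#     args = parser.parse_args()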
def warn(*args):
'''
Prints **args** to standard error when running completions. This will interrupt the user's command line interaction;
use it to indicate an error condition that is preventing your completer from working.
'''
print("\n", file=debug_stream, *args)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Code for model cloning, plus model-related API entries.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.engine import saving
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_layer import Input
from tensorflow.python.keras.engine.input_layer import InputLayer
from tensorflow.python.keras.engine.network import Network
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.training.checkpointable import data_structures
from tensorflow.python.util.tf_export import tf_export
# API entries importable from `keras.models`:
Model = training.Model # pylint: disable=invalid-name
Sequential = sequential.Sequential # pylint: disable=invalid-name
save_model = saving.save_model
load_model = saving.load_model
model_from_config = saving.model_from_config
model_from_yaml = saving.model_from_yaml
model_from_json = saving.model_from_json
def _clone_functional_model(model, input_tensors=None):
"""Clone a functional `Model` instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
Arguments:
model: Instance of `Model`.
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
Returns:
An instance of `Model` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights.
Raises:
ValueError: in case of invalid `model` argument value.
"""
if not isinstance(model, Model):
raise ValueError('Expected `model` argument '
'to be a `Model` instance, got ', model)
if isinstance(model, Sequential):
raise ValueError('Expected `model` argument '
'to be a functional `Model` instance, '
'got a `Sequential` instance instead:', model)
layer_map = {} # Cache for created layers.
tensor_map = {} # Map {reference_tensor: corresponding_tensor}
if input_tensors is None:
# Create placeholders to build the model on top of.
input_layers = []
input_tensors = []
for layer in model._input_layers:
input_tensor = Input(
batch_shape=layer._batch_input_shape,
dtype=layer.dtype,
sparse=layer.sparse,
name=layer.name)
input_tensors.append(input_tensor)
# Cache newly created input layer.
newly_created_input_layer = input_tensor._keras_history[0]
layer_map[layer] = newly_created_input_layer
for original_input_layer, cloned_input_layer in zip(model._input_layers,
input_layers):
layer_map[original_input_layer] = cloned_input_layer
else:
# Make sure that all input tensors come from a Keras layer.
# If tensor comes from an input layer: cache the input layer.
input_tensors = generic_utils.to_list(input_tensors)
input_tensors_ = []
for i, x in enumerate(input_tensors):
if not K.is_keras_tensor(x):
name = model._input_layers[i].name
input_tensor = Input(tensor=x, name='input_wrapper_for_' + name)
input_tensors_.append(input_tensor)
# Cache newly created input layer.
original_input_layer = x._keras_history[0]
newly_created_input_layer = input_tensor._keras_history[0]
layer_map[original_input_layer] = newly_created_input_layer
else:
input_tensors_.append(x)
input_tensors = input_tensors_
for x, y in zip(model.inputs, input_tensors):
tensor_map[x] = y
# Iterate over every node in the reference model, in depth order.
depth_keys = list(model._nodes_by_depth.keys())
depth_keys.sort(reverse=True)
for depth in depth_keys:
nodes = model._nodes_by_depth[depth]
for node in nodes:
# Recover the corresponding layer.
layer = node.outbound_layer
# Get or create layer.
if layer not in layer_map:
# Clone layer.
new_layer = layer.__class__.from_config(layer.get_config())
layer_map[layer] = new_layer
layer = new_layer
else:
# Reuse previously cloned layer.
layer = layer_map[layer]
# Don't call InputLayer multiple times.
if isinstance(layer, InputLayer):
continue
# Gather inputs to call the new layer.
reference_input_tensors = node.input_tensors
reference_output_tensors = node.output_tensors
# If all previous input tensors are available in tensor_map,
# then call node.inbound_layer on them.
computed_tensors = []
for x in reference_input_tensors:
if x in tensor_map:
computed_tensors.append(tensor_map[x])
if len(computed_tensors) == len(reference_input_tensors):
# Call layer.
if node.arguments:
kwargs = node.arguments
else:
kwargs = {}
if len(computed_tensors) == 1:
computed_tensor = computed_tensors[0]
output_tensors = generic_utils.to_list(layer(computed_tensor,
**kwargs))
computed_tensors = [computed_tensor]
else:
computed_tensors = computed_tensors
output_tensors = generic_utils.to_list(layer(computed_tensors,
**kwargs))
for x, y in zip(reference_output_tensors, output_tensors):
tensor_map[x] = y
# Check that we did compute the model outputs,
# then instantiate a new model from inputs and outputs.
output_tensors = []
for x in model.outputs:
assert x in tensor_map, 'Could not compute output ' + str(x)
output_tensors.append(tensor_map[x])
return Model(input_tensors, output_tensors, name=model.name)
def _clone_sequential_model(model, input_tensors=None):
"""Clone a `Sequential` model instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
Arguments:
model: Instance of `Sequential`.
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
Returns:
An instance of `Sequential` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights.
Raises:
ValueError: in case of invalid `model` argument value.
"""
if not isinstance(model, Sequential):
raise ValueError('Expected `model` argument '
'to be a `Sequential` model instance, '
'but got:', model)
def clone(layer):
return layer.__class__.from_config(layer.get_config())
layers = [clone(layer) for layer in model.layers]
if input_tensors is None:
return Sequential(layers=layers, name=model.name)
else:
if len(generic_utils.to_list(input_tensors)) != 1:
raise ValueError('To clone a `Sequential` model, we expect '
'at most one tensor '
'as part of `input_tensors`.')
x = generic_utils.to_list(input_tensors)[0]
if K.is_keras_tensor(x):
origin_layer = x._keras_history[0]
if isinstance(origin_layer, InputLayer):
return Sequential(layers=[origin_layer] + layers, name=model.name)
else:
raise ValueError('Cannot clone a `Sequential` model on top '
'of a tensor that comes from a Keras layer '
'other than an `InputLayer`. '
'Use the functional API instead.')
input_tensor = Input(tensor=x, name='input_wrapper_for_' + str(x.name))
input_layer = input_tensor._keras_history[0]
return Sequential(layers=[input_layer] + layers, name=model.name)
@tf_export('keras.models.clone_model')
def clone_model(model, input_tensors=None):
"""Clone any `Model` instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
Arguments:
model: Instance of `Model`
(could be a functional model or a Sequential model).
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
Returns:
An instance of `Model` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights.
Raises:
ValueError: in case of invalid `model` argument value.
"""
if isinstance(model, Sequential):
return _clone_sequential_model(model, input_tensors=input_tensors)
else:
return _clone_functional_model(model, input_tensors=input_tensors)
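# Illustrative sketch (added for exposition, not part of the original module):
# cloning a small functional model yields a model with the same architecture
# but freshly initialized weights. The helper name and layer sizes are
# arbitrary; `layers.Dense` is imported locally just for this example.
def _demo_clone_model():
  from tensorflow.python.keras import layers
  inputs = Input(shape=(4,))
  outputs = layers.Dense(2)(inputs)
  original = Model(inputs, outputs)
  clone = clone_model(original)  # same topology, new weights
  return original, clone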
# "Clone" a subclassed model by reseting all of the attributes.
def _in_place_subclassed_model_reset(model):
"""Substitute for model cloning that works for subclassed models.
Subclassed models cannot be cloned because their topology is not serializable.
To "instantiate" an identical model in a new TF graph, we reuse the original
model object, but we clear its state.
After calling this function on a model instance, you can use the model
instance as if it were a model clone (in particular you can use it in a new
graph).
This method clears the state of the input model. It is thus destructive.
However the original state can be restored fully by calling
`_in_place_subclassed_model_state_restoration`.
Args:
model: Instance of a Keras model created via subclassing.
Raises:
ValueError: In case the model uses a subclassed model as inner layer.
"""
assert not model._is_graph_network # Only makes sense for subclassed networks
# Retrieve all layers tracked by the model as well as their attribute names
attributes_cache = {}
for name in dir(model):
try:
value = getattr(model, name)
except (AttributeError, ValueError, TypeError):
continue
if isinstance(value, Layer):
attributes_cache[name] = value
assert value in model._layers
elif isinstance(value, (list, tuple)) and name not in ('layers', '_layers'):
# Handle case: list/tuple of layers (also tracked by the Network API).
if value and all(isinstance(val, Layer) for val in value):
raise ValueError('We do not support the use of list-of-layers '
'attributes in subclassed models used with '
'`model_to_estimator` at this time. Found list '
'model: %s' % name)
# Replace layers on the model with fresh layers
layers_to_names = {value: key for key, value in attributes_cache.items()}
original_layers = model._layers[:]
model._layers = data_structures.NoDependency([])
for layer in original_layers: # We preserve layer order.
config = layer.get_config()
# This will not work for nested subclassed models used as layers.
# This would be theoretically possible to support, but would add complexity.
# Only do it if users complain.
if isinstance(layer, Network) and not layer._is_graph_network:
raise ValueError('We do not support the use of nested subclassed models '
'in `model_to_estimator` at this time. Found nested '
'model: %s' % layer)
fresh_layer = layer.__class__.from_config(config)
name = layers_to_names[layer]
setattr(model, name, fresh_layer)
# Cache original model build attributes (in addition to layers)
if (not hasattr(model, '_original_attributes_cache') or
model._original_attributes_cache is None):
if model.built:
attributes_to_cache = [
'inputs',
'outputs',
'_feed_outputs',
'_feed_output_names',
'_feed_output_shapes',
'_feed_loss_fns',
'loss_weights_list',
'targets',
'_feed_targets',
'sample_weight_modes',
'weighted_metrics',
'metrics_names',
'metrics_tensors',
'metrics_updates',
'stateful_metric_names',
'total_loss',
'sample_weights',
'_feed_sample_weights',
'train_function',
'test_function',
'predict_function',
'_collected_trainable_weights',
'_feed_inputs',
'_feed_input_names',
'_feed_input_shapes',
'optimizer',
]
for name in attributes_to_cache:
attributes_cache[name] = getattr(model, name)
model._original_attributes_cache = data_structures.NoDependency(
attributes_cache)
# Reset built state
model.built = False
model.inputs = None
model.outputs = None
def in_place_subclassed_model_state_restoration(model):
"""Restores the original state of a model after it was "reset".
This undoes this action of `_in_place_subclassed_model_reset`, which is called
in `clone_and_build_model` if `in_place_reset` is set to True.
Args:
model: Instance of a Keras model created via subclassing, on which
`_in_place_subclassed_model_reset` was previously called.
"""
assert not model._is_graph_network
# Restore layers and build attributes
if (hasattr(model, '_original_attributes_cache') and
model._original_attributes_cache is not None):
# Models have sticky attribute assignment, so we want to be careful to add
# back the previous attributes and track Layers by their original names
# without adding dependencies on "utility" attributes which Models exempt
# when they're constructed.
model._layers = data_structures.NoDependency([])
for name, value in model._original_attributes_cache.items():
if not isinstance(value, checkpointable.CheckpointableBase):
# If this value is not already checkpointable, it's probably that way
# for a reason; we don't want to start tracking data structures that the
# original Model didn't.
value = data_structures.NoDependency(value)
setattr(model, name, value)
model._original_attributes_cache = None
else:
# Restore to the state of a never-called model.
model.built = False
model.inputs = None
model.outputs = None
def clone_and_build_model(
model, input_tensors=None, target_tensors=None, custom_objects=None,
compile_clone=True, in_place_reset=False, optimizer_iterations=None):
"""Clone a `Model` and build/compile it with the same settings used before.
This function can be run in the same graph or in a separate graph from the
model. When using a separate graph, `in_place_reset` must be `False`.
Args:
model: `tf.keras.Model` object. Can be Functional, Sequential, or
sub-classed.
input_tensors: Optional list of input tensors to build the model upon. If
not provided, placeholders will be created.
target_tensors: Optional list of target tensors for compiling the model. If
not provided, placeholders will be created.
custom_objects: Optional dictionary mapping string names to custom classes
or functions.
compile_clone: Boolean, whether to compile model clone (default `True`).
in_place_reset: Boolean, whether to reset the model in place. Only used if
the model is not a graph network. If the model is a subclassed model, then
this argument must be set to `True` (default `False`). To restore the
original model, use the function
`in_place_subclassed_model_state_restoration(model)`.
optimizer_iterations: An iterations variable that will be incremented by the
optimizer if the clone is compiled. This argument is used when a Keras
model is cloned into an Estimator model function, because Estimators
create their own global step variable.
Returns:
Clone of the model.
Raises:
ValueError: if trying to clone a subclassed model, and `in_place_reset` is
set to False.
"""
if model._is_graph_network:
if custom_objects:
with CustomObjectScope(custom_objects):
clone = clone_model(model, input_tensors=input_tensors)
else:
clone = clone_model(model, input_tensors=input_tensors)
else:
if not in_place_reset:
raise ValueError(
'Model is not a graph network (usually means that it is a subclassed '
'model). The model cannot be cloned, but there is a workaround where '
'the model is reset in-place. To use this, please set the argument '
'`in_place_reset` to `True`. This will reset the attributes in the '
'original model. To restore the attributes, call '
'`in_place_subclassed_model_state_restoration(model)`.')
clone = model
_in_place_subclassed_model_reset(clone)
if input_tensors is not None:
if isinstance(input_tensors, (list, tuple)) and len(input_tensors) == 1:
input_tensors = input_tensors[0]
clone._set_inputs(input_tensors)
# Compile/Build model
if not compile_clone:
if isinstance(clone, Sequential):
clone.build()
elif model.optimizer:
if isinstance(model.optimizer, optimizers.TFOptimizer):
optimizer = optimizers.TFOptimizer(
model.optimizer.optimizer, optimizer_iterations)
K.track_tf_optimizer(optimizer)
else:
optimizer_config = model.optimizer.get_config()
optimizer = model.optimizer.__class__.from_config(optimizer_config)
if optimizer_iterations is not None:
optimizer.iterations = optimizer_iterations
clone.compile(
optimizer,
model.loss,
metrics=model.metrics,
loss_weights=model.loss_weights,
sample_weight_mode=model.sample_weight_mode,
weighted_metrics=model.weighted_metrics,
target_tensors=target_tensors)
return clone
|
|
from __future__ import print_function, division
import abc
import numpy as np
from numpy.linalg import slogdet
import math
from scipy import stats
from scipy.special import gammaln, multigammaln
LOG2PI = math.log(2*math.pi)
LOG2 = math.log(2)
LOGPI = math.log(math.pi)
class CollapsibleDistribution(object):
""" Abstract base class for a family of conjugate distributions.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def log_marginal_likelihood(self):
pass
class FrozenDistribution(object):
""" Abstract base class for a probability distribution whose
parameters are fixed.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __init__(self):
pass
@abc.abstractmethod
def __call__(self):
pass
class NormalInverseWishart(CollapsibleDistribution):
"""
Multivariate Normal likelihood with multivariate Normal prior on
mean and Inverse-Wishart prior on the covariance matrix.
All math taken from Kevin Murphy's 2007 technical report,
'Conjugate Bayesian analysis of the Gaussian distribution'.
"""
def __init__(self, **prior_hyperparameters):
self.nu_0 = prior_hyperparameters['nu_0']
self.mu_0 = prior_hyperparameters['mu_0']
self.kappa_0 = prior_hyperparameters['kappa_0']
self.lambda_0 = prior_hyperparameters['lambda_0']
self.d = float(len(self.mu_0))
self.log_z = self.calc_log_z(self.mu_0, self.lambda_0, self.kappa_0,
self.nu_0)
@staticmethod
def update_parameters(X, _mu, _lambda, _kappa, _nu, _d):
n = X.shape[0]
xbar = np.mean(X, axis=0)
kappa_n = _kappa + n
nu_n = _nu + n
mu_n = (_kappa*_mu + n*xbar)/kappa_n
S = np.zeros(_lambda.shape) if n == 1 else (n-1)*np.cov(X.T)
dt = (xbar-_mu)[np.newaxis]
back = np.dot(dt.T, dt)
lambda_n = _lambda + S + (_kappa*n/kappa_n)*back
assert(mu_n.shape[0] == _mu.shape[0])
assert(lambda_n.shape[0] == _lambda.shape[0])
assert(lambda_n.shape[1] == _lambda.shape[1])
return mu_n, lambda_n, kappa_n, nu_n
@staticmethod
def calc_log_z(_mu, _lambda, _kappa, _nu):
d = len(_mu)
sign, detr = slogdet(_lambda)
log_z = (LOG2*(_nu*d/2.0)
+ (d/2.0)*math.log(2*math.pi/_kappa)
+ multigammaln(_nu/2, d) - (_nu/2.0)*detr)
return log_z
def log_marginal_likelihood(self, X):
n = X.shape[0]
params_n = self.update_parameters(X, self.mu_0, self.lambda_0,
self.kappa_0, self.nu_0, self.d)
log_z_n = self.calc_log_z(*params_n)
return log_z_n - self.log_z - LOG2PI*(n*self.d/2)
def log_posterior_predictive(self, X_new, X_old):
""" log_posterior_predictive(X_new, X_old)
Find the posterior predictive probability p(X_new|X_old)
where X_old is some data we already have and X_new is the
point at which we want the posterior predictive prob.
The posterior predictive distribution is a (multivariate)
t-distribution.
The formula required is given by
en.wikipedia.org/wiki/Conjugate_prior
Parameters
----------
X_old : ndarray
The existing data on which the posterior predictive
is to be conditioned.
X_new : ndarray
The point for which we want the posterior predictive.
"""
params_old = self.update_parameters(X_old, self.mu_0,
self.lambda_0,
self.kappa_0, self.nu_0,
self.d)
t_sigma = ((params_old[2]+1) / (params_old[2]*(params_old[3]-self.d+1))
* params_old[1])
t_sigma_inv = np.linalg.inv(t_sigma)
t_dof = params_old[3]-self.d+1
t_z = X_new - params_old[0]
t_logdiff = math.log(1+np.sum(t_z*np.dot(t_sigma_inv, t_z))
/ t_dof)
sgn, det = np.linalg.slogdet(t_sigma)
prob = (gammaln((t_dof+self.d)/2)
- gammaln(t_dof/2)
- self.d/2*math.log(t_dof)
- self.d/2*LOGPI
- det/2
- (t_dof+self.d)/2*t_logdiff)
return prob
def conditional_sample(self, X, size=1):
""" conditional_sample(X)
Sample from the posterior predictive distribution
conditioned on some data X.
The posterior predictive distribution follows a
multivariate t distribution, as per
en.wikipedia.org/wiki/Conjugate_prior .
The multivariate is sampled by performing
x = mu + Z sqrt(nu/u) ,
where
Z ~ N(0, sigma) ,
u ~ chi2(nu) ,
this implies
x ~ t_nu(mu, sigma)
Parameters
----------
X : ndarray
The existing data on which the posterior predictive
is to be conditioned.
size : int, optional
The number of samples to be drawn.
"""
output = np.zeros((size, self.d))
params_n = self.update_parameters(X, self.mu_0, self.lambda_0,
self.kappa_0, self.nu_0,
self.d)
t_dof = params_n[3] - self.d + 1
t_cov = (params_n[2]+1) / (params_n[2]*t_dof) * params_n[1]
mvn_rv = stats.multivariate_normal(cov=t_cov)
chi2_rv = stats.chi2(t_dof)
for it in range(size):
# Sample u from chi2 dist
u = chi2_rv.rvs()
# Sample from multivariate Normal
z = mvn_rv.rvs()
output[it, :] = params_n[0] + z*math.sqrt(t_dof/u)
return output
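# Illustrative sketch (added for exposition, not part of the original module):
# a minimal 2-d Normal-Inverse-Wishart prior evaluated on toy data. The
# hyperparameter values are arbitrary but valid (nu_0 > d - 1).
def _demo_normal_inverse_wishart():
    prior = NormalInverseWishart(nu_0=3.0,
                                 mu_0=np.zeros(2),
                                 kappa_0=1.0,
                                 lambda_0=np.eye(2))
    X = np.array([[0.1, -0.2], [0.3, 0.4], [-0.5, 0.0]])
    return prior.log_marginal_likelihood(X)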
class NormalFixedCovar(CollapsibleDistribution):
"""
Multivariate Normal likelihood with multivariate Normal prior on
mean and a fixed covariance matrix.
All math taken from Kevin Murphy's 2007 technical report,
'Conjugate Bayesian analysis of the Gaussian distribution'.
"""
def __init__(self, **prior_hyperparameters):
self.mu_0 = prior_hyperparameters['mu_0']
self.sigma_0 = prior_hyperparameters['sigma_0']
self.S = prior_hyperparameters['S']
self.d = float(len(self.mu_0))
sgn, self.sigma_0_det = np.linalg.slogdet(self.sigma_0)
sgn, self.S_det = np.linalg.slogdet(self.S)
self.sigma_0_inv = np.linalg.inv(self.sigma_0)
self.S_inv = np.linalg.inv(self.S)
self.log_z0 = self.calc_log_z(self.mu_0, self.sigma_0, self.S)
@staticmethod
def update_parameters(X, _mu, _sigma, S, _d):
n = X.shape[0]
xbar = np.mean(X, axis=0)
_sigma_inv = np.linalg.inv(_sigma)
S_inv = np.linalg.inv(S)
# update variance on mean
sigma_n_inv = _sigma_inv + n*S_inv
sigma_n = np.linalg.inv(sigma_n_inv)
# update mean
mu_n = (np.dot(_sigma_inv, _mu)
+ n*np.dot(S_inv, xbar))
mu_n = np.dot(sigma_n, mu_n)
assert(mu_n.shape[0] == _mu.shape[0])
assert(sigma_n.shape[0] == _sigma.shape[0])
assert(sigma_n.shape[1] == _sigma.shape[1])
return mu_n, sigma_n, S
@staticmethod
def calc_log_z(_mu, _sigma, S):
d = len(_mu)
sign, detr = slogdet(_sigma)
_sigma_inv = np.linalg.inv(_sigma)
log_z = detr/2 + np.sum(_mu*np.dot(_sigma_inv, _mu))
return log_z
def log_marginal_likelihood(self, X):
n = X.shape[0]
params_n = self.update_parameters(X, self.mu_0, self.sigma_0,
self.S, self.d)
log_z_n = self.calc_log_z(*params_n)
Q = 0.
for i in range(n):
Q += np.sum(X[i, :]*np.dot(self.S_inv, X[i, :]))
lml = log_z_n - self.log_z0 - LOG2PI*(n*self.d/2) - Q - self.S_det*n/2
return lml
def log_posterior_predictive(self, X_new, X_old):
""" log_posterior_predictive(X_new, X_old)
Find the posterior predictive probability p(X_new|X_old)
where X_old is some data we already have and X_new is the
point at which we want the posterior predictive prob.
The posterior predictive distribution is a (multivariate)
t-distribution.
The formula required is given by
en.wikipedia.org/wiki/Conjugate_prior
Parameters
----------
X_old : ndarray
The existing data on which the posterior predictive
is to be conditioned.
X_new : ndarray
The point for which we want the posterior predictive.
"""
params_old = self.update_parameters(X_old, self.mu_0,
self.sigma_0, self.S,
self.d)
z_sigma = params_old[1]+self.S
z_sigma_inv = np.linalg.inv(z_sigma)
diff = X_new-params_old[0]
z = np.sum(diff*np.dot(z_sigma_inv, diff))
sgn, det = np.linalg.slogdet(z_sigma)
prob = (- self.d/2*LOG2PI - det/2 - z/2)
return prob
def conditional_sample(self, X, size=1):
""" conditional_sample(X)
Sample from the posterior predictive distribution
conditioned on some data X.
For the Normal distribution the samples
are found by sampling from a (multivariate) Normal.
Parameters
----------
X : ndarray
The existing data on which the posterior predictive
is to be conditioned.
size : int, optional
The number of samples to be drawn.
"""
output = np.zeros((size, self.d))
params_n = self.update_parameters(X, self.mu_0, self.sigma_0, self.S,
self.d)
for it in range(size):
# get covariance
cov = params_n[1]+self.S
# Sample from multivariate Normal
output[it, :] = stats.multivariate_normal.rvs(
mean=params_n[0], cov=cov)
return output
|
|
#Written by: Holguer A Becerra.
#Based on Brian Bennett's Visual C++ code
import serial
SupportedProgRomBanks=2;
SupportedChrRomBanks=1;
DbgPacketOpCodeEcho = 0x00 # echo packet body back to debugger
DbgPacketOpCodeCpuMemRd = 0x01 # read CPU memory
DbgPacketOpCodeCpuMemWr = 0x02 # write CPU memory
DbgPacketOpCodeDbgHlt = 0x03 # debugger break (stop execution)
DbgPacketOpCodeDbgRun = 0x04 # debugger run (resume execution)
DbgPacketOpCodeCpuRegRd = 0x05 # read CPU register
DbgPacketOpCodeCpuRegWr = 0x06 # write CPU register
DbgPacketOpCodeQueryHlt = 0x07 # query if the cpu is currently halted
DbgPacketOpCodeQueryErrCode = 0x08 # query NES error code
DbgPacketOpCodePpuMemRd = 0x09 # read PPU memory
DbgPacketOpCodePpuMemWr = 0x0A # write PPU memory
DbgPacketOpCodePpuDisable = 0x0B # disable PPU
DbgPacketOpCodeCartSetCfg = 0x0C # set cartridge config from iNES header
CpuRegPcl = 0x00# PCL: Program Counter Low
CpuRegPch = 0x01# PCH: Program Counter High
CpuRegAc = 0x02# AC: Accumulator reg
CpuRegX = 0x03# X: X index reg
CpuRegY = 0x04# Y: Y index reg
CpuRegP = 0x05# P: Processor Status reg
CpuRegS = 0x06# S: Stack Pointer reg
# CpuMemWr packet fields (sent after the opcode byte):
#   addr     - memory address to write
#   numBytes - number of bytes to write
#   pData    - data to write
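# Illustrative helper (a sketch, not used by the script below, which builds
# its packets inline): a CPU memory-write packet is the opcode byte followed
# by a little-endian 16-bit address, a little-endian 16-bit byte count and
# the payload itself.
def build_cpu_mem_wr_packet(addr, data):
    packet = chr(DbgPacketOpCodeCpuMemWr)
    packet += chr(addr & 0xff) + chr((addr & 0xff00) >> 8)
    packet += chr(len(data) & 0xff) + chr((len(data) & 0xff00) >> 8)
    return packet + data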
send_data='';
ser = serial.Serial();
ser.port = "com5"
ser.baudrate = 38400;
ser.bytesize = serial.EIGHTBITS #number of bits per byte
ser.parity = serial.PARITY_ODD #set parity check: odd parity
ser.stopbits = serial.STOPBITS_ONE #number of stop bits
ser.open();
def send_byte(data):
ser.write(chr(data));
return 0;
rom=open('roms/wild_gun_man.nes','rb');
header=rom.read(4);
print header
if(header=='NES\x1a'):
print 'Reading valid rom';
else:
print 'Not valid NES ROM.. Sorry';
exit(1)
progRomBanks= rom.read(1);
chrRomBanks= rom.read(1);
progRomBanks=ord(progRomBanks)
chrRomBanks=ord(chrRomBanks)
print 'progRomBanks=' + str(progRomBanks)
print 'chrRomBanks=' + str(chrRomBanks)
if(progRomBanks>SupportedProgRomBanks or chrRomBanks>SupportedChrRomBanks):
    print 'Too many ROM Banks\nYou should try to expand the memory to UxROM on the FPGA'
    exit(1)
Flag6=ord(rom.read(1));
Flag7=ord(rom.read(1));
Flag7_1=ord(rom.read(1));
if (Flag6 & 0x08):
print "Only horizontal and vertical mirroring are supported."
exit(1)
if(Flag6 & 0x04):
print 'SRAM at 6000-7FFFh battery backed. Yes'
else:
print 'SRAM at 6000-7FFFh battery backed. No'
if(Flag6 & 0x02):
print '512 byte trainer at 7000-71FFh'
else:
print 'no trainer present'
if(Flag6 & 0x01):
print 'Four screen mode Yes'
else:
print 'Four screen mode No'
if ((((Flag6 & 0xF0) >> 4)| (Flag7 & 0xF0)) != 0):
print "Only mapper 0 is supported.";
exit(1)
#debugger break (stop execution)
send_byte(DbgPacketOpCodeDbgHlt);
#disable PPU
send_byte(DbgPacketOpCodePpuDisable);
#Set iNES header info to configure mappers.
#send DbgPacketOpCodeCartSetCfg
#send progRomBanks
#send chrRomBanks
#send Flag6
#send Flag7
#send Flag7_1
print 'Set iNES header info to configure mappers.'
send_byte(DbgPacketOpCodeCartSetCfg);
send_byte(progRomBanks);
send_byte(chrRomBanks);
send_byte(Flag6);
send_byte(Flag7);
send_byte(Flag7_1);
# compute the size of the PRG and CHR ROM banks
prgRomDataSize = progRomBanks * 0x4000;
chrRomDataSize = chrRomBanks * 0x2000;
totalBytes = prgRomDataSize + chrRomDataSize;
transferBlockSize = 0x400;
#copy PRG ROM data
_i=0
prgRomOffset=0;
while(_i<(prgRomDataSize / transferBlockSize)):
prgRomOffset = transferBlockSize * _i;
rom.seek(16+prgRomOffset)
m_pData=''
m_pData = m_pData +chr(DbgPacketOpCodeCpuMemWr);
m_pData = m_pData + chr((0x8000 + prgRomOffset) & 0xff);
m_pData = m_pData +chr(((0x8000 + prgRomOffset) & 0xff00)>>8);
m_pData = m_pData +chr(transferBlockSize & 0xff);
m_pData = m_pData +chr((transferBlockSize & 0xff00)>>8);
send_data=rom.read(transferBlockSize);
m_pData=m_pData+send_data
ser.flushOutput()
ser.write(m_pData)
print '->Programming PRG ROM ADDR ' + hex(0x8000 + prgRomOffset)
_i=_i+1;
#copy CHR ROM data
_i=0;
while(_i<(chrRomDataSize / transferBlockSize)):
chrRomOffset = transferBlockSize * _i;
rom.seek(16+prgRomDataSize+chrRomOffset)
m_pData=''
m_pData = m_pData +chr(DbgPacketOpCodePpuMemWr);
m_pData = m_pData + chr((chrRomOffset) & 0xff);
m_pData = m_pData +chr(((chrRomOffset) & 0xff00)>>8);
m_pData = m_pData +chr(transferBlockSize & 0xff);
m_pData = m_pData +chr((transferBlockSize & 0xff00)>>8);
send_data=rom.read(transferBlockSize);
m_pData=m_pData+send_data
ser.flushOutput();
ser.write(m_pData)
print '->Programming CHR ROM ADDR ' + hex(chrRomOffset)
_i=_i+1;
# Update PC to point at the reset interrupt vector location.
rom.seek(16+prgRomDataSize - 4)
pclVal = rom.read(1);
rom.seek(16 + prgRomDataSize - 3)
pchVal = rom.read(1);
print 'Update PC to point at the reset interrupt vector location'
m_pData ='';
m_pData = m_pData+ chr(DbgPacketOpCodeCpuRegWr);
m_pData = m_pData+ chr(CpuRegPcl);
m_pData = m_pData+ pclVal;
ser.write(m_pData)
m_pData ='';
m_pData = m_pData+ chr(DbgPacketOpCodeCpuRegWr);
m_pData = m_pData+ chr(CpuRegPch);
m_pData = m_pData+ pchVal;
ser.write(m_pData)
# Issue a debug run command.
print 'sending run command'
send_byte(DbgPacketOpCodeDbgRun);
rom.close()
ser.close()
|
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import datetime
import ipaddress
from email.utils import parseaddr
import idna
import six
from six.moves import urllib_parse
from cryptography import x509
from cryptography.x509.oid import (
CRLEntryExtensionOID, CertificatePoliciesOID, ExtensionOID
)
def _obj2txt(backend, obj):
# Set to 80 on the recommendation of
# https://www.openssl.org/docs/crypto/OBJ_nid2ln.html#return_values
buf_len = 80
buf = backend._ffi.new("char[]", buf_len)
res = backend._lib.OBJ_obj2txt(buf, buf_len, obj, 1)
backend.openssl_assert(res > 0)
return backend._ffi.buffer(buf, res)[:].decode()
def _decode_x509_name_entry(backend, x509_name_entry):
obj = backend._lib.X509_NAME_ENTRY_get_object(x509_name_entry)
backend.openssl_assert(obj != backend._ffi.NULL)
data = backend._lib.X509_NAME_ENTRY_get_data(x509_name_entry)
backend.openssl_assert(data != backend._ffi.NULL)
value = _asn1_string_to_utf8(backend, data)
oid = _obj2txt(backend, obj)
return x509.NameAttribute(x509.ObjectIdentifier(oid), value)
def _decode_x509_name(backend, x509_name):
count = backend._lib.X509_NAME_entry_count(x509_name)
attributes = []
prev_set_id = -1
for x in range(count):
entry = backend._lib.X509_NAME_get_entry(x509_name, x)
attribute = _decode_x509_name_entry(backend, entry)
set_id = backend._lib.Cryptography_X509_NAME_ENTRY_set(entry)
if set_id != prev_set_id:
attributes.append(set([attribute]))
else:
# is in the same RDN a previous entry
attributes[-1].add(attribute)
prev_set_id = set_id
return x509.Name(x509.RelativeDistinguishedName(rdn) for rdn in attributes)
def _decode_general_names(backend, gns):
num = backend._lib.sk_GENERAL_NAME_num(gns)
names = []
for i in range(num):
gn = backend._lib.sk_GENERAL_NAME_value(gns, i)
backend.openssl_assert(gn != backend._ffi.NULL)
names.append(_decode_general_name(backend, gn))
return names
def _decode_general_name(backend, gn):
if gn.type == backend._lib.GEN_DNS:
data = _asn1_string_to_bytes(backend, gn.d.dNSName)
if not data:
decoded = u""
elif data.startswith(b"*."):
# This is a wildcard name. We need to remove the leading wildcard,
# IDNA decode, then re-add the wildcard. Wildcard characters should
# always be left-most (RFC 2595 section 2.4).
decoded = u"*." + idna.decode(data[2:])
else:
# Not a wildcard, decode away. If the string has a * in it anywhere
# invalid this will raise an InvalidCodePoint
decoded = idna.decode(data)
if data.startswith(b"."):
# idna strips leading periods. Name constraints can have that
# so we need to re-add it. Sigh.
decoded = u"." + decoded
return x509.DNSName(decoded)
elif gn.type == backend._lib.GEN_URI:
data = _asn1_string_to_ascii(backend, gn.d.uniformResourceIdentifier)
parsed = urllib_parse.urlparse(data)
if parsed.hostname:
hostname = idna.decode(parsed.hostname)
else:
hostname = ""
if parsed.port:
netloc = hostname + u":" + six.text_type(parsed.port)
else:
netloc = hostname
# Note that building a URL in this fashion means it should be
# semantically indistinguishable from the original but is not
# guaranteed to be exactly the same.
uri = urllib_parse.urlunparse((
parsed.scheme,
netloc,
parsed.path,
parsed.params,
parsed.query,
parsed.fragment
))
return x509.UniformResourceIdentifier(uri)
elif gn.type == backend._lib.GEN_RID:
oid = _obj2txt(backend, gn.d.registeredID)
return x509.RegisteredID(x509.ObjectIdentifier(oid))
elif gn.type == backend._lib.GEN_IPADD:
data = _asn1_string_to_bytes(backend, gn.d.iPAddress)
data_len = len(data)
if data_len == 8 or data_len == 32:
# This is an IPv4 or IPv6 Network and not a single IP. This
# type of data appears in Name Constraints. Unfortunately,
# ipaddress doesn't support packed bytes + netmask. Additionally,
# IPv6Network can only handle CIDR rather than the full 16 byte
# netmask. To handle this we convert the netmask to integer, then
# find the first 0 bit, which will be the prefix. If another 1
# bit is present after that the netmask is invalid.
base = ipaddress.ip_address(data[:data_len // 2])
netmask = ipaddress.ip_address(data[data_len // 2:])
bits = bin(int(netmask))[2:]
prefix = bits.find('0')
# If no 0 bits are found it is a /32 or /128
if prefix == -1:
prefix = len(bits)
if "1" in bits[prefix:]:
raise ValueError("Invalid netmask")
ip = ipaddress.ip_network(base.exploded + u"/{0}".format(prefix))
else:
ip = ipaddress.ip_address(data)
return x509.IPAddress(ip)
elif gn.type == backend._lib.GEN_DIRNAME:
return x509.DirectoryName(
_decode_x509_name(backend, gn.d.directoryName)
)
elif gn.type == backend._lib.GEN_EMAIL:
data = _asn1_string_to_ascii(backend, gn.d.rfc822Name)
name, address = parseaddr(data)
parts = address.split(u"@")
if name or not address:
# parseaddr has found a name (e.g. Name <email>) or the entire
# value is an empty string.
raise ValueError("Invalid rfc822name value")
elif len(parts) == 1:
# Single label email name. This is valid for local delivery. No
# IDNA decoding can be done since there is no domain component.
return x509.RFC822Name(address)
else:
            # A normal email of the form someone@example.com. Let's attempt to
# decode the domain component and return the entire address.
return x509.RFC822Name(
parts[0] + u"@" + idna.decode(parts[1])
)
elif gn.type == backend._lib.GEN_OTHERNAME:
type_id = _obj2txt(backend, gn.d.otherName.type_id)
value = _asn1_to_der(backend, gn.d.otherName.value)
return x509.OtherName(x509.ObjectIdentifier(type_id), value)
else:
# x400Address or ediPartyName
raise x509.UnsupportedGeneralNameType(
"{0} is not a supported type".format(
x509._GENERAL_NAMES.get(gn.type, gn.type)
),
gn.type
)
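# --- Illustrative sketch (not called by the decoders above; the helper name
# below is made up for this example): it mirrors the GEN_IPADD branch of
# _decode_general_name, which turns a packed "address + netmask" byte string
# (8 bytes for IPv4, 32 for IPv6) into an ip_network by locating the first
# zero bit of the netmask, e.g.
#   _example_packed_to_network(b"\xc0\xa8\x00\x00\xff\xff\xff\x00")
#   -> IPv4Network(u'192.168.0.0/24')
def _example_packed_to_network(data):
    data_len = len(data)
    base = ipaddress.ip_address(data[:data_len // 2])
    netmask = ipaddress.ip_address(data[data_len // 2:])
    bits = bin(int(netmask))[2:]
    prefix = bits.find('0')
    if prefix == -1:
        # No zero bits were found: a /32 (IPv4) or /128 (IPv6) network.
        prefix = len(bits)
    if "1" in bits[prefix:]:
        raise ValueError("Invalid netmask")
    return ipaddress.ip_network(base.exploded + u"/{0}".format(prefix))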
def _decode_ocsp_no_check(backend, ext):
return x509.OCSPNoCheck()
def _decode_crl_number(backend, ext):
asn1_int = backend._ffi.cast("ASN1_INTEGER *", ext)
asn1_int = backend._ffi.gc(asn1_int, backend._lib.ASN1_INTEGER_free)
return x509.CRLNumber(_asn1_integer_to_int(backend, asn1_int))
class _X509ExtensionParser(object):
def __init__(self, ext_count, get_ext, handlers):
self.ext_count = ext_count
self.get_ext = get_ext
self.handlers = handlers
def parse(self, backend, x509_obj):
extensions = []
seen_oids = set()
for i in range(self.ext_count(backend, x509_obj)):
ext = self.get_ext(backend, x509_obj, i)
backend.openssl_assert(ext != backend._ffi.NULL)
crit = backend._lib.X509_EXTENSION_get_critical(ext)
critical = crit == 1
oid = x509.ObjectIdentifier(
_obj2txt(backend, backend._lib.X509_EXTENSION_get_object(ext))
)
if oid in seen_oids:
raise x509.DuplicateExtension(
"Duplicate {0} extension found".format(oid), oid
)
try:
handler = self.handlers[oid]
except KeyError:
if critical:
raise x509.UnsupportedExtension(
"Critical extension {0} is not currently supported"
.format(oid), oid
)
else:
# Dump the DER payload into an UnrecognizedExtension object
data = backend._lib.X509_EXTENSION_get_data(ext)
backend.openssl_assert(data != backend._ffi.NULL)
der = backend._ffi.buffer(data.data, data.length)[:]
unrecognized = x509.UnrecognizedExtension(oid, der)
extensions.append(
x509.Extension(oid, critical, unrecognized)
)
else:
ext_data = backend._lib.X509V3_EXT_d2i(ext)
if ext_data == backend._ffi.NULL:
backend._consume_errors()
raise ValueError(
"The {0} extension is invalid and can't be "
"parsed".format(oid)
)
value = handler(backend, ext_data)
extensions.append(x509.Extension(oid, critical, value))
seen_oids.add(oid)
return x509.Extensions(extensions)
def _decode_certificate_policies(backend, cp):
cp = backend._ffi.cast("Cryptography_STACK_OF_POLICYINFO *", cp)
cp_freefunc = backend._ffi.addressof(
backend._lib._original_lib, "POLICYINFO_free"
)
cp = backend._ffi.gc(
cp, lambda c: backend._lib.sk_POLICYINFO_pop_free(c, cp_freefunc)
)
num = backend._lib.sk_POLICYINFO_num(cp)
certificate_policies = []
for i in range(num):
qualifiers = None
pi = backend._lib.sk_POLICYINFO_value(cp, i)
oid = x509.ObjectIdentifier(_obj2txt(backend, pi.policyid))
if pi.qualifiers != backend._ffi.NULL:
qnum = backend._lib.sk_POLICYQUALINFO_num(pi.qualifiers)
qualifiers = []
for j in range(qnum):
pqi = backend._lib.sk_POLICYQUALINFO_value(
pi.qualifiers, j
)
pqualid = x509.ObjectIdentifier(
_obj2txt(backend, pqi.pqualid)
)
if pqualid == CertificatePoliciesOID.CPS_QUALIFIER:
cpsuri = backend._ffi.buffer(
pqi.d.cpsuri.data, pqi.d.cpsuri.length
)[:].decode('ascii')
qualifiers.append(cpsuri)
else:
assert pqualid == CertificatePoliciesOID.CPS_USER_NOTICE
user_notice = _decode_user_notice(
backend, pqi.d.usernotice
)
qualifiers.append(user_notice)
certificate_policies.append(
x509.PolicyInformation(oid, qualifiers)
)
return x509.CertificatePolicies(certificate_policies)
def _decode_user_notice(backend, un):
explicit_text = None
notice_reference = None
if un.exptext != backend._ffi.NULL:
explicit_text = _asn1_string_to_utf8(backend, un.exptext)
if un.noticeref != backend._ffi.NULL:
organization = _asn1_string_to_utf8(
backend, un.noticeref.organization
)
num = backend._lib.sk_ASN1_INTEGER_num(
un.noticeref.noticenos
)
notice_numbers = []
for i in range(num):
asn1_int = backend._lib.sk_ASN1_INTEGER_value(
un.noticeref.noticenos, i
)
notice_num = _asn1_integer_to_int(backend, asn1_int)
notice_numbers.append(notice_num)
notice_reference = x509.NoticeReference(
organization, notice_numbers
)
return x509.UserNotice(notice_reference, explicit_text)
def _decode_basic_constraints(backend, bc_st):
basic_constraints = backend._ffi.cast("BASIC_CONSTRAINTS *", bc_st)
basic_constraints = backend._ffi.gc(
basic_constraints, backend._lib.BASIC_CONSTRAINTS_free
)
# The byte representation of an ASN.1 boolean true is \xff. OpenSSL
# chooses to just map this to its ordinal value, so true is 255 and
# false is 0.
ca = basic_constraints.ca == 255
path_length = _asn1_integer_to_int_or_none(
backend, basic_constraints.pathlen
)
return x509.BasicConstraints(ca, path_length)
def _decode_subject_key_identifier(backend, asn1_string):
asn1_string = backend._ffi.cast("ASN1_OCTET_STRING *", asn1_string)
asn1_string = backend._ffi.gc(
asn1_string, backend._lib.ASN1_OCTET_STRING_free
)
return x509.SubjectKeyIdentifier(
backend._ffi.buffer(asn1_string.data, asn1_string.length)[:]
)
def _decode_authority_key_identifier(backend, akid):
akid = backend._ffi.cast("AUTHORITY_KEYID *", akid)
akid = backend._ffi.gc(akid, backend._lib.AUTHORITY_KEYID_free)
key_identifier = None
authority_cert_issuer = None
if akid.keyid != backend._ffi.NULL:
key_identifier = backend._ffi.buffer(
akid.keyid.data, akid.keyid.length
)[:]
if akid.issuer != backend._ffi.NULL:
authority_cert_issuer = _decode_general_names(
backend, akid.issuer
)
authority_cert_serial_number = _asn1_integer_to_int_or_none(
backend, akid.serial
)
return x509.AuthorityKeyIdentifier(
key_identifier, authority_cert_issuer, authority_cert_serial_number
)
def _decode_authority_information_access(backend, aia):
aia = backend._ffi.cast("Cryptography_STACK_OF_ACCESS_DESCRIPTION *", aia)
aia = backend._ffi.gc(aia, backend._lib.sk_ACCESS_DESCRIPTION_free)
num = backend._lib.sk_ACCESS_DESCRIPTION_num(aia)
access_descriptions = []
for i in range(num):
ad = backend._lib.sk_ACCESS_DESCRIPTION_value(aia, i)
backend.openssl_assert(ad.method != backend._ffi.NULL)
oid = x509.ObjectIdentifier(_obj2txt(backend, ad.method))
backend.openssl_assert(ad.location != backend._ffi.NULL)
gn = _decode_general_name(backend, ad.location)
access_descriptions.append(x509.AccessDescription(oid, gn))
return x509.AuthorityInformationAccess(access_descriptions)
def _decode_key_usage(backend, bit_string):
bit_string = backend._ffi.cast("ASN1_BIT_STRING *", bit_string)
bit_string = backend._ffi.gc(bit_string, backend._lib.ASN1_BIT_STRING_free)
get_bit = backend._lib.ASN1_BIT_STRING_get_bit
digital_signature = get_bit(bit_string, 0) == 1
content_commitment = get_bit(bit_string, 1) == 1
key_encipherment = get_bit(bit_string, 2) == 1
data_encipherment = get_bit(bit_string, 3) == 1
key_agreement = get_bit(bit_string, 4) == 1
key_cert_sign = get_bit(bit_string, 5) == 1
crl_sign = get_bit(bit_string, 6) == 1
encipher_only = get_bit(bit_string, 7) == 1
decipher_only = get_bit(bit_string, 8) == 1
return x509.KeyUsage(
digital_signature,
content_commitment,
key_encipherment,
data_encipherment,
key_agreement,
key_cert_sign,
crl_sign,
encipher_only,
decipher_only
)
def _decode_general_names_extension(backend, gns):
gns = backend._ffi.cast("GENERAL_NAMES *", gns)
gns = backend._ffi.gc(gns, backend._lib.GENERAL_NAMES_free)
general_names = _decode_general_names(backend, gns)
return general_names
def _decode_subject_alt_name(backend, ext):
return x509.SubjectAlternativeName(
_decode_general_names_extension(backend, ext)
)
def _decode_issuer_alt_name(backend, ext):
return x509.IssuerAlternativeName(
_decode_general_names_extension(backend, ext)
)
def _decode_name_constraints(backend, nc):
nc = backend._ffi.cast("NAME_CONSTRAINTS *", nc)
nc = backend._ffi.gc(nc, backend._lib.NAME_CONSTRAINTS_free)
permitted = _decode_general_subtrees(backend, nc.permittedSubtrees)
excluded = _decode_general_subtrees(backend, nc.excludedSubtrees)
return x509.NameConstraints(
permitted_subtrees=permitted, excluded_subtrees=excluded
)
def _decode_general_subtrees(backend, stack_subtrees):
if stack_subtrees == backend._ffi.NULL:
return None
num = backend._lib.sk_GENERAL_SUBTREE_num(stack_subtrees)
subtrees = []
for i in range(num):
obj = backend._lib.sk_GENERAL_SUBTREE_value(stack_subtrees, i)
backend.openssl_assert(obj != backend._ffi.NULL)
name = _decode_general_name(backend, obj.base)
subtrees.append(name)
return subtrees
def _decode_policy_constraints(backend, pc):
pc = backend._ffi.cast("POLICY_CONSTRAINTS *", pc)
pc = backend._ffi.gc(pc, backend._lib.POLICY_CONSTRAINTS_free)
require_explicit_policy = _asn1_integer_to_int_or_none(
backend, pc.requireExplicitPolicy
)
inhibit_policy_mapping = _asn1_integer_to_int_or_none(
backend, pc.inhibitPolicyMapping
)
return x509.PolicyConstraints(
require_explicit_policy, inhibit_policy_mapping
)
def _decode_extended_key_usage(backend, sk):
sk = backend._ffi.cast("Cryptography_STACK_OF_ASN1_OBJECT *", sk)
sk = backend._ffi.gc(sk, backend._lib.sk_ASN1_OBJECT_free)
num = backend._lib.sk_ASN1_OBJECT_num(sk)
ekus = []
for i in range(num):
obj = backend._lib.sk_ASN1_OBJECT_value(sk, i)
backend.openssl_assert(obj != backend._ffi.NULL)
oid = x509.ObjectIdentifier(_obj2txt(backend, obj))
ekus.append(oid)
return x509.ExtendedKeyUsage(ekus)
_DISTPOINT_TYPE_FULLNAME = 0
_DISTPOINT_TYPE_RELATIVENAME = 1
def _decode_crl_distribution_points(backend, cdps):
cdps = backend._ffi.cast("Cryptography_STACK_OF_DIST_POINT *", cdps)
dp_freefunc = backend._ffi.addressof(
backend._lib._original_lib, "DIST_POINT_free"
)
cdps = backend._ffi.gc(
cdps, lambda c: backend._lib.sk_DIST_POINT_pop_free(c, dp_freefunc)
)
num = backend._lib.sk_DIST_POINT_num(cdps)
dist_points = []
for i in range(num):
full_name = None
relative_name = None
crl_issuer = None
reasons = None
cdp = backend._lib.sk_DIST_POINT_value(cdps, i)
if cdp.reasons != backend._ffi.NULL:
# We will check each bit from RFC 5280
# ReasonFlags ::= BIT STRING {
# unused (0),
# keyCompromise (1),
# cACompromise (2),
# affiliationChanged (3),
# superseded (4),
# cessationOfOperation (5),
# certificateHold (6),
# privilegeWithdrawn (7),
# aACompromise (8) }
reasons = []
get_bit = backend._lib.ASN1_BIT_STRING_get_bit
if get_bit(cdp.reasons, 1):
reasons.append(x509.ReasonFlags.key_compromise)
if get_bit(cdp.reasons, 2):
reasons.append(x509.ReasonFlags.ca_compromise)
if get_bit(cdp.reasons, 3):
reasons.append(x509.ReasonFlags.affiliation_changed)
if get_bit(cdp.reasons, 4):
reasons.append(x509.ReasonFlags.superseded)
if get_bit(cdp.reasons, 5):
reasons.append(x509.ReasonFlags.cessation_of_operation)
if get_bit(cdp.reasons, 6):
reasons.append(x509.ReasonFlags.certificate_hold)
if get_bit(cdp.reasons, 7):
reasons.append(x509.ReasonFlags.privilege_withdrawn)
if get_bit(cdp.reasons, 8):
reasons.append(x509.ReasonFlags.aa_compromise)
reasons = frozenset(reasons)
if cdp.CRLissuer != backend._ffi.NULL:
crl_issuer = _decode_general_names(backend, cdp.CRLissuer)
# Certificates may have a crl_issuer/reasons and no distribution
# point so make sure it's not null.
if cdp.distpoint != backend._ffi.NULL:
# Type 0 is fullName, there is no #define for it in the code.
if cdp.distpoint.type == _DISTPOINT_TYPE_FULLNAME:
full_name = _decode_general_names(
backend, cdp.distpoint.name.fullname
)
# OpenSSL code doesn't test for a specific type for
# relativename, everything that isn't fullname is considered
# relativename. Per RFC 5280:
#
# DistributionPointName ::= CHOICE {
# fullName [0] GeneralNames,
# nameRelativeToCRLIssuer [1] RelativeDistinguishedName }
else:
rns = cdp.distpoint.name.relativename
rnum = backend._lib.sk_X509_NAME_ENTRY_num(rns)
attributes = set()
for i in range(rnum):
rn = backend._lib.sk_X509_NAME_ENTRY_value(
rns, i
)
backend.openssl_assert(rn != backend._ffi.NULL)
attributes.add(
_decode_x509_name_entry(backend, rn)
)
relative_name = x509.RelativeDistinguishedName(attributes)
dist_points.append(
x509.DistributionPoint(
full_name, relative_name, reasons, crl_issuer
)
)
return x509.CRLDistributionPoints(dist_points)
def _decode_inhibit_any_policy(backend, asn1_int):
asn1_int = backend._ffi.cast("ASN1_INTEGER *", asn1_int)
asn1_int = backend._ffi.gc(asn1_int, backend._lib.ASN1_INTEGER_free)
skip_certs = _asn1_integer_to_int(backend, asn1_int)
return x509.InhibitAnyPolicy(skip_certs)
# CRLReason ::= ENUMERATED {
# unspecified (0),
# keyCompromise (1),
# cACompromise (2),
# affiliationChanged (3),
# superseded (4),
# cessationOfOperation (5),
# certificateHold (6),
# -- value 7 is not used
# removeFromCRL (8),
# privilegeWithdrawn (9),
# aACompromise (10) }
_CRL_ENTRY_REASON_CODE_TO_ENUM = {
0: x509.ReasonFlags.unspecified,
1: x509.ReasonFlags.key_compromise,
2: x509.ReasonFlags.ca_compromise,
3: x509.ReasonFlags.affiliation_changed,
4: x509.ReasonFlags.superseded,
5: x509.ReasonFlags.cessation_of_operation,
6: x509.ReasonFlags.certificate_hold,
8: x509.ReasonFlags.remove_from_crl,
9: x509.ReasonFlags.privilege_withdrawn,
10: x509.ReasonFlags.aa_compromise,
}
_CRL_ENTRY_REASON_ENUM_TO_CODE = {
x509.ReasonFlags.unspecified: 0,
x509.ReasonFlags.key_compromise: 1,
x509.ReasonFlags.ca_compromise: 2,
x509.ReasonFlags.affiliation_changed: 3,
x509.ReasonFlags.superseded: 4,
x509.ReasonFlags.cessation_of_operation: 5,
x509.ReasonFlags.certificate_hold: 6,
x509.ReasonFlags.remove_from_crl: 8,
x509.ReasonFlags.privilege_withdrawn: 9,
x509.ReasonFlags.aa_compromise: 10
}
def _decode_crl_reason(backend, enum):
enum = backend._ffi.cast("ASN1_ENUMERATED *", enum)
enum = backend._ffi.gc(enum, backend._lib.ASN1_ENUMERATED_free)
code = backend._lib.ASN1_ENUMERATED_get(enum)
try:
return x509.CRLReason(_CRL_ENTRY_REASON_CODE_TO_ENUM[code])
except KeyError:
raise ValueError("Unsupported reason code: {0}".format(code))
def _decode_invalidity_date(backend, inv_date):
generalized_time = backend._ffi.cast(
"ASN1_GENERALIZEDTIME *", inv_date
)
generalized_time = backend._ffi.gc(
generalized_time, backend._lib.ASN1_GENERALIZEDTIME_free
)
return x509.InvalidityDate(
_parse_asn1_generalized_time(backend, generalized_time)
)
def _decode_cert_issuer(backend, gns):
gns = backend._ffi.cast("GENERAL_NAMES *", gns)
gns = backend._ffi.gc(gns, backend._lib.GENERAL_NAMES_free)
general_names = _decode_general_names(backend, gns)
return x509.CertificateIssuer(general_names)
def _asn1_to_der(backend, asn1_type):
buf = backend._ffi.new("unsigned char **")
res = backend._lib.i2d_ASN1_TYPE(asn1_type, buf)
backend.openssl_assert(res >= 0)
backend.openssl_assert(buf[0] != backend._ffi.NULL)
buf = backend._ffi.gc(
buf, lambda buffer: backend._lib.OPENSSL_free(buffer[0])
)
return backend._ffi.buffer(buf[0], res)[:]
def _asn1_integer_to_int(backend, asn1_int):
bn = backend._lib.ASN1_INTEGER_to_BN(asn1_int, backend._ffi.NULL)
backend.openssl_assert(bn != backend._ffi.NULL)
bn = backend._ffi.gc(bn, backend._lib.BN_free)
return backend._bn_to_int(bn)
def _asn1_integer_to_int_or_none(backend, asn1_int):
if asn1_int == backend._ffi.NULL:
return None
else:
return _asn1_integer_to_int(backend, asn1_int)
def _asn1_string_to_bytes(backend, asn1_string):
return backend._ffi.buffer(asn1_string.data, asn1_string.length)[:]
def _asn1_string_to_ascii(backend, asn1_string):
return _asn1_string_to_bytes(backend, asn1_string).decode("ascii")
def _asn1_string_to_utf8(backend, asn1_string):
buf = backend._ffi.new("unsigned char **")
res = backend._lib.ASN1_STRING_to_UTF8(buf, asn1_string)
if res == -1:
raise ValueError(
"Unsupported ASN1 string type. Type: {0}".format(asn1_string.type)
)
backend.openssl_assert(buf[0] != backend._ffi.NULL)
buf = backend._ffi.gc(
buf, lambda buffer: backend._lib.OPENSSL_free(buffer[0])
)
return backend._ffi.buffer(buf[0], res)[:].decode('utf8')
def _parse_asn1_time(backend, asn1_time):
backend.openssl_assert(asn1_time != backend._ffi.NULL)
generalized_time = backend._lib.ASN1_TIME_to_generalizedtime(
asn1_time, backend._ffi.NULL
)
backend.openssl_assert(generalized_time != backend._ffi.NULL)
generalized_time = backend._ffi.gc(
generalized_time, backend._lib.ASN1_GENERALIZEDTIME_free
)
return _parse_asn1_generalized_time(backend, generalized_time)
def _parse_asn1_generalized_time(backend, generalized_time):
time = _asn1_string_to_ascii(
backend, backend._ffi.cast("ASN1_STRING *", generalized_time)
)
return datetime.datetime.strptime(time, "%Y%m%d%H%M%SZ")
_EXTENSION_HANDLERS = {
ExtensionOID.BASIC_CONSTRAINTS: _decode_basic_constraints,
ExtensionOID.SUBJECT_KEY_IDENTIFIER: _decode_subject_key_identifier,
ExtensionOID.KEY_USAGE: _decode_key_usage,
ExtensionOID.SUBJECT_ALTERNATIVE_NAME: _decode_subject_alt_name,
ExtensionOID.EXTENDED_KEY_USAGE: _decode_extended_key_usage,
ExtensionOID.AUTHORITY_KEY_IDENTIFIER: _decode_authority_key_identifier,
ExtensionOID.AUTHORITY_INFORMATION_ACCESS: (
_decode_authority_information_access
),
ExtensionOID.CERTIFICATE_POLICIES: _decode_certificate_policies,
ExtensionOID.CRL_DISTRIBUTION_POINTS: _decode_crl_distribution_points,
ExtensionOID.OCSP_NO_CHECK: _decode_ocsp_no_check,
ExtensionOID.INHIBIT_ANY_POLICY: _decode_inhibit_any_policy,
ExtensionOID.ISSUER_ALTERNATIVE_NAME: _decode_issuer_alt_name,
ExtensionOID.NAME_CONSTRAINTS: _decode_name_constraints,
ExtensionOID.POLICY_CONSTRAINTS: _decode_policy_constraints,
}
_REVOKED_EXTENSION_HANDLERS = {
CRLEntryExtensionOID.CRL_REASON: _decode_crl_reason,
CRLEntryExtensionOID.INVALIDITY_DATE: _decode_invalidity_date,
CRLEntryExtensionOID.CERTIFICATE_ISSUER: _decode_cert_issuer,
}
_CRL_EXTENSION_HANDLERS = {
ExtensionOID.CRL_NUMBER: _decode_crl_number,
ExtensionOID.AUTHORITY_KEY_IDENTIFIER: _decode_authority_key_identifier,
ExtensionOID.ISSUER_ALTERNATIVE_NAME: _decode_issuer_alt_name,
ExtensionOID.AUTHORITY_INFORMATION_ACCESS: (
_decode_authority_information_access
),
}
_CERTIFICATE_EXTENSION_PARSER = _X509ExtensionParser(
ext_count=lambda backend, x: backend._lib.X509_get_ext_count(x),
get_ext=lambda backend, x, i: backend._lib.X509_get_ext(x, i),
handlers=_EXTENSION_HANDLERS
)
_CSR_EXTENSION_PARSER = _X509ExtensionParser(
ext_count=lambda backend, x: backend._lib.sk_X509_EXTENSION_num(x),
get_ext=lambda backend, x, i: backend._lib.sk_X509_EXTENSION_value(x, i),
handlers=_EXTENSION_HANDLERS
)
_REVOKED_CERTIFICATE_EXTENSION_PARSER = _X509ExtensionParser(
ext_count=lambda backend, x: backend._lib.X509_REVOKED_get_ext_count(x),
get_ext=lambda backend, x, i: backend._lib.X509_REVOKED_get_ext(x, i),
handlers=_REVOKED_EXTENSION_HANDLERS,
)
_CRL_EXTENSION_PARSER = _X509ExtensionParser(
ext_count=lambda backend, x: backend._lib.X509_CRL_get_ext_count(x),
get_ext=lambda backend, x, i: backend._lib.X509_CRL_get_ext(x, i),
handlers=_CRL_EXTENSION_HANDLERS,
)
|
|
import logging
import re
import sys
import html5lib
from html5lib.sanitizer import HTMLSanitizer
from html5lib.serializer.htmlserializer import HTMLSerializer
from encoding import force_unicode
from sanitizer import BleachSanitizer
VERSION = (1, 1, 1)
__version__ = '.'.join(map(str, VERSION))
__all__ = ['clean', 'linkify']
log = logging.getLogger('bleach')
ALLOWED_TAGS = [
'a',
'abbr',
'acronym',
'b',
'blockquote',
'code',
'em',
'i',
'li',
'ol',
'strong',
'ul',
]
ALLOWED_ATTRIBUTES = {
'a': ['href', 'title'],
'abbr': ['title'],
'acronym': ['title'],
}
ALLOWED_STYLES = []
TLDS = """ac ad ae aero af ag ai al am an ao aq ar arpa as asia at au aw ax az
ba bb bd be bf bg bh bi biz bj bm bn bo br bs bt bv bw by bz ca cat
cc cd cf cg ch ci ck cl cm cn co com coop cr cu cv cx cy cz de dj dk
dm do dz ec edu ee eg er es et eu fi fj fk fm fo fr ga gb gd ge gf gg
gh gi gl gm gn gov gp gq gr gs gt gu gw gy hk hm hn hr ht hu id ie il
im in info int io iq ir is it je jm jo jobs jp ke kg kh ki km kn kp
kr kw ky kz la lb lc li lk lr ls lt lu lv ly ma mc md me mg mh mil mk
ml mm mn mo mobi mp mq mr ms mt mu museum mv mw mx my mz na name nc ne
net nf ng ni nl no np nr nu nz om org pa pe pf pg ph pk pl pm pn pr pro
ps pt pw py qa re ro rs ru rw sa sb sc sd se sg sh si sj sk sl sm sn so
sr st su sv sy sz tc td tel tf tg th tj tk tl tm tn to tp tr travel tt
tv tw tz ua ug uk us uy uz va vc ve vg vi vn vu wf ws xn ye yt yu za zm
zw""".split()
TLDS.reverse()
url_re = re.compile(
r"""\(* # Match any opening parentheses.
\b(?<![@.])(?:\w[\w-]*:/{0,3}(?:(?:\w+:)?\w+@)?)? # http://
([\w-]+\.)+(?:%s)(?:\:\d+)?(?!\.\w)\b # xx.yy.tld(:##)?
(?:[/?][^\s\{\}\|\\\^\[\]`<>"]*)?
# /path/zz (excluding "unsafe" chars from RFC 1738,
# except for # and ~, which happen in practice)
""" % u'|'.join(TLDS), re.VERBOSE | re.UNICODE)
proto_re = re.compile(r'^[\w-]+:/{0,3}')
punct_re = re.compile(r'([\.,]+)$')
email_re = re.compile(
r"""(?<!//)
(([-!#$%&'*+/=?^_`{}|~0-9A-Z]+
(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)* # dot-atom
|^"([\001-\010\013\014\016-\037!#-\[\]-\177]
      |\\[\001-\011\013\014\016-\177])*" # quoted-string
)@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6})\.? # domain
""",
re.IGNORECASE | re.MULTILINE | re.VERBOSE)
NODE_TEXT = 4 # The numeric ID of a text node in simpletree.
identity = lambda x: x # The identity function.
def clean(text, tags=ALLOWED_TAGS, attributes=ALLOWED_ATTRIBUTES,
styles=ALLOWED_STYLES, strip=False, strip_comments=True,
strip_script_content=False):
"""Clean an HTML fragment and return it"""
if not text:
return u''
text = force_unicode(text)
if text.startswith(u'<!--'):
text = u' ' + text
class s(BleachSanitizer):
allowed_elements = tags
allowed_attributes = attributes
allowed_css_properties = styles
strip_disallowed_elements = strip
strip_html_comments = strip_comments
strip_scripts = strip_script_content
parser = html5lib.HTMLParser(tokenizer=s)
return _render(parser.parseFragment(text)).strip()
def linkify(text, nofollow=True, target=None, filter_url=identity,
filter_text=identity, skip_pre=False, parse_email=False):
"""Convert URL-like strings in an HTML fragment to links.
linkify() converts strings that look like URLs or domain names in a
blob of text that may be an HTML fragment to links, while preserving
(a) links already in the string, (b) urls found in attributes, and
(c) email addresses.
If the nofollow argument is True (the default) then rel="nofollow"
will be added to links created by linkify() as well as links already
found in the text.
The target argument will optionally add a target attribute with the
given value to links created by linkify() as well as links already
found in the text.
linkify() uses up to two filters on each link. For links created by
linkify(), the href attribute is passed through filter_url()
and the text of the link is passed through filter_text(). For links
already found in the document, the href attribute is passed through
filter_url(), but the text is untouched.
"""
text = force_unicode(text)
if not text:
return u''
parser = html5lib.HTMLParser(tokenizer=HTMLSanitizer)
forest = parser.parseFragment(text)
if nofollow:
rel = u'rel="nofollow"'
else:
rel = u''
def replace_nodes(tree, new_frag, node):
new_tree = parser.parseFragment(new_frag)
for n in new_tree.childNodes:
tree.insertBefore(n, node)
tree.removeChild(node)
def strip_wrapping_parentheses(fragment):
"""Strips wrapping parentheses.
Returns a tuple of the following format::
(string stripped from wrapping parentheses,
count of stripped opening parentheses,
count of stripped closing parentheses)
"""
opening_parentheses = closing_parentheses = 0
# Count consecutive opening parentheses
# at the beginning of the fragment (string).
for char in fragment:
if char == '(':
opening_parentheses += 1
else:
break
if opening_parentheses:
newer_frag = ''
# Cut the consecutive opening brackets from the fragment.
fragment = fragment[opening_parentheses:]
# Reverse the fragment for easier detection of parentheses
# inside the URL.
reverse_fragment = fragment[::-1]
skip = False
for char in reverse_fragment:
# Remove the closing parentheses if it has a matching
# opening parentheses (they are balanced).
if (char == ')' and
closing_parentheses < opening_parentheses and
not skip):
closing_parentheses += 1
continue
# Do not remove ')' from the URL itself.
elif char != ')':
skip = True
newer_frag += char
fragment = newer_frag[::-1]
return fragment, opening_parentheses, closing_parentheses
def linkify_nodes(tree, parse_text=True):
for node in tree.childNodes:
if node.type == NODE_TEXT and parse_text:
new_frag = node.toxml()
if parse_email:
new_frag = re.sub(email_re, email_repl, new_frag)
if new_frag != node.toxml():
replace_nodes(tree, new_frag, node)
linkify_nodes(tree, False)
continue
new_frag = re.sub(url_re, link_repl, new_frag)
replace_nodes(tree, new_frag, node)
elif node.name == 'a':
if 'href' in node.attributes:
if nofollow:
node.attributes['rel'] = 'nofollow'
if target is not None:
node.attributes['target'] = target
href = node.attributes['href']
node.attributes['href'] = filter_url(href)
elif skip_pre and node.name == 'pre':
linkify_nodes(node, False)
else:
linkify_nodes(node)
def email_repl(match):
repl = u'<a href="mailto:%(mail)s">%(mail)s</a>'
        return repl % {'mail': match.group(0).replace('"', '&quot;')}
def link_repl(match):
url = match.group(0)
open_brackets = close_brackets = 0
if url.startswith('('):
url, open_brackets, close_brackets = (
strip_wrapping_parentheses(url)
)
end = u''
m = re.search(punct_re, url)
if m:
end = m.group(0)
url = url[0:m.start()]
if re.search(proto_re, url):
href = url
else:
href = u''.join([u'http://', url])
repl = u'%s<a href="%s" %s>%s</a>%s%s'
attribs = [rel]
if target is not None:
attribs.append('target="%s"' % target)
return repl % ('(' * open_brackets,
filter_url(href), ' '.join(attribs), filter_text(url),
end, ')' * close_brackets)
linkify_nodes(forest)
return _render(forest)
def _render(tree):
"""Try rendering as HTML, then XML, then give up."""
try:
return force_unicode(_serialize(tree))
except Exception, e:
log.error('HTML: %r' % e, exc_info=sys.exc_info())
try:
return force_unicode(tree.toxml())
except Exception, e:
log.error('XML: %r' % e, exc_info=sys.exc_info())
return u''
def _serialize(domtree):
walker = html5lib.treewalkers.getTreeWalker('simpletree')
stream = walker(domtree)
serializer = HTMLSerializer(quote_attr_values=True,
omit_optional_tags=False)
return serializer.render(stream)
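# --- Illustrative usage (a sketch; assumes html5lib and this module's local
# `sanitizer`/`encoding` helpers are importable). clean() keeps only the
# whitelisted tags/attributes defined above, while linkify() wraps bare URLs
# (and optionally email addresses) in <a> tags.
if __name__ == '__main__':
    print clean(u'<em>safe</em> but <script>not()</script>')
    print linkify(u'see example.com for details')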
|
|
"""Elpy backend using the Rope library.
This backend uses the Rope library:
http://rope.sourceforge.net/
"""
import os
import time
import rope.contrib.codeassist
import rope.base.project
import rope.base.libutils
import rope.base.exceptions
import rope.contrib.findit
from elpy import rpc
import elpy.pydocutils
VALIDATE_EVERY_SECONDS = 5
MAXFIXES = 5
class RopeBackend(object):
"""The Rope backend class.
Implements the RPC calls we can pass on to Rope. Also subclasses
the native backend to provide methods Rope does not provide, if
any.
"""
name = "rope"
def __init__(self, project_root):
super(RopeBackend, self).__init__()
self.last_validation = 0
if not os.path.exists(project_root):
raise rpc.Fault(
"rope does not support files without a local project root",
code=400
)
self.project_root = project_root
self.completions = {}
prefs = dict(ignored_resources=['*.pyc', '*~', '.ropeproject',
'.hg', '.svn', '_svn', '.git'],
python_files=['*.py'],
save_objectdb=False,
compress_objectdb=False,
automatic_soa=True,
soa_followed_calls=0,
perform_doa=True,
validate_objectdb=True,
max_history_items=32,
save_history=False,
compress_history=False,
indent_size=4,
extension_modules=[],
import_dynload_stdmods=True,
ignore_syntax_errors=False,
ignore_bad_imports=False)
self.project = rope.base.project.Project(self.project_root,
ropefolder=None,
**prefs)
def get_resource(self, filename):
if filename is not None and os.path.exists(filename):
return rope.base.libutils.path_to_resource(self.project,
filename,
'file')
else:
return None
def validate(self):
"""Validate the stored project.
This should be called before every use of Rope. It will
revalidate the project, but do some call throttling.
"""
now = time.time()
if now > self.last_validation + VALIDATE_EVERY_SECONDS:
try:
self.project.validate()
except rope.base.exceptions.ResourceNotFoundError:
pass
self.last_validation = now
def call_rope(self, rope_function, filename, source, offset,
**kwargs):
self.validate()
resource = self.get_resource(filename)
try:
return rope_function(self.project,
source, offset,
resource,
maxfixes=MAXFIXES,
**kwargs)
except Exception:
return None
def rpc_get_completions(self, filename, source, offset):
proposals = self.call_rope(
rope.contrib.codeassist.code_assist,
filename, source, offset
)
if proposals is None:
return []
try:
starting_offset = rope.contrib.codeassist.starting_offset(source,
offset)
except Exception:
return []
prefixlen = offset - starting_offset
try:
self.completions = dict((proposal.name, proposal)
for proposal in proposals)
return [{'name': proposal.name,
'suffix': proposal.name[prefixlen:],
'annotation': proposal.type,
'meta': str(proposal)}
for proposal in proposals]
except Exception:
return []
def rpc_get_completion_docstring(self, completion):
proposal = self.completions.get(completion)
if proposal is None:
return None
else:
return proposal.get_doc()
def rpc_get_completion_location(self, completion):
proposal = self.completions.get(completion)
if proposal is None:
return None
else:
if not proposal.pyname:
return None
module, lineno = proposal.pyname.get_definition_location()
if module is None:
return None
resource = module.get_module().get_resource()
return (resource.real_path, lineno)
def rpc_get_definition(self, filename, source, offset):
location = self.call_rope(
rope.contrib.findit.find_definition,
filename, source, offset
)
if location is None:
return None
else:
return (location.resource.real_path, location.offset)
def rpc_get_calltip(self, filename, source, offset):
offset = find_called_name_offset(source, offset)
if 0 < offset < len(source) and source[offset] == ')':
offset -= 1
calltip = self.call_rope(
rope.contrib.codeassist.get_calltip,
filename, source, offset,
remove_self=True
)
if calltip is None:
return None
calltip = calltip.replace(".__init__(", "(")
calltip = calltip.replace("(self)", "()")
calltip = calltip.replace("(self, ", "(")
# "elpy.tests.support.source_and_offset(source)"
# =>
# "support.source_and_offset(source)"
try:
openpos = calltip.index("(")
period2 = calltip.rindex(".", 0, openpos)
period1 = calltip.rindex(".", 0, period2)
calltip = calltip[period1 + 1:]
except ValueError:
pass
return calltip
def rpc_get_docstring(self, filename, source, offset):
return self.call_rope(
rope.contrib.codeassist.get_doc,
filename, source, offset
)
def find_called_name_offset(source, orig_offset):
"""Return the offset of a calling function.
This only approximates movement.
"""
offset = min(orig_offset, len(source) - 1)
paren_count = 0
while True:
if offset <= 1:
return orig_offset
elif source[offset] == '(':
if paren_count == 0:
return offset - 1
else:
paren_count -= 1
elif source[offset] == ')':
paren_count += 1
offset -= 1
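# Example of the scan above: with source = "foo(a, b" and the cursor at the
# end of the string, the loop walks back to the "(" and returns 2, the
# offset of the last character of the called name "foo".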
##################################################################
# A recurring problem in Rope for Elpy is that it searches the whole
# project root for Python files. If the user edits a file in their
# home directory, this can easily read a whole lot of files, making
# Rope practically useless. We change the file finding algorithm here
# to only recurse into directories with an __init__.py file in them.
def find_source_folders(self, folder):
for resource in folder.get_folders():
if self._is_package(resource):
return [folder]
result = []
for resource in folder.get_files():
if resource.name.endswith('.py'):
result.append(folder)
break
for resource in folder.get_folders():
if self._is_package(resource):
result.append(resource)
return result
import rope.base.pycore
rope.base.pycore.PyCore._find_source_folders = find_source_folders
def get_files(self):
if self.files is None:
self.files = get_python_project_files(self.project)
return self.files
rope.base.project._FileListCacher.get_files = get_files
def get_python_project_files(project):
for dirname, subdirs, files in os.walk(project.root.real_path):
for filename in files:
yield rope.base.libutils.path_to_resource(
project, os.path.join(dirname, filename), 'file')
subdirs[:] = [subdir for subdir in subdirs
if os.path.exists(os.path.join(dirname, subdir,
"__init__.py"))]
##################################################################
# Monkey patching a method in rope because it doesn't complete import
# statements.
orig_code_completions = (rope.contrib.codeassist.
_PythonCodeAssist._code_completions)
def code_completions(self):
proposals = get_import_completions(self)
if proposals:
return proposals
else:
return orig_code_completions(self)
def get_import_completions(self):
if not self.word_finder.is_import_statement(self.offset):
return []
modulename = self.word_finder.get_primary_at(self.offset)
# Rope can handle modules in packages
if "." in modulename:
return []
return dict((name, FakeProposal(name))
for name in elpy.pydocutils.get_modules()
if name.startswith(modulename))
class FakeProposal(object):
def __init__(self, name):
self.name = name
self.type = "mock"
def get_doc(self):
return None
rope.contrib.codeassist._PythonCodeAssist._code_completions = code_completions
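# --- Illustrative usage (a sketch; assumes rope is installed and that
# '/tmp/project' exists and contains Python sources -- both are assumptions
# made only for this example).
if __name__ == '__main__':
    backend = RopeBackend('/tmp/project')
    source = 'import os\nos.pa'
    # Ask for completions with the cursor at the end of `source`.
    print(backend.rpc_get_completions(None, source, len(source)))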
|
|
from __future__ import division, print_function, absolute_import
import numpy as np
import tables
import warnings
from scipy import sparse
try:
import pandas as pd
_pandas = True
except ImportError:
_pandas = False
from deepdish import six
IO_VERSION = 8
DEEPDISH_IO_PREFIX = 'DEEPDISH_IO'
DEEPDISH_IO_VERSION_STR = DEEPDISH_IO_PREFIX + '_VERSION'
DEEPDISH_IO_UNPACK = DEEPDISH_IO_PREFIX + '_DEEPDISH_IO_UNPACK'
# Types that should be saved as pytables attribute
ATTR_TYPES = (int, float, bool, six.string_types,
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64,
np.float16, np.float32, np.float64,
np.bool_, np.complex64, np.complex128)
if _pandas:
class _HDFStoreWithHandle(pd.io.pytables.HDFStore):
def __init__(self, handle):
self._path = None
self._complevel = None
self._complib = None
self._fletcher32 = False
self._filters = None
self._handle = handle
def is_pandas_dataframe(level):
return ('pandas_version' in level._v_attrs and
'pandas_type' in level._v_attrs)
class ForcePickle(object):
"""
When saving an object with `deepdish.io.save`, you can wrap objects in this
class to force them to be pickled. They will automatically be unpacked at
load time.
"""
def __init__(self, obj):
self.obj = obj
def _dict_native_ok(d):
"""
This checks if a dictionary can be saved natively as HDF5 groups.
If it can't, it will be pickled.
"""
if len(d) >= 256:
return False
# All keys must be strings
for k in d:
if not isinstance(k, six.string_types):
return False
return True
def _get_compression_filters(compression=True):
if compression is True:
compression = 'blosc'
if compression is False or compression is None:
ff = None
else:
if isinstance(compression, (tuple, list)):
compression, level = compression
else:
level = 9
try:
ff = tables.Filters(complevel=level, complib=compression,
shuffle=True)
except Exception:
warnings.warn(("(deepdish.io.save) Missing compression method {}: "
"no compression will be used.").format(compression))
ff = None
return ff
def _save_ndarray(handler, group, name, x, filters=None):
if np.issubdtype(x.dtype, np.unicode_):
# Convert unicode strings to pure byte arrays
strtype = b'unicode'
itemsize = x.itemsize // 4
atom = tables.UInt8Atom()
x = x.view(dtype=np.uint8)
elif np.issubdtype(x.dtype, np.string_):
strtype = b'ascii'
itemsize = x.itemsize
atom = tables.StringAtom(itemsize)
else:
atom = tables.Atom.from_dtype(x.dtype)
strtype = None
itemsize = None
assert np.min(x.shape) > 0, ("deepdish.io.save does not support saving "
"numpy arrays with a zero-length axis")
# For small arrays, compression actually leads to larger files, so we are
    # setting a threshold here. The threshold has been set through
# experimentation.
if filters is not None and x.size > 300:
node = handler.create_carray(group, name, atom=atom,
shape=x.shape,
chunkshape=None,
filters=filters)
else:
node = handler.create_array(group, name, atom=atom,
shape=x.shape)
if strtype is not None:
node._v_attrs.strtype = strtype
node._v_attrs.itemsize = itemsize
node[:] = x
def _save_pickled(handler, group, level, name=None):
warnings.warn(('(deepdish.io.save) Pickling {}: This may cause '
                   'incompatibilities (for instance between Python 2 and '
'3) and should ideally be avoided').format(level),
DeprecationWarning)
node = handler.create_vlarray(group, name, tables.ObjectAtom())
node.append(level)
def _save_level(handler, group, level, name=None, filters=None):
if isinstance(level, ForcePickle):
_save_pickled(handler, group, level, name=name)
elif isinstance(level, dict) and _dict_native_ok(level):
# First create a new group
new_group = handler.create_group(group, name,
"dict:{}".format(len(level)))
for k, v in level.items():
if isinstance(k, six.string_types):
_save_level(handler, new_group, v, name=k)
elif isinstance(level, list) and len(level) < 256:
# Lists can contain other dictionaries and numpy arrays, so we don't
# want to serialize them. Instead, we will store each entry as i0, i1,
# etc.
new_group = handler.create_group(group, name,
"list:{}".format(len(level)))
for i, entry in enumerate(level):
level_name = 'i{}'.format(i)
_save_level(handler, new_group, entry, name=level_name)
elif isinstance(level, tuple) and len(level) < 256:
        # Tuples can contain other dictionaries and numpy arrays, so we don't
# want to serialize them. Instead, we will store each entry as i0, i1,
# etc.
new_group = handler.create_group(group, name,
"tuple:{}".format(len(level)))
for i, entry in enumerate(level):
level_name = 'i{}'.format(i)
_save_level(handler, new_group, entry, name=level_name)
elif isinstance(level, np.ndarray):
_save_ndarray(handler, group, name, level, filters=filters)
elif _pandas and isinstance(level, (pd.DataFrame, pd.Series, pd.Panel)):
store = _HDFStoreWithHandle(handler)
store.put(group._v_pathname + '/' + name, level)
elif isinstance(level, (sparse.dok_matrix,
sparse.lil_matrix)):
raise NotImplementedError(
'deepdish.io.save does not support DOK or LIL matrices; '
'please convert before saving to one of the following supported '
'types: BSR, COO, CSR, CSC, DIA')
elif isinstance(level, (sparse.csr_matrix,
sparse.csc_matrix,
sparse.bsr_matrix)):
new_group = handler.create_group(group, name, "sparse:")
_save_ndarray(handler, new_group, 'data', level.data, filters=filters)
_save_ndarray(handler, new_group, 'indices', level.indices, filters=filters)
_save_ndarray(handler, new_group, 'indptr', level.indptr, filters=filters)
_save_ndarray(handler, new_group, 'shape', np.asarray(level.shape))
new_group._v_attrs.format = level.format
new_group._v_attrs.maxprint = level.maxprint
elif isinstance(level, sparse.dia_matrix):
new_group = handler.create_group(group, name, "sparse:")
_save_ndarray(handler, new_group, 'data', level.data, filters=filters)
_save_ndarray(handler, new_group, 'offsets', level.offsets, filters=filters)
_save_ndarray(handler, new_group, 'shape', np.asarray(level.shape))
new_group._v_attrs.format = level.format
new_group._v_attrs.maxprint = level.maxprint
elif isinstance(level, sparse.coo_matrix):
new_group = handler.create_group(group, name, "sparse:")
_save_ndarray(handler, new_group, 'data', level.data, filters=filters)
_save_ndarray(handler, new_group, 'col', level.col, filters=filters)
_save_ndarray(handler, new_group, 'row', level.row, filters=filters)
_save_ndarray(handler, new_group, 'shape', np.asarray(level.shape))
new_group._v_attrs.format = level.format
new_group._v_attrs.maxprint = level.maxprint
elif isinstance(level, ATTR_TYPES):
setattr(group._v_attrs, name, level)
elif isinstance(level, six.binary_type):
setattr(group._v_attrs, name, level)
elif level is None:
# Store a None as an empty group
new_group = handler.create_group(group, name, "nonetype:")
else:
_save_pickled(handler, group, level, name=name)
def _load_specific_level(handler, grp, path, sel=None):
if path == '':
if sel is not None:
return _load_sliced_level(handler, grp, sel)
else:
return _load_level(handler, grp)
vv = path.split('/', 1)
if len(vv) == 1:
if hasattr(grp, vv[0]):
if sel is not None:
return _load_sliced_level(handler, getattr(grp, vv[0]), sel)
else:
return _load_level(handler, getattr(grp, vv[0]))
elif hasattr(grp, '_v_attrs') and vv[0] in grp._v_attrs:
if sel is not None:
raise ValueError("Cannot slice this type")
v = grp._v_attrs[vv[0]]
if isinstance(v, np.string_):
v = v.decode('utf-8')
return v
else:
raise ValueError('Undefined entry "{}"'.format(vv[0]))
else:
level, rest = vv
if level == '':
return _load_specific_level(handler, grp.root, rest, sel=sel)
else:
if hasattr(grp, level):
return _load_specific_level(handler, getattr(grp, level),
rest, sel=sel)
else:
raise ValueError('Undefined group "{}"'.format(level))
def _load_pickled(level):
if isinstance(level[0], ForcePickle):
return level[0].obj
else:
return level[0]
def _load_level(handler, level):
if isinstance(level, tables.Group):
dct = {}
# Load sub-groups
for grp in level:
lev = _load_level(handler, grp)
n = grp._v_name
# Check if it's a complicated pair or a string-value pair
if n.startswith('__pair'):
dct[lev['key']] = lev['value']
else:
dct[n] = lev
# Load attributes
for name in level._v_attrs._f_list():
if name.startswith(DEEPDISH_IO_PREFIX):
continue
v = level._v_attrs[name]
dct[name] = v
if level._v_title.startswith('list:'):
N = int(level._v_title[len('list:'):])
lst = []
for i in range(N):
lst.append(dct['i{}'.format(i)])
return lst
elif level._v_title.startswith('tuple:'):
N = int(level._v_title[len('tuple:'):])
lst = []
for i in range(N):
lst.append(dct['i{}'.format(i)])
return tuple(lst)
elif level._v_title.startswith('nonetype:'):
return None
elif is_pandas_dataframe(level):
assert _pandas, "pandas is required to read this file"
store = _HDFStoreWithHandle(handler)
return store.get(level._v_pathname)
elif level._v_title.startswith('sparse:'):
frm = level._v_attrs.format
if frm in ('csr', 'csc', 'bsr'):
shape = tuple(level.shape[:])
cls = {'csr': sparse.csr_matrix,
'csc': sparse.csc_matrix,
'bsr': sparse.bsr_matrix}
matrix = cls[frm](shape)
matrix.data = level.data[:]
matrix.indices = level.indices[:]
matrix.indptr = level.indptr[:]
matrix.maxprint = level._v_attrs.maxprint
return matrix
elif frm == 'dia':
shape = tuple(level.shape[:])
matrix = sparse.dia_matrix(shape)
matrix.data = level.data[:]
matrix.offsets = level.offsets[:]
matrix.maxprint = level._v_attrs.maxprint
return matrix
elif frm == 'coo':
shape = tuple(level.shape[:])
matrix = sparse.coo_matrix(shape)
matrix.data = level.data[:]
matrix.col = level.col[:]
matrix.row = level.row[:]
matrix.maxprint = level._v_attrs.maxprint
return matrix
else:
raise ValueError('Unknown sparse matrix type: {}'.format(frm))
else:
return dct
elif isinstance(level, tables.VLArray):
if level.shape == (1,):
return _load_pickled(level)
else:
return level[:]
elif isinstance(level, tables.Array):
if 'strtype' in level._v_attrs:
strtype = level._v_attrs.strtype
itemsize = level._v_attrs.itemsize
if strtype == b'unicode':
return level[:].view(dtype=(np.unicode_, itemsize))
elif strtype == b'ascii':
return level[:].view(dtype=(np.string_, itemsize))
return level[:]
def _load_sliced_level(handler, level, sel):
if isinstance(level, tables.VLArray):
if level.shape == (1,):
return _load_pickled(level)
else:
return level[sel]
elif isinstance(level, tables.Array):
return level[sel]
else:
raise ValueError('Cannot partially load this data type using `sel`')
def save(path, data, compression='blosc'):
"""
Save any Python structure to an HDF5 file. It is particularly suited for
    Numpy arrays. This function works similarly to ``numpy.save``, except if
    you save a Python object at the top level, you do not need to issue
    ``data.flat[0]`` to retrieve it from inside a Numpy array of type
``object``.
Four types of objects get saved natively in HDF5, the rest get serialized
automatically. For most needs, you should be able to stick to the four,
which are:
* Dictionaries
* Lists and tuples
* Basic data types (including strings and None)
* Numpy arrays
A recommendation is to always convert your data to using only these four
ingredients. That way your data will always be retrievable by any HDF5
reader. A class that helps you with this is `deepdish.util.Saveable`.
This function requires the `PyTables <http://www.pytables.org/>`_ module to
be installed.
Parameters
----------
path : file-like object or string
File or filename to which the data is saved.
data : anything
Data to be saved. This can be anything from a Numpy array, a string, an
object, or a dictionary containing all of them including more
dictionaries.
compression : string or tuple
Set compression method, choosing from `blosc`, `zlib`, `lzo`, `bzip2`
and more (see PyTables documentation). It can also be specified as a
tuple (e.g. ``('blosc', 5)``), with the latter value specifying the
level of compression, choosing from 0 (no compression) to 9 (maximum
compression). Set to `None` to turn off compression. The default is
`blosc` since it is fast; for greater compression rate, try for
instance `zlib`.
See also
--------
load
"""
if not isinstance(path, str):
path = path.name
filters = _get_compression_filters(compression)
with tables.open_file(path, mode='w') as h5file:
# If the data is a dictionary, put it flatly in the root
group = h5file.root
group._v_attrs[DEEPDISH_IO_VERSION_STR] = IO_VERSION
# Sparse matrices match isinstance(data, dict), so we'll have to be
# more strict with the type checking
if type(data) == type({}) and _dict_native_ok(data):
for key, value in data.items():
_save_level(h5file, group, value, name=key, filters=filters)
else:
_save_level(h5file, group, data, name='data', filters=filters)
# Mark this to automatically unpack when loaded
group._v_attrs[DEEPDISH_IO_UNPACK] = True
def load(path, group=None, sel=None, unpack=False):
"""
Loads an HDF5 saved with `save`.
This function requires the `PyTables <http://www.pytables.org/>`_ module to
be installed.
Parameters
----------
path : file-like object or string
File or filename from which to load the data.
group : string
Load a specific group in the HDF5 hierarchy.
sel : slice or tuple of slices
If you specify `group` and the target is a numpy array, then you can
use this to slice it. This is useful for opening subsets of large HDF5
files. To compose the selection, you can use `deepdish.aslice`.
unpack : bool
        If True, a single-entry dictionary will be unpacked and the value
will be returned directly. That is, if you save ``dict(a=100)``, only
``100`` will be loaded.
Returns
-------
data : anything
Hopefully an identical reconstruction of the data that was saved.
See also
--------
save
"""
if not isinstance(path, str):
path = path.name
with tables.open_file(path, mode='r') as h5file:
if group is not None:
data = _load_specific_level(h5file, h5file, group, sel=sel)
else:
grp = h5file.root
auto_unpack = (DEEPDISH_IO_UNPACK in grp._v_attrs and
grp._v_attrs[DEEPDISH_IO_UNPACK])
do_unpack = unpack or auto_unpack
if do_unpack and len(grp._v_children) == 1:
name = next(iter(grp._v_children))
data = _load_specific_level(h5file, grp, name, sel=sel)
do_unpack = False
elif sel is not None:
raise ValueError("Must specify group with `sel` unless it "
"automatically unpacks")
else:
data = _load_level(h5file, grp)
if DEEPDISH_IO_VERSION_STR in grp._v_attrs:
v = grp._v_attrs[DEEPDISH_IO_VERSION_STR]
else:
v = 0
if v > IO_VERSION:
warnings.warn('This file was saved with a newer version of '
'deepdish. Please upgrade to make sure it loads '
'correctly.')
# Attributes can't be unpacked with the method above, so fall back
# to this
if do_unpack and isinstance(data, dict) and len(data) == 1:
data = next(iter(data.values()))
return data
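# Usage sketch (added for illustration, not part of the original module): a
# minimal round trip through the `save` and `load` functions above, assuming
# numpy is available and PyTables was built with blosc support. The helper
# name and the scratch-file path are hypothetical.
def _example_save_load_roundtrip():
    import os
    import tempfile
    import numpy as np
    data = {'x': np.arange(10), 'label': 'demo'}
    path = os.path.join(tempfile.gettempdir(), 'deepdish_example.h5')
    save(path, data, compression=('blosc', 5))
    restored = load(path)
    # The array comes back unchanged; the other entries are reconstructed too.
    assert np.array_equal(restored['x'], data['x'])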
|
|
"""
Support for LED lights that can be controlled using PWM.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.pwm/
"""
import logging
import voluptuous as vol
from homeassistant.const import CONF_NAME, CONF_TYPE
from homeassistant.components.light import (
Light, ATTR_BRIGHTNESS, ATTR_HS_COLOR, ATTR_TRANSITION,
SUPPORT_BRIGHTNESS, SUPPORT_COLOR, SUPPORT_TRANSITION, PLATFORM_SCHEMA)
import homeassistant.helpers.config_validation as cv
import homeassistant.util.color as color_util
REQUIREMENTS = ['pwmled==1.3.0']
_LOGGER = logging.getLogger(__name__)
CONF_LEDS = 'leds'
CONF_DRIVER = 'driver'
CONF_PINS = 'pins'
CONF_FREQUENCY = 'frequency'
CONF_ADDRESS = 'address'
CONF_DRIVER_GPIO = 'gpio'
CONF_DRIVER_PCA9685 = 'pca9685'
CONF_DRIVER_TYPES = [CONF_DRIVER_GPIO, CONF_DRIVER_PCA9685]
CONF_LED_TYPE_SIMPLE = 'simple'
CONF_LED_TYPE_RGB = 'rgb'
CONF_LED_TYPE_RGBW = 'rgbw'
CONF_LED_TYPES = [CONF_LED_TYPE_SIMPLE, CONF_LED_TYPE_RGB, CONF_LED_TYPE_RGBW]
DEFAULT_COLOR = [0, 0]
SUPPORT_SIMPLE_LED = (SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION)
SUPPORT_RGB_LED = (SUPPORT_BRIGHTNESS | SUPPORT_COLOR | SUPPORT_TRANSITION)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_LEDS): vol.All(cv.ensure_list, [
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_DRIVER): vol.In(CONF_DRIVER_TYPES),
vol.Required(CONF_PINS): vol.All(cv.ensure_list,
[cv.positive_int]),
vol.Required(CONF_TYPE): vol.In(CONF_LED_TYPES),
vol.Optional(CONF_FREQUENCY): cv.positive_int,
vol.Optional(CONF_ADDRESS): cv.byte
}
])
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the PWM LED lights."""
from pwmled.led import SimpleLed
from pwmled.led.rgb import RgbLed
from pwmled.led.rgbw import RgbwLed
from pwmled.driver.gpio import GpioDriver
from pwmled.driver.pca9685 import Pca9685Driver
leds = []
for led_conf in config[CONF_LEDS]:
driver_type = led_conf[CONF_DRIVER]
pins = led_conf[CONF_PINS]
opt_args = {}
if CONF_FREQUENCY in led_conf:
opt_args['freq'] = led_conf[CONF_FREQUENCY]
if driver_type == CONF_DRIVER_GPIO:
driver = GpioDriver(pins, **opt_args)
elif driver_type == CONF_DRIVER_PCA9685:
if CONF_ADDRESS in led_conf:
opt_args['address'] = led_conf[CONF_ADDRESS]
driver = Pca9685Driver(pins, **opt_args)
else:
_LOGGER.error("Invalid driver type")
return
name = led_conf[CONF_NAME]
led_type = led_conf[CONF_TYPE]
if led_type == CONF_LED_TYPE_SIMPLE:
led = PwmSimpleLed(SimpleLed(driver), name)
elif led_type == CONF_LED_TYPE_RGB:
led = PwmRgbLed(RgbLed(driver), name)
elif led_type == CONF_LED_TYPE_RGBW:
led = PwmRgbLed(RgbwLed(driver), name)
else:
_LOGGER.error("Invalid led type")
return
leds.append(led)
add_entities(leds)
class PwmSimpleLed(Light):
"""Representation of a simple one-color PWM LED."""
def __init__(self, led, name):
"""Initialize one-color PWM LED."""
self._led = led
self._name = name
self._is_on = False
self._brightness = 255
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the group."""
return self._name
@property
def is_on(self):
"""Return true if device is on."""
return self._is_on
@property
def brightness(self):
"""Return the brightness property."""
return self._brightness
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_SIMPLE_LED
def turn_on(self, **kwargs):
"""Turn on a led."""
if ATTR_BRIGHTNESS in kwargs:
self._brightness = kwargs[ATTR_BRIGHTNESS]
if ATTR_TRANSITION in kwargs:
transition_time = kwargs[ATTR_TRANSITION]
self._led.transition(
transition_time,
is_on=True,
brightness=_from_hass_brightness(self._brightness))
else:
self._led.set(is_on=True,
brightness=_from_hass_brightness(self._brightness))
self._is_on = True
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn off a LED."""
if self.is_on:
if ATTR_TRANSITION in kwargs:
transition_time = kwargs[ATTR_TRANSITION]
self._led.transition(transition_time, is_on=False)
else:
self._led.off()
self._is_on = False
self.schedule_update_ha_state()
class PwmRgbLed(PwmSimpleLed):
"""Representation of a RGB(W) PWM LED."""
def __init__(self, led, name):
"""Initialize a RGB(W) PWM LED."""
super().__init__(led, name)
self._color = DEFAULT_COLOR
@property
def hs_color(self):
"""Return the color property."""
return self._color
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_RGB_LED
def turn_on(self, **kwargs):
"""Turn on a LED."""
if ATTR_HS_COLOR in kwargs:
self._color = kwargs[ATTR_HS_COLOR]
if ATTR_BRIGHTNESS in kwargs:
self._brightness = kwargs[ATTR_BRIGHTNESS]
if ATTR_TRANSITION in kwargs:
transition_time = kwargs[ATTR_TRANSITION]
self._led.transition(
transition_time,
is_on=True,
brightness=_from_hass_brightness(self._brightness),
color=_from_hass_color(self._color))
else:
self._led.set(is_on=True,
brightness=_from_hass_brightness(self._brightness),
color=_from_hass_color(self._color))
self._is_on = True
self.schedule_update_ha_state()
def _from_hass_brightness(brightness):
"""Convert Home Assistant brightness units to percentage."""
return brightness / 255
def _from_hass_color(color):
"""Convert Home Assistant RGB list to Color tuple."""
from pwmled import Color
rgb = color_util.color_hs_to_RGB(*color)
return Color(*tuple(rgb))
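# Illustrative configuration sketch (not part of this module): a possible
# configuration.yaml entry matching the PLATFORM_SCHEMA above. The names,
# pin numbers and the PCA9685 address are hypothetical.
#
# light:
#   - platform: pwm
#     leds:
#       - name: Stair light
#         driver: gpio
#         pins: [17]
#         type: simple
#       - name: Couch backlight
#         driver: pca9685
#         pins: [0, 1, 2]
#         type: rgb
#         frequency: 200
#         address: 64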
|
|
"""Basic types for building a reconstruction."""
import numpy as np
import cv2
class Pose(object):
"""Defines the pose parameters of a camera.
    The extrinsic parameters are defined by a 3x1 rotation vector which
    maps the camera rotation with respect to the origin frame (rotation) and
    a 3x1 translation vector which maps the camera translation with respect
    to the origin frame (translation).
    Attributes:
        rotation (vector): the rotation vector.
        translation (vector): the translation vector.
"""
def __init__(self, rotation=np.zeros(3), translation=np.zeros(3)):
self.rotation = rotation
self.translation = translation
@property
def rotation(self):
"""Rotation in angle-axis format."""
return self._rotation
@rotation.setter
def rotation(self, value):
self._rotation = np.asarray(value, dtype=float)
@property
def translation(self):
"""Translation vector."""
return self._translation
@translation.setter
def translation(self, value):
self._translation = np.asarray(value, dtype=float)
def transform(self, point):
"""Transform a point from world to this pose coordinates."""
return self.get_rotation_matrix().dot(point) + self.translation
def transform_many(self, points):
"""Transform points from world coordinates to this pose."""
return points.dot(self.get_rotation_matrix().T) + self.translation
def transform_inverse(self, point):
"""Transform a point from this pose to world coordinates."""
return self.get_rotation_matrix().T.dot(point - self.translation)
def transform_inverse_many(self, points):
"""Transform points from this pose to world coordinates."""
return (points - self.translation).dot(self.get_rotation_matrix())
def get_rotation_matrix(self):
"""Get rotation as a 3x3 matrix."""
return cv2.Rodrigues(self.rotation)[0]
def set_rotation_matrix(self, rotation_matrix, permissive=False):
"""Set rotation as a 3x3 matrix.
>>> pose = Pose()
>>> pose.rotation = np.array([0., 1., 2.])
>>> R = pose.get_rotation_matrix()
>>> pose.set_rotation_matrix(R)
>>> np.allclose(pose.rotation, [0., 1., 2.])
True
>>> pose.set_rotation_matrix([[3,-4, 1], [ 5, 3,-7], [-9, 2, 6]])
Traceback (most recent call last):
...
ValueError: Not orthogonal
>>> pose.set_rotation_matrix([[0, 0, 1], [-1, 0, 0], [0, 1, 0]])
Traceback (most recent call last):
...
ValueError: Determinant not 1
"""
R = np.array(rotation_matrix, dtype=float)
if not permissive:
if not np.isclose(np.linalg.det(R), 1):
raise ValueError("Determinant not 1")
if not np.allclose(np.linalg.inv(R), R.T):
raise ValueError("Not orthogonal")
self.rotation = cv2.Rodrigues(R)[0].ravel()
def get_origin(self):
"""The origin of the pose in world coordinates."""
return -self.get_rotation_matrix().T.dot(self.translation)
def set_origin(self, origin):
"""Set the origin of the pose in world coordinates.
>>> pose = Pose()
>>> pose.rotation = np.array([0., 1., 2.])
>>> origin = [1., 2., 3.]
>>> pose.set_origin(origin)
>>> np.allclose(origin, pose.get_origin())
True
"""
self.translation = -self.get_rotation_matrix().dot(origin)
def get_Rt(self):
"""Get pose as a 3x4 matrix (R|t)."""
Rt = np.empty((3, 4))
Rt[:, :3] = self.get_rotation_matrix()
Rt[:, 3] = self.translation
return Rt
def compose(self, other):
"""Get the composition of this pose with another.
composed = self * other
"""
selfR = self.get_rotation_matrix()
otherR = other.get_rotation_matrix()
R = np.dot(selfR, otherR)
t = selfR.dot(other.translation) + self.translation
res = Pose()
res.set_rotation_matrix(R)
res.translation = t
return res
def inverse(self):
"""Get the inverse of this pose."""
inverse = Pose()
R = self.get_rotation_matrix()
inverse.set_rotation_matrix(R.T)
inverse.translation = -R.T.dot(self.translation)
return inverse
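# Usage sketch (added for illustration, not part of the original module):
# composing a pose with its inverse gives the identity transform, and
# transform/transform_inverse undo each other. The helper name is hypothetical.
def _example_pose_roundtrip():
    pose = Pose(rotation=[0.1, 0.2, 0.3], translation=[1.0, 2.0, 3.0])
    identity = pose.compose(pose.inverse())
    assert np.allclose(identity.get_rotation_matrix(), np.eye(3))
    assert np.allclose(identity.translation, 0.0)
    point = np.array([0.5, -1.0, 2.0])
    assert np.allclose(pose.transform_inverse(pose.transform(point)), point)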
class ShotMetadata(object):
"""Defines GPS data from a taken picture.
Attributes:
orientation (int): the exif orientation tag (1-8).
capture_time (real): the capture time.
gps_dop (real): the GPS dop.
gps_position (vector): the GPS position.
"""
def __init__(self):
self.orientation = None
self.gps_dop = None
self.gps_position = None
self.accelerometer = None
self.compass = None
self.capture_time = None
self.skey = None
class ShotMesh(object):
"""Triangular mesh of points visible in a shot
Attributes:
vertices: (list of vectors) mesh vertices
faces: (list of triplets) triangles' topology
"""
def __init__(self):
self.vertices = None
self.faces = None
class Camera(object):
"""Abstract camera class.
    A camera is uniquely defined by its identification description (id),
    its projection type (projection_type) and its internal calibration
    parameters, which depend on the particular Camera sub-class.
Attributes:
id (str): camera description.
projection_type (str): projection type.
"""
pass
class PerspectiveCamera(Camera):
"""Define a perspective camera.
Attributes:
width (int): image width.
height (int): image height.
        focal (real): estimated focal length.
k1 (real): estimated first distortion parameter.
k2 (real): estimated second distortion parameter.
        focal_prior (real): prior focal length.
k1_prior (real): prior first distortion parameter.
k2_prior (real): prior second distortion parameter.
"""
def __init__(self):
"""Defaut constructor."""
self.id = None
self.projection_type = 'perspective'
self.width = None
self.height = None
self.focal = None
self.k1 = None
self.k2 = None
self.focal_prior = None
self.k1_prior = None
self.k2_prior = None
def __repr__(self):
return '{}({!r}, {!r}, {!r}, {!r}, {!r}, {!r}, {!r}, {!r}, {!r}, {!r})'.format(
self.__class__.__name__,
self.id, self.projection_type, self.width, self.height,
self.focal, self.k1, self.k2,
self.focal_prior, self.k1_prior, self.k2_prior)
def project(self, point):
"""Project a 3D point in camera coordinates to the image plane."""
# Normalized image coordinates
xn = point[0] / point[2]
yn = point[1] / point[2]
# Radial distortion
r2 = xn * xn + yn * yn
distortion = 1.0 + r2 * (self.k1 + self.k2 * r2)
return np.array([self.focal * distortion * xn,
self.focal * distortion * yn])
def project_many(self, points):
"""Project 3D points in camera coordinates to the image plane."""
distortion = np.array([self.k1, self.k2, 0, 0, 0])
K, R, t = self.get_K(), np.zeros(3), np.zeros(3)
pixels, _ = cv2.projectPoints(points, R, t, K, distortion)
return pixels.reshape((-1, 2))
def pixel_bearing(self, pixel):
"""Unit vector pointing to the pixel viewing direction."""
point = np.asarray(pixel).reshape((1, 1, 2))
distortion = np.array([self.k1, self.k2, 0., 0.])
x, y = cv2.undistortPoints(point, self.get_K(), distortion).flat
l = np.sqrt(x * x + y * y + 1.0)
return np.array([x / l, y / l, 1.0 / l])
def pixel_bearing_many(self, pixels):
"""Unit vectors pointing to the pixel viewing directions."""
points = pixels.reshape((-1, 1, 2)).astype(np.float64)
distortion = np.array([self.k1, self.k2, 0., 0.])
up = cv2.undistortPoints(points, self.get_K(), distortion)
up = up.reshape((-1, 2))
x = up[:, 0]
y = up[:, 1]
l = np.sqrt(x * x + y * y + 1.0)
return np.column_stack((x / l, y / l, 1.0 / l))
def pixel_bearings(self, pixels):
"""Deprecated: use pixel_bearing_many."""
return self.pixel_bearing_many(pixels)
def back_project(self, pixel, depth):
"""Project a pixel to a fronto-parallel plane at a given depth."""
bearing = self.pixel_bearing(pixel)
scale = depth / bearing[2]
return scale * bearing
def back_project_many(self, pixels, depths):
"""Project pixels to fronto-parallel planes at given depths."""
bearings = self.pixel_bearing_many(pixels)
scales = depths / bearings[:, 2]
return scales[:, np.newaxis] * bearings
def get_K(self):
"""The calibration matrix."""
return np.array([[self.focal, 0., 0.],
[0., self.focal, 0.],
[0., 0., 1.]])
def get_K_in_pixel_coordinates(self, width=None, height=None):
"""The calibration matrix that maps to pixel coordinates.
        Coordinates (0,0) correspond to the center of the top-left pixel,
        and (width - 1, height - 1) to the center of the bottom-right pixel.
        You can optionally pass the width and height of the image, in case
        you are using a resized version of the original image.
"""
w = width or self.width
h = height or self.height
f = self.focal * max(w, h)
return np.array([[f, 0, 0.5 * (w - 1)],
[0, f, 0.5 * (h - 1)],
[0, 0, 1.0]])
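# Usage sketch (added for illustration, not part of the original module): with a
# hypothetical, distortion-free perspective camera, back_project recovers a
# point that was projected at a known depth.
def _example_perspective_projection():
    camera = PerspectiveCamera()
    camera.width, camera.height = 640, 480
    camera.focal = 0.9
    camera.k1 = camera.k2 = 0.0
    point = np.array([0.1, 0.2, 2.0])
    pixel = camera.project(point)
    assert np.allclose(camera.back_project(pixel, depth=2.0), point)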
class BrownPerspectiveCamera(Camera):
"""Define a perspective camera.
Attributes:
width (int): image width.
height (int): image height.
focal_x (real): estimated focal length for the X axis.
focal_y (real): estimated focal length for the Y axis.
c_x (real): estimated principal point X.
c_y (real): estimated principal point Y.
k1 (real): estimated first radial distortion parameter.
k2 (real): estimated second radial distortion parameter.
p1 (real): estimated first tangential distortion parameter.
p2 (real): estimated second tangential distortion parameter.
k3 (real): estimated third radial distortion parameter.
focal_x_prior (real): prior focal length for the X axis.
focal_y_prior (real): prior focal length for the Y axis.
c_x_prior (real): prior principal point X.
c_y_prior (real): prior principal point Y.
k1_prior (real): prior first radial distortion parameter.
k2_prior (real): prior second radial distortion parameter.
p1_prior (real): prior first tangential distortion parameter.
p2_prior (real): prior second tangential distortion parameter.
k3_prior (real): prior third radial distortion parameter.
"""
def __init__(self):
"""Defaut constructor."""
self.id = None
self.projection_type = 'brown'
self.width = None
self.height = None
self.focal_x = None
self.focal_y = None
self.c_x = None
self.c_y = None
self.k1 = None
self.k2 = None
self.p1 = None
self.p2 = None
self.k3 = None
self.focal_x_prior = None
self.focal_y_prior = None
self.c_x_prior = None
self.c_y_prior = None
self.k1_prior = None
self.k2_prior = None
self.p1_prior = None
self.p2_prior = None
self.k3_prior = None
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, self.__dict__)
def project(self, point):
"""Project a 3D point in camera coordinates to the image plane."""
# Normalized image coordinates
xn = point[0] / point[2]
yn = point[1] / point[2]
# Radial and tangential distortion
r2 = xn * xn + yn * yn
radial_distortion = 1.0 + r2 * (self.k1 + r2 * (self.k2 + r2 * self.k3))
x_tangential_distortion = 2 * self.p1 * xn * yn + self.p2 * (r2 + 2 * xn * xn)
x_distorted = xn * radial_distortion + x_tangential_distortion
y_tangential_distortion = self.p1 * (r2 + 2 * yn * yn) + 2 * self.p2 * xn * yn
y_distorted = yn * radial_distortion + y_tangential_distortion
return np.array([self.focal_x * x_distorted + self.c_x,
self.focal_y * y_distorted + self.c_y])
def project_many(self, points):
"""Project 3D points in camera coordinates to the image plane."""
distortion = np.array([self.k1, self.k2, self.p1, self.p2, self.k3])
K, R, t = self.get_K(), np.zeros(3), np.zeros(3)
pixels, _ = cv2.projectPoints(points, R, t, K, distortion)
return pixels.reshape((-1, 2))
def pixel_bearing(self, pixel):
"""Unit vector pointing to the pixel viewing direction."""
point = np.asarray(pixel).reshape((1, 1, 2))
distortion = np.array([self.k1, self.k2, self.p1, self.p2, self.k3])
x, y = cv2.undistortPoints(point, self.get_K(), distortion).flat
l = np.sqrt(x * x + y * y + 1.0)
return np.array([x / l, y / l, 1.0 / l])
def pixel_bearing_many(self, pixels):
"""Unit vector pointing to the pixel viewing directions."""
points = pixels.reshape((-1, 1, 2)).astype(np.float64)
distortion = np.array([self.k1, self.k2, self.p1, self.p2, self.k3])
up = cv2.undistortPoints(points, self.get_K(), distortion)
up = up.reshape((-1, 2))
x = up[:, 0]
y = up[:, 1]
l = np.sqrt(x * x + y * y + 1.0)
return np.column_stack((x / l, y / l, 1.0 / l))
def pixel_bearings(self, pixels):
"""Deprecated: use pixel_bearing_many."""
return self.pixel_bearing_many(pixels)
def back_project(self, pixel, depth):
"""Project a pixel to a fronto-parallel plane at a given depth."""
bearing = self.pixel_bearing(pixel)
scale = depth / bearing[2]
return scale * bearing
def back_project_many(self, pixels, depths):
"""Project pixels to fronto-parallel planes at given depths."""
bearings = self.pixel_bearing_many(pixels)
scales = depths / bearings[:, 2]
return scales[:, np.newaxis] * bearings
def get_K(self):
"""The calibration matrix."""
return np.array([[self.focal_x, 0., self.c_x],
[0., self.focal_y, self.c_y],
[0., 0., 1.]])
def get_K_in_pixel_coordinates(self, width=None, height=None):
"""The calibration matrix that maps to pixel coordinates.
        Coordinates (0,0) correspond to the center of the top-left pixel,
        and (width - 1, height - 1) to the center of the bottom-right pixel.
        You can optionally pass the width and height of the image, in case
        you are using a resized version of the original image.
"""
w = width or self.width
h = height or self.height
s = max(w, h)
normalized_to_pixel = np.array([
[s, 0, (w - 1) / 2.0],
[0, s, (h - 1) / 2.0],
[0, 0, 1],
])
return np.dot(normalized_to_pixel, self.get_K())
class FisheyeCamera(Camera):
"""Define a fisheye camera.
Attributes:
width (int): image width.
height (int): image height.
        focal (real): estimated focal length.
k1 (real): estimated first distortion parameter.
k2 (real): estimated second distortion parameter.
        focal_prior (real): prior focal length.
k1_prior (real): prior first distortion parameter.
k2_prior (real): prior second distortion parameter.
"""
def __init__(self):
"""Defaut constructor."""
self.id = None
self.projection_type = 'fisheye'
self.width = None
self.height = None
self.focal = None
self.k1 = None
self.k2 = None
self.focal_prior = None
self.k1_prior = None
self.k2_prior = None
def project(self, point):
"""Project a 3D point in camera coordinates to the image plane."""
x, y, z = point
l = np.sqrt(x**2 + y**2)
theta = np.arctan2(l, z)
theta_d = theta * (1.0 + theta**2 * (self.k1 + theta**2 * self.k2))
s = self.focal * theta_d / l
return np.array([s * x, s * y])
def project_many(self, points):
"""Project 3D points in camera coordinates to the image plane."""
points = points.reshape((-1, 1, 3)).astype(np.float64)
distortion = np.array([self.k1, self.k2, 0., 0.])
K, R, t = self.get_K(), np.zeros(3), np.zeros(3)
pixels, _ = cv2.fisheye.projectPoints(points, R, t, K, distortion)
return pixels.reshape((-1, 2))
def pixel_bearing(self, pixel):
"""Unit vector pointing to the pixel viewing direction."""
point = np.asarray(pixel).reshape((1, 1, 2))
distortion = np.array([self.k1, self.k2, 0., 0.])
x, y = cv2.fisheye.undistortPoints(point, self.get_K(), distortion).flat
l = np.sqrt(x * x + y * y + 1.0)
return np.array([x / l, y / l, 1.0 / l])
def pixel_bearing_many(self, pixels):
"""Unit vector pointing to the pixel viewing directions."""
points = pixels.reshape((-1, 1, 2)).astype(np.float64)
distortion = np.array([self.k1, self.k2, 0., 0.])
up = cv2.fisheye.undistortPoints(points, self.get_K(), distortion)
up = up.reshape((-1, 2))
x = up[:, 0]
y = up[:, 1]
l = np.sqrt(x * x + y * y + 1.0)
return np.column_stack((x / l, y / l, 1.0 / l))
def pixel_bearings(self, pixels):
"""Deprecated: use pixel_bearing_many."""
return self.pixel_bearing_many(pixels)
def back_project(self, pixel, depth):
"""Project a pixel to a fronto-parallel plane at a given depth."""
bearing = self.pixel_bearing(pixel)
scale = depth / bearing[2]
return scale * bearing
def back_project_many(self, pixels, depths):
"""Project pixels to fronto-parallel planes at given depths."""
bearings = self.pixel_bearing_many(pixels)
scales = depths / bearings[:, 2]
return scales[:, np.newaxis] * bearings
def get_K(self):
"""The calibration matrix."""
return np.array([[self.focal, 0., 0.],
[0., self.focal, 0.],
[0., 0., 1.]])
def get_K_in_pixel_coordinates(self, width=None, height=None):
"""The calibration matrix that maps to pixel coordinates.
        Coordinates (0,0) correspond to the center of the top-left pixel,
        and (width - 1, height - 1) to the center of the bottom-right pixel.
        You can optionally pass the width and height of the image, in case
        you are using a resized version of the original image.
"""
w = width or self.width
h = height or self.height
f = self.focal * max(w, h)
return np.array([[f, 0, 0.5 * (w - 1)],
[0, f, 0.5 * (h - 1)],
[0, 0, 1.0]])
class SphericalCamera(Camera):
"""A spherical camera generating equirectangular projections.
Attributes:
width (int): image width.
height (int): image height.
"""
def __init__(self):
"""Defaut constructor."""
self.id = None
self.projection_type = 'equirectangular'
self.width = None
self.height = None
def project(self, point):
"""Project a 3D point in camera coordinates to the image plane."""
x, y, z = point
lon = np.arctan2(x, z)
lat = np.arctan2(-y, np.sqrt(x**2 + z**2))
return np.array([lon / (2 * np.pi), -lat / (2 * np.pi)])
def project_many(self, points):
"""Project 3D points in camera coordinates to the image plane."""
x, y, z = points.T
lon = np.arctan2(x, z)
lat = np.arctan2(-y, np.sqrt(x**2 + z**2))
return np.column_stack([lon / (2 * np.pi), -lat / (2 * np.pi)])
def pixel_bearing(self, pixel):
"""Unit vector pointing to the pixel viewing direction."""
lon = pixel[0] * 2 * np.pi
lat = -pixel[1] * 2 * np.pi
x = np.cos(lat) * np.sin(lon)
y = -np.sin(lat)
z = np.cos(lat) * np.cos(lon)
return np.array([x, y, z])
def pixel_bearing_many(self, pixels):
"""Unit vector pointing to the pixel viewing directions."""
lon = pixels[:, 0] * 2 * np.pi
lat = -pixels[:, 1] * 2 * np.pi
x = np.cos(lat) * np.sin(lon)
y = -np.sin(lat)
z = np.cos(lat) * np.cos(lon)
return np.column_stack([x, y, z]).astype(float)
def pixel_bearings(self, pixels):
"""Deprecated: use pixel_bearing_many."""
return self.pixel_bearing_many(pixels)
class Shot(object):
"""Defines a shot in a reconstructed scene.
    A shot here refers to a unique view inside the scene, defined by
    the image filename (id), the camera used with its refined internal
    parameters (camera), the full camera pose with respect to the scene origin
    frame (pose) and the GPS data obtained at the moment the picture
    was taken (metadata).
Attributes:
id (str): picture filename.
camera (Camera): camera.
pose (Pose): extrinsic parameters.
metadata (ShotMetadata): GPS, compass, capture time, etc.
"""
def __init__(self):
"""Defaut constructor."""
self.id = None
self.camera = None
self.pose = None
self.metadata = None
self.mesh = None
def project(self, point):
"""Project a 3D point to the image plane."""
camera_point = self.pose.transform(point)
return self.camera.project(camera_point)
def project_many(self, points):
"""Project 3D points to the image plane."""
camera_point = self.pose.transform_many(points)
return self.camera.project_many(camera_point)
def back_project(self, pixel, depth):
"""Project a pixel to a fronto-parallel plane at a given depth.
The plane is defined by z = depth in the shot reference frame.
"""
point_in_cam_coords = self.camera.back_project(pixel, depth)
return self.pose.transform_inverse(point_in_cam_coords)
def back_project_many(self, pixels, depths):
"""Project pixels to fronto-parallel planes at given depths.
The planes are defined by z = depth in the shot reference frame.
"""
points_in_cam_coords = self.camera.back_project_many(pixels, depths)
return self.pose.transform_inverse_many(points_in_cam_coords)
def viewing_direction(self):
"""The viewing direction of the shot.
That is the positive camera Z axis in world coordinates.
"""
return self.pose.get_rotation_matrix().T.dot([0, 0, 1])
class Point(object):
"""Defines a 3D point.
Attributes:
id (int): identification number.
color (list(int)): list containing the RGB values.
coordinates (list(real)): list containing the 3D position.
reprojection_error (real): the reprojection error.
"""
def __init__(self):
"""Defaut constructor"""
self.id = None
self.color = None
self.coordinates = None
self.reprojection_error = None
class GroundControlPointObservation(object):
"""A ground control point observation.
Attributes:
        lla: latitude, longitude and altitude
coordinates: x, y, z coordinates in topocentric reference frame
shot_id: the shot where the point is observed
shot_coordinates: 2d coordinates of the observation
"""
def __init__(self):
self.lla = None
self.coordinates = None
self.shot_id = None
self.shot_coordinates = None
class Reconstruction(object):
"""Defines the reconstructed scene.
Attributes:
        cameras (Dict(Camera)): dictionary of cameras, keyed by camera id.
        shots (Dict(Shot)): dictionary of reconstructed shots, keyed by shot id.
        points (Dict(Point)): dictionary of reconstructed points, keyed by point id.
"""
def __init__(self):
"""Defaut constructor"""
self.cameras = {}
self.shots = {}
self.points = {}
def add_camera(self, camera):
"""Add a camera in the list
:param camera: The camera.
"""
self.cameras[camera.id] = camera
def get_camera(self, id):
"""Return a camera by id.
        :return: The camera if it exists, otherwise None.
"""
return self.cameras.get(id)
def add_shot(self, shot):
"""Add a shot in the list
:param shot: The shot.
"""
self.shots[shot.id] = shot
def get_shot(self, id):
"""Return a shot by id.
        :return: The shot if it exists, otherwise None.
"""
return self.shots.get(id)
def add_point(self, point):
"""Add a point in the list
:param point: The point.
"""
self.points[point.id] = point
def get_point(self, id):
"""Return a point by id.
        :return: The point if it exists, otherwise None.
"""
return self.points.get(id)
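# Usage sketch (added for illustration, not part of the original module):
# assemble a minimal reconstruction with one hypothetical camera and shot and
# look them up by id.
def _example_reconstruction():
    camera = PerspectiveCamera()
    camera.id = 'cam_1'
    camera.width, camera.height = 640, 480
    camera.focal = 0.9
    camera.k1 = camera.k2 = 0.0
    shot = Shot()
    shot.id = 'image_001.jpg'
    shot.camera = camera
    shot.pose = Pose()
    reconstruction = Reconstruction()
    reconstruction.add_camera(camera)
    reconstruction.add_shot(shot)
    assert reconstruction.get_camera('cam_1') is camera
    assert reconstruction.get_shot('image_001.jpg') is shot
    assert reconstruction.get_point('missing') is None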
|
|
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Utils
"""
import uuid
import mock
import six
from nova.compute import flavors
from nova.compute import utils as compute_utils
from nova import exception
from nova import objects
from nova import rpc
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_flavor
class SchedulerUtilsTestCase(test.NoDBTestCase):
"""Test case for scheduler utils methods."""
def setUp(self):
super(SchedulerUtilsTestCase, self).setUp()
self.context = 'fake-context'
def test_build_request_spec_without_image(self):
instance = {'uuid': 'fake-uuid'}
instance_type = objects.Flavor(**test_flavor.fake_flavor)
with mock.patch.object(flavors, 'extract_flavor') as mock_extract:
mock_extract.return_value = instance_type
request_spec = scheduler_utils.build_request_spec(self.context,
None,
[instance])
mock_extract.assert_called_once_with({'uuid': 'fake-uuid'})
self.assertEqual({}, request_spec['image'])
def test_build_request_spec_with_object(self):
instance_type = objects.Flavor()
instance = fake_instance.fake_instance_obj(self.context)
with mock.patch.object(instance, 'get_flavor') as mock_get:
mock_get.return_value = instance_type
request_spec = scheduler_utils.build_request_spec(self.context,
None,
[instance])
mock_get.assert_called_once_with()
self.assertIsInstance(request_spec['instance_properties'], dict)
@mock.patch.object(rpc, 'get_notifier', return_value=mock.Mock())
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
@mock.patch.object(objects.Instance, 'save')
def test_set_vm_state_and_notify(self, mock_save, mock_add, mock_get):
expected_uuid = 'fake-uuid'
request_spec = dict(instance_properties=dict(uuid='other-uuid'))
updates = dict(vm_state='fake-vm-state')
service = 'fake-service'
method = 'fake-method'
exc_info = 'exc_info'
payload = dict(request_spec=request_spec,
instance_properties=request_spec.get(
'instance_properties', {}),
instance_id=expected_uuid,
state='fake-vm-state',
method=method,
reason=exc_info)
event_type = '%s.%s' % (service, method)
scheduler_utils.set_vm_state_and_notify(self.context,
expected_uuid,
service,
method,
updates,
exc_info,
request_spec)
mock_save.assert_called_once_with()
mock_add.assert_called_once_with(self.context, mock.ANY,
exc_info, mock.ANY)
self.assertIsInstance(mock_add.call_args[0][1], objects.Instance)
self.assertIsInstance(mock_add.call_args[0][3], tuple)
mock_get.return_value.error.assert_called_once_with(self.context,
event_type,
payload)
def test_build_filter_properties(self):
sched_hints = {'hint': ['over-there']}
forced_host = 'forced-host1'
forced_node = 'forced-node1'
instance_type = objects.Flavor()
filt_props = scheduler_utils.build_filter_properties(sched_hints,
forced_host, forced_node, instance_type)
self.assertEqual(sched_hints, filt_props['scheduler_hints'])
self.assertEqual([forced_host], filt_props['force_hosts'])
self.assertEqual([forced_node], filt_props['force_nodes'])
self.assertEqual(instance_type, filt_props['instance_type'])
def test_build_filter_properties_no_forced_host_no_force_node(self):
sched_hints = {'hint': ['over-there']}
forced_host = None
forced_node = None
instance_type = objects.Flavor()
filt_props = scheduler_utils.build_filter_properties(sched_hints,
forced_host, forced_node, instance_type)
self.assertEqual(sched_hints, filt_props['scheduler_hints'])
self.assertEqual(instance_type, filt_props['instance_type'])
self.assertNotIn('forced_host', filt_props)
self.assertNotIn('forced_node', filt_props)
def _test_populate_filter_props(self, host_state_obj=True,
with_retry=True,
force_hosts=None,
force_nodes=None):
if force_hosts is None:
force_hosts = []
if force_nodes is None:
force_nodes = []
if with_retry:
if ((len(force_hosts) == 1 and len(force_nodes) <= 1)
or (len(force_nodes) == 1 and len(force_hosts) <= 1)):
filter_properties = dict(force_hosts=force_hosts,
force_nodes=force_nodes)
elif len(force_hosts) > 1 or len(force_nodes) > 1:
filter_properties = dict(retry=dict(hosts=[]),
force_hosts=force_hosts,
force_nodes=force_nodes)
else:
filter_properties = dict(retry=dict(hosts=[]))
else:
filter_properties = dict()
if host_state_obj:
class host_state(object):
host = 'fake-host'
nodename = 'fake-node'
limits = 'fake-limits'
else:
host_state = dict(host='fake-host',
nodename='fake-node',
limits='fake-limits')
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
enable_retry_force_hosts = not force_hosts or len(force_hosts) > 1
enable_retry_force_nodes = not force_nodes or len(force_nodes) > 1
if with_retry or enable_retry_force_hosts or enable_retry_force_nodes:
# So we can check for 2 hosts
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
if force_hosts:
expected_limits = None
else:
expected_limits = 'fake-limits'
self.assertEqual(expected_limits,
filter_properties.get('limits'))
if (with_retry and enable_retry_force_hosts
and enable_retry_force_nodes):
self.assertEqual([['fake-host', 'fake-node'],
['fake-host', 'fake-node']],
filter_properties['retry']['hosts'])
else:
self.assertNotIn('retry', filter_properties)
def test_populate_filter_props(self):
self._test_populate_filter_props()
def test_populate_filter_props_host_dict(self):
self._test_populate_filter_props(host_state_obj=False)
def test_populate_filter_props_no_retry(self):
self._test_populate_filter_props(with_retry=False)
def test_populate_filter_props_force_hosts_no_retry(self):
self._test_populate_filter_props(force_hosts=['force-host'])
def test_populate_filter_props_force_nodes_no_retry(self):
self._test_populate_filter_props(force_nodes=['force-node'])
def test_populate_filter_props_multi_force_hosts_with_retry(self):
self._test_populate_filter_props(force_hosts=['force-host1',
'force-host2'])
def test_populate_filter_props_multi_force_nodes_with_retry(self):
self._test_populate_filter_props(force_nodes=['force-node1',
'force-node2'])
def test_populate_retry_exception_at_max_attempts(self):
self.flags(scheduler_max_attempts=2)
msg = 'The exception text was preserved!'
filter_properties = dict(retry=dict(num_attempts=2, hosts=[],
exc_reason=[msg]))
nvh = self.assertRaises(exception.MaxRetriesExceeded,
scheduler_utils.populate_retry,
filter_properties, 'fake-uuid')
# make sure 'msg' is a substring of the complete exception text
self.assertIn(msg, six.text_type(nvh))
def _check_parse_options(self, opts, sep, converter, expected):
good = scheduler_utils.parse_options(opts,
sep=sep,
converter=converter)
for item in expected:
self.assertIn(item, good)
def test_parse_options(self):
# check normal
self._check_parse_options(['foo=1', 'bar=-2.1'],
'=',
float,
[('foo', 1.0), ('bar', -2.1)])
# check convert error
self._check_parse_options(['foo=a1', 'bar=-2.1'],
'=',
float,
[('bar', -2.1)])
# check separator missing
self._check_parse_options(['foo', 'bar=-2.1'],
'=',
float,
[('bar', -2.1)])
# check key missing
self._check_parse_options(['=5', 'bar=-2.1'],
'=',
float,
[('bar', -2.1)])
def test_validate_filters_configured(self):
self.flags(scheduler_default_filters='FakeFilter1,FakeFilter2')
self.assertTrue(scheduler_utils.validate_filter('FakeFilter1'))
self.assertTrue(scheduler_utils.validate_filter('FakeFilter2'))
self.assertFalse(scheduler_utils.validate_filter('FakeFilter3'))
def test_validate_weighers_configured(self):
self.flags(scheduler_weight_classes=
['ServerGroupSoftAntiAffinityWeigher',
'FakeFilter1'])
self.assertTrue(scheduler_utils.validate_weigher(
'ServerGroupSoftAntiAffinityWeigher'))
self.assertTrue(scheduler_utils.validate_weigher('FakeFilter1'))
self.assertFalse(scheduler_utils.validate_weigher(
'ServerGroupSoftAffinityWeigher'))
def test_validate_weighers_configured_all_weighers(self):
self.assertTrue(scheduler_utils.validate_weigher(
'ServerGroupSoftAffinityWeigher'))
self.assertTrue(scheduler_utils.validate_weigher(
'ServerGroupSoftAntiAffinityWeigher'))
def _create_server_group(self, policy='anti-affinity'):
instance = fake_instance.fake_instance_obj(self.context,
params={'host': 'hostA'})
group = objects.InstanceGroup()
group.name = 'pele'
group.uuid = str(uuid.uuid4())
group.members = [instance.uuid]
group.policies = [policy]
return group
def _get_group_details(self, group, policy=None):
group_hosts = ['hostB']
with test.nested(
mock.patch.object(objects.InstanceGroup, 'get_by_instance_uuid',
return_value=group),
mock.patch.object(objects.InstanceGroup, 'get_hosts',
return_value=['hostA']),
) as (get_group, get_hosts):
scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
scheduler_utils._SUPPORTS_AFFINITY = None
group_info = scheduler_utils._get_group_details(
self.context, 'fake_uuid', group_hosts)
self.assertEqual(
(set(['hostA', 'hostB']), [policy], group.members),
group_info)
def test_get_group_details(self):
for policy in ['affinity', 'anti-affinity',
'soft-affinity', 'soft-anti-affinity']:
group = self._create_server_group(policy)
self._get_group_details(group, policy=policy)
def test_get_group_details_with_no_instance_uuid(self):
group_info = scheduler_utils._get_group_details(self.context, None)
self.assertIsNone(group_info)
def _get_group_details_with_filter_not_configured(self, policy):
self.flags(scheduler_default_filters=['fake'])
self.flags(scheduler_weight_classes=['fake'])
instance = fake_instance.fake_instance_obj(self.context,
params={'host': 'hostA'})
group = objects.InstanceGroup()
group.uuid = str(uuid.uuid4())
group.members = [instance.uuid]
group.policies = [policy]
with test.nested(
mock.patch.object(objects.InstanceGroup, 'get_by_instance_uuid',
return_value=group),
) as (get_group,):
scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
scheduler_utils._SUPPORTS_AFFINITY = None
scheduler_utils._SUPPORTS_SOFT_AFFINITY = None
scheduler_utils._SUPPORTS_SOFT_ANTI_AFFINITY = None
self.assertRaises(exception.UnsupportedPolicyException,
scheduler_utils._get_group_details,
self.context, 'fake-uuid')
def test_get_group_details_with_filter_not_configured(self):
policies = ['anti-affinity', 'affinity',
'soft-affinity', 'soft-anti-affinity']
for policy in policies:
self._get_group_details_with_filter_not_configured(policy)
@mock.patch.object(scheduler_utils, '_get_group_details')
def test_setup_instance_group_in_filter_properties(self, mock_ggd):
mock_ggd.return_value = scheduler_utils.GroupDetails(
hosts=set(['hostA', 'hostB']), policies=['policy'],
members=['instance1'])
spec = {'instance_properties': {'uuid': 'fake-uuid'}}
filter_props = {'group_hosts': ['hostC']}
scheduler_utils.setup_instance_group(self.context, spec, filter_props)
mock_ggd.assert_called_once_with(self.context, 'fake-uuid',
['hostC'])
expected_filter_props = {'group_updated': True,
'group_hosts': set(['hostA', 'hostB']),
'group_policies': ['policy'],
'group_members': ['instance1']}
self.assertEqual(expected_filter_props, filter_props)
@mock.patch.object(scheduler_utils, '_get_group_details')
def test_setup_instance_group_with_no_group(self, mock_ggd):
mock_ggd.return_value = None
spec = {'instance_properties': {'uuid': 'fake-uuid'}}
filter_props = {'group_hosts': ['hostC']}
scheduler_utils.setup_instance_group(self.context, spec, filter_props)
mock_ggd.assert_called_once_with(self.context, 'fake-uuid',
['hostC'])
self.assertNotIn('group_updated', filter_props)
self.assertNotIn('group_policies', filter_props)
self.assertEqual(['hostC'], filter_props['group_hosts'])
@mock.patch.object(scheduler_utils, '_get_group_details')
def test_setup_instance_group_with_filter_not_configured(self, mock_ggd):
mock_ggd.side_effect = exception.NoValidHost(reason='whatever')
spec = {'instance_properties': {'uuid': 'fake-uuid'}}
filter_props = {'group_hosts': ['hostC']}
self.assertRaises(exception.NoValidHost,
scheduler_utils.setup_instance_group,
self.context, spec, filter_props)
|
|
import logging
import numpy as np
import threading
logger = logging.getLogger(__name__)
class Filter:
"""Processes input, possibly statefully."""
def apply_changes(self, other, *args, **kwargs):
"""Updates self with "new state" from other filter."""
raise NotImplementedError
def copy(self):
"""Creates a new object with same state as self.
Returns:
A copy of self.
"""
raise NotImplementedError
def sync(self, other):
"""Copies all state from other filter to self."""
raise NotImplementedError
def clear_buffer(self):
"""Creates copy of current state and clears accumulated state"""
raise NotImplementedError
def as_serializable(self):
raise NotImplementedError
class NoFilter(Filter):
is_concurrent = True
def __init__(self, *args):
pass
def __call__(self, x, update=True):
try:
return np.asarray(x)
except Exception:
raise ValueError("Failed to convert to array", x)
def apply_changes(self, other, *args, **kwargs):
pass
def copy(self):
return self
def sync(self, other):
pass
def clear_buffer(self):
pass
def as_serializable(self):
return self
# http://www.johndcook.com/blog/standard_deviation/
class RunningStat:
def __init__(self, shape=None):
self._n = 0
self._M = np.zeros(shape)
self._S = np.zeros(shape)
def copy(self):
other = RunningStat()
other._n = self._n
other._M = np.copy(self._M)
other._S = np.copy(self._S)
return other
def push(self, x):
x = np.asarray(x)
# Unvectorized update of the running statistics.
if x.shape != self._M.shape:
raise ValueError(
"Unexpected input shape {}, expected {}, value = {}".format(
x.shape, self._M.shape, x))
n1 = self._n
self._n += 1
if self._n == 1:
self._M[...] = x
else:
delta = x - self._M
self._M[...] += delta / self._n
self._S[...] += delta * delta * n1 / self._n
def update(self, other):
n1 = self._n
n2 = other._n
n = n1 + n2
if n == 0:
# Avoid divide by zero, which creates nans
return
delta = self._M - other._M
delta2 = delta * delta
M = (n1 * self._M + n2 * other._M) / n
S = self._S + other._S + delta2 * n1 * n2 / n
self._n = n
self._M = M
self._S = S
def __repr__(self):
return "(n={}, mean_mean={}, mean_std={})".format(
self.n, np.mean(self.mean), np.mean(self.std))
@property
def n(self):
return self._n
@property
def mean(self):
return self._M
@property
def var(self):
return self._S / (self._n - 1) if self._n > 1 else np.square(self._M)
@property
def std(self):
return np.sqrt(self.var)
@property
def shape(self):
return self._M.shape
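# Usage sketch (added for illustration, not part of the original module): the
# running estimates agree with numpy's mean and sample standard deviation.
# The helper name is hypothetical.
def _example_running_stat():
    data = np.random.randn(100, 4)
    rs = RunningStat(shape=(4, ))
    for row in data:
        rs.push(row)
    assert rs.n == 100
    assert np.allclose(rs.mean, data.mean(axis=0))
    assert np.allclose(rs.std, data.std(axis=0, ddof=1))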
class MeanStdFilter(Filter):
"""Keeps track of a running mean for seen states"""
is_concurrent = False
def __init__(self, shape, demean=True, destd=True, clip=10.0):
self.shape = shape
self.demean = demean
self.destd = destd
self.clip = clip
self.rs = RunningStat(shape)
# In distributed rollouts, each worker sees different states.
# The buffer is used to keep track of deltas amongst all the
# observation filters.
self.buffer = RunningStat(shape)
def clear_buffer(self):
self.buffer = RunningStat(self.shape)
def apply_changes(self, other, with_buffer=False):
"""Applies updates from the buffer of another filter.
Params:
other (MeanStdFilter): Other filter to apply info from
with_buffer (bool): Flag for specifying if the buffer should be
copied from other.
Examples:
>>> a = MeanStdFilter(())
>>> a(1)
>>> a(2)
>>> print([a.rs.n, a.rs.mean, a.buffer.n])
[2, 1.5, 2]
>>> b = MeanStdFilter(())
>>> b(10)
>>> a.apply_changes(b, with_buffer=False)
>>> print([a.rs.n, a.rs.mean, a.buffer.n])
[3, 4.333333333333333, 2]
>>> a.apply_changes(b, with_buffer=True)
>>> print([a.rs.n, a.rs.mean, a.buffer.n])
[4, 5.75, 1]
"""
self.rs.update(other.buffer)
if with_buffer:
self.buffer = other.buffer.copy()
def copy(self):
"""Returns a copy of Filter."""
other = MeanStdFilter(self.shape)
other.sync(self)
return other
def as_serializable(self):
return self.copy()
def sync(self, other):
"""Syncs all fields together from other filter.
Examples:
>>> a = MeanStdFilter(())
>>> a(1)
>>> a(2)
>>> print([a.rs.n, a.rs.mean, a.buffer.n])
[2, array(1.5), 2]
>>> b = MeanStdFilter(())
>>> b(10)
>>> print([b.rs.n, b.rs.mean, b.buffer.n])
[1, array(10.0), 1]
>>> a.sync(b)
>>> print([a.rs.n, a.rs.mean, a.buffer.n])
[1, array(10.0), 1]
"""
assert other.shape == self.shape, "Shapes don't match!"
self.demean = other.demean
self.destd = other.destd
self.clip = other.clip
self.rs = other.rs.copy()
self.buffer = other.buffer.copy()
def __call__(self, x, update=True):
x = np.asarray(x)
if update:
if len(x.shape) == len(self.rs.shape) + 1:
# The vectorized case.
for i in range(x.shape[0]):
self.rs.push(x[i])
self.buffer.push(x[i])
else:
# The unvectorized case.
self.rs.push(x)
self.buffer.push(x)
if self.demean:
x = x - self.rs.mean
if self.destd:
x = x / (self.rs.std + 1e-8)
if self.clip:
x = np.clip(x, -self.clip, self.clip)
return x
def __repr__(self):
return "MeanStdFilter({}, {}, {}, {}, {}, {})".format(
self.shape, self.demean, self.destd, self.clip, self.rs,
self.buffer)
class ConcurrentMeanStdFilter(MeanStdFilter):
is_concurrent = True
def __init__(self, *args, **kwargs):
super(ConcurrentMeanStdFilter, self).__init__(*args, **kwargs)
self._lock = threading.RLock()
def lock_wrap(func):
def wrapper(*args, **kwargs):
with self._lock:
return func(*args, **kwargs)
return wrapper
self.__getattribute__ = lock_wrap(self.__getattribute__)
def as_serializable(self):
"""Returns non-concurrent version of current class"""
other = MeanStdFilter(self.shape)
other.sync(self)
return other
def copy(self):
"""Returns a copy of Filter."""
other = ConcurrentMeanStdFilter(self.shape)
other.sync(self)
return other
def __repr__(self):
return "ConcurrentMeanStdFilter({}, {}, {}, {}, {}, {})".format(
self.shape, self.demean, self.destd, self.clip, self.rs,
self.buffer)
def get_filter(filter_config, shape):
# TODO(rliaw): move this into filter manager
if filter_config == "MeanStdFilter":
return MeanStdFilter(shape, clip=None)
elif filter_config == "ConcurrentMeanStdFilter":
return ConcurrentMeanStdFilter(shape, clip=None)
elif filter_config == "NoFilter":
return NoFilter()
elif callable(filter_config):
return filter_config(shape)
else:
raise Exception("Unknown observation_filter: " + str(filter_config))
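# Usage sketch (added for illustration, not part of the original module): build
# a MeanStdFilter via get_filter and normalize a batch of hypothetical
# observations; the filter updates its running statistics as it is applied.
def _example_get_filter():
    obs_filter = get_filter("MeanStdFilter", shape=(3, ))
    batch = np.array([[0.0, 1.0, 2.0],
                      [2.0, 3.0, 4.0]])
    normalized = obs_filter(batch)
    assert normalized.shape == batch.shape
    assert obs_filter.rs.n == 2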
|
|
__author__ = 'SolarLune'
import math
import time
import collections
import operator
from bge import logic, constraints, types, render
import mathutils
from .math import sign
lib_new_counter = 0
# Helper classes
class Polygon():
def __init__(self, polygon):
self.poly_data = polygon
self.vertices = []
self.neighbors = {}
self.vertices_original_positions = []
verts = [polygon.v1, polygon.v2, polygon.v3]
if self.poly_data.getNumVertex() == 4:
verts.append(polygon.v4)
for v in verts:
vert = self.poly_data.getMesh().getVertex(self.poly_data.material_id, v)
self.vertices.append(vert)
self.vertices_original_positions.append(vert.getXYZ())
def get_normal(self):
n = mathutils.Vector()
for vert in self.vertices:
n += vert.normal
n.normalize()
return n
def get_position(self):
pos = mathutils.Vector()
for vert in self.vertices:
pos += vert.XYZ
pos /= len(self.vertices)
return pos
normal = property(get_normal)
position = property(get_position)
class Mesh():
def __init__(self, mesh_data):
# TODO BGE MESH DATA TO CPOLYGON (look through list to find it)
# TODO Shared vertices - just use polygons_dict to get neighboring polygons
# TODO Alter polygon_list so multiple polygons can inhabit single positions
self.mesh_data = mesh_data
self.polygon_dict = collections.OrderedDict()
self.polygon_list = []
polies_dict = {}
polies_list = {}
for p in range(self.mesh_data.numPolygons):
poly = Polygon(self.mesh_data.getPolygon(p))
rpos_x = poly.position.x
rpos_y = poly.position.y
rpos_z = poly.position.z
position_tuple = (rpos_z, rpos_y, rpos_x)
polies_list[position_tuple] = poly
if not rpos_z in polies_dict:
polies_dict[rpos_z] = {}
if not rpos_y in polies_dict[rpos_z]:
polies_dict[rpos_z][rpos_y] = {}
polies_dict[rpos_z][rpos_y][rpos_x] = poly
for kvz in sorted(polies_dict):
self.polygon_dict[kvz] = collections.OrderedDict()
for kvy in sorted(polies_dict[kvz]):
self.polygon_dict[kvz][kvy] = collections.OrderedDict()
for kvx in sorted(polies_dict[kvz][kvy]):
self.polygon_dict[kvz][kvy][kvx] = polies_dict[kvz][kvy][kvx]
polies_sorted = sorted(polies_list.items(), key=lambda x: x[0])
for p in polies_sorted:
self.polygon_list.append(p[1])
self.vertices = []
for polygon in self.polygon_list:
for vertex in polygon.vertices:
self.vertices.append(vertex)
self.shared_vertices = {}
    def get_shared_vertices(self):
        """
        Fills in self.shared_vertices, mapping each vertex to the other vertices
        that occupy (approximately) the same position.
        Separate function because this could be slow given enough vertices.
        """
clone_verts = []
for v1 in self.vertices:
if not v1 in clone_verts:
for v2 in self.vertices:
if v1 != v2:
if self.vertices_close(v1, v2):
if not v1 in self.shared_vertices:
self.shared_vertices[v1] = []
self.shared_vertices[v1].append(v2)
clone_verts.append(v2)
    def get_polygon_neighbors(self, minimum_shared_vertices=0):
        """
        Fills in each polygon's `neighbors` dictionary, keyed by the normalized
        direction from the polygon to its neighbor.
        Separate function because this could be slow given enough faces.
        :param minimum_shared_vertices: how many vertices two polygons must share
        to be considered neighbors.
        """
        # Iterate over the polygon list; the dict is keyed by position values.
        for poly in self.polygon_list:
            for other_poly in self.polygon_list:
                if poly != other_poly:
                    shared_vert_count = 0
                    for v1 in poly.vertices:
                        for v2 in other_poly.vertices:
                            if self.vertices_close(v1, v2):
                                shared_vert_count += 1
                    if shared_vert_count >= minimum_shared_vertices:
                        poly.neighbors[tuple((other_poly.position - poly.position).normalized())] = other_poly
                        other_poly.neighbors[tuple((poly.position - other_poly.position).normalized())] = poly
def vertices_close(self, vertex_one, vertex_two, margin=0.001):
return (vertex_one.getXYZ() - vertex_two.getXYZ()).magnitude < margin
def make_unique_mesh(mesh):
"""
Makes a new mesh with LibNew and returns it.
:param mesh: Name of the mesh to duplicate or a pointer to the mesh to duplicate.
:return:
"""
global lib_new_counter
if isinstance(mesh, types.KX_MeshProxy):
mesh = mesh.name
newmesh = logic.LibNew(mesh + str(lib_new_counter), 'Mesh', [mesh])[0]
lib_new_counter += 1
return newmesh
def get_all_vertices(o):
"""
Returns all vertices from all meshes in the object.
:param o:KX_GameObject - The object to get the vertices from.
:return: A list of vertices (KX_VertexProxy).
"""
verts = []
for mesh in o.meshes:
for mat in range(mesh.numMaterials):
for v in range(mesh.getVertexArrayLength(mat)):
verts.append(mesh.getVertex(mat, v))
return verts
def get_shared_vertices(mesh, vert, max_diff=0, mat=0):
"""Returns the vertices sharing the position occupied by the vertex 'vert'.
'mesh' = which object's vertices to check;
'vert' = which vertex to find the duplicates of
'mat' = which vertices of 'mesh' to check, according to material index, I believe.
note that this goes through all vertices, so this should only be used with lower-poly objects
and should only be done rarely to keep up efficiency."""
list = []
for a in range(mesh.getVertexArrayLength(mat)):
v = mesh.getVertex(mat, a)
if max_diff != 0:
if abs(v.x - vert.x) < max_diff and abs(v.y - vert.y) < max_diff and abs(v.z - vert.z) < max_diff:
list.append(v)
else:
if v.x == vert.x and v.y == vert.y and v.z == vert.z:
list.append(v)
return list
def soft_body_pin(softbodyobj, controls):
"""
Pins the soft body object to an object using its vertices (a control object). It will pin the soft-body
object to all of the vertices of all of the objects in the controls list. So, for controls pass a list like:
[ControlObject, ControlObject2, etc.]
where ControlObject are Game Objects fetched through the scene list, for example.
"""
softid = softbodyobj.getPhysicsId()
ctype = 2 # Constraint type, 1 = edge; 0 = point, 2 = angular?
for c in controls:
cid = c.getPhysicsId()
for vert in range(c.meshes[0].getVertexArrayLength(0)):
vpos = c.meshes[0].getVertex(0, vert).getXYZ()
constraints.createConstraint(softid, cid, ctype, vpos[0], vpos[1], vpos[2], 8, -1, 0.5)
def get_dimensions(object=None, roundit=3, offset=1, meshnum=0, factor_in_scale=1):
"""
Gets the dimensions of the object (what you see under dimensions in the properties window in the 3D menu).
mesh = which mesh to use to get the object's dimensions.
roundit = how far down to round the returned dimension values; set it to a negative number to not round the numbers off at all.
offset = Whether or not to return the offset point of the dimensions (the center point);
This negated (-offset, literally) is the origin point, generally.
meshnum = The index of the mesh to use. Usually 0 is okay.
factor_in_scale = If it should multiply the dimensions by the object's world scale.
"""
if object == None:
object = logic.getCurrentController().owner
s = object.worldScale
mesh = object.meshes[meshnum]
# print (dir(mesh))
verts = [[], [], []]
originpos = [0, 0, 0]
for mat in range(len(mesh.materials)):
for v in range(mesh.getVertexArrayLength(mat)):
vert = mesh.getVertex(mat, v)
pos = vert.getXYZ()
verts[0].append(pos[0])
verts[1].append(pos[1])
verts[2].append(pos[2])
verts[0].sort()
verts[1].sort()
verts[2].sort()
if offset != 0:
offsetpos = [
(verts[0][len(verts[0]) - 1] + verts[0][0]) / 2,
(verts[1][len(verts[1]) - 1] + verts[1][0]) / 2,
(verts[2][len(verts[2]) - 1] + verts[2][0]) / 2,
]
    size = [(verts[0][len(verts[0]) - 1] - verts[0][0]),
            (verts[1][len(verts[1]) - 1] - verts[1][0]),
            (verts[2][len(verts[2]) - 1] - verts[2][0])]
if factor_in_scale:
size = [size[0] * s[0],
size[1] * s[1],
size[2] * s[2]]
if roundit >= 0:
size = [
round(size[0], roundit),
round(size[1], roundit),
round(size[2], roundit),
]
if offset:
return (mathutils.Vector(size), mathutils.Vector(offsetpos))
else:
return (mathutils.Vector(size), None)
def uv_scroll(uspd=0.0025, vspd=0.0, layer=0, mesh=None, mat=0, frequency=0):
"""
Scrolls the UV Coordinate of each vertex in the specified mesh by
uspd and vspd.
    uspd = how fast to scroll on the U-axis (X)
    vspd = how fast to scroll on the V-axis (Y)
layer = which UV-layer to scroll; 0 = first layer, 1 = second, 2 = both
mesh = which mesh to use for UV-animation
mat = which material to look for (I think it's organized by material)
frequency = frequency the script is run. Higher numbers means a choppier effect, so the function will compensate for
that (so basically, you can plug the frequency of the sensor running this script into this arg.) Defaults to 0.
"""
from bge import logic
cont = logic.getCurrentController()
obj = cont.owner
if mesh is None:
mesh = obj.meshes[mat]
f = frequency + 1
for v in range(mesh.getVertexArrayLength(mat)):
vert = mesh.getVertex(0, v)
if layer == 0 or layer == 2:
vert.u += uspd * f
vert.v += vspd * f
if layer == 1 or layer == 2:
vert.u2 += uspd * f
vert.v2 += vspd * f
# Flattening meshes together
def flatten(destination, sources):
"""
Author: SolarLune
Date Updated: 3/11/13
Sets all vertices of the destination object to match each vertex of each of the sourceobjects' meshes. Useful for
voxel-ish games.
destination = object that you want to alter
sources = list of objects that you want to flatten into the local mesh
Returns a list of the vertices that have been flattened into the destination mesh
- Notes -
The Blender Game Engine can handle a lot of polygons drawing at once, but it tends to chug
on drawing many objects, even if they are low-poly. A way to get around this is to make a
'static mesh', which is a basic mesh that has a high number of faces, and then move the
faces in that mesh to match many objects. The Blender Game Engine draws all of this in a
single batch, much more quickly than the objects individually. The objects can be invisible,
or even removed after the static mesh takes on their appearance. Obviously, this mesh
    can't move or update the objects individually, but it's useful for having a complex
world that doesn't need to be fully dynamic.
- Advantages -
. Allows the developer to draw a large number of objects much faster than usual.
- Disadvantages -
. The flattened mesh is a single object, so you can't really do any dynamics (moving individual objects
on part of a flattened mesh) without actually moving the vertices / faces for the mesh.
. Forces you to create a mesh that has a high number of faces, thereby raising load times considerably.
. The Flatten function moves the source object's vertices to match the objects in the objects list,
    so this method only works correctly when both the source and destination objects share materials (since
the BGE can't re-assign materials to the vertices).
. For speed, there's no check to ensure that you have enough faces in your destination mesh to 'cover' all of the
faces in the source meshes. Be aware of this if you find your destination mesh seems 'incomplete'.
"""
objindex = 0
vertindex = 0
objverts = {}
nomore = 0 # All through with objects?
mesh = destination.meshes[0]
lp = destination.worldPosition.copy()
targetobj = sources[objindex]
targetmesh = targetobj.meshes[0]
allobjverts = []
if not 'vertsavailable' in destination:
destination['vertsavailable'] = {}
for m in range(mesh.numMaterials):
destination['vertsavailable'][m] = [1 for x in range(mesh.getVertexArrayLength(m))]
unflatten_all(destination)
for m in range(mesh.numMaterials):
# Actually loop through the vertices
for v in range(mesh.getVertexArrayLength(m)):
if destination['vertsavailable'][m][v] == 0:
continue
vert = mesh.getVertex(m, v)
if nomore == 0:
destination['vertsavailable'][m][v] = 0
tv = targetmesh.getVertex(0, vertindex)
vl = targetmesh.getVertexArrayLength(0)
op = sources[objindex].worldPosition
vert.XYZ = tv.XYZ
vert.x *= targetobj.worldScale.x
vert.y *= targetobj.worldScale.y
vert.z *= targetobj.worldScale.z
vert.XYZ += (op - lp) # Set each vertex of the source mesh to match one of the target objects'
vert.UV = tv.UV # Mesh vertex position, UV, and normal properties, offset by the target
vert.normal = tv.normal # objects' world positions and the local mesh's world position (because all
vert.color = tv.color # of the vertices belong to the local mesh
vertindex += 1
try:
objverts[m].append(
v) # Append the vertex for the material index to the dictionary of verts used for the object
except KeyError:
objverts[m] = []
objverts[m].append(v)
if vertindex >= vl: # If there's no more vertices for the current object, move to the next one
vertindex = 0
objindex += 1
allobjverts.append(objverts)
if objindex < len(sources):
targetobj = sources[objindex]
targetmesh = targetobj.meshes[0]
objverts = {}
else:
nomore = 1
else:
break
return allobjverts
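# Usage sketch (an illustrative example added here, not part of the original API):
# attach a controller like this to the destination 'static mesh' object. The
# property name 'flatten_source' and the stored key 'flattened_verts' are
# hypothetical; the example relies on this module's existing 'logic' import.
def flatten_example(cont):
    own = cont.owner  # The destination object whose mesh vertices get repositioned
    scene = logic.getCurrentScene()
    # Gather every object flagged with the (hypothetical) 'flatten_source' property
    sources = [obj for obj in scene.objects if 'flatten_source' in obj]
    if not sources:
        return
    # Remember which vertices were used (one dict per source) for later unflattening
    own['flattened_verts'] = flatten(own, sources)
    for obj in sources:
        obj.visible = False  # The static mesh now draws their geometry in one batch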
def unflatten(destination, verts):
"""
"Removes" the vertices from the flattened mesh.
destination = destination object to use.
verts = vertex index array indicating which vertices to unflatten.
"""
for m in verts:
for v in verts[m]:
vert = destination.meshes[0].getVertex(m, v)
vert.setXYZ([0, 0, 0])
destination['vertsavailable'][m][v] = 1
def unflatten_all(destination):
"""
Unflattens all vertices from the destination object.
"""
mesh = destination.meshes[0]
for m in range(mesh.numMaterials):
for v in range(mesh.getVertexArrayLength(m)):
vert = destination.meshes[0].getVertex(m, v)
vert.setXYZ([0, 0, 0])
destination['vertsavailable'][m][v] = 1
# Waves
def wave_planar(wave_num=1, wave_rate_x=1, wave_rate_y=1, wave_height=1.0, scale_color=0, only_color=1, obj=None):
"""
Moves the vertices of the mesh around to give a waving effect.
wave_num = how many waves to display on the mesh. Use a whole number >= 1 to increase the number of waves
the mesh attempts to display. Note that you need enough detail on the mesh to accurately display the waves.
Also note that numbers below 1 will display a part of a wave, and so will not loop correctly (when placed
next to other instances of the same waving mesh).
    wave_rate_x / wave_rate_y = how many oscillations to make per second along the X and Y axes, respectively.
wave_height = how high the waves should reach from the base Z value that the mesh starts with.
scale_color = 0 means it will ignore the wave's vertex color channel.
scale_color = 1 means it will scale the wave effect by the brightness of the wave's red vertex color.
scale_color = 2 means it will scale the wave effect by the brightness of the wave's red vertex color
binarily (on or off, if the channel's value is greater than 0.5).
only_color = Only registers vertices to update if their red vertex color channel is greater than 0.5
obj = Object to influence. If None, then the object running this Python controller will be used.
"""
if obj is None:
o = logic.getCurrentController().owner
else:
o = obj
if not 'wave_planar_info' in o:
o['wave_planar_info'] = {}
o['wave_planar_info']['verts'] = {}
o['wave_planar_info']['dimensions'] = get_dimensions(o, factor_in_scale=0)[0]
for vert in get_all_vertices(o):
if only_color:
if vert.r > 0.5:
o['wave_planar_info']['verts'][
vert] = vert.getXYZ() # Only add the vertex to the list if it's red (cuts down processing)
else:
o['wave_planar_info']['verts'][vert] = vert.getXYZ()
t = time.clock()
d = o['wave_planar_info']['dimensions'].copy()
d.magnitude /= 2
twrx = t * wave_rate_x
twry = t * wave_rate_y
    if not scale_color:  # These if blocks stay outside the vertex loops so scale_color isn't re-checked for every vertex
for vert in o['wave_planar_info']['verts']:
osc_time_x = (twrx + ((vert.x / d.x) * wave_num)) * math.pi
osc_time_y = (twry + ((vert.y / d.y) * wave_num)) * math.pi
wave = ((math.sin(osc_time_x) + math.cos(osc_time_y) / 2) + 0.5) * wave_height
vert.z = o['wave_planar_info']['verts'][vert].z + wave
elif scale_color == 1: # Scale wave by vertex color
for vert in o['wave_planar_info']['verts']:
osc_time_x = (twrx + ((vert.x / d.x) * wave_num)) * math.pi
osc_time_y = (twry + ((vert.y / d.y) * wave_num)) * math.pi
wave = ((math.sin(osc_time_x) + math.cos(osc_time_y) / 2) + 0.5) * wave_height
wave *= vert.r
vert.z = o['wave_planar_info']['verts'][vert].z + wave
else: # Scale wave by vertex color (binarily)
for vert in o['wave_planar_info']['verts']:
osc_time_x = (twrx + ((vert.x / d.x) * wave_num)) * math.pi
osc_time_y = (twry + ((vert.y / d.y) * wave_num)) * math.pi
wave = ((math.sin(osc_time_x) + math.cos(osc_time_y) / 2) + 0.5) * wave_height
if vert.r < 0.5:
wave = 0.0
vert.z = o['wave_planar_info']['verts'][vert].z + wave
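# Usage sketch (illustrative, not original code): run wave_planar every frame from
# an Always sensor (true pulse) on a subdivided plane whose waving vertices have a
# red vertex color painted on them. The parameter values below are arbitrary examples.
def wave_example(cont):
    wave_planar(wave_num=2, wave_rate_x=0.25, wave_rate_y=0.25,
                wave_height=0.5, scale_color=1, only_color=1)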
|
|
# Copyright (c) 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import oslo_messaging as messaging
from oslo_messaging.notify import dispatcher as oslo_dispatcher
from oslo_messaging import target
from oslo_utils import timeutils
from sqlalchemy import desc
from murano.common import config
from murano.common.helpers import token_sanitizer
from murano.db import models
from murano.db.services import environments
from murano.db.services import instances
from murano.db import session
from murano.common.i18n import _, _LI, _LW
from murano.openstack.common import log as logging
from murano.services import states
RPC_SERVICE = None
NOTIFICATION_SERVICE = None
LOG = logging.getLogger(__name__)
class ResultEndpoint(object):
@staticmethod
def process_result(context, result, environment_id):
secure_result = token_sanitizer.TokenSanitizer().sanitize(result)
LOG.debug('Got result from orchestration '
'engine:\n{0}'.format(secure_result))
model = result['model']
action_result = result.get('action', {})
unit = session.get_session()
environment = unit.query(models.Environment).get(environment_id)
if not environment:
LOG.warning(_LW('Environment result could not be handled, '
'specified environment not found in database'))
return
if model['Objects'] is None and model.get('ObjectsCopy', {}) is None:
environments.EnvironmentServices.remove(environment_id)
return
environment.description = model
if environment.description['Objects'] is not None:
environment.description['Objects']['services'] = \
environment.description['Objects'].pop('applications', [])
# environment.networking = result.get('networking', {})
action_name = 'Deployment'
deleted = False
else:
action_name = 'Deletion'
deleted = True
environment.version += 1
environment.save(unit)
# close deployment
deployment = get_last_deployment(unit, environment.id)
deployment.finished = timeutils.utcnow()
deployment.result = action_result
num_errors = unit.query(models.Status)\
.filter_by(level='error', task_id=deployment.id).count()
num_warnings = unit.query(models.Status)\
.filter_by(level='warning', task_id=deployment.id).count()
final_status_text = action_name + ' finished'
if num_errors:
final_status_text += " with errors"
elif num_warnings:
final_status_text += " with warnings"
status = models.Status()
status.task_id = deployment.id
status.text = final_status_text
status.level = 'info'
deployment.statuses.append(status)
deployment.save(unit)
# close session
conf_session = unit.query(models.Session).filter_by(
**{'environment_id': environment.id,
'state': states.SessionState.DEPLOYING if not deleted
else states.SessionState.DELETING}).first()
if num_errors > 0:
conf_session.state = \
states.SessionState.DELETE_FAILURE if deleted else \
states.SessionState.DEPLOY_FAILURE
else:
conf_session.state = states.SessionState.DEPLOYED
conf_session.save(unit)
# output application tracking information
message = _LI('EnvId: {0} TenantId: {1} Status: {2} Apps: {3}').format(
environment.id,
environment.tenant_id,
_('Failed') if num_errors + num_warnings > 0 else _('Successful'),
', '.join(map(
lambda a: a['?']['type'],
model['Objects']['services']
))
)
LOG.info(message)
def notification_endpoint_wrapper(priority='info'):
def wrapper(func):
class NotificationEndpoint(object):
def __init__(self):
setattr(self, priority, self._handler)
def _handler(self, ctxt, publisher_id, event_type,
payload, metadata):
if event_type == ('murano.%s' % func.__name__):
func(payload)
def __call__(self, payload):
return func(payload)
return NotificationEndpoint()
return wrapper
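# How the wrapper above is used (a descriptive note, not original code): decorating
# a function replaces it with a NotificationEndpoint instance whose handler is bound
# under the requested priority (e.g. 'info'), and that handler only forwards payloads
# whose event_type is 'murano.<function name>'. So track_instance below reacts to
# 'murano.track_instance' notifications, untrack_instance to 'murano.untrack_instance',
# and report_notification to 'murano.report_notification'.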
@notification_endpoint_wrapper()
def track_instance(payload):
LOG.debug('Got track instance request from orchestration '
'engine:\n{0}'.format(payload))
instance_id = payload['instance']
instance_type = payload.get('instance_type', 0)
environment_id = payload['environment']
unit_count = payload.get('unit_count')
type_name = payload['type_name']
type_title = payload.get('type_title')
instances.InstanceStatsServices.track_instance(
instance_id, environment_id, instance_type,
type_name, type_title, unit_count)
@notification_endpoint_wrapper()
def untrack_instance(payload):
LOG.debug('Got untrack instance request from orchestration '
'engine:\n{0}'.format(payload))
instance_id = payload['instance']
environment_id = payload['environment']
instances.InstanceStatsServices.destroy_instance(
instance_id, environment_id)
@notification_endpoint_wrapper()
def report_notification(report):
LOG.debug('Got report from orchestration '
'engine:\n{0}'.format(report))
report['entity_id'] = report['id']
del report['id']
status = models.Status()
status.update(report)
unit = session.get_session()
# connect with deployment
with unit.begin():
running_deployment = get_last_deployment(unit,
status.environment_id)
status.task_id = running_deployment.id
unit.add(status)
def get_last_deployment(unit, env_id):
query = unit.query(models.Task) \
.filter_by(environment_id=env_id) \
.order_by(desc(models.Task.started))
return query.first()
def _prepare_rpc_service(server_id):
endpoints = [ResultEndpoint()]
transport = messaging.get_transport(config.CONF)
s_target = target.Target('murano', 'results', server=server_id)
return messaging.get_rpc_server(transport, s_target, endpoints, 'eventlet')
def _prepare_notification_service(server_id):
endpoints = [report_notification, track_instance, untrack_instance]
transport = messaging.get_transport(config.CONF)
s_target = target.Target(topic='murano', server=server_id)
dispatcher = oslo_dispatcher.NotificationDispatcher(
[s_target], endpoints, None, True)
return messaging.MessageHandlingServer(transport, dispatcher, 'eventlet')
def get_rpc_service():
global RPC_SERVICE
if RPC_SERVICE is None:
RPC_SERVICE = _prepare_rpc_service(str(uuid.uuid4()))
return RPC_SERVICE
def get_notification_service():
global NOTIFICATION_SERVICE
if NOTIFICATION_SERVICE is None:
NOTIFICATION_SERVICE = _prepare_notification_service(str(uuid.uuid4()))
return NOTIFICATION_SERVICE
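# A minimal sketch of how these helpers are typically wired up (an assumption about
# the calling service, not code from this module): oslo.messaging servers expose
# start()/stop()/wait(), so a launcher could do something like
#
#     rpc_server = get_rpc_service()
#     notification_server = get_notification_service()
#     rpc_server.start()
#     notification_server.start()
#     ...
#     rpc_server.stop()
#     rpc_server.wait()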
|
|
#
# Element generator factory by Fredrik Lundh.
#
# Source:
# http://online.effbot.org/2006_11_01_archive.htm#et-builder
# http://effbot.python-hosting.com/file/stuff/sandbox/elementlib/builder.py
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
"""
The ``E`` Element factory for generating XML documents.
"""
import lxml.etree as ET
try:
from functools import partial
except ImportError:
# fake it for pre-2.5 releases
def partial(func, tag):
return lambda *args, **kwargs: func(tag, *args, **kwargs)
try:
callable
except NameError:
# Python 3
def callable(f):
return hasattr(f, '__call__')
try:
basestring
except NameError:
basestring = str
try:
unicode
except NameError:
unicode = str
class ElementMaker(object):
"""Element generator factory.
Unlike the ordinary Element factory, the E factory allows you to pass in
more than just a tag and some optional attributes; you can also pass in
text and other elements. The text is added as either text or tail
attributes, and elements are inserted at the right spot. Some small
examples::
>>> from lxml import etree as ET
>>> from lxml.builder import E
>>> ET.tostring(E("tag"))
'<tag/>'
>>> ET.tostring(E("tag", "text"))
'<tag>text</tag>'
>>> ET.tostring(E("tag", "text", key="value"))
'<tag key="value">text</tag>'
>>> ET.tostring(E("tag", E("subtag", "text"), "tail"))
'<tag><subtag>text</subtag>tail</tag>'
For simple tags, the factory also allows you to write ``E.tag(...)`` instead
of ``E('tag', ...)``::
>>> ET.tostring(E.tag())
'<tag/>'
>>> ET.tostring(E.tag("text"))
'<tag>text</tag>'
>>> ET.tostring(E.tag(E.subtag("text"), "tail"))
'<tag><subtag>text</subtag>tail</tag>'
Here's a somewhat larger example; this shows how to generate HTML
documents, using a mix of prepared factory functions for inline elements,
nested ``E.tag`` calls, and embedded XHTML fragments::
# some common inline elements
A = E.a
I = E.i
B = E.b
def CLASS(v):
# helper function, 'class' is a reserved word
return {'class': v}
page = (
E.html(
E.head(
E.title("This is a sample document")
),
E.body(
E.h1("Hello!", CLASS("title")),
E.p("This is a paragraph with ", B("bold"), " text in it!"),
E.p("This is another paragraph, with a ",
A("link", href="http://www.python.org"), "."),
E.p("Here are some reservered characters: <spam&egg>."),
ET.XML("<p>And finally, here is an embedded XHTML fragment.</p>"),
)
)
)
print ET.tostring(page)
Here's a prettyprinted version of the output from the above script::
<html>
<head>
<title>This is a sample document</title>
</head>
<body>
<h1 class="title">Hello!</h1>
<p>This is a paragraph with <b>bold</b> text in it!</p>
        <p>This is another paragraph, with a <a href="http://www.python.org">link</a>.</p>
        <p>Here are some reserved characters: <spam&egg>.</p>
<p>And finally, here is an embedded XHTML fragment.</p>
</body>
</html>
For namespace support, you can pass a namespace map (``nsmap``)
and/or a specific target ``namespace`` to the ElementMaker class::
>>> E = ElementMaker(namespace="http://my.ns/")
>>> print(ET.tostring( E.test ))
<test xmlns="http://my.ns/"/>
>>> E = ElementMaker(namespace="http://my.ns/", nsmap={'p':'http://my.ns/'})
>>> print(ET.tostring( E.test ))
<p:test xmlns:p="http://my.ns/"/>
"""
def __init__(self, typemap=None,
namespace=None, nsmap=None, makeelement=None):
if namespace is not None:
self._namespace = '{' + namespace + '}'
else:
self._namespace = None
if nsmap:
self._nsmap = dict(nsmap)
else:
self._nsmap = None
if makeelement is not None:
assert callable(makeelement)
self._makeelement = makeelement
else:
self._makeelement = ET.Element
# initialize type map for this element factory
if typemap:
typemap = typemap.copy()
else:
typemap = {}
def add_text(elem, item):
try:
elem[-1].tail = (elem[-1].tail or "") + item
except IndexError:
elem.text = (elem.text or "") + item
def add_cdata(elem, cdata):
if elem.text:
raise ValueError("Can't add a CDATA section. Element already has some text: %r" % elem.text)
elem.text = cdata
if str not in typemap:
typemap[str] = add_text
if unicode not in typemap:
typemap[unicode] = add_text
if ET.CDATA not in typemap:
typemap[ET.CDATA] = add_cdata
def add_dict(elem, item):
attrib = elem.attrib
for k, v in item.items():
if isinstance(v, basestring):
attrib[k] = v
else:
attrib[k] = typemap[type(v)](None, v)
if dict not in typemap:
typemap[dict] = add_dict
self._typemap = typemap
def __call__(self, tag, *children, **attrib):
get = self._typemap.get
if self._namespace is not None and tag[0] != '{':
tag = self._namespace + tag
elem = self._makeelement(tag, nsmap=self._nsmap)
if attrib:
get(dict)(elem, attrib)
for item in children:
if callable(item):
item = item()
t = get(type(item))
if t is None:
if ET.iselement(item):
elem.append(item)
continue
for basetype in type(item).__mro__:
# See if the typemap knows of any of this type's bases.
t = get(basetype)
if t is not None:
break
else:
raise TypeError("bad argument type: %s(%r)" %
(type(item).__name__, item))
v = t(elem, item)
if v:
get(type(v))(elem, v)
return elem
def __getattr__(self, tag):
return partial(self, tag)
# create factory object
E = ElementMaker()
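# A small self-test sketch (not part of the original module): exercises the
# module-level ``E`` factory defined above when this file is run directly.
if __name__ == '__main__':
    page = E.html(
        E.head(E.title("Sample")),
        E.body(
            E.h1("Hello!"),
            E.p("Built with the ", E.b("E"), " factory."),
        ),
    )
    print(ET.tostring(page, pretty_print=True))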
|
|
import csv
import gc
from multiprocessing import Pool
from os.path import dirname, join
from random import shuffle
import numpy as np
from sklearn.cluster import Birch
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import precision_recall_fscore_support
from sklearn.svm import LinearSVC
from modules.tokenizer import ngrams_tokenizer
from modules.cleaner import clean
from modules.similarity import *
print('PROGRESS: Initializing...')
count_vect = CountVectorizer(preprocessor=clean)
clf = LinearSVC(max_iter=10000)
splitBy = 20
calculations = [
Cosine(),
Dice(),
Jaccard(),
Overlap(),
]
thresholds = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
ngrams = 1
print('PROGRESS: Loading datasets...')
with open(join(dirname(__file__), 'result/generated_datasets/traffic.csv'), newline='\n') as csv_input:
dataset = csv.reader(csv_input, delimiter=',', quotechar='"')
traffic_tweets = [line[0] for line in dataset]
with open(join(dirname(__file__), 'result/generated_datasets/non_traffic.csv'), newline='\n') as csv_input:
dataset = csv.reader(csv_input, delimiter=',', quotechar='"')
non_traffic_tweets = [line[0] for line in dataset]
with open(join(dirname(__file__), 'tweets_corpus/test_set_10000.csv'), newline='\n') as csv_input:
dataset = csv.reader(csv_input, delimiter=',', quotechar='"')
dataset = [(line[0], line[1]) for line in dataset]
shuffle(dataset)
test = {
'data': [line[0] for line in dataset],
'target': [line[1] == 'traffic' for line in dataset],
}
print('PROGRESS: Shuffling datasets...')
shuffle(traffic_tweets)
shuffle(non_traffic_tweets)
print('PROGRESS: Reducing datasets by 1/{} ...'.format(splitBy))
traffic_tweets_size = int(len(traffic_tweets) / splitBy)
non_traffic_tweets_size = int(len(non_traffic_tweets) / splitBy)
traffic_tweets = traffic_tweets[:traffic_tweets_size]
non_traffic_tweets = non_traffic_tweets[:non_traffic_tweets_size]
print('PROGRESS: Extracting features of datasets...')
vectors = count_vect.fit_transform(traffic_tweets + non_traffic_tweets)
print('\tAll {} data feature vector shape: {}'.format(traffic_tweets_size + non_traffic_tweets_size, vectors.shape))
traffic_vectors = vectors[:traffic_tweets_size]
non_traffic_vectors = vectors[traffic_tweets_size:]
print('\tTraffic data feature vector shape: {}'.format(traffic_vectors.shape))
print('\tNon traffic data feature vector shape: {}'.format(non_traffic_vectors.shape))
test_vectors = count_vect.transform(test['data'])
print('\tTest data feature vector shape: {}'.format(test_vectors.shape))
print('PROGRESS: Train SVM with all the data...')
target = [True] * len(traffic_tweets) + [False] * len(non_traffic_tweets)
clf.fit(vectors, target)
print('PROGRESS: Evaluate the SVM model using test set...')
predicted = clf.predict(test_vectors)
accuracy = np.mean(predicted == test['target'])
prfs = precision_recall_fscore_support(test['target'], predicted)
# print('Training time: {}'.format(training_time))
print('\tAccuracy: {}'.format(accuracy))
print('\tPrecision: {}'.format(prfs[0][0]))
print('\tRecall: {}'.format(prfs[1][0]))
print('\tF-score: {}'.format(prfs[2][0]))
with open(join(dirname(__file__), 'birch_eval.csv'), 'a', newline='\n') as csv_output:
csv_writer = csv.writer(csv_output, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
csv_writer.writerow(('all', '', len(traffic_tweets), len(non_traffic_tweets), accuracy, prfs[0][0], prfs[1][0], prfs[2][0]))
for th in [0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5]:
brc = Birch(branching_factor=50, n_clusters=None, threshold=th, compute_labels=True)
print('PROGRESS: Clustering dataset...')
brc.fit(traffic_vectors)
traffic_vectors = brc.subcluster_centers_
print('\tTraffic data centroids count: {}'.format(len(traffic_vectors)))
brc.fit(non_traffic_vectors)
non_traffic_vectors = brc.subcluster_centers_
print('\tNon traffic data centroids count: {}'.format(len(non_traffic_vectors)))
training_vectors = np.concatenate((traffic_vectors, non_traffic_vectors))
training_target = [True] * len(traffic_vectors) + [False] * len(non_traffic_vectors)
print('\tTotal centroid count: {}'.format(len(training_vectors)))
print('PROGRESS: Train SVM with cluster centroids...')
clf.fit(training_vectors, training_target)
print('PROGRESS: Evaluate the SVM model using test set...')
predicted = clf.predict(test_vectors)
accuracy = np.mean(predicted == test['target'])
prfs = precision_recall_fscore_support(test['target'], predicted)
# print('Training time: {}'.format(training_time))
print('\tAccuracy: {}'.format(accuracy))
print('\tPrecision: {}'.format(prfs[0][0]))
print('\tRecall: {}'.format(prfs[1][0]))
print('\tF-score: {}'.format(prfs[2][0]))
with open(join(dirname(__file__), 'birch_eval.csv'), 'a', newline='\n') as csv_output:
csv_writer = csv.writer(csv_output, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
csv_writer.writerow(('birch', th, len(traffic_vectors), len(non_traffic_vectors), accuracy, prfs[0][0], prfs[1][0], prfs[2][0]))
brc = None
gc.collect()
def calculate(calculation):
r = []
for threshold in thresholds:
print('PROGRESS: Reduce dataset using {} - {}...'.format(calculation.__class__.__name__, threshold))
cleaned_traffic_tweets = [(tweet, clean(tweet)) for tweet in traffic_tweets]
tokenized_traffic_tweets = [(tweet, ngrams_tokenizer(cleaned, ngrams)) for (tweet, cleaned) in cleaned_traffic_tweets]
        cleaned_non_traffic_tweets = [(tweet, clean(tweet)) for tweet in non_traffic_tweets]
tokenized_non_traffic_tweets = [(tweet, ngrams_tokenizer(cleaned, ngrams)) for (tweet, cleaned) in cleaned_non_traffic_tweets]
distinct_traffic_tweets = []
distinct_non_traffic_tweets = []
for (tweet, tokens) in tokenized_traffic_tweets:
if len(distinct_traffic_tweets) == 0:
distinct_traffic_tweets.append((tweet, tokens))
else:
is_new = True
for (tweet2, tokens2) in distinct_traffic_tweets:
score = calculation.index(tokens, tokens2)
if score >= threshold:
is_new = False
if is_new:
distinct_traffic_tweets.append((tweet, tokens))
for (tweet, tokens) in tokenized_non_traffic_tweets:
if len(distinct_non_traffic_tweets) == 0:
distinct_non_traffic_tweets.append((tweet, tokens))
else:
is_new = True
for (tweet2, tokens2) in distinct_non_traffic_tweets:
score = calculation.index(tokens, tokens2)
if score >= threshold:
is_new = False
if is_new:
distinct_non_traffic_tweets.append((tweet, tokens))
distinct_traffic_tweets = [tweet for (tweet, tokens) in distinct_traffic_tweets]
distinct_non_traffic_tweets = [tweet for (tweet, tokens) in distinct_non_traffic_tweets]
print('\tTraffic tweets count: {}'.format(len(distinct_traffic_tweets)))
print('\tNon traffic tweets count: {}'.format(len(distinct_non_traffic_tweets)))
training_vectors = count_vect.fit_transform(distinct_traffic_tweets + distinct_non_traffic_tweets)
training_target = [True] * len(distinct_traffic_tweets) + [False] * len(distinct_non_traffic_tweets)
test_vectors = count_vect.transform(test['data'])
print('PROGRESS: Train SVM with reduced dataset using {} - {}...'.format(calculation.__class__.__name__, threshold))
clf.fit(training_vectors, training_target)
print('PROGRESS: Evaluate the SVM model using test set...')
predicted = clf.predict(test_vectors)
accuracy = np.mean(predicted == test['target'])
prfs = precision_recall_fscore_support(test['target'], predicted)
# print('Training time: {}'.format(training_time))
print('\tAccuracy: {}'.format(accuracy))
print('\tPrecision: {}'.format(prfs[0][0]))
print('\tRecall: {}'.format(prfs[1][0]))
print('\tF-score: {}'.format(prfs[2][0]))
r.append((calculation.__class__.__name__, threshold, len(distinct_traffic_tweets), len(distinct_non_traffic_tweets), accuracy, prfs[0][0], prfs[1][0], prfs[2][0]))
return r
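# Note (an observation added here, not original code): Pool(4) below forks the
# interpreter on Unix, so each worker inherits the module-level datasets, count_vect
# and clf, and re-fits its own copy of the classifier inside calculate(). On
# platforms that spawn processes instead of forking (e.g. Windows), the Pool
# creation and p.map() call would need to sit behind an
# 'if __name__ == "__main__":' guard to avoid re-running this whole script in
# every worker.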
p = Pool(4)
results = p.map(calculate, calculations)
with open(join(dirname(__file__), 'birch_eval.csv'), 'a', newline='\n') as csv_output:
csv_writer = csv.writer(csv_output, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
for result in results:
for r in result:
csv_writer.writerow(r)
|
|
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
################################################################################
# This file contains classes controlling tabs in the spreadsheets. A tab is
# a container of a sheet:
# SizeSpinBox
# StandardTabDockWidget
# StandardWidgetSheetTab
# StandardWidgetTabBar
# StandardWidgetTabBarEditor
# StandardWidgetToolBar
################################################################################
from PyQt4 import QtCore, QtGui
import os.path
from spreadsheet_registry import spreadsheetRegistry
from spreadsheet_sheet import StandardWidgetSheet
from spreadsheet_cell import QCellPresenter, QCellContainer, QCellToolBar
from spreadsheet_execute import assignPipelineCellLocations, \
executePipelineWithProgress
from spreadsheet_config import configuration
from vistrails.core.inspector import PipelineInspector
import spreadsheet_rc
################################################################################
class SizeSpinBox(QtGui.QSpinBox):
"""
    SizeSpinBox is just an overridden spin box that will also emit the
    'editingFinished()' signal when the user interacts with the mouse
"""
def __init__(self, initValue=0, parent=None):
""" SizeSpinBox(initValue: int, parent: QWidget) -> SizeSpinBox
Initialize with a default width of 50 and a value of 0
"""
QtGui.QSpinBox.__init__(self, parent)
self.setMinimum(1)
self.setMinimumWidth(50)
self.setMaximumWidth(50)
self.setValue(initValue)
def mouseReleaseEvent(self, event):
""" mouseReleaseEvent(event: QMouseEvent) -> None
        Emit the 'editingFinished()' signal when the user releases a mouse button
"""
QtGui.QSpinBox.mouseReleaseEvent(self, event)
self.emit(QtCore.SIGNAL("editingFinished()"))
class StandardWidgetToolBar(QtGui.QToolBar):
"""
StandardWidgetToolBar: The default toolbar for each sheet
container. By default, only FitToWindow and Table resizing are
included
"""
def __init__(self, parent=None):
""" StandardWidgetToolBar(parent: QWidget) -> StandardWidgetToolBar
Init the toolbar with default actions
"""
QtGui.QToolBar.__init__(self, parent)
self.sheetTab = parent
self.addAction(self.sheetTab.tabWidget.newSheetAction())
self.addAction(self.sheetTab.tabWidget.openAction())
self.addAction(self.sheetTab.tabWidget.saveAction())
self.addWidget(self.rowCountSpinBox())
self.addWidget(self.colCountSpinBox())
self.addAction(self.sheetTab.tabWidget.exportSheetToImageAction())
self.addSeparator()
self.layout().setSpacing(2)
self.currentToolBarAction = None
def rowCountSpinBox(self):
""" rowCountSpinBox() -> SizeSpinBox
Return the row spin box widget:
"""
if not hasattr(self, 'rowSpinBox'):
self.rowSpinBox = SizeSpinBox(self.sheetTab.sheet.rowCount())
self.rowSpinBox.setToolTip('The number of rows')
self.rowSpinBox.setStatusTip('Change the number of rows '
'of the current sheet')
self.connect(self.rowSpinBox,
QtCore.SIGNAL('editingFinished()'),
self.sheetTab.rowSpinBoxChanged)
return self.rowSpinBox
def colCountSpinBox(self):
""" colCountSpinBox() -> SizeSpinBox
Return the column spin box widget:
"""
if not hasattr(self, 'colSpinBox'):
self.colSpinBox = SizeSpinBox(self.sheetTab.sheet.columnCount())
self.colSpinBox.setToolTip('The number of columns')
self.colSpinBox.setStatusTip('Change the number of columns '
'of the current sheet')
self.connect(self.colSpinBox,
QtCore.SIGNAL('editingFinished()'),
self.sheetTab.colSpinBoxChanged)
return self.colSpinBox
def setCellToolBar(self, cellToolBar):
""" setCellToolBar(cellToolBar: QToolBar) -> None
Set the current cell toolbar on this toolbar. Use None to
remove the cell toolbar
"""
if (not self.currentToolBarAction or
self.widgetForAction(self.currentToolBarAction)!=cellToolBar):
if self.currentToolBarAction:
self.removeAction(self.currentToolBarAction)
if cellToolBar:
self.currentToolBarAction = self.addWidget(cellToolBar)
self.currentToolBarAction.setVisible(True)
self.currentToolBarAction.setEnabled(True)
else:
self.currentToolBarAction = None
class StandardWidgetSheetTabInterface(object):
"""
StandardWidgetSheetTabInterface is the interface for tab
controller to call for manipulating a tab
"""
    ### Below are API wrappers that connect to self.sheet
def __init__(self):
self.lastCellLocation = (0, 0)
self.emptyCellToolBar = None
def isSheetTabWidget(self):
""" isSheetTabWidget() -> boolean
Return True if this is a sheet tab widget
"""
return True
def getDimension(self):
""" getDimension() -> tuple
Get the sheet dimensions
"""
return (0,0)
def setDimension(self, rc, cc):
""" setDimension(rc: int, cc: int) -> None
Set the sheet dimensions
"""
pass
def getCell(self, row, col):
""" getCell(row: int, col: int) -> QWidget
Get cell at a specific row and column. In reality, this cell
widget is inside a QCellContainer and the cell container is
the actual widget under the cell
"""
cellWidget = self.getCellWidget(row, col)
if isinstance(cellWidget, QCellContainer):
return cellWidget.widget()
return cellWidget
def getCellWidget(self, row, col):
""" getCellWidget(row: int, col: int) -> QWidget
Get actual cell at a specific row and column. This will in
fact return the container widget of a cell
"""
return None
def setCellWidget(self, row, col, cellWidget):
""" setCellWidget(row: int,
col: int,
cellWidget: QWidget) -> None
Replace the current location (row, col) with a
widget. The widget will be put into a container to be
protected from being destroyed when taken out.
"""
pass
def setCellByWidget(self, row, col, cellWidget):
""" setCellByWidget(row: int,
col: int,
cellWidget: QWidget) -> None
Put the cellWidget inside a container and place it on the sheet
"""
if not isinstance(cellWidget, QCellContainer):
container = QCellContainer(cellWidget)
else:
container = cellWidget
self.setCellWidget(row, col, container)
self.lastCellLocation = (row, col)
def getCellToolBar(self, row, col):
""" getCellToolBar(row: int, col: int) -> QWidget
Return the toolbar widget at cell location (row, col)
"""
cell = self.getCell(row, col)
if cell:
if hasattr(cell, 'toolBarType'):
toolBarType = cell.toolBarType
else:
toolBarType = QCellToolBar
container = self.getCellWidget(row, col)
if isinstance(container, QCellContainer):
if container.toolBar==None:
container.toolBar = toolBarType(self)
return container.toolBar
else:
if self.emptyCellToolBar==None:
self.emptyCellToolBar = QCellToolBar(self)
return self.emptyCellToolBar
def getCellRect(self, row, col):
""" getCellRect(row: int, col: int) -> QRect
Return the rectangle surrounding the cell at location (row, col)
in parent coordinates
"""
return QtCore.QRect()
def getCellGlobalRect(self, row, col):
""" getCellGlobalRect(row: int, col: int) -> QRect
Return the rectangle surrounding the cell at location (row, col)
in global coordinates
"""
return QtCore.QRect()
def getFreeCell(self):
""" getFreeCell() -> tuple
Get a free cell location (row, col) on the spreadsheet
"""
(rowCount, colCount) = self.getDimension()
for r in xrange(rowCount):
for c in xrange(colCount):
w = self.getCell(r, c)
if w==None or (isinstance(w, QCellPresenter) and w.cellWidget==None):
return (r,c)
(r, c) = self.lastCellLocation
(rs, cs) = self.getSpan(r, c)
index = (colCount * r + c + cs) % (rowCount*colCount)
return (index/colCount, index%colCount)
def setCellByType(self, row, col, cellType, inputPorts):
""" setCellByType(row: int,
col: int,
                          cellType: a type that inherits from QWidget,
                          inputPorts: tuple) -> None
Replace the current location (row, col) with a cell of
cellType. If the current type of that cell is the same as
        cellType, only the contents are updated with inputPorts.
"""
oldCell = self.getCell(row, col)
if cellType is None or not isinstance(oldCell, cellType):
if cellType:
newCell = cellType(self)
self.setCellByWidget(row, col, newCell)
newCell.show()
newCell.updateContents(inputPorts)
else:
self.setCellByWidget(row, col, None)
if hasattr(oldCell, 'deleteLater'):
oldCell.deleteLater()
else:
oldCell.updateContents(inputPorts)
self.lastCellLocation = (row, col)
def showHelpers(self, show, globalPos):
""" showHelpers(show: boolean, globalPos: QPoint) -> None
        Show/hide the helpers (toolbar, resizer) when the mouse is at
        globalPos
"""
pass
def setCellPipelineInfo(self, row, col, info):
""" setCellPipelineInfo(row: int, col: int, info: any type) -> None
Provide a way for the spreadsheet to store vistrail
information, info, for the cell (row, col)
"""
if not (row,col) in self.pipelineInfo:
self.pipelineInfo[(row,col)] = {}
self.pipelineInfo[(row,col)] = info
def getCellPipelineInfo(self, row, col):
""" getCellPipelineInfo(row: int, col: int) -> any type
Provide a way for the spreadsheet to extract vistrail
information, info, for the cell (row, col)
"""
if not (row,col) in self.pipelineInfo:
return None
return self.pipelineInfo[(row,col)]
def getSelectedLocations(self):
""" getSelectedLocations() -> list
Return the selected locations (row, col) of the current sheet
"""
return []
def clearSelection(self):
""" clearSelection() -> None
Clear all the selection in the current sheet
"""
pass
def deleteCell(self, row, col):
""" deleteCell(row, col: int) -> None
Delete a cell in the sheet
"""
self.setCellByType(row, col, None, None)
self.setCellPipelineInfo(row, col, None)
def deleteAllCells(self):
""" deleteAllCells() -> None
Delete all cells in the sheet
"""
(rowCount, columnCount) = self.getDimension()
for r in xrange(rowCount):
for c in xrange(columnCount):
self.deleteCell(r, c)
def takeCell(self, row, col):
""" takeCell(row, col) -> QWidget
Free the cell widget at (row, col) from the tab and return as
        the result of the function. If there is no widget at (row,
        col), this returns None. The ownership of the widget is passed
to the caller.
"""
cell = self.getCellWidget(row, col)
if isinstance(cell, QCellContainer):
widget = cell.takeWidget()
self.setCellWidget(row, col, None)
return widget
else:
return cell
def setCellEditingMode(self, r, c, editing=True):
""" setCellEditingMode(r: int, c: int, editing: bool) -> None
Turn on/off the editing mode of a single cell
"""
if editing:
cellWidget = self.getCell(r, c)
if isinstance(cellWidget, QCellPresenter):
return
presenter = QCellPresenter()
presenter.assignCell(self, r, c)
cellWidget = self.takeCell(r, c)
self.setCellByWidget(r, c, presenter)
if cellWidget:
cellWidget.hide()
else:
presenter = self.getCell(r, c)
if not isinstance(presenter, QCellPresenter):
return
presenter = self.takeCell(r, c)
if presenter:
cellWidget = presenter.releaseCellWidget()
self.setCellByWidget(r, c, cellWidget)
presenter.hide()
def setEditingMode(self, editing=True):
""" setEditingMode(editing: bool) -> None
Turn on/off the editing mode of the tab
"""
# Turn off active cell selection
self.sheet.clearSelection()
self.sheet.setActiveCell(-1, -1)
# Go over all the cells and set the editing widget up
(rowCount, colCount) = self.getDimension()
for r in xrange(rowCount):
for c in xrange(colCount):
self.setCellEditingMode(r, c, editing)
QtCore.QCoreApplication.processEvents()
def swapCell(self, row, col, newSheet, newRow, newCol):
""" swapCell(row, col: int, newSheet: Sheet,
newRow, newCol: int) -> None
Swap the (row, col) of this sheet to (newRow, newCol) of newSheet
"""
myWidget = self.takeCell(row, col)
theirWidget = newSheet.takeCell(newRow, newCol)
self.setCellByWidget(row, col, theirWidget)
newSheet.setCellByWidget(newRow, newCol, myWidget)
info = self.getCellPipelineInfo(row, col)
self.setCellPipelineInfo(row, col,
newSheet.getCellPipelineInfo(newRow, newCol))
newSheet.setCellPipelineInfo(newRow, newCol, info)
def copyCell(self, row, col, newSheet, newRow, newCol):
""" copyCell(row, col: int, newSheet: Sheet,
newRow, newCol: int) -> None
Copy the (row, col) of this sheet to (newRow, newCol) of newSheet
"""
info = self.getCellPipelineInfo(row, col)
if info:
info = info[0]
mId = info['moduleId']
pipeline = newSheet.setPipelineToLocateAt(newRow, newCol,
info['pipeline'], [mId])
executePipelineWithProgress(pipeline, 'Copy Cell',
current_version=info['version'],
actions=info['actions'],
reason=info['reason'],
locator=info['locator'],
controller=info['controller'],
sinks=[mId])
def executePipelineToCell(self, pInfo, row, col, reason=''):
""" executePipelineToCell(p: tuple, row: int, col: int) -> None
p: (locator, version, actions, pipeline)
        Execute a pipeline and put all of its cells at (row, col). This
        needs to be fixed to lay out all cells inside the pipeline
"""
pipeline = self.setPipelineToLocateAt(row, col, pInfo[3])
executePipelineWithProgress(pipeline, 'Execute Cell',
locator=pInfo[0],
controller=pInfo[4],
current_version=pInfo[1],
actions=pInfo[2],
reason=reason)
def setPipelineToLocateAt(self, row, col, inPipeline, cellIds=[]):
""" setPipelineToLocateAt(row: int, col: int, inPipeline: Pipeline,
cellIds: [ids]) -> Pipeline
        Modify the pipeline so that its cells (provided by cellIds) are
        located at (row, col) of this sheet
"""
sheetName = str(self.tabWidget.tabText(self.tabWidget.indexOf(self)))
# Note that we must increment row/col by 1 to match how the
# CellReference module expects them
return assignPipelineCellLocations(inPipeline, sheetName,
row + 1, col + 1, cellIds)
def getPipelineInfo(self, row, col):
""" getPipelineInfo(row: int, col: int) -> tuple
Return (locator, versionNumber, actions, pipeline, controller) for a cell
"""
info = self.getCellPipelineInfo(row, col)
if info:
return (info[0]['locator'],
info[0]['version'],
info[0]['actions'],
info[0]['pipeline'],
info[0]['controller'])
return None
def exportSheetToImage(self, fileName):
""" exportSheetToImage() -> None
Montage all the cell images and export to a file
"""
(rCount, cCount) = self.getDimension()
if rCount<1 or cCount<1: return
cellHeights = [self.getCellRect(r, 0).height()
for r in xrange(rCount)]
cellWidths = [self.getCellRect(0, c).width()
for c in xrange(cCount)]
finalImage = QtGui.QImage(sum(cellWidths), sum(cellHeights), QtGui.QImage.Format_ARGB32)
finalImage.fill(0xFFFFFFFF)
painter = QtGui.QPainter(finalImage)
y = 0
for r in xrange(rCount):
x = 0
for c in xrange(cCount):
widget = self.getCell(r, c)
if widget:
pix = widget.grabWindowPixmap()
cx = (cellWidths[c]-pix.width())/2
cy = (cellHeights[r]-pix.height())/2
painter.drawPixmap(x+cx, y+cy, widget.grabWindowPixmap())
x += cellWidths[c]
y += cellHeights[r]
painter.end()
#forcing png format if no extension was provided
(_,ext) = os.path.splitext(fileName)
if ext == '':
finalImage.save(fileName, 'png')
else:
#try to guess based on the extension
finalImage.save(fileName)
def exportSheetToImages(self, dirPath, format='png'):
""" exportSheetToImage() -> None
Montage all the cell images and export to a file
"""
(rCount, cCount) = self.getDimension()
for r in xrange(rCount):
for c in xrange(cCount):
widget = self.getCell(r, c)
if widget:
widget.grabWindowPixmap().save(dirPath+'/'+
chr(c+ord('a'))+
str(r+1)+
'.'+format)
def setSpan(self, row, col, rowSpan, colSpan):
""" setSpan(row, col, rowSpan, colSpan: int) -> None
Set the spanning at location (row, col). This is only a place
holder. Subclasses should implement this and getSpan for a
fully functioning spanning feature.
"""
pass
def getSpan(self, row, col):
""" setSpan(row, col: int) -> (rowSpan, colSpan: int)
Return the spanning at location (row, col). This is only a
place holder. Subclasses should implement this and setSpan for
a fully functioning spanning feature.
"""
return (1, 1)
class StandardWidgetSheetTab(QtGui.QWidget, StandardWidgetSheetTabInterface):
"""
StandardWidgetSheetTab is a container of StandardWidgetSheet with
a toolbar on top. This will be added directly to a QTabWidget for
displaying the spreadsheet.
"""
def __init__(self, tabWidget,row=None , col=None):
""" StandardWidgetSheet(tabWidget: QTabWidget,
row: int,
col: int) -> StandardWidgetSheet
Initialize with a toolbar and a sheet widget
"""
QtGui.QWidget.__init__(self, None)
StandardWidgetSheetTabInterface.__init__(self)
if not row:
row = configuration.rowCount
if not col:
col = configuration.columnCount
self.type = 'StandardWidgetSheetTab'
self.tabWidget = tabWidget
self.sheet = StandardWidgetSheet(row, col, self)
self.sheet.setFitToWindow(True)
self.toolBar = StandardWidgetToolBar(self)
self.vLayout = QtGui.QVBoxLayout()
self.vLayout.setSpacing(0)
self.vLayout.setMargin(0)
self.vLayout.addWidget(self.toolBar, 0)
self.vLayout.addWidget(self.sheet, 1)
self.setLayout(self.vLayout)
self.pipelineInfo = {}
self.setAcceptDrops(True)
def rowSpinBoxChanged(self):
""" rowSpinBoxChanged() -> None
        Handle changes to the number of rows
"""
if self.toolBar.rowSpinBox.value()!=self.sheet.rowCount():
self.sheet.setRowCount(self.toolBar.rowSpinBox.value())
self.sheet.stretchCells()
self.setEditingMode(self.tabWidget.editingMode)
def colSpinBoxChanged(self):
""" colSpinBoxChanged() -> None
        Handle changes to the number of columns
"""
if self.toolBar.colSpinBox.value()!=self.sheet.columnCount():
self.sheet.setColumnCount(self.toolBar.colSpinBox.value())
self.sheet.stretchCells()
self.setEditingMode(self.tabWidget.editingMode)
    ### Below are API wrappers that connect to self.sheet
def getDimension(self):
""" getDimension() -> tuple
Get the sheet dimensions
"""
return (self.sheet.rowCount(), self.sheet.columnCount())
def setDimension(self, rc, cc):
""" setDimension(rc: int, cc: int) -> None
Set the sheet dimensions
"""
self.toolBar.rowCountSpinBox().setValue(rc)
self.toolBar.colCountSpinBox().setValue(cc)
def getCellWidget(self, row, col):
""" getCellWidget(row: int, col: int) -> QWidget
Get cell at a specific row and column.
"""
return self.sheet.getCell(row, col)
def getCellRect(self, row, col):
""" getCellRect(row: int, col: int) -> QRect
Return the rectangle surrounding the cell at location (row, col)
in parent coordinates
"""
return self.sheet.getCellRect(row, col)
def getCellGlobalRect(self, row, col):
""" getCellGlobalRect(row: int, col: int) -> QRect
Return the rectangle surrounding the cell at location (row, col)
in global coordinates
"""
return self.sheet.getCellGlobalRect(row, col)
def showHelpers(self, show, globalPos):
""" showHelpers(show: boolean, globalPos: QPoint) -> None
Show/hide the helpers (toolbar, resizer) depending on the
status of show and the global position of the cursor
"""
localPos = self.sheet.viewport().mapFromGlobal(QtGui.QCursor.pos())
row = self.sheet.rowAt(localPos.y())
col = self.sheet.columnAt(localPos.x())
rect = self.sheet.getCellRect(row, col)
show = show and (rect.x()+rect.width()-localPos.x()<100 and
rect.y()+rect.height()-localPos.y()<100)
self.sheet.showHelpers(show, row, col)
def getSelectedLocations(self):
""" getSelectedLocations() -> list
Return the selected locations (row, col) of the current sheet
"""
indexes = self.sheet.selectedIndexes()
return [(idx.row(), idx.column()) for idx in indexes]
def clearSelection(self):
""" clearSelection() -> None
Clear all the selection in the current sheet
"""
self.sheet.clearSelection()
def setCellWidget(self, row, col, cellWidget):
""" setCellWidget(row: int,
col: int,
cellWidget: QWidget) -> None
Replace the current location (row, col) with a cell widget
"""
self.sheet.setCellByWidget(row, col, cellWidget)
def dragEnterEvent(self, event):
""" dragEnterEvent(event: QDragEnterEvent) -> None
Set to accept drops from the version tree
"""
mimeData = event.mimeData()
if hasattr(mimeData, 'versionId'):
event.accept()
else:
event.ignore()
def dragMoveEvent(self, event):
""" dragEnterEvent(event: QDragEnterEvent) -> None
Set to accept drops while moving from the version tree
"""
mimeData = event.mimeData()
if (hasattr(mimeData, 'versionId') and
hasattr(mimeData, 'controller')):
event.accept()
else:
event.ignore()
def dropEvent(self, event):
""" Execute the pipeline at the particular location """
mimeData = event.mimeData()
if (hasattr(mimeData, 'versionId') and
hasattr(mimeData, 'controller')):
event.accept()
versionId = mimeData.versionId
controller = mimeData.controller
pipeline = controller.vistrail.getPipeline(versionId)
inspector = PipelineInspector()
inspector.inspect_spreadsheet_cells(pipeline)
inspector.inspect_ambiguous_modules(pipeline)
if len(inspector.spreadsheet_cells)==1:
localPos = self.sheet.viewport().mapFromGlobal(QtGui.QCursor.pos())
row = self.sheet.rowAt(localPos.y())
col = self.sheet.columnAt(localPos.x())
if (row!=-1 and col!=-1):
pipeline = self.setPipelineToLocateAt(row, col, pipeline)
executePipelineWithProgress(pipeline, 'Execute Cell',
controller=controller,
locator=controller.locator,
current_version=versionId,
reason='Drop Version')
else:
event.ignore()
def setSpan(self, row, col, rowSpan, colSpan):
""" setSpan(row, col, rowSpan, colSpan: int) -> None
Set the spanning at location (row, col).
"""
colSpan = max(colSpan, 1)
rowSpan = max(rowSpan, 1)
(curRowSpan, curColSpan) = self.getSpan(row, col)
if rowSpan!=curRowSpan or colSpan!=curColSpan:
# Need to remove all cell except the top-left
for r in xrange(rowSpan):
for c in xrange(colSpan):
if r!=0 or c!=0:
self.deleteCell(row+r, col+c)
# Take the current widget out
curWidget = self.takeCell(row, col)
# ... before setting the span
self.sheet.setSpan(row, col, rowSpan, colSpan)
# Then put it back in
if curWidget:
self.setCellByWidget(row, col, curWidget)
def getSpan(self, row, col):
""" setSpan(row, col: int) -> (rowSpan, colSpan: int)
Return the spanning at location (row, col). This is only a
place holder. Subclasses should implement this and setSpan for
a fully functioning spanning feature.
"""
return (self.sheet.rowSpan(row, col), self.sheet.columnSpan(row, col))
class StandardWidgetTabBarEditor(QtGui.QLineEdit):
"""
StandardWidgetTabBarEditor overrides QLineEdit to enable canceling
edit when Esc is pressed
"""
def __init__(self, text='', parent=None):
""" StandardWidgetTabBarEditor(text: str, parent: QWidget)
-> StandardWidgetTabBarEditor
        Store the original text during initialization
"""
QtGui.QLineEdit.__init__(self, text, parent)
self.originalText = text
def keyPressEvent(self, e):
""" keyPressEvent(e: QKeyEvent) -> None
Override keyPressEvent to handle Esc key
"""
if e.key()==QtCore.Qt.Key_Escape:
e.ignore()
self.setText(self.originalText)
self.clearFocus()
else:
QtGui.QLineEdit.keyPressEvent(self, e)
class StandardWidgetTabBar(QtGui.QTabBar):
"""
StandardWidgetTabBar: a customized QTabBar to allow double-click
to change tab name
"""
def __init__(self, parent=None):
""" StandardWidgetTabBar(parent: QWidget) -> StandardWidgetTabBar
Initialize like the original QTabWidget TabBar
"""
QtGui.QTabBar.__init__(self, parent)
self.setAcceptDrops(True)
        self.setStatusTip('Move the sheet in, out and around '
                          'by dragging the tabs')
self.setDrawBase(False)
self.editingIndex = -1
self.editor = None
self.setFocusPolicy(QtCore.Qt.NoFocus)
self.connect(self, QtCore.SIGNAL('currentChanged(int)'),
self.updateTabText)
self.startDragPos = None
self.dragging = False
self.targetTab = -1
self.innerRubberBand = QtGui.QRubberBand(QtGui.QRubberBand.Rectangle,
self)
self.outerRubberBand = QtGui.QRubberBand(QtGui.QRubberBand.Rectangle,
None)
def mouseDoubleClickEvent(self, e):
""" mouseDoubleClickEvent(e: QMouseEvent) -> None
Handle Double-Click event to start the editor
"""
if e.buttons()!=QtCore.Qt.LeftButton or self.editor: return
# Update the current editing tab widget
self.editingIndex = self.currentIndex()
# A hack to capture the rect of the triangular tab from commonstyle.cpp
rect = self.tabRect(self.editingIndex)
h = rect.height()-2
dx = h/3 + 3
rect.adjust(dx+1,1,-dx,-1)
        # Display the editor in place of the tab text
text = self.tabText(self.editingIndex)
self.editor = StandardWidgetTabBarEditor(text, self)
self.editor.setFont(self.font())
self.editor.setFrame(False)
self.editor.setGeometry(rect)
self.editor.setAlignment(QtCore.Qt.AlignHCenter)
self.editor.selectAll()
self.connect(self.editor, QtCore.SIGNAL('editingFinished()'),
self.updateTabText)
self.editor.show()
self.editor.setFocus(QtCore.Qt.MouseFocusReason)
def updateTabText(self, idx=0):
""" updateTabText(idx: int) -> None
Update the tab text after editing has been finished
"""
if self.editingIndex>=0 and self.editor:
self.setTabText(self.editingIndex, self.editor.text())
self.emit(QtCore.SIGNAL('tabTextChanged(int,QString)'),
self.editingIndex,self.editor.text())
self.editor.deleteLater()
self.editingIndex = -1
self.editor = None
def indexAtPos(self, p):
""" indexAtPos(p: QPoint) -> int Reimplement of the private
indexAtPos to find the tab index under a point
"""
if self.tabRect(self.currentIndex()).contains(p):
return self.currentIndex()
for i in xrange(self.count()):
if self.isTabEnabled(i) and self.tabRect(i).contains(p):
return i
return -1
def mousePressEvent(self, e):
""" mousePressEvent(e: QMouseEvent) -> None
Handle mouse press event to see if we should start to drag tabs or not
"""
QtGui.QTabBar.mousePressEvent(self, e)
if e.buttons()==QtCore.Qt.LeftButton and self.editor==None:
self.startDragPos = QtCore.QPoint(e.x(), e.y())
def getGlobalRect(self, index):
""" getGlobalRect(self, index: int)
Get the rectangle of a tab in global coordinates
"""
if index<0: return None
rect = self.tabRect(index)
rect.moveTo(self.mapToGlobal(rect.topLeft()))
return rect
def highlightTab(self, index):
""" highlightTab(index: int)
Highlight the rubber band of a tab
"""
if index==-1:
self.innerRubberBand.hide()
else:
self.innerRubberBand.setGeometry(self.tabRect(index))
self.innerRubberBand.show()
def mouseMoveEvent(self, e):
""" mouseMoveEvent(e: QMouseEvent) -> None
Handle dragging tabs in and out or around
"""
QtGui.QTabBar.mouseMoveEvent(self, e)
if self.startDragPos:
            # Start dragging once the cursor has moved more than 4 pixels
if (self.startDragPos-e.pos()).manhattanLength()>=4:
self.startDragPos = None
self.dragging = True
if self.dragging:
t = self.indexAtPos(e.pos())
if t!=-1:
if t!=self.targetTab:
self.targetTab = t
self.outerRubberBand.hide()
self.highlightTab(t)
else:
self.highlightTab(-1)
if t!=self.targetTab:
self.targetTab = t
if self.count()>0:
if not self.outerRubberBand.isVisible():
index = self.getGlobalRect(self.currentIndex())
self.outerRubberBand.setGeometry(index)
self.outerRubberBand.move(e.globalPos())
self.outerRubberBand.show()
else:
self.outerRubberBand.move(e.globalPos())
def mouseReleaseEvent(self, e):
""" mouseReleaseEvent(e: QMouseEvent) -> None
        Finish any pending tab move or split when the mouse button is released
"""
QtGui.QTabBar.mouseReleaseEvent(self, e)
if self.dragging:
if self.targetTab!=-1 and self.targetTab!=self.currentIndex():
self.emit(QtCore.SIGNAL('tabMoveRequest(int,int)'),
self.currentIndex(),
self.targetTab)
elif self.targetTab==-1:
self.emit(QtCore.SIGNAL('tabSplitRequest(int,QPoint)'),
self.currentIndex(),
e.globalPos())
self.dragging = False
self.targetTab = -1
self.highlightTab(-1)
self.outerRubberBand.hide()
def slotIndex(self, pos):
""" slotIndex(pos: QPoint) -> int
Return the slot index between the slots at the cursor pos
"""
p = self.mapFromGlobal(pos)
for i in xrange(self.count()):
r = self.tabRect(i)
if self.isTabEnabled(i) and r.contains(p):
if p.x()<(r.x()+r.width()/2):
return i
else:
return i+1
return -1
def slotGeometry(self, idx):
""" slotGeometry(idx: int) -> QRect
Return the geometry between the slots at cursor pos
"""
if idx<0 or self.count()==0: return None
if idx<self.count():
rect = self.getGlobalRect(idx)
rect = QtCore.QRect(rect.x()-5, rect.y(), 5*2, rect.height())
return rect
else:
rect = self.getGlobalRect(self.count()-1)
rect = QtCore.QRect(rect.x()+rect.width()-5, rect.y(),
5*2, rect.height())
return rect
def dragEnterEvent(self, event):
""" dragEnterEvent(event: QDragEnterEvent) -> None
Set to accept drops from the other cell info
"""
mimeData = event.mimeData()
if hasattr(mimeData, 'cellInfo'):
event.setDropAction(QtCore.Qt.MoveAction)
event.accept()
idx = self.indexAtPos(event.pos())
if idx>=0:
self.setCurrentIndex(idx)
else:
event.ignore()
def dragMoveEvent(self, event):
""" dragMoveEvent(event: QDragMoveEvent) -> None
Set to accept drops from the other cell info
"""
idx = self.indexAtPos(event.pos())
if idx>=0:
self.setCurrentIndex(idx)
class StandardTabDockWidget(QtGui.QDockWidget):
"""
StandardTabDockWidget inherits from QDockWidget to contain a sheet
widget floating around that can be merge back to tab controller
"""
def __init__(self, title, tabWidget, tabBar, tabController):
""" StandardTabDockWidget(title: str,
tabWidget: QTabWidget,
tabBar: QTabBar,
tabController: StandardWidgetTabController)
-> StandardTabDockWidget
Initialize the dock widget to override the floating button
"""
QtGui.QDockWidget.__init__(self, title, None,
QtCore.Qt.FramelessWindowHint)
self.tabBar = tabBar
self.tabController = tabController
self.setFeatures(QtGui.QDockWidget.DockWidgetClosable |
QtGui.QDockWidget.DockWidgetMovable |
QtGui.QDockWidget.DockWidgetFloatable)
self.setFloating(True)
self.floatingButton = self.findFloatingButton()
if self.floatingButton:
self.floatingButton.blockSignals(True)
self.floatingButton.installEventFilter(self)
self.startDragPos = None
self.startDragging = False
self.windowRubberBand = QtGui.QRubberBand(QtGui.QRubberBand.Rectangle,
None)
tabWidget.setParent(self)
self.setWidget(tabWidget)
tabWidget.show()
self.resize(tabWidget.size())
def findFloatingButton(self):
""" findFloatingButton() -> QAbstractButton
        Hack to find the private floating button. Since only one such
        button exists, we just need to find the QAbstractButton child
"""
for c in self.children():
if isinstance(c, QtGui.QAbstractButton):
return c
return None
def eventFilter(self, q, e):
""" eventFilter(q: QObject, e: QEvent) -> depends on event type
        Event filter the floating button to make it merge back into the tab controller
"""
if q and q==self.floatingButton:
if (e.type()==QtCore.QEvent.MouseButtonRelease and
e.button()&QtCore.Qt.LeftButton):
if self.isMaximized():
self.showNormal()
else:
self.showMaximized()
return False
return QtGui.QDockWidget.eventFilter(self, q, e)
def isTabControllerUnderMouse(self, tb):
""" Check if any of common parent of the tab controller and tb
is under the mouse """
tbp = []
while tb!=None:
tbp.append(tb)
tb = tb.parent()
tc = self.tabController
while tc!=None:
if tc in tbp:
return True
tc = tc.parent()
return False
def event(self, e):
""" event(e: QEvent) -> depends on event type
Handle movement of the dock widget to snap to the tab controller
"""
# MOUSE PRESS (QtCore.QEvent.NonClientAreaMouseButtonPress=174)
if e.type() in [QtCore.QEvent.MouseButtonPress,174]:
if e.type()==174:
gp = QtGui.QCursor.pos()
ontitle = True
else:
gp = e.globalPos()
ontitle = e.y()<self.widget().y() and e.buttons()&QtCore.Qt.LeftButton
if ontitle:
self.startDragPos = QtCore.QPoint(gp)
self.grabMouse()
return True
elif e.type()==QtCore.QEvent.MouseMove:
if not (e.buttons() & QtCore.Qt.LeftButton):
self.windowRubberBand.hide()
self.setMouseTracking(False)
return QtGui.QDockWidget.event(self, e)
gp = e.globalPos()
if (not self.startDragging and
self.startDragPos and
(self.startDragPos-gp).manhattanLength()>=4):
self.startDragging = True
self.windowRubberBand.setGeometry(self.geometry())
self.startDragPos = self.pos()-gp
self.windowRubberBand.show()
self.setMouseTracking(True)
if self.startDragging:
tb = QtGui.QApplication.widgetAt(gp)
if tb==self.tabBar:
idx = tb.slotIndex(gp)
if idx>=0:
self.windowRubberBand.setGeometry(tb.slotGeometry(idx))
elif (tb!=None and self.tabController.count()==0 and
self.isTabControllerUnderMouse(tb)):
r = self.tabController.frameGeometry()
r.moveTo(self.tabController.mapToGlobal(r.topLeft()))
self.windowRubberBand.setGeometry(r)
else:
rect = QtCore.QRect(self.startDragPos+gp, self.size())
self.windowRubberBand.setGeometry(rect)
return True
        # MOUSE RELEASE (QtCore.QEvent.NonClientAreaMouseButtonRelease=175)
        elif e.type() in [QtCore.QEvent.MouseButtonRelease, 175]:
            if self.startDragging:
                if e.type()==175:
gp = QtGui.QCursor.pos()
else:
gp = e.globalPos()
self.setMouseTracking(False)
self.windowRubberBand.hide()
self.startDragPos = None
self.startDragging = False
tb = QtGui.QApplication.widgetAt(gp)
if tb==self.tabBar:
idx = tb.slotIndex(gp)
if idx>=0:
self.hide()
self.tabController.mergeTab(self, idx)
elif (tb!=None and self.tabController.count()==0 and
self.isTabControllerUnderMouse(tb)):
self.hide()
self.tabController.mergeTab(self, 0)
else:
self.move(self.windowRubberBand.pos())
self.releaseMouse()
self.setFocus(QtCore.Qt.MouseFocusReason)
return True
# MOUSE DOUBLE CLICK (QtCore.QEvent.NonClientAreaMouseButtonDblClick=176)
elif e.type() in [QtCore.QEvent.MouseButtonDblClick, 176]:
if (e.type()==176) or (e.buttons()&QtCore.Qt.LeftButton):
self.hide()
self.tabController.mergeTab(self, self.tabController.count())
return True
return QtGui.QDockWidget.event(self, e)
def closeEvent(self, event):
""" On close event dock the sheet back to the spreadsheet window """
self.tabController.mergeTab(self, self.tabController.count())
event.accept()
spreadsheetRegistry.registerSheet('StandardWidgetSheetTab',
StandardWidgetSheetTab)
|
|
import asyncio
import signal
import ssl
import threading
try:
import aiohttp
except ImportError: # pragma: no cover
aiohttp = None
from . import client
from . import exceptions
from . import packet
from . import payload
async_signal_handler_set = False
def async_signal_handler():
"""SIGINT handler.
Disconnect all active async clients.
"""
async def _handler():
asyncio.get_event_loop().stop()
for c in client.connected_clients[:]:
if c.is_asyncio_based():
await c.disconnect()
else: # pragma: no cover
pass
asyncio.ensure_future(_handler())
class AsyncClient(client.Client):
"""An Engine.IO client for asyncio.
This class implements a fully compliant Engine.IO web client with support
for websocket and long-polling transports, compatible with the asyncio
framework on Python 3.5 or newer.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``. Note that fatal errors are logged even when
``logger`` is ``False``.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions.
:param request_timeout: A timeout in seconds for requests. The default is
5 seconds.
:param http_session: an initialized ``aiohttp.ClientSession`` object to be
used when sending requests to the server. Use it if
you need to add special client options such as proxy
servers, SSL certificates, etc.
:param ssl_verify: ``True`` to verify SSL certificates, or ``False`` to
skip SSL certificate verification, allowing
connections to servers with self signed certificates.
The default is ``True``.
:param handle_sigint: Set to ``True`` to automatically handle disconnection
when the process is interrupted, or to ``False`` to
leave interrupt handling to the calling application.
Interrupt handling can only be enabled when the
client instance is created in the main thread.
"""
def is_asyncio_based(self):
return True
async def connect(self, url, headers=None, transports=None,
engineio_path='engine.io'):
"""Connect to an Engine.IO server.
:param url: The URL of the Engine.IO server. It can include custom
query string parameters if required by the server.
:param headers: A dictionary with custom headers to send with the
connection request.
:param transports: The list of allowed transports. Valid transports
are ``'polling'`` and ``'websocket'``. If not
given, the polling transport is connected first,
then an upgrade to websocket is attempted.
:param engineio_path: The endpoint where the Engine.IO server is
installed. The default value is appropriate for
most cases.
Note: this method is a coroutine.
Example usage::
eio = engineio.Client()
await eio.connect('http://localhost:5000')
"""
global async_signal_handler_set
if self.handle_sigint and not async_signal_handler_set and \
threading.current_thread() == threading.main_thread():
try:
asyncio.get_event_loop().add_signal_handler(
signal.SIGINT, async_signal_handler)
async_signal_handler_set = True
except NotImplementedError: # pragma: no cover
self.logger.warning('Signal handler is unsupported')
if self.state != 'disconnected':
raise ValueError('Client is not in a disconnected state')
valid_transports = ['polling', 'websocket']
if transports is not None:
if isinstance(transports, str):
transports = [transports]
transports = [transport for transport in transports
if transport in valid_transports]
if not transports:
raise ValueError('No valid transports provided')
self.transports = transports or valid_transports
self.queue = self.create_queue()
return await getattr(self, '_connect_' + self.transports[0])(
url, headers or {}, engineio_path)
async def wait(self):
"""Wait until the connection with the server ends.
Client applications can use this function to block the main thread
during the life of the connection.
Note: this method is a coroutine.
"""
if self.read_loop_task:
await self.read_loop_task
    async def send(self, data):
        """Send a message to the server.
        :param data: The data to send to the server. Data can be of type
``str``, ``bytes``, ``list`` or ``dict``. If a ``list``
or ``dict``, the data will be serialized as JSON.
Note: this method is a coroutine.
"""
await self._send_packet(packet.Packet(packet.MESSAGE, data=data))
async def disconnect(self, abort=False):
"""Disconnect from the server.
:param abort: If set to ``True``, do not wait for background tasks
associated with the connection to end.
Note: this method is a coroutine.
"""
if self.state == 'connected':
await self._send_packet(packet.Packet(packet.CLOSE))
await self.queue.put(None)
self.state = 'disconnecting'
await self._trigger_event('disconnect', run_async=False)
if self.current_transport == 'websocket':
await self.ws.close()
if not abort:
await self.read_loop_task
self.state = 'disconnected'
try:
client.connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
def start_background_task(self, target, *args, **kwargs):
"""Start a background task.
This is a utility function that applications can use to start a
background task.
:param target: the target function to execute.
:param args: arguments to pass to the function.
:param kwargs: keyword arguments to pass to the function.
The return value is a ``asyncio.Task`` object.
"""
return asyncio.ensure_future(target(*args, **kwargs))
async def sleep(self, seconds=0):
"""Sleep for the requested amount of time.
Note: this method is a coroutine.
"""
return await asyncio.sleep(seconds)
def create_queue(self):
"""Create a queue object."""
q = asyncio.Queue()
q.Empty = asyncio.QueueEmpty
return q
def create_event(self):
"""Create an event object."""
return asyncio.Event()
def __del__(self): # pragma: no cover
# try to close the aiohttp session if it is still open
if self.http and not self.http.closed:
try:
loop = asyncio.get_event_loop()
if loop.is_running():
                    asyncio.ensure_future(self.http.close())
else:
loop.run_until_complete(self.http.close())
except:
pass
async def _connect_polling(self, url, headers, engineio_path):
"""Establish a long-polling connection to the Engine.IO server."""
if aiohttp is None: # pragma: no cover
self.logger.error('aiohttp not installed -- cannot make HTTP '
'requests!')
return
self.base_url = self._get_engineio_url(url, engineio_path, 'polling')
self.logger.info('Attempting polling connection to ' + self.base_url)
r = await self._send_request(
'GET', self.base_url + self._get_url_timestamp(), headers=headers,
timeout=self.request_timeout)
if r is None or isinstance(r, str):
self._reset()
raise exceptions.ConnectionError(
r or 'Connection refused by the server')
if r.status < 200 or r.status >= 300:
self._reset()
try:
arg = await r.json()
except aiohttp.ClientError:
arg = None
raise exceptions.ConnectionError(
'Unexpected status code {} in server response'.format(
r.status), arg)
try:
p = payload.Payload(encoded_payload=(await r.read()).decode(
'utf-8'))
except ValueError:
raise exceptions.ConnectionError(
'Unexpected response from server') from None
open_packet = p.packets[0]
if open_packet.packet_type != packet.OPEN:
raise exceptions.ConnectionError(
'OPEN packet not returned by server')
self.logger.info(
'Polling connection accepted with ' + str(open_packet.data))
self.sid = open_packet.data['sid']
self.upgrades = open_packet.data['upgrades']
self.ping_interval = int(open_packet.data['pingInterval']) / 1000.0
self.ping_timeout = int(open_packet.data['pingTimeout']) / 1000.0
self.current_transport = 'polling'
self.base_url += '&sid=' + self.sid
self.state = 'connected'
client.connected_clients.append(self)
await self._trigger_event('connect', run_async=False)
for pkt in p.packets[1:]:
await self._receive_packet(pkt)
if 'websocket' in self.upgrades and 'websocket' in self.transports:
# attempt to upgrade to websocket
if await self._connect_websocket(url, headers, engineio_path):
# upgrade to websocket succeeded, we're done here
return
self.write_loop_task = self.start_background_task(self._write_loop)
self.read_loop_task = self.start_background_task(
self._read_loop_polling)
async def _connect_websocket(self, url, headers, engineio_path):
"""Establish or upgrade to a WebSocket connection with the server."""
if aiohttp is None: # pragma: no cover
self.logger.error('aiohttp package not installed')
return False
websocket_url = self._get_engineio_url(url, engineio_path,
'websocket')
if self.sid:
self.logger.info(
'Attempting WebSocket upgrade to ' + websocket_url)
upgrade = True
websocket_url += '&sid=' + self.sid
else:
upgrade = False
self.base_url = websocket_url
self.logger.info(
'Attempting WebSocket connection to ' + websocket_url)
if self.http is None or self.http.closed: # pragma: no cover
self.http = aiohttp.ClientSession()
# extract any new cookies passed in a header so that they can also be
        # sent to the WebSocket route
cookies = {}
for header, value in headers.items():
if header.lower() == 'cookie':
cookies = dict(
[cookie.split('=', 1) for cookie in value.split('; ')])
del headers[header]
break
self.http.cookie_jar.update_cookies(cookies)
try:
if not self.ssl_verify:
ssl_context = ssl.create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
ws = await self.http.ws_connect(
websocket_url + self._get_url_timestamp(),
headers=headers, ssl=ssl_context)
else:
ws = await self.http.ws_connect(
websocket_url + self._get_url_timestamp(),
headers=headers)
except (aiohttp.client_exceptions.WSServerHandshakeError,
aiohttp.client_exceptions.ServerConnectionError,
aiohttp.client_exceptions.ClientConnectionError):
if upgrade:
self.logger.warning(
'WebSocket upgrade failed: connection error')
return False
else:
raise exceptions.ConnectionError('Connection error')
if upgrade:
p = packet.Packet(packet.PING, data='probe').encode()
try:
await ws.send_str(p)
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected send exception: %s',
str(e))
return False
try:
p = (await ws.receive()).data
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected recv exception: %s',
str(e))
return False
pkt = packet.Packet(encoded_packet=p)
if pkt.packet_type != packet.PONG or pkt.data != 'probe':
self.logger.warning(
'WebSocket upgrade failed: no PONG packet')
return False
p = packet.Packet(packet.UPGRADE).encode()
try:
await ws.send_str(p)
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected send exception: %s',
str(e))
return False
self.current_transport = 'websocket'
self.logger.info('WebSocket upgrade was successful')
else:
try:
p = (await ws.receive()).data
except Exception as e: # pragma: no cover
raise exceptions.ConnectionError(
'Unexpected recv exception: ' + str(e))
open_packet = packet.Packet(encoded_packet=p)
if open_packet.packet_type != packet.OPEN:
raise exceptions.ConnectionError('no OPEN packet')
self.logger.info(
'WebSocket connection accepted with ' + str(open_packet.data))
self.sid = open_packet.data['sid']
self.upgrades = open_packet.data['upgrades']
self.ping_interval = int(open_packet.data['pingInterval']) / 1000.0
self.ping_timeout = int(open_packet.data['pingTimeout']) / 1000.0
self.current_transport = 'websocket'
self.state = 'connected'
client.connected_clients.append(self)
await self._trigger_event('connect', run_async=False)
self.ws = ws
self.write_loop_task = self.start_background_task(self._write_loop)
self.read_loop_task = self.start_background_task(
self._read_loop_websocket)
return True
async def _receive_packet(self, pkt):
"""Handle incoming packets from the server."""
packet_name = packet.packet_names[pkt.packet_type] \
if pkt.packet_type < len(packet.packet_names) else 'UNKNOWN'
self.logger.info(
'Received packet %s data %s', packet_name,
pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
if pkt.packet_type == packet.MESSAGE:
await self._trigger_event('message', pkt.data, run_async=True)
elif pkt.packet_type == packet.PING:
await self._send_packet(packet.Packet(packet.PONG, pkt.data))
elif pkt.packet_type == packet.CLOSE:
await self.disconnect(abort=True)
elif pkt.packet_type == packet.NOOP:
pass
else:
self.logger.error('Received unexpected packet of type %s',
pkt.packet_type)
async def _send_packet(self, pkt):
"""Queue a packet to be sent to the server."""
if self.state != 'connected':
return
await self.queue.put(pkt)
self.logger.info(
'Sending packet %s data %s',
packet.packet_names[pkt.packet_type],
pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
async def _send_request(
self, method, url, headers=None, body=None,
timeout=None): # pragma: no cover
if self.http is None or self.http.closed:
self.http = aiohttp.ClientSession()
http_method = getattr(self.http, method.lower())
try:
if not self.ssl_verify:
return await http_method(
url, headers=headers, data=body,
timeout=aiohttp.ClientTimeout(total=timeout), ssl=False)
else:
return await http_method(
url, headers=headers, data=body,
timeout=aiohttp.ClientTimeout(total=timeout))
except (aiohttp.ClientError, asyncio.TimeoutError) as exc:
self.logger.info('HTTP %s request to %s failed with error %s.',
method, url, exc)
return str(exc)
async def _trigger_event(self, event, *args, **kwargs):
"""Invoke an event handler."""
run_async = kwargs.pop('run_async', False)
ret = None
if event in self.handlers:
if asyncio.iscoroutinefunction(self.handlers[event]) is True:
if run_async:
return self.start_background_task(self.handlers[event],
*args)
else:
try:
ret = await self.handlers[event](*args)
except asyncio.CancelledError: # pragma: no cover
pass
except:
self.logger.exception(event + ' async handler error')
if event == 'connect':
# if connect handler raised error we reject the
# connection
return False
else:
if run_async:
async def async_handler():
return self.handlers[event](*args)
return self.start_background_task(async_handler)
else:
try:
ret = self.handlers[event](*args)
except:
self.logger.exception(event + ' handler error')
if event == 'connect':
# if connect handler raised error we reject the
# connection
return False
return ret
async def _read_loop_polling(self):
"""Read packets by polling the Engine.IO server."""
while self.state == 'connected':
self.logger.info(
'Sending polling GET request to ' + self.base_url)
r = await self._send_request(
'GET', self.base_url + self._get_url_timestamp(),
timeout=max(self.ping_interval, self.ping_timeout) + 5)
if r is None or isinstance(r, str):
self.logger.warning(
r or 'Connection refused by the server, aborting')
await self.queue.put(None)
break
if r.status < 200 or r.status >= 300:
self.logger.warning('Unexpected status code %s in server '
'response, aborting', r.status)
await self.queue.put(None)
break
try:
p = payload.Payload(encoded_payload=(await r.read()).decode(
'utf-8'))
except ValueError:
self.logger.warning(
'Unexpected packet from server, aborting')
await self.queue.put(None)
break
for pkt in p.packets:
await self._receive_packet(pkt)
self.logger.info('Waiting for write loop task to end')
await self.write_loop_task
if self.state == 'connected':
await self._trigger_event('disconnect', run_async=False)
try:
client.connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
self.logger.info('Exiting read loop task')
async def _read_loop_websocket(self):
"""Read packets from the Engine.IO WebSocket connection."""
while self.state == 'connected':
p = None
try:
p = await asyncio.wait_for(
self.ws.receive(),
timeout=self.ping_interval + self.ping_timeout)
p = p.data
if p is None: # pragma: no cover
await self.queue.put(None)
break # the connection is broken
except asyncio.TimeoutError:
self.logger.warning(
'Server has stopped communicating, aborting')
await self.queue.put(None)
break
except aiohttp.client_exceptions.ServerDisconnectedError:
self.logger.info(
'Read loop: WebSocket connection was closed, aborting')
await self.queue.put(None)
break
except Exception as e:
self.logger.info(
'Unexpected error receiving packet: "%s", aborting',
str(e))
await self.queue.put(None)
break
try:
pkt = packet.Packet(encoded_packet=p)
except Exception as e: # pragma: no cover
self.logger.info(
'Unexpected error decoding packet: "%s", aborting', str(e))
await self.queue.put(None)
break
await self._receive_packet(pkt)
self.logger.info('Waiting for write loop task to end')
await self.write_loop_task
if self.state == 'connected':
await self._trigger_event('disconnect', run_async=False)
try:
client.connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
self.logger.info('Exiting read loop task')
async def _write_loop(self):
"""This background task sends packages to the server as they are
pushed to the send queue.
"""
while self.state == 'connected':
# to simplify the timeout handling, use the maximum of the
# ping interval and ping timeout as timeout, with an extra 5
# seconds grace period
timeout = max(self.ping_interval, self.ping_timeout) + 5
packets = None
try:
packets = [await asyncio.wait_for(self.queue.get(), timeout)]
except (self.queue.Empty, asyncio.TimeoutError,
asyncio.CancelledError):
self.logger.error('packet queue is empty, aborting')
break
if packets == [None]:
self.queue.task_done()
packets = []
else:
while True:
try:
packets.append(self.queue.get_nowait())
except self.queue.Empty:
break
if packets[-1] is None:
packets = packets[:-1]
self.queue.task_done()
break
if not packets:
# empty packet list returned -> connection closed
break
if self.current_transport == 'polling':
p = payload.Payload(packets=packets)
r = await self._send_request(
'POST', self.base_url, body=p.encode(),
headers={'Content-Type': 'text/plain'},
timeout=self.request_timeout)
for pkt in packets:
self.queue.task_done()
if r is None or isinstance(r, str):
self.logger.warning(
r or 'Connection refused by the server, aborting')
break
if r.status < 200 or r.status >= 300:
self.logger.warning('Unexpected status code %s in server '
'response, aborting', r.status)
self._reset()
break
else:
# websocket
try:
for pkt in packets:
if pkt.binary:
await self.ws.send_bytes(pkt.encode())
else:
await self.ws.send_str(pkt.encode())
self.queue.task_done()
except (aiohttp.client_exceptions.ServerDisconnectedError,
BrokenPipeError, OSError):
self.logger.info(
'Write loop: WebSocket connection was closed, '
'aborting')
break
self.logger.info('Exiting write loop task')
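# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module). The
# ``engineio.AsyncClient`` constructor and the ``on()`` handler registration
# come from the public package API / base Client class, which are not shown in
# this file; the URL below is a placeholder for a running Engine.IO server.
# ---------------------------------------------------------------------------
if __name__ == '__main__':  # pragma: no cover
    import engineio

    eio = engineio.AsyncClient()

    @eio.on('message')
    async def on_message(data):
        print('received:', data)

    async def demo():
        # connects with polling first, then upgrades to websocket if possible
        await eio.connect('http://localhost:5000')
        await eio.send('hello')   # queued by _send_packet() and flushed by _write_loop()
        await eio.wait()          # block until the read loop task ends

    asyncio.get_event_loop().run_until_complete(demo())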
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from mock import MagicMock, call, patch
from uiautomator import AutomatorDevice, Selector
class TestDevice(unittest.TestCase):
def setUp(self):
self.device = AutomatorDevice()
self.device.server = MagicMock()
self.device.server.jsonrpc = MagicMock()
self.device.server.jsonrpc_wrap = MagicMock()
def test_info(self):
self.device.server.jsonrpc.deviceInfo = MagicMock()
self.device.server.jsonrpc.deviceInfo.return_value = {}
self.assertEqual(self.device.info, {})
self.device.server.jsonrpc.deviceInfo.assert_called_once_with()
def test_click(self):
self.device.server.jsonrpc.click = MagicMock()
self.device.server.jsonrpc.click.return_value = True
self.assertEqual(self.device.click(1, 2), True)
self.device.server.jsonrpc.click.assert_called_once_with(1, 2)
def test_swipe(self):
self.device.server.jsonrpc.swipe = MagicMock()
self.device.server.jsonrpc.swipe.return_value = True
self.assertEqual(self.device.swipe(1, 2, 3, 4, 100), True)
self.device.server.jsonrpc.swipe.assert_called_once_with(1, 2, 3, 4, 100)
def test_long_click(self):
self.device.server.jsonrpc.swipe = MagicMock()
self.device.server.jsonrpc.swipe.return_value = True
x, y = 100, 200
self.assertEqual(self.device.long_click(x, y), True)
self.device.server.jsonrpc.swipe.assert_called_once_with(x, y, x+1, y+1, 100)
def test_drag(self):
self.device.server.jsonrpc.drag = MagicMock()
self.device.server.jsonrpc.drag.return_value = True
self.assertEqual(self.device.drag(1, 2, 3, 4, 100), True)
self.device.server.jsonrpc.drag.assert_called_once_with(1, 2, 3, 4, 100)
def test_dump(self):
self.device.server.jsonrpc.dumpWindowHierarchy = MagicMock()
self.device.server.jsonrpc.dumpWindowHierarchy.return_value = "<?xml>"
self.assertEqual(self.device.dump("/tmp/test.xml"), "<?xml>")
self.device.server.jsonrpc.dumpWindowHierarchy.assert_called_once_with(True, None)
def test_screenshot(self):
self.device.server.jsonrpc.takeScreenshot = MagicMock()
self.device.server.jsonrpc.takeScreenshot.return_value = "1.png"
self.device.server.adb.cmd = cmd = MagicMock()
cmd.return_value.returncode = 0
self.assertEqual(self.device.screenshot("a.png", 1.0, 99), "a.png")
self.device.server.jsonrpc.takeScreenshot.assert_called_once_with("screenshot.png", 1.0, 99)
self.assertEqual(cmd.call_args_list, [call("pull", "1.png", "a.png"), call("shell", "rm", "1.png")])
self.device.server.jsonrpc.takeScreenshot.return_value = None
self.assertEqual(self.device.screenshot("a.png", 1.0, 100), None)
def test_freeze_rotation(self):
self.device.server.jsonrpc.freezeRotation = MagicMock()
self.device.freeze_rotation(True)
self.device.freeze_rotation(False)
self.assertEqual(self.device.server.jsonrpc.freezeRotation.call_args_list, [call(True), call(False)])
def test_orientation(self):
self.device.server.jsonrpc.deviceInfo = MagicMock()
orientation = {
0: "natural",
1: "left",
2: "upsidedown",
3: "right"
}
for i in range(4):
self.device.server.jsonrpc.deviceInfo.return_value = {"displayRotation": i}
self.assertEqual(self.device.orientation, orientation[i])
# set
orientations = [
(0, "natural", "n", 0),
(1, "left", "l", 90),
(2, "upsidedown", "u", 180),
(3, "right", "r", 270)
]
for values in orientations:
for value in values:
self.device.server.jsonrpc.setOrientation = MagicMock()
self.device.orientation = value
self.device.server.jsonrpc.setOrientation.assert_called_once_with(values[1])
with self.assertRaises(ValueError):
self.device.orientation = "invalid orientation"
def test_last_traversed_text(self):
self.device.server.jsonrpc.getLastTraversedText = MagicMock()
self.device.server.jsonrpc.getLastTraversedText.return_value = "abcdef"
self.assertEqual(self.device.last_traversed_text, "abcdef")
self.device.server.jsonrpc.getLastTraversedText.assert_called_once_with()
def test_clear_traversed_text(self):
self.device.server.jsonrpc.clearLastTraversedText = MagicMock()
self.device.clear_traversed_text()
self.device.server.jsonrpc.clearLastTraversedText.assert_called_once_with()
def test_open(self):
self.device.server.jsonrpc.openNotification = MagicMock()
self.device.open.notification()
self.device.server.jsonrpc.openNotification.assert_called_once_with()
self.device.server.jsonrpc.openQuickSettings = MagicMock()
self.device.open.quick_settings()
self.device.server.jsonrpc.openQuickSettings.assert_called_once_with()
def test_watchers(self):
names = ["a", "b", "c"]
self.device.server.jsonrpc.getWatchers = MagicMock()
self.device.server.jsonrpc.getWatchers.return_value = names
self.assertEqual(self.device.watchers, names)
self.device.server.jsonrpc.getWatchers.assert_called_once_with()
self.device.server.jsonrpc.hasAnyWatcherTriggered = MagicMock()
self.device.server.jsonrpc.hasAnyWatcherTriggered.return_value = True
self.assertEqual(self.device.watchers.triggered, True)
self.device.server.jsonrpc.hasAnyWatcherTriggered.assert_called_once_with()
self.device.server.jsonrpc.removeWatcher = MagicMock()
self.device.watchers.remove("a")
self.device.server.jsonrpc.removeWatcher.assert_called_once_with("a")
self.device.server.jsonrpc.removeWatcher = MagicMock()
self.device.watchers.remove()
self.assertEqual(self.device.server.jsonrpc.removeWatcher.call_args_list, [call(name) for name in names])
self.device.server.jsonrpc.resetWatcherTriggers = MagicMock()
self.device.watchers.reset()
self.device.server.jsonrpc.resetWatcherTriggers.assert_called_once_with()
self.device.server.jsonrpc.runWatchers = MagicMock()
self.device.watchers.run()
self.device.server.jsonrpc.runWatchers.assert_called_once_with()
def test_watcher(self):
self.device.server.jsonrpc.hasWatcherTriggered = MagicMock()
self.device.server.jsonrpc.hasWatcherTriggered.return_value = False
self.assertFalse(self.device.watcher("name").triggered)
self.device.server.jsonrpc.hasWatcherTriggered.assert_called_once_with("name")
self.device.server.jsonrpc.removeWatcher = MagicMock()
self.device.watcher("a").remove()
self.device.server.jsonrpc.removeWatcher.assert_called_once_with("a")
self.device.server.jsonrpc.registerClickUiObjectWatcher = MagicMock()
condition1 = {"text": "my text", "className": "android"}
condition2 = {"description": "my desc", "clickable": True}
target = {"className": "android.widget.Button", "text": "OK"}
self.device.watcher("watcher").when(**condition1).when(**condition2).click(**target)
self.device.server.jsonrpc.registerClickUiObjectWatcher.assert_called_once_with("watcher", [Selector(**condition1), Selector(**condition2)], Selector(**target))
self.device.server.jsonrpc.registerPressKeyskWatcher = MagicMock()
self.device.watcher("watcher2").when(**condition1).when(**condition2).press.back.home.power("menu")
self.device.server.jsonrpc.registerPressKeyskWatcher.assert_called_once_with(
"watcher2", [Selector(**condition1), Selector(**condition2)], ("back", "home", "power", "menu"))
def test_press(self):
key = ["home", "back", "left", "right", "up", "down", "center",
"menu", "search", "enter", "delete", "del", "recent",
"volume_up", "volume_down", "volume_mute", "camera", "power"]
self.device.server.jsonrpc.pressKey = MagicMock()
self.device.server.jsonrpc.pressKey.return_value = True
self.assertTrue(self.device.press.home())
self.device.server.jsonrpc.pressKey.return_value = False
self.assertFalse(self.device.press.back())
self.device.server.jsonrpc.pressKey.return_value = False
for k in key:
self.assertFalse(self.device.press(k))
self.assertEqual(self.device.server.jsonrpc.pressKey.call_args_list, [call("home"), call("back")] + [call(k) for k in key])
self.device.server.jsonrpc.pressKeyCode.return_value = True
self.assertTrue(self.device.press(1))
self.assertTrue(self.device.press(1, 2))
self.assertEqual(self.device.server.jsonrpc.pressKeyCode.call_args_list, [call(1), call(1, 2)])
def test_wakeup(self):
self.device.server.jsonrpc.wakeUp = MagicMock()
self.device.wakeup()
self.device.server.jsonrpc.wakeUp.assert_called_once_with()
self.device.server.jsonrpc.wakeUp = MagicMock()
self.device.screen.on()
self.device.server.jsonrpc.wakeUp.assert_called_once_with()
self.device.server.jsonrpc.wakeUp = MagicMock()
self.device.screen("on")
self.device.server.jsonrpc.wakeUp.assert_called_once_with()
def test_sleep(self):
self.device.server.jsonrpc.sleep = MagicMock()
self.device.sleep()
self.device.server.jsonrpc.sleep.assert_called_once_with()
self.device.server.jsonrpc.sleep = MagicMock()
self.device.screen.off()
self.device.server.jsonrpc.sleep.assert_called_once_with()
self.device.server.jsonrpc.sleep = MagicMock()
self.device.screen("off")
self.device.server.jsonrpc.sleep.assert_called_once_with()
def test_wait_idle(self):
self.device.server.jsonrpc_wrap.return_value.waitForIdle = MagicMock()
self.device.server.jsonrpc_wrap.return_value.waitForIdle.return_value = True
self.assertTrue(self.device.wait.idle(timeout=10))
self.device.server.jsonrpc_wrap.return_value.waitForIdle.assert_called_once_with(10)
self.device.server.jsonrpc_wrap.return_value.waitForIdle = MagicMock()
self.device.server.jsonrpc_wrap.return_value.waitForIdle.return_value = False
self.assertFalse(self.device.wait("idle", timeout=10))
self.device.server.jsonrpc_wrap.return_value.waitForIdle.assert_called_once_with(10)
def test_wait_update(self):
self.device.server.jsonrpc_wrap.return_value.waitForWindowUpdate = MagicMock()
self.device.server.jsonrpc_wrap.return_value.waitForWindowUpdate.return_value = True
self.assertTrue(self.device.wait.update(timeout=10, package_name="android"))
self.device.server.jsonrpc_wrap.return_value.waitForWindowUpdate.assert_called_once_with("android", 10)
self.device.server.jsonrpc_wrap.return_value.waitForWindowUpdate = MagicMock()
self.device.server.jsonrpc_wrap.return_value.waitForWindowUpdate.return_value = False
self.assertFalse(self.device.wait("update", timeout=100, package_name="android"))
self.device.server.jsonrpc_wrap.return_value.waitForWindowUpdate.assert_called_once_with("android", 100)
def test_get_info_attr(self):
info = {"test_a": 1, "test_b": "string", "displayWidth": 720, "displayHeight": 1024}
self.device.server.jsonrpc.deviceInfo = MagicMock()
self.device.server.jsonrpc.deviceInfo.return_value = info
for k in info:
self.assertEqual(getattr(self.device, k), info[k])
self.assertEqual(self.device.width, info["displayWidth"])
self.assertEqual(self.device.height, info["displayHeight"])
with self.assertRaises(AttributeError):
self.device.not_exists
def test_device_obj(self):
with patch("uiautomator.AutomatorDeviceObject") as AutomatorDeviceObject:
kwargs = {"text": "abc", "description": "description...", "clickable": True}
self.device(**kwargs)
AutomatorDeviceObject.assert_called_once_with(self.device, Selector(**kwargs))
with patch("uiautomator.AutomatorDeviceObject") as AutomatorDeviceObject:
AutomatorDeviceObject.return_value.exists = True
self.assertTrue(self.device.exists(clickable=True))
AutomatorDeviceObject.return_value.exists = False
self.assertFalse(self.device.exists(text="..."))
class TestDeviceWithSerial(unittest.TestCase):
def test_serial(self):
with patch('uiautomator.AutomatorServer') as AutomatorServer:
device = AutomatorDevice("abcdefhijklmn")
AutomatorServer.assert_called_once_with(serial="abcdefhijklmn", local_port=None)
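# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original tests): the
# device API exercised by the tests above, assuming an Android device reachable
# over adb and the uiautomator RPC server running on it.
# ---------------------------------------------------------------------------
if __name__ == '__main__':  # pragma: no cover
    from uiautomator import Device
    d = Device()                 # or Device('serialno') for a specific device
    print(d.info)                # deviceInfo over JSON-RPC
    d.screen.on()                # wakeUp()
    d.press.home()               # pressKey('home')
    d.click(540, 960)            # tap at absolute screen coordinates
    if d(text='OK', clickable=True).exists:
        d(text='OK').click()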
|
|
#!/usr/bin/env python
"""Visualizing Character State Reconstruction Results using TreeGraph2
"""
#####################
# IMPORT OPERATIONS #
#####################
from __future__ import absolute_import
from __future__ import print_function
import six
from collections import OrderedDict as _ordDict
from os.path import isfile as _isfile
from xml.etree import ElementTree as _ET
import argparse
import re
import sys
import CustomFileOps as CFO
import CustomPhyloOps as CPO
import CustomStringOps as CSO
###############
# AUTHOR INFO #
###############
__author__ = "Michael Gruenstaeudl, PhD <[email protected]>"
__copyright__ = "Copyright (C) 2015 Michael Gruenstaeudl"
__info__ = "Visualizing Character State Reconstruction Results using TreeGraph2 (http://treegraph.bioinfweb.info/)"
__version__ = "2015.12.15.1100"
#################
# COMPATIBILITY #
#################
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
#############
# DEBUGGING #
#############
#import pdb
#pdb.set_trace()
####################
# GLOBAL VARIABLES #
####################
labelMargin_xml_1 = '<LabelMargin Left="1.0" Top="0.0" Right="1.0" Bottom="0.0"> </LabelMargin>'
labelMargin_xml_2 = '<LabelMargin Left="1.0" Top="1.0" Right="1.0" Bottom="1.0"> </LabelMargin>'
#labelMargin_xml_3 = '<LabelMargin Left="1.0" Top="0.0" Right="1.0" Bottom="1.0"></LabelMargin>' # optimal for phylograms
pieChartLabel_xml = '<PieChartLabel LineColor="#000000" LineWidth="0.2" Width="8.0" Height="8.0" InternalLines="true" NullLines="false" Id="internals" Above="true" LineNo="0" LinePos="0"> </PieChartLabel>'
textLabel_xml = '<TextLabel Text="" IsDecimal="false" TextColor="#000000" TextHeight="3.0" TextStyle="" FontFamily="Arial" DecimalFormat="#0.0#####" LocaleLang="en" LocaleCountry="" LocaleVariant="" Id="1" Above="true" LineNo="" LinePos="0"> </TextLabel>'
legendText_xml = '<Legend Text="" IsDecimal="false" Anchor0="" LegendPos="0" MinTreeDistance="0.0" LegendSpacing="1.0" LegendStyle="brace" Orientation="horizontal" LineColor="" LineWidth="3.0" EdgeRadius="0.0" TextColor="#FFFFFF" TextHeight="6.0" TextStyle="" FontFamily="Arial" DecimalFormat="#0.0#####" LocaleLang="en" LocaleCountry="" LocaleVariant=""> <LegendMargin Left="0.0" Top="0.0" Right="1.0" Bottom="0.0"></LegendMargin></Legend>'
default_palette = ["#8dd3c7","#ffffb3","#bebada","#fb8072","#80b1d3","#fdb462","#b3de69","#fccde5","#d9d9d9","#bc80bd","#ccebc5","#ffed6f","#a6cee3","#1f78b4","#b2df8a","#33a02c","#fb9a99","#e31a1c","#fdbf6f","#ff7f00","#cab2d6","#6a3d9a","#ffff99","#b15928"]
###########
# CLASSES #
###########
class CustomizeXTG_Nodes:
''' class for parsing XTG files: turning raw XTG code into publication-ready XTG code '''
def __init__(self, a):
self.inFn = a
def go(self):
# 1. Parsing of .xtg file:
try:
tree = _ET.parse(self.inFn)
root = tree.getroot()
except:
            sys.exit(" ERROR: Parsing of XML code unsuccessful: " + str(sys.exc_info()[0]))
# 2. Customizing XTG file
for n in root.iter('Node'):
n.attrib["LineWidth"] = "0.6"
n.attrib["EdgeRadius"] = "0.9"
n.attrib["TextHeight"] = "5.0"
n.attrib["TextStyle"] = "Italics"
for n in root.iter('Branch'):
n.attrib["LineWidth"] = "0.6"
tree.write(self.inFn)
# DEPRECATED CLASS:
#class CustomizeXTG_Global:
# ''' class for parsing XTG files: turning raw XTG code into
# publication-ready XTG code '''
#
# def __init__(self, a, b):
# self.inStr = a
# self.flags = b
#
# def go(self):
# tree = _ET.fromstring(str(self.inStr))
# if "NOROOT" in self.flags.upper():
# tree.attrib["ShowRooted"] = "false"
# if PY2:
# return _ET.tostring(tree)
# if PY3:
# return _ET.tostring(tree).decode()
class AddPieCharts:
'''class for adding pie labels to phylogenetic trees in XTG format'''
def __init__(self, a, b, c):
self.inStr = a
self.piedata = b
self.colorDict = c
def go(self):
# 1. Parsing of XML code:
try:
tree = _ET.fromstring(self.inStr)
except:
            sys.exit(" ERROR: Parsing of XML code unsuccessful: " + str(sys.exc_info()[0]))
piedata = self.piedata.splitlines()
# 2. Inserting pie chart XTG lines into infile by looping through inlist values
# 2.1. Parsing of piedata
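        # Each piedata line is expected to look like "node_number,area:prop;area:prop"
        # (values may carry a trailing "*" to flag significant reconstructions),
        # e.g. "5,A:0.875;B:0.125*" -- this format is inferred from the parsing below.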
for line in piedata:
aList = line.split(",")
node = aList[0]
nodeN = "Node_" + node
nodeinfo = 'UniqueName="' + nodeN + '"'
# FUTURE CODE:
#if aList[1] == "0": # REACTIVATE, WHEN N OF TREES WITHOUT NODE RELEVANT
# print " Warning: Node " + node + " was not present in reconstruction trees."
#if nodeinfo not in self.inStr:
# print " Warning: Node " + node + " was not present in plotting tree."
#if aList[1] != "0" and nodeinfo in self.inStr:
aDict = _ordDict()
try:
inL = aList[1].split(";")
            except: # if aList[1] cannot be split (i.e., contains only a single elem)
inL = aList[1]
for i in inL:
if "E-" not in i and "0.00" not in i: # boolt = any("." in i for i in inL)
is_ast = bool("*" in inL)
m = i.strip("*").split(":")
area = m[0]
# enum = float(m[1]) # REACTIVATE, WHEN N OF TREES WITHOUT NODE RELEVANT
# divis = float(aList[1])
# prop = enum/divis
prop = float(m[1])
prop = str("{0:.3f}".format(prop))
if is_ast:
prop = prop + " *" # Adding back asterisk for significant reconstructions
aDict[area] = prop
if len(list(aDict.keys())) < 1:
print(" Warning: Pie data for node " + node + " not parsed.")
if type(list(aDict.values())) == list:
aSum = sum([float(i) for i in list(aDict.values())])
if aSum > 1: # If the reconstruction values do not constitute percentages
aDict.update((x, str("{0:.3f}".format(float(y)/aSum))) for x, y in list(aDict.items()))
for n in tree.iter("Node"): # Must be inside loop "for line in piedata"
if n.attrib["UniqueName"] == nodeN: # Stop at the correct node
# 2.2. Addition of pie labels
for c, (k, v) in enumerate(list(aDict.items()), start=1):
textLabel = _ET.fromstring(textLabel_xml)
textLabel.attrib["Text"] = k + " " + v
textLabel.attrib["LineNo"] = str(c)
                        textLabel.attrib["Id"] = k + " " + v # Important: this Id must not be reused by any other label on this particular branch
labelMargin_1 = _ET.fromstring(labelMargin_xml_1)
textLabel.insert(0, labelMargin_1)
n.find("Branch").append(textLabel) # Insert the textLabel into xtg code
# Addition of dummy text label for proper alignment - do NOT delete
textLabel = _ET.fromstring(textLabel_xml)
textLabel.attrib["Text"] = ""
textLabel.attrib["LineNo"] = "0"
labelMargin_1 = _ET.fromstring(labelMargin_xml_1)
textLabel.insert(0, labelMargin_1)
n.find("Branch").append(textLabel)
# 2.3. Addition of pie charts
pieChartLabel_1 = _ET.fromstring(pieChartLabel_xml)
labelMargin_2 = _ET.fromstring(labelMargin_xml_2)
pieChartLabel_1.insert(0, labelMargin_2)
dis = _ET.Element("DataIds")
dis.text = " " # trick! Getting an empty tag.
tmp = _ET.Element("Temp")
tmp.text = " "
for k, v in list(aDict.items()): # Loop through aDict and append PieColor definitions
di = _ET.Element("DataId")
di.attrib["PieColor"] = self.colorDict[k] # Color dictionary in action here
di.text = k
dis.append(di)
invd = _ET.Element("InvisibleData")
invd.attrib["Id"] = k
invd.attrib["Text"] = v
invd.attrib["IsDecimal"] = "true"
tmp.append(invd)
pieChartLabel_1.append(dis)
n.find("Branch").append(pieChartLabel_1)
n.find("Branch").append(tmp)
# REACTIVATE, WHEN TIME
# 1.4. Switch position of bootstrap values to below branches, b/c they would conflict with pie charts
# if "BS" in self.flags.upper():
# new_out_list = []
# kw = '<TextLabel Text="'
#
# for line in out_list:
# if (kw in line and CSO.afind(line, kw) in
# [str(s) for s in [1]+range(5, 10)]):
# bsvalue = CSO.exstr(line, 'Text="', '"')[:-2]
# line = CSO.replstr(line, 'Text="', '"', bsvalue)
# line = line.replace('IsDecimal="true"', 'IsDecimal="false"')
# line = line.replace('Above="true"', 'Above="false"')
# new_out_list.append(line)
#
# if (kw in line and CSO.afind(line, kw) in
# [str(s) for s in [0]]):
# line = line.replace('Above="true"', 'Above="false"')
# new_out_list.append(line)
#
# else:
# new_out_list.append(line)
if PY2:
outStr = _ET.tostring(tree)
if PY3:
outStr = _ET.tostring(tree).decode() # in Python 3, files are read as byte-like by default; need to decode to string
outStr = outStr.replace("<Temp>","")
outStr = outStr.replace("</Temp>","")
return outStr
class ConversionNEX2XTG:
    ''' class for performing nex2xtg conversion in TreeGraph2 via commandline;
        needs the input file name <a>, the output file name <b>, and the path
        to TreeGraph2 <c> as input '''
def __init__(self, a, b, c):
self.inFn = a
self.outFn = b
self.pathToTG2 = c
def go(self):
# 1. Check if nex-file present
if not _isfile(self.inFn):
sys.exit(" ERROR: .nex file not found.")
# 2. Performing nex2xtg via Treegraph2
cmdL = ["java -jar", self.pathToTG2, "-convert", self.inFn, "-xtg", self.outFn]
CFO.extprog(cmdL)
        # 3. Check if xtg-file was generated
        if not _isfile(self.outFn):
            sys.exit(" ERROR: Conversion .nex to .xtg unsuccessful.")
class ConversionXTG2IMG:
''' class for performing XTG2IMG conversion in TreeGraph2 via commandline '''
def __init__(self, a, b, c):
self.inFn = a
self.pathToTG2 = b
self.flags = c
def go(self):
#cwd = os.getcwd()
# 1. Check if xtg-file present
if not _isfile(self.inFn):
sys.exit(" ERROR: .xtg file not found.")
# 2. Set if plotting as phylo- or cladogram
resolut = "-width 600mm -res 120ppi"
if self.flags.upper() in ["C", "CLADO"]:
pass
if self.flags.upper() in ["P", "PHYLO"]:
resolut = "-phyl " + resolut
# 3. Save as .svg and as .png
for fEnd in [".png", ".svg"]:
outPath = self.inFn + fEnd
cmdL = ["java -jar", self.pathToTG2, "-image", self.inFn, outPath, resolut]
CFO.extprog(cmdL)
class LabelingNodesXTG:
'''class for labeling nodes of phylogenetic tree in XTG format'''
def __init__(self, a):
self.inFn = a
def go(self):
# 1. Check if .xtg file present
if not _isfile(self.inFn):
sys.exit(" ERROR: .xtg file not found.")
# 2. Parsing of .xtg file:
try:
tree = _ET.parse(self.inFn)
root = tree.getroot()
except:
            sys.exit(" ERROR: Parsing of XML code unsuccessful: " + str(sys.exc_info()[0]))
# 3. Adding node labels:
for c,n in enumerate(root.iter('Node')):
n.attrib["UniqueName"] = "Node_"+str(c+1)
tree.write(self.inFn)
class PrettyPrintXTG:
'''class for adding tabs to make XTG code human-readable'''
def __init__(self, a):
self.inStr = a
def go(self):
outStr = self.inStr
outStr = CSO.rmblanklns(outStr) # Remove empty lines from string
outStr = outStr.replace("\n\n", "\n") # Remove any double newlines
outStr = re.sub(r'<([A-Z])', r'\n<\1', outStr, flags=re.M) # Newline before every xml starttag
outStr = re.sub('^<Branch ', '\t<Branch ', outStr, flags=re.M) # don't alter spaces in keywords; they are important
outStr = re.sub('^<LeafMargin ', '\t<LeafMargin ', outStr, flags=re.M)
outStr = outStr.replace('\n<LabelMargin ', ' <LabelMargin ') # Do NOT do via re.sub, which acts line by line
outStr = re.sub('^<TextLabel ', '\t\t<TextLabel ', outStr, flags=re.M)
outStr = re.sub('^<PieChartLabel ', '\t\t<PieChartLabel ', outStr, flags=re.M)
outStr = re.sub('^<DataId ', '\t\t\t<DataId ', outStr, flags=re.M)
outStr = re.sub('^<DataIds>', '\t\t\t<DataIds>', outStr, flags=re.M)
outStr = re.sub('^<InvisibleData ', '\t\t\t<InvisibleData ', outStr, flags=re.M)
outStr = outStr.replace("\t\n", "\n")
outStr = CSO.rmblanklns(outStr) # Remove empty lines from string
return outStr
###########
# MODULES #
###########
def main(reconstrFn, treeFn, pathToTG2, charsFn, charnum, colordictFn, flags, keepTmpFile, verbose):
#######################
# 0. Setting file names
#######################
fileprfx = CSO.rmpath(CSO.rmext(reconstrFn))
tmpFn = fileprfx + ".tmp"
compiledInFn = fileprfx + ".xtg"
try:
############################################
#1. Loading infiles, customizing color dict.
############################################
# 1.1. Loading infiles
reconstrD = CFO.loadR(reconstrFn)
# 1.2. Loading color dictionary and running rudimentary checks
# 1.2.1. Loading areas
handle = [a.split(",")[1] for a in reconstrD.splitlines() if ":" in a]
charL = []
for line in handle:
indxL = [x-1 for x in CSO.findall(":", line)]
resL = [list(line)[i] for i in indxL]
charL.extend(resL)
        charL = sorted(set(charL)) # command 'set' keeps only unique items
# 1.2.2. If color dictionary supplied by user
if colordictFn:
# 1.2.2.1. Loading color dictionary
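            # Each color dictionary line is expected to be "char,hexcolor", e.g.
            # "A,#8dd3c7" (single-character code, 7-character hex color), as
            # enforced by the checks below.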
color_specs = CFO.loadRL(colordictFn)
color_specs = [_f for _f in color_specs if _f] # removing all empty elements of tmp
colorDict = {}
# 1.2.2.2. Checking individual colors
for line in color_specs:
tmp = line.split(",")
char = tmp[0]
color = tmp[1]
if len(char) != 1:
sys.exit(" ERROR: Please use only single letters or digits as char codes in your color dictionary.")
if color[0] != "#" or len(color) != 7:
                    sys.exit(" ERROR: The colors in your color dictionary do not constitute hex codes.")
colorDict[char] = color
# 1.2.2.3. Checking if as many colors as reconstructions
if not CSO.sublistinlist(list(colorDict.keys()), charL):
sys.exit(" ERROR: Number of chars in reconstruction results unequal to number of chars in color dictionary.")
# 1.2.3. If color dictionary not supplied by user
if not colordictFn:
# 1.2.3.1. Generate default color dictionary
colorDict = {}
for c,char in enumerate(charL, start=0):
colorDict[char] = default_palette[c]
#######################################################
# 2. Converting NEX to XTG, extracting relevant section
#######################################################
# 2.1. Conversion from .nex to .xtg format
if verbose.upper() in ["T", "TRUE"]:
print(" Step 1: Conversion .nex -> .xtg")
ConversionNEX2XTG(treeFn, tmpFn, pathToTG2).go()
out_step1 = CFO.loadR(tmpFn)
# 2.2. Extraction of relevant XML part
try:
split1 = out_step1.split("<GlobalFormats")
split1[1] = "<GlobalFormats" + split1[1]
split2 = split1[1].split("</GlobalFormats>") # Extract element "<Tree></Tree>", because parser cannot read flanking code
split2[0] = split2[0] + "</GlobalFormats>" # Reattaching delimiter keyword
split3 = split2[1].split("</Tree>")
split3[0] = split3[0] + "</Tree>" # Reattaching delimiter keyword
out_step1 = split3[0]
start_cap_1 = split1[0] # Needed for later
start_cap_2 = split2[0] # Needed for later
end_cap = "</TreegraphDocument>" # Needed for later
except IndexError:
sys.exit(" ERROR: Malformed .xtg file.")
        # 2.3. Make certain that out_step1 contains only the element "<Tree></Tree>"
tmp = CSO.exstr(out_step1, "<Tree>", "</Tree>")
out_step1 = "<Tree>" + tmp + "</Tree>"
#############################
# 3. Labelling internal nodes
#############################
# 3.1. Pretty-print the .xtg file
if verbose.upper() in ["T", "TRUE"]:
print(" Step 2: Pretty-print of xtg code")
out_step2 = PrettyPrintXTG(out_step1).go()
CFO.saveFile(tmpFn, out_step2)
# 3.2. Label internal nodes
if verbose.upper() in ["T", "TRUE"]:
print(" Step 3: Labeling nodes of tree")
LabelingNodesXTG(tmpFn).go()
###########################################
# 4. Customize graphical param. in XTG file
###########################################
if verbose.upper() in ["T", "TRUE"]:
print(" Step 4: Customize XTG file")
CustomizeXTG_Nodes(tmpFn).go()
#start_cap_2 = CustomizeXTG_Global(start_cap_2, flags).go()
##############################
# 5. Add Pie labels and charts
##############################
out_step5 = CFO.loadR(tmpFn)
if reconstrD:
if verbose.upper() in ["T", "TRUE"]:
print(" Step 5: Adding pie labels and charts")
out_step5 = AddPieCharts(out_step5, reconstrD, colorDict).go() # Indentation important, because pie data if statement above
else:
print(" Warning: No pie data available.")
CFO.saveFile(tmpFn, out_step5)
############################
# 6. Improving visualization
############################
if verbose.upper() in ["T", "TRUE"]:
print(" Step 6: Improving visualization")
# 6.1. Option to visualize char states of terminal taxa
if charsFn and charnum and colorDict:
try:
char_specs = CFO.loadRL(charsFn)
char_specs = [_f for _f in char_specs if _f] # removing all empty elements of char_specs
terminalDict = {}
for line in char_specs:
tmp = line.split(",")
taxon = tmp[0]
char = tmp[int(charnum)].rstrip()
terminalDict[taxon] = char
except:
sys.exit(" ERROR: Generating dictionary of character states of terminal taxa not successful.")
try:
for k in list(terminalDict.keys()):
terminalDict[k] = colorDict[terminalDict[k]]
except:
sys.exit(" ERROR: Character states in color dictionary not identical to those of terminal taxa.")
try:
tree = _ET.parse(tmpFn)
root = tree.getroot()
except:
print(sys.exc_info()[0])
sys.exit(" ERROR: Parsing of XML code unsuccessful.")
for n in root.iter("Tree"):
for k,v in six.iteritems(terminalDict):
for i in n.findall(".//Node"):
if "Text" in i.attrib and i.attrib["Text"] in [k, k.replace("_"," ")]:
uniqueName = i.attrib["UniqueName"]
legendText = _ET.fromstring(legendText_xml)
legendText.attrib["Anchor0"] = uniqueName
legendText.attrib["LineColor"] = v
n.append(legendText)
if PY2:
out_handle = _ET.tostring(root)
if PY3:
out_handle = _ET.tostring(root).decode()
CFO.saveFile(tmpFn, out_handle)
# 6.2. Minor improvements to visual aspects
try:
tree = _ET.parse(tmpFn)
root = tree.getroot()
except:
print(sys.exc_info()[0])
sys.exit(" ERROR: Parsing of XML code unsuccessful.")
for n in root.iter('Text'):
n.attrib["TextColor"] = "#808080" # Make pielabels grey
if "BS" in flags.upper():
                bsvalue = int(float(n.attrib["Text"])) # strip decimals from bootstrap values
                n.attrib["Text"] = str(bsvalue) # ElementTree attribute values must be strings
n.attrib["IsDecimal"] = "false"
if PY2:
out_step6 = _ET.tostring(root)
if PY3:
out_step6 = _ET.tostring(root).decode()
CFO.saveFile(tmpFn, out_step6)
###############################
# 7. Pretty-print the .xtg file
###############################
if verbose.upper() in ["T", "TRUE"]:
print(" Step 7: Pretty-print of xtg code")
out_handle = PrettyPrintXTG(out_step6).go()
CFO.saveFile(tmpFn, out_handle)
########################
# 8. Combining all parts
########################
if verbose.upper() in ["T", "TRUE"]:
print(" Step 8: Combining all xml sections")
finalL = [start_cap_1, start_cap_2, out_handle, end_cap]
CFO.saveFile(compiledInFn, "\n".join(finalL))
########################################
# 9. Conversion from .xtg to .png format
########################################
if verbose.upper() in ["T", "TRUE"]:
print(" Step 9: Conversion .xtg -> .png")
ConversionXTG2IMG(compiledInFn, pathToTG2, flags).go()
####################
# 10. Deleting files
####################
# 10.1. Decision on deleting temporary input file
if keepTmpFile.upper() in ["F", "FALSE"]:
CFO.deleteFile(compiledInFn)
# 10.2. Always delete unnecessary files
finally:
try:
CFO.deleteFile(tmpFn)
except:
pass
############
# ARGPARSE #
############
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=" -- ".join([__author__, __copyright__, __info__, __version__]))
parser.add_argument('-r', '--reconstrdata',
help='/path_to_input/reconstrdata.csv',
required=True)
parser.add_argument('-p', '--plottree',
help='/path_to_input/plotting_tree.tre',
required=True)
parser.add_argument('-s', '--software',
help='/path_to_software/TreeGraph.jar',
required=True)
parser.add_argument('-c', '--chars',
help='/path_to_input/character_state_distribution.csv',
required=False)
parser.add_argument('-n', '--charnumber',
                        help='Which character state distribution is to be used (e.g. 1); an integer',
default='1',
required=False)
parser.add_argument('-d', '--dictionary',
help='/path_to_input/color_dictionary.csv',
required=False)
parser.add_argument('-f', '--flags',
help="Select type of tree representation: 'CLADO' (for cladogram) or 'PHYLO' (for phylogram)",
required=False,
default="CLADO")
parser.add_argument('-k', '--keep',
help='Keeping the temporary input file; a boolean operator',
required=False,
default='False')
parser.add_argument('-v', '--verbose',
                        help='Displaying full output; a boolean operator',
required=False,
default='False')
parser.add_argument('-V', '--version',
help='Print version information and exit',
action='version',
version='%(prog)s ' + __version__)
args = parser.parse_args()
########
# MAIN #
########
main(args.reconstrdata, args.plottree, args.software, args.chars, args.charnumber, args.dictionary, args.flags, args.keep, args.verbose)
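# Example invocation (illustrative; the script name and file names are placeholders):
#   python visualize_CSR_with_TreeGraph2.py \
#       -r reconstrdata.csv -p plotting_tree.tre -s /opt/TreeGraph2/TreeGraph.jar \
#       -c character_state_distribution.csv -n 1 -d color_dictionary.csv \
#       -f PHYLO -k False -v True
# This converts the plotting tree to XTG via TreeGraph2, adds pie charts from the
# reconstruction results, and writes <prefix>.xtg plus .png/.svg images.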
|
|
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2015 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import collections
import imp
import sys
# Plugin install_dependencies_plugin can reload pip_common and pip_utils. Do not use from ... import ...
from pybuilder import pip_utils
from pybuilder.core import (before,
task,
description,
use_plugin,
init,
Dependency,
RequirementsFile)
from pybuilder.errors import BuildFailedException
from pybuilder.utils import mkdir, as_list, safe_log_file_name, tail_log
__author__ = "Alexander Metzner, Arcadiy Ivanov"
use_plugin("core")
@init
def initialize_install_dependencies_plugin(project):
project.set_property_if_unset("dir_install_logs", "$dir_logs/install_dependencies")
project.set_property_if_unset("install_dependencies_index_url", None)
project.set_property_if_unset("install_dependencies_local_mapping", {})
project.set_property_if_unset("install_dependencies_extra_index_url", None)
project.set_property_if_unset("install_dependencies_trusted_host", None)
# Deprecated - has no effect
project.set_property_if_unset("install_dependencies_upgrade", False)
project.set_property_if_unset("install_dependencies_insecure_installation", [])
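# Example (illustrative, not part of this plugin): a project's build.py can
# activate the plugin and override the properties initialized above. The index
# URL and host below are placeholders.
#
#   from pybuilder.core import use_plugin, init
#
#   use_plugin("python.install_dependencies")
#
#   @init
#   def set_properties(project):
#       project.set_property("install_dependencies_index_url",
#                            "https://pypi.example.org/simple")
#       project.set_property("install_dependencies_trusted_host",
#                            "pypi.example.org")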
@task
@description("Installs all (both runtime and build) dependencies specified in the build descriptor")
def install_dependencies(logger, project):
logger.info("Installing all dependencies")
install_dependency(logger, project, as_list(project.build_dependencies) + as_list(project.dependencies))
@task
@description("Installs all build dependencies specified in the build descriptor")
def install_build_dependencies(logger, project):
logger.info("Installing build dependencies")
install_dependency(logger, project, project.build_dependencies)
@task
@description("Installs all runtime dependencies specified in the build descriptor")
def install_runtime_dependencies(logger, project):
logger.info("Installing runtime dependencies")
install_dependency(logger, project, project.dependencies)
@task
@description("Displays all dependencies the project requires")
def list_dependencies(project):
print("\n".join(
map(lambda d: "{0}".format(" ".join(pip_utils.as_pip_install_target(d))),
project.build_dependencies + project.dependencies)))
@before((install_build_dependencies, install_runtime_dependencies, install_dependencies), only_once=True)
def create_install_log_directory(logger, project):
log_dir = project.expand("$dir_install_logs")
logger.debug("Creating log directory '%s'", log_dir)
mkdir(log_dir)
def install_dependency(logger, project, dependencies):
dependencies_to_install, orig_installed_pkgs, dependency_constraints = _filter_dependencies(logger, project,
dependencies)
batch_dependencies = []
standalone_dependencies = []
local_mapping = project.get_property("install_dependencies_local_mapping")
constraints_file = project.expand_path("$dir_target", "install_dependencies_constraints")
pip_utils.create_constraint_file(constraints_file, dependency_constraints)
for dependency in dependencies_to_install:
url = getattr(dependency, "url", None)
if dependency.name in local_mapping or url:
install_type = "standalone"
logger.debug("Dependency '%s' has to be installed standalone" % dependency)
standalone_dependencies.append(dependency)
else:
install_type = "batch"
logger.debug("Dependency '%s' will be included in batch install" % dependency)
batch_dependencies.append(dependency)
logger.info("Processing %s dependency '%s%s'%s", install_type, dependency.name,
dependency.version if dependency.version else "",
" from %s" % url if url else "")
for standalone_dependency in standalone_dependencies:
url = getattr(standalone_dependency, "url", None)
        log_file = project.expand_path("$dir_install_logs", safe_log_file_name(standalone_dependency.name))
_do_install_dependency(logger=logger,
project=project,
dependency=standalone_dependency,
upgrade=True,
eager_upgrade=False,
force_reinstall=url,
constraint_file=constraints_file,
                               target_dir=local_mapping.get(standalone_dependency.name),
log_file=log_file)
if len(batch_dependencies):
log_file = project.expand_path("$dir_install_logs", "install_batch")
_do_install_dependency(logger=logger,
project=project,
dependency=batch_dependencies,
upgrade=True,
eager_upgrade=False,
force_reinstall=False,
constraint_file=constraints_file,
target_dir=None,
log_file=log_file)
__reload_pip_if_updated(logger, dependencies_to_install)
def _filter_dependencies(logger, project, dependencies):
dependencies = as_list(dependencies)
installed_packages = pip_utils.get_package_version(dependencies)
dependencies_to_install = []
dependency_constraints = []
for dependency in dependencies:
logger.debug("Inspecting dependency '%s'" % dependency)
if isinstance(dependency, RequirementsFile):
# Always add requirement file-based dependencies
logger.debug("Dependency '%s' is a requirement file and will be included" % dependency)
dependencies_to_install.append(dependency)
continue
elif isinstance(dependency, Dependency):
if dependency.version:
dependency_constraints.append(dependency)
logger.debug(
"Dependency '%s' is added to the list of installation constraints" % dependency)
if dependency.url:
# Always add dependency that is url-based
logger.debug("Dependency '%s' is URL-based and will be included" % dependency)
dependencies_to_install.append(dependency)
continue
if pip_utils.should_update_package(dependency.version) \
and not getattr(dependency, "version_not_a_spec", False):
# Always add dependency that has a version specifier indicating desire to always update
logger.debug("Dependency '%s' has a non-exact version specifier and will be included" % dependency)
dependencies_to_install.append(dependency)
continue
dependency_name = dependency.name.lower()
if dependency_name not in installed_packages:
# If dependency not installed at all then install it
logger.debug("Dependency '%s' is not installed and will be included" % dependency)
dependencies_to_install.append(dependency)
continue
if dependency.version \
and not pip_utils.version_satisfies_spec(dependency.version, installed_packages[dependency_name]):
# If version is specified and version constraint is not satisfied
logger.debug("Dependency '%s' is not satisfied by installed dependency version '%s' and will be included" %
(dependency, installed_packages[dependency_name]))
dependencies_to_install.append(dependency)
continue
logger.debug("Dependency '%s' is already up-to-date and will be skipped" % dependency)
return dependencies_to_install, installed_packages, dependency_constraints
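# Illustrative sketch (not part of the plugin): the decision tree above, reduced
# to a predicate over a single dependency. "dep" stands in for a pybuilder
# Dependency, "installed" for the mapping returned by
# pip_utils.get_package_version(), "is_non_exact_spec" approximates
# pip_utils.should_update_package (e.g. True for ">=1.0"), and "satisfies" for
# pip_utils.version_satisfies_spec. RequirementsFile entries, which are always
# installed, are not covered here.
def _example_needs_install(dep, installed, is_non_exact_spec, satisfies):
    if getattr(dep, "url", None):
        return True   # URL-based dependencies are always (re)installed
    if dep.version and is_non_exact_spec(dep.version) \
            and not getattr(dep, "version_not_a_spec", False):
        return True   # open-ended version spec: let pip look for updates
    name = dep.name.lower()
    if name not in installed:
        return True   # not installed at all
    if dep.version and not satisfies(dep.version, installed[name]):
        return True   # installed version violates the requested constraint
    return False      # already satisfied, skip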
def _do_install_dependency(logger, project, dependency, upgrade, eager_upgrade,
force_reinstall, constraint_file, target_dir, log_file):
batch = isinstance(dependency, collections.Iterable)
exit_code = pip_utils.pip_install(
install_targets=dependency,
index_url=project.get_property("install_dependencies_index_url"),
extra_index_url=project.get_property("install_dependencies_extra_index_url"),
upgrade=upgrade,
insecure_installs=project.get_property("install_dependencies_insecure_installation"),
force_reinstall=force_reinstall,
target_dir=target_dir,
verbose=project.get_property("verbose"),
trusted_host=project.get_property("install_dependencies_trusted_host"),
constraint_file=constraint_file,
eager_upgrade=eager_upgrade,
logger=logger,
outfile_name=log_file)
if exit_code != 0:
if batch:
dependency_name = " batch dependencies."
else:
dependency_name = " dependency '%s'." % dependency.name
raise BuildFailedException("Unable to install%s See %s for full details:\n%s",
dependency_name,
log_file,
tail_log(log_file))
def __reload_pip_if_updated(logger, dependencies_to_install):
reload_pip = False
for dependency in dependencies_to_install:
if dependency.name == "pip":
reload_pip = True
break
if reload_pip:
__reload_pip(logger)
def __reload_pip(logger):
logger.debug("Reloading PIP-related modules")
modules_to_unload = []
for module_name in sys.modules:
if module_name.startswith("pip.") or module_name == "pip":
modules_to_unload.append(module_name)
for module_name in modules_to_unload:
del sys.modules[module_name]
from pybuilder import pip_utils, pip_common
    # Note: reload() does not update objects that were bound via
    # "from ... import ..."; be deliberate about which modules get reloaded.
imp.reload(pip_common)
imp.reload(pip_utils)
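# Illustrative sketch (not part of the plugin): the same purge-and-reimport
# pattern for an arbitrary package, using importlib instead of imp.reload.
# Objects already bound via "from pkg import name" keep referring to the old
# module objects; only fresh attribute lookups see the reloaded code.
def _example_reload_package(package_name):
    import importlib
    import sys
    stale = [name for name in list(sys.modules)
             if name == package_name or name.startswith(package_name + ".")]
    for name in stale:
        del sys.modules[name]
    return importlib.import_module(package_name)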
|
|
import json
from django.contrib.auth.decorators import user_passes_test
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from crits.core.handlers import get_item_names
from crits.core.user_tools import user_can_view_data
from crits.core.user_tools import user_is_admin
from crits.raw_data.forms import UploadRawDataFileForm, UploadRawDataForm
from crits.raw_data.forms import NewRawDataTypeForm
from crits.raw_data.handlers import update_raw_data_tool_details
from crits.raw_data.handlers import update_raw_data_tool_name
from crits.raw_data.handlers import update_raw_data_type
from crits.raw_data.handlers import handle_raw_data_file
from crits.raw_data.handlers import delete_raw_data, get_raw_data_details
from crits.raw_data.handlers import generate_raw_data_jtable
from crits.raw_data.handlers import generate_raw_data_csv, new_inline_comment
from crits.raw_data.handlers import generate_inline_comments
from crits.raw_data.handlers import generate_raw_data_versions
from crits.raw_data.handlers import get_id_from_link_and_version
from crits.raw_data.handlers import add_new_raw_data_type, new_highlight
from crits.raw_data.handlers import update_raw_data_highlight_comment
from crits.raw_data.handlers import delete_highlight
from crits.raw_data.handlers import update_raw_data_highlight_date
from crits.raw_data.raw_data import RawDataType
@user_passes_test(user_can_view_data)
def raw_data_listing(request,option=None):
"""
Generate RawData Listing template.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param option: Whether or not we should generate a CSV (yes if option is "csv")
:type option: str
:returns: :class:`django.http.HttpResponse`
"""
if option == "csv":
return generate_raw_data_csv(request)
return generate_raw_data_jtable(request, option)
@user_passes_test(user_can_view_data)
def set_raw_data_tool_details(request, _id):
"""
Set the RawData tool details. Should be an AJAX POST.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param _id: The ObjectId of the RawData.
:type _id: str
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST':
details = request.POST['details']
analyst = request.user.username
return HttpResponse(json.dumps(update_raw_data_tool_details(_id,
details,
analyst)),
content_type="application/json")
else:
error = "Expected POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
@user_passes_test(user_can_view_data)
def set_raw_data_tool_name(request, _id):
"""
Set the RawData tool name. Should be an AJAX POST.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param _id: The ObjectId of the RawData.
:type _id: str
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST':
name = request.POST['name']
analyst = request.user.username
return HttpResponse(json.dumps(update_raw_data_tool_name(_id,
name,
analyst)),
content_type="application/json")
else:
error = "Expected POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
@user_passes_test(user_can_view_data)
def set_raw_data_type(request, _id):
"""
Set the RawData datatype. Should be an AJAX POST.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param _id: The ObjectId of the RawData.
:type _id: str
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST':
data_type = request.POST['data_type']
analyst = request.user.username
return HttpResponse(json.dumps(update_raw_data_type(_id,
data_type,
analyst)),
content_type="application/json")
else:
error = "Expected POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
@user_passes_test(user_can_view_data)
def set_raw_data_highlight_comment(request, _id):
"""
Set a highlight comment in RawData. Should be an AJAX POST.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param _id: The ObjectId of the RawData.
:type _id: str
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST':
comment = request.POST['comment']
line = request.POST['line']
analyst = request.user.username
return HttpResponse(json.dumps(update_raw_data_highlight_comment(_id,
comment,
line,
analyst)),
content_type="application/json")
else:
error = "Expected POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
@user_passes_test(user_can_view_data)
def set_raw_data_highlight_date(request, _id):
"""
Set a highlight date in RawData. Should be an AJAX POST.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param _id: The ObjectId of the RawData.
:type _id: str
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST':
date = request.POST['date']
line = request.POST['line']
analyst = request.user.username
return HttpResponse(json.dumps(update_raw_data_highlight_date(_id,
date,
line,
analyst)),
content_type="application/json")
else:
error = "Expected POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
@user_passes_test(user_can_view_data)
def add_inline_comment(request, _id):
"""
Add an inline comment to RawData. Should be an AJAX POST.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param _id: The ObjectId of the RawData.
:type _id: str
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST':
comment = request.POST['comment']
analyst = request.user.username
line_num = request.GET.get('line', 1)
return HttpResponse(json.dumps(new_inline_comment(_id,
comment,
line_num,
analyst)),
content_type="application/json")
else:
error = "Expected POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
@user_passes_test(user_can_view_data)
def add_highlight(request, _id):
"""
Set a line as highlighted for RawData. Should be an AJAX POST.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param _id: The ObjectId of the RawData.
:type _id: str
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST':
analyst = request.user.username
line_num = request.POST.get('line', 1)
line_data = request.POST.get('line_data', None)
return HttpResponse(json.dumps(new_highlight(_id,
line_num,
line_data,
analyst)),
content_type="application/json")
else:
error = "Expected POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
@user_passes_test(user_can_view_data)
def remove_highlight(request, _id):
"""
Remove a line highlight from RawData. Should be an AJAX POST.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param _id: The ObjectId of the RawData.
:type _id: str
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST':
analyst = request.user.username
line_num = request.POST.get('line', 1)
return HttpResponse(json.dumps(delete_highlight(_id,
line_num,
analyst)),
content_type="application/json")
else:
error = "Expected POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
@user_passes_test(user_can_view_data)
def get_inline_comments(request, _id):
"""
Get inline comments for RawData. Should be an AJAX POST.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param _id: The ObjectId of the RawData.
:type _id: str
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST':
return HttpResponse(json.dumps(generate_inline_comments(_id)),
content_type="application/json")
else:
error = "Expected POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
@user_passes_test(user_can_view_data)
def get_raw_data_versions(request, _id):
"""
Get a list of versions for RawData. Should be an AJAX POST.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param _id: The ObjectId of the RawData.
:type _id: str
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST':
return HttpResponse(json.dumps(generate_raw_data_versions(_id)),
content_type="application/json")
else:
error = "Expected POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
@user_passes_test(user_can_view_data)
def raw_data_details(request, _id):
"""
Generate RawData details page.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param _id: The ObjectId of the RawData.
:type _id: str
:returns: :class:`django.http.HttpResponse`
"""
template = 'raw_data_details.html'
analyst = request.user.username
(new_template, args) = get_raw_data_details(_id, analyst)
if new_template:
template = new_template
return render_to_response(template,
args,
RequestContext(request))
@user_passes_test(user_can_view_data)
def details_by_link(request, link):
"""
Generate RawData details page by link.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param link: The LinkId of the RawData.
:type link: str
:returns: :class:`django.http.HttpResponse`
"""
version = request.GET.get('version', 1)
return raw_data_details(request,
get_id_from_link_and_version(link, version))
@user_passes_test(user_can_view_data)
def upload_raw_data(request, link_id=None):
"""
Upload new RawData to CRITs.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param link_id: The LinkId of RawData if this is a new version upload.
:type link_id: str
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST':
if 'filedata' in request.FILES:
form = UploadRawDataFileForm(request.user,
request.POST,
request.FILES)
filedata = request.FILES['filedata']
data = filedata.read() # XXX: Should be using chunks here.
has_file = True
else:
form = UploadRawDataForm(request.user,request.POST)
data = request.POST.get('data', None)
has_file = False
if form.is_valid():
source = form.cleaned_data.get('source')
user = request.user.username
description = form.cleaned_data.get('description', '')
title = form.cleaned_data.get('title', None)
tool_name = form.cleaned_data.get('tool_name', '')
tool_version = form.cleaned_data.get('tool_version', '')
tool_details = form.cleaned_data.get('tool_details', '')
data_type = form.cleaned_data.get('data_type', None)
copy_rels = request.POST.get('copy_relationships', False)
link_id = link_id
bucket_list = form.cleaned_data.get('bucket_list')
ticket = form.cleaned_data.get('ticket')
method = form.cleaned_data.get('method', '') or 'Upload'
reference = form.cleaned_data.get('reference', '')
status = handle_raw_data_file(data, source, user,
description, title, data_type,
tool_name, tool_version, tool_details,
link_id,
method=method,
reference=reference,
copy_rels=copy_rels,
bucket_list=bucket_list,
ticket=ticket)
if status['success']:
jdump = json.dumps({
'message': 'raw_data uploaded successfully! <a href="%s">View raw_data</a>'
% reverse('crits.raw_data.views.raw_data_details',
args=[status['_id']]), 'success': True})
if not has_file:
return HttpResponse(jdump, content_type="application/json")
return render_to_response('file_upload_response.html',
{'response': jdump},
RequestContext(request))
else:
jdump = json.dumps({'success': False,
'message': status['message']})
if not has_file:
return HttpResponse(jdump, content_type="application/json")
return render_to_response('file_upload_response.html',
{'response': jdump},
RequestContext(request))
else:
jdump = json.dumps({'success': False,
'form': form.as_table()})
if not has_file:
return HttpResponse(jdump, content_type="application/json")
return render_to_response('file_upload_response.html',
{'response': jdump},
RequestContext(request))
else:
return render_to_response('error.html',
{'error': "Expected POST."},
RequestContext(request))
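# Illustrative sketch (not part of CRITs): the response convention used in
# upload_raw_data above. Plain-form (AJAX) submissions receive raw JSON, while
# file uploads are posted through an iframe, so the same JSON payload is
# wrapped in the 'file_upload_response.html' template for the page to unpack.
# "render" is a hypothetical stand-in for render_to_response with the request
# context already bound.
def _example_upload_response(jdump, has_file, render):
    if not has_file:
        return HttpResponse(jdump, content_type="application/json")
    return render('file_upload_response.html', {'response': jdump})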
@user_passes_test(user_is_admin)
def remove_raw_data(request, _id):
"""
Remove RawData from CRITs.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param _id: The ObjectId of the RawData to remove.
:type _id: str
:returns: :class:`django.http.HttpResponse`
"""
result = delete_raw_data(_id, '%s' % request.user.username)
if result:
return HttpResponseRedirect(reverse('crits.raw_data.views.raw_data_listing'))
else:
return render_to_response('error.html',
{'error': "Could not delete raw_data"})
@user_passes_test(user_can_view_data)
def new_raw_data_type(request):
"""
Add a new RawData datatype to CRITs. Should be an AJAX POST.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST' and request.is_ajax():
form = NewRawDataTypeForm(request.POST)
analyst = request.user.username
if form.is_valid():
result = add_new_raw_data_type(form.cleaned_data['data_type'],
analyst)
if result:
message = {'message': '<div>Raw Data Type added successfully!</div>',
'success': True}
else:
message = {'message': '<div>Raw Data Type addition failed!</div>',
'success': False}
else:
message = {'form': form.as_table()}
return HttpResponse(json.dumps(message),
content_type="application/json")
return render_to_response('error.html',
{'error':'Expected AJAX POST'})
@user_passes_test(user_can_view_data)
def get_raw_data_type_dropdown(request):
"""
    Generate RawData datatypes dropdown information. Should be an AJAX POST.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST' and request.is_ajax():
dt_types = get_item_names(RawDataType)
dt_final = []
for dt in dt_types:
dt_final.append(dt.name)
result = {'data': dt_final}
return HttpResponse(json.dumps(result),
content_type="application/json")
else:
error = "Expected AJAX POST"
return render_to_response("error.html",
{'error': error},
RequestContext(request))
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Resources management functions
"""
import sys
import bigml.api
from bigmler.utils import (dated, get_url, log_message, check_resource,
plural,
check_resource_error, log_created_resources)
from bigmler.reports import report
from bigmler.resourcesapi.common import set_basic_model_args, \
update_json_args, configure_input_fields, update_sample_parameters_args,\
wait_for_available_tasks, relative_input_fields, update_attributes
from bigmler.labels import label_model_args, get_all_labels
from bigmler.resourcesapi.common import SEED, EVALUATE_SAMPLE_RATE, \
ALL_FIELDS_QS, BOOSTING_OPTIONS
def set_label_ensemble_args(args, labels, multi_label_data,
number_of_ensembles, fields):
"""Set of args needed to build an ensemble per label
"""
if not args.model_fields_:
args.model_fields_ = relative_input_fields(fields, args.model_fields_)
if args.objective_field is None:
args.objective_field = fields.objective_field
try:
objective_id = fields.field_id(args.objective_field)
except ValueError as exc:
sys.exit(exc)
objective_field = fields.fields[objective_id]['name']
ensemble_args_list = []
for index in range(number_of_ensembles - 1, -1, -1):
label = labels[index]
all_labels = get_all_labels(multi_label_data)
(new_name, label_field, single_label_fields) = label_model_args(
args.name, label, all_labels, args.model_fields_,
objective_field)
ensemble_args = set_ensemble_args(args, name=new_name,
objective_id=label_field,
model_fields=single_label_fields,
fields=fields)
if multi_label_data is not None:
ensemble_args.update(
user_metadata={'multi_label_data': multi_label_data})
ensemble_args_list.append(ensemble_args)
return ensemble_args_list
def set_ensemble_args(args, name=None,
objective_id=None, model_fields=None, fields=None):
"""Return ensemble arguments dict
"""
if name is None:
name = args.name
if objective_id is None:
objective_id = args.objective_id_
if model_fields is None:
model_fields = args.model_fields_
ensemble_args = set_basic_model_args(args, name)
ensemble_args.update({
"missing_splits": args.missing_splits,
"ensemble_sample": {"seed": SEED if args.ensemble_sample_seed is None \
else args.ensemble_sample_seed},
"seed": SEED if args.seed is None else args.seed
})
if objective_id is not None and fields is not None:
ensemble_args.update({"objective_field": objective_id})
if args.boosting:
boosting_args = {}
for option in BOOSTING_OPTIONS:
if hasattr(args, option) and getattr(args, option) is not None:
boosting_args.update({option: getattr(args, option)})
ensemble_args.update({"boosting": boosting_args})
else:
ensemble_args.update({"number_of_models": args.number_of_models})
# If evaluate flag is on and no test_split flag is provided,
# we choose a deterministic sampling with
# args.sample_rate (80% by default) of the data to create the model
if (args.evaluate and args.test_split == 0 and
args.test_datasets is None and not args.dataset_off):
ensemble_args.update({"seed": SEED})
if args.sample_rate == 1:
args.sample_rate = EVALUATE_SAMPLE_RATE
if model_fields and fields is not None:
input_fields = configure_input_fields(fields, model_fields)
ensemble_args.update(input_fields=input_fields)
if args.pruning and args.pruning != 'smart':
ensemble_args.update(stat_pruning=(args.pruning == 'statistical'))
if args.node_threshold > 0:
ensemble_args.update(node_threshold=args.node_threshold)
if args.balance:
ensemble_args.update(balance_objective=True)
if args.weight_field:
try:
weight_field = fields.field_id(args.weight_field)
except ValueError as exc:
sys.exit(exc)
ensemble_args.update(weight_field=weight_field)
if args.objective_weights:
ensemble_args.update(objective_weights=args.objective_weights_json)
if args.random_candidates:
ensemble_args.update(random_candidates=args.random_candidates)
update_attributes(ensemble_args, args.json_args.get('model'))
ensemble_args = update_sample_parameters_args(ensemble_args, args)
ensemble_args["ensemble_sample"].update( \
{"rate": args.ensemble_sample_rate,
"replacement": args.ensemble_sample_replacement})
if 'ensemble' in args.json_args:
update_json_args(ensemble_args, args.json_args.get('ensemble'), fields)
return ensemble_args
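# Illustrative sketch (not part of bigmler): the boosting branch above simply
# copies every BOOSTING_OPTIONS attribute that is present and not None on the
# args namespace into a sub-dictionary. The option names used here are
# hypothetical placeholders, not the real BOOSTING_OPTIONS values.
def _example_collect_boosting_args(args, options=("iterations", "learning_rate")):
    boosting_args = {}
    for option in options:
        value = getattr(args, option, None)
        if value is not None:
            boosting_args[option] = value
    return boosting_args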
def create_ensembles(datasets, ensemble_ids, ensemble_args, args,
number_of_ensembles=1,
api=None, path=None, session_file=None, log=None):
"""Create ensembles from input data
"""
if api is None:
api = bigml.api.BigML()
ensembles = ensemble_ids[:]
existing_ensembles = len(ensembles)
model_ids = []
ensemble_args_list = []
if isinstance(ensemble_args, list):
ensemble_args_list = ensemble_args
if args.dataset_off and args.evaluate:
args.test_dataset_ids = datasets[:]
if not args.multi_label:
datasets = datasets[existing_ensembles:]
if number_of_ensembles > 0:
message = dated("Creating %s.\n" %
plural("ensemble", number_of_ensembles))
log_message(message, log_file=session_file,
console=args.verbosity)
inprogress = []
for i in range(0, number_of_ensembles):
wait_for_available_tasks(inprogress, args.max_parallel_ensembles,
api, "ensemble",
wait_step=args.number_of_models)
if ensemble_args_list:
ensemble_args = ensemble_args_list[i]
if args.dataset_off and args.evaluate:
multi_dataset = args.test_dataset_ids[:]
del multi_dataset[i + existing_ensembles]
ensemble = api.create_ensemble(multi_dataset,
ensemble_args,
retries=None)
else:
ensemble = api.create_ensemble(datasets, ensemble_args,
retries=None)
ensemble_id = check_resource_error(ensemble,
"Failed to create ensemble: ")
log_message("%s\n" % ensemble_id, log_file=log)
ensemble_ids.append(ensemble_id)
inprogress.append(ensemble_id)
ensembles.append(ensemble)
log_created_resources("ensembles", path, ensemble_id,
mode='a')
models, model_ids = retrieve_ensembles_models(ensembles, api, path)
if number_of_ensembles < 2 and args.verbosity:
message = dated("Ensemble created: %s\n" %
get_url(ensemble))
log_message(message, log_file=session_file,
console=args.verbosity)
if args.reports:
report(args.reports, path, ensemble)
return ensembles, ensemble_ids, models, model_ids
def retrieve_ensembles_models(ensembles, api, path=None):
"""Retrieves the models associated to a list of ensembles
"""
models = []
model_ids = []
for index, ensemble in enumerate(ensembles):
if (isinstance(ensemble, str) or
bigml.api.get_status(ensemble)['code'] != bigml.api.FINISHED):
try:
ensemble = check_resource(ensemble, api.get_ensemble,
raise_on_error=True)
ensembles[index] = ensemble
except Exception as exception:
sys.exit("Failed to get a finished ensemble: %s" %
str(exception))
model_ids.extend(ensemble['object']['models'])
if path is not None:
for model_id in model_ids:
log_created_resources("models", path, model_id, mode='a')
models = model_ids[:]
models[0] = check_resource(models[0], api.get_model,
query_string=ALL_FIELDS_QS,
raise_on_error=True)
return models, model_ids
def get_ensemble(ensemble, api=None, verbosity=True, session_file=None):
"""Retrieves remote ensemble in its actual status
"""
if api is None:
api = bigml.api.BigML()
if (isinstance(ensemble, str) or
bigml.api.get_status(ensemble)['code'] != bigml.api.FINISHED):
message = dated("Retrieving ensemble. %s\n" %
get_url(ensemble))
log_message(message, log_file=session_file,
console=verbosity)
ensemble = check_resource(ensemble, api.get_ensemble,
raise_on_error=True)
check_resource_error(ensemble, "Failed to get ensemble: ")
return ensemble
|
|
#
# Poradnia documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 12 06:34:12 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import inspect
import os
import sys
import django
from django.urls.resolvers import get_resolver
from django.utils.html import strip_tags
# Py3 compatible, TODO: rebuild config
from builtins import str as unicode
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath("."))
sys.path.append(os.path.abspath(".."))
os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.local"
django.setup()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# #needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"sphinx.ext.graphviz",
]
intersphinx_mapping = {
"python": ("https://python.readthedocs.io/en/v2.7.2/", None),
"django": (
"https://docs.djangoproject.com/en/dev/",
"https://docs.djangoproject.com/en/dev/_objects/",
),
"sphinx": ("https://sphinx.readthedocs.io/en/latest/", None),
"mailbox": ("https://django-mailbox.readthedocs.io/en/latest/", None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
# #source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Poradnia"
copyright = "2016, Adam Dobrawy"
author = "Adam Dobrawy"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.1"
# The full version, including alpha/beta/rc tags.
release = "0.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "pl"
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# #today = ''
# Else, today_fmt is used as the format for a strftime call.
# #today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# #default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# #add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# #add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# #show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# #modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# #keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# #html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# #html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# #html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# #html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# #html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# #html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# #html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# #html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# #html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# #html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# #html_additional_pages = {}
# If false, no module index is generated.
# #html_domain_indices = True
# If false, no index is generated.
# #html_use_index = True
# If true, the index is split into individual pages for each letter.
# #html_split_index = False
# If true, links to the reST sources are added to the pages.
# #html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# #html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# #html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# #html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# #html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# #html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# #html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "Poradniadoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "Poradnia.tex", "Poradnia Documentation", "Adam Dobrawy", "manual")
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# #latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# #latex_use_parts = False
# If true, show page references after internal links.
# #latex_show_pagerefs = False
# If true, show URL addresses after external links.
# #latex_show_urls = False
# Documents to append as an appendix to all manuals.
# #latex_appendices = []
# If false, no module index is generated.
# #latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "poradnia", "Poradnia Documentation", [author], 1)]
# If true, show URL addresses after external links.
# #man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"Poradnia",
"Poradnia Documentation",
author,
"Poradnia",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# #texinfo_appendices = []
# If false, no module index is generated.
# #texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# #texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# #texinfo_no_detailmenu = False
def process_django_model(app, what, name, obj, options, lines):
# This causes import errors if left outside the function
from django.db import models
# Only look at objects that inherit from Django's base model class
if inspect.isclass(obj) and issubclass(obj, models.Model):
# Grab the field list from the meta class
fields = obj._meta.fields
for field in fields:
# Decode and strip any html out of the field's help text
help_text = strip_tags(unicode(field.help_text))
# Decode and capitalize the verbose name, for use if there isn't
# any help text
verbose_name = unicode(field.verbose_name).capitalize()
if help_text:
# Add the model field to the end of the docstring as a param
# using the help text as the description
lines.append(":param {}: {}".format(field.attname, help_text))
else:
# Add the model field to the end of the docstring as a param
# using the verbose name as the description
lines.append(":param {}: {}".format(field.attname, verbose_name))
# Add the field's type to the docstring
if isinstance(
field, (models.ForeignKey, models.OneToOneField, models.ManyToManyField)
):
lines.append(
":type %s: %s to :class:`%s.%s`"
% (
field.attname,
type(field).__name__,
field.related_model.__module__,
field.related_model.__name__,
)
)
else:
lines.append(":type {}: {}".format(field.attname, type(field).__name__))
# Return the extended docstring
return lines
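# Illustrative sketch (not tied to Django): the shape of the reST lines the
# hook above appends for plain (non-relational) model fields. Each tuple below
# is a hypothetical stand-in for a field's attname, help_text, verbose_name
# and class name; related fields additionally get a ":class:`...`" reference.
def _example_model_param_lines(fields):
    lines = []
    for attname, help_text, verbose_name, type_name in fields:
        description = help_text or verbose_name.capitalize()
        lines.append(":param {}: {}".format(attname, description))
        lines.append(":type {}: {}".format(attname, type_name))
    return lines
# _example_model_param_lines([("created_on", "", "created on", "DateTimeField")])
# -> [':param created_on: Created on', ':type created_on: DateTimeField']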
def process_django_view(app, what, name, obj, options, lines):
res = get_resolver()
flat_patterns = []
def walker(flat_patterns, urlpatterns, namespace=None):
for pattern in urlpatterns:
if hasattr(pattern, "url_patterns"):
walker(flat_patterns, pattern.url_patterns, pattern.namespace)
else:
urlname = (
"{}:{}".format(namespace, pattern.name)
if namespace else
pattern.name
)
flat_patterns.append([urlname, pattern.callback])
walker(flat_patterns, res.url_patterns)
for urlname, callback in flat_patterns:
if (
hasattr(callback, "view_class") and callback.view_class == obj
) or callback == obj:
lines.append(":param url_name: ``%s``\n" % urlname)
return lines
def process_django_form(app, what, name, obj, options, lines):
from django import forms
if inspect.isclass(obj) and issubclass(obj, (forms.Form, forms.ModelForm)):
for fieldname, field in obj.base_fields.items():
lines.append(":param {}: {}".format(fieldname, field.label))
def setup(app):
app.connect("autodoc-process-docstring", process_django_model)
app.connect("autodoc-process-docstring", process_django_view)
app.connect("autodoc-process-docstring", process_django_form)
|
|
#!/usr/bin/env python
# Copyright (c) 2013, Carnegie Mellon University
# All rights reserved.
# Authors: Michael Koval <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of Carnegie Mellon University nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import abc
import functools
import logging
import numpy
import openravepy
from ..clone import Clone, CloneException
from ..futures import defer
from ..util import CopyTrajectory, GetTrajectoryTags, SetTrajectoryTags
from .exceptions import (ClonedPlanningError, MetaPlanningError,
PlanningError, UnsupportedPlanningError)
logger = logging.getLogger(__name__)
class Tags(object):
SMOOTH = 'smooth'
"""
    The `SMOOTH` tag means that waypoints are close enough together that
    derivatives at the waypoints can be approximated by divided differences,
    i.e. a spline can be fit safely without additional collision checking.
"""
CONSTRAINED = 'constrained'
"""
The `CONSTRAINED` tag means that the geometric path described by these
    waypoints respects a constraint. The path cannot be geometrically altered
    arbitrarily without risking a violation of the original constraint; only
    its timing should be changed.
"""
PLANNER = 'planner'
"""
The name of the planner used to generate a trajectory.
"""
METHOD = 'planning_method'
"""
The type of planning call used to generate a trajectory.
"""
PLAN_TIME = 'planning_time'
"""
The amount of time that was spent by a planner finding a solution.
"""
POSTPROCESS_TIME = 'postprocess_time'
"""
The amount of time that was spent modifying the trajectory for execution.
"""
EXECUTION_TIME = 'execution_time'
"""
The amount of time that was spent actually running a trajectory.
"""
DETERMINISTIC_TRAJECTORY = 'deterministic'
"""
Whether repeating the same planning query will produce the same trajectory.
"""
DETERMINISTIC_ENDPOINT = 'deterministic_endpoint'
"""
Whether repeating the same planning query will produce a trajectory with
the same endpoint as this trajectory.
"""
class LockedPlanningMethod(object):
"""
Decorate a planning method that locks the calling environment.
"""
def __init__(self, func):
self.func = func
def __call__(self, instance, robot, *args, **kw_args):
with robot.GetEnv():
# Perform the actual planning operation.
traj = self.func(instance, robot, *args, **kw_args)
# Tag the trajectory with the planner and planning method
# used to generate it. We don't overwrite these tags if
# they already exist.
tags = GetTrajectoryTags(traj)
tags.setdefault(Tags.PLANNER, instance.__class__.__name__)
tags.setdefault(Tags.METHOD, self.func.__name__)
SetTrajectoryTags(traj, tags, append=False)
return traj
def __get__(self, instance, instancetype):
# Bind the self reference and use update_wrapper to propagate the
# function's metadata (e.g. name and docstring).
wrapper = functools.partial(self.__call__, instance)
functools.update_wrapper(wrapper, self.func)
wrapper.is_planning_method = True
return wrapper
class ClonedPlanningMethod(LockedPlanningMethod):
"""
Decorate a planning method that clones the calling environment.
"""
def __call__(self, instance, robot, *args, **kw_args):
env = robot.GetEnv()
# Store the original joint values and indices.
joint_indices = [robot.GetActiveDOFIndices(), None]
joint_values = [robot.GetActiveDOFValues(), None]
try:
with Clone(env, clone_env=instance.env) as cloned_env:
cloned_robot = cloned_env.Cloned(robot)
# Store the cloned joint values and indices.
joint_indices[1] = cloned_robot.GetActiveDOFIndices()
joint_values[1] = cloned_robot.GetActiveDOFValues()
# Check for mismatches in the cloning and hackily reset them.
# (This is due to a possible bug in OpenRAVE environment
# cloning where in certain situations, the Active DOF ordering
# and values do not match the parent environment. It seems to
# be exacerbated by multirotation joints, but the exact cause
# and repeatability is unclear at this point.)
if not numpy.array_equal(joint_indices[0], joint_indices[1]):
logger.warning(
"Cloned Active DOF index mismatch: %s != %s",
str(joint_indices[0]), str(joint_indices[1]))
cloned_robot.SetActiveDOFs(joint_indices[0])
if not numpy.allclose(joint_values[0], joint_values[1]):
logger.warning(
"Cloned Active DOF value mismatch: %s != %s",
str(joint_values[0]), str(joint_values[1]))
cloned_robot.SetActiveDOFValues(joint_values[0])
traj = super(ClonedPlanningMethod, self).__call__(
instance, cloned_robot, *args, **kw_args)
return CopyTrajectory(traj, env=env)
except CloneException as e:
raise ClonedPlanningError(e)
class PlanningMethod(ClonedPlanningMethod):
def __init__(self, func):
logger.warn("Please explicitly declare a ClonedPlanningMethod "
"instead of using PlanningMethod.")
super(ClonedPlanningMethod, self).__init__(func)
class Planner(object):
def has_planning_method(self, method_name):
if hasattr(self, method_name):
method = getattr(self, method_name)
if hasattr(method, 'is_planning_method'):
return method.is_planning_method
else:
return False
else:
return False
def get_planning_method_names(self):
return filter(lambda method_name: self.has_planning_method(method_name), dir(self))
class BasePlanner(Planner):
def __init__(self):
super(BasePlanner, self).__init__()
self.env = openravepy.Environment()
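# Illustrative sketch (not part of prpy): a minimal planner showing how the
# decorators above are used. The class and method names are hypothetical, and
# the body merely returns a one-waypoint trajectory at the current
# configuration; a real planner would call into an actual planning backend.
class _ExamplePlanner(BasePlanner):
    @LockedPlanningMethod
    def PlanToExample(self, robot, goal, **kw_args):
        """Return a trivial trajectory (illustration only)."""
        traj = openravepy.RaveCreateTrajectory(self.env, '')
        traj.Init(robot.GetActiveConfigurationSpecification())
        traj.Insert(0, robot.GetActiveDOFValues())
        return traj
# Calling _ExamplePlanner().PlanToExample(robot, goal) would lock robot.GetEnv()
# and tag the result with Tags.PLANNER/Tags.METHOD via LockedPlanningMethod.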
class MetaPlanner(Planner):
__metaclass__ = abc.ABCMeta
def __init__(self):
super(MetaPlanner, self).__init__()
self._planners = list()
def has_planning_method(self, method_name):
for planner in self._planners:
if planner.has_planning_method(method_name):
return True
return False
def get_planning_method_names(self):
method_names = set()
for planner in self._planners:
method_names.update(planner.get_planning_method_names())
return list(method_names)
@abc.abstractmethod
def get_planners(self, method_name):
pass
def get_planners_recursive(self, method):
all_planners = set()
for planner in self.get_planners(method):
if isinstance(planner, MetaPlanner):
sub_planners = planner.get_planners_recursive(method)
all_planners = all_planners.union(sub_planners)
else:
all_planners.add(planner)
return list(all_planners)
def __dir__(self):
return self.get_planning_method_names()
def __getattr__(self, method_name):
if not self.has_planning_method(method_name):
raise AttributeError("Object {:s} has no attribute '{:s}'.".format(
repr(self), method_name))
def meta_wrapper(*args, **kw_args):
return self.plan(method_name, args, kw_args)
# Grab docstrings from the delegate planners.
meta_wrapper.__name__ = method_name
docstrings = list()
for planner in self.get_planners_recursive(method_name):
if planner.has_planning_method(method_name):
planner_method = getattr(planner, method_name)
docstrings.append((planner, planner_method))
# Concatenate the docstrings.
if docstrings:
meta_wrapper.__doc__ = ''
for planner, planner_method in docstrings:
formatted_docstring = ''
if isinstance(planner, MetaPlanner):
if planner_method.__doc__ is not None:
formatted_docstring += planner_method.__doc__
else:
# Header for this planner.
formatted_docstring += str(planner) + ': ' + planner_method.__name__ + '\n'
formatted_docstring += '-' * (len(formatted_docstring) - 1) + '\n'
# Docstring.
if planner_method.__doc__ is not None:
formatted_docstring += planner_method.__doc__
else:
formatted_docstring += '<no docstring>\n'
# Blank line.
formatted_docstring += '\n'
meta_wrapper.__doc__ += formatted_docstring
return meta_wrapper
class Sequence(MetaPlanner):
KNOWN_KWARGS = set(['allow_nondeterministic'])
def __init__(self, *planners, **kwargs):
assert self.KNOWN_KWARGS.issuperset(kwargs.keys())
super(Sequence, self).__init__()
self._planners = planners
self._allow_nondeterministic = kwargs.get(
'allow_nondeterministic', False)
def __str__(self):
return 'Sequence({:s})'.format(', '.join(map(str, self._planners)))
def get_planners(self, method_name):
return [planner for planner in self._planners
if planner.has_planning_method(method_name)]
def plan(self, method, args, kw_args):
from ..util import Timer
errors = dict()
is_sequence_deterministic = True
for planner in self._planners:
e = None
try:
if planner.has_planning_method(method):
logger.info('Sequence - Calling planner "%s".', str(planner))
planner_method = getattr(planner, method)
with Timer() as timer:
output = planner_method(*args, **kw_args)
if not is_sequence_deterministic:
# TODO: It is overly conservative to set _ENDPOINT,
# e.g. for PlanToConfiguration. Unfortunately, there is
# no easy way to detect this special case.
SetTrajectoryTags(output, {
Tags.DETERMINISTIC_TRAJECTORY: False,
Tags.DETERMINISTIC_ENDPOINT: False,
}, append=True)
if not self._allow_nondeterministic:
logger.warning(
'Tagging trajectory as non-deterministic because an'
' earlier planner in the Sequence threw a'
' non-deterministic PlanningError. Pass the'
' "allow_nondeterministic" to this Sequence'
' constructor if you intended this behavior.')
logger.info('Sequence - Planning succeeded after %.3f'
' seconds with "%s".',
timer.get_duration(), str(planner))
return output
else:
logger.debug('Sequence - Skipping planner "%s"; does not'
' have "%s" method.', str(planner), method)
except MetaPlanningError as e:
pass # Exception handled below.
except PlanningError as e:
logger.warning('Planning with %s failed: %s', planner, e)
# Exception handled below.
if e is not None:
if e.deterministic is None:
is_sequence_deterministic = False
logger.warning(
'Planner %s raised a PlanningError without the'
' "deterministic" flag set. Assuming the result'
' is non-deterministic.', planner)
elif not e.deterministic:
is_sequence_deterministic = False
errors[planner] = e
raise MetaPlanningError(
'All planners failed.', errors, deterministic=is_sequence_deterministic)
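# Illustrative sketch (not part of prpy): the control flow of Sequence.plan
# above with the timing, tagging and determinism bookkeeping stripped out.
# "callables" stands in for the list of bound planning methods; the first
# success wins and every failure is recorded so the final error reports all
# of them.
def _example_try_in_sequence(callables):
    errors = {}
    for fn in callables:
        try:
            return fn()
        except PlanningError as error:
            errors[fn] = error
    raise MetaPlanningError('All planners failed.', errors)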
class Ranked(MetaPlanner):
def __init__(self, *planners):
super(Ranked, self).__init__()
self._planners = planners
def __str__(self):
return 'Ranked({0:s})'.format(', '.join(map(str, self._planners)))
def get_planners(self, method_name):
return [planner for planner in self._planners
if planner.has_planning_method(method_name)]
def plan(self, method, args, kw_args):
all_planners = self._planners
futures = []
results = [None] * len(self._planners)
# Helper function to call a planner and return its result.
def call_planner(planner):
planning_method = getattr(planner, method)
return planning_method(*args, **kw_args)
# Find only planners that support the required planning method.
        # Call every planner in parallel using a concurrent executor and
# return the first non-error result in the ordering when available.
for index, planner in enumerate(all_planners):
if not planner.has_planning_method(method):
results[index] = PlanningError(
"{:s} does not implement method {:s}."
.format(planner, method))
continue
else:
futures.append((index, defer(call_planner, args=(planner,))))
# Each time a planner completes, check if we have a valid result
# (a planner found a solution and all higher-ranked planners had
# already failed).
for index, future in futures:
try:
return future.result()
except MetaPlanningError as e:
results[index] = e
except PlanningError as e:
logger.warning("Planning with {:s} failed: {:s}"
.format(planner, e))
results[index] = e
# TODO: if `cancel()` is supported, call it in a `finally` block here.
raise MetaPlanningError("All planners failed.",
dict(zip(all_planners, results)))
class FirstSupported(MetaPlanner):
def __init__(self, *planners):
super(FirstSupported, self).__init__()
self._planners = planners
def __str__(self):
return 'Fallback({:s})'.format(', '.join(map(str, self._planners)))
def get_planners(self, method_name):
return [planner for planner in self._planners
if planner.has_planning_method(method_name)]
def plan(self, method, args, kw_args):
for planner in self._planners:
if planner.has_planning_method(method):
plan_fn = getattr(planner, method)
try:
return plan_fn(*args, **kw_args)
except UnsupportedPlanningError:
continue
raise UnsupportedPlanningError()
class MethodMask(MetaPlanner):
def __init__(self, planner, methods):
super(MethodMask, self).__init__()
self._methods = set(methods)
self._planner = planner
self._planners = [planner]
def __str__(self):
return 'Only({:s}, methods={:s})'.format(
self._planner, list(self._methods))
def get_planners(self, method_name):
if method_name in self._methods:
return [self._planner]
else:
return []
def plan(self, method, args, kw_args):
if method in self._methods:
plan_fn = getattr(self._planner, method)
return plan_fn(*args, **kw_args)
else:
raise UnsupportedPlanningError()
|
|
# (C) Fractal Industries, Inc. 2016
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from copy import deepcopy
from datetime import datetime, timedelta
from hashlib import md5
from Queue import Empty, Queue
import re
import ssl
import time
import traceback
# 3p
from pyVim import connect
from pyVmomi import vim # pylint: disable=E0611
# project
from checks import AgentCheck
from checks.libs.thread_pool import Pool
from checks.libs.vmware.basic_metrics import BASIC_METRICS
from checks.libs.vmware.all_metrics import ALL_METRICS
from util import Timer
SOURCE_TYPE = 'vsphere'
REAL_TIME_INTERVAL = 20 # Default vCenter sampling interval
# The size of the ThreadPool used to process the request queue
DEFAULT_SIZE_POOL = 4
# The interval in seconds between two refresh of the entities list
REFRESH_MORLIST_INTERVAL = 3 * 60
# The interval in seconds between two refresh of metrics metadata (id<->name)
REFRESH_METRICS_METADATA_INTERVAL = 10 * 60
# The amount of jobs batched at the same time in the queue to query available metrics
BATCH_MORLIST_SIZE = 50
# Time after which we reap the jobs that clog the queue
# TODO: use it
JOB_TIMEOUT = 10
EXCLUDE_FILTERS = {
'AlarmStatusChangedEvent': [r'Gray'],
'TaskEvent': [
r'Initialize powering On',
r'Power Off virtual machine',
r'Power On virtual machine',
r'Reconfigure virtual machine',
r'Relocate virtual machine',
r'Suspend virtual machine',
r'Migrate virtual machine',
],
'VmBeingHotMigratedEvent': [],
'VmMessageEvent': [],
'VmMigratedEvent': [],
'VmPoweredOnEvent': [],
'VmPoweredOffEvent': [],
'VmReconfiguredEvent': [],
'VmResumedEvent': [],
'VmSuspendedEvent': [],
}
MORLIST = 'morlist'
METRICS_METADATA = 'metrics_metadata'
LAST = 'last'
INTERVAL = 'interval'
class VSphereEvent(object):
UNKNOWN = 'unknown'
def __init__(self, raw_event, event_config=None):
self.raw_event = raw_event
if self.raw_event and self.raw_event.__class__.__name__.startswith('vim.event'):
self.event_type = self.raw_event.__class__.__name__[10:]
else:
self.event_type = VSphereEvent.UNKNOWN
self.timestamp = int((self.raw_event.createdTime.replace(tzinfo=None) - datetime(1970, 1, 1)).total_seconds())
self.payload = {
"timestamp": self.timestamp,
"event_type": SOURCE_TYPE,
"source_type_name": SOURCE_TYPE,
}
if event_config is None:
self.event_config = {}
else:
self.event_config = event_config
def _is_filtered(self):
# Filter the unwanted types
if self.event_type not in EXCLUDE_FILTERS:
return True
filters = EXCLUDE_FILTERS[self.event_type]
for f in filters:
if re.search(f, self.raw_event.fullFormattedMessage):
return True
return False
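    @staticmethod
    def _example_is_filtered(event_type, message, filters=EXCLUDE_FILTERS):
        # Illustrative sketch (not used by the check): what EXCLUDE_FILTERS
        # expresses. An event type with no entry is dropped entirely, an empty
        # pattern list keeps every event of that type, and otherwise any
        # pattern matching the formatted message drops that event.
        if event_type not in filters:
            return True
        return any(re.search(pattern, message) for pattern in filters[event_type])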
def get_conmon_payload(self):
if self._is_filtered():
return None
transform_method = getattr(self, 'transform_%s' % self.event_type.lower(), None)
if callable(transform_method):
return transform_method()
# Default event transformation
self.payload["msg_title"] = u"{0}".format(self.event_type)
self.payload["msg_text"] = u"@@@\n{0}\n@@@".format(self.raw_event.fullFormattedMessage)
return self.payload
def transform_vmbeinghotmigratedevent(self):
self.payload["msg_title"] = u"VM {0} is being migrated".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"{user} has launched a hot migration of this virtual machine:\n".format(user=self.raw_event.userName)
changes = []
pre_host = self.raw_event.host.name
new_host = self.raw_event.destHost.name
pre_dc = self.raw_event.datacenter.name
new_dc = self.raw_event.destDatacenter.name
pre_ds = self.raw_event.ds.name
new_ds = self.raw_event.destDatastore.name
if pre_host == new_host:
changes.append(u"- No host migration: still {0}".format(new_host))
else:
# Insert in front if it's a change
changes = [u"- Host MIGRATION: from {0} to {1}".format(pre_host, new_host)] + changes
if pre_dc == new_dc:
changes.append(u"- No datacenter migration: still {0}".format(new_dc))
else:
# Insert in front if it's a change
changes = [u"- Datacenter MIGRATION: from {0} to {1}".format(pre_dc, new_dc)] + changes
if pre_ds == new_ds:
changes.append(u"- No datastore migration: still {0}".format(new_ds))
else:
# Insert in front if it's a change
changes = [u"- Datastore MIGRATION: from {0} to {1}".format(pre_ds, new_ds)] + changes
self.payload["msg_text"] += "\n".join(changes)
self.payload['host'] = self.raw_event.vm.name
self.payload['tags'] = [
'vsphere_host:%s' % pre_host,
'vsphere_host:%s' % new_host,
'vsphere_datacenter:%s' % pre_dc,
'vsphere_datacenter:%s' % new_dc,
]
return self.payload
def transform_alarmstatuschangedevent(self):
if self.event_config.get('collect_vcenter_alarms') is None:
return None
def get_transition(before, after):
vals = {
'gray': -1,
'green': 0,
'yellow': 1,
'red': 2
}
before = before.lower()
after = after.lower()
if before not in vals or after not in vals:
return None
if vals[before] < vals[after]:
return 'Triggered'
else:
return 'Recovered'
TO_ALERT_TYPE = {
'green': 'success',
'yellow': 'warning',
'red': 'error'
}
def get_agg_key(alarm_event):
return 'h:{0}|dc:{1}|a:{2}'.format(
md5(alarm_event.entity.name).hexdigest()[:10],
md5(alarm_event.datacenter.name).hexdigest()[:10],
md5(alarm_event.alarm.name).hexdigest()[:10]
)
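# Illustrative sketch (comments only): the alarm transition compares the ordered
# severities gray(-1) < green(0) < yellow(1) < red(2); moving up is 'Triggered',
# anything else is 'Recovered'. The aggregation key groups events of the same
# alarm on the same entity/datacenter. Example calls below are hypothetical.
#
#   get_transition('green', 'red')     # -> 'Triggered'
#   get_transition('yellow', 'green')  # -> 'Recovered'
#   get_transition('green', 'blue')    # -> None (unknown status, event dropped)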
# Get the entity type/name
if self.raw_event.entity.entity.__class__ == vim.VirtualMachine:
host_type = 'VM'
elif self.raw_event.entity.entity.__class__ == vim.HostSystem:
host_type = 'host'
else:
return None
host_name = self.raw_event.entity.name
# Need a getattr because from is a reserved keyword...
trans_before = getattr(self.raw_event, 'from')
trans_after = self.raw_event.to
transition = get_transition(trans_before, trans_after)
# Unexpected transition value, drop the event
if transition is None:
return None
self.payload['msg_title'] = u"[{transition}] {monitor} on {host_type} {host_name} is now {status}".format(
transition=transition,
monitor=self.raw_event.alarm.name,
host_type=host_type,
host_name=host_name,
status=trans_after
)
self.payload['alert_type'] = TO_ALERT_TYPE[trans_after]
self.payload['event_object'] = get_agg_key(self.raw_event)
self.payload['msg_text'] = u"""vCenter monitor status changed on this alarm, it was {before} and it's now {after}.""".format(
before=trans_before,
after=trans_after
)
self.payload['host'] = host_name
return self.payload
def transform_vmmessageevent(self):
self.payload["msg_title"] = u"VM {0} is reporting".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"@@@\n{0}\n@@@".format(self.raw_event.fullFormattedMessage)
self.payload['host'] = self.raw_event.vm.name
return self.payload
def transform_vmmigratedevent(self):
self.payload["msg_title"] = u"VM {0} has been migrated".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"@@@\n{0}\n@@@".format(self.raw_event.fullFormattedMessage)
self.payload['host'] = self.raw_event.vm.name
return self.payload
def transform_vmpoweredoffevent(self):
self.payload["msg_title"] = u"VM {0} has been powered OFF".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"""{user} has powered off this virtual machine. It was running on:
- datacenter: {dc}
- host: {host}
""".format(
user=self.raw_event.userName,
dc=self.raw_event.datacenter.name,
host=self.raw_event.host.name
)
self.payload['host'] = self.raw_event.vm.name
return self.payload
def transform_vmpoweredonevent(self):
self.payload["msg_title"] = u"VM {0} has been powered ON".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"""{user} has powered on this virtual machine. It is running on:
- datacenter: {dc}
- host: {host}
""".format(
user=self.raw_event.userName,
dc=self.raw_event.datacenter.name,
host=self.raw_event.host.name
)
self.payload['host'] = self.raw_event.vm.name
return self.payload
def transform_vmresumingevent(self):
self.payload["msg_title"] = u"VM {0} is RESUMING".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"""{user} has resumed {vm}. It will soon be powered on.""".format(
user=self.raw_event.userName,
vm=self.raw_event.vm.name
)
self.payload['host'] = self.raw_event.vm.name
return self.payload
def transform_vmsuspendedevent(self):
self.payload["msg_title"] = u"VM {0} has been SUSPENDED".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"""{user} has suspended this virtual machine. It was running on:
- datacenter: {dc}
- host: {host}
""".format(
user=self.raw_event.userName,
dc=self.raw_event.datacenter.name,
host=self.raw_event.host.name
)
self.payload['host'] = self.raw_event.vm.name
return self.payload
def transform_vmreconfiguredevent(self):
self.payload["msg_title"] = u"VM {0} configuration has been changed".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"{user} saved the new configuration:\n@@@\n".format(user=self.raw_event.userName)
# Add one line per configuration change; skip the 'unset' lines (hacky, but keeps the event readable)
config_change_lines = [line for line in self.raw_event.configSpec.__repr__().splitlines() if 'unset' not in line]
self.payload["msg_text"] += u"\n".join(config_change_lines)
self.payload["msg_text"] += u"\n@@@"
self.payload['host'] = self.raw_event.vm.name
return self.payload
def atomic_method(method):
""" Decorator to catch the exceptions that happen in detached thread atomic tasks
and display them in the logs.
"""
def wrapper(*args, **kwargs):
try:
method(*args, **kwargs)
except Exception:
args[0].exceptionq.put("A worker thread crashed:\n" + traceback.format_exc())
return wrapper
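# Illustrative sketch (comments only), assuming a hypothetical worker object with
# an `exceptionq` Queue attribute, which is what the decorator expects as args[0]:
#
#   class Worker(object):
#       def __init__(self):
#           self.exceptionq = Queue()
#
#       @atomic_method
#       def job(self):
#           raise ValueError("boom")       # swallowed, traceback pushed to exceptionq
#
#   w = Worker()
#   w.job()                                # no exception raised here
#   print w.exceptionq.get_nowait()        # "A worker thread crashed:\n..."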
class VSphereCheck(AgentCheck):
""" Get performance metrics from a vCenter server and upload them to Conmon
References:
http://pubs.vmware.com/vsphere-51/index.jsp#com.vmware.wssdk.apiref.doc/vim.PerformanceManager.html
*_atomic jobs perform a single task asynchronously in the ThreadPool; we
don't know exactly when they will finish, but we reap them if they get stuck.
The other calls are performed synchronously.
"""
SERVICE_CHECK_NAME = 'vcenter.can_connect'
def __init__(self, name, init_config, agentConfig, instances):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
self.time_started = time.time()
self.pool_started = False
self.exceptionq = Queue()
# Connections open to vCenter instances
self.server_instances = {}
# Event configuration
self.event_config = {}
# Caching resources, timeouts
self.cache_times = {}
for instance in self.instances:
i_key = self._instance_key(instance)
self.cache_times[i_key] = {
MORLIST: {
LAST: 0,
INTERVAL: init_config.get('refresh_morlist_interval',
REFRESH_MORLIST_INTERVAL)
},
METRICS_METADATA: {
LAST: 0,
INTERVAL: init_config.get('refresh_metrics_metadata_interval',
REFRESH_METRICS_METADATA_INTERVAL)
}
}
self.event_config[i_key] = instance.get('event_config')
# First layer of cache (get entities from the tree)
self.morlist_raw = {}
# Second layer, processed from the first one
self.morlist = {}
# Metrics metadata, basically perfCounterId -> {name, group, description}
self.metrics_metadata = {}
self.latest_event_query = {}
def stop(self):
self.stop_pool()
def start_pool(self):
self.log.info("Starting Thread Pool")
self.pool_size = int(self.init_config.get('threads_count', DEFAULT_SIZE_POOL))
self.pool = Pool(self.pool_size)
self.pool_started = True
self.jobs_status = {}
def stop_pool(self):
self.log.info("Stopping Thread Pool")
if self.pool_started:
self.pool.terminate()
self.pool.join()
self.jobs_status.clear()
assert self.pool.get_nworkers() == 0
self.pool_started = False
def restart_pool(self):
self.stop_pool()
self.start_pool()
def _clean(self):
now = time.time()
# TODO: use that
for name in self.jobs_status.keys():
start_time = self.jobs_status[name]
if now - start_time > JOB_TIMEOUT:
self.log.critical("Restarting Pool. One check is stuck.")
self.restart_pool()
break
def _query_event(self, instance):
i_key = self._instance_key(instance)
last_time = self.latest_event_query.get(i_key)
server_instance = self._get_server_instance(instance)
event_manager = server_instance.content.eventManager
# Be sure we don't duplicate any event, never query the "past"
if not last_time:
last_time = self.latest_event_query[i_key] = \
event_manager.latestEvent.createdTime + timedelta(seconds=1)
query_filter = vim.event.EventFilterSpec()
time_filter = vim.event.EventFilterSpec.ByTime(beginTime=self.latest_event_query[i_key])
query_filter.time = time_filter
try:
new_events = event_manager.QueryEvents(query_filter)
self.log.debug("Got {0} events from vCenter event manager".format(len(new_events)))
for event in new_events:
normalized_event = VSphereEvent(event, self.event_config[i_key])
# Can return None if the event is filtered out
event_payload = normalized_event.get_conmon_payload()
if event_payload is not None:
self.event(event_payload)
last_time = event.createdTime + timedelta(seconds=1)
except Exception as e:
# Don't get stuck on a failure to fetch events:
# skip them and move the cursor forward for the next pass
self.log.warning("Unable to fetch Events %s", e)
last_time = event_manager.latestEvent.createdTime + timedelta(seconds=1)
self.latest_event_query[i_key] = last_time
def _instance_key(self, instance):
i_key = instance.get('name')
if i_key is None:
raise Exception("Must define a unique 'name' per vCenter instance")
return i_key
def _should_cache(self, instance, entity):
i_key = self._instance_key(instance)
now = time.time()
return now - self.cache_times[i_key][entity][LAST] > self.cache_times[i_key][entity][INTERVAL]
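# Illustrative sketch (comments only): a cached entity is refreshed once its
# configured interval has elapsed since the last refresh. Numbers are hypothetical.
#
#   cache_times['vcenter1'][MORLIST] = {LAST: 1000.0, INTERVAL: 300}
#   now = 1250.0   # 250s elapsed -> 250 > 300 is False -> keep using the cache
#   now = 1400.0   # 400s elapsed -> 400 > 300 is True  -> refresh the morlist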
def _get_server_instance(self, instance):
i_key = self._instance_key(instance)
service_check_tags = [
'vcenter_server:{0}'.format(instance.get('name')),
'vcenter_host:{0}'.format(instance.get('host')),
]
# Check for ssl configs and generate an appropriate ssl context object
ssl_verify = instance.get('ssl_verify', True)
ssl_capath = instance.get('ssl_capath', None)
if not ssl_verify:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_NONE
elif ssl_capath:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(capath=ssl_capath)
# If both configs are used, log a message explaining the default
if not ssl_verify and ssl_capath:
self.log.debug("Your configuration is incorrectly attempting to "
"specify both a CA path, and to disable SSL "
"verification. You cannot do both. Proceeding with "
"disabling ssl verification.")
if i_key not in self.server_instances:
try:
server_instance = connect.SmartConnect(
host = instance.get('host'),
user = instance.get('username'),
pwd = instance.get('password'),
sslContext = context if not ssl_verify or ssl_capath else None
)
except Exception as e:
err_msg = "Connection to %s failed: %s" % (instance.get('host'), e)
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
tags=service_check_tags, message=err_msg)
raise Exception(err_msg)
self.server_instances[i_key] = server_instance
# Test if the connection is working
try:
self.server_instances[i_key].RetrieveContent()
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
tags=service_check_tags)
except Exception as e:
err_msg = "Connection to %s died unexpectedly: %s" % (instance.get('host'), e)
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
tags=service_check_tags, message=err_msg)
raise Exception(err_msg)
return self.server_instances[i_key]
def _compute_needed_metrics(self, instance, available_metrics):
""" Compare the available metrics for one MOR we have computed and intersect them
with the set of metrics we want to report
"""
if instance.get('all_metrics', False):
return available_metrics
i_key = self._instance_key(instance)
wanted_metrics = []
# Get only the basic metrics
for metric in available_metrics:
# No cache yet, skip it for now
if (i_key not in self.metrics_metadata
or metric.counterId not in self.metrics_metadata[i_key]):
continue
if self.metrics_metadata[i_key][metric.counterId]['name'] in BASIC_METRICS:
wanted_metrics.append(metric)
return wanted_metrics
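# Illustrative sketch (comments only): unless `all_metrics` is set on the
# instance, only counters whose resolved name is in BASIC_METRICS are kept.
# The ids and names below are hypothetical.
#
#   metrics_metadata['vcenter1'] = {6: {'name': 'cpu.usage.avg'},
#                                   7: {'name': 'cpu.wait.sum'}}
#   BASIC_METRICS contains 'cpu.usage.avg' but not 'cpu.wait.sum'
#   available counterIds [6, 7]  ->  wanted counterIds [6]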
def get_external_host_tags(self):
""" Returns a list of tags for every host that is detected by the vSphere
integration.
List of pairs (hostname, list_of_tags)
"""
self.log.info("Sending external_host_tags now")
external_host_tags = []
for instance in self.instances:
i_key = self._instance_key(instance)
mor_list = self.morlist[i_key].items()
for mor_name, mor in mor_list:
external_host_tags.append((mor['hostname'], {SOURCE_TYPE: mor['tags']}))
return external_host_tags
@atomic_method
def _cache_morlist_raw_atomic(self, i_key, obj_type, obj, tags, regexes=None):
""" Compute tags for a single node in the vCenter rootFolder
and queue other such jobs for children nodes.
Usual hierarchy:
rootFolder
- datacenter1
- compute_resource1 == cluster
- host1
- host2
- host3
- compute_resource2
- host5
- vm1
- vm2
If it's a node we want to query metrics for, append it to self.morlist_raw,
which will be processed by another job.
"""
### <TEST-INSTRUMENTATION>
t = Timer()
self.log.debug("job_atomic: Exploring MOR {0} (type={1})".format(obj, obj_type))
### </TEST-INSTRUMENTATION>
tags_copy = deepcopy(tags)
if obj_type == 'rootFolder':
for datacenter in obj.childEntity:
# Skip non-datacenter
if not hasattr(datacenter, 'hostFolder'):
continue
self.pool.apply_async(
self._cache_morlist_raw_atomic,
args=(i_key, 'datacenter', datacenter, tags_copy, regexes)
)
elif obj_type == 'datacenter':
dc_tag = "vsphere_datacenter:%s" % obj.name
tags_copy.append(dc_tag)
for compute_resource in obj.hostFolder.childEntity:
# Skip non-compute resource
if not hasattr(compute_resource, 'host'):
continue
self.pool.apply_async(
self._cache_morlist_raw_atomic,
args=(i_key, 'compute_resource', compute_resource, tags_copy, regexes)
)
elif obj_type == 'compute_resource':
if obj.__class__ == vim.ClusterComputeResource:
cluster_tag = "vsphere_cluster:%s" % obj.name
tags_copy.append(cluster_tag)
for host in obj.host:
# Skip non-host
if not hasattr(host, 'vm'):
continue
self.pool.apply_async(
self._cache_morlist_raw_atomic,
args=(i_key, 'host', host, tags_copy, regexes)
)
elif obj_type == 'host':
if regexes and regexes.get('host_include') is not None:
match = re.search(regexes['host_include'], obj.name)
if not match:
self.log.debug(u"Filtered out VM {0} because of host_include_only_regex".format(obj.name))
return
watched_mor = dict(mor_type='host', mor=obj, hostname=obj.name, tags=tags_copy+['vsphere_type:host'])
self.morlist_raw[i_key].append(watched_mor)
host_tag = "vsphere_host:%s" % obj.name
tags_copy.append(host_tag)
for vm in obj.vm:
if vm.runtime.powerState != 'poweredOn':
continue
self.pool.apply_async(
self._cache_morlist_raw_atomic,
args=(i_key, 'vm', vm, tags_copy, regexes)
)
elif obj_type == 'vm':
if regexes and regexes.get('vm_include') is not None:
match = re.search(regexes['vm_include'], obj.name)
if not match:
self.log.debug(u"Filtered out VM {0} because of vm_include_only_regex".format(obj.name))
return
watched_mor = dict(mor_type='vm', mor=obj, hostname=obj.name, tags=tags_copy+['vsphere_type:vm'])
self.morlist_raw[i_key].append(watched_mor)
### <TEST-INSTRUMENTATION>
self.histogram('conmon.agent.vsphere.morlist_raw_atomic.time', t.total())
### </TEST-INSTRUMENTATION>
def _cache_morlist_raw(self, instance):
""" Initiate the first layer to refresh self.morlist by queueing
_cache_morlist_raw_atomic on the rootFolder in a recursive/asncy approach
"""
i_key = self._instance_key(instance)
self.log.debug("Caching the morlist for vcenter instance %s" % i_key)
if i_key in self.morlist_raw and len(self.morlist_raw[i_key]) > 0:
self.log.debug(
"Skipping morlist collection now, RAW results "
"processing not over (latest refresh was {0}s ago)".format(
time.time() - self.cache_times[i_key][MORLIST][LAST])
)
return
self.morlist_raw[i_key] = []
server_instance = self._get_server_instance(instance)
root_folder = server_instance.content.rootFolder
instance_tag = "vcenter_server:%s" % instance.get('name')
regexes = {
'host_include': instance.get('host_include_only_regex'),
'vm_include': instance.get('vm_include_only_regex')
}
self.pool.apply_async(
self._cache_morlist_raw_atomic,
args=(i_key, 'rootFolder', root_folder, [instance_tag], regexes)
)
self.cache_times[i_key][MORLIST][LAST] = time.time()
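# Illustrative sketch (comments only) of the instance configuration keys consumed
# above; the YAML values below are hypothetical:
#
#   instances:
#     - name: vcenter1
#       host: vcenter.example.com
#       host_include_only_regex: ^esxi-prod-
#       vm_include_only_regex: ^(web|db)-
#
# Hosts and VMs whose names do not match the corresponding regex are skipped
# during the rootFolder traversal.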
@atomic_method
def _cache_morlist_process_atomic(self, instance, mor):
""" Process one item of the self.morlist_raw list by querying the available
metrics for this MOR and then putting it in self.morlist
"""
### <TEST-INSTRUMENTATION>
t = Timer()
### </TEST-INSTRUMENTATION>
i_key = self._instance_key(instance)
server_instance = self._get_server_instance(instance)
perfManager = server_instance.content.perfManager
self.log.debug(
"job_atomic: Querying available metrics"
" for MOR {0} (type={1})".format(mor['mor'], mor['mor_type'])
)
available_metrics = perfManager.QueryAvailablePerfMetric(
mor['mor'], intervalId=REAL_TIME_INTERVAL)
mor['metrics'] = self._compute_needed_metrics(instance, available_metrics)
mor_name = str(mor['mor'])
if mor_name in self.morlist[i_key]:
# Was already here last iteration
self.morlist[i_key][mor_name]['metrics'] = mor['metrics']
else:
self.morlist[i_key][mor_name] = mor
self.morlist[i_key][mor_name]['last_seen'] = time.time()
### <TEST-INSTRUMENTATION>
self.histogram('conmon.agent.vsphere.morlist_process_atomic.time', t.total())
### </TEST-INSTRUMENTATION>
def _cache_morlist_process(self, instance):
""" Empties the self.morlist_raw by popping items and running asynchronously
the _cache_morlist_process_atomic operation that will get the available
metrics for this MOR and put it in self.morlist
"""
i_key = self._instance_key(instance)
if i_key not in self.morlist:
self.morlist[i_key] = {}
batch_size = self.init_config.get('batch_morlist_size', BATCH_MORLIST_SIZE)
for i in xrange(batch_size):
try:
mor = self.morlist_raw[i_key].pop()
self.pool.apply_async(self._cache_morlist_process_atomic, args=(instance, mor))
except (IndexError, KeyError):
self.log.debug("No more work to process in morlist_raw")
return
def _vacuum_morlist(self, instance):
""" Check if self.morlist doesn't have some old MORs that are gone, ie
we cannot get any metrics from them anyway (or =0)
"""
i_key = self._instance_key(instance)
morlist = self.morlist[i_key].items()
for mor_name, mor in morlist:
last_seen = mor['last_seen']
if (time.time() - last_seen) > 2 * REFRESH_MORLIST_INTERVAL:
del self.morlist[i_key][mor_name]
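# Illustrative sketch (comments only): a MOR is evicted once it has not been
# seen for more than twice the morlist refresh interval. Numbers are hypothetical.
#
#   refresh interval = 180s
#   last_seen = now - 400    # 400 > 2 * 180 -> evicted
#   last_seen = now - 200    # 200 <= 360    -> kept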
def _cache_metrics_metadata(self, instance):
""" Get from the server instance, all the performance counters metadata
meaning name/group/description... attached with the corresponding ID
"""
### <TEST-INSTRUMENTATION>
t = Timer()
### </TEST-INSTRUMENTATION>
i_key = self._instance_key(instance)
self.log.info("Warming metrics metadata cache for instance {0}".format(i_key))
server_instance = self._get_server_instance(instance)
perfManager = server_instance.content.perfManager
new_metadata = {}
for counter in perfManager.perfCounter:
d = dict(
name = "%s.%s" % (counter.groupInfo.key, counter.nameInfo.key),
unit = counter.unitInfo.key,
instance_tag = 'instance' # FIXME: replace by what we want to tag!
)
new_metadata[counter.key] = d
self.cache_times[i_key][METRICS_METADATA][LAST] = time.time()
self.log.info("Finished metadata collection for instance {0}".format(i_key))
# Reset metadata
self.metrics_metadata[i_key] = new_metadata
### <TEST-INSTRUMENTATION>
self.histogram('conmon.agent.vsphere.metric_metadata_collection.time', t.total())
### </TEST-INSTRUMENTATION>
def _transform_value(self, instance, counter_id, value):
""" Given the counter_id, look up for the metrics metadata to check the vsphere
type of the counter and apply pre-reporting transformation if needed.
"""
i_key = self._instance_key(instance)
if counter_id in self.metrics_metadata[i_key]:
unit = self.metrics_metadata[i_key][counter_id]['unit']
if unit == 'percent':
return float(value) / 100
# Defaults to return the value without transformation
return value
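# Illustrative sketch (comments only): counters whose unit is 'percent' are
# divided by 100 before reporting, everything else is passed through unchanged.
# `counter_id_for` and the values below are hypothetical.
#
#   _transform_value(instance, counter_id_for('cpu.usage.avg'), 2050)      # -> 20.5
#   _transform_value(instance, counter_id_for('mem.vmmemctl.avg'), 1024)   # -> 1024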
@atomic_method
def _collect_metrics_atomic(self, instance, mor):
""" Task that collects the metrics listed in the morlist for one MOR
"""
### <TEST-INSTRUMENTATION>
t = Timer()
### </TEST-INSTRUMENTATION>
i_key = self._instance_key(instance)
server_instance = self._get_server_instance(instance)
perfManager = server_instance.content.perfManager
query = vim.PerformanceManager.QuerySpec(maxSample=1,
entity=mor['mor'],
metricId=mor['metrics'],
intervalId=20,
format='normal')
results = perfManager.QueryPerf(querySpec=[query])
if results:
for result in results[0].value:
if result.id.counterId not in self.metrics_metadata[i_key]:
self.log.debug("Skipping this metric value, because there is no metadata about it")
continue
instance_name = result.id.instance or "none"
value = self._transform_value(instance, result.id.counterId, result.value[0])
# Metric types are absolute, delta, and rate
if ALL_METRICS[self.metrics_metadata[i_key][result.id.counterId]['name']]['s_type'] == 'rate':
record_metric = self.rate
else:
record_metric = self.gauge
record_metric(
"vsphere.%s" % self.metrics_metadata[i_key][result.id.counterId]['name'],
value,
hostname=mor['hostname'],
tags=['instance:%s' % instance_name]
)
### <TEST-INSTRUMENTATION>
self.histogram('conmon.agent.vsphere.metric_colection.time', t.total())
### </TEST-INSTRUMENTATION>
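# Illustrative sketch (comments only): the submission method depends on the
# statsd type declared for the counter in ALL_METRICS. The metric names below
# are hypothetical.
#
#   ALL_METRICS['cpu.usage.avg']['s_type'] == 'gauge'    -> self.gauge(...)
#   ALL_METRICS['net.received.avg']['s_type'] == 'rate'  -> self.rate(...)
#
# Each point is tagged with the per-counter instance (e.g. 'instance:vmnic0'),
# defaulting to 'instance:none' when vSphere returns an empty instance id.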
def collect_metrics(self, instance):
""" Calls asynchronously _collect_metrics_atomic on all MORs, as the
job queue is processed the Aggregator will receive the metrics.
"""
i_key = self._instance_key(instance)
if i_key not in self.morlist:
self.log.debug("Not collecting metrics for this instance, nothing to do yet: {0}".format(i_key))
return
mors = self.morlist[i_key].items()
self.log.debug("Collecting metrics of %d mors" % len(mors))
vm_count = 0
for mor_name, mor in mors:
if mor['mor_type'] == 'vm':
vm_count += 1
if 'metrics' not in mor:
# self.log.debug("Skipping entity %s collection because we didn't cache its metrics yet" % mor['hostname'])
continue
self.pool.apply_async(self._collect_metrics_atomic, args=(instance, mor))
self.gauge('vsphere.vm.count', vm_count, tags=["vcenter_server:%s" % instance.get('name')])
def check(self, instance):
if not self.pool_started:
self.start_pool()
### <TEST-INSTRUMENTATION>
self.gauge('conmon.agent.vsphere.queue_size', self.pool._workq.qsize(), tags=['instant:initial'])
### </TEST-INSTRUMENTATION>
# First part: make sure our object repository is neat & clean
if self._should_cache(instance, METRICS_METADATA):
self._cache_metrics_metadata(instance)
if self._should_cache(instance, MORLIST):
self._cache_morlist_raw(instance)
self._cache_morlist_process(instance)
self._vacuum_morlist(instance)
# Second part: do the job
self.collect_metrics(instance)
self._query_event(instance)
# For our own sanity
self._clean()
thread_crashed = False
try:
while True:
self.log.critical(self.exceptionq.get_nowait())
thread_crashed = True
except Empty:
pass
if thread_crashed:
self.stop_pool()
raise Exception("One thread in the pool crashed, check the logs")
### <TEST-INSTRUMENTATION>
self.gauge('conmon.agent.vsphere.queue_size', self.pool._workq.qsize(), tags=['instant:final'])
### </TEST-INSTRUMENTATION>
if __name__ == '__main__':
check, _instances = VSphereCheck.from_yaml('conf.d/vsphere.yaml')
try:
for i in xrange(200):
print "Loop %d" % i
for instance in check.instances:
check.check(instance)
if check.has_events():
print 'Events: %s' % (check.get_events())
print 'Metrics: %d' % (len(check.get_metrics()))
time.sleep(10)
except Exception as e:
print "Whoops something happened {0}".format(traceback.format_exc())
finally:
check.stop()